commit d99c8c04840736f2044a144493a83bfb0d95efff Author: Alibek Omarov Date: Wed Jul 14 01:44:10 2021 +0300 linux-headers-5.4.0-2.3 diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2ac6cfb --- /dev/null +++ b/Makefile @@ -0,0 +1,1877 @@ +# SPDX-License-Identifier: GPL-2.0 +VERSION = 5 +PATCHLEVEL = 4 +SUBLEVEL = 58 +EXTRAVERSION = -2.3 +NAME = Kleptomaniac Octopus + +# *DOCUMENTATION* +# To see a list of typical targets execute "make help" +# More info can be located in ./README +# Comments in this file are targeted only to the developer, do not +# expect to learn how to build the kernel reading this file. + +# That's our default target when none is given on the command line +PHONY := _all +_all: + +# We are using a recursive build, so we need to do a little thinking +# to get the ordering right. +# +# Most importantly: sub-Makefiles should only ever modify files in +# their own directory. If in some directory we have a dependency on +# a file in another dir (which doesn't happen often, but it's often +# unavoidable when linking the built-in.a targets which finally +# turn into vmlinux), we will call a sub make in that other dir, and +# after that we are sure that everything which is in that other dir +# is now up to date. +# +# The only cases where we need to modify files which have global +# effects are thus separated out and done before the recursive +# descending is started. They are now explicitly listed as the +# prepare rule. + +ifneq ($(sub_make_done),1) + +# Do not use make's built-in rules and variables +# (this increases performance and avoids hard-to-debug behaviour) +MAKEFLAGS += -rR + +# Avoid funny character set dependencies +unexport LC_ALL +LC_COLLATE=C +LC_NUMERIC=C +export LC_COLLATE LC_NUMERIC + +# Avoid interference with shell env settings +unexport GREP_OPTIONS + +# Beautify output +# --------------------------------------------------------------------------- +# +# Normally, we echo the whole command before executing it. By making +# that echo $($(quiet)$(cmd)), we now have the possibility to set +# $(quiet) to choose other forms of output instead, e.g. +# +# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@ +# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< +# +# If $(quiet) is empty, the whole command will be printed. +# If it is set to "quiet_", only the short version will be printed. +# If it is set to "silent_", nothing will be printed at all, since +# the variable $(silent_cmd_cc_o_c) doesn't exist. +# +# A simple variant is to prefix commands with $(Q) - that's useful +# for commands that shall be hidden in non-verbose mode. +# +# $(Q)ln $@ :< +# +# If KBUILD_VERBOSE equals 0 then the above command will be hidden. +# If KBUILD_VERBOSE equals 1 then the above command is displayed. +# +# To put more focus on warnings, be less verbose as default +# Use 'make V=1' to see the full commands + +ifeq ("$(origin V)", "command line") + KBUILD_VERBOSE = $(V) +endif +ifndef KBUILD_VERBOSE + KBUILD_VERBOSE = 0 +endif + +ifeq ($(KBUILD_VERBOSE),1) + quiet = + Q = +else + quiet=quiet_ + Q = @ +endif + +# If the user is running make -s (silent mode), suppress echoing of +# commands + +ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),) + quiet=silent_ +endif + +export quiet Q KBUILD_VERBOSE + +# Kbuild will save output files in the current working directory. +# This does not need to match to the root of the kernel source tree. 
+#
+# For example, you can do this:
+#
+#   cd /dir/to/store/output/files; make -f /dir/to/kernel/source/Makefile
+#
+# If you want to save output files in a different location, there are
+# two syntaxes to specify it.
+#
+# 1) O=
+# Use "make O=dir/to/store/output/files/"
+#
+# 2) Set KBUILD_OUTPUT
+# Set the environment variable KBUILD_OUTPUT to point to the output directory.
+# export KBUILD_OUTPUT=dir/to/store/output/files/; make
+#
+# The O= assignment takes precedence over the KBUILD_OUTPUT environment
+# variable.
+
+# Do we want to change the working directory?
+ifeq ("$(origin O)", "command line")
+  KBUILD_OUTPUT := $(O)
+endif
+
+ifneq ($(KBUILD_OUTPUT),)
+# Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
+# expand a shell special character '~'. We use a somewhat tedious way here.
+abs_objtree := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
+$(if $(abs_objtree),, \
+     $(error failed to create output directory "$(KBUILD_OUTPUT)"))
+
+# $(realpath ...) resolves symlinks
+abs_objtree := $(realpath $(abs_objtree))
+else
+abs_objtree := $(CURDIR)
+endif # ifneq ($(KBUILD_OUTPUT),)
+
+ifeq ($(abs_objtree),$(CURDIR))
+# Suppress "Entering directory ..." unless we are changing the work directory.
+MAKEFLAGS += --no-print-directory
+else
+need-sub-make := 1
+endif
+
+abs_srctree := $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
+
+ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
+$(error source directory cannot contain spaces or colons)
+endif
+
+ifneq ($(abs_srctree),$(abs_objtree))
+# Look for make include files relative to root of kernel src
+#
+# This does not become effective immediately because MAKEFLAGS is re-parsed
+# once after the Makefile is read. We need to invoke sub-make.
+MAKEFLAGS += --include-dir=$(abs_srctree)
+need-sub-make := 1
+endif
+
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
+
+export abs_srctree abs_objtree
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
+PHONY += $(MAKECMDGOALS) sub-make
+
+$(filter-out _all sub-make $(lastword $(MAKEFILE_LIST)), $(MAKECMDGOALS)) _all: sub-make
+	@:
+
+# Invoke a second make in the output directory, passing relevant variables
+sub-make:
+	$(Q)$(MAKE) -C $(abs_objtree) -f $(abs_srctree)/Makefile $(MAKECMDGOALS)
+
+endif # need-sub-make
+endif # sub_make_done
+
+# We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(need-sub-make),)
+
+# Do not print "Entering directory ...",
+# but we want to display it when entering the output directory
+# so that IDEs/editors are able to understand relative filenames.
+MAKEFLAGS += --no-print-directory
+
+# Call a source code checker (by default, "sparse") as part of the
+# C compilation.
+#
+# Use 'make C=1' to enable checking of only re-compiled files.
+# Use 'make C=2' to enable checking of *all* source files, regardless
+# of whether they are re-compiled or not.
+#
+# See the file "Documentation/dev-tools/sparse.rst" for more details,
+# including where to get the "sparse" utility.
+
+ifeq ("$(origin C)", "command line")
+  KBUILD_CHECKSRC = $(C)
+endif
+ifndef KBUILD_CHECKSRC
+  KBUILD_CHECKSRC = 0
+endif
+
+# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
+# directory of the external module to build. Setting M= takes precedence.
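+#
+# For example, to build an external module in the current directory against
+# this tree (the kernel source path is a placeholder):
+#
+#   make -C /dir/to/kernel/source M=$PWD modules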
+ifeq ("$(origin M)", "command line")
+  KBUILD_EXTMOD := $(M)
+endif
+
+export KBUILD_CHECKSRC KBUILD_EXTMOD
+
+extmod-prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)
+
+ifeq ($(abs_srctree),$(abs_objtree))
+        # building in the source tree
+        srctree := .
+	building_out_of_srctree :=
+else
+        ifeq ($(abs_srctree)/,$(dir $(abs_objtree)))
+                # building in a subdirectory of the source tree
+                srctree := ..
+        else
+                srctree := $(abs_srctree)
+        endif
+	building_out_of_srctree := 1
+endif
+
+ifneq ($(KBUILD_ABS_SRCTREE),)
+srctree := $(abs_srctree)
+endif
+
+objtree		:= .
+VPATH		:= $(srctree)
+
+export building_out_of_srctree srctree objtree VPATH
+
+# To make sure we do not include .config for any of the *config targets,
+# catch them early, and hand them over to scripts/kconfig/Makefile.
+# It is allowed to specify more targets when calling make, including
+# mixing *config targets and build targets.
+# For example 'make oldconfig all'.
+# Detect when mixed targets are specified, and make a second invocation
+# of make so .config is not included in this case either (for *config).
+
+version_h := include/generated/uapi/linux/version.h
+old_version_h := include/linux/version.h
+
+clean-targets := %clean mrproper cleandocs
+no-dot-config-targets := $(clean-targets) \
+			 cscope gtags TAGS tags help% %docs check% coccicheck \
+			 $(version_h) headers headers_% archheaders archscripts \
+			 %asm-generic kernelversion %src-pkg
+no-sync-config-targets := $(no-dot-config-targets) install %install \
+			   kernelrelease
+single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
+
+config-build	:=
+mixed-build	:=
+need-config	:= 1
+may-sync-config	:= 1
+single-build	:=
+
+ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
+	ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
+		need-config :=
+	endif
+endif
+
+ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
+	ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
+		may-sync-config :=
+	endif
+endif
+
+ifneq ($(KBUILD_EXTMOD),)
+	may-sync-config :=
+endif
+
+ifeq ($(KBUILD_EXTMOD),)
+	ifneq ($(filter config %config,$(MAKECMDGOALS)),)
+		config-build := 1
+		ifneq ($(words $(MAKECMDGOALS)),1)
+			mixed-build := 1
+		endif
+	endif
+endif
+
+# We cannot build single targets and the others at the same time
+ifneq ($(filter $(single-targets), $(MAKECMDGOALS)),)
+	single-build := 1
+	ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
+		mixed-build := 1
+	endif
+endif
+
+# For "make -j clean all", "make -j mrproper defconfig all", etc.
+ifneq ($(filter $(clean-targets),$(MAKECMDGOALS)),)
+	ifneq ($(filter-out $(clean-targets),$(MAKECMDGOALS)),)
+		mixed-build := 1
+	endif
+endif
+
+# install and modules_install also need to be processed one by one
+ifneq ($(filter install,$(MAKECMDGOALS)),)
+	ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
+		mixed-build := 1
+	endif
+endif
+
+ifdef mixed-build
+# ===========================================================================
+# We're called with mixed targets (*config and build targets).
+# Handle them one by one.
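+#
+# For example, with "make oldconfig all" the loop below runs a separate
+# sub-make for each goal, so the .config written by oldconfig is already
+# in place by the time "all" is built.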
+
+PHONY += $(MAKECMDGOALS) __build_one_by_one
+
+$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one
+	@:
+
+__build_one_by_one:
+	$(Q)set -e; \
+	for i in $(MAKECMDGOALS); do \
+		$(MAKE) -f $(srctree)/Makefile $$i; \
+	done
+
+else # !mixed-build
+
+include scripts/Kbuild.include
+
+# Read KERNELRELEASE from include/config/kernel.release (if it exists)
+KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
+KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
+export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
+
+include scripts/subarch.include
+
+# Cross compiling and selecting a different set of gcc/bin-utils
+# ---------------------------------------------------------------------------
+#
+# When performing cross compilation for other architectures, ARCH shall be set
+# to the target architecture. (See arch/* for the possibilities).
+# ARCH can be set during invocation of make:
+# make ARCH=ia64
+# Another way is to have ARCH set in the environment.
+# The default ARCH is the host where make is executed.
+
+# CROSS_COMPILE specifies the prefix used for all executables used
+# during compilation. Only gcc and related bin-utils executables
+# are prefixed with $(CROSS_COMPILE).
+# CROSS_COMPILE can be set on the command line
+# make CROSS_COMPILE=ia64-linux-
+# Alternatively CROSS_COMPILE can be set in the environment.
+# Default value for CROSS_COMPILE is not to prefix executables
+# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
+ARCH		?= $(SUBARCH)
+
+# Architecture as present in compile.h
+UTS_MACHINE	:= $(ARCH)
+SRCARCH		:= $(ARCH)
+
+# Additional ARCH settings for x86
+ifeq ($(ARCH),i386)
+        SRCARCH := x86
+endif
+ifeq ($(ARCH),x86_64)
+        SRCARCH := x86
+endif
+
+# Additional ARCH settings for sparc
+ifeq ($(ARCH),sparc32)
+       SRCARCH := sparc
+endif
+ifeq ($(ARCH),sparc64)
+       SRCARCH := sparc
+endif
+
+# Additional ARCH settings for sh
+ifeq ($(ARCH),sh64)
+       SRCARCH := sh
+endif
+
+KCONFIG_CONFIG	?= .config
+export KCONFIG_CONFIG
+
+# SHELL used by kbuild
+CONFIG_SHELL := sh
+
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
+
+HOSTCC       = gcc
+HOSTCXX      = g++
+KBUILD_HOSTCFLAGS   := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
+		-fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
+		$(HOSTCFLAGS)
+KBUILD_HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
+KBUILD_HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
+KBUILD_HOSTLDLIBS   := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
+
+# Make variables (CC, etc...)
+CC = $(CROSS_COMPILE)gcc +CPP = $(CC) -E +ifeq ($(call cc-lcc-yn),y) +AS := $(shell $(CC) -print-prog-name=as) +LD := $(shell $(CC) -print-prog-name=ld) +AR := $(shell $(CC) -print-prog-name=ar) +NM := $(shell $(CC) -print-prog-name=nm) +STRIP := $(shell $(CC) -print-prog-name=strip) +OBJCOPY := $(shell $(CC) -print-prog-name=objcopy) +OBJDUMP := $(shell $(CC) -print-prog-name=objdump) +OBJSIZE := $(shell $(CC) -print-prog-name=size) +else +AS = $(CROSS_COMPILE)as +LD = $(CROSS_COMPILE)ld +AR = $(CROSS_COMPILE)ar +NM = $(CROSS_COMPILE)nm +STRIP = $(CROSS_COMPILE)strip +OBJCOPY = $(CROSS_COMPILE)objcopy +OBJDUMP = $(CROSS_COMPILE)objdump +OBJSIZE = $(CROSS_COMPILE)size +endif +PAHOLE = pahole +LEX = flex +YACC = bison +AWK = awk +INSTALLKERNEL := installkernel +DEPMOD = /sbin/depmod +PERL = perl +PYTHON = python +PYTHON2 = python2 +PYTHON3 = python3 +CHECK = sparse +BASH = bash + +CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ + -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) +NOSTDINC_FLAGS := +CFLAGS_MODULE = +AFLAGS_MODULE = +LDFLAGS_MODULE = +CFLAGS_KERNEL = +AFLAGS_KERNEL = +LDFLAGS_vmlinux = + +-include .kernelvariables + +# Use USERINCLUDE when you must reference the UAPI directories only. +USERINCLUDE := \ + -I$(srctree)/arch/$(SRCARCH)/include/uapi \ + -I$(objtree)/arch/$(SRCARCH)/include/generated/uapi \ + -I$(srctree)/include/uapi \ + -I$(objtree)/include/generated/uapi \ + -include $(srctree)/include/linux/kconfig.h + +# Use LINUXINCLUDE when you must reference the include/ directory. +# Needed to be compatible with the O= option +LINUXINCLUDE := \ + -I$(srctree)/arch/$(SRCARCH)/include \ + -I$(objtree)/arch/$(SRCARCH)/include/generated \ + $(if $(building_out_of_srctree),-I$(srctree)/include) \ + -I$(objtree)/include \ + $(USERINCLUDE) + +KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE +ifeq ($(call cc-lcc-yn),y) +# Although lcc-1.24 supports -fshort-wchar many users are still +# using lcc-1.23, so when they compile kernel modules themselves +# we must avoid passing "-fshort-wchar" to it. 
+KBUILD_CFLAGS   := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
+		   -fno-strict-aliasing -fno-common -fno-PIE \
+		   -Werror=implicit-function-declaration -Werror=implicit-int \
+		   -Wno-format-security \
+		   -std=gnu89
+else
+KBUILD_CFLAGS   := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
+		   -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
+		   -Werror=implicit-function-declaration -Werror=implicit-int \
+		   -Wno-format-security \
+		   -std=gnu89
+endif
+ifeq ($(call cc-lcc-yn),y)
+KBUILD_CFLAGS   += -fno-ident
+endif
+KBUILD_CPPFLAGS := -D__KERNEL__
+KBUILD_AFLAGS_KERNEL :=
+KBUILD_CFLAGS_KERNEL :=
+KBUILD_AFLAGS_MODULE  := -DMODULE
+KBUILD_CFLAGS_MODULE  := -DMODULE
+KBUILD_LDFLAGS_MODULE :=
+export KBUILD_LDS_MODULE := $(srctree)/scripts/module-common.lds
+KBUILD_LDFLAGS :=
+GCC_PLUGINS_CFLAGS :=
+CLANG_FLAGS :=
+
+export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL
+export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+
+export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
+export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
+export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
+export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
+
+# Files to ignore in find ... statements
+
+export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o \
+			  -name CVS -o -name .pc -o -name .hg -o -name .git \) \
+			  -prune -o
+export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+			 --exclude CVS --exclude .pc --exclude .hg --exclude .git
+
+# ===========================================================================
+# Rules shared between *config targets and build targets
+
+# Basic helpers built in scripts/basic/
+PHONY += scripts_basic
+scripts_basic:
+	$(Q)$(MAKE) $(build)=scripts/basic
+	$(Q)rm -f .tmp_quiet_recordmcount
+
+PHONY += outputmakefile
+# Before starting an out-of-tree build, make sure the source tree is clean.
+# outputmakefile generates a Makefile in the output directory, if using a
+# separate output directory. This allows convenient use of make in the
+# output directory.
+# At the same time as the output Makefile is generated, generate a .gitignore
+# to ignore the whole output directory.
+outputmakefile:
+ifdef building_out_of_srctree
+	$(Q)if [ -f $(srctree)/.config -o \
+		 -d $(srctree)/include/config -o \
+		 -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \
+		echo >&2 "***"; \
+		echo >&2 "*** The source tree is not clean, please run 'make$(if $(findstring command line, $(origin ARCH)), ARCH=$(ARCH)) mrproper'"; \
+		echo >&2 "*** in $(abs_srctree)";\
+		echo >&2 "***"; \
+		false; \
+	fi
+	$(Q)ln -fsn $(srctree) source
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
+	$(Q)test -e .gitignore || \
+	{ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
+endif
+
+ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ifneq ($(CROSS_COMPILE),)
+CLANG_FLAGS	+= --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
+GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_FLAGS	+= --gcc-toolchain=$(GCC_TOOLCHAIN)
+endif
+ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),)
+CLANG_FLAGS	+= -no-integrated-as
+endif
+CLANG_FLAGS	+= -Werror=unknown-warning-option
+KBUILD_CFLAGS	+= $(CLANG_FLAGS)
+KBUILD_AFLAGS	+= $(CLANG_FLAGS)
+export CLANG_FLAGS
+endif
+
+# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
+# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
+# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
+# and from include/config/auto.conf.cmd to detect the compiler upgrade.
+CC_VERSION_TEXT = $(shell $(CC) --version 2>/dev/null | head -n 1)
+
+ifdef config-build
+# ===========================================================================
+# *config targets only - make sure prerequisites are updated, and descend
+# in scripts/kconfig to make the *config target
+
+# Read arch specific Makefile to set KBUILD_DEFCONFIG as needed.
+# KBUILD_DEFCONFIG may point to an alternative default configuration
+# used for 'make defconfig'
+include arch/$(SRCARCH)/Makefile
+export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT
+
+config: outputmakefile scripts_basic FORCE
+	$(Q)$(MAKE) $(build)=scripts/kconfig $@
+
+%config: outputmakefile scripts_basic FORCE
+	$(Q)$(MAKE) $(build)=scripts/kconfig $@
+
+else #!config-build
+# ===========================================================================
+# Build targets only - this includes vmlinux, arch specific targets, clean
+# targets and others. In general all targets except *config targets.
+
+# If building an external module, we do not care about the all: rule
+# but instead _all depends on modules
+PHONY += all
+ifeq ($(KBUILD_EXTMOD),)
+_all: all
+else
+_all: modules
+endif
+
+# Decide whether to build built-in, modular, or both.
+# Normally, just do built-in.
+
+KBUILD_MODULES :=
+KBUILD_BUILTIN := 1
+
+# If we have only "make modules", don't compile built-in objects.
+ifeq ($(MAKECMDGOALS),modules)
+  KBUILD_BUILTIN :=
+endif
+
+# If we have "make modules", compile modules
+# in addition to whatever we do anyway.
+# Just "make" or "make all" shall build modules as well
+
+ifneq ($(filter all _all modules nsdeps,$(MAKECMDGOALS)),)
+  KBUILD_MODULES := 1
+endif
+
+ifeq ($(MAKECMDGOALS),)
+  KBUILD_MODULES := 1
+endif
+
+export KBUILD_MODULES KBUILD_BUILTIN
+
+ifdef need-config
+include include/config/auto.conf
+endif
+
+ifeq ($(KBUILD_EXTMOD),)
+# Objects we will link into vmlinux / subdirs we need to visit
+init-y		:= init/
+drivers-y	:= drivers/ sound/
+drivers-$(CONFIG_SAMPLES) += samples/
+net-y		:= net/
+libs-y		:= lib/
+core-y		:= usr/
+virt-y		:= virt/
+endif # KBUILD_EXTMOD
+
+# The all: target is the default when no target is given on the
+# command line.
+# This allows a user to issue only 'make' to build a kernel including modules
+# Defaults to vmlinux, but the arch makefile usually adds further targets
+all: vmlinux
+
+ifneq ($(call cc-lcc-yn),y)
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage \
+	$(call cc-option,-fno-tree-loop-im) \
+	$(call cc-disable-warning,maybe-uninitialized,)
+else
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage \
+	$(call cc-disable-warning,maybe-uninitialized,)
+endif
+export CFLAGS_GCOV
+
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER + CC_FLAGS_FTRACE := -pg +endif + +RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register +RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register +RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk +RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline +RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) +RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG))) +export RETPOLINE_CFLAGS +export RETPOLINE_VDSO_CFLAGS + +include arch/$(SRCARCH)/Makefile + +ifdef need-config +ifdef may-sync-config +# Read in dependencies to all Kconfig* files, make sure to run syncconfig if +# changes are detected. This should be included after arch/$(SRCARCH)/Makefile +# because some architectures define CROSS_COMPILE there. +include include/config/auto.conf.cmd + +$(KCONFIG_CONFIG): + @echo >&2 '***' + @echo >&2 '*** Configuration file "$@" not found!' + @echo >&2 '***' + @echo >&2 '*** Please run some configurator (e.g. "make oldconfig" or' + @echo >&2 '*** "make menuconfig" or "make xconfig").' + @echo >&2 '***' + @/bin/false + +# The actual configuration files used during the build are stored in +# include/generated/ and include/config/. Update them if .config is newer than +# include/config/auto.conf (which mirrors .config). +# +# This exploits the 'multi-target pattern rule' trick. +# The syncconfig should be executed only once to make all the targets. +%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG) + $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig +else # !may-sync-config +# External modules and some install targets need include/generated/autoconf.h +# and include/config/auto.conf but do not care if they are up-to-date. +# Use auto.conf to trigger the test +PHONY += include/config/auto.conf + +include/config/auto.conf: + $(Q)test -e include/generated/autoconf.h -a -e $@ || ( \ + echo >&2; \ + echo >&2 " ERROR: Kernel configuration is invalid."; \ + echo >&2 " include/generated/autoconf.h or $@ are missing.";\ + echo >&2 " Run 'make oldconfig && make prepare' on kernel src to fix it."; \ + echo >&2 ; \ + /bin/false) + +endif # may-sync-config +endif # need-config + +KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) +KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) +KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) +KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) + +ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE +KBUILD_CFLAGS += -O2 +else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 +KBUILD_CFLAGS += -O3 +else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +KBUILD_CFLAGS += -Os +endif + +# Tell gcc to never replace conditional load with a non-conditional one +KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) + +include scripts/Makefile.kcov +include scripts/Makefile.gcc-plugins + +ifdef CONFIG_READABLE_ASM +# Disable optimizations that make assembler listings hard to read. 
+# reorder blocks reorders the control flow in the function
+# ipa clone creates specialized cloned functions
+# partial inlining inlines only parts of functions
+KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \
+                 $(call cc-option,-fno-ipa-cp-clone,) \
+                 $(call cc-option,-fno-partial-inlining)
+endif
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
+endif
+
+stackp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
+stackp-flags-$(CONFIG_STACKPROTECTOR)             := -fstack-protector
+stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
+
+KBUILD_CFLAGS += $(stackp-flags-y)
+
+ifdef CONFIG_CC_IS_CLANG
+KBUILD_CPPFLAGS += -Qunused-arguments
+KBUILD_CFLAGS += -Wno-format-invalid-specifier
+KBUILD_CFLAGS += -Wno-gnu
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += -Wno-tautological-compare
+# CLANG uses _MergedGlobals as an optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += -mno-global-merge
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += -Wno-unused-but-set-variable
+
+# Warn about unmarked fall-throughs in switch statement.
+# Disabled for clang while comment to attribute conversion happens and
+# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
+KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+endif
+
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+ifdef CONFIG_FRAME_POINTER
+KBUILD_CFLAGS	+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
+else
+# Some targets (ARM with Thumb2, for example) can't be built with frame
+# pointers. For those, we don't have FUNCTION_TRACER automatically
+# select FRAME_POINTER. However, FUNCTION_TRACER adds -pg, and this is
+# incompatible with -fomit-frame-pointer with current GCC, so we don't use
+# -fomit-frame-pointer with FUNCTION_TRACER.
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS	+= -fomit-frame-pointer
+endif
+endif
+
+# Initialize all stack variables with a pattern, if desired.
+ifdef CONFIG_INIT_STACK_ALL +KBUILD_CFLAGS += -ftrivial-auto-var-init=pattern +endif + +DEBUG_CFLAGS := $(call cc-option, -fno-var-tracking-assignments) + +ifdef CONFIG_DEBUG_INFO +ifdef CONFIG_DEBUG_INFO_SPLIT +DEBUG_CFLAGS += -gsplit-dwarf +else +DEBUG_CFLAGS += -g +endif +KBUILD_AFLAGS += -Wa,-gdwarf-2 +endif +ifdef CONFIG_DEBUG_INFO_DWARF4 +DEBUG_CFLAGS += -gdwarf-4 +endif + +ifdef CONFIG_DEBUG_INFO_REDUCED +DEBUG_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \ + $(call cc-option,-fno-var-tracking) +endif + +KBUILD_CFLAGS += $(DEBUG_CFLAGS) +export DEBUG_CFLAGS + +ifdef CONFIG_FUNCTION_TRACER +ifdef CONFIG_FTRACE_MCOUNT_RECORD + # gcc 5 supports generating the mcount tables directly + ifeq ($(call cc-option-yn,-mrecord-mcount),y) + CC_FLAGS_FTRACE += -mrecord-mcount + export CC_USING_RECORD_MCOUNT := 1 + endif + ifdef CONFIG_HAVE_NOP_MCOUNT + ifeq ($(call cc-option-yn, -mnop-mcount),y) + CC_FLAGS_FTRACE += -mnop-mcount + CC_FLAGS_USING += -DCC_USING_NOP_MCOUNT + endif + endif +endif +ifdef CONFIG_HAVE_FENTRY + ifeq ($(call cc-option-yn, -mfentry),y) + CC_FLAGS_FTRACE += -mfentry + CC_FLAGS_USING += -DCC_USING_FENTRY + endif +endif +export CC_FLAGS_FTRACE +KBUILD_CFLAGS += $(CC_FLAGS_FTRACE) $(CC_FLAGS_USING) +KBUILD_AFLAGS += $(CC_FLAGS_USING) +ifdef CONFIG_DYNAMIC_FTRACE + ifdef CONFIG_HAVE_C_RECORDMCOUNT + BUILD_C_RECORDMCOUNT := y + export BUILD_C_RECORDMCOUNT + endif +endif +endif + +# We trigger additional mismatches with less inlining +ifdef CONFIG_DEBUG_SECTION_MISMATCH +KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once) +endif + +ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections +LDFLAGS_vmlinux += --gc-sections +endif + +ifdef CONFIG_LIVEPATCH +KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone) +endif + +# arch Makefile may override CC so keep this after arch Makefile is included +NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) + +# warn about C99 declaration after statement +KBUILD_CFLAGS += -Wdeclaration-after-statement + +# Variable Length Arrays (VLAs) should not be used anywhere in the kernel +KBUILD_CFLAGS += -Wvla + +# disable pointer signed / unsigned warnings in gcc 4.0 +KBUILD_CFLAGS += -Wno-pointer-sign + +# disable stringop warnings in gcc 8+ +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) + +# We'll want to enable this eventually, but it's not going away for 5.7 at least +KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) + +# Another good warning that we'll want to enable eventually +KBUILD_CFLAGS += $(call cc-disable-warning, restrict) + +# Enabled with W=2, disabled by default as noisy +KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) + +# MCST: The original gcc bug which caused introduction of -fno-strict-overflow +# (optimizing away pointer overflow checking) does not exist in lcc, and this +# option prohibits many compiler optimizations. +ifneq ($(call cc-lcc-yn),y) +# disable invalid "can't wrap" optimizations for signed / pointers +KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +endif + +# clang sets -fmerge-all-constants by default as optimization, but this +# is non-conforming behavior for C and in fact breaks the kernel, so we +# need to disable it here generally. 
+KBUILD_CFLAGS	+= $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS	+= $(call cc-option,-fmerge-constants)
+
+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS	+= $(call cc-option,-fno-stack-check,)
+
+# conserve stack if available
+KBUILD_CFLAGS	+= $(call cc-option,-fconserve-stack)
+
+# Prohibit date/time macros, which would make the build non-deterministic
+KBUILD_CFLAGS	+= $(call cc-option,-Werror=date-time)
+
+# enforce correct pointer usage
+KBUILD_CFLAGS	+= $(call cc-option,-Werror=incompatible-pointer-types)
+
+# Require designated initializers for all marked structures
+KBUILD_CFLAGS	+= $(call cc-option,-Werror=designated-init)
+
+# change __FILE__ to the relative path from the srctree
+KBUILD_CFLAGS	+= $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+# ensure -fcf-protection is disabled when using retpoline as it is
+# incompatible with -mindirect-branch=thunk-extern
+ifdef CONFIG_RETPOLINE
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+endif
+
+include scripts/Makefile.kasan
+include scripts/Makefile.extrawarn
+include scripts/Makefile.ubsan
+
+# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
+KBUILD_CPPFLAGS += $(KCPPFLAGS)
+KBUILD_AFLAGS   += $(KAFLAGS)
+KBUILD_CFLAGS   += $(KCFLAGS)
+
+KBUILD_LDFLAGS_MODULE += --build-id
+LDFLAGS_vmlinux += --build-id
+
+ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
+LDFLAGS_vmlinux	+= $(call ld-option, -X,)
+endif
+
+ifeq ($(CONFIG_RELR),y)
+LDFLAGS_vmlinux	+= --pack-dyn-relocs=relr
+endif
+
+# make the checker run with the right architecture
+CHECKFLAGS += --arch=$(ARCH)
+
+# ensure the checker runs with the right endianness
+CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
+
+# the checker needs the correct machine size
+CHECKFLAGS += $(if $(CONFIG_64BIT),-m64,-m32)
+
+# Default kernel image to build when no specific target is given.
+# KBUILD_IMAGE may be overruled on the command line or
+# set in the environment
+# Also any assignments in arch/$(ARCH)/Makefile take precedence over
+# this default value
+export KBUILD_IMAGE ?= vmlinux
+
+#
+# INSTALL_PATH specifies where to place the updated kernel and system map
+# images. Default is /boot, but you can set it to other values
+export	INSTALL_PATH ?= /boot
+
+#
+# INSTALL_DTBS_PATH specifies a prefix for relocations required by build roots.
+# Like INSTALL_MOD_PATH, it isn't defined in the Makefile, but can be passed as
+# an argument if needed. Otherwise it defaults to the kernel install path
+#
+export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE)
+
+#
+# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
+# relocations required by build roots. This is not defined in the
+# makefile but the argument can be passed to make if needed.
+#
+
+MODLIB	= $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
+export MODLIB
+
+#
+# INSTALL_MOD_STRIP, if defined, will cause modules to be
+# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
+# the default option --strip-debug will be used. Otherwise, the
+# INSTALL_MOD_STRIP value will be used as the options to the strip command.
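+#
+# For example (any value other than '1', such as --strip-unneeded below,
+# is passed to strip as-is):
+#
+#   make INSTALL_MOD_STRIP=1 modules_install
+#   make INSTALL_MOD_STRIP="--strip-unneeded" modules_install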
+
+ifdef INSTALL_MOD_STRIP
+ifeq ($(INSTALL_MOD_STRIP),1)
+mod_strip_cmd = $(STRIP) --strip-debug
+else
+mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP)
+endif # INSTALL_MOD_STRIP=1
+else
+mod_strip_cmd = true
+endif # INSTALL_MOD_STRIP
+export mod_strip_cmd
+
+# CONFIG_MODULE_COMPRESS, if defined, will cause modules to be compressed
+# after they are installed, according to CONFIG_MODULE_COMPRESS_GZIP
+# or CONFIG_MODULE_COMPRESS_XZ.
+
+mod_compress_cmd = true
+ifdef CONFIG_MODULE_COMPRESS
+  ifdef CONFIG_MODULE_COMPRESS_GZIP
+    mod_compress_cmd = gzip -n -f
+  endif # CONFIG_MODULE_COMPRESS_GZIP
+  ifdef CONFIG_MODULE_COMPRESS_XZ
+    mod_compress_cmd = xz -f
+  endif # CONFIG_MODULE_COMPRESS_XZ
+endif # CONFIG_MODULE_COMPRESS
+export mod_compress_cmd
+
+ifdef CONFIG_MODULE_SIG_ALL
+$(eval $(call config_filename,MODULE_SIG_KEY))
+
+mod_sign_cmd = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY) certs/signing_key.x509
+else
+mod_sign_cmd = true
+endif
+export mod_sign_cmd
+
+HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
+ifdef CONFIG_STACK_VALIDATION
+  has_libelf := $(call try-run,\
+		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+  ifeq ($(has_libelf),1)
+    objtool_target := tools/objtool FORCE
+  else
+    SKIP_STACK_VALIDATION := 1
+    export SKIP_STACK_VALIDATION
+  endif
+endif
+
+PHONY += prepare0
+
+export MODORDER := $(extmod-prefix)modules.order
+
+ifeq ($(KBUILD_EXTMOD),)
+core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ ltt/
+
+vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+		     $(net-y) $(net-m) $(libs-y) $(libs-m) $(virt-y)))
+
+vmlinux-alldirs	:= $(sort $(vmlinux-dirs) Documentation \
+		     $(patsubst %/,%,$(filter %/, $(init-) $(core-) \
+			$(drivers-) $(net-) $(libs-) $(virt-))))
+
+build-dirs	:= $(vmlinux-dirs)
+clean-dirs	:= $(vmlinux-alldirs)
+
+init-y		:= $(patsubst %/, %/built-in.a, $(init-y))
+core-y		:= $(patsubst %/, %/built-in.a, $(core-y))
+drivers-y	:= $(patsubst %/, %/built-in.a, $(drivers-y))
+net-y		:= $(patsubst %/, %/built-in.a, $(net-y))
+libs-y1		:= $(patsubst %/, %/lib.a, $(libs-y))
+libs-y2		:= $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y)))
+virt-y		:= $(patsubst %/, %/built-in.a, $(virt-y))
+
+# Externally visible symbols (used by link-vmlinux.sh)
+export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
+			      $(drivers-y) $(net-y) $(virt-y)
+export KBUILD_VMLINUX_LIBS := $(libs-y1)
+export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
+export LDFLAGS_vmlinux
+# used by scripts/Makefile.package
+export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
+
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
+
+# Recurse until adjust_autoksyms.sh is satisfied
+PHONY += autoksyms_recursive
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+autoksyms_recursive: descend modules.order
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
+	  "$(MAKE) -f $(srctree)/Makefile vmlinux"
+endif
+
+# For the kernel to actually contain only the needed exported symbols,
+# we have to build modules as well to determine what those symbols are.
+# (this can be evaluated only once include/config/auto.conf has been included)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+  KBUILD_MODULES := 1
+endif
+
+autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
+
+$(autoksyms_h):
+	$(Q)mkdir -p $(dir $@)
+	$(Q)touch $@
+
+ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
+
+# Final link of vmlinux with optional arch pass after final link
+cmd_link-vmlinux =                                                 \
+	$(CONFIG_SHELL) $< $(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) ;    \
+	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
+
+vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE
+	+$(call if_changed,link-vmlinux)
+
+targets := vmlinux
+
+# The actual objects are generated when descending,
+# make sure no implicit rule kicks in
+$(sort $(vmlinux-deps)): descend ;
+
+filechk_kernel.release = \
+	echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+
+# Store (new) KERNELRELEASE string in include/config/kernel.release
+include/config/kernel.release: FORCE
+	$(call filechk,kernel.release)
+
+# Additional helpers built in scripts/
+# Carefully list dependencies so we do not try to build scripts twice
+# in parallel
+PHONY += scripts
+scripts: scripts_basic scripts_dtc
+	$(Q)$(MAKE) $(build)=$(@)
+
+# Things we need to do before we recursively start building the kernel
+# or the modules are listed in "prepare".
+# A multi level approach is used. prepareN is processed before prepareN-1.
+# archprepare is used in arch Makefiles; when it is processed, the asm
+# symlink, version.h and scripts_basic are processed / created.
+
+PHONY += prepare archprepare
+
+archprepare: outputmakefile archheaders archscripts scripts include/config/kernel.release \
+	asm-generic $(version_h) $(autoksyms_h) include/generated/utsrelease.h
+
+prepare0: archprepare
+	$(Q)$(MAKE) $(build)=scripts/mod
+	$(Q)$(MAKE) $(build)=.
+
+# All the preparing..
+prepare: prepare0 prepare-objtool + +# Support for using generic headers in asm-generic +asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj + +PHONY += asm-generic uapi-asm-generic +asm-generic: uapi-asm-generic + $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/asm \ + generic=include/asm-generic +uapi-asm-generic: + $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \ + generic=include/uapi/asm-generic + +PHONY += prepare-objtool +prepare-objtool: $(objtool_target) +ifeq ($(SKIP_STACK_VALIDATION),1) +ifdef CONFIG_UNWINDER_ORC + @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 + @false +else + @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 +endif +endif + +# Generate some files +# --------------------------------------------------------------------------- + +# KERNELRELEASE can change from a few different places, meaning version.h +# needs to be updated, so this check is forced on all builds + +uts_len := 64 +define filechk_utsrelease.h + if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ + echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ + exit 1; \ + fi; \ + echo \#define UTS_RELEASE \"$(KERNELRELEASE)\" +endef + +define filechk_version.h + echo \#define LINUX_VERSION_CODE $(shell \ + expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))' +endef + +$(version_h): FORCE + $(call filechk,version.h) + $(Q)rm -f $(old_version_h) + +include/generated/utsrelease.h: include/config/kernel.release FORCE + $(call filechk,utsrelease.h) + +PHONY += headerdep +headerdep: + $(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \ + $(srctree)/scripts/headerdep.pl -I$(srctree)/include + +# --------------------------------------------------------------------------- +# Kernel headers + +#Default location for installed headers +export INSTALL_HDR_PATH = $(objtree)/usr + +quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include + cmd_headers_install = \ + mkdir -p $(INSTALL_HDR_PATH); \ + rsync -mrl --include='*/' --include='*\.h' --exclude='*' \ + usr/include $(INSTALL_HDR_PATH) + +PHONY += headers_install +headers_install: headers + $(call cmd,headers_install) + +PHONY += archheaders archscripts + +hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj + +PHONY += headers +headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts + $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \ + $(error Headers not exportable for the $(SRCARCH) architecture)) + $(Q)$(MAKE) $(hdr-inst)=include/uapi + $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi + +# Deprecated. It is no-op now. 
+PHONY += headers_check +headers_check: + @: + +ifdef CONFIG_HEADERS_INSTALL +prepare: headers +endif + +PHONY += scripts_unifdef +scripts_unifdef: scripts_basic + $(Q)$(MAKE) $(build)=scripts scripts/unifdef + +# --------------------------------------------------------------------------- +# Kernel selftest + +PHONY += kselftest +kselftest: + $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests + +kselftest-%: FORCE + $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests $* + +PHONY += kselftest-merge +kselftest-merge: + $(if $(wildcard $(objtree)/.config),, $(error No .config exists, config your kernel first!)) + $(Q)find $(srctree)/tools/testing/selftests -name config | \ + xargs $(srctree)/scripts/kconfig/merge_config.sh -m $(objtree)/.config + $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig + +# --------------------------------------------------------------------------- +# Devicetree files + +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),) +dtstree := arch/$(SRCARCH)/boot/dts +endif + +ifneq ($(dtstree),) + +%.dtb: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ + +PHONY += dtbs dtbs_install dtbs_check +dtbs: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) + +ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),) +dtbs: dt_binding_check +endif + +dtbs_check: export CHECK_DTBS=1 +dtbs_check: dtbs + +dtbs_install: + $(Q)$(MAKE) $(dtbinst)=$(dtstree) + +ifdef CONFIG_OF_EARLY_FLATTREE +all: dtbs +endif + +endif + +PHONY += scripts_dtc +scripts_dtc: scripts_basic + $(Q)$(MAKE) $(build)=scripts/dtc + +PHONY += dt_binding_check +dt_binding_check: scripts_dtc + $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings + +# --------------------------------------------------------------------------- +# Modules + +ifdef CONFIG_MODULES + +# By default, build modules as well + +all: modules + +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. +ifdef CONFIG_MODVERSIONS + KBUILD_BUILTIN := 1 +endif + +# Build modules +# +# A module can be listed more than once in obj-m resulting in +# duplicate lines in modules.order files. Those are removed +# using awk while concatenating to the final file. + +PHONY += modules +modules: $(if $(KBUILD_BUILTIN),vmlinux) modules.order modules.builtin + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh + +modules.order: descend + $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@ + +modbuiltin-dirs := $(addprefix _modbuiltin_, $(build-dirs)) + +modules.builtin: $(modbuiltin-dirs) + $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@ + +PHONY += $(modbuiltin-dirs) +# tristate.conf is not included from this Makefile. Add it as a prerequisite +# here to make it self-healing in case somebody accidentally removes it. +$(modbuiltin-dirs): include/config/tristate.conf + $(Q)$(MAKE) $(modbuiltin)=$(patsubst _modbuiltin_%,%,$@) + +# Target to prepare building external modules +PHONY += modules_prepare +modules_prepare: prepare + +# Target to install modules +PHONY += modules_install +modules_install: _modinst_ _modinst_post + +PHONY += _modinst_ +_modinst_: + @rm -rf $(MODLIB)/kernel + @rm -f $(MODLIB)/source + @mkdir -p $(MODLIB)/kernel + @ln -s $(abspath $(srctree)) $(MODLIB)/source + @if [ ! 
$(objtree) -ef $(MODLIB)/build ]; then \ + rm -f $(MODLIB)/build ; \ + ln -s $(CURDIR) $(MODLIB)/build ; \ + fi + @sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order + @sed 's:^:kernel/:' modules.builtin > $(MODLIB)/modules.builtin + @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst + +# This depmod is only for convenience to give the initial +# boot a modules.dep even before / is mounted read-write. However the +# boot script depmod is the master version. +PHONY += _modinst_post +_modinst_post: _modinst_ + $(call cmd,depmod) + +ifeq ($(CONFIG_MODULE_SIG), y) +PHONY += modules_sign +modules_sign: + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modsign +endif + +else # CONFIG_MODULES + +# Modules not configured +# --------------------------------------------------------------------------- + +PHONY += modules modules_install +modules modules_install: + @echo >&2 + @echo >&2 "The present kernel configuration has modules disabled." + @echo >&2 "Type 'make config' and enable loadable module support." + @echo >&2 "Then build a kernel with module support enabled." + @echo >&2 + @exit 1 + +endif # CONFIG_MODULES + +### +# Cleaning is done on three levels. +# make clean Delete most generated files +# Leave enough to build external modules +# make mrproper Delete the current configuration, and all generated files +# make distclean Remove editor backup files, patch leftover files and the like + +# Directories & files removed with 'make clean' +CLEAN_DIRS += include/ksym +CLEAN_FILES += modules.builtin.modinfo + +# Directories & files removed with 'make mrproper' +MRPROPER_DIRS += include/config include/generated \ + arch/$(SRCARCH)/include/generated .tmp_objdiff \ + debian/ snap/ tar-install/ +MRPROPER_FILES += .config .config.old .version \ + Module.symvers \ + signing_key.pem signing_key.priv signing_key.x509 \ + x509.genkey extra_certificates signing_key.x509.keyid \ + signing_key.x509.signer vmlinux-gdb.py \ + *.spec + +# Directories & files removed with 'make distclean' +DISTCLEAN_DIRS += +DISTCLEAN_FILES += tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS + +# clean - Delete most, but leave enough to build external modules +# +clean: rm-dirs := $(CLEAN_DIRS) +clean: rm-files := $(CLEAN_FILES) + +PHONY += archclean vmlinuxclean + +vmlinuxclean: + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean + $(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean) + +clean: archclean vmlinuxclean + +# mrproper - Delete all generated files, including .config +# +mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS)) +mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) +mrproper-dirs := $(addprefix _mrproper_,scripts) + +PHONY += $(mrproper-dirs) mrproper +$(mrproper-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) + +mrproper: clean $(mrproper-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + +# distclean +# +distclean: rm-dirs := $(wildcard $(DISTCLEAN_DIRS)) +distclean: rm-files := $(wildcard $(DISTCLEAN_FILES)) + +PHONY += distclean + +distclean: mrproper + $(call cmd,rmdirs) + $(call cmd,rmfiles) + @find $(srctree) $(RCS_FIND_IGNORE) \ + \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ + -o -name '*.bak' -o -name '#*#' -o -name '*%' \ + -o -name 'core' \) \ + -type f -print | xargs rm -f + + +# Packaging of the kernel to various formats +# --------------------------------------------------------------------------- + +%src-pkg: FORCE + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@ +%pkg: include/config/kernel.release 
FORCE + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@ + +# Brief documentation of the typical targets used +# --------------------------------------------------------------------------- + +boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig) +boards := $(sort $(notdir $(boards))) +board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig)) +board-dirs := $(sort $(notdir $(board-dirs:/=))) + +PHONY += help +help: + @echo 'Cleaning targets:' + @echo ' clean - Remove most generated files but keep the config and' + @echo ' enough build support to build external modules' + @echo ' mrproper - Remove all generated files + config + various backup files' + @echo ' distclean - mrproper + remove editor backup and patch files' + @echo '' + @echo 'Configuration targets:' + @$(MAKE) -f $(srctree)/scripts/kconfig/Makefile help + @echo '' + @echo 'Other generic targets:' + @echo ' all - Build all targets marked with [*]' + @echo '* vmlinux - Build the bare kernel' + @echo '* modules - Build all modules' + @echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)' + @echo ' dir/ - Build all files in dir and below' + @echo ' dir/file.[ois] - Build specified target only' + @echo ' dir/file.ll - Build the LLVM assembly file' + @echo ' (requires compiler support for LLVM assembly generation)' + @echo ' dir/file.lst - Build specified mixed source/assembly target only' + @echo ' (requires a recent binutils and recent build (System.map))' + @echo ' dir/file.ko - Build module including final link' + @echo ' modules_prepare - Set up for building external modules' + @echo ' tags/TAGS - Generate tags file for editors' + @echo ' cscope - Generate cscope index' + @echo ' gtags - Generate GNU GLOBAL index' + @echo ' kernelrelease - Output the release version string (use with make -s)' + @echo ' kernelversion - Output the version stored in Makefile (use with make -s)' + @echo ' image_name - Output the image name (use with make -s)' + @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \ + echo ' (default: $(INSTALL_HDR_PATH))'; \ + echo '' + @echo 'Static analysers:' + @echo ' checkstack - Generate a list of stack hogs' + @echo ' namespacecheck - Name space analysis on compiled kernel' + @echo ' versioncheck - Sanity check on version.h usage' + @echo ' includecheck - Check for duplicate included header files' + @echo ' export_report - List the usages of all exported symbols' + @echo ' headerdep - Detect inclusion cycles in headers' + @echo ' coccicheck - Check with Coccinelle' + @echo '' + @echo 'Tools:' + @echo ' nsdeps - Generate missing symbol namespace dependencies' + @echo '' + @echo 'Kernel selftest:' + @echo ' kselftest - Build and run kernel selftest (run as root)' + @echo ' Build, install, and boot kernel before' + @echo ' running kselftest on it' + @echo ' kselftest-clean - Remove all generated kselftest files' + @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing' + @echo ' .config.' 
+ @echo '' + @$(if $(dtstree), \ + echo 'Devicetree:'; \ + echo '* dtbs - Build device tree blobs for enabled boards'; \ + echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \ + echo ' dt_binding_check - Validate device tree binding documents'; \ + echo ' dtbs_check - Validate device tree source files';\ + echo '') + + @echo 'Userspace tools targets:' + @echo ' use "make tools/help"' + @echo ' or "cd tools; make help"' + @echo '' + @echo 'Kernel packaging:' + @$(MAKE) -f $(srctree)/scripts/Makefile.package help + @echo '' + @echo 'Documentation targets:' + @$(MAKE) -f $(srctree)/Documentation/Makefile dochelp + @echo '' + @echo 'Architecture specific targets ($(SRCARCH)):' + @$(if $(archhelp),$(archhelp),\ + echo ' No architecture specific help defined for $(SRCARCH)') + @echo '' + @$(if $(boards), \ + $(foreach b, $(boards), \ + printf " %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \ + echo '') + @$(if $(board-dirs), \ + $(foreach b, $(board-dirs), \ + printf " %-16s - Show %s-specific targets\\n" help-$(b) $(b);) \ + printf " %-16s - Show all of the above\\n" help-boards; \ + echo '') + + @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' + @echo ' make V=2 [targets] 2 => give reason for rebuild of target' + @echo ' make O=dir [targets] Locate all output files in "dir", including .config' + @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)' + @echo ' make C=2 [targets] Force check of all c source with $$CHECK' + @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' + @echo ' make W=n [targets] Enable extra build checks, n=1,2,3 where' + @echo ' 1: warnings which may be relevant and do not occur too often' + @echo ' 2: warnings which occur quite often but may still be relevant' + @echo ' 3: more obscure warnings, can most likely be ignored' + @echo ' Multiple levels can be combined with W=12 or W=123' + @echo '' + @echo 'Execute "make" or "make all" to build all targets marked with [*] ' + @echo 'For further info see the ./README file' + + +help-board-dirs := $(addprefix help-,$(board-dirs)) + +help-boards: $(help-board-dirs) + +boards-per-dir = $(sort $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig))) + +$(help-board-dirs): help-%: + @echo 'Architecture specific targets ($(SRCARCH) $*):' + @$(if $(boards-per-dir), \ + $(foreach b, $(boards-per-dir), \ + printf " %-24s - Build for %s\\n" $*/$(b) $(subst _defconfig,,$(b));) \ + echo '') + + +# Documentation targets +# --------------------------------------------------------------------------- +DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs \ + linkcheckdocs dochelp refcheckdocs +PHONY += $(DOC_TARGETS) +$(DOC_TARGETS): + $(Q)$(MAKE) $(build)=Documentation $@ + +# Misc +# --------------------------------------------------------------------------- + +PHONY += scripts_gdb +scripts_gdb: prepare0 + $(Q)$(MAKE) $(build)=scripts/gdb + $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py) + +ifdef CONFIG_GDB_SCRIPTS +all: scripts_gdb +endif + +else # KBUILD_EXTMOD + +### +# External module support. +# When building external modules the kernel used as basis is considered +# read-only, and no consistency checks are made and the make +# system is not used on the basis kernel. If updates are required +# in the basis kernel ordinary make commands (without M=...) must +# be used. +# +# The following are the only valid targets when building external +# modules. 
+# make M=dir clean Delete all automatically generated files +# make M=dir modules Make all modules in specified dir +# make M=dir Same as 'make M=dir modules' +# make M=dir modules_install +# Install the modules built in the module directory +# Assumes install directory is already created + +# We are always building modules +KBUILD_MODULES := 1 + +PHONY += $(objtree)/Module.symvers +$(objtree)/Module.symvers: + @test -e $(objtree)/Module.symvers || ( \ + echo; \ + echo " WARNING: Symbol version dump $(objtree)/Module.symvers"; \ + echo " is missing; modules will have no dependencies and modversions."; \ + echo ) + +build-dirs := $(KBUILD_EXTMOD) +PHONY += modules +modules: descend $(objtree)/Module.symvers + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + +PHONY += modules_install +modules_install: _emodinst_ _emodinst_post + +install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra) +PHONY += _emodinst_ +_emodinst_: + $(Q)mkdir -p $(MODLIB)/$(install-dir) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst + +PHONY += _emodinst_post +_emodinst_post: _emodinst_ + $(call cmd,depmod) + +clean-dirs := $(KBUILD_EXTMOD) +clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers + +PHONY += / +/: + @echo >&2 '"$(MAKE) /" is no longer supported. Please use "$(MAKE) ./" instead.' + +PHONY += help +help: + @echo ' Building external modules.' + @echo ' Syntax: make -C path/to/kernel/src M=$$PWD target' + @echo '' + @echo ' modules - default target, build the module(s)' + @echo ' modules_install - install the module' + @echo ' clean - remove generated files in module directory only' + @echo '' + +PHONY += prepare +endif # KBUILD_EXTMOD + +# Single targets +# --------------------------------------------------------------------------- +# To build individual files in subdirectories, you can do like this: +# +# make foo/bar/baz.s +# +# The supported suffixes for single-target are listed in 'single-targets' +# +# To build only under specific subdirectories, you can do like this: +# +# make foo/bar/baz/ + +ifdef single-build + +# .ko is special because modpost is needed +single-ko := $(sort $(filter %.ko, $(MAKECMDGOALS))) +single-no-ko := $(sort $(patsubst %.ko,%.mod, $(MAKECMDGOALS))) + +$(single-ko): single_modpost + @: +$(single-no-ko): descend + @: + +ifeq ($(KBUILD_EXTMOD),) +# For the single build of in-tree modules, use a temporary file to avoid +# the situation of modules_install installing an invalid modules.order. +MODORDER := .modules.tmp +endif + +PHONY += single_modpost +single_modpost: $(single-no-ko) + $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER) + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + +KBUILD_MODULES := 1 + +export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko)) + +# trim unrelated directories +build-dirs := $(foreach d, $(build-dirs), \ + $(if $(filter $(d)/%, $(KBUILD_SINGLE_TARGETS)), $(d))) + +endif + +# Handle descending into subdirectories listed in $(build-dirs) +# Preset locale variables to speed up the build process. Limit locale +# tweaks to this spot to avoid wrong language settings when running +# make menuconfig etc. 
+# Error messages still appears in the original language +PHONY += descend $(build-dirs) +descend: $(build-dirs) +$(build-dirs): prepare + $(Q)$(MAKE) $(build)=$@ \ + single-build=$(if $(filter-out $@/, $(single-no-ko)),1) \ + need-builtin=1 need-modorder=1 + +clean-dirs := $(addprefix _clean_, $(clean-dirs)) +PHONY += $(clean-dirs) clean +$(clean-dirs): + $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@) + +clean: $(clean-dirs) + $(call cmd,rmdirs) + $(call cmd,rmfiles) + @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \ + \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \ + -o -name '*.ko.*' \ + -o -name '*.dtb' -o -name '*.dtb.S' -o -name '*.dt.yaml' \ + -o -name '*.dwo' -o -name '*.lst' \ + -o -name '*.su' -o -name '*.mod' -o -name '*.ns_deps' \ + -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \ + -o -name '*.lex.c' -o -name '*.tab.[ch]' \ + -o -name '*.asn1.[ch]' \ + -o -name '*.symtypes' -o -name 'modules.order' \ + -o -name modules.builtin -o -name '.tmp_*.o.*' \ + -o -name '*.c.[012]*.*' \ + -o -name '*.ll' \ + -o -name '*.gcno' \) -type f -print | xargs rm -f + +# Generate tags for editors +# --------------------------------------------------------------------------- +quiet_cmd_tags = GEN $@ + cmd_tags = $(BASH) $(srctree)/scripts/tags.sh $@ + +tags TAGS cscope gtags: FORCE + $(call cmd,tags) + +# Script to generate missing namespace dependencies +# --------------------------------------------------------------------------- + +PHONY += nsdeps + +nsdeps: modules + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost nsdeps + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/$@ + +# Scripts to check various things for consistency +# --------------------------------------------------------------------------- + +PHONY += includecheck versioncheck coccicheck namespacecheck export_report + +includecheck: + find $(srctree)/* $(RCS_FIND_IGNORE) \ + -name '*.[hcS]' -type f -print | sort \ + | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl + +versioncheck: + find $(srctree)/* $(RCS_FIND_IGNORE) \ + -name '*.[hcS]' -type f -print | sort \ + | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl + +coccicheck: + $(Q)$(BASH) $(srctree)/scripts/$@ + +namespacecheck: + $(PERL) $(srctree)/scripts/namespace.pl + +export_report: + $(PERL) $(srctree)/scripts/export_report.pl + +PHONY += checkstack kernelrelease kernelversion image_name + +# UML needs a little special treatment here. It wants to use the host +# toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone +# else wants $(ARCH), including people doing cross-builds, which means +# that $(SUBARCH) doesn't work here. +ifeq ($(ARCH), um) +CHECKSTACK_ARCH := $(SUBARCH) +else +CHECKSTACK_ARCH := $(ARCH) +endif +checkstack: + $(OBJDUMP) -d vmlinux $$(find . 
-name '*.ko') | \
+	$(PERL) $(srctree)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
+
+kernelrelease:
+	@echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+
+kernelversion:
+	@echo $(KERNELVERSION)
+
+image_name:
+	@echo $(KBUILD_IMAGE)
+
+# Clear a bunch of variables before executing the submake
+
+ifeq ($(quiet),silent_)
+tools_silent=s
+endif
+
+tools/: FORCE
+	$(Q)mkdir -p $(objtree)/tools
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
+
+tools/%: FORCE
+	$(Q)mkdir -p $(objtree)/tools
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
+
+# FIXME Should go into a make.lib or something
+# ===========================================================================
+
+quiet_cmd_rmdirs = $(if $(wildcard $(rm-dirs)),CLEAN   $(wildcard $(rm-dirs)))
+      cmd_rmdirs = rm -rf $(rm-dirs)
+
+quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files)))
+      cmd_rmfiles = rm -f $(rm-files)
+
+# Run depmod only if we have System.map and depmod is executable
+quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
+      cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
+                   $(KERNELRELEASE)
+
+# read saved command lines for existing targets
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
+endif # config-targets
+endif # mixed-build
+endif # need-sub-make
+
+PHONY += FORCE
+FORCE:
+
+# Declare the contents of the PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+.PHONY: $(PHONY)
diff --git a/arch/e2k/Makefile b/arch/e2k/Makefile
new file mode 100644
index 0000000..8c2470d
--- /dev/null
+++ b/arch/e2k/Makefile
@@ -0,0 +1,252 @@
+# e2k/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+ +KBUILD_DEFCONFIG ?= defconfig + +AS = $(shell $(CC) -print-prog-name=as) +OBJDUMP = $(shell $(CC) -print-prog-name=objdump) +LD = $(shell $(CC) -print-prog-name=ld) +OBJCOPY = $(shell $(CC) -print-prog-name=objcopy) + +KBUILD_CFLAGS += -fkernel -gline -masm-inline $(call cc-option,-fforbid-fp) \ + $(call cc-option,-fmax-errors=5) + +ifeq ($(PROFILE_GENERATE), 1) +KBUILD_CFLAGS += -fprofile-generate-kernel +endif +ifeq ($(origin PROFILE_USE), undefined) +else +KBUILD_CFLAGS += -fprofile-use="$(PROFILE_USE)" +endif + +KBUILD_CFLAGS += $(call cc-option,-finline-functions,) \ + $(call cc-option,-finline-functions-called-once,) + +# Some uninteresting or broken warnings can be disabled with #pragma's only +KBUILD_CFLAGS += -Wno-array-bounds -Wno-duplicate-type-qualifier \ + -Wno-builtin-functions-redefined -Wno-reduced-alignment \ + -Wno-unused-value -Wno-overflow -Wno-signed-one-bit-field \ + -include $(srctree)/arch/e2k/include/asm/override-lcc-warnings.h + +LDFLAGS_vmlinux := +CHECKFLAGS += -D__e2k__ + +CFLAGS += -pipe -D__linux__ + +KBUILD_CFLAGS += $(CFLAGS) + +ifdef CONFIG_SMP_DAM_BUG + KBUILD_CFLAGS += -fno-dam-call +endif + +CFLAGS_GENERIC := -march=elbrus-v2 +CFLAGS_ES2 := -mtune=elbrus-2c+ +CFLAGS_E2S := -mtune=elbrus-4c +CFLAGS_E8C := -mtune=elbrus-8c +CFLAGS_E1CP := -mtune=elbrus-1c+ +CFLAGS_E8C2 := -mtune=elbrus-8c2 +CFLAGS_E12C := -mtune=elbrus-12c +CFLAGS_E16C := -mtune=elbrus-16c +CFLAGS_E2C3 := -mtune=elbrus-2c3 + +CFLAGS_ALL_CPUS := $(CFLAGS_ES2) $(CFLAGS_E2S) $(CFLAGS_E8C) $(CFLAGS_E1CP) \ + $(CFLAGS_E8C2) $(CFLAGS_E12C) $(CFLAGS_E16C) $(CFLAGS_E2C3) +export CFLAGS_ALL_CPUS + +CFLAGS_E2K_SIC := $(CFLAGS_ES2) + +export CFLAGS_ES2 CFLAGS_E2S CFLAGS_E8C CFLAGS_E1CP CFLAGS_E8C2 CFLAGS_E2C3 \ + CFLAGS_E12C CFLAGS_E16C CFLAGS_E2K_SIC + +ifeq ($(CONFIG_E2K_MACHINE),y) + ifeq ($(CONFIG_E2K_ES2_DSP),y) + KBUILD_CFLAGS += $(CFLAGS_ES2) + KBUILD_AFLAGS += $(CFLAGS_ES2) + TARGET_MDL := 04 + else + ifeq ($(CONFIG_E2K_ES2_RU),y) + KBUILD_CFLAGS += $(CFLAGS_ES2) + KBUILD_AFLAGS += $(CFLAGS_ES2) + TARGET_MDL := 06 + else + ifeq ($(CONFIG_E2K_E2S),y) + KBUILD_CFLAGS += $(CFLAGS_E2S) + KBUILD_AFLAGS += $(CFLAGS_E2S) + TARGET_MDL := 03 + else + ifeq ($(CONFIG_E2K_E8C),y) + KBUILD_CFLAGS += $(CFLAGS_E8C) + KBUILD_AFLAGS += $(CFLAGS_E8C) + TARGET_MDL := 07 + else + ifeq ($(CONFIG_E2K_E1CP),y) + KBUILD_CFLAGS += $(CFLAGS_E1CP) + KBUILD_AFLAGS += $(CFLAGS_E1CP) + TARGET_MDL := 08 + else + ifeq ($(CONFIG_E2K_E8C2),y) + KBUILD_CFLAGS += $(CFLAGS_E8C2) + KBUILD_AFLAGS += $(CFLAGS_E8C2) + TARGET_MDL := 09 + else + ifeq ($(CONFIG_E2K_E12C),y) + KBUILD_CFLAGS += $(CFLAGS_E12C) + KBUILD_AFLAGS += $(CFLAGS_E12C) + TARGET_MDL := 0a + else + ifeq ($(CONFIG_E2K_E16C),y) + KBUILD_CFLAGS += $(CFLAGS_E16C) + KBUILD_AFLAGS += $(CFLAGS_E16C) + TARGET_MDL := 0b + else + ifeq ($(CONFIG_E2K_E2C3),y) + KBUILD_CFLAGS += $(CFLAGS_E2C3) + KBUILD_AFLAGS += $(CFLAGS_E2C3) + TARGET_MDL := 0c + else + error "Invalid e2k machine type" + endif # ifeq ($(CONFIG_E2K_E2C3),y) + endif # ifeq ($(CONFIG_E2K_E16C),y) + endif # ifeq ($(CONFIG_E2K_E12C),y) + endif # ifeq ($(CONFIG_E2K_E8C2),y) + endif # ifeq ($(CONFIG_E2K_E1CP),y) + endif # ifeq ($(CONFIG_E2K_E8C),y) + endif # ifeq ($(CONFIG_E2K_E2S),y) + endif # ifeq ($(CONFIG_E2K_ES2_RU),y) + endif # ifeq ($(CONFIG_E2K_ES2_DSP),y) +else # ! 
ifeq ($(CONFIG_E2K_MACHINE),y) +KBUILD_CFLAGS += $(CFLAGS_GENERIC) +KBUILD_AFLAGS += $(CFLAGS_GENERIC) +TARGET_MDL := 00 +endif + +KBUILD_LDFLAGS += --relaxed-e2k-machine-check +KBUILD_CFLAGS += $(cflags-y) + +libs-y += arch/e2k/lib/ +core-y += arch/e2k/kernel/ \ + arch/e2k/mm/ \ + arch/e2k/p2v/ \ + arch/e2k/fast_syscalls/ + +core-$(CONFIG_PROTECTED_MODE) += arch/e2k/3p/ + +drivers-$(CONFIG_PCI) += arch/e2k/pci/ + +# suspend and hibernation support +drivers-$(CONFIG_PM) += arch/e2k/power/ + +#KVM hypervisor and guest support +core-$(CONFIG_KVM) += arch/e2k/kvm/ +core-$(CONFIG_KVM_GUEST) += arch/e2k/kvm/guest/ + +# Elbrus common modules +core-y += arch/l/ +drivers-$(CONFIG_PCI) += arch/l/pci/ + +boot := arch/e2k/boot +all: es2boot + +MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot + +.PHONY: clean archclean archmrproper archdep bootimage image zImage + +es2boot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_ES2=y boot + +e2sboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E2S=y boot + +e8cboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E8C=y boot + +e1cpboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E1CP=y boot + +e8c2boot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E8C2=y CONFIG_E8C=y boot + +e12cboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E12C=y boot + +e16cboot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E16C=y boot + +e2c3boot: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_E2C3=y boot + +image: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_BOOT=y $(objtree)/image.boot + $(Q)echo "Target mdl: $(TARGET_MDL)"; \ + echo $(TARGET_MDL) | \ + xxd -r -p | \ + dd of=$(objtree)/image.boot bs=1 seek=258 count=1 conv=notrunc 2>/dev/null; \ + echo 00000000 | xxd -r -p | \ + dd of=$(objtree)/image.boot bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + e2k_kernel_csum=`cksum $(objtree)/image.boot | awk '{ printf "%08x\n", $$1 }'`; \ + echo "Kernel image check sum: $$e2k_kernel_csum"; \ + echo $$e2k_kernel_csum | \ + sed 's/\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)/\7\8\5\6\3\4\1\2/' | \ + xxd -r -p | \ + dd of=$(objtree)/image.boot bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + echo 'Kernel: image.boot is ready' ' (#'`cat .version`')' + +zImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) CONFIG_BOOT=y $(objtree)/zImage + $(Q)echo "Target mdl: $(TARGET_MDL)"; \ + echo $(TARGET_MDL) | \ + xxd -r -p | \ + dd of=$(objtree)/zImage bs=1 seek=258 count=1 conv=notrunc 2>/dev/null; \ + echo 00000000 | xxd -r -p | \ + dd of=$(objtree)/zImage bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + e2k_kernel_csum=`cksum $(objtree)/zImage | awk '{ printf "%08x\n", $$1 }'`; \ + echo "Kernel image check sum: $$e2k_kernel_csum"; \ + echo $$e2k_kernel_csum | \ + sed 's/\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)\(.\)/\7\8\5\6\3\4\1\2/' | \ + xxd -r -p | \ + dd of=$(objtree)/zImage bs=1 seek=1588 count=4 conv=notrunc 2>/dev/null; \ + echo 'Kernel: zImage is ready' ' (#'`cat .version`')' + +image.boot: bootimage +bootimage: image + +archclean: + $(Q)$(MAKE) $(clean)=arch/e2k/boot + +archmrproper: + +archdep: + @$(MAKEBOOT) dep + +install-headers: + @$(MAKEBOOT) install-headers + +install-includes: include/linux/version.h arch/e2k/include FORCE + $(CONFIG_SHELL) scripts/gen-osl-include -l $(srctree) -r $(ROOT_WA) + +build-install: FORCE + $(CONFIG_SHELL) scripts/gen-osl-build -l $(srctree) -m $(MODLIB) + +define archhelp + echo '* image/bootimage - Kernel boot image (image.boot)' + echo ' zImage - Compressed kernel boot image (image.boot)' + echo ' install-headers - Install kernel headers in ' + echo ' 
/usr/include'
+	echo  '  es2boot	- Build kernel boot image with small embedded boot for es2 simulator'
+	echo  '  e2sboot	- Build kernel boot image with small embedded boot for e2s simulator'
+	echo  '  e8cboot	- Build kernel boot image with small embedded boot for e8c simulator'
+	echo  '  e1cpboot	- Build kernel boot image with small embedded boot for e1cp simulator'
+	echo  '  e8c2boot	- Build kernel boot image with small embedded boot for e8c2 simulator'
+	echo  '  e12cboot	- Build kernel boot image with small embedded boot for e12c simulator'
+	echo  '  e16cboot	- Build kernel boot image with small embedded boot for e16c simulator'
+	echo  '  e2c3boot	- Build kernel boot image with small embedded boot for e2c3 simulator'
+	echo  '  [with_kernel=1] - When building boot, build the compressed kernel into the boot image'
+endef
diff --git a/arch/e2k/include/asm-l/acenv.h b/arch/e2k/include/asm-l/acenv.h
new file mode 100644
index 0000000..6993514
--- /dev/null
+++ b/arch/e2k/include/asm-l/acenv.h
@@ -0,0 +1,14 @@
+
+#ifndef _ASM_L_ACENV_H_
+#define _ASM_L_ACENV_H_
+
+int __acpi_acquire_global_lock(unsigned int *lock);
+int __acpi_release_global_lock(unsigned int *lock);
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
+	((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
+	((Acq) = __acpi_release_global_lock(&facs->global_lock))
+
+#endif /* _ASM_L_ACENV_H_ */
diff --git a/arch/e2k/include/asm-l/acpi.h b/arch/e2k/include/asm-l/acpi.h
new file mode 100644
index 0000000..cab294b
--- /dev/null
+++ b/arch/e2k/include/asm-l/acpi.h
@@ -0,0 +1,137 @@
+#ifndef _ASM_L_ACPI_H
+#define _ASM_L_ACPI_H
+
+/*
+ * Copyright (C) 2001 Paul Diefenbaugh
+ * Copyright (C) 2001 Patrick Mochel
+ * Copyright (C) 2012 Evgeny Kravtsunov
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include + +#include + +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ + +#define ACPI_ASM_MACROS +#define BREAKPOINT3 +#define ACPI_DISABLE_IRQS() raw_local_irq_disable() +#define ACPI_ENABLE_IRQS() raw_local_irq_enable() + + + +#ifdef CONFIG_ACPI +#include +enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, + IDLE_POLL}; + +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_ht; +extern int acpi_pci_disabled; +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; + +extern u8 acpi_sci_flags; +extern int acpi_sci_override_gsi; +void acpi_pic_sci_set_trigger(unsigned int, u16); + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_ht = 0; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); + +static inline void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} + +/* routines for saving/restoring kernel state */ +extern int acpi_save_state_mem(void); +extern void acpi_restore_state_mem(void); + +extern unsigned long acpi_wakeup_address; + +/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + /* here check machine type taken from mptable */ + return 1; +} + +/* + * Elbrus won't implement _PDC as it is deprecated in ACPI4.0 in favor of _OSC + */ +static inline bool arch_has_acpi_pdc(void) +{ + return 0; +} + +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ + return; +} + +#else /* !CONFIG_ACPI */ + +#define acpi_lapic 0 +#define acpi_ioapic 0 +#define acpi_disable_cmcff 0 +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define ARCH_HAS_POWER_INIT 0 + +#define acpi_unlazy_tlb(x) + +#endif /* _ASM_L_ACPI_H */ diff --git a/arch/e2k/include/asm-l/apic.h b/arch/e2k/include/asm-l/apic.h new file mode 100644 index 0000000..e266988 --- /dev/null +++ b/arch/e2k/include/asm-l/apic.h @@ -0,0 +1,776 @@ +#ifndef _ASM_L_APIC_H +#define _ASM_L_APIC_H + +#include +#include + +#if 0 +#include +#include +#include +#include +#else +#include +#include +#endif +#include +#include +#include +#include +#include +#include + +#if defined CONFIG_E2K || defined CONFIG_E90S +# define cpu_has_tsc 1 +# define cpu_has_apic 1 +# define cpu_has_x2apic 0 + +# define READ_APIC_ID() GET_APIC_ID(arch_apic_read(APIC_ID)) + +extern int first_system_vector; +#endif + +#if 0 +#define ARCH_APICTIMER_STOPS_ON_C3 1 +#endif + +/* + * Debugging macros + */ 
+#define APIC_QUIET 0 +#define APIC_VERBOSE 1 +#define APIC_DEBUG 2 + +/* + * Define the default level of output to be very little + * This can be turned up by using apic=verbose for more + * information and apic=debug for _lots_ of information. + * apic_verbosity is defined in apic.c + */ +#define apic_printk(v, s, a...) do { \ + if ((v) <= apic_verbosity) \ + printk(s, ##a); \ + } while (0) + +extern unsigned int calibration_result; + +#if defined(CONFIG_L_LOCAL_APIC) && defined(CONFIG_L_X86_32) +extern void generic_apic_probe(void); +#else +static inline void generic_apic_probe(void) +{ +} +#endif + +#ifdef CONFIG_L_LOCAL_APIC + +# define READ_APIC_ID() GET_APIC_ID(arch_apic_read(APIC_ID)) +# define BOOT_READ_APIC_ID() GET_APIC_ID(boot_arch_apic_read(APIC_ID)) + +extern unsigned int apic_verbosity; +extern int local_apic_timer_c2_ok; + +#if 0 +extern int disable_apic; +#else +#define disable_apic 0 +#endif +extern unsigned int lapic_timer_frequency; + +#ifdef CONFIG_SMP +extern void __inquire_remote_apic(int apicid); +#else /* CONFIG_SMP */ +static inline void __inquire_remote_apic(int apicid) +{ +} +#endif /* CONFIG_SMP */ + +static inline void default_inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= APIC_DEBUG) + __inquire_remote_apic(apicid); +} + +/* + * With 82489DX we can't rely on apic feature bit + * retrieved via cpuid but still have to deal with + * such an apic chip so we assume that SMP configuration + * is found from MP table (64bit case uses ACPI mostly + * which set smp presence flag as well so we are safe + * to use this helper too). + */ +static inline bool apic_from_smp_config(void) +{ + return smp_found_config && !disable_apic; +} + +/* + * Basic functions accessing APICs. + */ +#ifdef CONFIG_PARAVIRT +#include +#endif + +#if 0 +#ifdef CONFIG_L_X86_64 +extern int is_vsmp_box(void); +#else +static inline int is_vsmp_box(void) +{ + return 0; +} +#endif +#else +# define is_vsmp_box() 0 +#endif +extern void xapic_wait_icr_idle(void); +extern u32 safe_xapic_wait_icr_idle(void); +extern void xapic_icr_write(u32, u32); +extern int setup_profiling_timer(unsigned int); + +#if 0 +static inline void native_apic_mem_write(u32 reg, u32 v) +{ + volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); + + alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP, + ASM_OUTPUT2("=r" (v), "=m" (*addr)), + ASM_OUTPUT2("0" (v), "m" (*addr))); +} + +static inline u32 native_apic_mem_read(u32 reg) +{ + return *((volatile u32 *)(APIC_BASE + reg)); +} +#else +static inline void native_apic_mem_write(u32 reg, u32 v) +{ + arch_apic_write(reg, v); +} + +static inline u32 native_apic_mem_read(u32 reg) +{ + return arch_apic_read(reg); +} +#endif + +extern void native_apic_wait_icr_idle(void); +extern u32 native_safe_apic_wait_icr_idle(void); +extern void native_apic_icr_write(u32 low, u32 id); +extern u64 native_apic_icr_read(void); + +extern int x2apic_mode; + +#ifdef CONFIG_X86_X2APIC +/* + * Make previous memory operations globally visible before + * sending the IPI through x2apic wrmsr. We need a serializing instruction or + * mfence for this. 
+ */ +static inline void x2apic_wrmsr_fence(void) +{ + asm volatile("mfence" : : : "memory"); +} + +static inline void native_apic_msr_write(u32 reg, u32 v) +{ + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || + reg == APIC_LVR) + return; + + wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); +} + +static inline void native_apic_msr_eoi_write(u32 reg, u32 v) +{ + wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0); +} + +static inline u32 native_apic_msr_read(u32 reg) +{ + u64 msr; + + if (reg == APIC_DFR) + return -1; + + rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); + return (u32)msr; +} + +static inline void native_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return; +} + +static inline u32 native_safe_x2apic_wait_icr_idle(void) +{ + /* no need to wait for icr idle in x2apic */ + return 0; +} + +static inline void native_x2apic_icr_write(u32 low, u32 id) +{ + wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); +} + +static inline u64 native_x2apic_icr_read(void) +{ + unsigned long val; + + rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); + return val; +} + +extern int x2apic_phys; +extern int x2apic_preenabled; +extern void check_x2apic(void); +extern void enable_x2apic(void); +extern void x2apic_icr_write(u32 low, u32 id); +static inline int x2apic_enabled(void) +{ + u64 msr; + + if (!cpu_has_x2apic) + return 0; + + rdmsrl(MSR_IA32_APICBASE, msr); + if (msr & X2APIC_ENABLE) + return 1; + return 0; +} + +#define x2apic_supported() (cpu_has_x2apic) +static inline void x2apic_force_phys(void) +{ + x2apic_phys = 1; +} +#else +static inline void disable_x2apic(void) +{ +} +static inline void check_x2apic(void) +{ +} +static inline void enable_x2apic(void) +{ +} +static inline int x2apic_enabled(void) +{ + return 0; +} +static inline void x2apic_force_phys(void) +{ +} + +#define nox2apic 0 +#define x2apic_preenabled 0 +#define x2apic_supported() 0 +#endif + +extern void enable_IR_x2apic(void); + +extern int get_physical_broadcast(void); + +extern int lapic_get_maxlvt(void); +extern void clear_local_APIC(void); +extern void connect_bsp_APIC(void); +extern void disconnect_bsp_APIC(int virt_wire_setup); +extern void disable_local_APIC(void); + +#ifdef CONFIG_E2K +extern void clear_local_APIC(void); +#endif /* CONFIG_E2K */ + +extern void lapic_shutdown(void); +extern int verify_local_APIC(void); +extern void sync_Arb_IDs(void); +extern void init_bsp_APIC(void); +extern void setup_local_APIC(void); +extern void end_local_APIC_setup(void); +extern void bsp_end_local_APIC_setup(void); +extern void init_apic_mappings(void); +void register_lapic_address(unsigned long address); +extern void setup_boot_APIC_clock(void); +extern void setup_secondary_APIC_clock(void); +extern int APIC_init_uniprocessor(void); +extern int apic_force_enable(unsigned long addr); + +/* + * On 32bit this is mach-xxx local + */ +#ifdef CONFIG_L_X86_64 +extern int apic_is_clustered_box(void); +#else +static inline int apic_is_clustered_box(void) +{ + return 0; +} +#endif + +extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); + +#else /* !CONFIG_L_LOCAL_APIC */ +static inline void lapic_shutdown(void) { } +#define local_apic_timer_c2_ok 1 +static inline void init_apic_mappings(void) { } +static inline void disable_local_APIC(void) { } + +#ifdef CONFIG_E2K +static inline void clear_local_APIC(void) { } +#endif /* CONFIG_E2K */ + +# define setup_boot_APIC_clock x86_init_noop +# define setup_secondary_APIC_clock x86_init_noop +#endif /* !CONFIG_L_LOCAL_APIC */ + 
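+/*
+ * Editorial sketch, not part of the original patch: the x2APIC accessors
+ * above map each 16-byte xAPIC MMIO register onto a single MSR, which is
+ * why native_apic_msr_read()/native_apic_msr_write() compute
+ * APIC_BASE_MSR + (reg >> 4). A hypothetical helper making that mapping
+ * explicit (guarded like the code above, since APIC_BASE_MSR is only
+ * meaningful in the CONFIG_X86_X2APIC case):
+ */
+#ifdef CONFIG_X86_X2APIC
+static inline u32 example_x2apic_msr_for(u32 reg)
+{
+	/* e.g. APIC_ICR at MMIO offset 0x300 maps to MSR 0x830 */
+	return APIC_BASE_MSR + (reg >> 4);
+}
+#endif
+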
+#ifdef CONFIG_L_X86_64 +#define SET_APIC_ID(x) (apic->set_apic_id(x)) +#else + +#endif + +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC sub-arch data struct. + * + * Hacked for x86-64 by James Cleverdon from i386 architecture code by + * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and + * James Cleverdon. + */ +struct apic { + char *name; + + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_valid)(int apicid); + int (*apic_id_registered)(void); + + u32 irq_delivery_mode; + u32 irq_dest_mode; + + const struct cpumask *(*target_cpus)(void); + + int disable_esr; + + int dest_logical; + unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); + unsigned long (*check_apicid_present)(int apicid); + + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask, + const struct cpumask *mask); + void (*init_apic_ldr)(void); + + void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); + + void (*setup_apic_routing)(void); + int (*multi_timer_check)(int apic, int irq); + int (*cpu_present_to_apicid)(int mps_cpu); + void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); + void (*setup_portio_remap)(void); + int (*check_phys_apicid_present)(int phys_apicid); + void (*enable_apic_mode)(void); + int (*phys_pkg_id)(int cpuid_apic, int index_msb); + + /* + * When one of the next two hooks returns 1 the apic + * is switched to this. Essentially they are additional + * probe functions: + */ + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); + + unsigned int (*get_apic_id)(unsigned long x); + unsigned long (*set_apic_id)(unsigned int id); + unsigned long apic_id_mask; + + int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid); + + /* ipi */ + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); + void (*send_IPI_allbutself)(int vector); + void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); + + /* wakeup_secondary_cpu */ + int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); + + int trampoline_phys_low; + int trampoline_phys_high; + + void (*wait_for_init_deassert)(atomic_t *deassert); + void (*smp_callin_clear_local_apic)(void); + void (*inquire_remote_apic)(int apicid); + + /* apic ops */ + u32 (*read)(u32 reg); + void (*write)(u32 reg, u32 v); + /* + * ->eoi_write() has the same signature as ->write(). + * + * Drivers can support both ->eoi_write() and ->write() by passing the same + * callback value. Kernel can override ->eoi_write() and fall back + * on write for EOI. + */ + void (*eoi_write)(u32 reg, u32 v); + u64 (*icr_read)(void); + void (*icr_write)(u32 low, u32 high); + void (*wait_icr_idle)(void); + u32 (*safe_wait_icr_idle)(void); + +#ifdef CONFIG_L_X86_32 + /* + * Called very early during boot from get_smp_config(). It should + * return the logical apicid. x86_[bios]_cpu_to_apicid is + * initialized before this function is called. + * + * If logical apicid can't be determined that early, the function + * may return BAD_APICID. Logical apicid will be configured after + * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity + * won't be applied properly during early boot in this case. 
+ */ + int (*x86_32_early_logical_apicid)(int cpu); + + /* + * Optional method called from setup_local_APIC() after logical + * apicid is guaranteed to be known to initialize apicid -> node + * mapping if NUMA initialization hasn't done so already. Don't + * add new users. + */ + int (*x86_32_numa_cpu_node)(int cpu); +#endif +}; + +/* + * Pointer to the local APIC driver in use on this system (there's + * always just one such driver in use - the kernel decides via an + * early probing process which one it picks - and then sticks to it): + */ +extern struct apic *apic; + +/* + * APIC drivers are probed based on how they are listed in the .apicdrivers + * section. So the order is important and enforced by the ordering + * of different apic driver files in the Makefile. + * + * For the files having two apic drivers, we use apic_drivers() + * to enforce the order with in them. + */ +#define apic_driver(sym) \ + static const struct apic *__apicdrivers_##sym __used \ + __aligned(sizeof(struct apic *)) \ + __section(.apicdrivers) = { &sym } + +#define apic_drivers(sym1, sym2) \ + static struct apic *__apicdrivers_##sym1##sym2[2] __used \ + __aligned(sizeof(struct apic *)) \ + __section(.apicdrivers) = { &sym1, &sym2 } + +extern struct apic *__apicdrivers[], *__apicdrivers_end[]; + +/* + * APIC functionality to boot other CPUs - only used on SMP: + */ +#ifdef CONFIG_SMP +extern atomic_t init_deasserted; +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +#endif + +#ifdef CONFIG_L_LOCAL_APIC + +static inline u32 apic_read(u32 reg) +{ + return apic->read(reg); +} + +static inline void apic_write(u32 reg, u32 val) +{ + apic->write(reg, val); +} + +static inline void apic_eoi(void) +{ + apic->eoi_write(APIC_EOI, APIC_EOI_ACK); +} + +static inline u64 apic_icr_read(void) +{ + return apic->icr_read(); +} + +static inline void apic_icr_write(u32 low, u32 high) +{ + apic->icr_write(low, high); +} + +static inline void apic_wait_icr_idle(void) +{ + apic->wait_icr_idle(); +} + +static inline u32 safe_apic_wait_icr_idle(void) +{ + return apic->safe_wait_icr_idle(); +} + +extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)); + +#else /* CONFIG_L_LOCAL_APIC */ + +static inline u32 apic_read(u32 reg) { return 0; } +static inline void apic_write(u32 reg, u32 val) { } +static inline void apic_eoi(void) { } +static inline u64 apic_icr_read(void) { return 0; } +static inline void apic_icr_write(u32 low, u32 high) { } +static inline void apic_wait_icr_idle(void) { } +static inline u32 safe_apic_wait_icr_idle(void) { return 0; } +static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {} + +#endif /* CONFIG_L_LOCAL_APIC */ + +static inline void ack_APIC_irq(void) +{ + /* + * ack_APIC_irq() actually gets compiled as a single instruction + * ... yummie. 
+ */ + apic_eoi(); +} + +static inline unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); + + if (APIC_XAPIC(ver)/* || boot_cpu_has(X86_FEATURE_EXTD_APICID)*/) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + +/* + * Warm reset vector default position: + */ +#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 +#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 + +#ifdef CONFIG_L_X86_64 +extern int default_acpi_madt_oem_check(char *, char *); + +extern void apic_send_IPI_self(int vector); + +#endif + +static inline void default_wait_for_init_deassert(atomic_t *deassert) +{ + while (!atomic_read(deassert)) + cpu_relax(); + return; +} + +extern void generic_bigsmp_probe(void); + + +#ifdef CONFIG_L_LOCAL_APIC + +#if 0 +#include +#endif + +#define APIC_DFR_VALUE (APIC_DFR_FLAT) + +static inline const struct cpumask *default_target_cpus(void) +{ +#ifdef CONFIG_SMP + return cpu_online_mask; +#else + return cpumask_of(0); +#endif +} + +static inline const struct cpumask *online_target_cpus(void) +{ + return cpu_online_mask; +} + +DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid); +DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); +#ifdef CONFIG_SMP +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +#else +#define cpu_physical_id(cpu) boot_cpu_physical_apicid +#endif + + +static inline unsigned int read_apic_id(void) +{ + unsigned int reg; + + reg = apic_read(APIC_ID); + + return apic->get_apic_id(reg); +} + +static inline int default_apic_id_valid(int apicid) +{ + return (apicid < 255); +} + +extern void default_setup_apic_routing(void); + +extern struct apic apic_noop; + +#ifdef CONFIG_L_X86_32 + +static inline int noop_x86_32_early_logical_apicid(int cpu) +{ + return BAD_APICID; +} + +/* + * Set up the logical destination ID. + * + * Intel recommends to set DFR, LDR and TPR before enabling + * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel + * document number 292116). So here it goes... + */ +extern void default_init_apic_ldr(void); + +static inline int default_apic_id_registered(void) +{ + return physid_isset(read_apic_id(), phys_cpu_present_map); +} + +static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; +} + +#endif + +static inline int +flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid) +{ + unsigned long cpu_mask = cpumask_bits(cpumask)[0] & + cpumask_bits(andmask)[0] & + cpumask_bits(cpu_online_mask)[0] & + APIC_ALL_CPUS; + + if (likely(cpu_mask)) { + *apicid = (unsigned int)cpu_mask; + return 0; + } else { + return -EINVAL; + } +} + +extern int +default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask, + unsigned int *apicid); + +static inline void +flat_vector_allocation_domain(int cpu, struct cpumask *retmask, + const struct cpumask *mask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. 
+ */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + +static inline void +default_vector_allocation_domain(int cpu, struct cpumask *retmask, + const struct cpumask *mask) +{ + cpumask_copy(retmask, cpumask_of(cpu)); +} + +static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) +{ + return physid_isset(apicid, *map); +} + +static inline unsigned long default_check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) +{ + *retmap = *phys_map; +} + +static inline int __default_cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); + else + return BAD_APICID; +} + +static inline int +__default_check_phys_apicid_present(int phys_apicid) +{ + return physid_isset(phys_apicid, phys_cpu_present_map); +} + +/* #ifdef CONFIG_L_X86_32 */ +#if 1 +static inline int default_cpu_present_to_apicid(int mps_cpu) +{ + return __default_cpu_present_to_apicid(mps_cpu); +} + +static inline int +default_check_phys_apicid_present(int phys_apicid) +{ + return __default_check_phys_apicid_present(phys_apicid); +} +#else +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int phys_apicid); +#endif + +#endif /* CONFIG_L_LOCAL_APIC */ + +static inline void entering_irq(void) +{ + l_irq_enter(); + exit_idle(); +} + +static inline void entering_ack_irq(void) +{ + entering_irq(); + ack_APIC_irq(); +} + +static inline void exiting_irq(void) +{ + l_irq_exit(); +} + +static inline void exiting_ack_irq(void) +{ + l_irq_exit(); + /* Ack only at the end to avoid potential reentry */ + ack_APIC_irq(); +} + +extern void ioapic_zap_locks(void); +struct irq_data; +extern void ack_apic_edge(struct irq_data *data); +#endif /* _ASM_L_APIC_H */ diff --git a/arch/e2k/include/asm-l/apicdef.h b/arch/e2k/include/asm-l/apicdef.h new file mode 100644 index 0000000..e9a1165 --- /dev/null +++ b/arch/e2k/include/asm-l/apicdef.h @@ -0,0 +1,529 @@ +#ifndef _ASM_L_APICDEF_H +#define _ASM_L_APICDEF_H + +/* + * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) + * + * Alan Cox , 1995. 
+ * Ingo Molnar , 1999, 2000 + */ + +#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000 +#define APIC_DEFAULT_PHYS_BASE 0xfee00000 + +/* + * This is the IO-APIC register space as specified + * by Intel docs: + */ +#define IO_APIC_SLOT_SIZE 1024 +#define APIC_REGS_SIZE 0x1000 + +#define APIC_BSP 0x10 +#define APIC_BSP_ENABLE 0x00000800 +#define APIC_BSP_IS_BSP 0x00000100 +#define APIC_ENABLE(x) ((x) & APIC_BSP_ENABLE) +#define BootStrap(x) ((x) & APIC_BSP_IS_BSP) +#define APIC_ID 0x20 +#define APIC_ID_SHIFT 24 +#define APIC_ID_SIZE 8 +#define APIC_ID_BIT_MASK ((1 << APIC_ID_SIZE) - 1) +#define APIC_ID_MASK (APIC_ID_BIT_MASK << \ + APIC_ID_SHIFT) +#define GET_APIC_ID(x) (((x) >> APIC_ID_SHIFT) & \ + APIC_ID_BIT_MASK) +#define APIC_LVR 0x30 +#define APIC_LVR_MASK 0xFF00FF +#define APIC_LVR_DIRECTED_EOI (1 << 24) +#define APIC_MAXLVT 0x03 +#define APIC_VERSION 0x10 +#define GET_APIC_VERSION(x) ((x) & 0xFFu) +#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) +#define SET_APIC_VERSION(x) ((x) & 0xFF) +#define SET_APIC_MAXLVT(x) (((x) & 0xff) << 16) +#if 0 +# define APIC_INTEGRATED(x) ((x) & 0xF0u) +#else +# define APIC_INTEGRATED(x) (1) +#endif +#define APIC_XAPIC(x) ((x) >= 0x14) +#define APIC_EXT_SPACE(x) ((x) & 0x80000000) +#define APIC_TASKPRI 0x80 +#define APIC_TPRI_MASK 0xFFu +#define APIC_ARBPRI 0x90 +#define APIC_ARBPRI_MASK 0xFFu +#define APIC_PROCPRI 0xA0 +#define APIC_EOI 0xB0 +#define APIC_EOI_ACK 0x0 +#define APIC_RRR 0xC0 +#define APIC_LDR 0xD0 +#define APIC_LDR_MASK (0xFFu << 24) +#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x) << 24)) +#define APIC_ALL_CPUS 0xFFu +#define APIC_DFR 0xE0 +#define GET_APIC_DLVR_MODE(x) (((x) >> 28) & 0xF) +#define APIC_DFR_CLUSTER 0x0FFFFFFFul +#define APIC_DFR_FLAT 0xFFFFFFFFul +#define APIC_SPIV 0xF0 +#define APIC_SPIV_DIRECTED_EOI (1 << 12) +#define APIC_SPIV_FOCUS_DISABLED (1 << 9) +#define APIC_SPIV_APIC_ENABLED (1 << 8) +#define APIC_SOFT_ENABLED(x) ((x) & APIC_SPIV_APIC_ENABLED) +#define APIC_FOCUS_DISABLED(x) ((x) & APIC_SPIV_FOCUS_DISABLED) +#define APIC_SPIV_SPURIOUS_VECT 0x000FF +#define GET_SPURIOUS_VECTOR(x) ((x) & APIC_SPIV_SPURIOUS_VECT) +#define SET_SPURIOUS_VECTOR(x) ((x) & APIC_SPIV_SPURIOUS_VECT) +#define APIC_ISR 0x100 +#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. 
*/ +#define APIC_TMR 0x180 +#define APIC_IRR 0x200 +#define APIC_ESR 0x280 +#define APIC_ESR_SEND_CS 0x00001 +#define APIC_ESR_RECV_CS 0x00002 +#define APIC_ESR_SEND_ACC 0x00004 +#define APIC_ESR_RECV_ACC 0x00008 +#define APIC_ESR_SENDILL 0x00020 +#define APIC_ESR_RECVILL 0x00040 +#define APIC_ESR_ILLREGA 0x00080 +#define APIC_LVTCMCI 0x2f0 +#define APIC_ICR 0x300 +#define APIC_DEST_SELF 0x40000 +#define APIC_DEST_ALLINC 0x80000 +#define APIC_DEST_ALLBUT 0xC0000 +#define APIC_ICR_RR_MASK 0x30000 +#define APIC_ICR_RR_INVALID 0x00000 +#define APIC_ICR_RR_INPROG 0x10000 +#define APIC_ICR_RR_VALID 0x20000 +#define APIC_INT_LEVELTRIG 0x08000 +#define APIC_INT_ASSERT 0x04000 +#define APIC_ICR_BUSY 0x01000 +#define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 +#define APIC_DM_FIXED 0x00000 +#define APIC_DM_LOWEST 0x00100 +#define APIC_DM_SMI 0x00200 +#define APIC_DM_REMRD 0x00300 +#define APIC_DM_NMI 0x00400 +#define APIC_DM_INIT 0x00500 +#define APIC_DM_STARTUP 0x00600 +#define APIC_DM_EXTINT 0x00700 +#define APIC_VECTOR_MASK 0x000FF +#define APIC_ICR2 0x310 +#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) +#define SET_APIC_DEST_FIELD(x) ((x) << 24) +#define APIC_LVTT 0x320 +#define APIC_LVTTHMR 0x330 +#define APIC_LVTPC 0x340 +#define APIC_LVT0 0x350 +#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) +#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) +#define SET_APIC_TIMER_BASE(x) (((x) << 18)) +#define APIC_TIMER_BASE_CLKIN 0x0 +#define APIC_TIMER_BASE_TMBASE 0x1 +#define APIC_TIMER_BASE_DIV 0x2 +#define APIC_LVT_TIMER_PERIODIC (1 << 17) +#define APIC_LVT_MASKED (1 << 16) +#define APIC_LVT_LEVEL_TRIGGER (1 << 15) +#define APIC_LVT_REMOTE_IRR (1 << 14) +#define APIC_INPUT_POLARITY (1 << 13) +#define APIC_SEND_PENDING (1 << 12) +#define APIC_MODE_MASK 0x700 +#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) +#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) +#define APIC_MODE_FIXED 0x0 +#define APIC_MODE_NMI 0x4 +#define APIC_MODE_EXTINT 0x7 +#define APIC_LVT1 0x360 +#define APIC_LVTERR 0x370 +#define APIC_TMICT 0x380 +#define APIC_TMCCT 0x390 +#define APIC_TDCR 0x3E0 +#define APIC_SELF_IPI 0x3F0 +#define APIC_TDR_DIV_TMBASE (1 << 2) +#define APIC_TDR_DIV_1 0xB +#define APIC_TDR_DIV_2 0x0 +#define APIC_TDR_DIV_4 0x1 +#define APIC_TDR_DIV_8 0x2 +#define APIC_TDR_DIV_16 0x3 +#define APIC_TDR_DIV_32 0x8 +#define APIC_TDR_DIV_64 0x9 +#define APIC_TDR_DIV_128 0xA +#if 0 +#define APIC_EFEAT 0x400 +#define APIC_ECTRL 0x410 +#define APIC_EILVTn(n) (0x500 + 0x10 * n) +#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ +#define APIC_EILVT_NR_AMD_10H 4 +#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) +#define APIC_EILVT_MSG_FIX 0x0 +#define APIC_EILVT_MSG_SMI 0x2 +#define APIC_EILVT_MSG_NMI 0x4 +#define APIC_EILVT_MSG_EXT 0x7 +#define APIC_EILVT_MASKED (1 << 16) +#endif +#define APIC_NM_TIMER_LVTT 0xf00 +#define APIC_NM_TIMER_INIT_COUNT 0xf10 +#define APIC_NM_TIMER_CURRENT_COUNT 0xf20 +#define APIC_NM_TIMER_DIVIDER 0xf30 +#define APIC_LVT2 0xf40 +#define APIC_LVT3 0xf50 +#define APIC_DSP APIC_LVT3 +#define APIC_LVT4 0xf60 +#define APIC_M_ERM 0xfc0 +#define APIC_NM_WATCHDOG 0x80000000 +#define APIC_NM_WATCHDOG1 0x40000000 +#define APIC_NM_SPECIAL 0x20000 +#define APIC_NM_TIMER 0x10000 +#define APIC_NM_NMI_DEBUG_MASK 0x8000 +#define APIC_NM_INTQLAPIC_MASK 0x4000 +#define APIC_NM_INT_VIOLAT_MASK 0x2000 +#define APIC_NM 0xfe0 +#define APIC_NM_BIT_MASK 0x7ff00 +#define APIC_NM_PCI 0x40000 +#define APIC_NM_SPECIAL 0x20000 +#define APIC_NM_TIMER 0x10000 +#define 
APIC_NM_NMI_DEBUG 0x8000 +#define APIC_NM_INTQLAPIC 0x4000 +#define APIC_NM_INT_VIOLAT 0x2000 +#define APIC_NM_STARTUP 0x1000 +#define APIC_NM_INIT 0x0800 +#define APIC_NM_NMI 0x0400 +#define APIC_NM_SMI 0x0200 +#define APIC_NM_EXTINT 0x0100 +#define APIC_NM_STARTUP_ADDR 0x00ff +#define GET_APIC_STARTUP_ADDR(x) ((x) & APIC_NM_STARTUP_ADDR) +#define APIC_NM_MASK(x) ((x) & APIC_NM_BIT_MASK) +#define GET_APIC_NM_BITS(x) (((x) & APIC_NM_BIT_MASK) >> 9) +#define APIC_NM_IS_STRATUP(x) ((x) & APIC_NM_STARTUP) +#define APIC_NM_IS_INIT(x) ((x) & APIC_NM_INIT) +#define APIC_NM_IS_NMI(x) ((x) & APIC_NM_NMI) +#define APIC_NM_IS_SMI(x) ((x) & APIC_NM_SMI) +#define APIC_VECT 0xff0 +#define APIC_VECT_VECTOR_MASK 0x000000ff +#define APIC_VECT_EXTINT (1 << 31) +#define APIC_VECT_VECTOR(x) ((x) & APIC_VECT_VECTOR_MASK) +#define APIC_VECT_IS_EXTINT(x) ((x) & APIC_VECT_EXTINT) + +#if 0 +#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) +#define APIC_BASE_MSR 0x800 +#else +#define APIC_BASE 0x00000000fee00000UL +#endif +#define X2APIC_ENABLE (1UL << 10) + +/* + * a maximum number of IO-APICs depends on the following: + * each IO link can have IOHUB with IO-APIC + * each node can have embedded IO-APIC + */ +#define MAX_IO_APICS (MAX_NUMIOLINKS + MAX_NUMNODES) +#define MAX_LOCAL_APIC MAX_APICS +#if 0 +#ifdef CONFIG_L_X86_32 +# define MAX_IO_APICS 64 +# define MAX_LOCAL_APIC 256 +#else +# define MAX_IO_APICS 128 +# define MAX_LOCAL_APIC 32768 +#endif +#endif + +/* + * All x86-64 systems are xAPIC compatible. + * In the following, "apicid" is a physical APIC ID. + */ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) +#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) +#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) +#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) +#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) + +#if 0 +#ifndef __ASSEMBLY__ +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
+ */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved[4]; } __reserved_02; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 __reserved_4[3]; + } lvt_timer; + +/*330*/ struct { /* LVT - Thermal Sensor */ + u32 vector : 8, + 
delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_thermal; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; + +} __attribute__ ((packed)); + +#undef u32 +#endif /* __ASSEMBLY__ */ +#endif + +#if 0 +#ifdef CONFIG_L_X86_32 + #define BAD_APICID 0xFFu +#else + #define BAD_APICID 0xFFFFu +#endif +#else + #define BAD_APICID 0xFFu +#endif + +#ifndef __ASSEMBLY__ +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; +#endif + +#endif /* _ASM_L_APICDEF_H */ diff --git a/arch/e2k/include/asm-l/boot_profiling.h b/arch/e2k/include/asm-l/boot_profiling.h new file mode 100644 index 0000000..a03efb9 --- /dev/null +++ b/arch/e2k/include/asm-l/boot_profiling.h @@ -0,0 +1,35 @@ +#ifndef _ASM_L_BOOT_PROFILING_H +#define _ASM_L_BOOT_PROFILING_H + +#ifdef CONFIG_BOOT_TRACE + +#include + +extern void notrace add_boot_trace_event(const char *fmt, ...); +extern struct boot_tracepoint *boot_trace_prev_event(int cpu, + struct boot_tracepoint *event); +extern struct boot_tracepoint *boot_trace_next_event(int cpu, + struct boot_tracepoint *event); +extern void stop_boot_trace(void); + +# define BOOT_TRACE_ARRAY_SIZE (1500 + 20 * NR_CPUS) + +struct boot_tracepoint { + char name[81]; + unsigned int cpu; + u64 cycles; + struct list_head list; +}; + +extern struct boot_tracepoint boot_trace_events[BOOT_TRACE_ARRAY_SIZE]; +extern struct list_head boot_trace_cpu_events_list[]; +extern atomic_t boot_trace_top_event; +extern int boot_trace_enabled; +# define BOOT_TRACEPOINT(...) add_boot_trace_event(__VA_ARGS__) + +#else /* !CONFIG_BOOT_TRACE */ +# define BOOT_TRACEPOINT(...) 
do { } while(0)
+#endif /* CONFIG_BOOT_TRACE */
+
+#endif /* _ASM_L_BOOT_PROFILING_H */
+
diff --git a/arch/e2k/include/asm-l/bootinfo.h b/arch/e2k/include/asm-l/bootinfo.h
new file mode 100644
index 0000000..0ff5c64
--- /dev/null
+++ b/arch/e2k/include/asm-l/bootinfo.h
@@ -0,0 +1,315 @@
+#ifndef _L_BOOTINFO_H_
+#define _L_BOOTINFO_H_
+
+#if defined(__KERNEL__) || defined(__KVM_BOOTINFO_SUPPORT__)
+
+/*
+ * 0x0:
+ * 0x1: extended command line
+ */
+#define	BOOTBLOCK_VER		0x1
+
+#define	KSTRMAX_SIZE		128
+#define	KSTRMAX_SIZE_EX		512
+#define	BIOS_INFO_SIGN_SIZE	8
+#define	KERNEL_ARGS_STRING_EX_SIGN_SIZE	22
+#define	BOOT_VER_STR_SIZE	128
+#define	BOOTBLOCK_SIZE		0x1000	/* 1 PAGE_SIZE */
+#define	X86BOOT_SIGNATURE	0x8086
+#define	ROMLOADER_SIGNATURE	0xe200
+#define	KVM_GUEST_SIGNATURE	0x20e2
+#define	BIOS_INFO_SIGNATURE	"E2KBIOS"
+#define	KVM_INFO_SIGNATURE	"E2KKVM"
+#define	KERNEL_ARGS_STRING_EX_SIGNATURE	"KERNEL_ARGS_STRING_EX"
+#define	BOOT_KERNEL_ARGS_STRING_EX_SIGNATURE \
+		boot_va_to_pa(KERNEL_ARGS_STRING_EX_SIGNATURE)
+
+/*
+ * Below is boot information that comes out of the x86 code of the Linux/E2K
+ * loader prototype.
+ */
+
+/* L_MAX_NODE_PHYS_BANKS = 4 sometimes is not enough, so we increase it to
+ * an arbitrary value (64 now). The old L_MAX_NODE_PHYS_BANKS is renamed to
+ * L_MAX_NODE_PHYS_BANKS_FUSTY and kept for boot_info compatibility.
+ *
+ * L_MAX_NODE_PHYS_BANKS_FUSTY and L_MAX_MEM_NUMNODES describe the max size
+ * of the array of memory banks on all nodes and should agree with the old
+ * value of L_MAX_PHYS_BANKS for compatibility with the old boot_info
+ * structure (bank) size, so L_MAX_NODE_PHYS_BANKS_FUSTY * L_MAX_MEM_NUMNODES
+ * should be equal to 32.
+ */
+#define	L_MAX_NODE_PHYS_BANKS		64	/* max number of memory banks */
+						/* on one node */
+#define	L_MAX_NODE_PHYS_BANKS_FUSTY	4	/* fusty max number of memory */
+						/* banks on one node */
+#define	L_MAX_PHYS_BANKS_EX		64	/* max number of memory banks */
+						/* in banks_ex field of */
+						/* boot_info */
+#define	L_MAX_MEM_NUMNODES		8	/* max number of nodes in the */
+						/* list of memory banks on */
+						/* each node */
+#define	L_MAX_BUSY_AREAS		4	/* max number of busy areas */
+						/* occupied by BIOS which should */
+						/* be kept unchanged by kernel to */
+						/* support recovery mode */
+
+#ifndef __ASSEMBLY__
+
+typedef struct bank_info {
+	__u64	address;	/* start address of bank */
+	__u64	size;		/* size of bank in bytes */
+} bank_info_t;
+
+typedef struct node_banks {
+	bank_info_t	banks[L_MAX_NODE_PHYS_BANKS_FUSTY]; /* memory banks */
+							    /* array of a node */
+} node_banks_t;
+
+typedef struct boot_times {
+	__u64	arch;
+	__u64	unpack;
+	__u64	pci;
+	__u64	drivers1;
+	__u64	drivers2;
+	__u64	menu;
+	__u64	sm;
+	__u64	kernel;
+	__u64	reserved[8];
+} boot_times_t;
+
+typedef struct bios_info {
+	__u8	signature[BIOS_INFO_SIGN_SIZE];	/* signature, 'E2KBIOS' */
+	__u8	boot_ver[BOOT_VER_STR_SIZE];	/* boot version */
+	__u8	mb_type;			/* motherboard type */
+	__u8	chipset_type;			/* chipset type */
+	__u8	cpu_type;			/* cpu type */
+	__u8	kernel_args_string_ex[KSTRMAX_SIZE_EX];	/* extended kernel */
+						/* command line, used to */
+						/* pass the command line */
+						/* from e2k BIOS */
+	__u8	reserved1;			/* reserved1 */
+	__u32	cache_lines_damaged;		/* number of damaged */
+						/* cache lines */
+	__u64	nodes_mem_slabs_deprecated[52];	/* array of slabs of */
+						/* accessible memory */
+						/* on each node */
+	bank_info_t banks_ex[L_MAX_PHYS_BANKS_EX]; /* extended array of */
+						/* descriptors of banks of */
+						/* available physical memory */
+	__u64	devtree;			/* devtree pointer */
+	__u32	bootlog_addr;			/* bootlog address */
+	__u32	bootlog_len;			/* bootlog length */
+	__u8	uuid[16];			/* UUID of boot device */
+} bios_info_t;
+
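+/*
+ * Editorial sketch, not part of the original patch: a hypothetical helper
+ * showing how the per-node bank arrays above are meant to be walked. It
+ * assumes an unused slot is reported with size 0, which the header itself
+ * does not guarantee.
+ */
+static inline __u64 example_node_mem_size(const node_banks_t *node)
+{
+	__u64 total = 0;
+	int i;
+
+	for (i = 0; i < L_MAX_NODE_PHYS_BANKS_FUSTY; i++)
+		total += node->banks[i].size;	/* empty slots contribute 0 */
+
+	return total;
+}
+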
+typedef struct boot_info {
+	__u16	signature;		/* signature, 0x8086 */
+	__u8	target_mdl;		/* target cpu model number */
+	__u8	reserved1;		/* reserved1 */
+	__u16	reserved2;		/* reserved2 */
+	__u8	vga_mode;		/* vga mode */
+	__u8	num_of_banks;		/* number of available physical memory banks */
+					/* see below bank array */
+					/* total number on all nodes or 0 */
+	__u64	kernel_base;		/* base address to load kernel image */
+					/* if 0 then BIOS can load at any address */
+					/* but the address should be large page size */
+					/* aligned - 4 Mb */
+	__u64	kernel_size;		/* kernel image size in bytes */
+	__u64	ramdisk_base;		/* base address to load RAM-disk */
+					/* now not used */
+	__u64	ramdisk_size;		/* RAM-disk size in bytes */
+
+	__u16	num_of_cpus;		/* number of started physical CPU(s) */
+	__u16	mach_flags;		/* machine identification flags */
+					/* should be set by our romloader and BIOS */
+	__u16	num_of_busy;		/* number of busy areas occupied by BIOS */
+					/* see below busy array */
+	__u16	num_of_nodes;		/* number of nodes on NUMA system */
+	__u64	mp_table_base;		/* MP-table base address */
+	__u64	serial_base;		/* base address of serial port for Am85c30 */
+					/* used for debugging purposes */
+	__u64	nodes_map;		/* online nodes map */
+	__u64	mach_serialn;		/* serial number of the machine */
+	__u8	mac_addr[6];		/* base MAC address for ethernet cards */
+	__u16	reserved3;		/* reserved3 */
+
+	char	kernel_args_string[KSTRMAX_SIZE]; /* command line of kernel */
+					/* used to pass the command line */
+					/* from e2k BIOS */
+	node_banks_t	nodes_mem[L_MAX_MEM_NUMNODES]; /* array of */
+					/* descriptors of banks of */
+					/* available physical memory */
+					/* on each node */
+	bank_info_t	busy[L_MAX_BUSY_AREAS]; /* descriptors of areas */
+					/* occupied by BIOS, all this */
+					/* should be kept in system */
+					/* recovery mode */
+	u64	cntp_info_deprecated[32];	/* control points */
+						/* info to save and */
+						/* restore their state */
+	u64	dmp_deprecated[20];	/* info for future work of */
+					/* dump analyzer */
+	__u64	reserved4[13];		/* reserved4 */
+	__u8	mb_name[16];		/* motherboard product name */
+	__u32	reserved5;		/* reserved5 */
+	__u32	kernel_csum;		/* kernel image checksum */
+	bios_info_t	bios;		/* extended BIOS info */
+					/* SHOULD BE the LAST ITEM in this */
+					/* structure */
+} boot_info_t;
+
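+/*
+ * Editorial sketch, not part of the original patch: the signature field is
+ * expected to carry one of the loader signatures defined at the top of this
+ * header, so a minimal sanity check (hypothetical helper) could be:
+ */
+static inline int example_boot_info_valid(const boot_info_t *info)
+{
+	return info->signature == X86BOOT_SIGNATURE ||
+		info->signature == ROMLOADER_SIGNATURE ||
+		info->signature == KVM_GUEST_SIGNATURE;
+}
+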
*/ + + __u8 cnt_points_num_deprecated; /* number of control points */ /* all memory will be divided */ /* into this number of parts */ + __u8 cur_cnt_point_deprecated; /* current # of active */ /* control point (running */ /* part) */ + __u8 mem_cnt_points_deprecated; /* number of started control */ /* points (ready in the memory) */ + __u8 disk_cnt_points_deprecated; /* number of control points */ /* saved on the disk (ready */ /* to be loaded from disk) */ + __u8 cnt_points_created_deprecated; /* all control points created */ /* in the memory and on disk */ + __u64 dump_sector_deprecated; /* start sector # to dump */ /* physical memory */ + __u64 cnt_point_sector_deprecated; /* start sector # to save */ /* restore control points */ + __u16 dump_dev_deprecated; /* disk # to dump memory */ + __u16 cnt_point_dev_deprecated; /* disk # for save/restore */ /* control point */ + + __u16 boot_flags; /* boot flags: if non */ /* zero then this structure */ /* is recovery info */ /* structure instead of boot */ /* info structure */ + __u16 x86_marker; /* marker of the end of x86 */ /* boot block (0xAA55) */ +} bootblock_struct_t; + +extern bootblock_struct_t *bootblock_virt; /* bootblock structure */ /* virtual pointer */ +#endif /* ! __ASSEMBLY__ */ + +/* + * Boot block flags to elaborate boot modes + */ + +#define RECOVERY_BB_FLAG 0x0001 /* recovery flag: if non zero then */ /* this structure is recovery info */ /* structure instead of boot info */ /* structure */ /* BIOS should not clear memory */ /* and should keep current state of */ /* physical memory */ +#define CNT_POINT_BB_FLAG 0x0002 /* kernel restarted in the mode of */ /* control point creation */ /* BIOS should read kernel image from */ /* the disk to the specified area of */ /* the memory and start kernel (this */ /* flag should be with */ /* RECOVERY_BB_FLAG flag) */ +#define NO_READ_IMAGE_BB_FLAG 0x0004 /* BIOS should not read kernel image */ /* from disk and start current */ /* image in the specified area of */ /* the memory (this flag should be */ /* with RECOVERY_BB_FLAG flag) */ +#define DUMP_ANALYZE_BB_FLAG 0x0008 /* This flag is used only by kernel */ /* to indicate dump analyzer mode */ +#define MEMORY_DUMP_BB_FLAG 0x0010 /* BIOS should dump all physical */ /* memory before starting all other */ /* actions */ + +/* + * The machine identification flags + */ + +#define SIMULATOR_MACH_FLAG 0x0001 /* system is running on */ /* simulator */ +#define PROTOTYPE_MACH_FLAG_DEPRECATED 0x0002 /* machine is prototype */ +#define IOHUB_MACH_FLAG 0x0004 /* machine has IOHUB */ +#define OLDMGA_MACH_FLAG 0x0008 /* MGA card has old firmware */ +#define MULTILINK_MACH_FLAG 0x0010 /* some nodes are connected */ /* by several IP links */ +#define MSI_MACH_FLAG 0x0020 /* boot inits right values in */ /* apic to support MSI. */ /* Meaningful for e2k only.
For */ /* v9 it is always true */ +#define KVM_GUEST_MACH_FLAG 0x0100 /* system is running */ /* as KVM guest */ + +/* + * The chipset types + */ + +#define CHIPSET_TYPE_PIIX4 0x01 /* PIIX4 */ +#define CHIPSET_TYPE_IOHUB 0x02 /* IOHUB */ + +/* + * The chipset type names + */ + +#define GET_CHIPSET_TYPE_NAME(type) \ +({ \ + char *name; \ + \ + switch (type) { \ + case CHIPSET_TYPE_PIIX4: \ + name = "PIIX4"; \ + break; \ + case CHIPSET_TYPE_IOHUB: \ + name = "IOHUB"; \ + break; \ + default: \ + name = "?????"; \ + } \ + \ + name; \ +}) + +extern char *mcst_mb_name; + +#endif /* __KERNEL__ || __KVM_BOOTINFO_SUPPORT__ */ + +#endif /* _L_BOOTINFO_H_ */ + diff --git a/arch/e2k/include/asm-l/clk_rt.h b/arch/e2k/include/asm-l/clk_rt.h new file mode 100644 index 0000000..b1aea1d --- /dev/null +++ b/arch/e2k/include/asm-l/clk_rt.h @@ -0,0 +1,22 @@ +#ifndef _ASM_L_CLK_RT_H +#define _ASM_L_CLK_RT_H + +#define CLK_RT_NO 0 +#define CLK_RT_RTC 1 +#define CLK_RT_EXT 2 +#define CLK_RT_RESUME 3 + +extern struct clocksource clocksource_clk_rt; + +extern int clk_rt_mode; +extern atomic_t num_clk_rt_register; +extern int clk_rt_register(void *); +extern struct clocksource clocksource_clk_rt; +extern int proc_clk_rt(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int read_clk_rt_freq(void); +extern void clk_rt_set_mode(void *mode_arg); +extern u64 raw_read_clk_rt(void); +extern struct clocksource lt_cs; +extern struct clocksource *curr_clocksource; +#endif diff --git a/arch/e2k/include/asm-l/clkr.h b/arch/e2k/include/asm-l/clkr.h new file mode 100644 index 0000000..c6136a4 --- /dev/null +++ b/arch/e2k/include/asm-l/clkr.h @@ -0,0 +1,6 @@ +#ifndef _ASM_L_CLKR_H +#define _ASM_L_CLKR_H + +extern struct clocksource clocksource_clkr; + +#endif diff --git a/arch/e2k/include/asm-l/console.h b/arch/e2k/include/asm-l/console.h new file mode 100644 index 0000000..3c4c900 --- /dev/null +++ b/arch/e2k/include/asm-l/console.h @@ -0,0 +1,59 @@ + +#ifndef _L_CONSOLE_H_ +#define _L_CONSOLE_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SERIAL_PRINTK +# ifdef CONFIG_SERIAL_AM85C30_CONSOLE +extern serial_console_opts_t am85c30_serial_console; +# endif + +extern serial_console_opts_t *serial_console_opts; +# define opts_entry(opts, member) opts->member +# define serial_console_opts_entry(entry) opts_entry(serial_console_opts, entry) + +extern unsigned char serial_dump_console_num; + +extern void *get_serial_console_io_base(void); + +extern void setup_serial_dump_console(boot_info_t *); +#endif /* CONFIG_SERIAL_PRINTK */ + +#ifdef CONFIG_L_EARLY_PRINTK +extern void dump_printk(char const *fmt_v, ...); +extern void dump_vprintk(char const *fmt, va_list ap); +extern void dump_puts(const char *s); +extern void dump_putns(const char *s, int n); + +# ifdef CONFIG_EARLY_DUMP_CONSOLE +extern void register_early_dump_console(void); +# else +static inline void register_early_dump_console(void) { }; +# endif + +# ifdef CONFIG_EARLY_PRINTK +extern int switch_to_early_dump_console(void); +extern void switch_from_early_dump_console(void); +# endif + +#else /* !CONFIG_L_EARLY_PRINTK */ +# define dump_printk printk +# define dump_vprintk vprintk +# define dump_puts(s) printk("%s", (s)) +static inline void register_early_dump_console(void) { }; + +#endif /* CONFIG_L_EARLY_PRINTK */ + +#if defined(CONFIG_SERIAL_AM85C30_CONSOLE) && defined(CONFIG_SERIAL_L_ZILOG) +extern raw_spinlock_t *uap_a_reg_lock; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _L_CONSOLE_H_ */
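The bootblock definitions above are the primary kernel<->BIOS handoff interface, and the pieces are meant to be used together: bootblock_virt points at the live bootblock, and GET_CHIPSET_TYPE_NAME() is a GCC statement expression that yields a printable chipset name. A minimal consumer sketch follows (illustrative only, not part of this patch; the report_boot_info() helper and the message strings are assumptions):

	/* Hypothetical consumer of the bootinfo.h declarations above. */
	static void report_boot_info(void)
	{
		boot_info_t *info = &bootblock_virt->info;

		if (info->mach_flags & SIMULATOR_MACH_FLAG)
			printk("boot: running on simulator\n");
		if (info->signature == KVM_GUEST_SIGNATURE)
			printk("boot: started as KVM guest\n");

		/* GET_CHIPSET_TYPE_NAME() evaluates to the 'name' pointer
		 * assigned in its switch statement. */
		printk("boot: chipset %s, %u CPU(s)\n",
		       GET_CHIPSET_TYPE_NAME(info->bios.chipset_type),
		       (unsigned int)info->num_of_cpus);
	}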
diff --git a/arch/e2k/include/asm-l/console_types.h b/arch/e2k/include/asm-l/console_types.h new file mode 100644 index 0000000..c27a654 --- /dev/null +++ b/arch/e2k/include/asm-l/console_types.h @@ -0,0 +1,31 @@ +#ifndef _L_CONSOLE_TYPES_H_ +#define _L_CONSOLE_TYPES_H_ + +#ifndef __ASSEMBLY__ +#include +#ifdef CONFIG_E2K +# include +extern boot_spinlock_t vprint_lock; +#endif + +#define L_LMS_CONS_DATA_PORT LMS_CONS_DATA_PORT +#define L_LMS_CONS_STATUS_PORT LMS_CONS_STATUS_PORT + +#define SERIAL_CONSOLE_8250_NAME "8250" + +#if defined CONFIG_SERIAL_PRINTK || defined CONFIG_SERIAL_BOOT_PRINTK +# define SERIAL_CONSOLE_16550_NAME "ns16550" +# define SERIAL_CONSOLE_AM85C30_NAME "AM85C30" + +typedef struct serial_console_opts_ { + char* name; + unsigned long long io_base; + unsigned char (*serial_getc)(void); + int (*serial_tstc)(void); + int (*init)(void *serial_io_base); + void (*serial_putc)(unsigned char c); +} serial_console_opts_t; +#endif /* SERIAL_PRINTK || SERIAL_BOOT_PRINTK */ + +#endif /* __ASSEMBLY__ */ +#endif /* _L_CONSOLE_TYPES_H_ */ diff --git a/arch/e2k/include/asm-l/devtree.h b/arch/e2k/include/asm-l/devtree.h new file mode 100644 index 0000000..9727c86 --- /dev/null +++ b/arch/e2k/include/asm-l/devtree.h @@ -0,0 +1,13 @@ +#ifndef _ASM_L_DEVTREE_H +#define _ASM_L_DEVTREE_H +#include +int device_tree_init(void); +void get_dtb_from_boot(u8*, u32); +u32 get_dtb_size(void); +extern int devtree_detected; + +#ifdef CONFIG_DTB_L_TEST +extern unsigned char test_blob[]; +#endif + +#endif /* _ASM_L_DEVTREE_H */ diff --git a/arch/e2k/include/asm-l/dma-direct.h b/arch/e2k/include/asm-l/dma-direct.h new file mode 100644 index 0000000..2dfbc53 --- /dev/null +++ b/arch/e2k/include/asm-l/dma-direct.h @@ -0,0 +1,29 @@ +#ifndef ___ASM_L_DMA_DIRECT_H +#define ___ASM_L_DMA_DIRECT_H + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + if (!dev) /* caller knows better */ + return true; + if (!dev->dma_mask) + return false; +#if defined(CONFIG_E2K) && defined(CONFIG_NUMA) + if (cpu_has(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE)) { + if (page_to_nid(phys_to_page(addr)) != dev_to_node(dev)) + return false; + } +#endif + return addr + size - 1 <= *dev->dma_mask; +} + +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} + +#endif /* ___ASM_L_DMA_DIRECT_H */ diff --git a/arch/e2k/include/asm-l/dma-mapping.h b/arch/e2k/include/asm-l/dma-mapping.h new file mode 100644 index 0000000..5211538 --- /dev/null +++ b/arch/e2k/include/asm-l/dma-mapping.h @@ -0,0 +1,21 @@ +#ifndef ___ASM_L_DMA_MAPPING_H +#define ___ASM_L_DMA_MAPPING_H + +#include +#include +#include + +/* + * No easy way to get cache size on all processors + * so return the maximum possible to be safe.
+ */ +#define ARCH_DMA_MINALIGN (1 << INTERNODE_CACHE_SHIFT) + +extern const struct dma_map_ops *dma_ops; + +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return dma_ops; +} + +#endif /* ___ASM_L_DMA_MAPPING_H */ diff --git a/arch/e2k/include/asm-l/epic.h b/arch/e2k/include/asm-l/epic.h new file mode 100644 index 0000000..d965572 --- /dev/null +++ b/arch/e2k/include/asm-l/epic.h @@ -0,0 +1,103 @@ +#ifndef __ASM_L_EPIC_H +#define __ASM_L_EPIC_H + +#ifdef __KERNEL__ +#include +#include + +extern unsigned int early_prepic_node_read_w(int node, unsigned int reg); +extern void early_prepic_node_write_w(int node, unsigned int reg, + unsigned int v); +extern unsigned int prepic_node_read_w(int node, unsigned int reg); +extern void prepic_node_write_w(int node, unsigned int reg, unsigned int v); + +/* + * Verbosity can be turned on by passing the 'epic_debug' cmdline parameter. + * epic_debug is defined in epic.c + */ +extern bool epic_debug; +#define epic_printk(s, a...) do { \ + if (epic_debug) \ + printk(s, ##a); \ + } while (0) + +extern bool epic_bgi_mode; +extern unsigned int cepic_timer_delta; +extern void setup_boot_epic_clock(void); +extern void __init setup_bsp_epic(void); + +/* + * The CEPIC_ID register has 10 valid bits: 2 for prepicn (node) and 8 for + * cepicn (core in node). Since the kernel currently does not support NR_CPUS > 64, + * we ignore the 4 most significant bits of cepicn. + * + * For example, core 0 on node 1 will have full cepic id = 256 and short cepic + * id = 16 + */ +static inline unsigned int cepic_id_full_to_short(unsigned int reg_value) +{ + union cepic_id reg_id; + + reg_id.raw = reg_value; + reg_id.bits.cepicn_reserved = 0; + return reg_id.bits.prepicn << CEPIC_ID_SHORT_VALID_BITS + | reg_id.bits.cepicn; +} + +static inline unsigned int cepic_id_short_to_full(unsigned int cepic_id) +{ + union cepic_id reg_id; + + reg_id.raw = 0; + reg_id.bits.cepicn = cepic_id & CEPIC_ID_SHORT_VALID_MASK; + reg_id.bits.prepicn = cepic_id >> CEPIC_ID_SHORT_VALID_BITS; + return reg_id.raw; +} + +static inline unsigned int read_epic_id(void) +{ + return cepic_id_full_to_short(epic_read_w(CEPIC_ID)); +} + +static inline bool read_epic_bsp(void) +{ + union cepic_ctrl reg; + + reg.raw = epic_read_w(CEPIC_CTRL); + return reg.bits.bsp_core; +} + +extern void __init_recv setup_prepic(void); +extern void ack_epic_irq(void); +extern void epic_send_IPI(unsigned int dest_id, int vector); +extern void epic_send_IPI_mask(const struct cpumask *mask, int vector); +extern void epic_send_IPI_self(int vector); +extern void epic_send_IPI_mask_allbutself(const struct cpumask *mask, + int vector); +extern void epic_wait_icr_idle(void); +extern void clear_cepic(void); + +extern __visible void epic_smp_timer_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_spurious_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_error_interrupt(struct pt_regs *regs); +extern __visible void prepic_smp_error_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_irq_move_cleanup_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_irq_work_interrupt(struct pt_regs *regs); +extern __visible void cepic_epic_interrupt(struct pt_regs *regs); +extern __visible void epic_hc_emerg_interrupt(struct pt_regs *regs); +extern __visible void epic_iommu_interrupt(struct pt_regs *regs); +extern __visible void epic_uncore_interrupt(struct pt_regs *regs); +extern __visible void epic_ipcc_interrupt(struct pt_regs *regs); +extern __visible void
epic_hc_interrupt(struct pt_regs *regs); +extern __visible void epic_pcs_interrupt(struct pt_regs *regs); +#ifdef CONFIG_KVM_ASYNC_PF +extern __visible void epic_pv_apf_wake(struct pt_regs *regs); +#endif /* CONFIG_KVM_ASYNC_PF */ +#ifdef CONFIG_SMP +extern __visible void epic_smp_reschedule_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_call_function_interrupt(struct pt_regs *regs); +extern __visible void epic_smp_call_function_single_interrupt( + struct pt_regs *regs); +#endif +#endif /* __KERNEL__ */ +#endif /* __ASM_L_EPIC_H */ diff --git a/arch/e2k/include/asm-l/epic_regs.h b/arch/e2k/include/asm-l/epic_regs.h new file mode 100644 index 0000000..9f96ed2 --- /dev/null +++ b/arch/e2k/include/asm-l/epic_regs.h @@ -0,0 +1,669 @@ +#ifndef __ASM_L_EPIC_REGS_H +#define __ASM_L_EPIC_REGS_H + +#include + +#ifndef __ASSEMBLY__ +#ifdef __LITTLE_ENDIAN +union cepic_ctrl { + u32 raw; + struct { + u32 __reserved1 : 8, + bsp_core : 1, + __reserved2 : 1, + soft_en : 1, + __reserved3 : 21; + } __packed bits; +}; + +/* Ignore 4 bits of CEPIC (core) ID so that physical core ID is <= 64 */ +union cepic_id { + u32 raw; + struct { + u32 cepicn : 4, + cepicn_reserved : 4, + prepicn : 2, + __reserved2 : 22; + } __packed bits; +}; + +union cepic_ctrl2 { + u32 raw; + struct { + u32 mi_gst_blk : 1, + nmi_gst_blk : 1, + int_hv : 1, + __reserved1 : 1, + clear_gst : 1, + __reserved2 : 3, + timer_stop : 1, + __reserved3 : 23; + } __packed bits; +}; + +union cepic_dat { + u64 raw; + struct { + u64 __reserved1 : 6, + dat_cop : 2, + __reserved2 : 4, + stat : 1, + __reserved3 : 7, + index : 10, + __reserved4 : 2, + __reserved5 : 8, + gst_dst : 10, + __reserved6 : 2, + gst_id : 12; + } __packed bits; +}; + +union cepic_epic_int { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 3, + mask : 1, + __reserved3 : 15; + } __packed bits; +}; + +union cepic_epic_int2 { + u64 raw; + struct { + u64 vect : 10, + dst_sh : 2, + __reserved1 : 1, + dlvm : 3, + __reserved2 : 4, + gst_id : 12, + __reserved3 : 12, + gst_dst : 10, + __reserved4 : 10; + } __packed bits; +}; + +union cepic_cpr { + u32 raw; + struct { + u32 __reserved1 : 8, + cpr : 3, + __reserved2 : 21; + } __packed bits; +}; + +union cepic_esr { + u32 raw; + struct { + u32 __reserved1 : 5, + rq_addr_err : 1, + rq_virt_err : 1, + rq_cop_err : 1, + ms_gstid_err : 1, + ms_virt_err : 1, + ms_err : 1, + ms_icr_err : 1, + __reserved2 : 20; + } __packed bits; +}; + +union cepic_esr2 { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 3, + mask : 1, + __reserved3 : 15; + } __packed bits; +}; + +union cepic_eoi { + u32 raw; + struct { + u32 __reserved1 : 16, + rcpr : 3, + __reserved2 : 13; + } __packed bits; +}; + +union cepic_cir { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 19; + } __packed bits; +}; + +union cepic_gstbase_hi { + u32 raw; + struct { + u32 gstbase_hi : 4, + __reserved : 28; + } __packed bits; +}; + +union cepic_gstid { + u32 raw; + struct { + u32 gstid : 12, + __reserved : 20; + } __packed bits; +}; + +union cepic_pnmirr { + u32 raw; + struct { + u32 startup_entry : 8, + __reserved1 : 1, + smi : 1, + nmi : 1, + init : 1, + startup : 1, + int_violat : 1, + __reserved2 : 2, + nm_timer : 1, + nm_special : 1, + __reserved3 : 14; + } __packed bits; +}; + +union cepic_icr { + u64 raw; + struct { + u64 vect : 10, + dst_sh : 2, + stat : 1, + dlvm : 3, + __reserved1 : 4, + gst_id : 12, + __reserved2 : 8, + dst : 10, + __reserved3 : 14; + } 
__packed bits; +}; + +union cepic_timer_lvtt { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + __reserved2 : 3, + mask : 1, + mode : 1, + __reserved3 : 14; + } __packed bits; +}; + +union cepic_timer_div { + u32 raw; + struct { + u32 divider : 4, + __reserved1 : 28; + } __packed bits; +}; + +union cepic_nm_timer_lvtt { + u32 raw; + struct { + u32 __reserved1 : 17, + mode : 1, + __reserved2 : 14; + } __packed bits; +}; + +union cepic_nm_timer_div { + u32 raw; + struct { + u32 divider : 4, + __reserved1 : 28; + } __packed bits; +}; + +union cepic_svr { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 22; + } __packed bits; +}; + +union cepic_pnmirr_mask { + u32 raw; + struct { + u32 __reserved1 : 9, + smi : 1, + nmi : 1, + __reserved2 : 2, + int_violat : 1, + __reserved3 : 2, + nm_timer : 1, + nm_special : 1, + __reserved4 : 14; + } __packed bits; +}; + +union cepic_vect_inta { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 6, + cpr : 3, + __reserved2 : 13; + } __packed bits; +}; + +union prepic_ctrl { + u32 raw; + struct { + u32 __reserved1 : 8, + bsp : 1, + __reserved2 : 2, + epic_en : 1, + __reserved3 : 20; + } __packed bits; +}; + +union prepic_id { + u32 raw; + struct { + u32 __reserved1 : 8, + prepicn : 2, + __reserved2 : 22; + } __packed bits; +}; + +union prepic_ctrl2 { + u32 raw; + struct { + u32 __reserved1 : 9, + bgi_mode : 1, + __reserved2 : 2, + virt_en : 1, + __reserved3 : 19; + } __packed bits; +}; + +union prepic_err_int { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + dlvm : 3, + mask : 1, + __reserved2 : 3, + dst : 10, + __reserved3 : 2; + } __packed bits; +}; + +union prepic_linpn { + u32 raw; + struct { + u32 vect : 10, + __reserved1 : 2, + stat : 1, + dlvm : 3, + mask : 1, + __reserved2 : 3, + dst : 10, + __reserved3 : 2; + } __packed bits; +}; + +typedef struct kvm_epic_page { +/*000*/ u32 ctrl; + u32 id; + u32 cpr; + u32 esr; + u32 esr2; + u32 cir; + atomic_t esr_new; + u32 svr; + u64 icr; + u32 timer_lvtt; + u32 timer_init; + u32 timer_cur; + u32 timer_div; + u32 nm_timer_lvtt; + u32 nm_timer_init; + u32 nm_timer_cur; + u32 nm_timer_div; + u32 pnmirr_mask; +/*04c*/ u32 __reserved1[45]; +/*100*/ atomic64_t pmirr[16]; +/*180*/ u32 __reserved2[24]; +/*1e0*/ atomic_t pnmirr; + u32 __reserved3[263]; +/*600*/ u8 pnmirr_byte[16]; +/*610*/ u32 __reserved4[124]; +/*800*/ u8 pmirr_byte[1024]; +} epic_page_t; + +#elif defined(__BIG_ENDIAN) + +union cepic_ctrl { + u32 raw; + struct { + u32 __reserved3 : 21, + soft_en : 1, + __reserved2 : 1, + bsp_core : 1, + __reserved1 : 8; + } __packed bits; +}; + +/* Ignore 4 bits of CEPIC (core) ID so that physical core ID is <= 64 */ +union cepic_id { + u32 raw; + struct { + u32 __reserved2 : 22, + prepicn : 2, + cepicn_reserved : 4, + cepicn : 4; + } __packed bits; +}; + +union cepic_ctrl2 { + u32 raw; + struct { + u32 __reserved3 : 23, + timer_stop : 1, + __reserved2 : 3, + clear_gst : 1, + __reserved1 : 1, + int_hv : 1, + nmi_gst_blk : 1, + mi_gst_blk : 1; + } __packed bits; +}; + +union cepic_dat { + u64 raw; + struct { + u64 gst_id : 12, + __reserved6 : 2, + gst_dst : 10, + __reserved5 : 8, + __reserved4 : 2, + index : 10, + __reserved3 : 7, + stat : 1, + __reserved2 : 4, + dat_cop : 2, + __reserved1 : 6; + } __packed bits; +}; + +union cepic_epic_int { + u32 raw; + struct { + u32 __reserved3 : 15, + mask : 1, + __reserved2 : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_epic_int2 { + u64 raw; + struct { + u64 __reserved4 : 10, + 
gst_dst : 10, + __reserved3 : 12, + gst_id : 12, + __reserved2 : 4, + dlvm : 3, + __reserved1 : 1, + dst_sh : 2, + vect : 10; + } __packed bits; +}; + +union cepic_cpr { + u32 raw; + struct { + u32 __reserved2 : 21, + cpr : 3, + __reserved1 : 8; + } __packed bits; +}; + +union cepic_esr { + u32 raw; + struct { + u32 __reserved2 : 20, + ms_icr_err : 1, + ms_err : 1, + ms_virt_err : 1, + ms_gstid_err : 1, + rq_cop_err : 1, + rq_virt_err : 1, + rq_addr_err : 1, + __reserved1 : 5; + } __packed bits; +}; + +union cepic_esr2 { + u32 raw; + struct { + u32 __reserved3 : 15, + mask : 1, + __reserved2 : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_eoi { + u32 raw; + struct { + u32 __reserved2 : 13, + rcpr : 3, + __reserved1 : 16; + } __packed bits; +}; + +union cepic_cir { + u32 raw; + struct { + u32 __reserved2 : 19, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_gstbase_hi { + u32 raw; + struct { + u32 __reserved : 28, + gstbase_hi : 4; + } __packed bits; +}; + +union cepic_gstid { + u32 raw; + struct { + u32 __reserved : 20, + gstid : 12; + } __packed bits; +}; + +union cepic_pnmirr { + u32 raw; + struct { + u32 __reserved3 : 14, + nm_special : 1, + nm_timer : 1, + __reserved2 : 2, + int_violat : 1, + startup : 1, + init : 1, + nmi : 1, + smi : 1, + __reserved1 : 1, + startup_entry : 8; + } __packed bits; +}; + +union cepic_icr { + u64 raw; + struct { + u64 __reserved3 : 14, + dst : 10, + __reserved2 : 8, + gst_id : 12, + __reserved1 : 4, + dlvm : 3, + stat : 1, + dst_sh : 2, + vect : 10; + } __packed bits; +}; + +union cepic_timer_lvtt { + u32 raw; + struct { + u32 __reserved3 : 14, + mode : 1, + mask : 1, + __reserved2 : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union cepic_timer_div { + u32 raw; + struct { + u32 __reserved1 : 28, + divider : 4; + } __packed bits; +}; + +union cepic_nm_timer_lvtt { + u32 raw; + struct { + u32 __reserved2 : 14, + mode : 1, + __reserved1 : 17; + } __packed bits; +}; + +union cepic_nm_timer_div { + u32 raw; + struct { + u32 __reserved1 : 28, + divider : 4; + } __packed bits; +}; + +union cepic_svr { + u32 raw; + struct { + u32 __reserved1 : 22, + vect : 10; + } __packed bits; +}; + +union cepic_pnmirr_mask { + u32 raw; + struct { + u32 __reserved4 : 14, + nm_special : 1, + nm_timer : 1, + __reserved3 : 2, + int_violat : 1, + __reserved2 : 2, + nmi : 1, + smi : 1, + __reserved1 : 9; + } __packed bits; +}; + +union cepic_vect_inta { + u32 raw; + struct { + u32 __reserved2 : 13, + cpr : 3, + __reserved1 : 6, + vect : 10; + } __packed bits; +}; + +union prepic_ctrl { + u32 raw; + struct { + u32 __reserved3 : 20, + epic_en : 1, + __reserved2 : 2, + bsp : 1, + __reserved1 : 8; + } __packed bits; +}; + +union prepic_id { + u32 raw; + struct { + u32 __reserved2 : 22, + prepicn : 2, + __reserved1 : 8; + } __packed bits; +}; + +union prepic_ctrl2 { + u32 raw; + struct { + u32 __reserved3 : 19, + virt_en : 1, + __reserved2 : 2, + bgi_mode : 1, + __reserved1 : 9; + } __packed bits; +}; + +union prepic_err_int { + u32 raw; + struct { + u32 __reserved3 : 2, + dst : 10, + __reserved2 : 3, + mask : 1, + dlvm : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +union prepic_linpn { + u32 raw; + struct { + u32 __reserved3 : 2, + dst : 10, + __reserved2 : 3, + mask : 1, + dlvm : 3, + stat : 1, + __reserved1 : 2, + vect : 10; + } __packed bits; +}; + +#else /*__BIG_ENDIAN*/ +# error FIXME +#endif +#endif /* !(__ASSEMBLY__) */ +#endif /* __ASM_L_EPIC_REGS_H 
*/ diff --git a/arch/e2k/include/asm-l/epicdef.h b/arch/e2k/include/asm-l/epicdef.h new file mode 100644 index 0000000..4ea0ef8 --- /dev/null +++ b/arch/e2k/include/asm-l/epicdef.h @@ -0,0 +1,92 @@ +#ifndef _ASM_L_EPICDEF_H +#define _ASM_L_EPICDEF_H + +/* + * Constants for EPICs (CEPIC, IOEPIC) + */ + +#define MAX_EPICS_ORDER 10 + +#define EPIC_REGS_SIZE 0x2000 +#define IO_EPIC_REGS_SIZE 0x100000 + + +/* CEPIC registers */ +#define CEPIC_CTRL 0x0 +#define CEPIC_CTRL_BSP_CORE 0x100 +#define CEPIC_ID 0x10 +#define CEPIC_ID_BIT_MASK 0x3ff +#define CEPIC_ID_SHORT_VALID_BITS 4 +#define CEPIC_ID_SHORT_VALID_MASK 0xf +#define CEPIC_CPR 0x70 +#define CEPIC_CPR_CORE_PRIORITY_SHIFT 8 +#define CEPIC_ESR 0x80 +#define CEPIC_ESR_BIT_MASK 0x7e0 +#define CEPIC_ESR2 0x90 +#define CEPIC_EOI 0xa0 +#define CEPIC_CIR 0xb0 + +#define CEPIC_PMIRR 0x100 +#define CEPIC_PMIRR_NR_BITS 0x400 +#define CEPIC_PMIRR_NR_REGS 0x20 +#define CEPIC_PMIRR_NR_DREGS 0x10 +#define CEPIC_PNMIRR 0x1e0 +#define CEPIC_PNMIRR_BIT_MASK 0x33e00 +#define CEPIC_PNMIRR_NMI 0x400 +#define CEPIC_PNMIRR_STARTUP 0x1000 +#define CEPIC_PNMIRR_STARTUP_ENTRY 0xff +#define CEPIC_ESR_NEW 0x1f0 + +#define CEPIC_ICR 0x200 +#define CEPIC_ICR_DST_FULL 0 +#define CEPIC_ICR_DST_SELF 1 +#define CEPIC_ICR_DST_ALLBUT 2 +#define CEPIC_ICR_DST_ALLINC 3 +#define CEPIC_ICR_DLVM_FIXED_EXT 0 +#define CEPIC_ICR_DLVM_FIXED_IPI 1 +#define CEPIC_ICR_DLVM_SMI 2 +#define CEPIC_ICR_DLVM_NM_SPECIAL 3 +#define CEPIC_ICR_DLVM_NMI 4 +#define CEPIC_ICR_DLVM_INIT 5 +#define CEPIC_ICR_DLVM_STARTUP 6 +#define CEPIC_ICR2 0x204 +#define CEPIC_TIMER_LVTT 0x220 +#define CEPIC_TIMER_INIT 0x230 +#define CEPIC_TIMER_CUR 0x240 +#define CEPIC_TIMER_DIV 0x250 +#define CEPIC_TIMER_DIV_1 0xb +#define CEPIC_NM_TIMER_LVTT 0x260 +#define CEPIC_NM_TIMER_INIT 0x270 +#define CEPIC_NM_TIMER_CUR 0x280 +#define CEPIC_NM_TIMER_DIV 0x290 +#define CEPIC_SVR 0x2a0 +#define CEPIC_PNMIRR_MASK 0x2d0 +#define CEPIC_VECT_INTA 0x2f0 +#define CEPIC_VECT_INTA_VMASK 0x3ff +#define CEPIC_VECT_INTA_PRI_SHIFT 16 + +/* CEPIC (HP) registers */ +#define CEPIC_GUEST 0x1000 + +#define CEPIC_CTRL2 0x1820 +#define CEPIC_DAT 0x1830 +#define CEPIC_DAT_READ 0 +#define CEPIC_DAT_INVALIDATE 2 +#define CEPIC_DAT_WRITE 3 +#define CEPIC_DAT2 0x1834 +#define CEPIC_EPIC_INT 0x1850 +#define CEPIC_EPIC_INT2 0x1860 +#define CEPIC_EPIC_INT3 0x1864 +#define CEPIC_GSTBASE_LO 0x18c0 +#define CEPIC_GSTBASE_HI 0x18c4 +#define CEPIC_GSTID 0x18d0 + +#define CEPIC_PMIRR_OR 0x1900 +#define CEPIC_PNMIRR_OR 0x19e0 +#define CEPIC_ESR_NEW_OR 0x19f0 + +#define CEPIC_PNMIRR_INT_VIOLAT_BIT 13 + +#define BAD_EPICID 0xffff + +#endif /* _ASM_L_EPICDEF_H */ diff --git a/arch/e2k/include/asm-l/gpio.h b/arch/e2k/include/asm-l/gpio.h new file mode 100644 index 0000000..be55091 --- /dev/null +++ b/arch/e2k/include/asm-l/gpio.h @@ -0,0 +1,50 @@ +/* + * arch/l/include/gpio.h + * + * Copyright (C) 2012 Evgeny Kravtsunov + * + * AC97-GPIO Controller (part of Elbrus IOHUB). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARCH_GPIO_H_ +#define __ASM_ARCH_GPIO_H_ + +#include + +/* IOHUB GPIO pins */ +#define IOHUB_GPIO_0 0 +#define IOHUB_GPIO_1 1 +#define IOHUB_GPIO_2 2 +#define IOHUB_GPIO_3 3 +#define IOHUB_GPIO_4 4 +#define IOHUB_GPIO_5 5 +#define IOHUB_GPIO_6 6 +#define IOHUB_GPIO_7 7 +#define IOHUB_GPIO_8 8 +#define IOHUB_GPIO_9 9 +#define IOHUB_GPIO_10 10 +#define IOHUB_GPIO_11 11 +#define IOHUB_GPIO_12 12 +#define IOHUB_GPIO_13 13 +#define IOHUB_GPIO_14 14 +#define IOHUB_GPIO_15 15 + +/* Number of iohub's own gpios: */ +#define ARCH_NR_IOHUB_GPIOS 16 +#define ARCH_NR_IOHUB2_GPIOS 32 +#define ARCH_MAX_NR_OWN_GPIOS ARCH_NR_IOHUB2_GPIOS + +#if IS_ENABLED(CONFIG_INPUT_LTC2954) +#define LTC2954_IRQ_GPIO_PIN IOHUB_GPIO_3 +#define LTC2954_KILL_GPIO_PIN IOHUB_GPIO_4 +#endif /* CONFIG_INPUT_LTC2954 */ + +#ifdef CONFIG_GPIOLIB +#include +#endif /* CONFIG_GPIOLIB */ + +#endif diff --git a/arch/e2k/include/asm-l/hardirq.h b/arch/e2k/include/asm-l/hardirq.h new file mode 100644 index 0000000..816611f --- /dev/null +++ b/arch/e2k/include/asm-l/hardirq.h @@ -0,0 +1,55 @@ +#ifndef __ASM_L_HARDIRQ_H +#define __ASM_L_HARDIRQ_H + +#include +#include + +typedef struct { + unsigned int __softirq_pending; + unsigned int __nmi_count; /* arch dependent */ +#ifdef CONFIG_L_LOCAL_APIC + unsigned int apic_timer_irqs; /* arch dependent */ + unsigned int irq_spurious_count; + unsigned int icr_read_retry_count; + unsigned int apic_irq_work_irqs; +#endif +#ifdef CONFIG_SMP + unsigned int irq_resched_count; + unsigned int irq_call_count; +# ifdef CONFIG_E2K + /* + * irq_tlb_count is double-counted in irq_call_count, so it must be + * subtracted from irq_call_count when displaying irq_call_count + */ + unsigned int irq_tlb_count; +# endif +#endif +#if (IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET)) + unsigned int irq_rdma_count; +#endif +#ifdef CONFIG_E2K +#if IS_ENABLED(CONFIG_ELDSP) + unsigned int irq_eldsp_count; +#endif +#endif +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +extern void ack_bad_irq(unsigned int irq); + +#define __ARCH_IRQ_STAT +#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) + +#define inc_irq_stat(member) __IRQ_STAT(raw_smp_processor_id(), member) ++ + +extern u64 arch_irq_stat_cpu(unsigned int cpu); +#define arch_irq_stat_cpu arch_irq_stat_cpu + +extern u64 arch_irq_stat(void); +#define arch_irq_stat arch_irq_stat + +#include + +#endif /* __ASM_L_HARDIRQ_H */ diff --git a/arch/e2k/include/asm-l/hw_irq.h b/arch/e2k/include/asm-l/hw_irq.h new file mode 100644 index 0000000..671e326 --- /dev/null +++ b/arch/e2k/include/asm-l/hw_irq.h @@ -0,0 +1,141 @@ +#ifndef _ASM_L_HW_IRQ_H +#define _ASM_L_HW_IRQ_H + +/* required by linux/irq.h */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_L_LOCAL_APIC +#ifdef CONFIG_PIC +# define platform_legacy_irq(irq) ((irq) < 16) +#else +# define platform_legacy_irq(irq) 0 +#endif +#endif + +/* + * Various low-level irq details needed by irq.c, process.c, + * time.c, io_apic.c and smp.c + * + * Interrupt entry/exit code at both C and assembly level + */ + +extern atomic_t irq_err_count; + +/* IOAPIC */ +#ifdef CONFIG_PIC +# define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) +extern unsigned long io_apic_irqs; +#else +# define IO_APIC_IRQ(x) 1 +#endif + +extern void disable_IO_APIC(void); + +struct io_apic_irq_attr { + int ioapic; + int ioapic_pin; + int trigger; + int
polarity; +}; + +static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, + int ioapic, int ioapic_pin, + int trigger, int polarity) +{ + irq_attr->ioapic = ioapic; + irq_attr->ioapic_pin = ioapic_pin; + irq_attr->trigger = trigger; + irq_attr->polarity = polarity; +} + +/* + * This is performance-critical, we want to do it O(1) + * + * Most irqs are mapped 1:1 with pins. + */ +struct irq_cfg { + struct irq_pin_list *irq_2_pin; + cpumask_var_t domain; + cpumask_var_t old_domain; + u8 vector; + u8 move_in_progress : 1; +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu irq_2_iommu; +#endif +}; + +extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin, + struct io_apic_irq_attr *irq_attr); +extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); + +extern void (*interrupt[NR_VECTORS])(struct pt_regs *regs); +#ifdef CONFIG_TRACING +#define trace_interrupt interrupt +#endif + +#define VECTOR_UNDEFINED -1 +#define VECTOR_RETRIGGERED -2 + +typedef int vector_irq_t[NR_VECTORS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); + +extern void lock_vector_lock(void); +extern void unlock_vector_lock(void); +extern void __setup_vector_irq(int cpu); + +#define IO_APIC_VECTOR(irq) ({ \ + struct irq_cfg *__cfg = irq_cfg(irq); \ + (__cfg) ? __cfg->vector : 0; \ +}) + +extern void setup_ioapic_dest(void); + +/* Statistics */ +extern atomic_t irq_err_count; +extern atomic_t irq_mis_count; + +/* EISA */ +extern void eisa_set_level_irq(unsigned int irq); + +/* SMP */ +extern __visible void smp_apic_timer_interrupt(struct pt_regs *); +extern __visible void smp_spurious_interrupt(struct pt_regs *); +extern __visible void smp_error_interrupt(struct pt_regs *); +extern __visible void smp_irq_move_cleanup_interrupt(struct pt_regs *); +extern __visible void smp_irq_work_interrupt(struct pt_regs *); +#ifdef CONFIG_SMP +extern __visible void smp_reschedule_interrupt(struct pt_regs *regs); +extern __visible void smp_call_function_interrupt(struct pt_regs *regs); +extern __visible void smp_call_function_single_interrupt(struct pt_regs *regs); +#endif + +#ifdef CONFIG_TRACING +/* Interrupt handlers registered during init_IRQ */ +extern void smp_trace_apic_timer_interrupt(struct pt_regs *regs); +extern void smp_trace_error_interrupt(struct pt_regs *regs); +extern void smp_trace_irq_work_interrupt(struct pt_regs *regs); +extern void smp_trace_spurious_interrupt(struct pt_regs *regs); +extern void smp_trace_reschedule_interrupt(struct pt_regs *regs); +extern void smp_trace_call_function_interrupt(struct pt_regs *regs); +extern void smp_trace_call_function_single_interrupt(struct pt_regs *regs); +#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt +#endif /* CONFIG_TRACING */ + +extern void do_nmi(struct pt_regs * regs); +extern void l_init_system_handlers_table(void); +extern void epic_init_system_handlers_table(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +extern void do_IRQ(struct pt_regs * regs, unsigned int vector); + +#endif /* _ASM_L_HW_IRQ_H */ diff --git a/arch/e2k/include/asm-l/i2c-spi.h b/arch/e2k/include/asm-l/i2c-spi.h new file mode 100644 index 0000000..e3e6cba --- /dev/null +++ b/arch/e2k/include/asm-l/i2c-spi.h @@ -0,0 +1,42 @@ +#ifndef __L_ASM_SPI_H__ +#define __L_ASM_SPI_H__ + +#include +#include + +/* PCI registers definitions for reset */ + +#define PCI_RESET_CONTROL 0x60 +#define L_SOFTWARE_RESET_TO_HARD 0x00000004 /* software reset */ + /* to 
hardware reset */ +#define L_WATCHDOG_RESET_TO_HARD 0x00000008 /* watchdog reset */ + /* to hardware reset */ +#define L_SOFTWARE_RESET_TO_SOFT 0x00000010 /* software reset */ + /* to soft reset */ +#define L_WATCHDOG_RESET_TO_SOFT 0x00000020 /* watchdog reset */ + /* to soft reset */ +#define L_RED_RESET_OUT 0x80000080 /* Led control */ +#define PCI_SOFT_RESET_CONTROL 0x64 +#define L_SOFTWARE_RESET 0x00000001 +#define L_SOFTWARE_RESET_DONE 0x00000002 +#define L_LAST_RESET_INFO 0x000000fc /* last reset type */ +#define PCI_SOFT_RESET_DURATION 0x68 +#define L_IOHUB_SOFT_RESET_DURATION 0x0000ffff +#define L_IOHUB2_SOFT_RESET_DURATION 0x00ffffff + +/* Common SPI & I2C definitions */ + +#define I2C_SPI_CNTRL_AREA_SIZE 0x40 +#define I2C_SPI_DATA_AREA_SIZE 0x40 + +#define I2C_SPI_DEFAULT_IRQ 23 +#define I2C_MAX_ADAPTERS_PER_CONTROLLER 5 + +#define I2C_MAX_BUSSES I2C_MAX_ADAPTERS_PER_CONTROLLER + +#ifdef CONFIG_E2K +extern int iohub_i2c_line_id; +#else +#define iohub_i2c_line_id 0 +#endif +#endif /* __L_ASM_SPI_H__ */ diff --git a/arch/e2k/include/asm-l/idle.h b/arch/e2k/include/asm-l/idle.h new file mode 100644 index 0000000..0cf10bb --- /dev/null +++ b/arch/e2k/include/asm-l/idle.h @@ -0,0 +1,7 @@ +#ifndef _ASM_L_IDLE_H +#define _ASM_L_IDLE_H + +static inline void enter_idle(void) { } +static inline void exit_idle(void) { } + +#endif /* _ASM_L_IDLE_H */ diff --git a/arch/e2k/include/asm-l/io_apic.h b/arch/e2k/include/asm-l/io_apic.h new file mode 100644 index 0000000..abc67d1 --- /dev/null +++ b/arch/e2k/include/asm-l/io_apic.h @@ -0,0 +1,307 @@ +#ifndef _ASM_L_IO_APIC_H +#define _ASM_L_IO_APIC_H + +#include +#include +#include +#include +#if 0 +#include +#endif +/* + * Intel IO-APIC support for SMP and UP systems. + * + * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar + */ + +/* I/O Unit Redirection Table */ +#define IO_APIC_REDIR_VECTOR_MASK 0x000FF +#define IO_APIC_REDIR_DEST_LOGICAL 0x00800 +#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000 +#define IO_APIC_REDIR_SEND_PENDING (1 << 12) +#define IO_APIC_REDIR_REMOTE_IRR (1 << 14) +#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) +#define IO_APIC_REDIR_MASKED (1 << 16) + +#if 0 +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14, + LTS : 1, + delivery_type : 1, + __reserved_1 : 8, + ID : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8, + __reserved_2 : 7, + PRQ : 1, + entries : 8, + __reserved_1 : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24, + arbitration : 4, + __reserved_1 : 4; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1, + __reserved_1 : 31; + } __attribute__ ((packed)) bits; +}; + +struct IO_APIC_route_entry { + __u32 vector : 8, + delivery_mode : 3, /* 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + dest_mode : 1, /* 0: physical, 1: logical */ + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved_2 : 15; + + __u32 __reserved_3 : 24, + dest : 8; +} __attribute__ ((packed)); + +struct IR_IO_APIC_route_entry { + __u64 vector : 8, + zero : 3, + index2 : 1, + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, + mask : 1, + reserved : 31, + format : 1, + index : 15; +} __attribute__ ((packed)); +#endif + +#define IOAPIC_AUTO -1 +#define IOAPIC_EDGE 0 +#define IOAPIC_LEVEL 1 + +#ifdef 
CONFIG_L_IO_APIC + +extern DECLARE_BITMAP(used_vectors, NR_VECTORS); + +/* + * # of IO-APICs and # of IRQ routing registers + */ +extern int nr_ioapics; + +extern int mpc_ioapic_id(int ioapic); +extern unsigned long mpc_ioapic_addr(int ioapic); +extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic); + +#define MP_MAX_IOAPIC_PIN 127 + +/* # of MP IRQ source entries */ +extern int mp_irq_entries; + +/* MP IRQ source entries */ +extern struct mpc_intsrc mp_irqs[]; + +/* non-0 if default (table-less) MP configuration */ +extern int mpc_default_type; + +/* Older SiS APIC requires we rewrite the index register */ +extern int sis_apic_bug; + +/* 1 if "noapic" boot option passed */ +extern int skip_ioapic_setup; + +/* 1 if "noapic" boot option passed */ +extern int noioapicquirk; + +/* -1 if "noapic" boot option passed */ +extern int noioapicreroute; + +/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ +extern int timer_through_8259; + +/* + * If we use the IO-APIC for IRQ routing, disable automatic + * assignment of PCI IRQ's. + */ +#ifdef CONFIG_PIC +#define io_apic_assign_pci_irqs \ + (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) +#else +#define io_apic_assign_pci_irqs \ + (mp_irq_entries && !skip_ioapic_setup) +#endif + +extern void setup_IO_APIC(void); +extern void enable_IO_APIC(void); + +struct io_apic_irq_attr; +struct irq_cfg; +struct device; +extern int io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr); +void setup_IO_APIC_irq_extra(u32 gsi); +extern void ioapic_insert_resources(void); + +extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, + unsigned int, int, + struct io_apic_irq_attr *); +extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, + unsigned int, int, + struct io_apic_irq_attr *); +extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg); + +struct pci_dev; +struct msi_msg; +extern void native_compose_msi_msg(struct pci_dev *pdev, + unsigned int irq, unsigned int dest, + struct msi_msg *msg, u8 hpet_id); +extern void native_eoi_ioapic_pin(int apic, int pin, int vector); +int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); + +extern int save_ioapic_entries(void); +extern void mask_ioapic_entries(void); +extern int restore_ioapic_entries(void); + +extern void probe_nr_irqs_gsi(void); +extern int get_nr_irqs_gsi(void); +extern int set_ioapic_affinity_irq(unsigned int, const struct cpumask *); + +extern void setup_ioapic_ids_from_mpc(void); +extern void setup_ioapic_ids_from_mpc_nocheck(void); + +struct mp_ioapic_gsi{ + u32 gsi_base; + u32 gsi_end; +}; +extern struct mp_ioapic_gsi mp_gsi_routing[]; +extern u32 gsi_top; +int mp_find_ioapic(u32 gsi); +int mp_find_ioapic_pin(int ioapic, u32 gsi); +#if defined CONFIG_E2K || defined CONFIG_E90S +void __init mp_register_ioapic(int id, unsigned long address, u32 gsi_base); +#else +void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); +#endif +extern void __init pre_init_apic_IRQ0(void); + +extern void mp_save_irq(struct mpc_intsrc *m); + +extern void disable_ioapic_support(void); + +extern void __init native_io_apic_init_mappings(void); +extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); +extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); +extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); +extern void native_disable_io_apic(void); +extern void 
native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); +extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); +struct irq_data; +extern int native_ioapic_set_affinity(struct irq_data *, + const struct cpumask *, + bool); + +static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) +{ +#if 0 + return x86_io_apic_ops.read(apic, reg); +#else + return native_io_apic_read(apic, reg); +#endif +} + +static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) +{ +#if 0 + x86_io_apic_ops.write(apic, reg, value); +#else + native_io_apic_write(apic, reg, value); +#endif +} +static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) +{ +#if 0 + x86_io_apic_ops.modify(apic, reg, value); +#else + native_io_apic_modify(apic, reg, value); +#endif +} + +extern void io_apic_eoi(unsigned int apic, unsigned int vector); + +extern unsigned int __create_irqs(unsigned int from, unsigned int count, + int node); +extern void destroy_irqs(unsigned int irq, unsigned int count); +extern int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, + struct msi_msg *msg, u8 hpet_id); +extern int ioapic_retrigger_irq(struct irq_data *data); +extern int __ioapic_set_affinity(struct irq_data *data, + const struct cpumask *mask, + unsigned int *dest_id); +extern int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, + int pin); +extern unsigned int ioapic_cfg_get_pin(struct irq_cfg *cfg); +extern unsigned int ioapic_cfg_get_idx(struct irq_cfg *cfg); + +#else /* !CONFIG_L_IO_APIC */ + +#define io_apic_assign_pci_irqs 0 +#define setup_ioapic_ids_from_mpc x86_init_noop +static const int timer_through_8259 = 0; +static inline void ioapic_insert_resources(void) { } +#define gsi_top (NR_IRQS_LEGACY) +static inline int mp_find_ioapic(u32 gsi) { return 0; } + +struct io_apic_irq_attr; +static inline int io_apic_set_pci_routing(struct device *dev, int irq, + struct io_apic_irq_attr *irq_attr) { return 0; } + +static inline int save_ioapic_entries(void) +{ + return -ENOMEM; +} + +static inline void mask_ioapic_entries(void) { } +static inline int restore_ioapic_entries(void) +{ + return -ENOMEM; +} + +static inline void mp_save_irq(struct mpc_intsrc *m) { }; +static inline void disable_ioapic_support(void) { } +#define native_io_apic_init_mappings NULL +#define native_io_apic_read NULL +#define native_io_apic_write NULL +#define native_io_apic_modify NULL +#define native_disable_io_apic NULL +#define native_io_apic_print_entries NULL +#define native_ioapic_set_affinity NULL +#define native_setup_ioapic_entry NULL +#define native_compose_msi_msg NULL +#define native_eoi_ioapic_pin NULL +#endif +extern int __init calibrate_APIC_clock(void); + +#endif /* _ASM_L_IO_APIC_H */ diff --git a/arch/e2k/include/asm-l/io_epic.h b/arch/e2k/include/asm-l/io_epic.h new file mode 100644 index 0000000..947cdc4 --- /dev/null +++ b/arch/e2k/include/asm-l/io_epic.h @@ -0,0 +1,77 @@ +#ifndef _ASM_L_IO_EPIC_H +#define _ASM_L_IO_EPIC_H + +#include +#include +#include +#include + +#define IOEPIC_ID 0x0 +#define IOEPIC_VERSION 0x4 +#define IOEPIC_INT_RID(pin) (0x800 + 0x4 * pin) +#define IOEPIC_TABLE_INT_CTRL(pin) (0x20 + 0x1000 * pin) +#define IOEPIC_TABLE_MSG_DATA(pin) (0x24 + 0x1000 * pin) +#define IOEPIC_TABLE_ADDR_HIGH(pin) (0x28 + 0x1000 * pin) +#define IOEPIC_TABLE_ADDR_LOW(pin) (0x2c + 0x1000 * pin) + +#define MAX_IO_EPICS (MAX_NUMIOLINKS + MAX_NUMNODES) + +#define IOEPIC_AUTO -1 +#define IOEPIC_EDGE 0 
+#define IOEPIC_LEVEL 1 + +#define IOEPIC_VERSION_1 1 +#define IOEPIC_VERSION_2 2 /* Fast level EOI (without reading int_ctrl) */ + +extern int nr_ioepics; +extern void setup_io_epic(void); +extern void __init mp_register_ioepic(int ver, int id, int node, + unsigned long address, u32 gsi_base); +extern int ioepic_pin_to_irq(unsigned int pin, struct pci_dev *dev); + +struct mp_ioepic_gsi { + unsigned int gsi_base; + unsigned int gsi_end; +}; + +/* + * cpumask fields 'domain' and 'old_domain' from APIC irq_cfg are replaced with + * int dest here. Similar to APIC in physical addressing mode, there is + * no need for a cpumask, if only one CPU bit is set in it at all times + */ +struct epic_irq_cfg { + unsigned short pin; + unsigned short epic; + unsigned short old_dest; + unsigned short dest; + unsigned short vector; + unsigned char move_in_progress : 1; +#ifdef CONFIG_INTR_REMAP + struct irq_2_iommu irq_2_iommu; +#endif +}; + +#define IO_EPIC_VECTOR(irq) ({ \ + struct epic_irq_cfg *__cfg = irq_get_chip_data(irq); \ + (__cfg) ? __cfg->vector : 0; \ +}) + +struct io_epic_irq_attr { + int ioepic; + int ioepic_pin; + int trigger; + int rid; +}; + +struct irq_chip; +extern struct irq_chip ioepic_chip; +extern unsigned long used_vectors[]; + +extern unsigned long io_epic_base_node(int node); +/* FIXME should be removed after proper passthrough implementation */ +extern unsigned int io_epic_read(unsigned int epic, unsigned int reg); +extern void io_epic_write(unsigned int epic, unsigned int reg, + unsigned int value); +extern int pirq_enable_irq(struct pci_dev *dev); + +#endif /* _ASM_L_IO_EPIC_H */ diff --git a/arch/e2k/include/asm-l/io_epic_regs.h b/arch/e2k/include/asm-l/io_epic_regs.h new file mode 100644 index 0000000..7e6ef45 --- /dev/null +++ b/arch/e2k/include/asm-l/io_epic_regs.h @@ -0,0 +1,147 @@ +#ifndef __ASM_L_IO_EPIC_REGS_H +#define __ASM_L_IO_EPIC_REGS_H + +#include + +#ifdef __LITTLE_ENDIAN +/* The structure of the IO-EPIC */ +union IO_EPIC_ID { + u32 raw; + struct { + u32 id : 16, + nodeid : 16; + } __packed bits; +}; + +union IO_EPIC_VERSION { + u32 raw; + struct { + u32 version : 8, + __reserved2 : 8, + entries : 8, + __reserved1 : 8; + } __packed bits; +}; + +union IO_EPIC_INT_CTRL { + u32 raw; + struct { + u32 __reserved3 : 12, + delivery_status : 1, + software_int : 1, + __reserved2 : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved1 : 15; + } __packed bits; +}; + +union IO_EPIC_MSG_DATA { + u32 raw; + struct { + u32 vector : 10, + __reserved2 : 3, + dlvm : 3, + __reserved1 : 16; + } __packed bits; +}; + +union IO_EPIC_MSG_ADDR_LOW { + u32 raw; + struct { + u32 __reserved3 : 2, + msg_type : 3, + __reserved2 : 1, + dst : 10, + __reserved1 : 4, + MSI : 12; + } __packed bits; +}; + +union IO_EPIC_REQ_ID { + u32 raw; + struct { + u32 fn : 3, + dev : 5, + bus : 8, + __reserved1 : 16; + } __packed bits; +}; +#elif defined(__BIG_ENDIAN) +/* The structure of the IO-EPIC */ +union IO_EPIC_ID { + u32 raw; + struct { + u32 nodeid : 16, + id : 16; + } __packed bits; +}; + +union IO_EPIC_VERSION { + u32 raw; + struct { + u32 __reserved1 : 8, + entries : 8, + __reserved2 : 8, + version : 8; + } __packed bits; +}; + +union IO_EPIC_INT_CTRL { + u32 raw; + struct { + u32 __reserved1 : 15, + mask : 1, /* 0: enabled, 1: disabled */ + trigger : 1, /* 0: edge, 1: level */ + __reserved2 : 1, + software_int : 1, + delivery_status : 1, + __reserved3 : 12; + } __packed bits; +}; + +union IO_EPIC_MSG_DATA { + u32 raw; + struct { + u32 __reserved1 : 16, + 
dlvm : 3, + __reserved2 : 3, + vector : 10; + } __packed bits; +}; + +union IO_EPIC_MSG_ADDR_LOW { + u32 raw; + struct { + u32 MSI : 12, + __reserved1 : 4, + dst : 10, + __reserved2 : 1, + msg_type : 3, + __reserved3 : 2; + } __packed bits; +}; + +union IO_EPIC_REQ_ID { + u32 raw; + struct { + u32 __reserved1 : 16, + bus : 8, + dev : 5, + fn : 3; + } __packed bits; +}; + +#else /*__BIG_ENDIAN*/ +# error What is the endianess? +#endif + +struct IO_EPIC_route_entry { + union IO_EPIC_INT_CTRL int_ctrl; + union IO_EPIC_MSG_DATA msg_data; + u32 addr_high; + union IO_EPIC_MSG_ADDR_LOW addr_low; + union IO_EPIC_REQ_ID rid; +} __packed; + +#endif /* __ASM_L_IO_EPIC_REGS_H */ diff --git a/arch/e2k/include/asm-l/io_pic.h b/arch/e2k/include/asm-l/io_pic.h new file mode 100644 index 0000000..491e5df --- /dev/null +++ b/arch/e2k/include/asm-l/io_pic.h @@ -0,0 +1,106 @@ +#ifndef __ASM_L_IO_PIC_H +#define __ASM_L_IO_PIC_H + +/* + * Choose between IO-PICs in arch/l. If CONFIG_EPIC=n, IO-APIC is chosen + * statically. If CONFIG_EPIC=y (only on e2k), use both IO-APIC and IO-EPIC + * calls, depending on nr_ioapics and nr_ioepics variables + */ + +#ifdef CONFIG_EPIC + +#include +#include + +struct io_apic_irq_attr; +extern int io_epic_get_PCI_irq_vector(int bus, int devfn, int pin); +extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin, + struct io_apic_irq_attr *irq_attr); +static inline int IO_PIC_get_PCI_irq_vector(int domain, int bus, int slot, + int pin, struct io_apic_irq_attr *irq_attr) +{ + int pic_irq = -1; + + if (nr_ioepics) + pic_irq = io_epic_get_PCI_irq_vector(bus, slot, pin); + + if (pic_irq == -1 && nr_ioapics) + pic_irq = IO_APIC_get_PCI_irq_vector(domain, bus, slot, pin, + irq_attr); + return pic_irq; +} + +extern int io_epic_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); +extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); +static inline int IO_PIC_get_fix_irq_vector(int domain, int bus, int slot, + int func, int irq) +{ + int pic_irq = -1; + + if (nr_ioepics) + pic_irq = io_epic_get_fix_irq_vector(domain, bus, slot, func, + irq); + + if (pic_irq == -1 && nr_ioapics) + pic_irq = IO_APIC_get_fix_irq_vector(domain, bus, slot, func, + irq); + return pic_irq; +} + +extern void __epic_setup_vector_irq(int cpu); +extern void __apic_setup_vector_irq(int cpu); +static inline void __pic_setup_vector_irq(int cpu) +{ + if (nr_ioepics) + __epic_setup_vector_irq(cpu); + if (nr_ioapics) + __apic_setup_vector_irq(cpu); +} + +extern void print_IO_APICs(void); +extern void print_IO_EPICs(void); +static inline void print_IO_PICs(void) +{ + if (nr_ioepics) + print_IO_EPICs(); + if (nr_ioapics) + print_IO_APICs(); +} + +#else /* !(CONFIG_EPIC) */ + +#include + +struct io_apic_irq_attr; +extern int IO_APIC_get_PCI_irq_vector(int domain, int bus, int devfn, int pin, + struct io_apic_irq_attr *irq_attr); +static inline int IO_PIC_get_PCI_irq_vector(int domain, int bus, int slot, + int pin, struct io_apic_irq_attr *irq_attr) +{ + return IO_APIC_get_PCI_irq_vector(domain, bus, slot, pin, irq_attr); +} + +extern int IO_APIC_get_fix_irq_vector(int domain, int bus, int slot, int func, + int irq); +static inline int IO_PIC_get_fix_irq_vector(int domain, int bus, int slot, + int func, int irq) +{ + return IO_APIC_get_fix_irq_vector(domain, bus, slot, func, irq); +} + +extern void __apic_setup_vector_irq(int cpu); +static inline void __pic_setup_vector_irq(int cpu) +{ + __apic_setup_vector_irq(cpu); +} + +extern void 
print_IO_APICs(void); +static inline void print_IO_PICs(void) +{ + print_IO_APICs(); +} + +#endif /* !(CONFIG_EPIC) */ +#endif /* __ASM_L_IO_PIC_H */ diff --git a/arch/e2k/include/asm-l/iolinkmask.h b/arch/e2k/include/asm-l/iolinkmask.h new file mode 100644 index 0000000..bc83a3f --- /dev/null +++ b/arch/e2k/include/asm-l/iolinkmask.h @@ -0,0 +1,606 @@ +#ifndef __ASM_L_IOLINKMASK_H +#define __ASM_L_IOLINKMASK_H + +/* + * Based on include/linux/nodemask.h + * IOLINKmasks provide a bitmap suitable for representing the + * set of IOLINKs in a system, one bit position per IOLINK domain number. + * + * An IOLINK can be represented by a global domain number or as a + * pair: node and local link number on the node. + * So the main macros and functions operate on a domain number and + * have appropriate macros to operate on a pair of node and link #, + * for example: + * iolink_set(domain, ...) + * node_iolink_set(node, link, ...) + * + * IOLINK is the common name for IO management and can be connected to an IOHUB + * (controller of peripheral interfaces) or RDMA (DMA with remote systems). + * So the macros have alternatives to operate with IOLINKs as IOHUBs and RDMAs, + * for example: + * iolink_set(...) + * iohub_set(...) + * rdma_set(...) + * + * See detailed comments in the file linux/bitmap.h describing the + * data type on which these iolinkmasks are based. + * + * For details of iolinkmask_scnprintf() and iolinkmask_parse(), + * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c. + * For details of iolinklist_scnprintf() and iolinklist_parse(), see + * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. + * + * The available iolinkmask operations are: + * + * void iolink_set(iolink, mask) turn on bit 'iolink' in mask + * void iolink_clear(iolink, mask) turn off bit 'iolink' in mask + * void iolinks_setall(mask) set all bits + * void iolinks_clear(mask) clear all bits + * int iolink_isset(iolink, mask) true iff bit 'iolink' set in mask + * int iolink_test_and_set(iolink, mask) test and set bit 'iolink' in mask + * + * void iolinks_and(dst, src1, src2) dst = src1 & src2 [intersection] + * void iolinks_or(dst, src1, src2) dst = src1 | src2 [union] + * void iolinks_xor(dst, src1, src2) dst = src1 ^ src2 + * void iolinks_andnot(dst, src1, src2) dst = src1 & ~src2 + * void iolinks_complement(dst, src) dst = ~src + * + * int iolinks_equal(mask1, mask2) Does mask1 == mask2? + * int iolinks_intersects(mask1, mask2) Do mask1 and mask2 intersect? + * int iolinks_subset(mask1, mask2) Is mask1 a subset of mask2? + * int iolinks_empty(mask) Is mask empty (no bits set)? + * int iolinks_full(mask) Is mask full (all bits set)? + * int iolinks_weight(mask) Hamming weight - number of set bits + * + * void iolinks_shift_right(dst, src, n) Shift right + * void iolinks_shift_left(dst, src, n) Shift left + * + * int first_iolink(mask) Number lowest set bit, or MAX_NUMIOLINKS + * int next_iolink(iolink, mask) Next iolink past 'iolink', or MAX_NUMIOLINKS + * int first_unset_iolink(mask) First iolink not set in mask, or + * MAX_NUMIOLINKS.
+ * + * iolinkmask_t iolinkmask_of_iolink(iolink) Return iolinkmask with bit 'iolink' set + * IOLINK_MASK_ALL Initializer - all bits set + * IOLINK_MASK_NONE Initializer - no bits set + * unsigned long *iolinks_addr(mask) Array of unsigned long's in mask + * + * int iolinkmask_scnprintf(buf, len, mask) Format iolinkmask for printing + * int iolinkmask_parse(ubuf, ulen, mask) Parse ascii string as iolinkmask + * int iolinklist_scnprintf(buf, len, mask) Format iolinkmask as list for printing + * int iolinklist_parse(buf, map) Parse ascii string as iolinklist + * + * for_each_iolink_mask(iolink, mask) for-loop iolink over mask + * + * int num_online_iolinks() Number of online IOLINKs + * int num_possible_iolinks() Number of all possible IOLINKs + * + * int iolink_online(iolink) Is some iolink domain online? + * int iolink_possible(iolink) Is some iolink domain possible? + * + * iolink_set_online(iolink) set bit 'iolink' in iolink_online_map + * iolink_set_offline(iolink) clear bit 'iolink' in iolink_online_map + * + * for_each_iolink(iolink) for-loop iolink over iolink_possible_map + * for_each_online_iolink(iolink) for-loop iolink over iolink_online_map + * + * Subtlety: + * 1) The 'type-checked' form of iolink_isset() causes gcc (3.3.2, anyway) + * to generate slightly worse code. So use a simple one-line #define + * for iolink_isset(), instead of wrapping an inline inside a macro, the + * way we do the other calls. + */ + +#include +#include +#include +#include +#include +#include + +#define MAX_NUMIOLINKS MACH_MAX_NUMIOLINKS +#define MAX_NUMIOHUBS MAX_NUMIOLINKS +#define NODE_NUMIOLINKS MACH_NODE_NUMIOLINKS + +typedef struct { DECLARE_BITMAP(bits, MAX_NUMIOLINKS); } iolinkmask_t; +extern iolinkmask_t _unused_iolinkmask_arg_; + +#define iolink_set(domain, dst) __iolink_set((domain), &(dst)) +#define node_iolink_set(node, link, dst) \ + iolink_set(node_iolink_to_domain((node), (link)), (dst)) +#define iohub_set(domain, dst) iolink_set((domain), (dst)) +#define node_iohub_set(node, link, dst) \ + iohub_set(node_iohub_to_domain((node), (link)), (dst)) +#define rdma_set(domain, dst) iolink_set((domain), (dst)) +#define node_rdma_set(node, link, dst) \ + rdma_set(node_rdma_to_domain((node), (link)), (dst)) +static inline void __iolink_set(int domain, volatile iolinkmask_t *dstp) +{ + set_bit(domain, dstp->bits); +} + +#define iolink_clear(domain, dst) __iolink_clear((domain), &(dst)) +#define node_iolink_clear(node, link, dst) \ + iolink_clear(node_iolink_to_domain((node), (link)), (dst)) +#define iohub_clear(domain, dst) iolink_clear((domain), (dst)) +#define node_iohub_clear(node, link, dst) \ + iohub_clear(node_iohub_to_domain((node), (link)), (dst)) +#define rdma_clear(domain, dst) iolink_clear((domain), (dst)) +#define node_rdma_clear(node, link, dst) \ + rdma_clear(node_rdma_to_domain((node), (link)), (dst)) +static inline void __iolink_clear(int domain, volatile iolinkmask_t *dstp) +{ + clear_bit(domain, dstp->bits); +} + +#define iolinks_setall(dst) __iolinks_setall(&(dst), MAX_NUMIOLINKS) +static inline void __iolinks_setall(iolinkmask_t *dstp, int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + +#define iolinks_clear(dst) __iolinks_clear(&(dst), MAX_NUMIOLINKS) +static inline void __iolinks_clear(iolinkmask_t *dstp, int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + +/* No static inline type checking - see Subtlety (1) above. 
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define MAX_NUMIOLINKS	MACH_MAX_NUMIOLINKS
+#define MAX_NUMIOHUBS	MAX_NUMIOLINKS
+#define NODE_NUMIOLINKS	MACH_NODE_NUMIOLINKS
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMIOLINKS); } iolinkmask_t;
+extern iolinkmask_t _unused_iolinkmask_arg_;
+
+#define iolink_set(domain, dst) __iolink_set((domain), &(dst))
+#define node_iolink_set(node, link, dst) \
+		iolink_set(node_iolink_to_domain((node), (link)), (dst))
+#define iohub_set(domain, dst) iolink_set((domain), (dst))
+#define node_iohub_set(node, link, dst) \
+		iohub_set(node_iohub_to_domain((node), (link)), (dst))
+#define rdma_set(domain, dst) iolink_set((domain), (dst))
+#define node_rdma_set(node, link, dst) \
+		rdma_set(node_rdma_to_domain((node), (link)), (dst))
+static inline void __iolink_set(int domain, volatile iolinkmask_t *dstp)
+{
+	set_bit(domain, dstp->bits);
+}
+
+#define iolink_clear(domain, dst) __iolink_clear((domain), &(dst))
+#define node_iolink_clear(node, link, dst) \
+		iolink_clear(node_iolink_to_domain((node), (link)), (dst))
+#define iohub_clear(domain, dst) iolink_clear((domain), (dst))
+#define node_iohub_clear(node, link, dst) \
+		iohub_clear(node_iohub_to_domain((node), (link)), (dst))
+#define rdma_clear(domain, dst) iolink_clear((domain), (dst))
+#define node_rdma_clear(node, link, dst) \
+		rdma_clear(node_rdma_to_domain((node), (link)), (dst))
+static inline void __iolink_clear(int domain, volatile iolinkmask_t *dstp)
+{
+	clear_bit(domain, dstp->bits);
+}
+
+#define iolinks_setall(dst) __iolinks_setall(&(dst), MAX_NUMIOLINKS)
+static inline void __iolinks_setall(iolinkmask_t *dstp, int nbits)
+{
+	bitmap_fill(dstp->bits, nbits);
+}
+
+#define iolinks_clear(dst) __iolinks_clear(&(dst), MAX_NUMIOLINKS)
+static inline void __iolinks_clear(iolinkmask_t *dstp, int nbits)
+{
+	bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define iolink_isset(domain, iolinkmask) test_bit((domain), (iolinkmask).bits)
+#define node_iolink_isset(node, link, iolinkmask) \
+		iolink_isset(node_iolink_to_domain((node), (link)), \
+			(iolinkmask))
+#define iohub_isset(domain, iolinkmask) iolink_isset((domain), (iolinkmask))
+#define node_iohub_isset(node, link, iolinkmask) \
+		iohub_isset(node_iohub_to_domain((node), (link)), \
+			(iolinkmask))
+#define rdma_isset(domain, iolinkmask) iolink_isset((domain), (iolinkmask))
+#define node_rdma_isset(node, link, iolinkmask) \
+		rdma_isset(node_rdma_to_domain((node), (link)), \
+			(iolinkmask))
+
+#define iolink_test_and_set(domain, iolinkmask) \
+		__iolink_test_and_set((domain), &(iolinkmask))
+#define node_iolink_test_and_set(node, link, iolinkmask) \
+		iolink_test_and_set(node_iolink_to_domain((node), (link)), \
+			(iolinkmask))
+#define iohub_test_and_set(domain, iolinkmask) \
+		iolink_test_and_set((domain), (iolinkmask))
+#define node_iohub_test_and_set(node, link, iolinkmask) \
+		iohub_test_and_set(node_iohub_to_domain((node), (link)), \
+			(iolinkmask))
+#define rdma_test_and_set(domain, iolinkmask) \
+		iolink_test_and_set((domain), (iolinkmask))
+#define node_rdma_test_and_set(node, link, iolinkmask) \
+		rdma_test_and_set(node_rdma_to_domain((node), (link)), \
+			(iolinkmask))
+static inline int __iolink_test_and_set(int domain, iolinkmask_t *addr)
+{
+	return test_and_set_bit(domain, addr->bits);
+}
+
+#define iolinks_and(dst, src1, src2) \
+			__iolinks_and(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
+static inline void __iolinks_and(iolinkmask_t *dstp, const iolinkmask_t *src1p,
+					const iolinkmask_t *src2p, int nbits)
+{
+	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define iolinks_or(dst, src1, src2) \
+			__iolinks_or(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
+static inline void __iolinks_or(iolinkmask_t *dstp, const iolinkmask_t *src1p,
+					const iolinkmask_t *src2p, int nbits)
+{
+	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define iolinks_xor(dst, src1, src2) \
+			__iolinks_xor(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
+static inline void __iolinks_xor(iolinkmask_t *dstp, const iolinkmask_t *src1p,
+					const iolinkmask_t *src2p, int nbits)
+{
+	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define iolinks_andnot(dst, src1, src2) \
+			__iolinks_andnot(&(dst), &(src1), &(src2), MAX_NUMIOLINKS)
+static inline void __iolinks_andnot(iolinkmask_t *dstp, const iolinkmask_t *src1p,
+					const iolinkmask_t *src2p, int nbits)
+{
+	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define iolinks_complement(dst, src) \
+			__iolinks_complement(&(dst), &(src), MAX_NUMIOLINKS)
+static inline void __iolinks_complement(iolinkmask_t *dstp,
+					const iolinkmask_t *srcp, int nbits)
+{
+	bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define iolinks_equal(src1, src2) \
+			__iolinks_equal(&(src1), &(src2), MAX_NUMIOLINKS)
+static inline int __iolinks_equal(const iolinkmask_t *src1p,
+					const iolinkmask_t *src2p, int nbits)
+{
+	return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define iolinks_intersects(src1, src2) \
+			__iolinks_intersects(&(src1), &(src2), MAX_NUMIOLINKS)
+static inline int __iolinks_intersects(const iolinkmask_t *src1p,
+					const iolinkmask_t *src2p, int nbits)
+{
+	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define iolinks_subset(src1, src2) \
+			__iolinks_subset(&(src1), &(src2), MAX_NUMIOLINKS)
+static inline int __iolinks_subset(const iolinkmask_t *src1p,
+					const
iolinkmask_t *src2p, int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + +#define iolinks_empty(src) __iolinks_empty(&(src), MAX_NUMIOLINKS) +static inline int __iolinks_empty(const iolinkmask_t *srcp, int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + +#define iolinks_full(iolinkmask) __iolinks_full(&(iolinkmask), MAX_NUMIOLINKS) +static inline int __iolinks_full(const iolinkmask_t *srcp, int nbits) +{ + return bitmap_full(srcp->bits, nbits); +} + +#define iolinks_weight(iolinkmask) __iolinks_weight(&(iolinkmask), MAX_NUMIOLINKS) +static inline int __iolinks_weight(const iolinkmask_t *srcp, int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + +#define iolinks_shift_right(dst, src, n) \ + __iolinks_shift_right(&(dst), &(src), (n), MAX_NUMIOLINKS) +static inline void __iolinks_shift_right(iolinkmask_t *dstp, + const iolinkmask_t *srcp, int n, int nbits) +{ + bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); +} + +#define iolinks_shift_left(dst, src, n) \ + __iolinks_shift_left(&(dst), &(src), (n), MAX_NUMIOLINKS) +static inline void __iolinks_shift_left(iolinkmask_t *dstp, + const iolinkmask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); +} + +/* FIXME: better would be to fix all architectures to never return + > MAX_NUMIOLINKS, then the silly min_ts could be dropped. */ + +#define first_iolink(src) __first_iolink(&(src)) +static inline int __first_iolink(const iolinkmask_t *srcp) +{ + return min_t(int, MAX_NUMIOLINKS, find_first_bit(srcp->bits, MAX_NUMIOLINKS)); +} + +#define next_iolink(n, src) __next_iolink((n), &(src)) +static inline int __next_iolink(int n, const iolinkmask_t *srcp) +{ + return min_t(int, MAX_NUMIOLINKS, find_next_bit(srcp->bits, + MAX_NUMIOLINKS, n+1)); +} + +#define iolinkmask_of_iolink(domain) \ +({ \ + typeof(_unused_iolinkmask_arg_) m; \ + if (sizeof(m) == sizeof(unsigned long)) { \ + m.bits[0] = 1UL<<(domain); \ + } else { \ + iolinks_clear(m); \ + iolink_set((domain), m); \ + } \ + m; \ +}) +#define iolinkmask_of_node_iolink(node, link) \ + iolinkmask_of_iolink(node_iohub_to_domain((node), (link))) + +#define first_unset_iolink(mask) __first_unset_iolink(&(mask)) +static inline int __first_unset_iolink(const iolinkmask_t *maskp) +{ + return min_t(int,MAX_NUMIOLINKS, + find_first_zero_bit(maskp->bits, MAX_NUMIOLINKS)); +} + +#define IOLINK_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMIOLINKS) + +#if MAX_NUMIOLINKS <= BITS_PER_LONG + +#define IOLINK_MASK_ALL \ +((iolinkmask_t) { { \ + [BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = IOLINK_MASK_LAST_WORD \ +} }) + +#else + +#define IOLINK_MASK_ALL \ +((iolinkmask_t) { { \ + [0 ... BITS_TO_LONGS(MAX_NUMIOLINKS)-2] = ~0UL, \ + [BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = IOLINK_MASK_LAST_WORD \ +} }) + +#endif + +#define IOLINK_MASK_NONE \ +((iolinkmask_t) { { \ + [0 ... BITS_TO_LONGS(MAX_NUMIOLINKS)-1] = 0UL \ +} }) + +#define CHUNKSZ 32 +#define nbits_to_hold_value(val) fls(val) +#define BASEDEC 10 /* fancier cpuset lists input in decimal */ + +/** + * bitmap_scnprintf - convert bitmap to an ASCII hex string. + * @buf: byte buffer into which string is placed + * @buflen: reserved size of @buf, in bytes + * @maskp: pointer to bitmap to convert + * @nmaskbits: size of bitmap, in bits + * + * Exactly @nmaskbits bits are displayed. Hex digits are grouped into + * comma-separated sets of eight digits per set. Returns the number of + * characters which were written to *buf, excluding the trailing \0. 
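+ *
+ * (Illustrative example, not from the original text: a 64-bit bitmap with
+ * bits 0 and 33 set prints as "00000002,00000001" - the chunk holding
+ * bits 32..63 comes first.)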
+ */ +static int bitmap_scnprintf(char *buf, unsigned int buflen, + const unsigned long *maskp, int nmaskbits) +{ + int i, word, bit, len = 0; + unsigned long val; + const char *sep = ""; + int chunksz; + u32 chunkmask; + + chunksz = nmaskbits & (CHUNKSZ - 1); + if (chunksz == 0) + chunksz = CHUNKSZ; + + i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ; + for (; i >= 0; i -= CHUNKSZ) { + chunkmask = ((1ULL << chunksz) - 1); + word = i / BITS_PER_LONG; + bit = i % BITS_PER_LONG; + val = (maskp[word] >> bit) & chunkmask; + len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep, + (chunksz+3)/4, val); + chunksz = CHUNKSZ; + sep = ","; + } + return len; +} +#undef CHUNKSZ + +/* + * bscnl_emit(buf, buflen, rbot, rtop, bp) + * + * Helper routine for bitmap_scnlistprintf(). Write decimal number + * or range to buf, suppressing output past buf+buflen, with optional + * comma-prefix. Return len of what was written to *buf, excluding the + * trailing \0. + */ +static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) +{ + if (len > 0) + len += scnprintf(buf + len, buflen - len, ","); + if (rbot == rtop) + len += scnprintf(buf + len, buflen - len, "%d", rbot); + else + len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop); + return len; +} + +/** + * bitmap_scnlistprintf - convert bitmap to list format ASCII string + * @buf: byte buffer into which string is placed + * @buflen: reserved size of @buf, in bytes + * @maskp: pointer to bitmap to convert + * @nmaskbits: size of bitmap, in bits + * + * Output format is a comma-separated list of decimal numbers and + * ranges. Consecutively set bits are shown as two hyphen-separated + * decimal numbers, the smallest and largest bit numbers set in + * the range. Output format is compatible with the format + * accepted as input by bitmap_parselist(). + * + * The return value is the number of characters which were written to *buf + * excluding the trailing '\0', as per ISO C99's scnprintf. 
+ */ +static int bitmap_scnlistprintf(char *buf, unsigned int buflen, + const unsigned long *maskp, int nmaskbits) +{ + int len = 0; + /* current bit is 'cur', most recently seen range is [rbot, rtop] */ + int cur, rbot, rtop; + + if (buflen == 0) + return 0; + buf[0] = 0; + + cur = find_first_bit(maskp, nmaskbits); + rbot = cur; + while (cur < nmaskbits) { + rtop = cur; + cur = find_next_bit(maskp, nmaskbits, cur+1); + if (cur >= nmaskbits || cur > rtop + 1) { + len = bscnl_emit(buf, buflen, rbot, rtop, len); + rbot = cur; + } + } + return len; +} + +#define iolinks_addr(src) ((src).bits) + +#define iolinkmask_scnprintf(buf, len, src) \ + __iolinkmask_scnprintf((buf), (len), &(src), MAX_NUMIOLINKS) +static inline int __iolinkmask_scnprintf(char *buf, int len, + const iolinkmask_t *srcp, int nbits) +{ + return bitmap_scnprintf(buf, len, srcp->bits, nbits); +} + +#define iolinkmask_parse(ubuf, ulen, dst) \ + __iolinkmask_parse((ubuf), (ulen), &(dst), MAX_NUMIOLINKS) +static inline int __iolinkmask_parse(const char __user *buf, int len, + iolinkmask_t *dstp, int nbits) +{ + return bitmap_parse(buf, len, dstp->bits, nbits); +} + +#define iolinklist_scnprintf(buf, len, src) \ + __iolinklist_scnprintf((buf), (len), &(src), MAX_NUMIOLINKS) +static inline int __iolinklist_scnprintf(char *buf, int len, + const iolinkmask_t *srcp, int nbits) +{ + return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); +} + +#define iolinklist_parse(buf, dst) __iolinklist_parse((buf), &(dst), MAX_NUMIOLINKS) +static inline int __iolinklist_parse(const char *buf, iolinkmask_t *dstp, int nbits) +{ + return bitmap_parselist(buf, dstp->bits, nbits); +} + +#if defined(CONFIG_IOHUB_DOMAINS) && MAX_NUMIOLINKS > 1 +#define for_each_iolink_mask(domain, mask) \ + for ((domain) = first_iolink(mask); \ + (domain) < MAX_NUMIOLINKS; \ + (domain) = next_iolink((domain), (mask))) +#define for_each_node_iolink_mask(domain, node, link, mask) \ + for ((domain) = first_iolink(mask), \ + (node) = iolink_domain_to_node((domain)), \ + (link) = iolink_domain_to_link((domain)); \ + (domain) < MAX_NUMIOLINKS; \ + (domain) = next_iolink((domain), (mask)), \ + (node) = iolink_domain_to_node((domain)), \ + (link) = iolink_domain_to_link((domain))) +#else /* MAX_NUMIOLINKS == 1 */ +#define for_each_iolink_mask(domain, mask) \ + if (HAS_MACHINE_E2K_IOHUB) \ + for ((domain) = 0; (domain) < 1; (domain)++) +#define for_each_node_iolink_mask(domain, node, link, mask) \ + if (HAS_MACHINE_E2K_IOHUB) \ + for ((domain) = 0, (node) = 0, (link) = 0; \ + (domain) < 1; (domain)++) +#endif /* MAX_NUMIOLINKS */ + +/* + * The following particular system iolinkmasks and operations + * on them manage all possible and online iolinks. 
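+ *
+ * A short usage sketch (hypothetical driver code, not from the original):
+ *
+ *	int domain;
+ *
+ *	if (num_online_iohubs() > 0)
+ *		for_each_online_iohub(domain)
+ *			pr_info("IOHUB domain %d is online\n", domain);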
+ */
+
+#if defined(CONFIG_IOHUB_DOMAINS) && MAX_NUMIOLINKS > 1
+extern int iolinks_num;
+extern iolinkmask_t iolink_iohub_map;
+extern iolinkmask_t iolink_online_iohub_map;
+extern int iolink_iohub_num;
+extern int iolink_online_iohub_num;
+extern iolinkmask_t iolink_rdma_map;
+extern iolinkmask_t iolink_online_rdma_map;
+extern int iolink_rdma_num;
+extern int iolink_online_rdma_num;
+
+#define num_online_iolinks()	(num_online_iohubs() + num_online_rdmas())
+#define num_possible_iolinks()	iolinks_num
+#define num_online_iohubs()	iolink_online_iohub_num
+#define num_possible_iohubs()	iolink_iohub_num
+#define num_online_rdmas()	iolink_online_rdma_num
+#define num_possible_rdmas()	iolink_rdma_num
+#define iolink_online(domain)	(iohub_online(domain) || rdma_online(domain))
+#define iolink_possible(domain)	(iohab_possible(domain) || \
+					rdma_possible(domain))
+#define node_iolink_online(node, link) \
+		iolink_online(node_iolink_to_domain(node, link))
+#define node_iolink_possible(node, link) \
+		iolink_possible(node_iolink_to_domain(node, link))
+#define iohub_online(domain)	iolink_isset((domain), iolink_online_iohub_map)
+#define iohab_possible(domain)	iolink_isset((domain), iolink_iohub_map)
+#define node_iohub_online(node, link) \
+		iohub_online(node_iohub_to_domain(node, link))
+#define node_iohub_possible(node, link) \
+		iohab_possible(node_iohub_to_domain(node, link))
+#define first_iohub_online()	first_iolink(iolink_online_iohub_map)
+#define rdma_online(domain)	iolink_isset((domain), iolink_online_rdma_map)
+#define rdma_possible(domain)	iolink_isset((domain), iolink_rdma_map)
+#define node_rdma_online(node, link) \
+		rdma_online(node_rdma_to_domain(node, link))
+#define node_rdma_possible(node, link) \
+		rdma_possible(node_rdma_to_domain(node, link))
+#else
+#define iolinks_num		1
+#define iolink_iohub_num	1
+#define num_online_iolinks()	1
+#define num_possible_iolinks()	1
+#define num_online_iohubs()	1
+#define num_possible_iohubs()	1
+#define num_online_rdmas()	0
+#define num_possible_rdmas()	0
+#define iolink_online(domain)	((domain) == 0)
+#define iolink_possible(domain)	((domain) == 0)
+#define node_iolink_online(node, link) \
+		((node) == 0 && (link) == 0)
+#define node_iolink_possible(node, link) \
+		((node) == 0 && (link) == 0)
+#define iohub_online(domain)	((domain) == 0)
+#define iohab_possible(domain)	((domain) == 0)
+#define node_iohub_online(node, link) \
+		((node) == 0 && (link) == 0)
+#define node_iohub_possible(node, link) \
+		((node) == 0 && (link) == 0)
+#define first_iohub_online()	0
+#define rdma_online(domain)	0
+#define rdma_possible(domain)	0
+#define node_rdma_online(node, link)	0
+#define node_rdma_possible(node, link)	0
+#endif
+
+#define iohub_set_online(domain) \
+		set_bit((domain), iolink_online_iohub_map.bits)
+#define iohub_set_offline(domain) \
+		clear_bit((domain), iolink_online_iohub_map.bits)
+#define node_iohub_set_online(node, link) \
+		iohub_set_online(node_iohub_to_domain((node), (link)))
+#define node_iohub_set_offline(node, link) \
+		iohub_set_offline(node_iohub_to_domain((node), (link)))
+#define rdma_set_online(domain) \
+		set_bit((domain), iolink_online_rdma_map.bits)
+#define rdma_set_offline(domain) \
+		clear_bit((domain), iolink_online_rdma_map.bits)
+#define node_rdma_set_online(node, link) \
+		rdma_set_online(node_rdma_to_domain((node), (link)))
+#define node_rdma_set_offline(node, link) \
+		rdma_set_offline(node_rdma_to_domain((node), (link)))
+
+#define for_each_iohub(domain) \
+		for_each_iolink_mask((domain), iolink_iohub_map)
+#define
for_each_online_iohub(domain) \ + for_each_iolink_mask((domain), iolink_online_iohub_map) +#define for_each_node_iohub(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_iohub_map) +#define for_each_online_node_iohub(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_online_iohub_map) +#define for_each_rdma(domain) \ + for_each_iolink_mask((domain), iolink_rdma_map) +#define for_each_online_rdma(domain) \ + for_each_iolink_mask((domain), iolink_online_rdma_map) +#define for_each_node_rdma(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_rdma_map) +#define for_each_online_node_rdma(domain, node, link) \ + for_each_node_iolink_mask((domain), (node), (link), \ + iolink_online_rdma_map) + +#endif /* __ASM_L_IOLINKMASK_H */ diff --git a/arch/e2k/include/asm-l/ipi.h b/arch/e2k/include/asm-l/ipi.h new file mode 100644 index 0000000..9209adc --- /dev/null +++ b/arch/e2k/include/asm-l/ipi.h @@ -0,0 +1,164 @@ +#ifndef _ASM_L_IPI_H +#define _ASM_L_IPI_H + +#ifdef CONFIG_L_LOCAL_APIC + +/* + * Copyright 2004 James Cleverdon, IBM. + * Subject to the GNU Public License, v.2 + * + * Generic APIC InterProcessor Interrupt code. + * + * Moved to include file by James Cleverdon from + * arch/x86-64/kernel/smp.c + * + * Copyrights from kernel/smp.c: + * + * (c) 1995 Alan Cox, Building #3 + * (c) 1998-99, 2000 Ingo Molnar + * (c) 2002,2003 Andi Kleen, SuSE Labs. + * Subject to the GNU Public License, v.2 + */ + +#include +#include +#include + +/* + * the following functions deal with sending IPIs between CPUs. + * + * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. + */ + +static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, + unsigned int dest) +{ + unsigned int icr = shortcut | dest; + + switch (vector) { + default: + icr |= APIC_DM_FIXED | vector; + break; + case NMI_VECTOR: + icr |= APIC_DM_NMI; + break; + } + return icr; +} + +static inline int __prepare_ICR2(unsigned int mask) +{ + return SET_APIC_DEST_FIELD(mask); +} + +static inline void __xapic_wait_icr_idle(void) +{ + while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) + cpu_relax(); +} + +static inline void +__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) +{ + /* + * Subtle. In the case of the 'never do double writes' workaround + * we have to lock out interrupts to be safe. As we don't care + * of the value read we use an atomic rmw access to avoid costly + * cli/sti. Otherwise we use an even cheaper single atomic write + * to the APIC. + */ + unsigned int cfg; + + /* + * Wait for idle. + */ + __xapic_wait_icr_idle(); + + /* + * No need to touch the target chip field + */ + cfg = __prepare_ICR(shortcut, vector, dest); + + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + native_apic_mem_write(APIC_ICR, cfg); +} + +/* + * This is used to send an IPI with no shorthand notation (the destination is + * specified in bits 56 to 63 of the ICR). + */ +static inline void + __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) +{ + unsigned long cfg; + + /* + * Wait for idle. + */ + if (unlikely(vector == NMI_VECTOR)) + safe_apic_wait_icr_idle(); + else + __xapic_wait_icr_idle(); + + /* + * prepare target chip field + */ + cfg = __prepare_ICR2(mask); + native_apic_mem_write(APIC_ICR2, cfg); + + /* + * program the ICR + */ + cfg = __prepare_ICR(0, vector, dest); + + /* + * Send the IPI. The write to APIC_ICR fires this off. 
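+	 * (Worked example, for illustration only: sending vector 0x31 to
+	 * physical APIC ID 5 writes SET_APIC_DEST_FIELD(5) to APIC_ICR2,
+	 * then APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x31 to APIC_ICR.)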
+ */ + native_apic_mem_write(APIC_ICR, cfg); +} + +extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, + int vector); +#if 0 +extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, + int vector); +#endif + +/* Avoid include hell */ +#define NMI_VECTOR 0x02 + +extern int no_broadcast; + +static inline void __default_local_send_IPI_allbutself(int vector) +{ + if (no_broadcast || vector == NMI_VECTOR) + apic->send_IPI_mask_allbutself(cpu_online_mask, vector); + else + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); +} + +static inline void __default_local_send_IPI_all(int vector) +{ + if (no_broadcast || vector == NMI_VECTOR) + apic->send_IPI_mask(cpu_online_mask, vector); + else + __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); +} + +#ifdef CONFIG_L_X86_32 +extern void default_send_IPI_mask_logical(const struct cpumask *mask, + int vector); +extern void default_send_IPI_allbutself(int vector); +extern void default_send_IPI_all(int vector); +extern void default_send_IPI_self(int vector); +#endif + +#endif + +#endif /* _ASM_L_IPI_H */ diff --git a/arch/e2k/include/asm-l/irq_numbers.h b/arch/e2k/include/asm-l/irq_numbers.h new file mode 100644 index 0000000..a2f34e3 --- /dev/null +++ b/arch/e2k/include/asm-l/irq_numbers.h @@ -0,0 +1,9 @@ +#ifndef _ASM_L_IRQ_NUMBERS_H +#define _ASM_L_IRQ_NUMBERS_H + +#include + +/* Number of additional (chained) interrupts */ +#define I2C_SPI_IRQS_NUM 2 + +#endif diff --git a/arch/e2k/include/asm-l/irq_remapping.h b/arch/e2k/include/asm-l/irq_remapping.h new file mode 100644 index 0000000..d0d264e --- /dev/null +++ b/arch/e2k/include/asm-l/irq_remapping.h @@ -0,0 +1,6 @@ +#ifndef _ASM_L_IRQ_REMAPPING_H +#define _ASM_L_IRQ_REMAPPING_H + +#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) + +#endif /* _ASM_L_IRQ_REMAPPING_H */ diff --git a/arch/e2k/include/asm-l/irq_vectors.h b/arch/e2k/include/asm-l/irq_vectors.h new file mode 100644 index 0000000..2141f82 --- /dev/null +++ b/arch/e2k/include/asm-l/irq_vectors.h @@ -0,0 +1,201 @@ +#ifndef _ASM_L_IRQ_VECTORS_H +#define _ASM_L_IRQ_VECTORS_H + +#include + +/* + * Linux IRQ vector layout. + * + * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can + * be defined by Linux. They are used as a jump table by the CPU when a + * given vector is triggered - by a CPU-external, CPU-internal or + * software-triggered event. + * + * Linux sets the kernel code address each entry jumps to early during + * bootup, and never changes them. This is the general layout of the + * IDT entries: + * + * Vectors 0 ... 31 : system traps and exceptions - hardcoded events + * Vectors 32 ... 127 : device interrupts + * Vector 128 : legacy int80 syscall interface + * Vectors 129 ... 237 : device interrupts + * Vectors 238 ... 255 : special interrupts + * + * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. + * + * This file enumerates the exact layout of them: + */ + +#define NMI_VECTOR 0x02 +#define MCE_VECTOR 0x12 + +/* + * IDT vectors usable for external interrupt sources start + * at 0x20: + */ +#define FIRST_EXTERNAL_VECTOR 0x20 +#if 0 +/* + * We start allocating at 0x21 to spread out vectors evenly between + * priority levels. 
(0x80 is the syscall vector) + */ +#define VECTOR_OFFSET_START 1 +#else +#define VECTOR_OFFSET_START 0 +#endif + +#if 0 +#ifdef CONFIG_X86_32 +# define SYSCALL_VECTOR 0x80 +# define IA32_SYSCALL_VECTOR 0x80 +#else +# define IA32_SYSCALL_VECTOR 0x80 +#endif +#endif + +/* + * Reserve the lowest usable priority level 0x20 - 0x2f for triggering + * cleanup after irq migration. + */ +#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR + +/* + * Vectors 0x30-0x3f are used for ISA interrupts. + */ +#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) + +#define IRQ1_VECTOR (IRQ0_VECTOR + 1) +#define IRQ2_VECTOR (IRQ0_VECTOR + 2) +#define IRQ3_VECTOR (IRQ0_VECTOR + 3) +#define IRQ4_VECTOR (IRQ0_VECTOR + 4) +#define IRQ5_VECTOR (IRQ0_VECTOR + 5) +#define IRQ6_VECTOR (IRQ0_VECTOR + 6) +#define IRQ7_VECTOR (IRQ0_VECTOR + 7) +#define IRQ8_VECTOR (IRQ0_VECTOR + 8) +#define IRQ9_VECTOR (IRQ0_VECTOR + 9) +#define IRQ10_VECTOR (IRQ0_VECTOR + 10) +#define IRQ11_VECTOR (IRQ0_VECTOR + 11) +#define IRQ12_VECTOR (IRQ0_VECTOR + 12) +#define IRQ13_VECTOR (IRQ0_VECTOR + 13) +#define IRQ14_VECTOR (IRQ0_VECTOR + 14) +#define IRQ15_VECTOR (IRQ0_VECTOR + 15) + +/* + * Special IRQ vectors used by the SMP architecture, 0xf0-0xff + * + * some of the following vectors are 'rare', they are merged + * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. + * TLB, reschedule and local APIC vectors are performance-critical. + */ + +#define SPURIOUS_APIC_VECTOR 0xff +/* + * Sanity check + */ +#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F) +# error SPURIOUS_APIC_VECTOR definition error +#endif + +#if 0 +#define ERROR_APIC_VECTOR 0xfe +#define RESCHEDULE_VECTOR 0xfd +#define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb +#define THERMAL_APIC_VECTOR 0xfa +#define THRESHOLD_APIC_VECTOR 0xf9 +#define REBOOT_VECTOR 0xf8 + +/* f0-f7 used for spreading out TLB flushes: */ +#define INVALIDATE_TLB_VECTOR_END 0xf7 +#define INVALIDATE_TLB_VECTOR_START 0xf0 +#define NUM_INVALIDATE_TLB_VECTORS 8 + +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. + */ +#define LOCAL_TIMER_VECTOR 0xef + +/* + * Generic system vector for platform specific use + */ +#define X86_PLATFORM_IPI_VECTOR 0xed + +/* + * Performance monitoring pending work vector: + */ +#define LOCAL_PENDING_VECTOR 0xec + +#define UV_BAU_MESSAGE 0xea + +/* + * Self IPI vector for machine checks + */ +#define MCE_SELF_VECTOR 0xeb +#endif + +/* + * First APIC vector available to drivers: (vectors 0x30-0xee) we + * start at 0x31(0x41) to spread out vectors evenly between priority + * levels. (0x80 is the syscall vector) + */ +#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) + +#ifdef CONFIG_EPIC +#define NR_VECTORS 1024 +#else +#define NR_VECTORS 256 +#endif +#define NR_VECTORS_APIC 256 + +#define FPU_IRQ 13 + +#define FIRST_VM86_IRQ 3 +#define LAST_VM86_IRQ 15 + +#ifndef __ASSEMBLY__ +static inline int invalid_vm86_irq(int irq) +{ + return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; +} +#endif + +/* + * Size the maximum number of interrupts. + * + * If the irq_desc[] array has a sparse layout, we can size things + * generously - it scales up linearly with the maximum number of CPUs, + * and the maximum number of IO-APICs, whichever is higher. + * + * In other cases we size more conservatively, to not create too large + * static arrays. 
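+ *
+ * (Illustrative arithmetic, with made-up limits: without CONFIG_EPIC,
+ * NR_VECTORS is 256; if NR_CPUS were 64 and MAX_IO_APICS were 36, then
+ * CPU_VECTOR_LIMIT == 512, IO_APIC_VECTOR_LIMIT == 1152, and the
+ * SPARSE_IRQ case gives NR_IRQS == 256 + 1152 == 1408.)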
+ */ + +#if 0 +#define NR_IRQS_LEGACY 16 +#else +#define NR_IRQS_LEGACY 0 +#endif + +#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS ) +#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) + +#ifdef CONFIG_L_IO_APIC +# ifdef CONFIG_SPARSE_IRQ +# define NR_IRQS \ + (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ + (NR_VECTORS + CPU_VECTOR_LIMIT) : \ + (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) +# else +# if NR_CPUS < MAX_IO_APICS +# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) +# else +# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) +# endif +# endif +#else /* !CONFIG_L_IO_APIC: */ +# define NR_IRQS NR_IRQS_LEGACY +#endif + +#endif /* _ASM_L_IRQ_VECTORS_H */ diff --git a/arch/e2k/include/asm-l/irq_work.h b/arch/e2k/include/asm-l/irq_work.h new file mode 100644 index 0000000..5ecb04c --- /dev/null +++ b/arch/e2k/include/asm-l/irq_work.h @@ -0,0 +1,16 @@ +#ifndef _ASM_L_IRQ_WORK_H +#define _ASM_L_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + //TODO only arm does it this way! (see bug 120742) +#ifdef CONFIG_SMP + return true; +#else + return false; +#endif +} + +extern void arch_irq_work_raise(void); + +#endif /* _ASM_L_IRQ_WORK_H */ diff --git a/arch/e2k/include/asm-l/irqdomain.h b/arch/e2k/include/asm-l/irqdomain.h new file mode 100644 index 0000000..d26075b --- /dev/null +++ b/arch/e2k/include/asm-l/irqdomain.h @@ -0,0 +1,63 @@ +#ifndef _ASM_IRQDOMAIN_H +#define _ASM_IRQDOMAIN_H + +#include +#include + +#ifdef CONFIG_X86_LOCAL_APIC +enum { + /* Allocate contiguous CPU vectors */ + X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 0x1, +}; + +extern struct irq_domain *x86_vector_domain; + +extern void init_irq_alloc_info(struct irq_alloc_info *info, + const struct cpumask *mask); +extern void copy_irq_alloc_info(struct irq_alloc_info *dst, + struct irq_alloc_info *src); +#endif /* CONFIG_X86_LOCAL_APIC */ + +#ifdef CONFIG_X86_IO_APIC +struct device_node; +struct irq_data; + +enum ioapic_domain_type { + IOAPIC_DOMAIN_INVALID, + IOAPIC_DOMAIN_LEGACY, + IOAPIC_DOMAIN_STRICT, + IOAPIC_DOMAIN_DYNAMIC, +}; + +struct ioapic_domain_cfg { + enum ioapic_domain_type type; + const struct irq_domain_ops *ops; + struct device_node *dev; +}; + +extern const struct irq_domain_ops mp_ioapic_irqdomain_ops; + +extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg); +extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); +extern void mp_irqdomain_activate(struct irq_domain *domain, + struct irq_data *irq_data); +extern void mp_irqdomain_deactivate(struct irq_domain *domain, + struct irq_data *irq_data); +extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); +#endif /* CONFIG_X86_IO_APIC */ + +#ifdef CONFIG_PCI_MSI +extern void arch_init_msi_domain(struct irq_domain *domain); +#else +static inline void arch_init_msi_domain(struct irq_domain *domain) { } +#endif + +#ifdef CONFIG_HT_IRQ +extern void arch_init_htirq_domain(struct irq_domain *domain); +#else +static inline void arch_init_htirq_domain(struct irq_domain *domain) { } +#endif + +#endif diff --git a/arch/e2k/include/asm-l/l-uncached.h b/arch/e2k/include/asm-l/l-uncached.h new file mode 100644 index 0000000..5ab2e6c --- /dev/null +++ b/arch/e2k/include/asm-l/l-uncached.h @@ -0,0 +1,11 @@ +#ifndef _L_UNCACHED_H +#define _L_UNCACHED_H + +void *l_alloc_uncached(struct device *dev, size_t size, + phys_addr_t *phys_addr, gfp_t gfp); +void l_free_uncached(struct device *dev, size_t size, void *cpu_addr); + +int l_init_uncached_pool(void); +void 
l_destroy_uncached_pool(void); + +#endif /* !(_L_UNCACHED_H) */ diff --git a/arch/e2k/include/asm-l/l_ide.h b/arch/e2k/include/asm-l/l_ide.h new file mode 100644 index 0000000..5b96ab3 --- /dev/null +++ b/arch/e2k/include/asm-l/l_ide.h @@ -0,0 +1,13 @@ +#ifndef _L_IDE_H_ +#define _L_IDE_H_ + +#include + +static void l_init_iops (ide_hwif_t *hwif) +{ +} + +#define L_FORCE_NATIVE_MODE 1 +#define L_DEAULT_IDE_DMA_MODE ATA_UDMA5 /* default max UDMA capable */ + +#endif /*_L_IDE_H_*/ diff --git a/arch/e2k/include/asm-l/l_pmc.h b/arch/e2k/include/asm-l/l_pmc.h new file mode 100644 index 0000000..1a80d13 --- /dev/null +++ b/arch/e2k/include/asm-l/l_pmc.h @@ -0,0 +1,140 @@ +#ifndef __L_ASM_PMC_H__ +#define __L_ASM_PMC_H__ + +#include +#include +#include +#include + +#define PMC_L_MAX_IDLE_STATES 4 + +#define PMC_L_TEMP_RG_CUR_REG_0 0x20 +#define PMC_L_TEMP_RG_CUR_REG_1 0x24 +#define PMC_L_GPE0_STS_REG 0x28 +#define PMC_L_GPE0_STS_CLR 0xf +#define PMC_L_GPE0_EN_REG 0x2c +#define PMC_L_TEMP_RG0_REG 0x30 +#define PMC_L_TEMP_RG1_REG 0x34 +#define PMC_L_TEMP_RG2_REG 0x38 +#define PMC_L_TEMP_RG3_REG 0x3c +#define PMC_L_TEMP_RG_CUR_REG_2 0x40 +#define PMC_L_TEMP_RGX_FALL (0x0 << 12) +#define PMC_L_TEMP_RGX_RISE (0x3 << 12) +#define PMC_L_PC_S0_REG 0x100 +#define PMC_L_PC_S1_REG 0x104 + +#define PMC_L_COFVID_3D_STATUS_REG 0x140 +#define PMC_L_P_STATE_3D_CNTRL_REG 0x148 +#define PMC_L_P_STATE_3D_STATUS_REG 0x14c +#define PMC_L_P_STATE_3D_VALUE_0_REG 0x150 +#define PMC_L_P_STATE_3D_VALUE_1_REG 0x154 +#define PMC_L_P_STATE_3D_VALUE_2_REG 0x158 +#define PMC_L_P_STATE_3D_VALUE_3_REG 0x15c +#define PMC_L_C_STATE_3D_REG 0x160 +#define PMC_L_2D_FC_REG 0x164 + +#define PMC_L_REGS_AREA_SIZE 0x168 + +/* Bits in PMC registers: */ +/* P_State_value_X (RW): */ +#define PMC_L_P_STATE_VALUE_VID_MASK 0x0000fe00 +#define PMC_L_P_STATE_VALUE_VID_SHIFT 9 +#define PMC_L_P_STATE_VALUE_DID_MASK 0x000001f0 +#define PMC_L_P_STATE_VALUE_DID_SHIFT 4 +#define PMC_L_P_STATE_VALUE_FID_MASK 0x0000000f +#define PMC_L_P_STATE_VALUE_FID_SHIFT 0 + +/* P_State_Cntrl (RW): */ +#define PMC_L_P_STATE_CNTRL_MASK 0x3 +#define PMC_L_P_STATE_CNTRL_SHIFT 0 +#define PMC_L_P_STATE_CNTRL_P0_VAL 0x0 +#define PMC_L_P_STATE_CNTRL_P1_VAL 0x1 +#define PMC_L_P_STATE_CNTRL_P2_VAL 0x2 +#define PMC_L_P_STATE_CNTRL_P3_VAL 0x3 + +/* P_State_status (RO): */ +#define PMC_L_P_STATE_STATUS_MASK 0x3 +#define PMC_L_P_STATE_STATUS_SHIFT 0 + +/* P_State_3D_Cntrl (RW): */ +#define PMC_L_P_STATE_3D_CNTRL_MASK 0x3 +#define PMC_L_P_STATE_3D_CNTRL_SHIFT 0 +#define PMC_L_P_STATE_3D_CNTRL_P0_VAL 0x0 +#define PMC_L_P_STATE_3D_CNTRL_P1_VAL 0x1 +#define PMC_L_P_STATE_3D_CNTRL_P2_VAL 0x2 +#define PMC_L_P_STATE_3D_CNTRL_P3_VAL 0x3 + +/* COVFID_status (contains RW, Status, RM, RO bits): */ +#define PMC_L_COVFID_STATUS_PMCEN_VAL 0x0000000000000001 /* RW - 0 Bit */ +#define PMC_L_COVFID_STATUS_RMWEN_VAL 0x4000000000000000 /* RM - 62 Bit */ +#define PMC_L_COVFID_STATUS_VMAX_MASK 0x3f80000000000000 /* RM - 61:55 Bits */ +#define PMC_L_COVFID_STATUS_VMAX_SHIFT 55 +#define PMC_L_COVFID_STATUS_VMIN_MASK 0x007f000000000000 /* RM - 54:48 Bits */ +#define PMC_L_COVFID_STATUS_VMIN_SHIFT 48 +#define PMC_L_COVFID_STATUS_FMAX_MASK 0x0000ff0000000000 /* RM - 26:20 Bits */ +#define PMC_L_COVFID_STATUS_FMAX_SHIFT 40 +#define PMC_L_COVFID_STATUS_TRANS_VAL 0x0000000000000002 /* RO - 1 Bit */ +#define PMC_L_COVFID_STATUS_PNUM_MASK 0x000000000000000c /* RO - 3:2 Bits */ +#define PMC_L_COVFID_STATUS_PNUM_SHIFT 2 +#define PMC_L_COVFID_STATUS_VID_MASK 0x000000000003f000 /* RO - 18:12 Bits */ +#define 
PMC_L_COVFID_STATUS_VID_SHIFT	12
+#define PMC_L_COVFID_STATUS_FID_MASK	0x0000000000000ff0 /* RO - 11:4 Bits */
+#define PMC_L_COVFID_STATUS_FID_SHIFT	4
+
+#define PMC_L_COVFID_RM_MASK	(PMC_L_COVFID_STATUS_VMAX_MASK | \
+				PMC_L_COVFID_STATUS_VMIN_MASK | \
+				PMC_L_COVFID_STATUS_FMAX_MASK)
+
+#define PMC_L_MAX_PSTATES	4
+#define PMC_L_PRECISION		10
+#define MAX_NUM_PMCS		1
+#define SPMC_TEMP_BAD_VALUE	-1000
+
+/* The driver supports 1 passive trip point and 1 critical trip point */
+enum l_pmc_thermal_trip {
+	LPMC_TRIP_PASSIVE,
+	LPMC_TRIP_CRITICAL,
+	LPMC_TRIP_NUM,
+};
+
+#define LPMC_TRIP_POINTS_MSK	((1 << LPMC_TRIP_NUM) - 1)
+
+struct l_pmc {
+	unsigned char type;
+	unsigned char version;
+	void __iomem *cntrl_base;
+	void __iomem *data_base;
+	unsigned long vrange;			/* VMAX, VMIN, FMAX */
+	unsigned int data_size;
+	unsigned int p_state[PMC_L_MAX_PSTATES];	/* VID,
+							 * DID,
+							 * FID
+							 */
+	unsigned int freq;			/* Frequency in KHz */
+	struct pci_dev *pdev;
+	struct platform_device *i2c_chan;
+	struct thermal_zone_device *thermal;
+	enum thermal_device_mode thermal_mode;
+	int trip_temp[LPMC_TRIP_NUM];
+	int trip_hyst[LPMC_TRIP_NUM];
+	raw_spinlock_t thermal_lock;
+	struct thermal_cooling_device *cdev;
+	struct cpufreq_policy *policy;
+};
+
+extern struct l_pmc l_pmc[MAX_NUM_PMCS];
+
+#if defined(CONFIG_L_PMC) || defined(CONFIG_S2_PMC)
+extern int spmc_get_temp_cur0(void);
+int pmc_l_gpufreq_set_scale(unsigned char scale);
+int pmc_l_gpufreq_get_scale(void);
+int pmc_l_gpufreq_get_frequency(void);
+extern unsigned int load_threshold;
+#else
+static inline int spmc_get_temp_cur0(void) { return SPMC_TEMP_BAD_VALUE; }
+#endif /* CONFIG_L_PMC || CONFIG_S2_PMC */
+
+#endif /* __L_ASM_PMC_H__ */
diff --git a/arch/e2k/include/asm-l/l_spmc.h b/arch/e2k/include/asm-l/l_spmc.h
new file mode 100644
index 0000000..22327e9
--- /dev/null
+++ b/arch/e2k/include/asm-l/l_spmc.h
@@ -0,0 +1,14 @@
+#ifndef __L_ASM_SPMC_H__
+#define __L_ASM_SPMC_H__
+
+#ifdef CONFIG_ACPI_L_SPMC
+extern void do_spmc_halt(void);
+#else
+static inline void do_spmc_halt(void)
+{
+	printk(KERN_ERR "Board does not use KPI-2: SPMC is not present.\n");
+}
+#endif
+
+#endif /* __L_ASM_SPMC_H__ */
diff --git a/arch/e2k/include/asm-l/l_timer.h b/arch/e2k/include/asm-l/l_timer.h
new file mode 100644
index 0000000..f362e05
--- /dev/null
+++ b/arch/e2k/include/asm-l/l_timer.h
@@ -0,0 +1,103 @@
+#ifndef _L_ASM_L_TIMER_H
+#define _L_ASM_L_TIMER_H
+
+#include
+
+/*
+ * Elbrus timer
+ */
+
+extern struct clock_event_device *global_clock_event;
+extern int get_lt_timer(void);
+extern u32 lt_read(void);
+extern struct clocksource lt_cs;
+
+/* New timer registers */
+#define PIT_COUNTER_LIMIT	0x00
+#define PIT_COUNTER_START_VALUE	0x04
+#define PIT_COUNTER		0x08
+#define PIT_COUNTER_CONTROL	0x0c
+#define PIT_WD_COUNTER		0x10
+#define PIT_WD_COUNTER_LOW	PIT_WD_COUNTER
+#define PIT_WD_COUNTER_HIGH	(PIT_WD_COUNTER_LOW + 0x04)
+#define PIT_WD_LIMIT		0x18
+#define PIT_POWER_COUNTER	0x1c
+#define PIT_POWER_COUNTER_LOW	PIT_POWER_COUNTER
+#define PIT_POWER_COUNTER_HIGH	(PIT_POWER_COUNTER_LOW + 0x04)
+#define PIT_WD_CONTROL		0x24
+#define PIT_RESET_COUNTER	0x28
+#define PIT_RESET_COUNTER_LOW	PIT_RESET_COUNTER
+#define PIT_RESET_COUNTER_HIGH	(PIT_RESET_COUNTER_LOW + 0x04)
+
+typedef struct lt_regs {
+	u32 counter_limit;	/* timer counter limit value */
+	u32 counter_start;	/* start value of counter */
+	u32 counter;		/* timer counter */
+	u32 counter_cntr;	/* timer control register */
+	u32 wd_counter;		/* watchdog counter */
+	u32 wd_prescaler;	/* watchdog prescaler */
+	u32 wd_limit;		/* watchdog limit */
+	u32 power_counter_lo;	/* power counter low bits */
+	u32 power_counter_hi;	/* power counter high bits */
+	u32 wd_control;		/* watchdog control register */
+	u32 reset_counter_lo;	/* reset counter low bits */
+	u32 reset_counter_hi;	/* reset counter high bits */
+} lt_regs_t;
+
+extern unsigned long long lt_phys_base;
+extern lt_regs_t *lt_regs;
+
+extern void setup_lt_timer(void);
+extern int __init init_lt_clocksource(void);
+
+/* counters registers structure */
+#define LT_COUNTER_SHIFT	9	/* [30: 9] counters value */
+#define LT_COUNTER_LIMIT_SHIFT	31	/* [31] Limit bit */
+#define LT_COUNTER_LIMIT_BIT	(1 << LT_COUNTER_LIMIT_SHIFT)
+
+#define LT_WRITE_COUNTER_VALUE(count)	((count) << LT_COUNTER_SHIFT)
+#define LT_READ_COUNTER_VALUE(count)	((count) >> LT_COUNTER_SHIFT)
+#define LT_NSEC_PER_COUNTER_INCR	100	/* 10 MHz == 100 nanoseconds
+						 * per increment */
+
+/* counter control register structure */
+#define LT_COUNTER_CNTR_START	0x00000001	/* start/stop timer */
+#define LT_COUNTER_CNTR_INVERTL	0x00000002	/* invert limit bit */
+#define LT_COUNTER_CNTR_LINIT	0x00000004	/* Limit bit initial state */
+						/* 1 - limit bit set to 1 */
+
+#define LT_COUNTER_CNTR_LAUNCH	(LT_COUNTER_CNTR_START)
+#define LT_INVERT_COUNTER_CNTR_LAUNCH	(LT_COUNTER_CNTR_LAUNCH | \
+					LT_COUNTER_CNTR_INVERTL | \
+					LT_COUNTER_CNTR_LINIT)
+#define LT_COUNTER_CNTR_STOP	(0)
+
+#define WD_CLOCK_TICK_RATE	10000000L
+#define WD_LATCH(tick_rate)	(((tick_rate) + HZ/2) / HZ)
+#define WD_LIMIT_SHIFT		12
+#define WD_WRITE_COUNTER_VALUE(count)	(count)
+#define WD_READ_COUNTER_VALUE(count)	((count) << WD_LIMIT_SHIFT)
+#define WD_SET_COUNTER_VAL(sek) \
+		(WD_WRITE_COUNTER_VALUE(WD_CLOCK_TICK_RATE * (sek)))
+
+#define WD_INTR_MODE	0x1
+#define WD_ENABLE	0x2
+#define WD_EVENT	0x4
+
+#define WD_COUNTER_BASE	0x10
+
+/* System timer Registers (structure see asm/l_timer_regs.h) */
+
+#define COUNTER_LIMIT		0x00
+#define COUNTER_START_VALUE	0x04
+#define L_COUNTER		0x08
+#define COUNTER_CONTROL		0x0c
+#define WD_COUNTER_L		0x10
+#define WD_COUNTER_H		0x14
+#define WD_LIMIT		0x18
+#define POWER_COUNTER_L		0x1c
+#define POWER_COUNTER_H		0x20
+#define WD_CONTROL		0x24
+#define RESET_COUNTER_L		0x28
+#define RESET_COUNTER_H		0x2c
+
+#endif /* _L_ASM_L_TIMER_H */
diff --git a/arch/e2k/include/asm-l/l_timer_regs.h b/arch/e2k/include/asm-l/l_timer_regs.h
new file mode 100644
index 0000000..1846cbd
--- /dev/null
+++ b/arch/e2k/include/asm-l/l_timer_regs.h
@@ -0,0 +1,119 @@
+#ifndef _L_ASM_L_TIMER_REGS_H
+#define _L_ASM_L_TIMER_REGS_H
+
+#include
+
+/*
+ * Elbrus System timer Registers
+ */
+
+#define COUNTER_LIMIT 0x00
+typedef struct counter_limit_fields {
+	u32 unused	: 9;	/* [8:0] */
+	u32 c_l		: 22;	/* [30:9] */
+	u32 l		: 1;	/* [31] */
+} counter_limit_fields_t;
+typedef union counter_limit {
+	u32 word;
+	counter_limit_fields_t fields;
+} counter_limit_t;
+#define COUNTER_START_VALUE 0x04
+typedef struct counter_st_v_fields {
+	u32 unused	: 9;	/* [8:0] */
+	u32 c_st_v	: 22;	/* [30:9] */
+	u32 l		: 1;	/* [31] */
+} counter_st_v_fields_t;
+typedef union counter_st_v {
+	u32 word;
+	counter_st_v_fields_t fields;
+} counter_st_v_t;
+#define COUNTER 0x08
+typedef struct counter_fields {
+	u32 unused	: 9;	/* [8:0] */
+	u32 c		: 22;	/* [30:9] */
+	u32 l		: 1;	/* [31] */
+} counter_fields_t;
+typedef union counter {
+	u32 word;
+	counter_fields_t fields;
+} counter_t;
+#define COUNTER_CONTROL 0x0c
+typedef struct counter_control_fields {
+	u32 s_s		: 1;	/* [0] */
+	u32 inv_l	: 1;	/* [1] */
+	u32 l_ini	: 1;	/* [2] */
+	u32 unused	: 29;	/* [31:3] */
+}
counter_control_fields_t; +typedef union counter_control { + u32 word; + counter_control_fields_t fields; +} counter_control_t; +#define WD_COUNTER_L 0x10 +typedef struct wd_counter_l_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_l_fields_t; +typedef union wd_counter_l { + u32 word; + wd_counter_l_fields_t fields; +} wd_counter_l_t; +#define WD_COUNTER_H 0x14 +typedef struct wd_counter_h_fields { + u32 wd_c : 32; /* [31:0] */ +} wd_counter_h_fields_t; +typedef union wd_counter_h { + u32 word; + wd_counter_h_fields_t fields; +} wd_counter_h_t; +#define WD_LIMIT 0x18 +typedef struct wd_limit_fields { + u32 wd_l : 32; /* [31:0] */ +} wd_limit_fields_t; +typedef union wd_limit { + u32 word; + wd_limit_fields_t fields; +} wd_limit_t; +#define POWER_COUNTER_L 0x1c +typedef struct power_counter_l_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_l_fields_t; +typedef union power_counter_l { + u32 word; + power_counter_l_fields_t fields; +} power_counter_l_t; +#define POWER_COUNTER_H 0x20 +typedef struct power_counter_h_fields { + u32 pw_c : 32; /* [31:0] */ +} power_counter_h_fields_t; +typedef union power_counter_h { + u32 word; + power_counter_h_fields_t fields; +} power_counter_h_t; +#define WD_CONTROL 0x24 +typedef struct wd_control_fields { + u32 w_m : 1; /* [0] */ + u32 w_out_e : 1; /* [1] */ + u32 w_evn : 1; /* [2] */ + u32 unused : 29; /* [31:3] */ +} wd_control_fields_t; +typedef union wd_control { + u32 word; + wd_control_fields_t fields; +} wd_control_t; +#define RESET_COUNTER_L 0x28 +typedef struct reset_counter_l_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_l_fields_t; +typedef union reset_counter_l { + u32 word; + reset_counter_l_fields_t fields; +} reset_counter_l_t; +#define RESET_COUNTER_H 0x2c +typedef struct reset_counter_h_fields { + u32 rst : 32; /* [31:0] */ +} reset_counter_h_fields_t; +typedef union reset_counter_h { + u32 word; + reset_counter_h_fields_t fields; +} reset_counter_h_t; + +#endif /* _L_ASM_L_TIMER_REGS_H */ diff --git a/arch/e2k/include/asm-l/mpspec.h b/arch/e2k/include/asm-l/mpspec.h new file mode 100644 index 0000000..f1c795b --- /dev/null +++ b/arch/e2k/include/asm-l/mpspec.h @@ -0,0 +1,634 @@ +#ifndef __L_ASM_MPSPEC_H +#define __L_ASM_MPSPEC_H + +/* + * Structure definitions for SMP machines following the + * Intel Multiprocessing Specification 1.1 and 1.4. + */ + +#ifndef __ASSEMBLY__ + +#include + +#include + +#include +#include +#ifdef CONFIG_E2K +#include +#endif + + +/* + * This tag identifies where the SMP configuration + * information is. + */ +#ifdef __LITTLE_ENDIAN +#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') +#elif __BIG_ENDIAN +#define SMP_MAGIC_IDENT ('_'|('P'<<8)|('M'<<16)|('_'<<24)) +#else +#error not byte order defined +#endif /*__BIG_ENDIAN*/ + +/* + * a maximum of NR_CPUS APICs with the current APIC ID architecture. 
+ * a maximum of IO-APICs is summary: + * each IO link can have IOHUB with IO-APIC + * each node can have embeded IO-APIC + */ +#define MAX_LOCAL_APICS (NR_CPUS * 2) /* apic numbering can be with holes */ +#define MAX_IO_APICS (MAX_NUMIOLINKS + MAX_NUMNODES) +#define MAX_APICS MAX_LOCAL_APICS + +#define SMP_FLOATING_TABLE_LEN sizeof(struct intel_mp_floating) + +struct intel_mp_floating +{ + char mpf_signature[4]; /* "_MP_" */ + unsigned long mpf_physptr; /* Configuration table address */ + unsigned char mpf_length; /* Our length (paragraphs) */ + unsigned char mpf_specification;/* Specification version */ + unsigned char mpf_checksum; /* Checksum (makes sum 0) */ + unsigned char mpf_feature1; /* Standard or configuration ? */ + unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ + unsigned char mpf_feature3; /* Unused (0) */ + unsigned char mpf_feature4; /* Unused (0) */ + unsigned char mpf_feature5; /* Unused (0) */ +}; + +#define MPF_64_BIT_SPECIFICATION 8 /* MPF specification describe */ + /* new MP table compatible */ + /* with 64-bits arch */ +#define MP_SPEC_ADDR_ALIGN 4 /* addresses can be */ + /* word-aligned */ +#define MP_NEW_ADDR_ALIGN 8 /* all addresses should be */ + /* double-word aligned */ + +#define ALIGN_BYTES_DOWN(addr, bytes) (((addr) / (bytes)) * (bytes)) +#define ALIGN_BYTES_UP(addr, bytes) ((((addr) + (bytes)-1) / (bytes)) * \ + (bytes)) +#define MP_ALIGN_BYTES(addr, bytes) ALIGN_BYTES_UP(addr, bytes) + +#define IS_64_BIT_MP_SPECS() \ + (boot_mpf_found->mpf_specification == MPF_64_BIT_SPECIFICATION) +#define MP_ADDR_ALIGN(addr) \ + (unsigned char *)(MP_ALIGN_BYTES((unsigned long long)(addr), \ + (IS_64_BIT_MP_SPECS()) ? MP_NEW_ADDR_ALIGN : \ + MP_SPEC_ADDR_ALIGN)) +#define MP_SIZE_ALIGN(addr) \ + MP_ALIGN_BYTES((unsigned long long)(addr), \ + (IS_64_BIT_MP_SPECS()) ? MP_NEW_ADDR_ALIGN : \ + MP_SPEC_ADDR_ALIGN) +#define enable_update_mptable 0 + +struct mpc_table +{ + char mpc_signature[4]; +#define MPC_SIGNATURE "PCMP" + unsigned short mpc_length; /* Size of table */ + char mpc_spec; /* 0x01 */ + char mpc_checksum; + char mpc_oem[8]; + char mpc_productid[12]; + unsigned int mpc_oemptr; /* 0 if not present */ + unsigned short mpc_oemsize; /* 0 if not present */ + unsigned short mpc_oemcount; + unsigned int mpc_lapic; /* APIC address */ + unsigned short mpe_length; /* Extended Table size */ + unsigned char mpe_checksum; /* Extended Table checksum */ + unsigned char reserved; +}; + +/* Followed by entries */ + +#define MP_PROCESSOR 0 +#define MP_BUS 1 +#define MP_IOAPIC 2 +#define MP_INTSRC 3 +#define MP_LINTSRC 4 +#define MP_TIMER 5 +#define MP_I2C_SPI 6 +#define MP_IOLINK 7 +#define MP_PMC 8 +#define MP_BDEV 9 +#define MP_GPIO_ACT 10 +#define MP_IOEPIC 11 + +struct mpc_config_processor +{ + unsigned char mpc_type; /* MP_PROCESSOR */ + unsigned char mpc_apicid; /* Local APIC number */ + unsigned char mpc_apicver; /* Its versions */ + unsigned char mpc_cpuflag; +#define CPU_ENABLED 1 /* Processor is available */ +#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ + unsigned int mpc_cpufeature; +#define CPU_STEPPING_MASK 0x0F +#define CPU_MODEL_MASK 0xF0 +#define CPU_FAMILY_MASK 0xF00 + unsigned int mpc_featureflag; /* CPUID feature value */ + unsigned int mpc_cepictimerfreq; /* Frequency of CEPIC timer */ + unsigned int mpc_reserved; +}; + +struct mpc_config_bus +{ + unsigned char mpc_type; /* MP_BUS */ + unsigned char mpc_busid; + unsigned char mpc_bustype[6]; +}; + +/* List of Bus Type string values, Intel MP Spec. 
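+ * (these are the values carried in mpc_config_bus.mpc_bustype).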
*/ +#define BUSTYPE_EISA "EISA" +#define BUSTYPE_ISA "ISA" +#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ +#define BUSTYPE_MCA "MCA" +#define BUSTYPE_VL "VL" /* Local bus */ +#define BUSTYPE_PCI "PCI" +#define BUSTYPE_PCMCIA "PCMCIA" +#define BUSTYPE_CBUS "CBUS" +#define BUSTYPE_CBUSII "CBUSII" +#define BUSTYPE_FUTURE "FUTURE" +#define BUSTYPE_MBI "MBI" +#define BUSTYPE_MBII "MBII" +#define BUSTYPE_MPI "MPI" +#define BUSTYPE_MPSA "MPSA" +#define BUSTYPE_NUBUS "NUBUS" +#define BUSTYPE_TC "TC" +#define BUSTYPE_VME "VME" +#define BUSTYPE_XPRESS "XPRESS" + +struct mpc_ioapic +{ + unsigned char type; /* MP_IOAPIC */ + unsigned char apicid; + unsigned char apicver; + unsigned char flags; +#define MPC_APIC_USABLE 0x01 + unsigned long apicaddr; +}; + +struct mpc_ioepic { + unsigned char type; /* MP_IOEPIC */ + unsigned char epicver; + unsigned short epicid; + unsigned short nodeid; + unsigned char reserved[2]; + unsigned long epicaddr; +} __packed; + +#define MPC_IOIRQFLAG_PO_BS 0x0 /* Bus specific */ +#define MPC_IOIRQFLAG_PO_AH 0x1 /* Active high */ +#define MPC_IOIRQFLAG_PO_RES 0x2 /* Reserved */ +#define MPC_IOIRQFLAG_PO_AL 0x3 /* Active low */ + +#define MPC_IOIRQFLAG_EL_BS 0x0 /* Bus specific */ +#define MPC_IOIRQFLAG_EL_FS 0x4 /* Trigger by front */ +#define MPC_IOIRQFLAG_EL_RES 0x8 /* Reserved */ +#define MPC_IOIRQFLAG_EL_LS 0xC /* Trigger by level */ + +struct mpc_intsrc +{ + unsigned char type; /* MP_INTSRC */ + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbus; + unsigned char srcbusirq; + unsigned char dstapic; + unsigned char dstirq; +}; + +enum mp_irq_source_types { + mp_INT = 0, + mp_NMI = 1, + mp_SMI = 2, + mp_ExtINT = 3, + mp_FixINT = 4 /* fixed interrupt pin for PCI */ +}; + +#define MP_IRQDIR_DEFAULT 0 +#define MP_IRQDIR_HIGH 1 +#define MP_IRQDIR_LOW 3 + +#ifdef CONFIG_BIOS +#define MP_IRQ_POLARITY_DEFAULT 0x0 +#define MP_IRQ_POLARITY_HIGH 0x1 +#define MP_IRQ_POLARITY_LOW 0x3 +#define MP_IRQ_POLARITY_MASK 0x3 +#define MP_IRQ_TRIGGER_DEFAULT 0x0 +#define MP_IRQ_TRIGGER_EDGE 0x4 +#define MP_IRQ_TRIGGER_LEVEL 0xc +#define MP_IRQ_TRIGGER_MASK 0xc +#endif /* CONFIG_BIOS */ + + +struct mpc_config_lintsrc +{ + unsigned char mpc_type; /* MP_LINTSRC */ + unsigned char mpc_irqtype; + unsigned short mpc_irqflag; + unsigned char mpc_srcbusid; + unsigned char mpc_srcbusirq; + unsigned char mpc_destapic; +#define MP_APIC_ALL 0xFF + unsigned char mpc_destapiclint; +}; + +/* + * Default configurations + * + * 1 2 CPU ISA 82489DX + * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining + * 3 2 CPU EISA 82489DX + * 4 2 CPU MCA 82489DX + * 5 2 CPU ISA+PCI + * 6 2 CPU EISA+PCI + * 7 2 CPU MCA+PCI + */ + +#define MAX_IRQ_SOURCES (128 * MAX_NUMIOHUBS) + +/* (32 * nodes) for PCI, and one number is a special case */ +#define MAX_MP_BUSSES 256 + +enum mp_bustype { + MP_BUS_ISA = 1, + MP_BUS_EISA, + MP_BUS_PCI, + MP_BUS_MCA +}; + +/* + * IO link configurations + */ + +#define MAX_NUMIOLINKS MACH_MAX_NUMIOLINKS +#define MAX_NUMIOHUBS MAX_NUMIOLINKS +#define NODE_NUMIOLINKS MACH_NODE_NUMIOLINKS + +typedef struct mpc_config_iolink { + unsigned char mpc_type; /* type is MP_IOLINK */ + unsigned char mpc_iolink_type; /* type of IO link: IOHUB or RDMA */ + unsigned short mpc_iolink_ver; /* version of IOHUB or RDMA */ + unsigned int mpc_reserved; /* reserved */ + int node; /* number od node: 0 - 3 */ + int link; /* local number of link on node: 0-1 */ + short bus_min; /* number of root bus on IOHUB */ + short bus_max; /* number of max bus on IOHUB */ + short apicid; /* 
IO-APIC id connected to the */ + /* IOHUB */ + short mpc_reserv16; /* reserved 16-bits value */ + unsigned long pci_mem_start; /* PCI mem area for IOMMU v6 */ + unsigned long pci_mem_end; +} mpc_config_iolink_t; + +enum mp_iolink_type { + MP_IOLINK_IOHUB = 1, /* IO link is IOHUB */ + MP_IOLINK_RDMA /* IO link is RDMA controller */ +}; + +enum mp_iolink_ver { + MP_IOHUB_FPGA_VER = 0x10, /* IOHUB implemented on FPGA (Altera) */ +}; +#define MAX_MP_TIMERS 4 + +typedef struct mpc_config_timer { + unsigned char mpc_type; /* MP_TIMER */ + unsigned char mpc_timertype; + unsigned char mpc_timerver; + unsigned char mpc_timerflags; + unsigned long mpc_timeraddr; +} mpc_config_timer_t; + +enum mp_timertype { + MP_PIT_TYPE, /* programmed interval timer */ + MP_LT_TYPE, /* Elbrus iohub timer */ + MP_HPET_TYPE, /* High presicion eventualy timer */ + MP_RTC_TYPE, /* real time clock */ + MP_PM_TYPE /* power managment timer */ +}; + +#define MP_LT_VERSION 1 +#define MP_LT_FLAGS 0 + +#define MP_RTC_VER_CY14B101P 2 +#define MP_RTC_FLAG_SYNCINTR 0x01 + +typedef struct mpc_config_i2c { + unsigned char mpc_type; /* MP_I2C_SPI */ + unsigned char mpc_max_channel; + unsigned char mpc_i2c_irq; + unsigned char mpc_revision; + unsigned long mpc_i2ccntrladdr; + unsigned long mpc_i2cdataaddr; +} mpc_config_i2c_t; + +typedef struct mpc_config_pmc { + unsigned char mpc_type; /* MP_PMC */ + unsigned char mpc_pmc_type; /* Izumrud or Processor-2 */ + unsigned char mpc_pmc_version; + unsigned char mpc_pmc_vmax; /* VMAX: bits 40:34 in l_pmc.vrange */ + unsigned char mpc_pmc_vmin; /* VMIN: bits 33:27 in l_pmc.vrange */ + unsigned char mpc_pmc_fmax; /* FMAX: bits 26:20 in l_pmc.vrange */ + unsigned char reserved[2]; + unsigned long mpc_pmc_cntrl_addr; /* base of pmc regs */ + unsigned long mpc_pmc_data_addr; + unsigned int mpc_pmc_data_size; + unsigned int mpc_pmc_p_state[4]; /* VID 15:9, DID 8:4, FID 3:0 */ + unsigned int mpc_pmc_freq; /* Frequency in KHz */ +} mpc_config_pmc_t; + + + +typedef struct mpc_bdev { + unsigned char mpc_type; /* MP_BDEV */ + unsigned char mpc_bustype; /* I2C or SPI */ + unsigned char mpc_nodeid; + unsigned char mpc_linkid; + unsigned char mpc_busid; + unsigned char mpc_baddr; + unsigned char mpc_bdev_name[16]; +} mpc_bdev_t; + +#define MPC_BDEV_DTYPE_I2C 1 +#define MPC_BDEV_DTYPE_SPI 2 + +typedef struct mpc_gpio_act { + unsigned char mpc_type; /* MP_GPIO_ACT */ + unsigned char mpc_nodeid; + unsigned char mpc_linkid; + unsigned char mpc_busid; + unsigned char mpc_gpio_pin; + unsigned char mpc_pin_direction; + unsigned char mpc_gpio_act_name[16]; +} mpc_gpio_act_t; + +#define MP_GPIO_ACT_DIRECTION_IN 1 +#define MP_GPIO_ACT_DIRECTION_OUT 2 + +#ifdef __KERNEL__ +struct iohub_sysdata; +void mp_pci_add_resources(struct list_head *resources, + struct iohub_sysdata *sd); +#ifdef CONFIG_IOHUB_DOMAINS +struct iohub_sysdata; +extern int mp_find_iolink_root_busnum(int node, int link); +extern int mp_find_iolink_io_apicid(int node, int link); +extern int mp_fix_io_apicid(unsigned int src_apicid, unsigned int new_apicid); +void mp_pci_add_resources(struct list_head *resources, + struct iohub_sysdata *sd); +#else +static inline int mp_fix_io_apicid(unsigned int src_apicid, + unsigned int new_apicid) +{ + return 0; +} +#endif /* CONFIG_IOHUB_DOMAINS */ +extern int get_bus_to_io_apicid(int busnum); + +#if defined(CONFIG_MCA) || defined(CONFIG_EISA) +extern int mp_bus_id_to_type [MAX_MP_BUSSES]; +#endif + +extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + +extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; 
+extern unsigned int boot_cpu_physical_apicid; +extern int smp_found_config; +extern void find_smp_config(boot_info_t *bblock); +extern void get_smp_config(void); +extern int nr_ioapics; +extern int apic_version[MAX_LOCAL_APIC]; +extern int mp_irq_entries; +extern struct mpc_intsrc mp_irqs []; +extern int mpc_default_type; +extern unsigned long mp_lapic_addr; +extern int pic_mode; +extern int using_apic_timer; +extern mpc_config_timer_t mp_timers[MAX_MP_TIMERS]; +extern int nr_timers; +extern int rtc_model; +extern int rtc_syncintr; + +#define early_iohub_online(node, link) mach_early_iohub_online((node), (link)) +#define early_sic_init() mach_early_sic_init() +#endif /* __KERNEL__ */ + +#ifdef CONFIG_ENABLE_BIOS_MPTABLE +#define MPE_SYSTEM_ADDRESS_SPACE 0x80 +#define MPE_BUS_HIERARCHY 0x81 +#define MPE_COMPATIBILITY_ADDRESS_SPACE 0x82 + +struct mp_exten_config { + unsigned char mpe_type; + unsigned char mpe_length; +}; + +typedef struct mp_exten_config *mpe_t; + +struct mp_exten_system_address_space { + unsigned char mpe_type; + unsigned char mpe_length; + unsigned char mpe_busid; + unsigned char mpe_address_type; +#define ADDRESS_TYPE_IO 0 +#define ADDRESS_TYPE_MEM 1 +#define ADDRESS_TYPE_PREFETCH 2 + unsigned int mpe_address_base_low; + unsigned int mpe_address_base_high; + unsigned int mpe_address_length_low; + unsigned int mpe_address_length_high; +}; + +struct mp_exten_bus_hierarchy { + unsigned char mpe_type; + unsigned char mpe_length; + unsigned char mpe_busid; + unsigned char mpe_bus_info; +#define BUS_SUBTRACTIVE_DECODE 1 + unsigned char mpe_parent_busid; + unsigned char reserved[3]; +}; + +struct mp_exten_compatibility_address_space { + unsigned char mpe_type; + unsigned char mpe_length; + unsigned char mpe_busid; + unsigned char mpe_address_modifier; +#define ADDRESS_RANGE_SUBTRACT 1 +#define ADDRESS_RANGE_ADD 0 + unsigned int mpe_range_list; +#define RANGE_LIST_IO_ISA 0 + /* X100 - X3FF + * X500 - X7FF + * X900 - XBFF + * XD00 - XFFF + */ +#define RANGE_LIST_IO_VGA 1 + /* X3B0 - X3BB + * X3C0 - X3DF + * X7B0 - X7BB + * X7C0 - X7DF + * XBB0 - XBBB + * XBC0 - XBDF + * XFB0 - XFBB + * XFC0 - XCDF + */ +}; + +/* Default local apic addr */ +#define LAPIC_ADDR 0xFEE00000 + +#ifdef __KERNEL__ +void *smp_next_mpc_entry(struct mpc_table *mc); +void *smp_next_mpe_entry(struct mpc_table *mc); + +void smp_write_processor(struct mpc_table *mc, + unsigned char apicid, unsigned char apicver, + unsigned char cpuflag, unsigned int cpufeature, + unsigned int featureflag, unsigned int cepictimerfreq); +void smp_write_processors(struct mpc_table *mc, + unsigned int phys_cpu_num); +void smp_write_bus(struct mpc_table *mc, + unsigned char id, unsigned char *bustype); +void smp_write_ioapic(struct mpc_table *mc, + unsigned char id, unsigned char ver, + unsigned long apicaddr); +void smp_write_ioepic(struct mpc_table *mc, + unsigned short id, unsigned short nodeid, + unsigned char ver, unsigned long epicaddr); +void smp_write_iolink(struct mpc_table *mc, + int node, int link, + short bus_min, short bus_max, + short picid, + unsigned long pci_mem_start, unsigned long pci_mem_end); +void smp_write_intsrc(struct mpc_table *mc, + unsigned char irqtype, unsigned short irqflag, + unsigned char srcbus, unsigned char srcbusirq, + unsigned char dstapic, unsigned char dstirq); +void smp_write_lintsrc(struct mpc_table *mc, + unsigned char irqtype, unsigned short irqflag, + unsigned char srcbusid, unsigned char srcbusirq, + unsigned char destapic, unsigned char destapiclint); +void 
smp_write_address_space(struct mpc_table *mc,
+	unsigned char busid, unsigned char address_type,
+	unsigned int address_base_low, unsigned int address_base_high,
+	unsigned int address_length_low, unsigned int address_length_high);
+void smp_write_bus_hierarchy(struct mpc_table *mc,
+	unsigned char busid, unsigned char bus_info,
+	unsigned char parent_busid);
+void smp_write_compatibility_address_space(struct mpc_table *mc,
+	unsigned char busid, unsigned char address_modifier,
+	unsigned int range_list);
+unsigned char smp_compute_checksum(void *v, int len);
+void smp_write_floating_table(struct intel_mp_floating *mpf);
+unsigned int write_smp_table(struct intel_mp_floating *mpf,
+	unsigned int phys_cpu_num);
+void smp_i2c_spi_timer(struct mpc_table *mc,
+	unsigned char timertype, unsigned char timerver,
+	unsigned char timerflags, unsigned long timeraddr);
+void smp_i2c_spi_dev(struct mpc_table *mc, unsigned char max_channel,
+	unsigned char irq, unsigned long i2cdevaddr);
+//#define MAX_CPUS 16	/* 16 way CPU system */
+#endif /* __KERNEL__ */
+
+/* A table (per mainboard) listing the initial apicid of each cpu. */
+//extern unsigned int initial_apicid[MAX_CPUS];
+#endif /* CONFIG_ENABLE_BIOS_MPTABLE */
+
+int generic_processor_info(int apicid, int version);
+
+#ifdef __KERNEL__
+extern void print_bootblock(bootblock_struct_t *bootblock);
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_ACPI
+extern void mp_register_ioapic(int id, unsigned long address, u32 gsi_base);
+extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
+	u32 gsi);
+extern void mp_config_acpi_legacy_irqs(void);
+struct device;
+extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
+	int active_high_low);
+extern int acpi_probe_gsi(void);
+#ifdef CONFIG_L_IO_APIC
+extern int mp_find_ioapic(u32 gsi);
+extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
+#endif
+#else /* !CONFIG_ACPI: */
+static inline int acpi_probe_gsi(void)
+{
+	return 0;
+}
+#endif /* CONFIG_ACPI */
+
+/* physid definitions */
+/*
+ * On e2k and sparc the number of local APICs equals the number of CPUs,
+ * while the number of IO-APICs is bounded by MAX_IO_APICS.
+ * IO-APIC IDs may be placed above the local APIC IDs or in holes
+ * between them, so physid_t cannot be a synonym for cpumask_t.
+ */
+#include
+
+#define MAX_PHYSID_NUM	(NR_CPUS + MAX_IO_APICS)
+typedef struct physid_mask {
+	DECLARE_BITMAP(bits, MAX_PHYSID_NUM);
+} physid_mask_t;
+
+#define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_PHYSID_NUM)
+
+#define physid_set(physid, map)		set_bit((physid), (map).bits)
+#define physid_clear(physid, map)	clear_bit((physid), (map).bits)
+#define physid_isset(physid, map)	test_bit((physid), (map).bits)
+#define physid_test_and_set(physid, map) test_and_set_bit((physid), (map).bits)
+
+#define physids_and(dst, src1, src2) \
+	bitmap_and((dst).bits, (src1).bits, (src2).bits, MAX_PHYSID_NUM)
+
+#define physids_or(dst, src1, src2) \
+	bitmap_or((dst).bits, (src1).bits, (src2).bits, MAX_PHYSID_NUM)
+
+#define physids_clear(map) \
+	bitmap_zero((map).bits, MAX_PHYSID_NUM)
+
+#define physids_complement(dst, src) \
+	bitmap_complement((dst).bits, (src).bits, MAX_PHYSID_NUM)
+
+#define physids_empty(map) \
+	bitmap_empty((map).bits, MAX_PHYSID_NUM)
+
+#define physids_equal(map1, map2) \
+	bitmap_equal((map1).bits, (map2).bits, MAX_PHYSID_NUM)
+
+#define physids_weight(map) \
+	bitmap_weight((map).bits, MAX_PHYSID_NUM)
+
+#define physids_shift_left(dst, src, n) \
+	bitmap_shift_left((dst).bits, (src).bits, (n), MAX_PHYSID_NUM)
+
+static inline unsigned long physids_coerce(physid_mask_t *map)
+{
+	return map->bits[0];
+}
+
+static inline void physids_promote(unsigned long physids, physid_mask_t *map)
+{
+	physids_clear(*map);
+	map->bits[0] = physids;
+}
+
+static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
+{
+	physids_clear(*map);
+	physid_set(physid, *map);
+}
+
+#define PHYSID_MASK_ALL		{ {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
+#define PHYSID_MASK_NONE	{ {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
+
+extern physid_mask_t phys_cpu_present_map;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __L_ASM_MPSPEC_H */
diff --git a/arch/e2k/include/asm-l/msidef.h b/arch/e2k/include/asm-l/msidef.h
new file mode 100644
index 0000000..b3bd65d
--- /dev/null
+++ b/arch/e2k/include/asm-l/msidef.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_L_MSIDEF_H
+#define _ASM_L_MSIDEF_H
+
+/*
+ * Constants for Intel APIC based MSI messages.
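+ *
+ * Composition sketch (illustrative, by analogy with x86; not stated in
+ * the original header): a caller would typically build the data word as
+ *	MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
+ *	MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector)
+ * and the address word as
+ *	MSI_ADDR_DEST_MODE_PHYSICAL | MSI_ADDR_DEST_ID(apicid),
+ * using the shift/mask macros defined below.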
+ */ + +/* + * Shifts for MSI data + */ + +#define MSI_DATA_VECTOR_SHIFT 0 +#define MSI_DATA_VECTOR_MASK 0x000000ff +#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ + MSI_DATA_VECTOR_MASK) + +#define MSI_DATA_DELIVERY_MODE_SHIFT 8 +#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) +#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) + +#define MSI_DATA_LEVEL_SHIFT 14 +#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) +#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) + +#define MSI_DATA_TRIGGER_SHIFT 15 +#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) +#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) + +/* + * Shift/mask fields for msi address + */ + +#define MSI_ADDR_DEST_MODE_SHIFT 2 +#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT) +#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) + +#define MSI_ADDR_REDIRECTION_SHIFT 3 +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) + /* dedicated cpu */ +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) + /* lowest priority */ + +#define MSI_ADDR_DEST_ID_SHIFT 12 +#define MSI_ADDR_DEST_ID_MASK 0x00ffff0 +#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ + MSI_ADDR_DEST_ID_MASK) +#define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00) + +#define MSI_ADDR_IR_EXT_INT (1 << 4) +#define MSI_ADDR_IR_SHV (1 << 3) +#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13) +#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5) +#endif /* _ASM_L_MSIDEF_H */ diff --git a/arch/e2k/include/asm-l/mtrr.h b/arch/e2k/include/asm-l/mtrr.h new file mode 100644 index 0000000..ab526bc --- /dev/null +++ b/arch/e2k/include/asm-l/mtrr.h @@ -0,0 +1,122 @@ +/* Generic MTRR (Memory Type Range Register) ioctls. + + Copyright (C) 1997-1999 Richard Gooch + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + Richard Gooch may be reached by email at rgooch@atnf.csiro.au + The postal address is: + Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 
+*/ +#ifndef _LINUX_MTRR_H +#define _LINUX_MTRR_H + +#include + +#define MTRR_IOCTL_BASE 'M' + +struct mtrr_sentry +{ + unsigned long base; /* Base address */ + unsigned long size; /* Size of region */ + unsigned int type; /* Type of region */ +}; + +struct mtrr_gentry +{ + unsigned int regnum; /* Register number */ + unsigned long base; /* Base address */ + unsigned long size; /* Size of region */ + unsigned int type; /* Type of region */ +}; + +/* These are the various ioctls */ +#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) +#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) +#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) +#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) +#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) +#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) +#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) +#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) +#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) +#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) + +/* These are the region types */ +#define MTRR_TYPE_UNCACHABLE 0 +#define MTRR_TYPE_WRCOMB 1 +/*#define MTRR_TYPE_ 2*/ +/*#define MTRR_TYPE_ 3*/ +#define MTRR_TYPE_WRTHROUGH 4 +#define MTRR_TYPE_WRPROT 5 +#define MTRR_TYPE_WRBACK 6 +#define MTRR_NUM_TYPES 7 + +#ifdef MTRR_NEED_STRINGS +static char *mtrr_strings[MTRR_NUM_TYPES] = +{ + "uncachable", /* 0 */ + "write-combining", /* 1 */ + "?", /* 2 */ + "?", /* 3 */ + "write-through", /* 4 */ + "write-protect", /* 5 */ + "write-back", /* 6 */ +}; +#endif + +#ifdef __KERNEL__ + +/* The following functions are for use by other drivers */ +# ifdef CONFIG_MTRR +extern int mtrr_add (unsigned long base, unsigned long size, + unsigned int type, char increment); +extern int mtrr_add_page (unsigned long base, unsigned long size, + unsigned int type, char increment); +extern int mtrr_del (int reg, unsigned long base, unsigned long size); +extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); +# else +static __inline__ int mtrr_add (unsigned long base, unsigned long size, + unsigned int type, char increment) +{ + return -ENODEV; +} +static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, + unsigned int type, char increment) +{ + return -ENODEV; +} +static __inline__ int mtrr_del (int reg, unsigned long base, + unsigned long size) +{ + return -ENODEV; +} +static __inline__ int mtrr_del_page (int reg, unsigned long base, + unsigned long size) +{ + return -ENODEV; +} +# endif + +/* The following functions are for initialisation: don't use them! 
 */
+extern int mtrr_init (void);
+# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
+extern void mtrr_init_boot_cpu (void);
+extern void mtrr_init_secondary_cpu (void);
+# endif
+
+#endif
+
+#endif /* _LINUX_MTRR_H */
diff --git a/arch/e2k/include/asm-l/nmi.h b/arch/e2k/include/asm-l/nmi.h
new file mode 100644
index 0000000..8d5aedf
--- /dev/null
+++ b/arch/e2k/include/asm-l/nmi.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_L_NMI_H
+#define _ASM_L_NMI_H
+
+#include
+#include
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
+extern unsigned int nmi_watchdog;
+#define NMI_NONE	0
+#define NMI_IO_APIC	1
+#define NMI_LOCAL_APIC	2
+#define NMI_INVALID	3
+#endif
+
+void lapic_watchdog_stop(void);
+int lapic_watchdog_init(unsigned nmi_hz);
+int lapic_wd_event(unsigned nmi_hz);
+unsigned lapic_adjust_nmi_hz(unsigned hz);
+void stop_nmi(void);
+void restart_nmi(void);
+
+#endif /* _ASM_L_NMI_H */
diff --git a/arch/e2k/include/asm-l/of_device.h b/arch/e2k/include/asm-l/of_device.h
new file mode 100644
index 0000000..32a43a1
--- /dev/null
+++ b/arch/e2k/include/asm-l/of_device.h
@@ -0,0 +1,47 @@
+
+#ifndef _ASM_L_OF_DEVICE_H
+#define _ASM_L_OF_DEVICE_H
+#ifdef __KERNEL__
+
+#include
+#include
+#include
+#include
+
+/*
+ * The of_device is a kind of "base class" that is a superset of
+ * struct device for use by devices attached to an OF node and
+ * probed using OF properties.
+ */
+struct of_device;
+struct of_device
+{
+	char			name[32];
+	struct of_device	*parent;
+	struct device		dev;
+	struct device_node	*node;
+	struct resource		resource[PROMREG_MAX];
+	unsigned int		irqs[PROMINTR_MAX];
+	int			num_irqs;
+	struct proc_dir_entry	*pde;	/* this node's proc directory */
+	int			registered;
+//	void *sysdata;
+
+	int			p2s_id;
+//	int			slot;
+//	int			portid;
+	int			clock_freq;
+};
+
+extern void __iomem *of_ioremap(struct resource *res, unsigned long offset,
+				unsigned long size, char *name);
+extern void of_iounmap(struct resource *res, void __iomem *base,
+				unsigned long size);
+
+extern struct device_node **l_allnodes;
+
+#if 0
+extern int of_register_driver(struct of_platform_driver *drv,
+				struct bus_type *bus);
+extern void of_unregister_driver(struct of_platform_driver *drv);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_L_OF_DEVICE_H */
diff --git a/arch/e2k/include/asm-l/pci.h b/arch/e2k/include/asm-l/pci.h
new file mode 100644
index 0000000..2af6d42
--- /dev/null
+++ b/arch/e2k/include/asm-l/pci.h
@@ -0,0 +1,159 @@
+#ifndef _L_PCI_H
+#define _L_PCI_H
+
+#if !defined ___ASM_SPARC_PCI_H && !defined _E2K_PCI_H
+# error Do not include "asm-l/pci.h" directly, use "linux/pci.h" instead
+#endif
+
+#include
+#include
+#include
+
+#ifdef __KERNEL__
+
+#define PCI_PROBE_BIOS		0x0001
+#define PCI_PROBE_CONF1		0x0002
+#define PCI_PROBE_CONF2		0x0004
+#define PCI_PROBE_MMCONF	0x0008
+#define PCI_PROBE_L		0x0010
+#define PCI_PROBE_MASK		0x001f
+
+#define PCI_NO_SORT		0x0100
+#define PCI_BIOS_SORT		0x0200
+#define PCI_NO_CHECKS		0x0400
+#define PCI_USE_PIRQ_MASK	0x0800
+#define PCI_ASSIGN_ROMS		0x1000
+#define PCI_BIOS_IRQ_SCAN	0x2000
+#define PCI_ASSIGN_ALL_BUSSES	0x4000
+
+#undef CONFIG_CMD
+#define CONFIG_CMD(bus, devfn, where) \
+	(((bus & 0xFF) << 20) | ((devfn & 0xFF) << 12) | (where & 0xFFF))
+
+#define L_IOHUB_ROOT_BUS_NUM	0x00
+#define L_IOHUB_ROOT_SLOT	0x00	/* BSP IOHUB start slot (devfn) */
+					/* on root bus 0 */
+#define SLOTS_PER_L_IOHUB	4	/* number of slots reserved for */
+					/* each IOHUB */
+
+#ifndef L_IOHUB_SLOTS_NUM
+#define L_IOHUB_SLOTS_NUM	2	/* number of slots (devfns) for */
+					/* each IOHUB on root bus */
+#endif
+
+extern int IOHUB_revision;
+static inline
int is_prototype(void)
+{
+	return IOHUB_revision >= 0xf0;
+}
+
+extern unsigned long pirq_table_addr;
+struct e2k_iommu;
+struct pci_dev;
+struct pci_bus;
+enum pci_mmap_state;
+struct pci_ops;
+
+typedef struct iohub_sysdata {
+#ifdef CONFIG_IOHUB_DOMAINS
+	int	domain;		/* IOHUB (PCI) domain */
+	int	node;		/* NUMA node */
+	int	link;		/* local number of IO link on the node */
+#endif /* CONFIG_IOHUB_DOMAINS */
+	u32	pci_msi_addr_lo; /* MSI transaction address */
+	u32	pci_msi_addr_hi; /* MSI transaction upper address */
+	u8	revision;	/* IOHUB revision */
+	u8	generation;	/* IOHUB generation */
+
+	struct resource mem_space;
+	void *l_iommu;
+} iohub_sysdata_t;
+
+#define iohub_revision(pdev) ({ \
+	struct iohub_sysdata *sd = pdev->bus->sysdata; \
+	(sd->revision >> 1); \
+})
+#define iohub_generation(pdev) ({ \
+	struct iohub_sysdata *sd = pdev->bus->sysdata; \
+	sd->generation; \
+})
+
+#ifdef CONFIG_IOHUB_DOMAINS
+
+#define pci_domain_nr(bus) ({ \
+	struct iohub_sysdata *sd = bus->sysdata; \
+	sd->domain; \
+})
+
+#define pci_proc_domain(bus)	pci_domain_nr(bus)
+
+static inline int pci_iohub_domain_to_slot(const int domain)
+{
+	return L_IOHUB_ROOT_SLOT + domain * SLOTS_PER_L_IOHUB;
+}
+/* Returns the node based on pci bus */
+#define __pcibus_to_node(bus) ({ \
+	const struct iohub_sysdata *sd = bus->sysdata; \
+	sd->node; \
+})
+#define __pcibus_to_link(bus) ({ \
+	const struct iohub_sysdata *sd = bus->sysdata; \
+	sd->link; \
+})
+
+#else /* ! CONFIG_IOHUB_DOMAINS */
+#define __pcibus_to_node(bus)	0	/* only one IOHUB on node #0 */
+#define __pcibus_to_link(bus)	0
+#endif /* CONFIG_IOHUB_DOMAINS */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+   already-configured bus numbers - to be used for buggy BIOSes
+   or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses()	0
+#endif
+#define pcibios_scan_all_fns(a, b)	0
+
+/* The next function lives in drivers/pci/probe.c and is updated */
+/* only to support common root bus domains. */
+unsigned int pci_scan_root_child_bus(struct pci_bus *bus);
+
+struct pci_bus *pcibios_scan_root(int bus);
+
+/* scan a bus after allocating an iohub_sysdata for it */
+extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
+						int node);
+
+void __init pcibios_fixup_resources(struct pci_bus *pbus);
+int pcibios_enable_resources(struct pci_dev *, int);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq, int active);
+int l_pci_direct_init(void);
+
+extern int (*pcibios_enable_irq)(struct pci_dev *dev);
+extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+
+extern raw_spinlock_t pci_config_lock;
+
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+			   size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+			    size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
+
+#ifndef L_IOPORT_RESOURCE_OFFSET
+#define L_IOPORT_RESOURCE_OFFSET	0UL
+#endif
+#ifndef L_IOMEM_RESOURCE_OFFSET
+#define L_IOMEM_RESOURCE_OFFSET		0UL
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _L_PCI_H */
diff --git a/arch/e2k/include/asm-l/pci_l.h b/arch/e2k/include/asm-l/pci_l.h
new file mode 100644
index 0000000..c63a367
--- /dev/null
+++ b/arch/e2k/include/asm-l/pci_l.h
@@ -0,0 +1,7 @@
+#ifndef _L_PCI_L_H
+#define _L_PCI_L_H
+
+extern unsigned int pci_probe;
+
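+/*
+ * Note (an assumption by analogy with x86, not stated in this header):
+ * pci_probe presumably carries the PCI_PROBE_xxx and PCI_xxx flags
+ * defined in asm-l/pci.h that select how PCI config space is accessed.
+ */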
+#endif + diff --git a/arch/e2k/include/asm-l/pcie_fixup.h b/arch/e2k/include/asm-l/pcie_fixup.h new file mode 100644 index 0000000..f0e69ef --- /dev/null +++ b/arch/e2k/include/asm-l/pcie_fixup.h @@ -0,0 +1,34 @@ +#ifndef _ASM_L_PCIE_FIXUP_H_ +#define _ASM_L_PCIE_FIXUP_H_ + +#undef memset_io +#define memset_io(a,b,c) \ +({ \ + u64 i; \ + for (i = 0; i != (c); i++) { \ + writeb((b), (u8 *)(a) + i); \ + readb((u8 *)(a) + i); \ + } \ +}) + +#undef memcpy_fromio +#define memcpy_fromio(a,b,c) \ +({ \ + u64 i; \ + for (i = 0; i != (c); i++) { \ + u8 t = readb((u8 *)(b) + i); \ + *((u8 *)(a) + i) = t; \ + } \ +}) + +#undef memcpy_toio +#define memcpy_toio(a,b,c) \ +({ \ + u64 i; \ + for (i = 0; i != (c); i++) { \ + writeb(*((u8 *)(b) + i), (u8 *)(a) + i); \ + readb((u8 *)(a) + i); \ + } \ +}) + +#endif /*_ASM_L_PCIE_FIXUP_H_*/ diff --git a/arch/e2k/include/asm-l/percpu.h b/arch/e2k/include/asm-l/percpu.h new file mode 100644 index 0000000..341dfa1 --- /dev/null +++ b/arch/e2k/include/asm-l/percpu.h @@ -0,0 +1,67 @@ +#ifndef _ASM_L_PERCPU_H_ +#define _ASM_L_PERCPU_H_ + +#ifdef CONFIG_SMP + +/* + * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu + * variables that are initialized and accessed before there are per_cpu + * areas allocated. + */ + +#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ + DEFINE_PER_CPU(_type, _name) = _initvalue; \ + __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ + { [0 ... NR_CPUS-1] = _initvalue }; \ + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map + +#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ + DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \ + __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ + { [0 ... NR_CPUS-1] = _initvalue }; \ + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map + +#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ + EXPORT_PER_CPU_SYMBOL(_name) + +#define DECLARE_EARLY_PER_CPU(_type, _name) \ + DECLARE_PER_CPU(_type, _name); \ + extern __typeof__(_type) *_name##_early_ptr; \ + extern __typeof__(_type) _name##_early_map[] + +#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ + DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \ + extern __typeof__(_type) *_name##_early_ptr; \ + extern __typeof__(_type) _name##_early_map[] + +#define early_per_cpu_ptr(_name) (_name##_early_ptr) +#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) +#define early_per_cpu(_name, _cpu) \ + *(early_per_cpu_ptr(_name) ? 
\ + &early_per_cpu_ptr(_name)[_cpu] : \ + &per_cpu(_name, _cpu)) + +#else /* !CONFIG_SMP */ +#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ + DEFINE_PER_CPU(_type, _name) = _initvalue + +#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ + DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue + +#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ + EXPORT_PER_CPU_SYMBOL(_name) + +#define DECLARE_EARLY_PER_CPU(_type, _name) \ + DECLARE_PER_CPU(_type, _name) + +#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ + DECLARE_PER_CPU_READ_MOSTLY(_type, _name) + +#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) +#define early_per_cpu_ptr(_name) NULL +/* no early_per_cpu_map() */ + +#endif /* !CONFIG_SMP */ + +#endif /* _ASM_L_PERCPU_H_ */ + diff --git a/arch/e2k/include/asm-l/pic.h b/arch/e2k/include/asm-l/pic.h new file mode 100644 index 0000000..74ff414 --- /dev/null +++ b/arch/e2k/include/asm-l/pic.h @@ -0,0 +1,343 @@ +#ifndef __ASM_L_PIC_H +#define __ASM_L_PIC_H + +/* + * Choose between PICs in arch/l. If CONFIG_EPIC=n, APIC is chosen statically + * If CONFIG_EPIC=y (only on e2k), choose dynamically based on CPU_FEAT_EPIC + */ + +extern int first_system_vector; +extern int apic_get_vector(void); + +#ifdef CONFIG_EPIC + +#include +#include +#include + + +static inline unsigned int read_pic_id(void) +{ + if (cpu_has_epic()) + return read_epic_id(); + else + return read_apic_id(); +} + +extern void epic_processor_info(int epicid, int version, + unsigned int cepic_freq); +extern int generic_processor_info(int apicid, int version); +static inline void pic_processor_info(int picid, int picver, unsigned int freq) +{ + if (cpu_has_epic()) + epic_processor_info(picid, picver, freq); + else + generic_processor_info(picid, picver); +} + +extern int get_cepic_timer_frequency(void); +static inline int get_pic_timer_frequency(void) +{ + if (cpu_has_epic()) + return get_cepic_timer_frequency(); + else + return -1; /* standard constant value */ +} + + +/* IO-APIC definitions */ +struct irq_data; +extern void ioapic_ack_epic_edge(struct irq_data *data); +extern void ack_apic_edge(struct irq_data *data); +static inline void ioapic_ack_pic_edge(struct irq_data *data) +{ + if (cpu_has_epic()) + ioapic_ack_epic_edge(data); + else + ack_apic_edge(data); +} + +extern void ioapic_ack_epic_level(struct irq_data *data); +extern void ack_apic_level(struct irq_data *data); +static inline void ioapic_ack_pic_level(struct irq_data *data) +{ + if (cpu_has_epic()) + ioapic_ack_epic_level(data); + else + ack_apic_level(data); +} + +struct irq_chip; +extern struct irq_chip ioepic_to_apic_chip; +static inline bool irqchip_is_ioepic_to_apic(struct irq_chip *chip) +{ + return chip == &ioepic_to_apic_chip; +} + +/* IRQ definitions */ +#ifdef CONFIG_IRQ_WORK +extern void epic_irq_work_raise(void); +extern void apic_irq_work_raise(void); +static inline void pic_irq_work_raise(void) +{ + if (cpu_has_epic()) + epic_irq_work_raise(); + else + apic_irq_work_raise(); +} +#endif + +#ifdef CONFIG_SMP +extern void epic_send_call_function_ipi_mask(const struct cpumask *mask); +extern void apic_send_call_function_ipi_mask(const struct cpumask *mask); +static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask) +{ + if (cpu_has_epic()) + epic_send_call_function_ipi_mask(mask); + else + apic_send_call_function_ipi_mask(mask); +} + +extern void epic_send_call_function_single_ipi(int cpu); +extern void apic_send_call_function_single_ipi(int cpu); +static inline void 
pic_send_call_function_single_ipi(int cpu) +{ + if (cpu_has_epic()) + epic_send_call_function_single_ipi(cpu); + else + apic_send_call_function_single_ipi(cpu); +} + +extern void epic_smp_send_reschedule(int cpu); +extern void apic_smp_send_reschedule(int cpu); +static inline void pic_send_reschedule(int cpu) +{ + if (cpu_has_epic()) + epic_smp_send_reschedule(cpu); + else + apic_smp_send_reschedule(cpu); +} +#endif + +struct pt_regs; +extern noinline notrace void epic_do_nmi(struct pt_regs *regs); +extern noinline notrace void apic_do_nmi(struct pt_regs *regs); +static inline void pic_do_nmi(struct pt_regs *regs) +{ + if (cpu_has_epic()) + epic_do_nmi(regs); + else + apic_do_nmi(regs); +} + +static inline void ack_pic_irq(void) +{ + if (cpu_has_epic()) + ack_epic_irq(); + else + ack_APIC_irq(); +} + +/* For do_postpone_tick() */ +extern void cepic_timer_interrupt(void); +extern void local_apic_timer_interrupt(void); +static inline void local_pic_timer_interrupt(void) +{ + if (cpu_has_epic()) + cepic_timer_interrupt(); + else + local_apic_timer_interrupt(); +} + +extern int print_local_APICs(bool force); +extern int print_epics(bool force); +static inline int print_local_pics(bool force) +{ + if (cpu_has_epic()) + return print_epics(force); + else + return print_local_APICs(force); +} + +struct pci_dev; +extern int native_setup_msi_irqs_epic(struct pci_dev *dev, int nvec, int type); +extern int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type); +static inline int setup_msi_irqs_pic(struct pci_dev *dev, int nvec, int type) +{ + if (cpu_has_epic()) + return native_setup_msi_irqs_epic(dev, nvec, type); + else + return native_setup_msi_irqs_apic(dev, nvec, type); +} + +extern void native_teardown_msi_irq_epic(unsigned int irq); +extern void native_teardown_msi_irq_apic(unsigned int irq); +static inline void teardown_msi_irq_pic(unsigned int irq) +{ + if (cpu_has_epic()) + native_teardown_msi_irq_epic(irq); + else + native_teardown_msi_irq_apic(irq); +} + +extern void __init_recv setup_secondary_epic_clock(void); +extern void setup_secondary_APIC_clock(void); +static inline void __init_recv setup_secondary_pic_clock(void) +{ + if (cpu_has_epic()) + setup_secondary_epic_clock(); + else + setup_secondary_APIC_clock(); +} + +extern int epic_get_vector(void); +static inline int pic_get_vector(void) +{ + if (cpu_has_epic()) + return epic_get_vector(); + else + return apic_get_vector(); +} + +extern int ioepic_pin_to_irq_num(unsigned int pin, struct pci_dev *dev); +extern int ioepic_pin_to_msi_ioapic_irq(unsigned int pin, struct pci_dev *dev); +static inline int ioepic_pin_to_irq_pic(unsigned int pin, struct pci_dev *dev) +{ + if (cpu_has_epic()) + return ioepic_pin_to_irq_num(pin, dev); + else + return ioepic_pin_to_msi_ioapic_irq(pin, dev); +} + +static inline void __init setup_boot_pic_clock(void) +{ + if (cpu_has_epic()) + setup_boot_epic_clock(); + else + setup_boot_APIC_clock(); +} + +extern void __init init_apic_mappings(void); +static inline void __init init_pic_mappings(void) +{ + if (!cpu_has_epic()) + return init_apic_mappings(); +} + +extern void setup_cepic(void); + +#else /* !(CONFIG_EPIC) */ + +#include + +static inline unsigned int read_pic_id(void) +{ + return read_apic_id(); +} + +extern int generic_processor_info(int apicid, int version); +static inline void pic_processor_info(int picid, int picver, unsigned int freq) +{ + generic_processor_info(picid, picver); +} + +static inline int get_pic_timer_frequency(void) +{ + return -1; /* standard constant value */ +} + 
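+/*
+ * Usage sketch (illustrative addition, not from the original header):
+ * code that must run both with and without CONFIG_EPIC calls the
+ * pic_*() wrappers; the hypothetical helper below registers the
+ * calling CPU with whichever interrupt controller is present.
+ */
+static inline void pic_register_self(int picver, unsigned int freq)
+{
+	pic_processor_info(read_pic_id(), picver, freq);
+}
+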
+/* IO-APIC definitions */ +struct irq_data; +extern void ack_apic_edge(struct irq_data *data); +static inline void ioapic_ack_pic_edge(struct irq_data *data) +{ + ack_apic_edge(data); +} + +extern void ack_apic_level(struct irq_data *data); +static inline void ioapic_ack_pic_level(struct irq_data *data) +{ + ack_apic_level(data); +} + +struct irq_chip; +static inline bool irqchip_is_ioepic_to_apic(struct irq_chip *chip) +{ + return 0; +} + +/* IRQ definitions */ +extern void apic_irq_work_raise(void); +static inline void pic_irq_work_raise(void) +{ + apic_irq_work_raise(); +} + +extern void apic_send_call_function_ipi_mask(const struct cpumask *mask); +static inline void pic_send_call_function_ipi_mask(const struct cpumask *mask) +{ + apic_send_call_function_ipi_mask(mask); +} + +extern void apic_send_call_function_single_ipi(int cpu); +static inline void pic_send_call_function_single_ipi(int cpu) +{ + apic_send_call_function_single_ipi(cpu); +} + +extern void apic_smp_send_reschedule(int cpu); +static inline void pic_send_reschedule(int cpu) +{ + apic_smp_send_reschedule(cpu); +} + +struct pt_regs; +extern noinline notrace void apic_do_nmi(struct pt_regs *regs); +static inline void pic_do_nmi(struct pt_regs *regs) +{ + apic_do_nmi(regs); +} + +static inline void ack_pic_irq(void) +{ + ack_APIC_irq(); +} + +/* For do_postpone_tick() */ +extern void local_apic_timer_interrupt(void); +static inline void local_pic_timer_interrupt(void) +{ + local_apic_timer_interrupt(); +} + +extern int print_local_APICs(bool force); +static inline int print_local_pics(bool force) +{ + return print_local_APICs(force); +} + +struct pci_dev; +extern int native_setup_msi_irqs_apic(struct pci_dev *dev, int nvec, int type); +static inline int setup_msi_irqs_pic(struct pci_dev *dev, int nvec, int type) +{ + return native_setup_msi_irqs_apic(dev, nvec, type); +} + +extern void native_teardown_msi_irq_apic(unsigned int irq); +static inline void teardown_msi_irq_pic(unsigned int irq) +{ + native_teardown_msi_irq_apic(irq); +} + +static inline void __init setup_boot_pic_clock(void) +{ + setup_boot_APIC_clock(); +} + +extern void __init init_apic_mappings(void); +static inline void __init init_pic_mappings(void) +{ + return init_apic_mappings(); +} +#endif /* !(CONFIG_EPIC) */ +#endif /* __ASM_L_PIC_H */ diff --git a/arch/e2k/include/asm-l/serial.h b/arch/e2k/include/asm-l/serial.h new file mode 100644 index 0000000..93d673b --- /dev/null +++ b/arch/e2k/include/asm-l/serial.h @@ -0,0 +1,460 @@ +/* + * include/asm-l/serial.h + */ +#ifndef _L_SERIAL_H +#define _L_SERIAL_H + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. 
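+ *
+ * For reference: with the standard 1.8432 MHz clock and the 16550's
+ * fixed divide-by-16 prescaler, BASE_BAUD below works out to
+ * 1843200 / 16 = 115200.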
+ */ + +#define BASE_BAUD ( 1843200 / 16 ) + +/* Standard COM flags (except for COM4, because of the 8514 problem) */ +#ifdef CONFIG_SERIAL_DETECT_IRQ +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) +#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) +#else +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) +#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF +#endif + +#ifdef CONFIG_SERIAL_MANY_PORTS +#define FOURPORT_FLAGS ASYNC_FOURPORT +#define ACCENT_FLAGS 0 +#define BOCA_FLAGS 0 +#define HUB6_FLAGS 0 +#define RS_TABLE_SIZE 64 +#else +#define RS_TABLE_SIZE +#endif + +#define NS16550_SERIAL_PORT_0 0x3f8 +#define NS16550_SERIAL_PORT_1 0x2f8 +#define NS16550_SERIAL_PORT_2 0x3e8 +#define NS16550_SERIAL_PORT_3 0x2e8 + +#ifdef CONFIG_E2K +#define SERIAL_PORT_DFNS \ + /* UART CLK PORT IRQ FLAGS */ \ + { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ + { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ + { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ + { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ +#endif + +#define AM85C30_RES_Tx_P 0x28 +#define AM85C30_EXT_INT_ENAB 0x01 +#define AM85C30_TxINT_ENAB 0x02 +#define AM85C30_RxINT_MASK 0x18 + +/* AM85C30 WRITE Registers */ + +#define AM85C30_WR0 0x00 +#define AM85C30_WR1 0x01 +#define AM85C30_WR2 0x02 +#define AM85C30_WR3 0x03 +#define AM85C30_WR4 0x04 +#define AM85C30_WR5 0x05 +#define AM85C30_WR6 0x06 +#define AM85C30_WR7 0x07 +#define AM85C30_WR8 0x08 +#define AM85C30_WR9 0x09 +#define AM85C30_WR10 0x0a +#define AM85C30_WR11 0x0b +#define AM85C30_WR12 0x0c +#define AM85C30_WR13 0x0d +#define AM85C30_WR14 0x0e +#define AM85C30_WR15 0x0f + +/* READ (Status) Registers */ + +#define AM85C30_RR0 0x00 +#define AM85C30_RR1 0x01 +#define AM85C30_RR2 0x02 +#define AM85C30_RR3 0x03 +#define AM85C30_RR8 0x08 +#define AM85C30_RR10 0x0a +#define AM85C30_RR12 0x0c +#define AM85C30_RR13 0x0d + +#define AM85C30_D0 (0x01 << 0) +#define AM85C30_D1 (0x01 << 1) +#define AM85C30_D2 (0x01 << 2) +#define AM85C30_D3 (0x01 << 3) +#define AM85C30_D4 (0x01 << 4) +#define AM85C30_D5 (0x01 << 5) +#define AM85C30_D6 (0x01 << 6) +#define AM85C30_D7 (0x01 << 7) + +/* WR0 */ +/* D2,D1,D0 +* Register Access Pointer +* +* 000 - N0, [N8]* +* 001 - N1, [N9]* +* 010 - N2, [N10]* +* 011 - N3, [N11]* +* 100 - N4, [N12]* +* 101 - N5, [N13]* +* 110 - N6, [N14]* +* 111 - N7, [N15]* +* +* if Point High Register Group = 1 +* +* D5,D4,D3 +* +* SCC Command +* +* 000 - Null Code +* 001 - Point High Register Group +* 010 - Reset Ext/Status Interrupts +* 011 - Send Abort +* 100 - Enable Int. on Next Rx Character +* 101 - Reset Tx Int. Pending +* 110 - Error Reset +* 111 - Reset Highest IUS +* +* D7,D6 +* SCC Command +* +* 00 - Null Code +* 01 - Reset Rx CRC Checker +* 10 - Reset Tx CRC Generator +* 11 - Reset Tx Underrun/EOM Latch +*/ + +/* WR1 */ +/* D0 +* Ext. Int. Enable +* D1 +* Tx Int. Enable +* D2 +* Parity is Special Condition +* D4,D3 +* Rx Int Mode +* +* 00 - Rx Int Disable +* 01 - Rx Int on First Char. or Special Condition +* 10 - Int on All Rx Char. or Special Condition +* 11 - Rx Int. 
on Special Condition Only +* D5 +* Wait/DMA Request on Receive/Transmit +* D6 +* Wait/DMA Request Function +* D7 +* Wait/DMA Request Enable +*/ + +/* WR2 */ +/* D7 - D0 +* Interrupt Vector +*/ + +/* WR3 */ +/* D0 +* Rx Enable +* D1 +* Sync Character Load Inhibit +* D2 +* Address Search Mode (SDLC) +* D3 +* Rx CRC Enable +* D4 +* Enter Hunt Mode +* D5 +* Auto Enable +* D7,D6 +* +* 00 - Rx 5 Bits / Character +* 01 - Rx 6 Bits / Character +* 10 - Rx 7 Bits / Character +* 11 - Rx 8 Bits / Character +*/ + +/* WR4 */ +/* D0 +* ParityEnable +* D1 +* Parity Even(0) / Odd(1) +* D3,D2 +* +* 00 - Sync Modes Enable +* 01 - 1 Stop Bit / Character +* 10 - 1.5 Stop Bits / Character +* 11 - 2 Stop Bits / Character +* D5,D4 +* +* 00 - 8-Bit Sync Character +* 01 - 16-Bit Sync Character +* 10 - SDLC Mode +* 11 - External Sync Mode +* D7,D6 +* +* 00 - X1 Clock Mode +* 01 - X16 Clock Mode +* 10 - X32 Clock Mode +* 11 - X64 Clock Mode +*/ + +/* WR5 */ +/* D0 +* Tx CRC Enable +* D1 +* RTS +* D2 +* SDLC-/CRC-16 +* D3 +* Tx Enable +* D4 +* Send Break +* D6,D5 +* +* 00 - Tx 5 Bits / Character +* 01 - Tx 6 Bits / Character +* 10 - Tx 7 Bits / Character +* 11 - Tx 8 Bits / Character +* D7 +* DTR +*/ + +/* WR6 */ +/* D5-D0 +* xN constant +* D7,D6 +* Reserved (not used in asynchronous mode) +*/ + +/* WR7 */ +/* D6-D0 +* Reserved (not used in asynchronous mode) +* D7 +* xN Mode Enable +*/ + +/* WR8 */ +/* D7-D0 +* Transmit Buffer +*/ + +/* WR9 */ +/* D0 +* Vector Includes Status +* D1 +* No Vector +* D2 +* Disable Lower Chain +* D3 +* Master Interrupt Enable +* D4 +* Status High/Low_ +* D5 +* Interrupt Masking Without INTACK_ +* D7-D6 +* +* 00 - No Reset +* 01 - Channel B Reset +* 10 - Channel A Reset +* 11 - Force Hardware Reset +*/ + +/* WR10 */ +/* D0 +* 6 bit / 8 bit SYNC +* D1 +* Loop Mode +* D2 +* Abort/Flag on Underrun +* D3 +* Mark/Flag Idle +* D4 +* Go Active on Poll +* D6-D5 +* +* 00 - NRZ +* 01 - NRZI +* 10 - FM1 (Transition = 1) +* 11 - FM0 (Transition = 0) +* D7 +* CRC Preset '1' or '0' +*/ + +/* WR11 */ +/* D1-D0 +* +* 00 - TRxC Out = XTAL output +* 01 - TRxC Out = Transmit Clock +* 10 - TRxC Out = BRG output +* 11 - TRxC Out = DPLL output +* D2 +* TRxC O/I +* D4-D3 +* +* 00 - Transmit Clock = RTxC pin +* 01 - Transmit Clock = TRxC pin +* 10 - Transmit Clock = BRG output +* 11 - Transmit Clock = DPLL output +* D6-D5 +* +* 00 - Receive Clock = RTxC pin +* 01 - Receive Clock = TRxC pin +* 10 - Receive Clock = BRG output +* 11 - Receive Clock = DPLL output +* D7 +* RTxC XTAL / NO XTAL +*/ + +/* WR12 */ +/* D7-D0 +* Lower Byte of Time Constant +*/ + +/* WR13 */ +/* D7-D0 +* Upper Byte of Time Constant +*/ + +/* WR14 */ +/* D0 +* BRG Enable +* D1 +* BRG Source +* D2 +* DTR / REQUESTt Function +* D3 +* Auto Echo +* D4 +* Local Loopback +* D7-D5 +* +* 000 - Null Command +* 001 - Enter Search Mode +* 010 - Reset Missing Clock +* 011 - Disable DPLL +* 100 - Set Source = BR Generator +* 101 - Set Source = RTxC_ +* 110 - Set FM Mode +* 111 - Set NRZI Mode +*/ + +/* WR15 */ +/* D0 +* SDLC/HDLC Enhancement Enable +* D1 +* Zero Count IE (Interrupt Enable) +* D2 +* 10 * 19-bit Frame Status FIFO Enable +* D3 +* DCD IE +* D4 +* Sync/Hunt IE +* D5 +* CTS IE +* D6 +* Tx Underrun / EOM IE +* D7 +* Break/Abort IE +*/ + + +/* RR0 */ +/* D0 +* Rx Character Availiable +* D1 +* Zero Count +* D2 +* Tx Buffer Empty +* D3 +* DCD +* D4 +* Sync/Hunt +* D5 +* CTS +* D6 +* Tx Underrun / EOM +* D7 +* Break/Abort +*/ + +/* RR1 */ +/* D0 +* All Sent +* D1 +* Residue Code 2 +* D2 +* Residue Code 1 +* D3 +* Residue Code 0 +* D4 +* Parity 
Error +* D5 +* Rx Overrun Error +* D6 +* CRC / Framing Error +* D7 +* End of Frame (SDLC) +*/ + +/* RR2 */ +/* D7-D0 +* Interrupt Vector +* +* Channel A RR2 = WR2 +* Channel B RR2 = Interrupt Vector Modified* +* +* * +* D3 D2 D1 Status High/Low = 0 +* D4 D5 D6 Status High/Low = 1 +* +* 0 0 0 Ch B Transmit Buffer Empty +* 0 0 1 Ch B External/Status Change +* 0 1 0 Ch B Receive Char. Availiable +* 0 1 1 Ch B Special Receive Condition +* 1 0 0 Ch A Transmit Buffer Empty +* 1 0 1 Ch A External/Status Change +* 1 1 0 Ch A Receive Char. Availiable +* 1 1 1 Ch A Special Receive Condition +*/ + +/* RR3 */ +/* D0 +* Channel B Ext/Status IP (Interrupt Pending) +* D1 +* Channel B Tx IP +* D2 +* Channel B Rx IP +* D3 +* Channel A Ext/Status IP +* D4 +* Channel A Tx IP +* D5 +* Channel A Rx IP +* D7-D6 +* Always 00 +*/ + +/* RR8 */ +/* D7-D0 +* Receive Buffer +*/ + +/* RR10 */ +/* D7-D0 +* Reserved (not used in asynchronous mode) +*/ + +/* RR12 */ +/* D7-D0 +* Lower Byte of Time Constant +*/ + +/* RR13 */ +/* D7-D0 +* Upper Byte of Time Constant +*/ + +#endif /* ! _L_SERIAL_H */ diff --git a/arch/e2k/include/asm-l/setup.h b/arch/e2k/include/asm-l/setup.h new file mode 100644 index 0000000..62fe6fb --- /dev/null +++ b/arch/e2k/include/asm-l/setup.h @@ -0,0 +1,12 @@ +#ifndef _L_SETUP_H +#define _L_SETUP_H + +#include + +int l_set_ethernet_macaddr(struct pci_dev *pdev, char *macaddr); +extern int (*l_set_boot_mode)(int); + +int l_setup_arch(void); +void l_setup_vga(void); +unsigned long measure_cpu_freq(int cpu); +#endif /* _L_SETUP_H */ diff --git a/arch/e2k/include/asm-l/sic_regs.h b/arch/e2k/include/asm-l/sic_regs.h new file mode 100644 index 0000000..c468b7f --- /dev/null +++ b/arch/e2k/include/asm-l/sic_regs.h @@ -0,0 +1,332 @@ + +#ifndef _L_SIC_REGS_H_ +#define _L_SIC_REGS_H_ + +#ifdef __KERNEL__ + +#include + +#include +#include + +#undef DEBUG_ERALY_NBSR_MODE +#undef DebugENBSR +#define DEBUG_ERALY_NBSR_MODE 0 /* early NBSR access */ +#ifndef CONFIG_BOOT_E2K +#define DebugENBSR(fmt, args...) \ + ({ if (DEBUG_ERALY_NBSR_MODE) \ + printk(fmt, ##args); }) +#else /* CONFIG_BOOT_E2K */ +#define DebugENBSR(fmt, args...) \ + ({ if (DEBUG_ERALY_NBSR_MODE) \ + rom_printk(fmt, ##args); }) +#endif /* ! CONFIG_BOOT_E2K */ + +#undef DEBUG_NBSR_MODE +#undef DebugNBSR +#define DEBUG_NBSR_MODE 0 /* NBSR access */ +#define DebugNBSR(fmt, args...) 
\ + ({ if (DEBUG_NBSR_MODE) \ + printk(fmt, ##args); }) + +#ifndef __ASSEMBLY__ + +static inline unsigned int +early_sic_read_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + unsigned char *addr; + unsigned int reg_value; + + node_nbsr = THE_NODE_NBSR_PHYS_BASE(node_id); + addr = node_nbsr + reg_offset; + reg_value = nbsr_early_read(addr); + DebugENBSR("early_sic_read_node_nbsr_reg() node %d reg 0x%x read 0x%x " + "from 0x%px\n", + node_id, reg_offset, reg_value, addr); + return reg_value; +} + +static inline void +early_sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_val) +{ + unsigned char *node_nbsr; + unsigned char *addr; + + node_nbsr = THE_NODE_NBSR_PHYS_BASE(node_id); + DebugENBSR("early_sic_write_node_nbsr_reg() node NBSR is %px\n", + node_nbsr); + addr = node_nbsr + reg_offset; + nbsr_early_write(reg_val, addr); + DebugENBSR("early_sic_write_node_nbsr_reg() node %d reg 0x%x write " + "0x%x to 0x%px\n", + node_id, reg_offset, reg_val, addr); +} + +static inline unsigned int +early_sic_read_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset) +{ + unsigned int reg_value; + +#ifndef CONFIG_BOOT_E2K + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() bad IO link " + "# %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return (unsigned int)-1; + } +#endif /* ! CONFIG_BOOT_E2K */ + reg_value = early_sic_read_node_nbsr_reg(node_id, + SIC_io_reg_offset(io_link, reg_offset)); + return reg_value; +} + +static inline void +early_sic_write_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset, + unsigned int reg_value) +{ +#ifndef CONFIG_BOOT_E2K + if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) { + printk(KERN_ERR "early_sic_write_node_iolink_nbsr_reg() bad " + "IO link # %d (< 0 or >= max %d)\n", + io_link, MACH_NODE_NUMIOLINKS); + return; + } +#endif /* ! 
CONFIG_BOOT_E2K */
+	early_sic_write_node_nbsr_reg(node_id,
+		SIC_io_reg_offset(io_link, reg_offset), reg_value);
+}
+
+static inline unsigned int
+sic_read_node_nbsr_reg(int node_id, int reg_offset)
+{
+	unsigned char *node_nbsr;
+	unsigned int reg_value;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_read_node_nbsr_reg() node #%d has no mapping "
+			"to SIC(NBSR) registers\n", node_id);
+	}
+	reg_value = nbsr_read(&node_nbsr[reg_offset]);
+	DebugNBSR("sic_read_node_nbsr_reg() node %d reg 0x%x read 0x%x "
+		"from 0x%px\n",
+		node_id, reg_offset, reg_value,
+		&node_nbsr[reg_offset]);
+	return reg_value;
+}
+
+static inline unsigned long
+sic_readll_node_nbsr_reg(int node_id, int reg_offset)
+{
+	unsigned char *node_nbsr;
+	unsigned long reg_value;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_readll_node_nbsr_reg() node #%d has no mapping "
+			"to SIC(NBSR) registers\n", node_id);
+	}
+	reg_value = nbsr_readll(&node_nbsr[reg_offset]);
+	DebugNBSR("sic_readll_node_nbsr_reg() node %d reg 0x%x read 0x%lx "
+		"from 0x%px\n",
+		node_id, reg_offset, reg_value,
+		&node_nbsr[reg_offset]);
+	return reg_value;
+}
+
+static inline u16
+sic_readw_node_nbsr_reg(int node_id, int reg_offset)
+{
+	unsigned char *node_nbsr;
+	u16 reg_value;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_readw_node_nbsr_reg() node #%d has no mapping "
+			"to SIC(NBSR) registers\n", node_id);
+	}
+	reg_value = nbsr_readw(&node_nbsr[reg_offset]);
+	DebugNBSR("sic_readw_node_nbsr_reg() node %d reg 0x%x read 0x%x "
+		"from 0x%px\n",
+		node_id, reg_offset, reg_value,
+		&node_nbsr[reg_offset]);
+	return reg_value;
+}
+
+static inline unsigned int
+sic_read_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset)
+{
+	unsigned int reg_value;
+
+	if (!HAS_MACHINE_L_SIC) {
+		printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() machine has "
+			"no SIC\n");
+		return (unsigned int)-1;
+	}
+	if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
+		printk(KERN_ERR "sic_read_node_iolink_nbsr_reg() bad IO link "
+			"# %d (< 0 or >= max %d)\n",
+			io_link, MACH_NODE_NUMIOLINKS);
+		return (unsigned int)-1;
+	}
+	reg_value = sic_read_node_nbsr_reg(node_id,
+			SIC_io_reg_offset(io_link, reg_offset));
+	return reg_value;
+}
+
+static inline unsigned long
+sic_readll_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset)
+{
+	unsigned long reg_value;
+
+	if (!HAS_MACHINE_L_SIC) {
+		printk(KERN_ERR "sic_readll_node_iolink_nbsr_reg() machine has "
+			"no SIC\n");
+		return (unsigned long)-1;
+	}
+	if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
+		printk(KERN_ERR "sic_readll_node_iolink_nbsr_reg() bad IO link "
+			"# %d (< 0 or >= max %d)\n",
+			io_link, MACH_NODE_NUMIOLINKS);
+		return (unsigned long)-1;
+	}
+	reg_value = sic_readll_node_nbsr_reg(node_id,
+			SIC_io_reg_offset(io_link, reg_offset));
+	return reg_value;
+}
+
+static inline void
+sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_value)
+{
+	unsigned char *node_nbsr;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_write_node_nbsr_reg() node #%d has no mapping "
+			"to SIC(NBSR) registers\n", node_id);
+	}
+	nbsr_write(reg_value, &node_nbsr[reg_offset]);
+	DebugNBSR("sic_write_node_nbsr_reg() node %d reg 0x%x written 0x%x to "
+		"0x%px\n",
+		node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
+}
+
+static inline void sic_write_node_nbsr_reg_relaxed(int node_id, int reg_offset,
+						   unsigned
int reg_value)
+{
+	unsigned char *node_nbsr;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_write_node_nbsr_reg_relaxed() node #%d has no "
+			"mapping to SIC(NBSR) registers\n", node_id);
+	}
+	nbsr_write_relaxed(reg_value, &node_nbsr[reg_offset]);
+	DebugNBSR("sic_write_node_nbsr_reg_relaxed() node %d reg 0x%x written "
+		"0x%x to 0x%px\n",
+		node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
+}
+
+static inline void
+sic_writell_node_nbsr_reg(int node_id, int reg_offset, unsigned long reg_value)
+{
+	unsigned char *node_nbsr;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_writell_node_nbsr_reg() node #%d has no mapping "
+			"to SIC(NBSR) registers\n", node_id);
+	}
+	nbsr_writell(reg_value, &node_nbsr[reg_offset]);
+	DebugNBSR("sic_writell_node_nbsr_reg() node %d reg 0x%x written 0x%lx "
+		"to 0x%px\n",
+		node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
+}
+
+static inline void
+sic_writew_node_nbsr_reg(int node_id, int reg_offset, u16 reg_value)
+{
+	unsigned char *node_nbsr;
+
+	node_nbsr = sic_get_node_nbsr_base(node_id);
+	if (node_nbsr == NULL) {
+		panic("sic_writew_node_nbsr_reg() node #%d has no mapping "
+			"to SIC(NBSR) registers\n", node_id);
+	}
+	nbsr_writew(reg_value, &node_nbsr[reg_offset]);
+	DebugNBSR("sic_writew_node_nbsr_reg() node %d reg 0x%x written 0x%x "
+		"to 0x%px\n",
+		node_id, reg_offset, reg_value, &node_nbsr[reg_offset]);
+}
+
+static inline void
+sic_write_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset,
+				unsigned int reg_value)
+{
+	if (!HAS_MACHINE_L_SIC) {
+		printk(KERN_ERR "sic_write_node_iolink_nbsr_reg() machine has "
+			"no SIC\n");
+		return;
+	}
+	if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
+		printk(KERN_ERR "sic_write_node_iolink_nbsr_reg() bad IO link "
+			"# %d (< 0 or >= max %d)\n",
+			io_link, MACH_NODE_NUMIOLINKS);
+		return;
+	}
+	sic_write_node_nbsr_reg(node_id,
+		SIC_io_reg_offset(io_link, reg_offset), reg_value);
+}
+
+static inline void
+sic_writell_node_iolink_nbsr_reg(int node_id, int io_link, int reg_offset,
+					unsigned long reg_value)
+{
+	if (!HAS_MACHINE_L_SIC) {
+		printk(KERN_ERR "sic_writell_node_iolink_nbsr_reg() machine "
+			"has no SIC\n");
+		return;
+	}
+	if (io_link < 0 || io_link >= MACH_NODE_NUMIOLINKS) {
+		printk(KERN_ERR "sic_writell_node_iolink_nbsr_reg() bad IO "
+			"link # %d (< 0 or >= max %d)\n",
+			io_link, MACH_NODE_NUMIOLINKS);
+		return;
+	}
+	sic_writell_node_nbsr_reg(node_id,
+		SIC_io_reg_offset(io_link, reg_offset), reg_value);
+}
+
+static inline unsigned int
+sic_read_nbsr_reg(int reg_offset)
+{
+	return sic_read_node_nbsr_reg(numa_node_id(), reg_offset);
+}
+
+static inline unsigned int
+sic_read_iolink_nbsr_reg(int io_link, int reg_offset)
+{
+	return sic_read_node_iolink_nbsr_reg(numa_node_id(), io_link,
+						reg_offset);
+}
+
+static inline void
+sic_write_nbsr_reg(int reg_offset, unsigned int reg_value)
+{
+	sic_write_node_nbsr_reg(numa_node_id(), reg_offset, reg_value);
+}
+
+static inline void
+sic_write_iolink_nbsr_reg(int io_link, int reg_offset, unsigned int reg_value)
+{
+	sic_write_node_iolink_nbsr_reg(numa_node_id(), io_link, reg_offset,
+					reg_value);
+}
+
+#endif /* !
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _L_SIC_REGS_H_ */ diff --git a/arch/e2k/include/asm-l/smp.h b/arch/e2k/include/asm-l/smp.h new file mode 100644 index 0000000..12ef529 --- /dev/null +++ b/arch/e2k/include/asm-l/smp.h @@ -0,0 +1,8 @@ +#ifndef _ASM_L_SMP_H +#define _ASM_L_SMP_H + +extern unsigned int mp_num_processors; +extern unsigned int num_processors; +extern unsigned int disabled_cpus; + +#endif /* _ASM_L_SMP_H */ diff --git a/arch/e2k/include/asm-l/swiotlb.h b/arch/e2k/include/asm-l/swiotlb.h new file mode 100644 index 0000000..27594bf --- /dev/null +++ b/arch/e2k/include/asm-l/swiotlb.h @@ -0,0 +1,3 @@ +#pragma once + +extern int l_use_swiotlb; diff --git a/arch/e2k/include/asm-l/tree_entry.h b/arch/e2k/include/asm-l/tree_entry.h new file mode 100644 index 0000000..2cbbd7a --- /dev/null +++ b/arch/e2k/include/asm-l/tree_entry.h @@ -0,0 +1,42 @@ +#ifndef __TREE_ENTRY_H +#define __TREE_ENTRY_H + +#define MAX_PROPERTY 8 +#define ATTRIB_NAME 0 + +struct prom_property { + const char *name; + void *value; + int size; +}; +struct tree_entry { + struct tree_entry *sibling; + struct tree_entry *child; + int node; + struct prom_property prop[MAX_PROPERTY]; /*NULEWOE SWOJSTWO D.B. IMENEM */ +}; + +extern struct tree_entry *sbus_root_node; + +extern void scan_sbus(struct tree_entry *root, unsigned long start_addr, + int slot_len, int slot_num); +extern void init_known_nodes(struct tree_entry *root); +extern struct tree_entry *get_te_by_node(int node); +extern struct tree_entry *copy_sbus_dev(struct tree_entry *dev); +extern void free_sbus_dev(struct tree_entry *dev); + +extern int prom_getchild(int node); +extern int prom_getproperty(int node, const char *prop, char *buffer, int bufsize); +extern int prom_node_has_property(int node, char *prop); +extern int prom_getproplen(int node, const char *prop); +extern int prom_setprop(int node, const char *pname, char *value, int size); +extern char * prom_firstprop(int node, char *bufer); +extern char * prom_nextprop(int node, char *oprop, char *buffer); +extern int prom_searchsiblings(int node_start, char *nodename); +extern int prom_getsibling(int node); +extern int prom_getint(int node, char *prop); +extern int prom_getbool(int node, char *prop); +extern int prom_getintdefault(int node, char *property, int deflt); +extern void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size); + +#endif /* __TREE_ENTRY_H */ diff --git a/arch/e2k/include/asm/3p.h b/arch/e2k/include/asm/3p.h new file mode 100644 index 0000000..20c6616 --- /dev/null +++ b/arch/e2k/include/asm/3p.h @@ -0,0 +1,113 @@ +#ifndef _E2K_3P_H_ +#define _E2K_3P_H_ + +#ifdef __KERNEL__ + +#include +#include +#include + +struct vm_area_struct; +struct pt_regs; +struct file; +extern int do_global_sp(struct pt_regs *regs, trap_cellar_t *tcellar); +extern int lw_global_sp(struct pt_regs *regs); +extern void free_global_sp(void); +extern int delete_records(unsigned int psl_from); +extern void mark_all_global_sp(struct pt_regs *regs, pid_t pid); +extern int interpreted_ap_code(struct pt_regs *regs, + struct vm_area_struct **vma, e2k_addr_t *address); + +struct syscall_attrs { + u32 mask; /* for coding specs see systable.c */ + /* The next 6 fields specify minimum allowed argument size + * in case of argument-descriptor. + * If negative value, this means size is defined by corresponding arg. + * F.e. value (-3) means size is specified by argument #3. 
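+	 *
+	 * Worked example (illustrative): for a call where arg2 is a buffer
+	 * descriptor whose required length is passed in arg3, size2 would
+	 * be set to -3.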
+ */ + short size1; /* min allowed size of arg1 of particular system call */ + short size2; /* minimum allowed size of arg2 */ + short size3; /* minimum allowed size of arg3 */ + short size4; /* minimum allowed size of arg4 */ + u16 size5; /* minimum allowed size of arg5 */ + u16 size6; /* minimum allowed size of arg6 */ +} __aligned(16) /* For faster address calculation */; +extern const struct syscall_attrs sys_protcall_args[]; +extern const char *sys_call_ID_to_name[]; + +/* + * Definition of ttable entry number used for protected system calls. + * This is under agreement with protected mode compiler/plib team. + */ +#define PMODE_NEW_SYSCALL_TRAPNUM 10 + +/* + * List of protected mode system calls supported. + * For the moment it covers all the calls implemented in plib library. + */ + +#define __NR_P_get_mem 500 +#define __NR_P_free_mem 501 +#define __NR_P_dump_umem 507 + + +/* + * Here are some stuff that belongs to LOCAL->GLOBAL operation support + */ + +typedef struct global_store_trace_record global_store_t; + +typedef enum { + TYPE_GLOBAL = 0, + TYPE_BOUND, + TYPE_INIT, +} type_global_type_t; + +struct global_store_trace_record { + global_store_t *prev; /*that is struct list_head list; */ + global_store_t *next; + type_global_type_t type; + unsigned int lcl_psl; + unsigned int orig_psr_lw; /* to keep track */ + e2k_addr_t global_p; + pid_t pid; + e2k_addr_t new_address; + e2k_addr_t old_address; + unsigned long word1; /*the first word of SAP */ + unsigned long word2; /*the second word of SAP */ + e2k_addr_t sbr; + /* + * just to care about perhaps I need to store the LOCAL here + * as a backup. + */ +}; + +#define IS_SAP_LO(addr) \ +({ \ + e2k_rwsap_lo_struct_t *sap_lo; \ + sap_lo = (e2k_rwsap_lo_struct_t *) addr; \ + (AS_SAP_STRUCT((*sap_lo)).itag == E2K_SAP_ITAG ? \ + (NATIVE_LOAD_TAGD(addr) == E2K_SAP_LO_ETAG ? 1 : 0) : 0); \ +}) + +#define IS_SAP_HI(addr) \ +({ \ + (NATIVE_LOAD_TAGD(addr) == E2K_SAP_HI_ETAG ? 1 : 0); \ +}) + +#define IS_AP_LO(addr) \ +({ \ + e2k_rwap_lo_struct_t *ap_lo; \ + ap_lo = (e2k_rwap_lo_struct_t *) addr; \ + (AS_AP_STRUCT((*ap_lo)).itag == E2K_AP_ITAG ? \ + (NATIVE_LOAD_TAGD(addr) == E2K_AP_LO_ETAG ? 1 : 0) : 0); \ +}) + +#define IS_AP_HI(addr) \ +({ \ + (NATIVE_LOAD_TAGD(addr) == E2K_AP_HI_ETAG ? 
1 : 0); \ +}) + +#endif /* __KERNEL__ */ + +#endif /* _E2K_3P_H_ */ diff --git a/arch/e2k/include/asm/Kbuild b/arch/e2k/include/asm/Kbuild new file mode 100644 index 0000000..6196c57 --- /dev/null +++ b/arch/e2k/include/asm/Kbuild @@ -0,0 +1,15 @@ +### generic + +generic-y += bugs.h +generic-y += div64.h +generic-y += errno.h +generic-y += emergency-restart.h +generic-y += irq_regs.h +generic-y += kmap_types.h +generic-y += local64.h +generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h +generic-y += preempt.h +generic-y += qrwlock.h +generic-y += xor.h +generic-y += mmiowb.h diff --git a/arch/e2k/include/asm/a.out.h b/arch/e2k/include/asm/a.out.h new file mode 100644 index 0000000..397dad7 --- /dev/null +++ b/arch/e2k/include/asm/a.out.h @@ -0,0 +1,28 @@ +#ifndef __E2K_A_OUT_H__ +#define __E2K_A_OUT_H__ + +#ifndef __ASSEMBLY__ + +struct exec +{ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned int a_text; /* length of text, in bytes */ + unsigned int a_data; /* length of data, in bytes */ + unsigned int a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned int a_syms; /* length of symbol table data in file, in bytes */ + unsigned int a_entry; /* start address */ + unsigned int a_trsize; /* length of relocation info for text, in bytes */ + unsigned int a_drsize; /* length of relocation info for data, in bytes */ +}; + +#endif /* __ASSEMBLY__ */ + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#endif + +#endif /* __E2K_A_OUT_H__ */ diff --git a/arch/e2k/include/asm/aau_context.h b/arch/e2k/include/asm/aau_context.h new file mode 100644 index 0000000..f30e0f5 --- /dev/null +++ b/arch/e2k/include/asm/aau_context.h @@ -0,0 +1,248 @@ +/* + * aau_context.h - saving/loading AAU context. + * + * In this file you can see various lists of similar operations. All + * of these operations are of AAU access. The hint is the following: + * AAU regiters can be obtained only through LDAA operation with index + * hardcoded into the AAU syllable. So, index as variable can not be + * substituted. As a cosequence we can not pack them into the loop and + * they are forced to be in lists. + */ +#ifndef _E2K_AAU_CONTEXT_H_ +#define _E2K_AAU_CONTEXT_H_ + +#include +#include +#include +#include + +/******************************* DEBUG DEFINES ********************************/ +#undef DEBUG_AAU_CHECK + +#define DEBUG_AAU_CHECK 0 +#define DbgChk if (DEBUG_AAU_CHECK) printk +/******************************************************************************/ + +typedef union e2k_fapb_aps { + union { + struct { + u64 abs : 5; /* [4:0] area base */ + u64 asz : 3; /* [7:5] area size */ + u64 ind : 4; /* [11:8] initial index (si == 0) */ + u64 incr : 3; /* [14:12] AAINCR number (si == 0) */ + u64 d : 5; /* [19:15] AAD number */ + u64 mrng : 5; /* [24:20] element size */ + u64 fmt : 3; /* [27:25] format */ + u64 dcd : 2; /* [29:28] data cache disabled */ + u64 si : 1; /* [30] secondary index access */ + u64 ct : 1; /* [31] control transfer (left ch.) 
*/ + u64 disp : 32; + }; + struct { + u64 __x1 : 8; + u64 area : 5; /* [12:8] APB area index (si == 1) */ + u64 am : 1; /* [13] (si == 1) */ + u64 be : 1; /* [14] big endian (si == 1) */ + u64 __x2 : 16; + u64 dpl : 1; /* [31] duplicate (right channel) */ + u64 __x3 : 32; + }; + } fields; + u64 word; +} e2k_fapb_instr_t; + +/* constants to pick LSR register fields up */ +#define LSR_LCNT_MASK 0xFFFFFFFF +#define LSR_LDMC_MASK 0x1 +#define LSR_LDMC_SHIFT 39 +#define LSR_ECNT_MASK 0x1f +#define LSR_ECNT_SHIFT 32 +#define LSR_PCNT_MASK 0xf +#define LSR_PCNT_SHIFT 48 +#define LSR_VLC_MASK 0x1 +#define LSR_VLC_SHIFT 37 + +#define get_lcnt(reg) (reg & LSR_LCNT_MASK) +#define get_ldmc(reg) ((reg >> LSR_LDMC_SHIFT) & LSR_LDMC_MASK) +#define get_ecnt(reg) ((reg >> LSR_ECNT_SHIFT) & LSR_ECNT_MASK) +#define get_pcnt(reg) ((reg >> LSR_PCNT_SHIFT) & LSR_PCNT_MASK) +#define get_vlc(reg) ((reg >> LSR_VLC_SHIFT) & LSR_VLC_MASK) + +static inline void +native_get_array_descriptors_v2(e2k_aau_t *context) +{ + NATIVE_GET_ARRAY_DESCRIPTORS_V2(context); +} +static inline void +native_get_array_descriptors_v5(e2k_aau_t *context) +{ + NATIVE_GET_ARRAY_DESCRIPTORS_V5(context); +} + +static __always_inline void +native_set_array_descriptors(const e2k_aau_t *context) +{ + NATIVE_SET_ARRAY_DESCRIPTORS(context); +} + +static inline void +native_get_synchronous_part_v2(e2k_aau_t *context) +{ + NATIVE_GET_SYNCHRONOUS_PART_V2(context); +} +static inline void +native_get_synchronous_part_v5(e2k_aau_t *context) +{ + NATIVE_GET_SYNCHRONOUS_PART_V5(context); +} + +static __always_inline void +native_set_synchronous_part(const e2k_aau_t *context) +{ + NATIVE_SET_SYNCHRONOUS_PART(context); +} + +static inline void +native_set_all_aaldis(const u64 aaldis[]) +{ + NATIVE_SET_ALL_AALDIS(aaldis); +} + +static inline void +native_set_all_aaldas(const e2k_aalda_t aaldas_p[]) +{ +#ifndef __LITTLE_ENDIAN +# error This loads must be little endian to not mix aaldas up (and the same goes to SAVE_AALDA) +#endif + NATIVE_SET_ALL_AALDAS(aaldas_p); +} + +/* set current array prefetch buffer indices values */ +static __always_inline void native_set_aau_aaldis_aaldas( + const struct thread_info *ti, const e2k_aau_t *aau_regs) +{ + native_set_all_aaldis(aau_regs->aaldi); + native_set_all_aaldas(ti->aalda); +} + +/* + * It's taken that aasr was get earlier(from get_aau_context caller) + * and comparison with aasr.iab was taken. + */ +static inline void +native_get_aau_context_v2(e2k_aau_t *context) +{ + NATIVE_GET_AAU_CONTEXT_V2(context); +} +static inline void +native_get_aau_context_v5(e2k_aau_t *context) +{ + NATIVE_GET_AAU_CONTEXT_V5(context); +} + +/* + * It's taken that comparison with aasr.iab was taken and assr + * will be set later. + */ +static __always_inline void +native_set_aau_context(e2k_aau_t *context) +{ + NATIVE_SET_AAU_CONTEXT(context); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel without paravirtualization */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! 
+
+static inline void
+native_get_array_descriptors_v2(e2k_aau_t *context)
+{
+	NATIVE_GET_ARRAY_DESCRIPTORS_V2(context);
+}
+static inline void
+native_get_array_descriptors_v5(e2k_aau_t *context)
+{
+	NATIVE_GET_ARRAY_DESCRIPTORS_V5(context);
+}
+
+static __always_inline void
+native_set_array_descriptors(const e2k_aau_t *context)
+{
+	NATIVE_SET_ARRAY_DESCRIPTORS(context);
+}
+
+static inline void
+native_get_synchronous_part_v2(e2k_aau_t *context)
+{
+	NATIVE_GET_SYNCHRONOUS_PART_V2(context);
+}
+static inline void
+native_get_synchronous_part_v5(e2k_aau_t *context)
+{
+	NATIVE_GET_SYNCHRONOUS_PART_V5(context);
+}
+
+static __always_inline void
+native_set_synchronous_part(const e2k_aau_t *context)
+{
+	NATIVE_SET_SYNCHRONOUS_PART(context);
+}
+
+static inline void
+native_set_all_aaldis(const u64 aaldis[])
+{
+	NATIVE_SET_ALL_AALDIS(aaldis);
+}
+
+static inline void
+native_set_all_aaldas(const e2k_aalda_t aaldas_p[])
+{
+#ifndef __LITTLE_ENDIAN
+# error These loads must be little endian so as not to mix aaldas up (and the same goes for SAVE_AALDA)
+#endif
+	NATIVE_SET_ALL_AALDAS(aaldas_p);
+}
+
+/* set current array prefetch buffer indices values */
+static __always_inline void native_set_aau_aaldis_aaldas(
+		const struct thread_info *ti, const e2k_aau_t *aau_regs)
+{
+	native_set_all_aaldis(aau_regs->aaldi);
+	native_set_all_aaldas(ti->aalda);
+}
+
+/*
+ * It is assumed that aasr was read earlier (by the get_aau_context
+ * caller) and that the comparison with aasr.iab has already been done.
+ */
+static inline void
+native_get_aau_context_v2(e2k_aau_t *context)
+{
+	NATIVE_GET_AAU_CONTEXT_V2(context);
+}
+static inline void
+native_get_aau_context_v5(e2k_aau_t *context)
+{
+	NATIVE_GET_AAU_CONTEXT_V5(context);
+}
+
+/*
+ * It is assumed that the comparison with aasr.iab has already been done
+ * and that aasr will be set later.
+ */
+static __always_inline void
+native_set_aau_context(e2k_aau_t *context)
+{
+	NATIVE_SET_AAU_CONTEXT(context);
+}
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* It is pure guest kernel without paravirtualization */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* It is paravirtualized host and guest kernel */
+#include
+#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
+/* native kernel without virtualization */
+/* or native host kernel with virtualization support */
+
+#define GET_ARRAY_DESCRIPTORS_V2(aau_context) \
+({ \
+	native_get_array_descriptors_v2(aau_context); \
+})
+#define GET_ARRAY_DESCRIPTORS_V5(aau_context) \
+({ \
+	native_get_array_descriptors_v5(aau_context); \
+})
+#define GET_SYNCHRONOUS_PART_V2(aau_context) \
+({ \
+	native_get_synchronous_part_v2(aau_context); \
+})
+#define GET_SYNCHRONOUS_PART_V5(aau_context) \
+({ \
+	native_get_synchronous_part_v5(aau_context); \
+})
+
+#define GET_AAU_CONTEXT_V2(cntx) native_get_aau_context_v2(cntx)
+#define GET_AAU_CONTEXT_V5(cntx) native_get_aau_context_v5(cntx)
+
+#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
+		NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr)
+
+#define RESTORE_AAU_MASK_REGS(aau_context) \
+		NATIVE_RESTORE_AAU_MASK_REGS(aau_context)
+
+#define SAVE_AADS(aau_regs) \
+		NATIVE_SAVE_AADS(aau_regs)
+
+#define RESTORE_AADS(aau_regs) \
+		NATIVE_RESTORE_AADS(aau_regs)
+
+
+#define SAVE_AALDIS_V2(regs) NATIVE_SAVE_AALDIS_V2(regs)
+#define SAVE_AALDIS_V5(regs) NATIVE_SAVE_AALDIS_V5(regs)
+
+#define SAVE_AALDA(aaldas) \
+({ \
+	register u32 aalda0, aalda4, aalda8, aalda12, \
+			aalda16, aalda20, aalda24, aalda28, \
+			aalda32, aalda36, aalda40, aalda44, \
+			aalda48, aalda52, aalda56, aalda60; \
+ \
+	NATIVE_GET_AAU_AALDA(aalda0, aalda32, aalda0); \
+	NATIVE_GET_AAU_AALDA(aalda4, aalda36, aalda4); \
+	NATIVE_GET_AAU_AALDA(aalda8, aalda40, aalda8); \
+	NATIVE_GET_AAU_AALDA(aalda12, aalda44, aalda12); \
+	NATIVE_GET_AAU_AALDA(aalda16, aalda48, aalda16); \
+	NATIVE_GET_AAU_AALDA(aalda20, aalda52, aalda20); \
+	NATIVE_GET_AAU_AALDA(aalda24, aalda56, aalda24); \
+	NATIVE_GET_AAU_AALDA(aalda28, aalda60, aalda28); \
+	*(u32 *) (&aaldas[0]) = aalda0; \
+	*(u32 *) (&aaldas[4]) = aalda4; \
+	*(u32 *) (&aaldas[8]) = aalda8; \
+	*(u32 *) (&aaldas[12]) = aalda12; \
+	*(u32 *) (&aaldas[16]) = aalda16; \
+	*(u32 *) (&aaldas[20]) = aalda20; \
+	*(u32 *) (&aaldas[24]) = aalda24; \
+	*(u32 *) (&aaldas[28]) = aalda28; \
+	*(u32 *) (&aaldas[32]) = aalda32; \
+	*(u32 *) (&aaldas[36]) = aalda36; \
+	*(u32 *) (&aaldas[40]) = aalda40; \
+	*(u32 *) (&aaldas[44]) = aalda44; \
+	*(u32 *) (&aaldas[48]) = aalda48; \
+	*(u32 *) (&aaldas[52]) = aalda52; \
+	*(u32 *) (&aaldas[56]) = aalda56; \
+	*(u32 *) (&aaldas[60]) = aalda60; \
+})
+
+#define SAVE_AAFSTR(regs) \
+({ \
+	regs = native_read_aafstr_reg_value(); \
+})
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+/*
+ * for code optimization
+ */
+static inline int aau_working(e2k_aau_t *context)
+{
+	e2k_aasr_t aasr = context->aasr;
+
+	return unlikely(AW(aasr) & (AAU_AASR_IAB | AAU_AASR_STB));
+}
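+
+/*
+ * Illustrative example (not part of the original code): aau_working()
+ * is a cheap guard that lets callers skip the expensive register
+ * read-out when neither the asynchronous (iab) nor the synchronous
+ * (stb) part of the AAU holds any state, e.g.:
+ *
+ *	if (aau_working(aau_regs))
+ *		NATIVE_GET_AAU_CONTEXT(aau_regs);
+ */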
, AASTA15); + */ + +#ifndef _E2K_AAU_H_ +#define _E2K_AAU_H_ + +#include +#include +#include + +#endif /* _E2K_AAU_H_ */ diff --git a/arch/e2k/include/asm/aau_regs_access.h b/arch/e2k/include/asm/aau_regs_access.h new file mode 100644 index 0000000..ecd399a --- /dev/null +++ b/arch/e2k/include/asm/aau_regs_access.h @@ -0,0 +1,683 @@ +/* + * AAU registers description, macroses for load/store AAU context + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _E2K_AAU_REGS_ACCESS_H_ +#define _E2K_AAU_REGS_ACCESS_H_ + +#include +#include + +#include + +/* + * see comment about of PREFIX_ at top of arch/e2k/include/regs_state.h + * + additional parameter: + * pv_type argument in macroses is same as prefix but by small letter + * and can be: + * native native kernel with or without virtualization support + * kvm guest kernel (can be run only as paravirtualized + * guest kernel) + * pv paravirtualized kernel (can be run as host and as guest + * paravirtualized kernels) + */ + +#define PREFIX_SAVE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context, aasr) \ +({ \ + if (unlikely(AAU_ACTIVE(aasr))) { \ + /* As it turns out AAU can be in ACTIVE state \ + * in interrupt handler (bug 53227 comment 28 \ + * and bug 53227 comment 36). \ + * The hardware stops AAU automatically but \ + * the value to be written should be corrected \ + * to "stopped" so that the "DONE" instruction \ + * works as expected. 
\ + */ \ + AS(aasr).lds = AASR_STOPPED; \ + } \ + (aau_context)->aasr = aasr; \ + if (unlikely(AAU_STOPPED(aasr))) { \ + pv_type##_read_aaldv_reg(&(aau_context)->aaldv); \ + pv_type##_read_aaldm_reg(&(aau_context)->aaldm); \ + } else { \ + AW((aau_context)->aaldv) = 0; \ + AW((aau_context)->aaldm) = 0; \ + } \ +}) + +#define NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr) \ + PREFIX_SAVE_AAU_MASK_REGS(NATIVE, native, aau_context, aasr) + +#define PREFIX_RESTORE_AAU_MASK_REGS(PV_TYPE, pv_type, aau_context) \ +({ \ + pv_type##_write_aafstr_reg_value(0); \ + pv_type##_write_aaldm_reg(&(aau_context)->aaldm); \ + pv_type##_write_aaldv_reg(&(aau_context)->aaldv); \ + /* aasr can be in 'ACTIVE' state, so we set it last */ \ + pv_type##_write_aasr_reg((aau_context)->aasr); \ +}) + +#define NATIVE_RESTORE_AAU_MASK_REGS(aau_context) \ + PREFIX_RESTORE_AAU_MASK_REGS(NATIVE, native, aau_context) + +#define PREFIX_SAVE_AADS(PV_TYPE, pv_type, aau_regs) \ +({ \ + register e2k_aadj_t *aads = (aau_regs)->aads; \ + \ + pv_type##_read_aads_4_reg(0, &aads[0]); \ + pv_type##_read_aads_4_reg(4, &aads[4]); \ + pv_type##_read_aads_4_reg(8, &aads[8]); \ + pv_type##_read_aads_4_reg(12, &aads[12]); \ + pv_type##_read_aads_4_reg(16, &aads[16]); \ + pv_type##_read_aads_4_reg(20, &aads[20]); \ + pv_type##_read_aads_4_reg(24, &aads[24]); \ + pv_type##_read_aads_4_reg(28, &aads[28]); \ +}) + +#define NATIVE_SAVE_AADS(aau_regs) \ + PREFIX_SAVE_AADS(NATIVE, native, aau_regs) + +#define PREFIX_RESTORE_AADS(PV_TYPE, pv_type, aau_regs) \ +({ \ + register e2k_aadj_t *aads = (aau_regs)->aads; \ + \ + pv_type##_write_aads_4_reg(0, &aads[0]); \ + pv_type##_write_aads_4_reg(4, &aads[4]); \ + pv_type##_write_aads_4_reg(8, &aads[8]); \ + pv_type##_write_aads_4_reg(12, &aads[12]); \ + pv_type##_write_aads_4_reg(16, &aads[16]); \ + pv_type##_write_aads_4_reg(20, &aads[20]); \ + pv_type##_write_aads_4_reg(24, &aads[24]); \ + pv_type##_write_aads_4_reg(28, &aads[28]); \ +}) + +#define NATIVE_RESTORE_AADS(aau_regs) \ + PREFIX_RESTORE_AADS(NATIVE, native, aau_regs) + +#define PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, ISET, iset, regs) \ +({ \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(0, regs[0], regs[32]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(1, regs[1], regs[33]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(2, regs[2], regs[34]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(3, regs[3], regs[35]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(4, regs[4], regs[36]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(5, regs[5], regs[37]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(6, regs[6], regs[38]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(7, regs[7], regs[39]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(8, regs[8], regs[40]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(9, regs[9], regs[41]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(10, regs[10], regs[42]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(11, regs[11], regs[43]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(12, regs[12], regs[44]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(13, regs[13], regs[45]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(14, regs[14], regs[46]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(15, regs[15], regs[47]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(16, regs[16], regs[48]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(17, regs[17], regs[49]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(18, regs[18], regs[50]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(19, regs[19], regs[51]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(20, regs[20], regs[52]); \ + PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(21, 
regs[21], regs[53]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(22, regs[22], regs[54]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(23, regs[23], regs[55]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(24, regs[24], regs[56]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(25, regs[25], regs[57]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(26, regs[26], regs[58]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(27, regs[27], regs[59]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(28, regs[28], regs[60]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(29, regs[29], regs[61]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(30, regs[30], regs[62]); \
+	PV_TYPE##_READ_AALDI_REG_VALUE_##ISET(31, regs[31], regs[63]); \
+})
+#define PREFIX_SAVE_AALDIS_V2(PV_TYPE, pv_type, regs) \
+		PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V2, v2, regs)
+#define PREFIX_SAVE_AALDIS_V5(PV_TYPE, pv_type, regs) \
+		PREFIX_SAVE_AALDIS(PV_TYPE, pv_type, V5, v5, regs)
+
+#define NATIVE_SAVE_AALDIS_V2(regs) \
+		PREFIX_SAVE_AALDIS_V2(NATIVE, native, regs)
+#define NATIVE_SAVE_AALDIS_V5(regs) \
+		PREFIX_SAVE_AALDIS_V5(NATIVE, native, regs)
+#define NATIVE_SAVE_AALDIS(regs) \
+({ \
+	if (IS_AAU_ISET_V5()) { \
+		NATIVE_SAVE_AALDIS_V5(regs); \
+	} else if (IS_AAU_ISET_V2()) { \
+		NATIVE_SAVE_AALDIS_V2(regs); \
+	} else if (IS_AAU_ISET_GENERIC()) { \
+		machine.save_aaldi(regs); \
+	} else { \
+		BUILD_BUG_ON(true); \
+	} \
+})
+
+#define PREFIX_GET_ARRAY_DESCRIPTORS_V2(PV_TYPE, pv_type, aau_context) \
+({ \
+	u64 *const aainds = (aau_context)->aainds; \
+	u64 *const aaincrs = (aau_context)->aaincrs; \
+ \
+	/* \
+	 * get AAINDs, omit the AAIND0 saving since it has predefined 0 \
+	 * value \
+	 */ \
+	{ \
+		register u32 ind1, ind2, ind3, ind4, \
+				ind5, ind6, ind7, ind8, \
+				ind9, ind10, ind11, ind12, \
+				ind13, ind14, ind15; \
+		register u32 tags; \
+ \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(1, ind1, ind2); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(3, ind3, ind4); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(5, ind5, ind6); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(7, ind7, ind8); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(9, ind9, ind10); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(11, ind11, ind12); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V2(13, ind13, ind14); \
+		PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V2(ind15, tags); \
+		aainds[0] = 0; \
+		aainds[1] = ind1; \
+		aainds[2] = ind2; \
+		aainds[3] = ind3; \
+		aainds[4] = ind4; \
+		aainds[5] = ind5; \
+		aainds[6] = ind6; \
+		aainds[7] = ind7; \
+		aainds[8] = ind8; \
+		aainds[9] = ind9; \
+		aainds[10] = ind10; \
+		aainds[11] = ind11; \
+		aainds[12] = ind12; \
+		aainds[13] = ind13; \
+		aainds[14] = ind14; \
+		aainds[15] = ind15; \
+		(aau_context)->aaind_tags = tags; \
+	} \
+ \
+	/* \
+	 * get AAINCRs, omit the AAINCR0 saving since it has predefined 1 \
+	 * value \
+	 */ \
+	{ \
+		register u32 incr1, incr2, incr3, incr4, \
+				incr5, incr6, incr7; \
+		register u32 tags; \
+ \
+		PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(1, incr1, incr2); \
+		PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(3, incr3, incr4); \
+		PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V2(5, incr5, incr6); \
+		PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(incr7, tags); \
+		aaincrs[0] = 1; \
+		aaincrs[1] = (s64) (s32) incr1; \
+		aaincrs[2] = (s64) (s32) incr2; \
+		aaincrs[3] = (s64) (s32) incr3; \
+		aaincrs[4] = (s64) (s32) incr4; \
+		aaincrs[5] = (s64) (s32) incr5; \
+		aaincrs[6] = (s64) (s32) incr6; \
+		aaincrs[7] = (s64) (s32) incr7; \
+		(aau_context)->aaincr_tags = tags; \
+	} \
+})
+#define NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context) \
+		PREFIX_GET_ARRAY_DESCRIPTORS_V2(NATIVE, native, aau_context)
+
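+/*
+ * Illustrative example (not part of the original code): the V2 variant
+ * above reads the index registers as 32-bit halves (AAINCRs get
+ * sign-extended to 64 bit), while the V5 variant below reads native
+ * 64-bit values; the iset is picked at run time the same way
+ * NATIVE_SAVE_AALDIS() does it:
+ *
+ *	if (IS_AAU_ISET_V5())
+ *		NATIVE_GET_ARRAY_DESCRIPTORS_V5(context);
+ *	else if (IS_AAU_ISET_V2())
+ *		NATIVE_GET_ARRAY_DESCRIPTORS_V2(context);
+ */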
+#define PREFIX_GET_ARRAY_DESCRIPTORS_V5(PV_TYPE, pv_type, aau_context) \
+({ \
+	u64 *const aainds = (aau_context)->aainds; \
+	u64 *const aaincrs = (aau_context)->aaincrs; \
+ \
+	/* \
+	 * get AAINDs, omit the AAIND0 saving since it has predefined 0 \
+	 * value \
+	 */ \
+	{ \
+		register u64 ind1, ind2, ind3, ind4, \
+				ind5, ind6, ind7, ind8, \
+				ind9, ind10, ind11, ind12, \
+				ind13, ind14, ind15; \
+		register u32 tags; \
+ \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(1, ind1, ind2); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(3, ind3, ind4); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(5, ind5, ind6); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(7, ind7, ind8); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(9, ind9, ind10); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(11, ind11, ind12); \
+		PV_TYPE##_READ_AAINDS_PAIR_VALUE_V5(13, ind13, ind14); \
+		PV_TYPE##_READ_AAIND_REG15_AND_TAGS_VALUE_V5(ind15, tags); \
+		aainds[0] = 0; \
+		aainds[1] = ind1; \
+		aainds[2] = ind2; \
+		aainds[3] = ind3; \
+		aainds[4] = ind4; \
+		aainds[5] = ind5; \
+		aainds[6] = ind6; \
+		aainds[7] = ind7; \
+		aainds[8] = ind8; \
+		aainds[9] = ind9; \
+		aainds[10] = ind10; \
+		aainds[11] = ind11; \
+		aainds[12] = ind12; \
+		aainds[13] = ind13; \
+		aainds[14] = ind14; \
+		aainds[15] = ind15; \
+		(aau_context)->aaind_tags = tags; \
+	} \
+ \
+	/* \
+	 * get AAINCRs, omit the AAINCR0 saving since it has predefined 1 \
+	 * value \
+	 */ \
+	{ \
+		register u64 incr1, incr2, incr3, incr4, \
+				incr5, incr6, incr7; \
+		register u32 tags; \
+ \
+		PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(1, incr1, incr2); \
+		PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(3, incr3, incr4); \
+		PV_TYPE##_READ_AAINCRS_PAIR_VALUE_V5(5, incr5, incr6); \
+		PV_TYPE##_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(incr7, tags); \
+		aaincrs[0] = 1; \
+		aaincrs[1] = incr1; \
+		aaincrs[2] = incr2; \
+		aaincrs[3] = incr3; \
+		aaincrs[4] = incr4; \
+		aaincrs[5] = incr5; \
+		aaincrs[6] = incr6; \
+		aaincrs[7] = incr7; \
+		(aau_context)->aaincr_tags = tags; \
+	} \
+})
+#define NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context) \
+		PREFIX_GET_ARRAY_DESCRIPTORS_V5(NATIVE, native, aau_context)
+
+#define PREFIX_SET_ARRAY_DESCRIPTORS(PV_TYPE, pv_type, aau_context) \
+({ \
+	const e2k_aau_t *const aau = (aau_context); \
+	const u64 *const aainds = aau->aainds; \
+	const u64 *const aaincrs = aau->aaincrs; \
+ \
+	/* \
+	 * set AAINDs, omit the AAIND0 restoring since \
+	 * it has predefined 0 value. \
+	 */ \
+	pv_type##_write_aainds_pair_value(1, aainds[1], aainds[2]); \
+	pv_type##_write_aainds_pair_value(3, aainds[3], aainds[4]); \
+	pv_type##_write_aainds_pair_value(5, aainds[5], aainds[6]); \
+	pv_type##_write_aainds_pair_value(7, aainds[7], aainds[8]); \
+	pv_type##_write_aainds_pair_value(9, aainds[9], aainds[10]); \
+	pv_type##_write_aainds_pair_value(11, aainds[11], aainds[12]); \
+	pv_type##_write_aainds_pair_value(13, aainds[13], aainds[14]); \
+	pv_type##_write_aaind_reg_value(15, aainds[15]); \
+ \
+	/* \
+	 * set AAINCRs, omit the AAINCR0 restoring since \
+	 * it has predefined 1 value.
\ + */ \ + pv_type##_write_aaincrs_pair_value(1, aaincrs[1], aaincrs[2]); \ + pv_type##_write_aaincrs_pair_value(3, aaincrs[3], aaincrs[4]); \ + pv_type##_write_aaincrs_pair_value(5, aaincrs[5], aaincrs[6]); \ + pv_type##_write_aaincr_reg_value(7, aaincrs[7]); \ + \ + /* Set TAGS */ \ + PV_TYPE##_SET_AAU_AAIND_AAINCR_TAGS(aau->aaind_tags, aau->aaincr_tags); \ +}) +#define NATIVE_SET_ARRAY_DESCRIPTORS(aau_context) \ + PREFIX_SET_ARRAY_DESCRIPTORS(NATIVE, native, aau_context) + +#define PREFIX_GET_SYNCHRONOUS_PART_V2(PV_TYPE, pv_type, aau_context) \ +({ \ + u64 *const aastis = (aau_context)->aastis; \ + register u32 sti0, sti1, sti2, sti3, \ + sti4, sti5, sti6, sti7, \ + sti8, sti9, sti10, sti11, \ + sti12, sti13, sti14, sti15; \ + \ + /* get AASTIs */ \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(0, sti0, sti1); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(2, sti2, sti3); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(4, sti4, sti5); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(6, sti6, sti7); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(8, sti8, sti9); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(10, sti10, sti11); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(12, sti12, sti13); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V2(14, sti14, sti15); \ + \ + aastis[0] = sti0; \ + aastis[1] = sti1; \ + aastis[2] = sti2; \ + aastis[3] = sti3; \ + aastis[4] = sti4; \ + aastis[5] = sti5; \ + aastis[6] = sti6; \ + aastis[7] = sti7; \ + aastis[8] = sti8; \ + aastis[9] = sti9; \ + aastis[10] = sti10; \ + aastis[11] = sti11; \ + aastis[12] = sti12; \ + aastis[13] = sti13; \ + aastis[14] = sti14; \ + aastis[15] = sti15; \ + (aau_context)->aasti_tags = \ + pv_type##_read_aasti_tags_reg_value(); \ +}) + +#define PREFIX_GET_SYNCHRONOUS_PART_V5(PV_TYPE, pv_type, aau_context) \ +({ \ + u64 *const aastis = (aau_context)->aastis; \ + register u64 sti0, sti1, sti2, sti3, \ + sti4, sti5, sti6, sti7, \ + sti8, sti9, sti10, sti11, \ + sti12, sti13, sti14, sti15; \ + \ + /* get AASTIs */ \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(0, sti0, sti1); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(2, sti2, sti3); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(4, sti4, sti5); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(6, sti6, sti7); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(8, sti8, sti9); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(10, sti10, sti11); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(12, sti12, sti13); \ + PV_TYPE##_READ_AASTIS_PAIR_VALUE_V5(14, sti14, sti15); \ + \ + aastis[0] = sti0; \ + aastis[1] = sti1; \ + aastis[2] = sti2; \ + aastis[3] = sti3; \ + aastis[4] = sti4; \ + aastis[5] = sti5; \ + aastis[6] = sti6; \ + aastis[7] = sti7; \ + aastis[8] = sti8; \ + aastis[9] = sti9; \ + aastis[10] = sti10; \ + aastis[11] = sti11; \ + aastis[12] = sti12; \ + aastis[13] = sti13; \ + aastis[14] = sti14; \ + aastis[15] = sti15; \ + (aau_context)->aasti_tags = \ + pv_type##_read_aasti_tags_reg_value(); \ +}) +#define NATIVE_GET_SYNCHRONOUS_PART_V2(aau_context) \ + PREFIX_GET_SYNCHRONOUS_PART_V2(NATIVE, native, aau_context) +#define NATIVE_GET_SYNCHRONOUS_PART_V5(aau_context) \ + PREFIX_GET_SYNCHRONOUS_PART_V5(NATIVE, native, aau_context) + +#define PREFIX_SET_SYNCHRONOUS_PART(PV_TYPE, pv_type, aau_context) \ +({ \ + const u64 *const aastis = (aau_context)->aastis; \ + \ + /* set AASTIs */ \ + pv_type##_write_aastis_pair_value(0, aastis[0], aastis[1]); \ + pv_type##_write_aastis_pair_value(2, aastis[2], aastis[3]); \ + pv_type##_write_aastis_pair_value(4, aastis[4], aastis[5]); \ + pv_type##_write_aastis_pair_value(6, aastis[6], aastis[7]); \ + pv_type##_write_aastis_pair_value(8, aastis[8], 
aastis[9]); \
+	pv_type##_write_aastis_pair_value(10, aastis[10], aastis[11]); \
+	pv_type##_write_aastis_pair_value(12, aastis[12], aastis[13]); \
+	pv_type##_write_aastis_pair_value(14, aastis[14], aastis[15]); \
+	pv_type##_write_aasti_tags_reg_value((aau_context)->aasti_tags); \
+})
+#define NATIVE_SET_SYNCHRONOUS_PART(aau_context) \
+		PREFIX_SET_SYNCHRONOUS_PART(NATIVE, native, aau_context)
+
+#define PREFIX_SET_ALL_AALDIS(PV_TYPE, pv_type, aaldis) \
+({ \
+	pv_type##_write_aaldi_reg_value(0, aaldis[0], aaldis[32]); \
+	pv_type##_write_aaldi_reg_value(1, aaldis[1], aaldis[33]); \
+	pv_type##_write_aaldi_reg_value(2, aaldis[2], aaldis[34]); \
+	pv_type##_write_aaldi_reg_value(3, aaldis[3], aaldis[35]); \
+	pv_type##_write_aaldi_reg_value(4, aaldis[4], aaldis[36]); \
+	pv_type##_write_aaldi_reg_value(5, aaldis[5], aaldis[37]); \
+	pv_type##_write_aaldi_reg_value(6, aaldis[6], aaldis[38]); \
+	pv_type##_write_aaldi_reg_value(7, aaldis[7], aaldis[39]); \
+	pv_type##_write_aaldi_reg_value(8, aaldis[8], aaldis[40]); \
+	pv_type##_write_aaldi_reg_value(9, aaldis[9], aaldis[41]); \
+	pv_type##_write_aaldi_reg_value(10, aaldis[10], aaldis[42]); \
+	pv_type##_write_aaldi_reg_value(11, aaldis[11], aaldis[43]); \
+	pv_type##_write_aaldi_reg_value(12, aaldis[12], aaldis[44]); \
+	pv_type##_write_aaldi_reg_value(13, aaldis[13], aaldis[45]); \
+	pv_type##_write_aaldi_reg_value(14, aaldis[14], aaldis[46]); \
+	pv_type##_write_aaldi_reg_value(15, aaldis[15], aaldis[47]); \
+	pv_type##_write_aaldi_reg_value(16, aaldis[16], aaldis[48]); \
+	pv_type##_write_aaldi_reg_value(17, aaldis[17], aaldis[49]); \
+	pv_type##_write_aaldi_reg_value(18, aaldis[18], aaldis[50]); \
+	pv_type##_write_aaldi_reg_value(19, aaldis[19], aaldis[51]); \
+	pv_type##_write_aaldi_reg_value(20, aaldis[20], aaldis[52]); \
+	pv_type##_write_aaldi_reg_value(21, aaldis[21], aaldis[53]); \
+	pv_type##_write_aaldi_reg_value(22, aaldis[22], aaldis[54]); \
+	pv_type##_write_aaldi_reg_value(23, aaldis[23], aaldis[55]); \
+	pv_type##_write_aaldi_reg_value(24, aaldis[24], aaldis[56]); \
+	pv_type##_write_aaldi_reg_value(25, aaldis[25], aaldis[57]); \
+	pv_type##_write_aaldi_reg_value(26, aaldis[26], aaldis[58]); \
+	pv_type##_write_aaldi_reg_value(27, aaldis[27], aaldis[59]); \
+	pv_type##_write_aaldi_reg_value(28, aaldis[28], aaldis[60]); \
+	pv_type##_write_aaldi_reg_value(29, aaldis[29], aaldis[61]); \
+	pv_type##_write_aaldi_reg_value(30, aaldis[30], aaldis[62]); \
+	pv_type##_write_aaldi_reg_value(31, aaldis[31], aaldis[63]); \
+})
+#define NATIVE_SET_ALL_AALDIS(aaldis) \
+		PREFIX_SET_ALL_AALDIS(NATIVE, native, aaldis)
+
+#define PREFIX_SET_ALL_AALDAS(PV_TYPE, pv_type, aaldas_p) \
+({ \
+	register u32 *aaldas = (u32 *)(aaldas_p); \
+ \
+	pv_type##_write_aaldas_reg_value(0, aaldas[0], aaldas[8]); \
+	pv_type##_write_aaldas_reg_value(4, aaldas[1], aaldas[9]); \
+	pv_type##_write_aaldas_reg_value(8, aaldas[2], aaldas[10]); \
+	pv_type##_write_aaldas_reg_value(12, aaldas[3], aaldas[11]); \
+	pv_type##_write_aaldas_reg_value(16, aaldas[4], aaldas[12]); \
+	pv_type##_write_aaldas_reg_value(20, aaldas[5], aaldas[13]); \
+	pv_type##_write_aaldas_reg_value(24, aaldas[6], aaldas[14]); \
+	pv_type##_write_aaldas_reg_value(28, aaldas[7], aaldas[15]); \
+})
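+
+/*
+ * Note (illustrative, not part of the original code): the 64 byte-sized
+ * %aalda registers are moved as 16 32-bit words; each write transfers
+ * one word of the left half (bytes 0-31) together with its counterpart
+ * from the right half (bytes 32-63), which is why aaldas[i] is paired
+ * with aaldas[i + 8] above - the same pairing SAVE_AALDA uses on the
+ * read side.
+ */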
+
+/*
+ * It is assumed that aasr was read earlier (by the get_aau_context
+ * caller) and that the comparison with aasr.iab has already been done.
+ */
+#define PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, ISET, iset, aau_context) \
+({ \
+	/* get registers, which describe arrays in APB operations */ \
+	e2k_aasr_t aasr = (aau_context)->aasr; \
+ \
+	/* get descriptors & auxiliary registers */ \
+	if (AS(aasr).iab) \
+		PV_TYPE##_GET_ARRAY_DESCRIPTORS_##ISET(aau_context); \
+ \
+	/* get synchronous part of APB */ \
+	if (AS(aasr).stb) \
+		PV_TYPE##_GET_SYNCHRONOUS_PART_##ISET(aau_context); \
+})
+#define PREFIX_GET_AAU_CONTEXT_V2(PV_TYPE, pv_type, aau_context) \
+		PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V2, v2, aau_context)
+#define PREFIX_GET_AAU_CONTEXT_V5(PV_TYPE, pv_type, aau_context) \
+		PREFIX_GET_AAU_CONTEXT(PV_TYPE, pv_type, V5, v5, aau_context)
+#define NATIVE_GET_AAU_CONTEXT_V2(aau_context) \
+		PREFIX_GET_AAU_CONTEXT_V2(NATIVE, native, aau_context)
+#define NATIVE_GET_AAU_CONTEXT_V5(aau_context) \
+		PREFIX_GET_AAU_CONTEXT_V5(NATIVE, native, aau_context)
+#define NATIVE_GET_AAU_CONTEXT(aau_context) \
+({ \
+	if (IS_AAU_ISET_V5()) { \
+		NATIVE_GET_AAU_CONTEXT_V5(aau_context); \
+	} else if (IS_AAU_ISET_V2()) { \
+		NATIVE_GET_AAU_CONTEXT_V2(aau_context); \
+	} else if (IS_AAU_ISET_GENERIC()) { \
+		machine.get_aau_context(aau_context); \
+	} else { \
+		BUILD_BUG_ON(true); \
+	} \
+})
+
+/*
+ * It is assumed that the comparison with aasr.iab has already been done
+ * and that aasr will be set later.
+ */
+#define PREFIX_SET_AAU_CONTEXT(PV_TYPE, pv_type, aau_context) \
+do { \
+	const e2k_aau_t *const aau = (aau_context); \
+	/* retrieve common APB status register */\
+	e2k_aasr_t aasr = aau->aasr; \
+ \
+	/* prefetch data to restore */ \
+	if (AS(aasr).stb) \
+		prefetchw_range(aau->aastis, sizeof(aau->aastis) + \
+				sizeof(aau->aasti_tags)); \
+	if (AS(aasr).iab) \
+		prefetchw_range(aau->aainds, sizeof(aau->aainds) + \
+			sizeof(aau->aaind_tags) + sizeof(aau->aaincrs) + \
+			sizeof(aau->aaincr_tags) + sizeof(aau->aads)); \
+	if (AAU_STOPPED(aasr)) \
+		prefetchw_range(aau->aaldi, sizeof(aau->aaldi)); \
+ \
+	/* Make sure prefetches are issued */ \
+	barrier(); \
+ \
+	/* set synchronous part of APB */ \
+	if (AS(aasr).stb) \
+		pv_type##_set_synchronous_part(aau); \
+ \
+	/* set descriptors & auxiliary registers */ \
+	if (AS(aasr).iab) \
+		pv_type##_set_array_descriptors(aau); \
+} while (0)
+#define NATIVE_SET_AAU_CONTEXT(aau_context) \
+		PREFIX_SET_AAU_CONTEXT(NATIVE, native, aau_context)
+
+#define PREFIX_SAVE_AALDAS(PV_TYPE, pv_type, aaldas_p) \
+({ \
+	register u32 *aaldas = (u32 *)aaldas_p; \
+ \
+	pv_type##_read_aaldas_reg_value(0, &aaldas[0], &aaldas[8]); \
+	pv_type##_read_aaldas_reg_value(4, &aaldas[1], &aaldas[9]); \
+	pv_type##_read_aaldas_reg_value(8, &aaldas[2], &aaldas[10]); \
+	pv_type##_read_aaldas_reg_value(12, &aaldas[3], &aaldas[11]); \
+	pv_type##_read_aaldas_reg_value(16, &aaldas[4], &aaldas[12]); \
+	pv_type##_read_aaldas_reg_value(20, &aaldas[5], &aaldas[13]); \
+	pv_type##_read_aaldas_reg_value(24, &aaldas[6], &aaldas[14]); \
+	pv_type##_read_aaldas_reg_value(28, &aaldas[7], &aaldas[15]); \
+})
+
+#define NATIVE_SAVE_AALDAS(aaldas_p) \
+		PREFIX_SAVE_AALDAS(NATIVE, native, aaldas_p)
+
+#define PREFIX_SAVE_AAFSTR(PV_TYPE, pv_type, aau_context) \
+({ \
+	(aau_context)->aafstr = pv_type##_read_aafstr_reg_value(); \
+})
+
+#define NATIVE_SAVE_AAFSTR(aau_context) \
+		PREFIX_SAVE_AAFSTR(NATIVE, native, aau_context)
+
+#define PREFIX_SAVE_AAU_REGS_FOR_PTRACE(PV_TYPE, pv_type, pt_regs, ti) \
+({ \
+	e2k_aau_t *__aau_context = (pt_regs)->aau_context; \
+	if (__aau_context) { \
+		if (machine.native_iset_ver < E2K_ISET_V6) \
+			PV_TYPE##_SAVE_AALDIS(__aau_context->aaldi); \
+		PV_TYPE##_SAVE_AALDAS(ti->aalda); \
+		PV_TYPE##_SAVE_AAFSTR(__aau_context); \
+	} \
+})
+
+#define NATIVE_SAVE_AAU_REGS_FOR_PTRACE(pt_regs, ti) \
+		PREFIX_SAVE_AAU_REGS_FOR_PTRACE(NATIVE, native, pt_regs, ti)
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* It is pure guest kernel without paravirtualization */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* It is paravirtualized host and guest kernel */
+#include
+#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
+/* native kernel without virtualization */
+/* or native host kernel with virtualization support */
+
+static __always_inline u32 read_aasr_reg_value(void)
+{
+	return native_read_aasr_reg_value();
+}
+static __always_inline void write_aasr_reg_value(u32 reg_value)
+{
+	native_write_aasr_reg_value(reg_value);
+}
+static inline u32 read_aafstr_reg_value(void)
+{
+	return native_read_aafstr_reg_value();
+}
+static inline void write_aafstr_reg_value(u32 reg_value)
+{
+	native_write_aafstr_reg_value(reg_value);
+}
+
+static __always_inline e2k_aasr_t read_aasr_reg(void)
+{
+	return native_read_aasr_reg();
+}
+static __always_inline void write_aasr_reg(e2k_aasr_t aasr)
+{
+	native_write_aasr_reg(aasr);
+}
+static inline void read_aaldm_reg(e2k_aaldm_t *aaldm)
+{
+	native_read_aaldm_reg(aaldm);
+}
+static inline void write_aaldm_reg(e2k_aaldm_t *aaldm)
+{
+	native_write_aaldm_reg(aaldm);
+}
+static inline void read_aaldv_reg(e2k_aaldv_t *aaldv)
+{
+	native_read_aaldv_reg(aaldv);
+}
+static inline void write_aaldv_reg(e2k_aaldv_t *aaldv)
+{
+	native_write_aaldv_reg(aaldv);
+}
+
+#ifdef CONFIG_USE_AAU
+# define SAVE_AAU_REGS_FOR_PTRACE(__regs, ti) \
+		NATIVE_SAVE_AAU_REGS_FOR_PTRACE(__regs, ti)
+#else
+# define SAVE_AAU_REGS_FOR_PTRACE(__regs, ti)
+#endif
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#define SWITCH_GUEST_AAU_AASR(aasr, aau_context, do_switch) \
+({ \
+	if (do_switch) { \
+		e2k_aasr_t aasr_worst_case; \
+		AW(aasr_worst_case) = 0; \
+		AS(aasr_worst_case).stb = 1; \
+		AS(aasr_worst_case).iab = 1; \
+		AS(aasr_worst_case).lds = AASR_STOPPED; \
+		(aau_context)->guest_aasr = *(aasr); \
+		*(aasr) = aasr_worst_case; \
+	} \
+})
+
+#define RESTORE_GUEST_AAU_AASR(aau_context, do_restore) \
+({ \
+	if (do_restore) { \
+		(aau_context)->aasr = (aau_context)->guest_aasr; \
+	} \
+})
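+
+/*
+ * Illustrative example (not part of the original code): the two macros
+ * above are meant to bracket work done on behalf of a guest, e.g.:
+ *
+ *	SWITCH_GUEST_AAU_AASR(&aasr, aau_context, true);
+ *	... save the AAU state with the worst-case aasr ...
+ *	RESTORE_GUEST_AAU_AASR(aau_context, true);
+ *
+ * so that the save path spills every AAU part even when the guest's
+ * own aasr claims less.
+ */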
, AASTA15); + */ + +#ifndef _E2K_AAU_REGS_TYPES_H_ +#define _E2K_AAU_REGS_TYPES_H_ + +#include +#include + +#if CONFIG_CPU_ISET >= 5 +# define IS_AAU_ISET_V5() true +# define IS_AAU_ISET_V2() false +# define IS_AAU_ISET_GENERIC() false +#elif CONFIG_CPU_ISET >= 1 +# define IS_AAU_ISET_V2() true +# define IS_AAU_ISET_V5() false +# define IS_AAU_ISET_GENERIC() false +#elif CONFIG_CPU_ISET == 0 +# define IS_AAU_ISET_GENERIC() true +# define IS_AAU_ISET_V2() false +# define IS_AAU_ISET_V5() false +#else /* CONFIG_CPU_ISET undefined or negative */ +# warning "Undefined CPU ISET VERSION #, IS_AAU_ISET_Vx is defined dinamicaly" +# define IS_AAU_ISET_GENERIC() true +# define IS_AAU_ISET_V2() false +# define IS_AAU_ISET_V5() false +#endif /* CONFIG_CPU_ISET 0-6 */ + +/* Values for AASR.lds */ +enum { + AASR_NULL = 0, + AASR_READY = 1, + AASR_ACTIVE = 3, + AASR_STOPPED = 5 +}; +#define AAU_AASR_STB 0x20 +#define AAU_AASR_IAB 0x40 +typedef struct e2k_aasr_fields { + u32 reserved : 5; /* [4:0] */ + u32 stb : 1; /* [5:5] */ + u32 iab : 1; /* [6:6] */ + u32 lds : 3; /* [9:7] */ +} e2k_aasr_fields_t; +typedef union e2k_aasr { /* aadj quad-word */ + e2k_aasr_fields_t fields; + u32 word; +} e2k_aasr_t; + +/* Check up AAU state */ +#define AAU_NULL(aasr) (AS(aasr).lds == AASR_NULL) +#define AAU_READY(aasr) (AS(aasr).lds == AASR_READY) +#define AAU_ACTIVE(aasr) (AS(aasr).lds == AASR_ACTIVE) +#define AAU_STOPPED(aasr) (AS(aasr).lds == AASR_STOPPED) + +typedef u32 e2k_aafstr_t; + +/* Values for AAD.tag */ +enum { + AAD_AAUNV = 0, + AAD_AAUDT = 1, + AAD_AAUET = 2, + AAD_AAUAP = 4, + AAD_AAUSAP = 5, + AAD_AAUDS = 6 +}; + +/* We are not using AAD SAP format here + * so it is not described in the structure */ +typedef union e2k_aadj_lo_fields { + struct { + u64 ap_base : E2K_VA_SIZE; /* [E2K_VA_MSB:0] */ + u64 unused1 : 53 - E2K_VA_MSB; /* [53:48] */ + u64 tag : 3; /* [56:54] */ + u64 mb : 1; /* [57] */ + u64 ed : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 unused2 : 3; /* [63:60] */ + }; + struct { + u64 sap_base : 32; + u64 psl : 16; + u64 __pad : 16; + }; +} e2k_aadj_lo_fields_t; +typedef struct e2k_aadj_hi_fields { + u64 unused : 32; + u64 size : 32; /* [63:32] */ +} e2k_aadj_hi_fields_t; +typedef union e2k_aadj { /* aadj quad-word */ + struct { + e2k_aadj_lo_fields_t lo; + e2k_aadj_hi_fields_t hi; + } fields; + struct { + u64 lo; + u64 hi; + } word; +} e2k_aadj_t; + +/* Possible values for aalda.exc field */ +enum { + AALDA_EIO = 1, + AALDA_EPM = 2, + AALDA_EPMSI = 3 +}; + +union e2k_u64_struct { /* aaldv,aaldm,aasta_restore dword */ + struct { + u32 lo; /* read/write on left channel */ + u32 hi; /* read/write on right channel */ + }; + u64 word; +}; +typedef union e2k_u64_struct e2k_aaldv_t; +typedef union e2k_u64_struct e2k_aaldm_t; + +typedef struct e2k_aalda_fields { + u8 exc: 2; + u8 cincr: 1; + u8 unused1: 1; + u8 root: 1; + u8 unused2: 3; +} e2k_aalda_fields_t; + +typedef union e2k_aalda_struct { + e2k_aalda_fields_t fields; + u8 word; +} e2k_aalda_t; + +#define AASTIS_REGS_NUM 16 +#define AASTIS_TAG_no AASTIS_REGS_NUM +#define AAINDS_REGS_NUM 16 +#define AAINDS_TAG_no AAINDS_REGS_NUM +#define AAINCRS_REGS_NUM 8 +#define AAINCRS_TAG_no AAINCRS_REGS_NUM +#define AADS_REGS_NUM 32 +#define AALDIS_REGS_NUM 64 +#define AALDAS_REGS_NUM 64 + +/* + * For virtualization, aasr might be switched to worst-case scenario (lds = AAU_STOPPED, + * iab = 1, stb = 1). 
+
+typedef u32 e2k_aafstr_t;
+
+/* Values for AAD.tag */
+enum {
+	AAD_AAUNV = 0,
+	AAD_AAUDT = 1,
+	AAD_AAUET = 2,
+	AAD_AAUAP = 4,
+	AAD_AAUSAP = 5,
+	AAD_AAUDS = 6
+};
+
+/* We are not using the AAD SAP format here,
+ * so it is not described in the structure */
+typedef union e2k_aadj_lo_fields {
+	struct {
+		u64 ap_base	: E2K_VA_SIZE;		/* [E2K_VA_MSB:0] */
+		u64 unused1	: 53 - E2K_VA_MSB;	/* [53:48] */
+		u64 tag		: 3;			/* [56:54] */
+		u64 mb		: 1;			/* [57] */
+		u64 ed		: 1;			/* [58] */
+		u64 rw		: 2;			/* [60:59] */
+		u64 unused2	: 3;			/* [63:60] */
+	};
+	struct {
+		u64 sap_base	: 32;
+		u64 psl		: 16;
+		u64 __pad	: 16;
+	};
+} e2k_aadj_lo_fields_t;
+typedef struct e2k_aadj_hi_fields {
+	u64 unused	: 32;
+	u64 size	: 32;	/* [63:32] */
+} e2k_aadj_hi_fields_t;
+typedef union e2k_aadj {	/* aadj quad-word */
+	struct {
+		e2k_aadj_lo_fields_t lo;
+		e2k_aadj_hi_fields_t hi;
+	} fields;
+	struct {
+		u64 lo;
+		u64 hi;
+	} word;
+} e2k_aadj_t;
+
+/* Possible values for aalda.exc field */
+enum {
+	AALDA_EIO = 1,
+	AALDA_EPM = 2,
+	AALDA_EPMSI = 3
+};
+
+union e2k_u64_struct {	/* aaldv,aaldm,aasta_restore dword */
+	struct {
+		u32 lo;	/* read/write on left channel */
+		u32 hi;	/* read/write on right channel */
+	};
+	u64 word;
+};
+typedef union e2k_u64_struct e2k_aaldv_t;
+typedef union e2k_u64_struct e2k_aaldm_t;
+
+typedef struct e2k_aalda_fields {
+	u8 exc:		2;
+	u8 cincr:	1;
+	u8 unused1:	1;
+	u8 root:	1;
+	u8 unused2:	3;
+} e2k_aalda_fields_t;
+
+typedef union e2k_aalda_struct {
+	e2k_aalda_fields_t fields;
+	u8 word;
+} e2k_aalda_t;
+
+#define AASTIS_REGS_NUM		16
+#define AASTIS_TAG_no		AASTIS_REGS_NUM
+#define AAINDS_REGS_NUM		16
+#define AAINDS_TAG_no		AAINDS_REGS_NUM
+#define AAINCRS_REGS_NUM	8
+#define AAINCRS_TAG_no		AAINCRS_REGS_NUM
+#define AADS_REGS_NUM		32
+#define AALDIS_REGS_NUM		64
+#define AALDAS_REGS_NUM		64
+
+/*
+ * For virtualization, aasr might be switched to the worst-case scenario
+ * (lds = AASR_STOPPED, iab = 1, stb = 1). In that case the real aasr is
+ * saved to guest_aasr.
+ */
+typedef struct e2k_aau_context {
+	e2k_aasr_t aasr;
+	e2k_aasr_t guest_aasr;
+	e2k_aafstr_t aafstr;
+	e2k_aaldm_t aaldm;
+	e2k_aaldv_t aaldv;
+
+	/* Synchronous part */
+	u64 aastis[AASTIS_REGS_NUM];
+	u32 aasti_tags;
+
+	/* Asynchronous part */
+	u64 aainds[AAINDS_REGS_NUM];
+	u32 aaind_tags;
+	u64 aaincrs[AAINCRS_REGS_NUM];
+	u32 aaincr_tags;
+	e2k_aadj_t aads[AADS_REGS_NUM];
+	/* %aaldi [synonym for %aaldsi] must be saved since iset v6 */
+	u64 aaldi[AALDIS_REGS_NUM];
+} e2k_aau_t;
+
+#endif /* _E2K_AAU_REGS_TYPES_H_ */
diff --git a/arch/e2k/include/asm/acenv.h b/arch/e2k/include/asm/acenv.h
new file mode 100644
index 0000000..c6fb4ab
--- /dev/null
+++ b/arch/e2k/include/asm/acenv.h
@@ -0,0 +1,10 @@
+
+#ifndef _ASM_E2K_ACENV_H_
+#define _ASM_E2K_ACENV_H_
+
+#include
+#define ACPI_FLUSH_CPU_CACHE()	write_back_CACHE_L12()
+#include
+
+#endif /* _ASM_E2K_ACENV_H_ */
+
diff --git a/arch/e2k/include/asm/acpi.h b/arch/e2k/include/asm/acpi.h
new file mode 100644
index 0000000..7b5bcd2
--- /dev/null
+++ b/arch/e2k/include/asm/acpi.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_ACPI_H
+#define __ASM_ACPI_H
+
+#include
+
+#endif
diff --git a/arch/e2k/include/asm/alternative-asm.h b/arch/e2k/include/asm/alternative-asm.h
new file mode 100644
index 0000000..c68bba4
--- /dev/null
+++ b/arch/e2k/include/asm/alternative-asm.h
@@ -0,0 +1,193 @@
+#ifndef _ASM_E2K_ALTERNATIVE_ASM_H
+#define _ASM_E2K_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Check the length of an instruction sequence; it must be a multiple of 8.
+ */
+.macro alt_len_check start,end
+	.if ( \end - \start ) % 8
+	.error "cpu alternatives instructions length is not divisible by 8\n"
+	.endif
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (it needs to be put into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+	.align 4
+	.word \orig_start - .
+	.word \alt_start - .
+	.short \orig_end - \orig_start
+	.short \alt_end - \alt_start
+	.short \feature
+.endm
+
+.macro alt_pad_64bytes bytes, check
+	.if ( \bytes >= \check )
+	.fill 1, 4, 0x00000070
+	.fill 15, 4, 0
+	.endif
+.endm
+
+/*
+ * Fill up @bytes with nops.
+ */
+.macro alt_pad bytes
+	.if ( \bytes >= 576 )
+	ibranch . + \bytes
+	alt_pad_fill \bytes - 16
+	.else
+	alt_pad_64bytes \bytes, 512
+	alt_pad_64bytes \bytes, 448
+	alt_pad_64bytes \bytes, 384
+	alt_pad_64bytes \bytes, 320
+	alt_pad_64bytes \bytes, 256
+	alt_pad_64bytes \bytes, 192
+	alt_pad_64bytes \bytes, 128
+	alt_pad_64bytes \bytes, 64
+	.if ( \bytes % 64 ) == 56
+	.fill 1, 4, 0x00000060
+	.fill 13, 4, 0
+	.endif
+	.if ( \bytes % 64 ) == 48
+	.fill 1, 4, 0x00000050
+	.fill 11, 4, 0
+	.endif
+	.if ( \bytes % 64 ) == 40
+	.fill 1, 4, 0x00000040
+	.fill 9, 4, 0
+	.endif
+	.if ( \bytes % 64 ) == 32
+	.fill 1, 4, 0x00000030
+	.fill 7, 4, 0
+	.endif
+	.if ( \bytes % 64 ) == 24
+	.fill 1, 4, 0x00000020
+	.fill 5, 4, 0
+	.endif
+	.if ( \bytes % 64 ) == 16
+	.fill 1, 4, 0x00000010
+	.fill 3, 4, 0
+	.endif
+	.if ( \bytes % 64 ) == 8
+	.fill 2, 4, 0
+	.endif
+	.endif
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */ +.macro ALTERNATIVE oldinstr, newinstr, feature + .pushsection .altinstr_replacement,"ax" +770: \newinstr +771: .popsection +772: \oldinstr +773: alt_len_check 770b, 771b + alt_len_check 772b, 773b + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) +774: .pushsection .altinstructions,"a" + alt_entry 772b, 774b, 770b, 771b, \feature + .popsection +.endm + +/* + * Define an alternative between three instructions. + */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 + .pushsection .altinstr_replacement,"ax" +770: \newinstr1 +771: \newinstr2 +772: .popsection +773: \oldinstr +774: alt_len_check 770b, 771b + alt_len_check 771b, 772b + alt_len_check 773b, 774b + .if ( 771b - 770b > 772b - 771b ) + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) + .else + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) + .endif +775: .pushsection .altinstructions,"a" + alt_entry 773b, 775b, 770b, 771b,\feature1 + alt_entry 773b, 775b, 771b, 772b,\feature2 + .popsection +.endm + + +/* + * bug 110687: we cannot pass e2k wide instructions to GNU assembler .macro + * as a parameter in a sane way so use the following in complex cases. + * How to use: + * + * 1) There is one alternative + * + * ALTERNATIVE_1_ALTINSTR + * < alt. instruction > + * ALTERNATIVE_2_OLDINSTR + * < initial instruction > + * ALTERNATIVE_3_FEATURE + * + * 2) There are two alternatives + * + * ALTERNATIVE_1_ALTINSTR + * "< first alt. instruction >" + * ALTERNATIVE_2_ALTINSTR2 + * "< second alt. instruction >" + * ALTERNATIVE_3_OLDINSTR2 + * "< initial instruction >" + * ALTERNATIVE_4_FEATURE2(feature1, feature2) + */ +#define ALTERNATIVE_1_ALTINSTR \ + .pushsection .altinstr_replacement,"ax" ; \ + 770: + +#define ALTERNATIVE_2_OLDINSTR \ + 771: ; \ + .popsection ; \ + 772: + +#define ALTERNATIVE_3_FEATURE(feature) \ + 773: ; \ + alt_len_check 770b, 771b ; \ + alt_len_check 772b, 773b ; \ + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) ; \ + 774: ; \ + .pushsection .altinstructions,"a" ; \ + alt_entry 772b, 774b, 770b, 771b, feature ; \ + .popsection + +#define ALTERNATIVE_2_ALTINSTR2 \ + 771: + +#define ALTERNATIVE_3_OLDINSTR2 \ + 772: ; \ + .popsection ; \ + 773: + +#define ALTERNATIVE_4_FEATURE2(feature1, feature2) \ + 774: ; \ + alt_len_check 770b, 771b ; \ + alt_len_check 771b, 772b ; \ + alt_len_check 773b, 774b ; \ + .if ( 771b - 770b > 772b - 771b ) ; \ + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) ; \ + .else ; \ + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) ; \ + .endif ; \ + 775: ; \ + .pushsection .altinstructions,"a" ; \ + alt_entry 773b, 775b, 770b, 771b, feature1 ; \ + alt_entry 773b, 775b, 771b, 772b, feature2 ; \ + .popsection + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_E2K_ALTERNATIVE_ASM_H */ diff --git a/arch/e2k/include/asm/alternative.h b/arch/e2k/include/asm/alternative.h new file mode 100644 index 0000000..12d0505 --- /dev/null +++ b/arch/e2k/include/asm/alternative.h @@ -0,0 +1,260 @@ +#ifndef _ASM_E2K_ALTERNATIVE_H +#define _ASM_E2K_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include + +struct alt_instr { + s32 instr_offset; /* original instruction */ + s32 repl_offset; /* offset to replacement instruction */ + u16 instrlen; /* length of original instruction */ + u16 replacementlen; /* length of new instruction */ + u16 facility; /* facility bit set for replacement */ +} __aligned(4); + +void apply_alternative_instructions(void); +void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +/* + * An example when first alternative instruction is the biggest, + * and original 
instruction is the smallest. + * + * Original instruction is padded statically at compile time, + * while alternative instructions are padded if necessary in + * runtime when patching them in. + * + * |661: |662: |663: + * +-----------+---------+-----------------+ + * | oldinstr | oldinstr_padding | + * | +---------+-----------------+ + * | | ibranch if length >= 576 | + * | | 64-bytes NOPs otherwise | + * +-----------+---------+-----------------+ + * ^^^^^^ static padding ^^^^^ + * + * .altinstr_replacement section + * +-----------+---------+-----------------+ + * |6641: |6651: + * | alternative instr 1 | + * +-----------+---------+- - - - - - - - -+ + * |6642: |6652: | + * | alternative instr 2 | padding | + * +-----------+---------+- - - - - - - - -+ + * ^runtime padding^ + * + * + * 'struct alt_instr' holds details about how and when + * instructions must be replaced: + * + * .altinstructions section + * +----------------------------+ + * | alt_instr entries for each | + * | alternative instruction | + * +----------------------------+ + */ + +#define b_altinstr(num) "664"#num +#define e_altinstr(num) "665"#num + +#define e_oldinstr_pad_end "663" +#define oldinstr_len "662b-661b" +#define oldinstr_total_len e_oldinstr_pad_end"b-661b" +#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" +#define oldinstr_pad_len(num) \ + "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \ + "((" altinstr_len(num) ")-(" oldinstr_len "))" + +#define INSTR_LEN_SANITY_CHECK(len) \ + ".if (" len ") %% 8\n" \ + "\t.error \"cpu alternatives instructions length is not divisible by 8\"\n" \ + ".endif\n" + +#define OLDINSTR_PAD_64_BYTES(num, check) \ + ".if " oldinstr_pad_len(num) " >= " __stringify(check) "\n" \ + "\t.fill 1, 4, 0x00000070\n" \ + "\t.fill 15, 4, 0\n" \ + ".endif\n" + +#define OLDINSTR_PADDING(oldinstr, num) \ + ".if " oldinstr_pad_len(num) " >= 576\n" \ + "\tibranch " e_oldinstr_pad_end "f\n" \ + "6620:\n" \ + "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 8, 8, 0\n" \ + ".else\n" \ + OLDINSTR_PAD_64_BYTES(num, 512) \ + OLDINSTR_PAD_64_BYTES(num, 448) \ + OLDINSTR_PAD_64_BYTES(num, 384) \ + OLDINSTR_PAD_64_BYTES(num, 320) \ + OLDINSTR_PAD_64_BYTES(num, 256) \ + OLDINSTR_PAD_64_BYTES(num, 192) \ + OLDINSTR_PAD_64_BYTES(num, 128) \ + OLDINSTR_PAD_64_BYTES(num, 64) \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 56\n" \ + "\t.fill 1, 4, 0x00000060\n" \ + "\t.fill 13, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 48\n" \ + "\t.fill 1, 4, 0x00000050\n" \ + "\t.fill 11, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 40\n" \ + "\t.fill 1, 4, 0x00000040\n" \ + "\t.fill 9, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 32\n" \ + "\t.fill 1, 4, 0x00000030\n" \ + "\t.fill 7, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 24\n" \ + "\t.fill 1, 4, 0x00000020\n" \ + "\t.fill 5, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 16\n" \ + "\t.fill 1, 4, 0x00000010\n" \ + "\t.fill 3, 4, 0\n" \ + ".endif\n" \ + ".if ( " oldinstr_pad_len(num) " %% 64 ) == 8\n" \ + "\t.fill 2, 4, 0\n" \ + ".endif\n" \ + ".endif\n" + +#define OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + OLDINSTR_PADDING(oldinstr, num) \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \ + OLDINSTR_PADDING(oldinstr, num2) \ + ".else\n" \ 
+ OLDINSTR_PADDING(oldinstr, num1) \ + ".endif\n" \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define ALTINSTR_ENTRY(facility, num) \ + "\t.align 4\n" \ + "\t.word 661b - .\n" /* old instruction */ \ + "\t.word " b_altinstr(num)"b - .\n" /* alt instruction */ \ + "\t.short " oldinstr_total_len "\n" /* source len */ \ + "\t.short " altinstr_len(num) "\n" /* alt instruction len */ \ + "\t.short " __stringify(facility) "\n" /* facility bit */ + +#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(num)) + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, altinstr, facility) \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr, 1) \ + ".popsection\n" \ + OLDINSTR(oldinstr, 1) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility, 1) \ + ".popsection\n" + +#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr1, 1) \ + ALTINSTR_REPLACEMENT(altinstr2, 2) \ + ".popsection\n" \ + OLDINSTR_2(oldinstr, 1, 2) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility1, 1) \ + ALTINSTR_ENTRY(facility2, 2) \ + ".popsection\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * oldinstr is padded with jump and nops at compile time if altinstr is + * longer. altinstr is padded with jump and nops at run-time during patching. + */ +#define alternative(oldinstr, altinstr, facility, clobbers...) \ + asm volatile (ALTERNATIVE(oldinstr, altinstr, facility) \ + ::: clobbers) + +#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \ + asm volatile (ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ + altinstr2, facility2) \ + ::: clobbers) + +/* + * How to use: + * + * 1) There is one alternative + * + * asm volatile ( + * ALTERNATIVE_1_ALTINSTR + * "< alt. instruction >" + * ALTERNATIVE_2_OLDINSTR + * "< initial instruction >" + * ALTERNATIVE_3_FEATURE(feature) + * ) + * + * 2) There are two alternatives + * + * asm volatile ( + * ALTERNATIVE_1_ALTINSTR + * "< first alt. instruction >" + * ALTERNATIVE_2_ALTINSTR2 + * "< second alt. 
instruction >" + * ALTERNATIVE_3_OLDINSTR2 + * "< initial instruction >" + * ALTERNATIVE_4_FEATURE2(feature1, feature2) + * ) + */ +#define ALTERNATIVE_1_ALTINSTR \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + b_altinstr(1)":\n" + +#define ALTERNATIVE_2_OLDINSTR \ + "\n" e_altinstr(1) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(1)) \ + ".popsection\n" \ + "661:\n" + +#define ALTERNATIVE_3_FEATURE(facility) \ + "\n662:\n" \ + OLDINSTR_PADDING(oldinstr, 1) \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility, 1) \ + ".popsection\n" + +#define ALTERNATIVE_2_ALTINSTR2 \ + "\n" e_altinstr(1) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(1)) \ + b_altinstr(2)":\n" + +#define ALTERNATIVE_3_OLDINSTR2 \ + "\n" e_altinstr(2) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(2)) \ + ".popsection\n" \ + "661:\n" + +#define ALTERNATIVE_4_FEATURE2(facility1, facility2) \ + "\n662:\n" \ + ".if " altinstr_len(1) " < " altinstr_len(2) "\n" \ + OLDINSTR_PADDING(oldinstr, 2) \ + ".else\n" \ + OLDINSTR_PADDING(oldinstr, 1) \ + ".endif\n" \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility1, 1) \ + ALTINSTR_ENTRY(facility2, 2) \ + ".popsection\n" + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_E2K_ALTERNATIVE_H */ diff --git a/arch/e2k/include/asm/apic.h b/arch/e2k/include/asm/apic.h new file mode 100644 index 0000000..8d6081a --- /dev/null +++ b/arch/e2k/include/asm/apic.h @@ -0,0 +1,46 @@ +#ifndef __ASM_E2K_APIC_H +#define __ASM_E2K_APIC_H + +#ifdef __KERNEL__ +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * Basic functions accessing APICs. + */ +static inline void arch_apic_write(unsigned int reg, unsigned int v) +{ + boot_writel(v, (void __iomem *) (APIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline unsigned int arch_apic_read(unsigned int reg) +{ + return boot_readl((void __iomem *) (APIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline void boot_arch_apic_write(unsigned int reg, unsigned int v) +{ + arch_apic_write(reg, v); +} + +static inline unsigned int boot_arch_apic_read(unsigned int reg) +{ + return arch_apic_read(reg); +} + +#if IS_ENABLED(CONFIG_RDMA) || IS_ENABLED(CONFIG_RDMA_SIC) || \ + IS_ENABLED(CONFIG_RDMA_NET) +extern int rdma_apic_init; +extern int rdma_node[]; +#endif + +#endif /* !(__ASSEMBLY__) */ + +#include + +#endif /* __KERNEL__ */ +#endif /* __ASM_E2K_APIC_H */ diff --git a/arch/e2k/include/asm/apic_regs.h b/arch/e2k/include/asm/apic_regs.h new file mode 100644 index 0000000..5fbabfc --- /dev/null +++ b/arch/e2k/include/asm/apic_regs.h @@ -0,0 +1,276 @@ +#ifndef __ASM_APIC_REGS_H +#define __ASM_APIC_REGS_H + + +#ifndef __ASSEMBLY__ + +/* + * the local APIC register structure, memory mapped. Not terribly well + * tested, but we might eventually use this one in the future - the + * problem why we cannot use it right now is the P5 APIC, it has an + * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
+ */ +#define u32 unsigned int + +struct local_apic { + +/*000*/ struct { u32 __reserved[4]; } __reserved_01; + +/*010*/ struct { u32 __reserved_1 : 8, + boot_strap : 1, + __reserved_2 : 2, + apic_enable : 1, + __reserved_3 : 20; + u32 __reserved[3]; + } bsp; + +/*020*/ struct { /* APIC ID Register */ + u32 __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + u32 __reserved[3]; + } id; + +/*030*/ const + struct { /* APIC Version Register */ + u32 version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + u32 __reserved[3]; + } version; + +/*040*/ struct { u32 __reserved[4]; } __reserved_03; + +/*050*/ struct { u32 __reserved[4]; } __reserved_04; + +/*060*/ struct { u32 __reserved[4]; } __reserved_05; + +/*070*/ struct { u32 __reserved[4]; } __reserved_06; + +/*080*/ struct { /* Task Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } tpr; + +/*090*/ const + struct { /* Arbitration Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } apr; + +/*0A0*/ const + struct { /* Processor Priority Register */ + u32 priority : 8, + __reserved_1 : 24; + u32 __reserved_2[3]; + } ppr; + +/*0B0*/ struct { /* End Of Interrupt Register */ + u32 eoi; + u32 __reserved[3]; + } eoi; + +/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; + +/*0D0*/ struct { /* Logical Destination Register */ + u32 __reserved_1 : 24, + logical_dest : 8; + u32 __reserved_2[3]; + } ldr; + +/*0E0*/ struct { /* Destination Format Register */ + u32 __reserved_1 : 28, + model : 4; + u32 __reserved_2[3]; + } dfr; + +/*0F0*/ struct { /* Spurious Interrupt Vector Register */ + u32 spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + u32 __reserved_3[3]; + } svr; + +/*100*/ struct { /* In Service Register */ +/*170*/ u32 bitfield; + u32 __reserved[3]; + } isr [8]; + +/*180*/ struct { /* Trigger Mode Register */ +/*1F0*/ u32 bitfield; + u32 __reserved[3]; + } tmr [8]; + +/*200*/ struct { /* Interrupt Request Register */ +/*270*/ u32 bitfield; + u32 __reserved[3]; + } irr [8]; + +/*280*/ union { /* Error Status Register */ + struct { + u32 send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + u32 __reserved_3[3]; + } error_bits; + struct { + u32 errors; + u32 __reserved_3[3]; + } all_errors; + } esr; + +/*290*/ struct { u32 __reserved[4]; } __reserved_08; + +/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; + +/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; + +/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; + +/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; + +/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; + +/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; + +/*300*/ struct { /* Interrupt Command Register 1 */ + u32 vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + u32 __reserved_4[3]; + } icr1; + +/*310*/ struct { /* Interrupt Command Register 2 */ + union { + u32 __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + u32 __reserved_3 : 24, + logical_dest : 8; + } dest; + u32 __reserved_4[3]; + } icr2; + +/*320*/ struct { /* LVT - Timer */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + u32 
__reserved_4[3]; + } lvt_timer; + +/*330*/ struct { u32 __reserved[4]; } __reserved_15; + +/*340*/ struct { /* LVT - Performance Counter */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_pc; + +/*350*/ struct { /* LVT - LINT0 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint0; + +/*360*/ struct { /* LVT - LINT1 */ + u32 vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + u32 __reserved_3[3]; + } lvt_lint1; + +/*370*/ struct { /* LVT - Error */ + u32 vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + u32 __reserved_4[3]; + } lvt_error; + +/*380*/ struct { /* Timer Initial Count Register */ + u32 initial_count; + u32 __reserved_2[3]; + } timer_icr; + +/*390*/ const + struct { /* Timer Current Count Register */ + u32 curr_count; + u32 __reserved_2[3]; + } timer_ccr; + +/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; + +/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; + +/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; + +/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; + +/*3E0*/ struct { /* Timer Divide Configuration Register */ + u32 divisor : 4, + __reserved_1 : 28; + u32 __reserved_2[3]; + } timer_dcr; + +/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; +#if 0 +/*3F0*/ struct { u32 __reserved[764]; } __reserved_20; +/*FE0*/ struct { /* Vector from PIC or APIC in nmi */ + u32 nm_vector : 8, + __reserved : 24; + u32 __reserved[3]; + } nm_vect; +/*FF0*/ struct { /* Vector */ + u32 vector : 8, + __reserved_1 : 24; + u32 __reserved[3]; + } vect; +#endif +} __attribute__ ((packed)); + +#undef u32 + +#endif /* !(__ASSEMBLY__) */ + +#endif /* __ASM_APIC_REGS_H */ diff --git a/arch/e2k/include/asm/apicdef.h b/arch/e2k/include/asm/apicdef.h new file mode 100644 index 0000000..70fc4a9 --- /dev/null +++ b/arch/e2k/include/asm/apicdef.h @@ -0,0 +1,9 @@ +#ifndef __ASM_E2K_APICDEF_H +#define __ASM_E2K_APICDEF_H + +#ifdef __KERNEL__ +#include +#include +#endif + +#endif /* __ASM_E2K_APICDEF_H */ diff --git a/arch/e2k/include/asm/atomic.h b/arch/e2k/include/asm/atomic.h new file mode 100644 index 0000000..75b7a20 --- /dev/null +++ b/arch/e2k/include/asm/atomic.h @@ -0,0 +1,380 @@ +#ifndef _E2K_ATOMIC_ +#define _E2K_ATOMIC_ + +#include +#include +#include + +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } + +#define atomic_read(v) READ_ONCE((v)->counter) +#define atomic64_read(v) READ_ONCE((v)->counter) + +#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) +#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i)) + +static inline void atomic_and(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "ands", RELAXED_MB); +} + +static inline void atomic64_and(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "andd", RELAXED_MB); +} + +#define atomic_andnot atomic_andnot +static inline void atomic_andnot(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "andns", RELAXED_MB); +} + +#define atomic64_andnot atomic64_andnot +static inline void atomic64_andnot(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "andnd", RELAXED_MB); +} + +static inline void 
atomic_or(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "ors", RELAXED_MB); +} + +static inline void atomic64_or(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "ord", RELAXED_MB); +} + +static inline void atomic_xor(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "xors", RELAXED_MB); +} + +static inline void atomic64_xor(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "xord", RELAXED_MB); +} + +static inline void atomic_add(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "adds", RELAXED_MB); +} + +static inline void atomic64_add(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "addd", RELAXED_MB); +} + +static inline void atomic_sub(int incr, atomic_t *val) +{ + __api_atomic_op(incr, &val->counter, w, "subs", RELAXED_MB); +} + +static inline void atomic64_sub(__s64 incr, atomic64_t *val) +{ + __api_atomic_op(incr, &val->counter, d, "subd", RELAXED_MB); +} + +#define __atomic_add_return(v, p, mem_model) \ + __api_atomic_op((int) (v), &(p)->counter, w, "adds", mem_model) +#define atomic_add_return_relaxed(v, p) __atomic_add_return((v), (p), RELAXED_MB) +#define atomic_add_return_acquire(v, p) __atomic_add_return((v), (p), ACQUIRE_MB) +#define atomic_add_return_release(v, p) __atomic_add_return((v), (p), RELEASE_MB) +#define atomic_add_return(v, p) __atomic_add_return((v), (p), STRONG_MB) +#define atomic_add_return_lock(v, p) __atomic_add_return((v), (p), LOCK_MB) + +#define __atomic64_add_return(v, p, mem_model) \ + __api_atomic_op((__s64) (v), &(p)->counter, d, "addd", mem_model) +#define atomic64_add_return_relaxed(v, p) __atomic64_add_return((v), (p), RELAXED_MB) +#define atomic64_add_return_acquire(v, p) __atomic64_add_return((v), (p), ACQUIRE_MB) +#define atomic64_add_return_release(v, p) __atomic64_add_return((v), (p), RELEASE_MB) +#define atomic64_add_return(v, p) __atomic64_add_return((v), (p), STRONG_MB) + +#define __atomic_sub_return(v, p, mem_model) \ + __api_atomic_op((int) (v), &(p)->counter, w, "subs", mem_model) +#define atomic_sub_return_relaxed(v, p) __atomic_sub_return((v), (p), RELAXED_MB) +#define atomic_sub_return_acquire(v, p) __atomic_sub_return((v), (p), ACQUIRE_MB) +#define atomic_sub_return_release(v, p) __atomic_sub_return((v), (p), RELEASE_MB) +#define atomic_sub_return(v, p) __atomic_sub_return((v), (p), STRONG_MB) + +#define __atomic64_sub_return(v, p, mem_model) \ + __api_atomic_op((__s64) (v), &(p)->counter, d, "subd", mem_model) +#define atomic64_sub_return_relaxed(v, p) __atomic64_sub_return((v), (p), RELAXED_MB) +#define atomic64_sub_return_acquire(v, p) __atomic64_sub_return((v), (p), ACQUIRE_MB) +#define atomic64_sub_return_release(v, p) __atomic64_sub_return((v), (p), RELEASE_MB) +#define atomic64_sub_return(v, p) __atomic64_sub_return((v), (p), STRONG_MB) + +#define __atomic_fetch_add(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "adds", mem_model) +#define atomic_fetch_add_relaxed(v, p) __atomic_fetch_add((v), (p), RELAXED_MB) +#define atomic_fetch_add_acquire(v, p) __atomic_fetch_add((v), (p), ACQUIRE_MB) +#define atomic_fetch_add_release(v, p) __atomic_fetch_add((v), (p), RELEASE_MB) +#define atomic_fetch_add(v, p) __atomic_fetch_add((v), (p), STRONG_MB) + +#define __atomic64_fetch_add(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "addd", mem_model) +#define atomic64_fetch_add_relaxed(v, p) __atomic64_fetch_add((v), (p), RELAXED_MB) +#define 
atomic64_fetch_add_acquire(v, p) __atomic64_fetch_add((v), (p), ACQUIRE_MB) +#define atomic64_fetch_add_release(v, p) __atomic64_fetch_add((v), (p), RELEASE_MB) +#define atomic64_fetch_add(v, p) __atomic64_fetch_add((v), (p), STRONG_MB) + +#define __atomic_fetch_sub(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "subs", mem_model) +#define atomic_fetch_sub_relaxed(v, p) __atomic_fetch_sub((v), (p), RELAXED_MB) +#define atomic_fetch_sub_acquire(v, p) __atomic_fetch_sub((v), (p), ACQUIRE_MB) +#define atomic_fetch_sub_release(v, p) __atomic_fetch_sub((v), (p), RELEASE_MB) +#define atomic_fetch_sub(v, p) __atomic_fetch_sub((v), (p), STRONG_MB) + +#define __atomic64_fetch_sub(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "subd", mem_model) +#define atomic64_fetch_sub_relaxed(v, p) __atomic64_fetch_sub((v), (p), RELAXED_MB) +#define atomic64_fetch_sub_acquire(v, p) __atomic64_fetch_sub((v), (p), ACQUIRE_MB) +#define atomic64_fetch_sub_release(v, p) __atomic64_fetch_sub((v), (p), RELEASE_MB) +#define atomic64_fetch_sub(v, p) __atomic64_fetch_sub((v), (p), STRONG_MB) + +#define __atomic_fetch_or(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "ors", mem_model) +#define atomic_fetch_or_relaxed(v, p) __atomic_fetch_or((v), (p), RELAXED_MB) +#define atomic_fetch_or_acquire(v, p) __atomic_fetch_or((v), (p), ACQUIRE_MB) +#define atomic_fetch_or_release(v, p) __atomic_fetch_or((v), (p), RELEASE_MB) +#define atomic_fetch_or(v, p) __atomic_fetch_or((v), (p), STRONG_MB) + +#define __atomic64_fetch_or(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "ord", mem_model) +#define atomic64_fetch_or_relaxed(v, p) __atomic64_fetch_or((v), (p), RELAXED_MB) +#define atomic64_fetch_or_acquire(v, p) __atomic64_fetch_or((v), (p), ACQUIRE_MB) +#define atomic64_fetch_or_release(v, p) __atomic64_fetch_or((v), (p), RELEASE_MB) +#define atomic64_fetch_or(v, p) __atomic64_fetch_or((v), (p), STRONG_MB) + +#define __atomic_fetch_and(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "ands", mem_model) +#define atomic_fetch_and_relaxed(v, p) __atomic_fetch_and((v), (p), RELAXED_MB) +#define atomic_fetch_and_acquire(v, p) __atomic_fetch_and((v), (p), ACQUIRE_MB) +#define atomic_fetch_and_release(v, p) __atomic_fetch_and((v), (p), RELEASE_MB) +#define atomic_fetch_and(v, p) __atomic_fetch_and((v), (p), STRONG_MB) + +#define __atomic64_fetch_and(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "andd", mem_model) +#define atomic64_fetch_and_relaxed(v, p) __atomic64_fetch_and((v), (p), RELAXED_MB) +#define atomic64_fetch_and_acquire(v, p) __atomic64_fetch_and((v), (p), ACQUIRE_MB) +#define atomic64_fetch_and_release(v, p) __atomic64_fetch_and((v), (p), RELEASE_MB) +#define atomic64_fetch_and(v, p) __atomic64_fetch_and((v), (p), STRONG_MB) + +#define __atomic_fetch_andnot(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "andns", mem_model) +#define atomic_fetch_andnot_relaxed(v, p) __atomic_fetch_andnot((v), (p), RELAXED_MB) +#define atomic_fetch_andnot_acquire(v, p) __atomic_fetch_andnot((v), (p), ACQUIRE_MB) +#define atomic_fetch_andnot_release(v, p) __atomic_fetch_andnot((v), (p), RELEASE_MB) +#define atomic_fetch_andnot(v, p) __atomic_fetch_andnot((v), (p), STRONG_MB) + +#define __atomic64_fetch_andnot(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "andnd", mem_model) +#define atomic64_fetch_andnot_relaxed(v, p) 
__atomic64_fetch_andnot((v), (p), RELAXED_MB) +#define atomic64_fetch_andnot_acquire(v, p) __atomic64_fetch_andnot((v), (p), ACQUIRE_MB) +#define atomic64_fetch_andnot_release(v, p) __atomic64_fetch_andnot((v), (p), RELEASE_MB) +#define atomic64_fetch_andnot(v, p) __atomic64_fetch_andnot((v), (p), STRONG_MB) + +#define __atomic_fetch_xor(v, p, mem_model) \ + __api_atomic_fetch_op((int) (v), &(p)->counter, w, "xors", mem_model) +#define atomic_fetch_xor_relaxed(v, p) __atomic_fetch_xor((v), (p), RELAXED_MB) +#define atomic_fetch_xor_acquire(v, p) __atomic_fetch_xor((v), (p), ACQUIRE_MB) +#define atomic_fetch_xor_release(v, p) __atomic_fetch_xor((v), (p), RELEASE_MB) +#define atomic_fetch_xor(v, p) __atomic_fetch_xor((v), (p), STRONG_MB) + +#define __atomic64_fetch_xor(v, p, mem_model) \ + __api_atomic_fetch_op((__s64) (v), &(p)->counter, d, "xord", mem_model) +#define atomic64_fetch_xor_relaxed(v, p) __atomic64_fetch_xor((v), (p), RELAXED_MB) +#define atomic64_fetch_xor_acquire(v, p) __atomic64_fetch_xor((v), (p), ACQUIRE_MB) +#define atomic64_fetch_xor_release(v, p) __atomic64_fetch_xor((v), (p), RELEASE_MB) +#define atomic64_fetch_xor(v, p) __atomic64_fetch_xor((v), (p), STRONG_MB) + +#define __atomic_xchg(p, v, mem_model) \ + (int)__api_xchg_return((int) (v), &(p)->counter, w, mem_model) +#define atomic_xchg_relaxed(p, v) __atomic_xchg((p), (v), RELAXED_MB) +#define atomic_xchg_acquire(p, v) __atomic_xchg((p), (v), ACQUIRE_MB) +#define atomic_xchg_release(p, v) __atomic_xchg((p), (v), RELEASE_MB) +#define atomic_xchg(p, v) __atomic_xchg((p), (v), STRONG_MB) + +#define __atomic64_xchg(p, v, mem_model) \ + __api_xchg_return((__s64) (v), &(p)->counter, d, mem_model) +#define atomic64_xchg_relaxed(p, v) __atomic64_xchg((p), (v), RELAXED_MB) +#define atomic64_xchg_acquire(p, v) __atomic64_xchg((p), (v), ACQUIRE_MB) +#define atomic64_xchg_release(p, v) __atomic64_xchg((p), (v), RELEASE_MB) +#define atomic64_xchg(p, v) __atomic64_xchg((p), (v), STRONG_MB) + +#define __atomic_cmpxchg(p, o, n, mem_model) \ + (int)__api_cmpxchg_word_return((int) (o), (int) (n), \ + &(p)->counter, mem_model) +#define atomic_cmpxchg_relaxed(p, o, n) __atomic_cmpxchg((p), (o), (n), RELAXED_MB) +#define atomic_cmpxchg_acquire(p, o, n) __atomic_cmpxchg((p), (o), (n), ACQUIRE_MB) +#define atomic_cmpxchg_release(p, o, n) __atomic_cmpxchg((p), (o), (n), RELEASE_MB) +#define atomic_cmpxchg(p, o, n) __atomic_cmpxchg((p), (o), (n), STRONG_MB) +#define atomic_cmpxchg_lock(p, o, n) __atomic_cmpxchg((p), (o), (n), LOCK_MB) + +#define __atomic64_cmpxchg(p, o, n, mem_model) \ + __api_cmpxchg_dword_return((__s64) (o), (__s64) (n), \ + &(p)->counter, mem_model) +#define atomic64_cmpxchg_relaxed(p, o, n) __atomic64_cmpxchg((p), (o), (n), RELAXED_MB) +#define atomic64_cmpxchg_acquire(p, o, n) __atomic64_cmpxchg((p), (o), (n), ACQUIRE_MB) +#define atomic64_cmpxchg_release(p, o, n) __atomic64_cmpxchg((p), (o), (n), RELEASE_MB) +#define atomic64_cmpxchg(p, o, n) __atomic64_cmpxchg((p), (o), (n), STRONG_MB) +#define atomic64_cmpxchg_lock(p, o, n) __atomic64_cmpxchg((p), (o), (n), LOCK_MB) + +#define atomic_long_cmpxchg_lock(p, o, n) atomic64_cmpxchg_lock((p), (o), (n)) + +#define atomic_inc_unless_negative atomic_inc_unless_negative +static inline bool atomic_inc_unless_negative(atomic_t *p) +{ + return __api_atomic32_fetch_inc_unless_negative(&p->counter) >= 0; +} + +#define atomic64_inc_unless_negative atomic64_inc_unless_negative +static inline bool atomic64_inc_unless_negative(atomic64_t *p) +{ + return 
__api_atomic64_fetch_inc_unless_negative(&p->counter) >= 0; +} + +#define atomic_dec_unless_positive atomic_dec_unless_positive +static inline bool atomic_dec_unless_positive(atomic_t *p) +{ + return __api_atomic32_fetch_dec_unless_positive(&p->counter) <= 0; +} + +#define atomic64_dec_unless_positive atomic64_dec_unless_positive +static inline bool atomic64_dec_unless_positive(atomic64_t *p) +{ + return __api_atomic64_fetch_dec_unless_positive(&p->counter) <= 0; +} + +/** + * atomic_dec_if_positive - decrement by 1 if old value positive + * @p: pointer of type atomic_t + * + * The function returns the old value of *p minus 1, even if + * the atomic variable, v, was not decremented. + */ +#define atomic_dec_if_positive atomic_dec_if_positive +static inline int atomic_dec_if_positive(atomic_t *p) +{ + return __api_atomic32_fetch_dec_if_positive(&p->counter) - 1; +} + +#define atomic64_dec_if_positive atomic64_dec_if_positive +static inline s64 atomic64_dec_if_positive(atomic64_t *p) +{ + return __api_atomic64_fetch_dec_if_positive(&p->counter) - 1; +} + +/** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +#define atomic_fetch_add_unless atomic_fetch_add_unless +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + return __api_atomic32_fetch_add_unless(a, &v->counter, u); +} + +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return __api_atomic64_fetch_add_unless(a, &v->counter, u); +} + +#define atomic_try_cmpxchg atomic_try_cmpxchg +static inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +static inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_acquire(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +static inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_release(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +static inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_relaxed(v, o, new); + *old = r; + return likely(r == o); +} + +static __always_inline bool atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_lock(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +static inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +static inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_acquire(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +static inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = 
*old; + r = atomic64_cmpxchg_release(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +static inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_relaxed(v, o, new); + *old = r; + return likely(r == o); +} + +static inline bool atomic64_try_cmpxchg_lock(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_lock(v, o, new); + *old = r; + return likely(r == o); +} + +#define atomic_long_try_cmpxchg_lock(p, o, n) atomic64_try_cmpxchg_lock((p), (s64 *) (o), (n)) + +#endif /* _E2K_ATOMIC_ */ diff --git a/arch/e2k/include/asm/atomic_api.h b/arch/e2k/include/asm/atomic_api.h new file mode 100644 index 0000000..02b6f02 --- /dev/null +++ b/arch/e2k/include/asm/atomic_api.h @@ -0,0 +1,892 @@ +#ifndef _ASM_E2K_ATOMIC_API_H_ +#define _ASM_E2K_ATOMIC_API_H_ + +#include +#include +#include +#include + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +/* + * Special page that is accessible for reading by every user + * process is used for hardware bug #89242 workaround. + */ +#define NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS 0xff6000000000UL + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) + +# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \ + NATIVE_HWBUG_WRITE_MEMORY_BARRIER_ADDRESS +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS \ + virt_cpu_has(CPU_HWBUG_WRITE_MEMORY_BARRIER) +# ifdef E2K_FAST_SYSCALL +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU NATIVE_GET_DSREG_OPEN(clkr) +# else +# ifndef __ASSEMBLY__ +# include +register unsigned long long __cpu_reg DO_ASM_GET_GREG_MEMONIC(SMP_CPU_ID_GREG); +# endif +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU __cpu_reg +# endif + +#elif defined(E2K_P2V) + +# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \ + (NATIVE_NV_READ_IP_REG_VALUE() & ~0x3fUL) +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0 +# if !defined(CONFIG_E2K_MACHINE) || defined(CONFIG_E2K_E8C) +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 1 +# else +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0 +# endif + +#else /* CONFIG_BOOT_E2K */ + +# define NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS \ + (NATIVE_NV_READ_IP_REG_VALUE() & ~0x3fUL) +# define NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS 0 +# define NATIVE_HWBUG_AFTER_LD_ACQ_CPU 0 + +#endif + +#if !defined CONFIG_E2K_MACHINE || defined CONFIG_E2K_E8C +/* Define these here to avoid include hell... 
*/
+# define _UPSR_IE 0x20U
+# define _UPSR_NMIE 0x80U
+# define NATIVE_HWBUG_AFTER_LD_ACQ() \
+do { \
+ unsigned long long __reg1, __reg2; \
+ if (NATIVE_HAS_HWBUG_AFTER_LD_ACQ_ADDRESS) { \
+ unsigned long __hwbug_cpu = NATIVE_HWBUG_AFTER_LD_ACQ_CPU; \
+ unsigned long __hwbug_address = \
+ NATIVE_HWBUG_AFTER_LD_ACQ_ADDRESS + \
+ (__hwbug_cpu & 0x3) * 4096; \
+ unsigned long __hwbug_atomic_flags; \
+ __hwbug_atomic_flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \
+ NATIVE_SET_UPSR_IRQ_BARRIER( \
+ __hwbug_atomic_flags & ~(_UPSR_IE | _UPSR_NMIE)); \
+ NATIVE_CLEAN_LD_ACQ_ADDRESS(__reg1, __reg2, __hwbug_address); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 0 * 4096 + 0 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 0 * 4096 + 4 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 8 * 4096 + 1 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 8 * 4096 + 5 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 16 * 4096 + 2 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 16 * 4096 + 6 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 24 * 4096 + 3 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ NATIVE_WRITE_MAS_D(__hwbug_address + 24 * 4096 + 7 * 64, 0UL, \
+ MAS_DCACHE_LINE_FLUSH); \
+ __E2K_WAIT(_fl_c); \
+ NATIVE_SET_UPSR_IRQ_BARRIER(__hwbug_atomic_flags); \
+ } \
+} while (0)
+#else
+# define NATIVE_HWBUG_AFTER_LD_ACQ() do { } while (0)
+#endif
+
+/* FIXME: only the hardware bug workaround macros will be paravirtualized */
+/* here, but in the general guest case these bugs can be worked around */
+/* only on the host; the guest should call the appropriate hypercalls to */
+/* run the whole atomic sequence on the host, since it contains */
+/* privileged actions */
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not virtualized based on pv_ops) */
+#include
+#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel with or without virtualization support */
+
+/* examine bare hardware bugs */
+#define virt_cpu_has(hwbug) cpu_has(hwbug)
+
+#define VIRT_HWBUG_AFTER_LD_ACQ() NATIVE_HWBUG_AFTER_LD_ACQ()
+#endif /* CONFIG_PARAVIRT_GUEST */
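The VIRT_HWBUG_AFTER_LD_ACQ_##mem_model() calls used throughout the macros below rely on token pasting: the memory-model argument (STRONG_MB, LOCK_MB, ACQUIRE_MB, RELEASE_MB, RELAXED_MB) decides at preprocessing time whether the workaround barrier is emitted at all. A minimal stand-alone sketch of the same dispatch pattern (all names here are illustrative, not from this patch):

#include <stdio.h>

/* per-memory-model hooks: only the acquiring flavors do real work */
#define AFTER_LD_ACQ_STRONG_MB()	puts("workaround barrier")
#define AFTER_LD_ACQ_ACQUIRE_MB()	puts("workaround barrier")
#define AFTER_LD_ACQ_RELAXED_MB()	/* expands to nothing */

/* one generic operation, specialized by pasting the mem_model suffix */
#define ATOMIC_ADD(val, var, mem_model) \
do { \
	(var) += (val);	/* stands in for the real atomic instruction */ \
	AFTER_LD_ACQ_##mem_model(); \
} while (0)

int main(void)
{
	int v = 0;
	ATOMIC_ADD(1, v, STRONG_MB);	/* the barrier hook is compiled in */
	ATOMIC_ADD(1, v, RELAXED_MB);	/* no barrier code is generated */
	return 0;
}

This is why VIRT_HWBUG_AFTER_LD_ACQ_RELEASE_MB() and VIRT_HWBUG_AFTER_LD_ACQ_RELAXED_MB() below are defined as empty: release and relaxed accesses do not need the load-acquire hardware bug workaround.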
+
+#define VIRT_HWBUG_AFTER_LD_ACQ_STRONG_MB VIRT_HWBUG_AFTER_LD_ACQ
+#define VIRT_HWBUG_AFTER_LD_ACQ_LOCK_MB VIRT_HWBUG_AFTER_LD_ACQ
+#define VIRT_HWBUG_AFTER_LD_ACQ_ACQUIRE_MB VIRT_HWBUG_AFTER_LD_ACQ
+#define VIRT_HWBUG_AFTER_LD_ACQ_RELEASE_MB()
+#define VIRT_HWBUG_AFTER_LD_ACQ_RELAXED_MB()
+
+
+#define virt_api_atomic32_add_if_not_negative(val, addr, mem_model) \
+({ \
+ register int rval; \
+ NATIVE_ATOMIC32_ADD_IF_NOT_NEGATIVE(val, addr, rval, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+#define virt_api_atomic64_add_if_not_negative(val, addr, mem_model) \
+({ \
+ register long long rval; \
+ NATIVE_ATOMIC64_ADD_IF_NOT_NEGATIVE(val, addr, rval, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+/* Atomically add to the 16 low bits and return the new 32-bit value */
+#define virt_api_atomic16_add_return32_lock(val, addr) \
+({ \
+ register int rval, tmp; \
+ NATIVE_ATOMIC16_ADD_RETURN32_LOCK(val, addr, rval, tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+/* Atomically add two 32-bit values packed into one 64-bit value */
+/* and return the new 64-bit value */
+#define virt_api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) \
+({ \
+ register long rval, tmp1, tmp2, tmp3; \
+ NATIVE_ATOMIC32_PAIR_ADD_RETURN64_LOCK(val_lo, val_hi, addr, rval, \
+ tmp1, tmp2, tmp3); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+/* Atomically subtract two 32-bit values packed into one 64-bit value */
+/* and return the new 64-bit value */
+#define virt_api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) \
+({ \
+ register long rval, tmp1, tmp2, tmp3; \
+ NATIVE_ATOMIC32_PAIR_SUB_RETURN64_LOCK(val_lo, val_hi, addr, rval, \
+ tmp1, tmp2, tmp3); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic_ticket_trylock(spinlock, tail_shift) \
+({ \
+ register int __rval; \
+ register int __val; \
+ register int __head; \
+ register int __tail; \
+ NATIVE_ATOMIC_TICKET_TRYLOCK(spinlock, tail_shift, \
+ __val, __head, __tail, __rval); \
+ VIRT_HWBUG_AFTER_LD_ACQ_LOCK_MB(); \
+ __rval; \
+})
+
+/*
+ * Atomic support of the new read/write spinlock mechanism.
+ * Locking is ordered and later readers cannot outrun former writers.
+ * The locking order is based on coupons (tickets) received on the first
+ * attempt to take the lock when it is already taken by someone else.
+ *
+ * The read/write spinlock's initial state allows 2^32 active readers and
+ * only one active writer, but the coupon discipline allows only 2^16
+ * simultaneously registered users of the lock: active + waiters
+ */
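The lock word manipulated by the macros below packs three fields: a 16-bit ticket (the next coupon to hand out), a 16-bit head (the coupon currently being served) and a 32-bit count of active users, negative for readers and positive (at most 1) for a writer. The exact layout of arch_rwlock_t is defined elsewhere, so the model below is an assumption for illustration only. A compilable sketch of the two "can lock" predicates that the following macros implement atomically:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed model of the 64-bit lock word (see arch_rwlock_t) */
struct rwlock_model {
	uint16_t ticket;	/* next coupon to hand out */
	uint16_t head;		/* coupon being served now */
	int32_t count;		/* <0: active readers, 1: active writer */
};

/* a reader may enter: nobody is queued and no writer is active */
static bool can_lock_reader(const struct rwlock_model *rw)
{
	return rw->ticket == rw->head && rw->count - 1 < 0;
}

/* a writer may enter: nobody is queued, no readers, no writer */
static bool can_lock_writer(const struct rwlock_model *rw)
{
	return rw->ticket == rw->head && rw->count == 0;
}

int main(void)
{
	struct rwlock_model rw = { 0, 0, 0 };

	printf("reader %d writer %d\n", can_lock_reader(&rw),
	       can_lock_writer(&rw));	/* 1 1: lock is free */
	rw.count = -2;			/* two active readers */
	printf("reader %d writer %d\n", can_lock_reader(&rw),
	       can_lock_writer(&rw));	/* 1 0: readers may share */
	return 0;
}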
But coupon discipline allows simultaniously + * have only 2^16 registered users of the lock: active + waiters + */ + +/* + * It is test: is read/write lock can be now taken by reader + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - locking can be successful' + * + * C equivalent: + * +static rwlock_val_t +atomic_can_lock_reader(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = rw->lock; + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active writers + success = (ticket == head) && (count-1 < 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_can_lock_reader(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __src; \ + \ + NATIVE_ATOMIC_CAN_LOCK_READER(__rw_addr, __success, \ + __head, __ticket, __count, __src); \ + __src; \ +}) + +/* + * It is test: is read/write lock can be now taken by writer + * Macros return source state of read/write lock and set bypassed boolean value + * 'success - locking can be successful' + * + * C equivalent: + * +static rwlock_val_t +atomic_can_lock_writer(arch_rwlock_t *rw, bool success // bypassed) +{ + arch_rwlock_t src_lock; + u16 ticket; + u16 head; + s32 count; + + src_lock.lock = rw->lock; + ticket = src_lock.ticket; + head = src_lock.head; + count = src_lock.count; + // can lock: none waiters and active readers and writers + success = (ticket == head) && (count == 0); + return src_lock.lock; +} + */ +#define virt_api_atomic_can_lock_writer(__rw_addr, __success) \ +({ \ + register unsigned int __head; \ + register unsigned int __ticket; \ + register int __count; \ + register unsigned long __src; \ + \ + NATIVE_ATOMIC_CAN_LOCK_WRITER(__rw_addr, __success, \ + __head, __ticket, __count, __src); \ + __src; \ +}) + +/* + * The first try to take read spinlock. 
+ * Successful locking increments # of ticket and head and decrements the
+ * active readers counter (negative counter)
+ * The macro returns the source state of the read/write lock and sets the
+ * by-reference boolean value 'success' - locking is successful; otherwise
+ * the reader receives a coupon and should be queued as a waiter, similar
+ * to the mutex implementation
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_new_reader(arch_rwlock_t *rw, bool success // passed by reference)
+{
+ arch_rwlock_t src_lock;
+ arch_rwlock_t dst_lock;
+ u16 ticket;
+ u16 head;
+ s32 count;
+
+ src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ ticket = src_lock.ticket;
+ head = src_lock.head;
+ count = src_lock.count;
+ // can lock: no waiters and no active writers
+ success = (ticket == head) && (count-1 < 0);
+ dst_lock.ticket = ticket + 1;
+ if (success) {
+ // take lock: increment readers (negative value),
+ // increment head to enable following readers
+ count = count - 1;
+ head = head + 1;
+ }
+ dst_lock.count = count;
+ dst_lock.head = head;
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return src_lock.lock;
+}
+ */
+#define virt_api_atomic_add_new_reader(__rw_addr, __success) \
+({ \
+ register unsigned int __head; \
+ register unsigned int __ticket; \
+ register int __count; \
+ register unsigned long __tmp; \
+ register unsigned long __src; \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_ADD_NEW_READER(__rw_addr, __success, \
+ __head, __ticket, __count, __src, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __src; \
+})
+
+/*
+ * Only try to take the read spinlock.
+ * Successful locking increments # of ticket and head and decrements the
+ * active readers counter (negative counter)
+ * The macro returns the source state of the read/write lock and sets the
+ * by-reference boolean value 'success' - locking is successful; otherwise
+ * 'success' is false and nothing is changed
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // passed by reference)
+{
+ arch_rwlock_t src_lock;
+ arch_rwlock_t dst_lock;
+ u16 ticket;
+ u16 head;
+ s32 count;
+
+ src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ ticket = src_lock.ticket;
+ head = src_lock.head;
+ count = src_lock.count;
+ // can lock: no waiters and no active writers
+ success = (ticket == head) && (count-1 < 0);
+ if (success) {
+ // take lock: increment readers (negative value),
+ // increment head to enable following readers,
+ // increment ticket number for next users
+ dst_lock.ticket = ticket + 1;
+ dst_lock.count = count - 1;
+ dst_lock.head = head + 1;
+ } else {
+ dst_lock.lock = src_lock.lock;
+ }
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return src_lock.lock;
+}
+ */
+#define virt_api_atomic_try_add_new_reader(__rw_addr, __success) \
+({ \
+ register unsigned int __head; \
+ register unsigned int __ticket; \
+ register int __count; \
+ register unsigned long __tmp; \
+ register unsigned long __src; \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_TRY_ADD_NEW_READER(__rw_addr, __success, \
+ __head, __ticket, __count, __src, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __src; \
+})
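Taken together, the reader-side primitives form a ticket protocol: the fast path (add_new_reader) either takes the lock or implicitly draws a coupon, and the slow path (add_slow_reader, defined next) is retried with that coupon until the head catches up. The generic code that composes them is outside this header, so the following is an assumed composition, sketched single-threaded on the simplified lock model (the real macros perform each read-modify-write atomically):

#include <stdbool.h>
#include <stdint.h>

struct rwlock_model {
	uint16_t ticket;	/* next coupon to hand out */
	uint16_t head;		/* coupon being served */
	int32_t count;		/* <0: active readers, 1: active writer */
};

/* fast path, mirrors atomic_add_new_reader(): always draws a coupon */
static uint16_t reader_fast_path(struct rwlock_model *rw, bool *success)
{
	uint16_t my_ticket = rw->ticket++;

	*success = my_ticket == rw->head && rw->count - 1 < 0;
	if (*success) {
		rw->count--;	/* one more active reader */
		rw->head++;	/* let the next queued reader in */
	}
	return my_ticket;
}

/* slow path, mirrors atomic_add_slow_reader(): retried with the coupon */
static bool reader_slow_path(struct rwlock_model *rw, uint16_t my_ticket)
{
	bool success = my_ticket == rw->head && rw->count - 1 < 0;

	if (success) {
		rw->count--;
		rw->head++;
	}
	return success;
}

static void read_lock_model(struct rwlock_model *rw)
{
	bool success;
	uint16_t my_ticket = reader_fast_path(rw, &success);

	while (!success)	/* real code would queue or relax here */
		success = reader_slow_path(rw, my_ticket);
}

int main(void)
{
	struct rwlock_model rw = { 0, 0, 0 };

	read_lock_model(&rw);		/* uncontended: fast path wins */
	return rw.count == -1 ? 0 : 1;	/* exactly one active reader */
}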
+
+/*
+ * The slow try to take the read spinlock according to the earlier
+ * received # of coupon
+ * Successful locking increments # of head and decrements the active
+ * readers counter (negative counter)
+ * The macro returns the current updated state of the read/write lock and
+ * sets the by-reference boolean value 'success' - locking is successful;
+ * otherwise the reader should be queued again
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success)
+{
+ arch_rwlock_t dst_lock;
+ u16 head;
+ s32 count;
+
+ dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ head = dst_lock.head;
+ count = dst_lock.count;
+ // can lock: no waiters and no active writers
+ success = (ticket == head) && (count-1 < 0);
+ if (success) {
+ // take lock: increment readers (negative value),
+ // increment head to enable following readers
+ count = count - 1;
+ head = head + 1;
+ dst_lock.count = count;
+ dst_lock.head = head;
+ }
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return dst_lock.lock;
+}
+ */
+#define virt_api_atomic_add_slow_reader(__rw_addr, __ticket, __success) \
+({ \
+ register unsigned int __head; \
+ register int __count; \
+ register unsigned long __tmp; \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_ADD_SLOW_READER(__rw_addr, __success, \
+ __head, __ticket, __count, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __dst; \
+})
+
+/*
+ * Unlocking of the read spinlock.
+ * Only the active readers counter (negative) needs to be incremented.
+ * The macro returns the current updated state of the read/write lock.
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_free_lock_reader(arch_rwlock_t *rw)
+{
+ arch_rwlock_t dst_lock;
+
+ dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ dst_lock.count++;
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return dst_lock.lock;
+}
+ */
+#define virt_api_atomic_free_lock_reader(__rw_addr) \
+({ \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_FREE_LOCK_READER(__rw_addr, __dst); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __dst; \
+})
+
+/*
+ * The first try to take the write spinlock.
+ * Successful locking increments # of ticket and the active writers counter
+ * (positive value - there can be only one active writer, so the counter is
+ * set to 1)
+ * The macro returns the source state of the read/write lock and sets the
+ * by-reference boolean value 'success' - locking is successful; otherwise
+ * the writer receives a coupon and should be queued as a waiter, similar
+ * to the mutex implementation
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_new_writer(arch_rwlock_t *rw, bool success // passed by reference)
+{
+ arch_rwlock_t src_lock;
+ arch_rwlock_t dst_lock;
+ u16 ticket;
+ u16 head;
+ s32 count;
+
+ src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ ticket = src_lock.ticket;
+ head = src_lock.head;
+ count = src_lock.count;
+ // can lock: no waiters and no active readers or writers
+ success = (ticket == head) && (count == 0);
+ dst_lock.head = head;
+ dst_lock.ticket = ticket + 1;
+ if (success) {
+ // take lock: increment writers
+ count = count + 1;
+ }
+ dst_lock.count = count;
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return src_lock.lock;
+}
+ */
+#define virt_api_atomic_add_new_writer(__rw_addr, __success) \
+({ \
+ register unsigned int __head; \
+ register unsigned int __ticket; \
+ register int __count; \
+ register unsigned long __tmp; \
+ register unsigned long __src; \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_ADD_NEW_WRITER(__rw_addr, __success, \
+ __head, __ticket, __count, __src, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __src; \
+})
+
+/*
+ * Only try to take the write spinlock.
+ * Successful locking increments # of ticket and the active writers counter
+ * (positive value - there can be only one active writer, so the counter is
+ * set to 1)
+ * The macro returns the source state of the read/write lock and sets the
+ * by-reference boolean value 'success' - locking is successful; otherwise
+ * 'success' is set to false and nothing is changed
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // passed by reference)
+{
+ arch_rwlock_t src_lock;
+ arch_rwlock_t dst_lock;
+ u16 ticket;
+ u16 head;
+ s32 count;
+
+ src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ ticket = src_lock.ticket;
+ head = src_lock.head;
+ count = src_lock.count;
+ // can lock: no waiters and no active readers or writers
+ success = (ticket == head) && (count == 0);
+ if (success) {
+ // take lock: increment writers counter,
+ // increment ticket number for next readers/writers
+ dst_lock.head = head;
+ dst_lock.ticket = ticket + 1;
+ dst_lock.count = count + 1;
+ }
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return src_lock.lock;
+}
+ */
+#define virt_api_atomic_try_add_new_writer(__rw_addr, __success) \
+({ \
+ register unsigned int __head; \
+ register unsigned int __ticket; \
+ register int __count; \
+ register unsigned long __tmp; \
+ register unsigned long __src; \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_TRY_ADD_NEW_WRITER(__rw_addr, __success, \
+ __head, __ticket, __count, __src, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __src; \
+})
+
+/*
+ * The slow try to take the write spinlock according to the earlier
+ * received # of coupon
+ * Successful locking increments the active writers counter
+ * (positive counter - there can be only one active writer, so the counter
+ * is set to 1)
+ * The macro returns the current updated state of the read/write lock and
+ * sets the by-reference boolean value 'success' - locking is successful;
+ * otherwise the writer should be queued again
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success)
+{
+ arch_rwlock_t dst_lock;
+ u16 head;
+ s32 count;
+
+ dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ head = dst_lock.head;
+ count = dst_lock.count;
+ // can lock: no waiters and no active readers or writers
+ success = (ticket == head) && (count == 0);
+ if (success) {
+ // take lock: increment writers
+ count = count + 1;
+ dst_lock.count = count;
+ }
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return dst_lock.lock;
+}
+ */
+#define virt_api_atomic_add_slow_writer(__rw_addr, __ticket, __success) \
+({ \
+ register unsigned int __head; \
+ register int __count; \
+ register unsigned long __tmp; \
+ register unsigned long __dst; \
+ \
+ NATIVE_ATOMIC_ADD_SLOW_WRITER(__rw_addr, __success, \
+ __head, __ticket, __count, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __dst; \
+})
+
+/*
+ * Unlocking of the write spinlock.
+ * Only the # of the queue head needs to be incremented and the active
+ * writers counter decremented (positive counter - there can be only one
+ * writer, so the counter is set back to 0)
+ * The macro returns the current updated state of the read/write lock.
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_free_lock_writer(arch_rwlock_t *rw)
+{
+ arch_rwlock_t dst_lock;
+
+ dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+ dst_lock.count--;
+ dst_lock.head++;
+ E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+ return dst_lock.lock;
+}
+ */
+#define virt_api_atomic_free_lock_writer(__rw_addr) \
+({ \
+ register unsigned long __dst; \
+ register unsigned int __head; \
+ register int __count; \
+ register unsigned long __tmp; \
+ \
+ NATIVE_ATOMIC_FREE_LOCK_WRITER(__rw_addr, \
+ __head, __count, __dst, __tmp); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ __dst; \
+})
+
+#define virt_api_atomic_op(val, addr, size_letter, op, mem_model) \
+({ \
+ typeof(val) rval; \
+ NATIVE_ATOMIC_OP(val, addr, rval, size_letter, op, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+#define virt_api_atomic_fetch_op(val, addr, size_letter, op, mem_model) \
+({ \
+ typeof(val) rval, stored_val; \
+ NATIVE_ATOMIC_FETCH_OP(val, addr, rval, stored_val, \
+ size_letter, op, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+
+/*
+ * Atomic operations with return value and acquire/release semantics
+ */
+
+#define virt_api_atomic32_fetch_inc_unless_negative(addr) \
+({ \
+ register int rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \
+ w, "adds", "~ ", "adds", "", "cmplsb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic64_fetch_inc_unless_negative(addr) \
+({ \
+ register long long rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \
+ d, "addd", "~ ", "addd", "", "cmpldb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic32_fetch_dec_unless_positive(addr) \
+({ \
+ register int rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \
+ w, "subs", "", "adds", "~ ", "cmplesb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic64_fetch_dec_unless_positive(addr) \
+({ \
+ register long long rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \
+ d, "subd", "", "addd", "~ ", "cmpledb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic32_fetch_dec_if_positive(addr) \
+({ \
+ register int rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(1, addr, 0, tmp, rval, \
+ w, "subs", "~ ", "adds", "", "cmplesb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic64_fetch_dec_if_positive(addr) \
+({ \
+ register long long rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(1ull, addr, 0ull, tmp, rval, \
+ d, "subd", "~ ", "addd", "", "cmpledb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic32_fetch_add_unless(val, addr, unless) \
+({ \
+ register int rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(val, addr, unless, tmp, rval, \
+ w, "adds", "~ ", "adds", "", "cmpesb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define virt_api_atomic64_fetch_add_unless(val, addr, unless) \
+({ \
+ register long long rval, tmp; \
+ NATIVE_ATOMIC_FETCH_OP_UNLESS(val, addr, unless, tmp, rval, \
+ d, "addd", "~ ", "addd", "", "cmpedb", STRONG_MB); \
+ VIRT_HWBUG_AFTER_LD_ACQ(); \
+ rval; \
+})
+
+#define __api_atomic64_fetch_xchg_if_below(val, addr, mem_model) \
+({ \
+ register long long rval, tmp; \
+ NATIVE_ATOMIC_FETCH_XCHG_UNLESS(val, addr, tmp, rval, d, \
+ "merged", "cmpbdb", mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
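The *_fetch_*_unless/_if_* macros just above are easiest to understand through their portable equivalent: a compare-and-swap loop (here the opaque NATIVE_ATOMIC_FETCH_OP_UNLESS presumably builds an equivalent hardware-assisted retry loop). A hedged, generic C11 sketch of the 32-bit fetch_inc_unless_negative semantics, not the real implementation:

#include <stdatomic.h>
#include <stdio.h>

/* returns the old value; increments only if the old value was >= 0 */
static int fetch_inc_unless_negative(_Atomic int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	while (old >= 0 &&
	       !atomic_compare_exchange_weak_explicit(v, &old, old + 1,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;	/* a failed CAS reloaded 'old'; retry */
	return old;
}

int main(void)
{
	_Atomic int v = 0;

	printf("%d\n", fetch_inc_unless_negative(&v) >= 0);	/* 1: incremented */
	atomic_store(&v, -5);
	printf("%d\n", fetch_inc_unless_negative(&v) >= 0);	/* 0: untouched */
	return 0;
}

atomic_inc_unless_negative() in atomic.h earlier in this patch compares exactly this return value against zero.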
+
+#define virt_api_xchg_return(val, addr, size_letter, mem_model) \
+({ \
+ register long rval; \
+ NATIVE_ATOMIC_XCHG_RETURN(val, addr, rval, size_letter, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+#define virt_api_cmpxchg_return(old, new, addr, size_letter, \
+ sxt_size, mem_model) \
+({ \
+ register long rval; \
+ register long stored_val; \
+ NATIVE_ATOMIC_CMPXCHG_RETURN(old, new, addr, stored_val, rval, \
+ size_letter, sxt_size, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+#define virt_api_cmpxchg_word_return(old, new, addr, mem_model) \
+({ \
+ int rval, stored_val; \
+ NATIVE_ATOMIC_CMPXCHG_WORD_RETURN(old, new, addr, \
+ stored_val, rval, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+#define virt_api_cmpxchg_dword_return(old, new, addr, mem_model) \
+({ \
+ long long rval, stored_val; \
+ NATIVE_ATOMIC_CMPXCHG_DWORD_RETURN(old, new, addr, stored_val, \
+ rval, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+/*
+ * Implementation of cmpxchg_double for 64-bit pairs;
+ * it provides the logic required by SLUB.
+ *
+ * C equivalent:
+ *
+static int
+atomic_cmpxchg_double(struct page *page, void *freelist_old,
+ unsigned long counters_old,
+ void *freelist_new, unsigned long counters_new)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ slab_lock(page);
+ if (page->freelist == freelist_old &&
+ page->counters == counters_old) {
+ page->freelist = freelist_new;
+ set_page_slub_counters(page, counters_new);
+ slab_unlock(page);
+ local_irq_restore(flags);
+ return true;
+ }
+ slab_unlock(page);
+ local_irq_restore(flags);
+ return false;
+}
+ */
+
+#define virt_api_cmpxchg_double(addr1, addr2, old1, old2, new1, new2, \
+ mem_model) \
+({ \
+ register long rval; \
+ NATIVE_ATOMIC_CMPXCHG_DWORD_PAIRS(addr1, old1, old2, new1, new2,\
+ rval, mem_model); \
+ VIRT_HWBUG_AFTER_LD_ACQ_##mem_model(); \
+ rval; \
+})
+
+#define __api_cmpxchg_double(addr1, addr2, old1, old2, new1, new2) \
+ virt_api_cmpxchg_double(addr1, addr2, old1, old2, \
+ new1, new2, STRONG_MB)
+
+#define __api_futex_atomic32_op(insn, oparg, uaddr) \
+ virt_api_atomic_fetch_op(oparg, uaddr, w, insn, STRONG_MB)
+
+#define __api_atomic32_add_if_not_negative \
+ virt_api_atomic32_add_if_not_negative
+
+#define __api_atomic64_add_if_not_negative \
+ virt_api_atomic64_add_if_not_negative
+
+/* Atomically add and return the old value */
+#define __api_atomic32_add_oldval(val, addr) \
+ virt_api_atomic_fetch_op(val, addr, w, "adds", STRONG_MB)
+
+#define __api_atomic32_add_oldval_lock(val, addr) \
+ virt_api_atomic_fetch_op(val, addr, w, "adds", LOCK_MB)
+
+/* Atomically add to the 16 low bits and return the new 32-bit value */
+#define __api_atomic16_add_return32_lock(val, addr) \
+ virt_api_atomic16_add_return32_lock(val, addr)
+
+/* Atomically add two 32-bit values packed into one 64-bit value */
+/* and return the new 64-bit value */
+#define __api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr) \
+ virt_api_atomic32_pair_add_return64_lock(val_lo, val_hi, addr)
+
+/* Atomically subtract two 32-bit values packed into one 64-bit value */
+/* and return the new 64-bit value */
+#define __api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr) \
+ virt_api_atomic32_pair_sub_return64_lock(val_lo, val_hi, addr)
+
+#define __api_atomic_ticket_trylock(spinlock, tail_shift) \
+ virt_api_atomic_ticket_trylock(spinlock, tail_shift)
+#define __api_atomic_can_lock_reader(__rw_addr, __success) \ +
virt_api_atomic_can_lock_reader(__rw_addr, __success) +#define __api_atomic_can_lock_writer(__rw_addr, __success) \ + virt_api_atomic_can_lock_writer(__rw_addr, __success) +#define __api_atomic_add_new_reader(__rw_addr, __success) \ + virt_api_atomic_add_new_reader(__rw_addr, __success) +#define __api_atomic_try_add_new_reader(__rw_addr, __success) \ + virt_api_atomic_try_add_new_reader(__rw_addr, __success) +#define __api_atomic_add_slow_reader(__rw_addr, __ticket, __success) \ + virt_api_atomic_add_slow_reader(__rw_addr, __ticket, __success) +#define __api_atomic_free_lock_reader(__rw_addr) \ + virt_api_atomic_free_lock_reader(__rw_addr) +#define __api_atomic_add_new_writer(__rw_addr, __success) \ + virt_api_atomic_add_new_writer(__rw_addr, __success) +#define __api_atomic_try_add_new_writer(__rw_addr, __success) \ + virt_api_atomic_try_add_new_writer(__rw_addr, __success) +#define __api_atomic_add_slow_writer(__rw_addr, __ticket, __success) \ + virt_api_atomic_add_slow_writer(__rw_addr, __ticket, \ + __success) +#define __api_atomic_free_lock_writer(__rw_addr) \ + virt_api_atomic_free_lock_writer(__rw_addr) + +#define __api_atomic_op virt_api_atomic_op +#define __api_atomic_fetch_op virt_api_atomic_fetch_op + +/* + * Atomic operations with return value and acquire/release semantics + */ +#define __api_atomic32_fetch_add_unless(val, addr, unless) \ + virt_api_atomic32_fetch_add_unless(val, addr, unless) +#define __api_atomic64_fetch_add_unless(val, addr, unless) \ + virt_api_atomic64_fetch_add_unless(val, addr, unless) + +#define __api_atomic32_fetch_dec_if_positive virt_api_atomic32_fetch_dec_if_positive +#define __api_atomic64_fetch_dec_if_positive virt_api_atomic64_fetch_dec_if_positive +#define __api_atomic32_fetch_dec_unless_positive virt_api_atomic32_fetch_dec_unless_positive +#define __api_atomic64_fetch_dec_unless_positive virt_api_atomic64_fetch_dec_unless_positive +#define __api_atomic32_fetch_inc_unless_negative virt_api_atomic32_fetch_inc_unless_negative +#define __api_atomic64_fetch_inc_unless_negative virt_api_atomic64_fetch_inc_unless_negative + +#define __api_xchg_return virt_api_xchg_return + +#define __api_cmpxchg_return virt_api_cmpxchg_return + +#define __api_cmpxchg_word_return virt_api_cmpxchg_word_return + +#define __api_cmpxchg_dword_return virt_api_cmpxchg_dword_return + +#endif /* ! 
__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_E2K_ATOMIC_API_H_ */
diff --git a/arch/e2k/include/asm/auxvec.h b/arch/e2k/include/asm/auxvec.h
new file mode 100644
index 0000000..21189a6
--- /dev/null
+++ b/arch/e2k/include/asm/auxvec.h
@@ -0,0 +1,12 @@
+#ifndef _E2K_AUXVEC_H
+#define _E2K_AUXVEC_H
+
+#define AT_FAST_SYSCALLS 32
+/* Skip 33 as it is assumed to be AT_SYSINFO_EHDR in Linux */
+#define AT_SYSTEM_INFO 34
+
+#ifdef __KERNEL__
+# define AT_VECTOR_SIZE_ARCH 2
+#endif
+
+#endif /* _E2K_AUXVEC_H */
diff --git a/arch/e2k/include/asm/barrier.h b/arch/e2k/include/asm/barrier.h
new file mode 100644
index 0000000..0c3617f
--- /dev/null
+++ b/arch/e2k/include/asm/barrier.h
@@ -0,0 +1,165 @@
+#ifndef _ASM_E2K_BARRIER_H
+#define _ASM_E2K_BARRIER_H
+
+#include
+
+#include
+#include
+
+#if CONFIG_CPU_ISET >= 6
+/* Cannot use this on V5 because of load-after-store dependencies -
+ * compiled kernel won't honour them */
+# define mb() E2K_WAIT_V6(_st_c | _ld_c | _sas | _sal | _las | _lal)
+#else
+# define mb() E2K_WAIT(_st_c | _ld_c)
+#endif
+#define wmb() E2K_WAIT_ST_C_SAS()
+#define rmb() E2K_WAIT_LD_C_LAL()
+
+/*
+ * For smp_* variants add _mt modifier
+ */
+#if CONFIG_CPU_ISET >= 6
+/* Cannot use this on V5 because of load-after-store dependencies -
+ * compiled kernel won't honour them */
+# define __smp_mb() E2K_WAIT_V6(_st_c | _ld_c | _sas | _sal | _las | _lal | _mt)
+#else
+# define __smp_mb() E2K_WAIT(_st_c | _ld_c)
+#endif
+#define __smp_wmb() E2K_WAIT_ST_C_SAS_MT()
+#define __smp_rmb() E2K_WAIT_LD_C_LAL_MT()
+
+#define dma_rmb() __smp_rmb()
+#define dma_wmb() __smp_wmb()
+
+#define __smp_read_barrier_depends() NATIVE_HWBUG_AFTER_LD_ACQ()
+
+
+#if CONFIG_CPU_ISET >= 5
+# define __smp_mb__after_atomic() barrier()
+# define __smp_mb__before_atomic() E2K_WAIT_ST_C_SAS_LD_C_SAL_MT()
+#elif CONFIG_CPU_ISET >= 3
+/* Atomic operations are serializing since e2s */
+# define __smp_mb__after_atomic() \
+do { \
+ barrier(); \
+ NATIVE_HWBUG_AFTER_LD_ACQ(); \
+} while (0)
+# define __smp_mb__before_atomic() barrier()
+#else
+# define __smp_mb__after_atomic() E2K_WAIT(_st_c)
+# define __smp_mb__before_atomic() barrier()
+#endif
+
+extern int __smp_store_release_bad(void) __attribute__((noreturn));
+#if CONFIG_CPU_ISET >= 6
+# define __smp_store_release(p, v) \
+do { \
+ __typeof__(*(p)) __ssr_v = (v); \
+ switch (sizeof(*p)) { \
+ case 1: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), b, "memory"); break; \
+ case 2: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), h, "memory"); break; \
+ case 4: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), w, "memory"); break; \
+ case 8: STORE_NV_MAS((p), __ssr_v, MAS_STORE_RELEASE_V6(MAS_MT_1), d, "memory"); break; \
+ default: __smp_store_release_bad(); break; \
+ } \
+} while (0)
+#else
+# define __smp_store_release(p, v) \
+do { \
+ compiletime_assert(sizeof(*p) == 1 || sizeof(*p) == 2 || \
+ sizeof(*p) == 4 || sizeof(*p) == 8, \
+ "Need native word sized stores/loads for atomicity."); \
+ E2K_WAIT_ST_C_SAS_LD_C_SAL_MT(); \
+ WRITE_ONCE(*(p), (v)); \
+} while (0)
+#endif /* CONFIG_CPU_ISET >= 6 */
+
+/*
+ * store_release() - same as __smp_store_release but acts on device accesses too
+ */
+#define store_release_v2 __smp_store_release
+#define store_release_v6(p, v) \
+do { \
+ __typeof__(*(p)) __sr6_v = (v); \
+ switch (sizeof(*p)) { \
+ case 1: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), b, "memory"); break; \
+ case 2: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), h, "memory"); break; \
+ case 4: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), w, "memory"); break; \
+ case 8: STORE_NV_MAS((p), __sr6_v, MAS_STORE_RELEASE_V6(MAS_MT_0), d, "memory"); break; \
+ default: __smp_store_release_bad(); break; \
+ } \
+} while (0)
+#define store_release(p, v) \
+do { \
+ if (cpu_has(CPU_FEAT_ISET_V6)) \
+ store_release_v6((p), (v)); \
+ else \
+ store_release_v2((p), (v)); \
+} while (0)
+
+#if CONFIG_CPU_ISET >= 6
+extern int __smp_load_acquire_bad(void) __attribute__((noreturn));
+# define __smp_load_acquire(p) \
+({ \
+ union { typeof(*(p)) __ret_la; char __c[1]; } __u; \
+ switch (sizeof(*p)) { \
+ case 1: LOAD_NV_MAS((p), (*(__u8 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), b, "memory"); \
+ break; \
+ case 2: LOAD_NV_MAS((p), (*(__u16 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), h, "memory"); \
+ break; \
+ case 4: LOAD_NV_MAS((p), (*(__u32 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), w, "memory"); \
+ break; \
+ case 8: LOAD_NV_MAS((p), (*(__u64 *)__u.__c), MAS_LOAD_ACQUIRE_V6(MAS_MT_1), d, "memory"); \
+ break; \
+ default: __smp_load_acquire_bad(); break; \
+ } \
+ __u.__ret_la; \
+})
+#else
+# define __smp_load_acquire(p) \
+({ \
+ typeof(*(p)) ___p1 = READ_ONCE(*(p)); \
+ compiletime_assert(sizeof(*p) == 1 || sizeof(*p) == 2 || \
+ sizeof(*p) == 4 || sizeof(*p) == 8, \
+ "Need native word sized stores/loads for atomicity."); \
+ E2K_RF_WAIT_LOAD(___p1); \
+ ___p1; \
+})
+#endif
+
+/*
+ * e2k is an in-order architecture, thus loads are not speculated by hardware
+ * and we only have to protect against compiler optimizations
+ */
+#define smp_acquire__after_ctrl_dep() barrier()
+
+/**
+ * array_index_mask_nospec - hide 'index' from the compiler so that
+ * it does not try to load the array speculatively across this point
+ *
+ * On e2k there is no hardware speculation, only software, so the
+ * trick with the mask is not needed.
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ OPTIMIZER_HIDE_VAR(index);
+
+ return -1UL;
+}
+
+/*
+ * Follow the example of RISC-V and forbid IO crossing of a scheduling
+ * boundary by using mb() instead of smp_mb(). This should not have
+ * any measurable performance impact on e2k. The bad case is when a
+ * task is preempted after writeX() and migrated to another CPU fast
+ * enough so that the CPU it was preempted on has not called any
+ * spin_unlock()'s yet.
+ */
+#define smp_mb__after_spinlock() mb()
+
+#include
+
+#endif /* _ASM_E2K_BARRIER_H */
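The generic barrier glue included at the end of the file maps smp_store_release()/smp_load_acquire() onto the __smp_* definitions above. What acquire/release buys in practice is the classic message-passing guarantee: a reader that observes the flag set via load-acquire is guaranteed to see all data written before the store-release. A minimal, hypothetical producer/consumer pair (the payload/flag variables are illustrative, not from this patch):

static int payload;
static int flag;

/* runs on CPU 0 */
static void producer(void)
{
	payload = 42;			/* plain store to the payload */
	smp_store_release(&flag, 1);	/* publish: payload is ordered
					 * before the flag store */
}

/* runs on CPU 1 */
static void consumer(void)
{
	if (smp_load_acquire(&flag))	/* acquire: flag load is ordered
					 * before the payload load */
		BUG_ON(payload != 42);	/* guaranteed to see 42 */
}

The non-V6 fallbacks above implement the same contract with explicit E2K wait barriers around plain WRITE_ONCE()/READ_ONCE() accesses.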
diff --git a/arch/e2k/include/asm/bios_map.h b/arch/e2k/include/asm/bios_map.h
new file mode 100644
index 0000000..d5ffde9
--- /dev/null
+++ b/arch/e2k/include/asm/bios_map.h
@@ -0,0 +1,116 @@
+/*
+ * $Id: bios_map.h,v 1.1 2009/01/15 13:47:21 kostin Exp $
+ * Bios cmos map distribution.
+ */
+#ifndef _E2K_BIOS_MAP_H_
+#define _E2K_BIOS_MAP_H_
+
+#ifdef __KERNEL__
+#define ECMOS_PORT(ext) (0x70 + (ext))
+/*
+ * The yet supported machines all access the RTC index register via
+ * an ISA port access but the way to access the date register differs ...
+ */ +#define ECMOS_READ(addr, ext) ({ \ +outb_p((addr),ECMOS_PORT(ext + 0)); \ +inb_p(ECMOS_PORT(ext + 1)); \ +}) +#define ECMOS_WRITE(val, addr, ext) ({ \ +outb_p((addr),ECMOS_PORT(ext + 0)); \ +outb_p((val),ECMOS_PORT(ext + 1)); \ +}) + +static inline unsigned char bios_read(int addr) +{ + char byte; + if (addr & 0x80) byte = ECMOS_READ(addr - 0x80, 2); + else byte = ECMOS_READ(addr, 0); + return byte; +} + +static inline void bios_write(unsigned char val, int addr) +{ + if (addr & 0x80) ECMOS_WRITE(val, addr - 0x80, 2); + else ECMOS_WRITE(val, addr, 0); +} +#endif /* __KERNEL__ */ + +//#define bios_read(addr) ECMOS_READ(addr) +//#define bios_write(val, addr) ECMOS_WRITE(val, addr) + +#define BIOS_UNSET_ONE -1 + +#define name_length 15 +#define cmdline_length 127 + +#define CMOS_BASE 128 + 64 +#define CMOS_SIZE 64 +#define CMOS_FILE_LENGTH 15 + +#define BIOS_PROC_MASK CMOS_BASE + 0 +#define BIOS_DEV_NUM CMOS_BASE + 3 /* device number(0 - 3) */ +#define BIOS_AUTOBOOT_TIMER CMOS_BASE + 4 /* boot waiting seconds */ +#define BIOS_BOOT_ITEM CMOS_BASE + 5 /* boot item: kernel, lintel, + tests - 'Ñ','Ë','Ô' */ +#define BIOS_BOOT_KNAME CMOS_BASE + 6 /* kernel name */ + +#define BIOS_TEST_FLAG 0x6c +#define BIOS_TEST_FLAG2 0x6d +#define BIOS_SERIAL_RATE 0x6e /* 3 - 38400 other - 115200 */ + +#define BIOS_MACHINE_TYPE CMOS_BASE + 28 /* architecture type */ + +#define BIOS_PASSWD_FLAG CMOS_BASE + 29 +#define BIOS_PASSWD_FLAG2 CMOS_BASE + 30 +#define BIOS_PASSWD1 CMOS_BASE + 31 +#define BIOS_PASSWD2 CMOS_BASE + 32 +#define BIOS_PASSWD3 CMOS_BASE + 33 +#define BIOS_PASSWD4 CMOS_BASE + 34 +#define BIOS_PASSWD5 CMOS_BASE + 35 +#define BIOS_PASSWD6 CMOS_BASE + 36 +#define BIOS_PASSWD7 CMOS_BASE + 37 +#define BIOS_PASSWD8 CMOS_BASE + 38 +#define BIOS_PASSWD9 CMOS_BASE + 39 +#define BIOS_PASSWD10 CMOS_BASE + 40 + +#define BIOS_CSUM CMOS_BASE + 61 /* checksum lsb */ +#define BIOS_CSUM2 CMOS_BASE + 62 /* checksum msb */ + +typedef struct e2k_bios_param { + char kernel_name[name_length + 1]; + char command_line[cmdline_length + 1]; + int booting_item; + int dev_num; + int serial_rate; + int autoboot_timer; + int machine_type; +} e2k_bios_param_t; + +#ifdef __KERNEL__ +static unsigned int _bios_csum(unsigned int counter, unsigned int len) +{ + unsigned int csum = 0; + + len = len + counter; + + + while(counter < len) { + csum += bios_read(counter); + counter++; + } + + return csum; +} + +static inline unsigned int _bios_checksum(void) +{ + unsigned int csum = 0; + + csum = _bios_csum( 106, 6); + csum += _bios_csum( 192, 21); + csum += _bios_csum( 220, 12 ); + + return csum; +} +#endif /* __KERNEL__ */ +#endif /*_E2K_BIOS_MAP_H_ */ diff --git a/arch/e2k/include/asm/bitops.h b/arch/e2k/include/asm/bitops.h new file mode 100644 index 0000000..325b87d --- /dev/null +++ b/arch/e2k/include/asm/bitops.h @@ -0,0 +1,63 @@ +#ifndef _E2K_BITOPS_H_ +#define _E2K_BITOPS_H_ + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include +#include + +/* This is better than generic definition */ +static inline int fls(unsigned int x) +{ + return 8 * sizeof(int) - E2K_LZCNTS(x); +} + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return E2K_POPCNTS(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return E2K_POPCNTS(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return E2K_POPCNTS(w & 0xff); +} + +static inline unsigned long __arch_hweight64(unsigned long w) +{ + return E2K_POPCNTD(w); +} + +#include +#include 
+#include +#include +#include + +#include + +#if defined E2K_P2V && !defined CONFIG_BOOT_E2K +extern unsigned long boot_find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); +extern unsigned long boot_find_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); +# define find_next_bit boot_find_next_bit +# define find_next_zero_bit boot_find_next_zero_bit +#endif +#include + +#include +#include +#include +#include +#include +#include + +#endif /* _E2K_BITOPS_H_ */ diff --git a/arch/e2k/include/asm/bitrev.h b/arch/e2k/include/asm/bitrev.h new file mode 100644 index 0000000..4db2bc7 --- /dev/null +++ b/arch/e2k/include/asm/bitrev.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BITREV_H +#define __ASM_BITREV_H + +static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) +{ + return __builtin_e2k_bitrevs(x); +} + +static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x) +{ + return __builtin_e2k_bitrevs((u32) x) >> 16; +} + +static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x) +{ + return __builtin_e2k_bitrevs((u32) x) >> 24; +} + +#endif diff --git a/arch/e2k/include/asm/bitsperlong.h b/arch/e2k/include/asm/bitsperlong.h new file mode 100644 index 0000000..0697e90 --- /dev/null +++ b/arch/e2k/include/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_E2K_BITSPERLONG_H +#define __ASM_E2K_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_E2K_BITSPERLONG_H */ diff --git a/arch/e2k/include/asm/boot_flags.h b/arch/e2k/include/asm/boot_flags.h new file mode 100644 index 0000000..2bb35c0 --- /dev/null +++ b/arch/e2k/include/asm/boot_flags.h @@ -0,0 +1,123 @@ +/* + * E2K boot info flags support. + */ +#ifndef _E2K_BOOT_FLAGS_H +#define _E2K_BOOT_FLAGS_H + +#include + +#include +#include +#include +#include + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on physical address + * bootblock virtual address can be only read + */ + +#define DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, mas) \ +({ \ + u64 field_value; \ + switch (sizeof((bootblock_p)->blk_field)) { \ + case 1: \ + field_value = \ + NATIVE_READ_MAS_B(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + case 2: \ + field_value = \ + NATIVE_READ_MAS_H(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + case 4: \ + field_value = \ + NATIVE_READ_MAS_W(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + case 8: \ + field_value = \ + NATIVE_READ_MAS_D(&((bootblock_p)->blk_field), \ + mas); \ + break; \ + default: \ + BUG(); \ + } \ + (field_value); \ +}) + +#define DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value, mas) \ +({ \ + switch (sizeof((bootblock_p)->blk_field)) { \ + case 1: \ + NATIVE_WRITE_MAS_B(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + case 2: \ + NATIVE_WRITE_MAS_H(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + case 4: \ + NATIVE_WRITE_MAS_W(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + case 8: \ + NATIVE_WRITE_MAS_D(&((bootblock_p)->blk_field), \ + (field_value), mas); \ + break; \ + default: \ + BUG(); \ + } \ +}) +#define NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, MAS_IOADDR) +#define NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value, MAS_IOADDR) + +#ifdef 
CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without virtualization support */ +/* or host kernel with virtualization support */ +#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) +#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +static inline u64 +read_bootblock_flags(bootblock_struct_t *bootblock) +{ + return READ_BOOTBLOCK_FIELD(bootblock, kernel_flags); +} + +static inline void +write_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, boot_flags, new_flags); + WRITE_BOOTBLOCK_FIELD(bootblock, kernel_flags, new_flags); +} + +static inline void +set_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags) +{ + u64 cur_flags = read_bootblock_flags(bootblock); + write_bootblock_flags(bootblock, cur_flags | new_flags); +} + +static inline void +reset_bootblock_flags(bootblock_struct_t *bootblock, u64 new_flags) +{ + u64 cur_flags = read_bootblock_flags(bootblock); + write_bootblock_flags(bootblock, cur_flags & ~new_flags); +} + +#endif /* _E2K_BOOT_FLAGS_H */ diff --git a/arch/e2k/include/asm/boot_profiling.h b/arch/e2k/include/asm/boot_profiling.h new file mode 100644 index 0000000..64353a9 --- /dev/null +++ b/arch/e2k/include/asm/boot_profiling.h @@ -0,0 +1,34 @@ +#ifndef _ASM_E2K_BOOT_PROFILING_H +#define _ASM_E2K_BOOT_PROFILING_H + +#include +#include + +#ifdef CONFIG_BOOT_TRACE +extern void notrace boot_add_boot_trace_event(char *name); + +/* EARLY_BOOT_TRACEPOINT should be used if virtual memory + * is not working yet. It does not support formatted strings. */ +# define EARLY_BOOT_TRACEPOINT(name) \ + boot_add_boot_trace_event(name) + +#ifdef CONFIG_RECOVERY +/* Clears boot trace data (needed to trace recovery times). */ +void reinitialize_boot_trace_data(void); +#endif /* CONFIG_RECOVERY */ + +#define boot_trace_get_cycles get_cycles + +/* Convert boot counter cycles to ms */ +static inline u64 boot_cycles_to_ms(u64 cycles) +{ + u64 cpu_hz = cpu_data[0].proc_freq; + + return MSEC_PER_SEC * cycles / cpu_hz; +} +#else /* !CONFIG_BOOT_TRACE */ +# define EARLY_BOOT_TRACEPOINT(name) +#endif /* CONFIG_BOOT_TRACE */ + +#endif /* _ASM_E2K_BOOT_PROFILING_H */ + diff --git a/arch/e2k/include/asm/boot_recovery.h b/arch/e2k/include/asm/boot_recovery.h new file mode 100644 index 0000000..4a669ea --- /dev/null +++ b/arch/e2k/include/asm/boot_recovery.h @@ -0,0 +1,42 @@ +/* $Id: boot_recovery.h,v 1.12 2009/06/29 11:52:31 atic Exp $ + * + * boot-time recovery of kernel from control point. 
+ */ + +#ifndef _E2K_BOOT_RECOVERY_H +#define _E2K_BOOT_RECOVERY_H + +#include +#include +#include + +/* To use stgd upon kernel entry task_struct must be aligned + * (since %gd_lo.base points to it) */ +struct aligned_task { + struct task_struct t; +} __aligned(E2K_ALIGN_GLOBALS_SZ); +extern struct aligned_task task_to_restart[]; +extern struct task_struct *task_to_recover; + +/* + * Forwards of boot-time functions to recover system state + */ + +extern void boot_recovery(bootblock_struct_t *bootblock); +extern void recover_kernel(void); +extern int restart_system(void (*restart_func)(void *), void *arg); + +#define full_phys_mem nodes_phys_mem + +#define START_KERNEL_SYSCALL 12 + +extern inline void +scall2(bootblock_struct_t *bootblock) +{ + (void) E2K_SYSCALL(START_KERNEL_SYSCALL, /* Trap number */ + 0, /* empty sysnum */ + 1, /* single argument */ + (long) bootblock); /* the argument */ +} + +#endif /* _E2K_BOOT_RECOVERY_H */ diff --git a/arch/e2k/include/asm/bootinfo.h b/arch/e2k/include/asm/bootinfo.h new file mode 100644 index 0000000..1c3a637 --- /dev/null +++ b/arch/e2k/include/asm/bootinfo.h @@ -0,0 +1,10 @@ +#ifndef _E2K_BOOTINFO_H_ +#define _E2K_BOOTINFO_H_ + +#ifdef __KERNEL__ +#include +#endif + +#include + +#endif /* _E2K_BOOTINFO_H_ */ diff --git a/arch/e2k/include/asm/bug.h b/arch/e2k/include/asm/bug.h new file mode 100644 index 0000000..178748b --- /dev/null +++ b/arch/e2k/include/asm/bug.h @@ -0,0 +1,20 @@ +#ifndef _E2K_BUG_H +#define _E2K_BUG_H + +#ifdef CONFIG_BUG +# include + +# define BUG() \ +do { \ + __EMIT_BUG(0); \ + unreachable(); \ +} while (0) + +# define __WARN_FLAGS(flags) __EMIT_BUG(BUGFLAG_WARNING|(flags)); + +# define HAVE_ARCH_BUG +#endif /* CONFIG_BUG */ + +#include + +#endif /* _E2K_BUG_H */ diff --git a/arch/e2k/include/asm/byteorder.h b/arch/e2k/include/asm/byteorder.h new file mode 100644 index 0000000..1bd1c69 --- /dev/null +++ b/arch/e2k/include/asm/byteorder.h @@ -0,0 +1,10 @@ +#ifndef _E2K_BYTEORDER_H_ +#define _E2K_BYTEORDER_H_ + +#include + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* _E2K_BYTEORDER_H_ */ diff --git a/arch/e2k/include/asm/cache.h b/arch/e2k/include/asm/cache.h new file mode 100644 index 0000000..c2dcc8e --- /dev/null +++ b/arch/e2k/include/asm/cache.h @@ -0,0 +1,81 @@ +#ifndef _E2K_CACHE_H_ +#define _E2K_CACHE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define _max_(a, b) ((a) > (b) ? 
(a) : (b))
+#define _max3_(a, b, c) _max_((a), _max_((b), (c)))
+
+#ifdef CONFIG_E2K_MACHINE
+# if defined(CONFIG_E2K_ES2_DSP) || defined(CONFIG_E2K_ES2_RU)
+# define L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E2S)
+# define L1_CACHE_SHIFT E2S_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E2S_L2_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E8C)
+# define L1_CACHE_SHIFT E8C_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E8C_L2_CACHE_SHIFT
+# define L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E1CP)
+# define L1_CACHE_SHIFT E1CP_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E1CP_L2_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E8C2)
+# define L1_CACHE_SHIFT E8C2_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E8C2_L2_CACHE_SHIFT
+# define L3_CACHE_SHIFT E8C2_L3_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E12C)
+# define L1_CACHE_SHIFT E12C_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E12C_L2_CACHE_SHIFT
+# define L3_CACHE_SHIFT E12C_L3_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E16C)
+# define L1_CACHE_SHIFT E16C_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E16C_L2_CACHE_SHIFT
+# define L3_CACHE_SHIFT E16C_L3_CACHE_SHIFT
+# elif defined(CONFIG_E2K_E2C3)
+# define L1_CACHE_SHIFT E2C3_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT E2C3_L2_CACHE_SHIFT
+# else
+# error "E2K MACHINE type is not defined"
+# endif
+# ifndef L3_CACHE_SHIFT
+# define L3_CACHE_SHIFT 0
+# endif
+#else /* ! CONFIG_E2K_MACHINE */
+/*
+ * FIXME: keep this in mind when adding a new cpu type
+ */
+# define L1_CACHE_SHIFT_MAX ES2_L1_CACHE_SHIFT
+# define L2_CACHE_SHIFT_MAX ES2_L2_CACHE_SHIFT
+# define L3_CACHE_SHIFT_MAX E8C_L3_CACHE_SHIFT
+
+# define L1_CACHE_SHIFT L1_CACHE_SHIFT_MAX
+# define L2_CACHE_SHIFT L2_CACHE_SHIFT_MAX
+# define L3_CACHE_SHIFT L3_CACHE_SHIFT_MAX
+#endif /* CONFIG_E2K_MACHINE */
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
+#define L3_CACHE_BYTES (L3_CACHE_SHIFT ? (1 << L3_CACHE_SHIFT) : 0)
+
+/* Stores pass through L1$, so we should use the biggest size. */
+#define SMP_CACHE_BYTES _max3_(L1_CACHE_BYTES, L2_CACHE_BYTES, \
+ L3_CACHE_BYTES)
+#define INTERNODE_CACHE_SHIFT _max3_(L1_CACHE_SHIFT, L2_CACHE_SHIFT, \
+ L3_CACHE_SHIFT)
+
+#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+
+#define cache_line_size() _max3_(L1_CACHE_BYTES, L2_CACHE_BYTES, \
+ L3_CACHE_BYTES)
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#endif /* _E2K_CACHE_H_ */ diff --git a/arch/e2k/include/asm/cacheflush.h b/arch/e2k/include/asm/cacheflush.h new file mode 100644 index 0000000..6b8fddc --- /dev/null +++ b/arch/e2k/include/asm/cacheflush.h @@ -0,0 +1,230 @@
+/*
+ * cacheflush.h: the functions and defines necessary to flush
+ * the caches.
+ *
+ * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+#ifndef _E2K_CACHEFLUSH_H
+#define _E2K_CACHEFLUSH_H
+
+#include
+#include
+#include
+
+#include
+#include
+
+#undef DEBUG_MR_MODE
+#undef DebugMR
+#define DEBUG_MR_MODE 0 /* MMU registers access */
+#define DebugMR(...) DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__)
+
+/*
+ * Caches flushing routines. This is the kind of stuff that can be very
+ * expensive, so we should try to avoid them whenever possible.
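+ *
+ * A typical use (an illustrative sketch only): after writing
+ * instructions into memory, e.g. when patching kernel code, the
+ * instruction cache must be brought in sync before the new code
+ * may execute:
+ *
+ *	memcpy((void *) addr, new_insns, len);
+ *	flush_icache_range(addr, addr + len);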
+ */
+
+/*
+ * Caches aren't brain-dead on the E2K
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+/*
+ * Invalidate all ICACHES of the host processor
+ */
+
+typedef struct icache_range_array {
+ icache_range_t *ranges;
+ int count;
+ struct mm_struct *mm;
+} icache_range_array_t;
+
+extern void __flush_icache_all(void);
+extern void native_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
+extern void __flush_icache_range_array(
+ icache_range_array_t *icache_range_arr);
+extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+#ifndef CONFIG_SMP
+#define flush_icache_all() __flush_icache_all()
+#define flush_icache_range(start, end) __flush_icache_range(start, end)
+#define flush_icache_range_array __flush_icache_range_array
+#define flush_icache_page(vma, page) __flush_icache_page(vma, page)
+
+#define native_smp_flush_icache_range(start, end)
+#define native_smp_flush_icache_range_array(icache_range_arr)
+#define native_smp_flush_icache_page(vma, page)
+#define native_smp_flush_icache_all()
+#define native_smp_flush_icache_kernel_line(addr)
+#else /* CONFIG_SMP */
+extern void native_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
+extern void native_smp_flush_icache_range_array(
+ icache_range_array_t *icache_range_arr);
+extern void native_smp_flush_icache_page(struct vm_area_struct *vma,
+ struct page *page);
+extern void native_smp_flush_icache_all(void);
+extern void native_smp_flush_icache_kernel_line(e2k_addr_t addr);
+
+#define flush_icache_all() smp_flush_icache_all()
+
+#define flush_icache_range(start, end) \
+({ \
+ if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \
+ __flush_icache_range(start, end); \
+ else \
+ smp_flush_icache_range(start, end); \
+})
+
+#define flush_icache_range_array smp_flush_icache_range_array
+
+#define flush_icache_page(vma, page) \
+({ \
+ if (cpu_has(CPU_FEAT_FLUSH_DC_IC)) \
+ __flush_icache_page(vma, page); \
+ else \
+ smp_flush_icache_page(vma, page); \
+})
+
+#endif /* ! (CONFIG_SMP) */
+
+/*
+ * Some useful routines to flush caches
+ */
+
+/*
+ * Write Back and Invalidate all caches (instruction and data).
+ * "local_" versions work on the calling CPU only.
+ */
+extern void local_write_back_cache_all(void);
+extern void local_write_back_cache_range(unsigned long start, size_t size);
+extern void write_back_cache_all(void);
+extern void write_back_cache_range(unsigned long start, size_t size);
+
+/*
+ * Flush multiple DCACHE lines
+ */
+static inline void
+native_flush_DCACHE_range(void *addr, size_t len)
+{
+ char *cp, *end;
+ unsigned long stride;
+
+ DebugMR("Flush DCACHE range: virtual addr 0x%lx, len %lx\n", addr, len);
+
+ /* Although L1 cache line is 32 bytes, coherency works
+ * with 64 bytes granularity.
So a single flush_dc_line + * can flush _two_ lines from L1 */ + stride = SMP_CACHE_BYTES; + + end = PTR_ALIGN(addr + len, SMP_CACHE_BYTES); + + E2K_WAIT_ST; + for (cp = addr; cp < end; cp += stride) + flush_DCACHE_line((unsigned long) cp); + E2K_WAIT_FLUSH; +} + +/* + * Clear multiple DCACHE L1 lines + */ +static inline void +native_clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + unsigned long cp; + unsigned long end = (unsigned long) virt_addr + len; + unsigned long stride; + + stride = cacheinfo_get_l1d_line_size(); + + for (cp = (u64) virt_addr; cp < end; cp += stride) + clear_DCACHE_L1_line(cp); +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ +static inline void +smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + native_smp_flush_icache_range(start, end); +} +static inline void +smp_flush_icache_range_array(icache_range_array_t *icache_range_arr) +{ + native_smp_flush_icache_range_array(icache_range_arr); +} +static inline void +smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + native_smp_flush_icache_page(vma, page); +} +static inline void +smp_flush_icache_all(void) +{ + native_smp_flush_icache_all(); +} +static inline void +smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + native_smp_flush_icache_kernel_line(addr); +} +static inline void +__flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + native_flush_icache_range(start, end); +} +static inline void +flush_DCACHE_range(void *addr, size_t len) +{ + native_flush_DCACHE_range(addr, len); +} +static inline void +clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + native_clear_DCACHE_L1_range(virt_addr, len); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, void *dst, + const void *src, unsigned long len) +{ + if (IS_ALIGNED((unsigned long) dst, 8) && + IS_ALIGNED((unsigned long) src, 8) && IS_ALIGNED(len, 8)) { + tagged_memcpy_8(dst, src, len); + } else { + memcpy(dst, src, len); + } + flush_icache_range((unsigned long) dst, (unsigned long) dst + len); +} + +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, void *dst, + const void *src, size_t len) +{ + if (IS_ALIGNED((unsigned long) dst, 8) && + IS_ALIGNED((unsigned long) src, 8) && IS_ALIGNED(len, 8)) { + tagged_memcpy_8(dst, src, len); + } else { + memcpy(dst, src, len); + } +} + +#endif /* _E2K_CACHEFLUSH_H */ diff --git a/arch/e2k/include/asm/checksum.h b/arch/e2k/include/asm/checksum.h new file mode 100644 index 0000000..9bddf84 --- /dev/null +++ b/arch/e2k/include/asm/checksum.h @@ -0,0 +1,141 @@ +#ifndef _E2K_CHECKSUM_H_ +#define _E2K_CHECKSUM_H_ + +#include +#include + +extern unsigned int __pure e2k_do_csum(const unsigned char *buff, int len); + +/* + * Fold a partial checksum + */ +#define csum_fold csum_fold +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + return (__force __sum16) ((~sum - __builtin_e2k_scls(sum, 16)) >> 16); +} + +static inline u32 from64to32(u64 x) +{ + x += __builtin_e2k_scld(x, 32); + return (u32) (x >> 32); +} + +/* + * ihl is always 5 or greater, almost always is 5, + * and iph is word aligned the 
majority of the time. + */ +static inline __wsum ip_fast_csum_nofold_maybe_unaligned(const void *iph, unsigned int ihl) +{ + const u32 *iph32 = iph; + size_t i; + u64 sum; + + sum = (u64) iph32[0] + (u64) iph32[1] + (u64) iph32[2] + + (u64) iph32[3] + (u64) iph32[4]; + + if (unlikely(ihl > 5)) { + for (i = 5; i < ihl; i++) + sum += (u64) iph32[i]; + } + + return (__force __wsum) from64to32(sum); +} +#define ip_fast_csum ip_fast_csum +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + if (cpu_has(CPU_HWBUG_UNALIGNED_LOADS) && + !IS_ALIGNED((unsigned long) iph, 4)) + return (__force __sum16) ~e2k_do_csum(iph, ihl*4); + else + return csum_fold(ip_fast_csum_nofold_maybe_unaligned(iph, ihl)); + +} + +static inline u32 add32_with_carry(u32 a, u32 b) +{ + u64 arg1 = ((u64) a << 32ULL) | (u64) b; + u64 arg2 = ((u64) b << 32ULL) | (u64) a; + + return (arg1 + arg2) >> 32ULL; +} + +#define HAVE_ARCH_CSUM_ADD +static inline __wsum csum_add(__wsum csum, __wsum addend) +{ + return (__force __wsum) add32_with_carry((__force u32) csum, + (__force u32) addend); +} + + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum __csum_partial(const void *buff, int len, __wsum sum); + +static inline __wsum csum_partial(const void *buff, int len, __wsum sum) +{ + if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0 && + !cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) { + u64 sum_64 = (__force u32) sum; + + if (len == 2) + sum_64 += *(const u16 *) buff; + if (len >= 4) + sum_64 += *(const u32 *) buff; + if (len == 6) + sum_64 += *(const u16 *) (buff + 4); + if (len >= 8) + sum_64 += *(const u32 *) (buff + 4); + if (len == 10) + sum_64 += *(const u16 *) (buff + 8); + if (len >= 12) + sum_64 += *(const u32 *) (buff + 8); + if (len == 14) + sum_64 += *(const u16 *) (buff + 12); + if (len >= 16) + sum_64 += *(const u32 *) (buff + 12); + + sum = from64to32(sum_64); + } else if (__builtin_constant_p(len) && (len & 3) == 0 && + !cpu_has(CPU_HWBUG_UNALIGNED_LOADS)) { + sum = csum_add(sum, ip_fast_csum_nofold_maybe_unaligned(buff, len >> 2)); + } else { + E2K_PREFETCH_L1((__force void *) buff); + sum = __csum_partial(buff, len, sum); + } + return sum; +} + +#define csum_tcpudp_nofold csum_tcpudp_nofold +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + u64 s = (__force u32) sum; + + s += (__force u32) saddr; + s += (__force u32) daddr; + s += (proto + len) << 8; + return (__force __wsum) from64to32(s); +} + +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum); + + +#include + +#endif /* _E2K_CHECKSUM_H_ */ diff --git a/arch/e2k/include/asm/clkr.h b/arch/e2k/include/asm/clkr.h new file mode 100644 index 0000000..a1d6bff --- /dev/null +++ b/arch/e2k/include/asm/clkr.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_CLKR_H +#define _ASM_E2K_CLKR_H + +#include +#include + +extern __interrupt u64 fast_syscall_read_clkr(void); + +extern u64 last_clkr; +DECLARE_PER_CPU(u64, clkr_offset); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel 
(not paravirtualized based on pv_ops) */
+#include
+#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* native kernel with or without virtualization support */
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _ASM_E2K_CLKR_H */ diff --git a/arch/e2k/include/asm/clock_info.h b/arch/e2k/include/asm/clock_info.h new file mode 100644 index 0000000..1d91905 --- /dev/null +++ b/arch/e2k/include/asm/clock_info.h @@ -0,0 +1,109 @@
+/*
+ * Kernel performance measuring tool and support
+ */
+#ifndef _E2K_CLOCK_INFO_H
+#define _E2K_CLOCK_INFO_H
+
+#include
+
+#ifndef __ASSEMBLY__
+#include
+#endif /* __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+
+typedef u64 e2k_clock_t;
+
+typedef enum {
+ SYSTEM_CALL_TT = 1, /* system calls */
+ TRAP_TT /* traps */
+} times_type_t;
+
+typedef struct {
+ int syscall_num; /* # of system call */
+ int signals_num; /* number of handled signals */
+ e2k_clock_t start; /* start clock of system call */
+ e2k_clock_t end; /* end clock */
+ e2k_clock_t pt_regs_set; /* pt_regs structure is set */
+ e2k_clock_t save_stack_regs;
+ e2k_clock_t save_sys_regs;
+ e2k_clock_t save_stacks_state;
+ e2k_clock_t save_thread_state;
+ e2k_clock_t scall_switch;
+ e2k_clock_t scall_done;
+ e2k_clock_t restore_thread_state;
+ e2k_clock_t check_pt_regs;
+ e2k_clock_t do_signal_start;
+ e2k_clock_t do_signal_done;
+ e2k_clock_t restore_start;
+ e2k_clock_t restore_user_regs;
+ e2k_pshtp_t pshtp;
+ u64 psp_ind;
+ e2k_pshtp_t pshtp_to_done;
+ u64 psp_ind_to_done;
+} scall_times_t;
+
+typedef struct {
+ e2k_clock_t start; /* start clock of system call */
+ e2k_clock_t end; /* end clock */
+ e2k_clock_t pt_regs_set; /* pt_regs structure is set */
+ e2k_clock_t signal_done;
+ int nr_TIRs;
+ e2k_tir_t TIRs[TIR_NUM];
+ e2k_psp_hi_t psp_hi;
+ e2k_pshtp_t pshtp;
+ u64 psp_ind;
+ e2k_pcsp_hi_t pcsp_hi;
+ u64 ctpr1;
+ u64 ctpr2;
+ u64 ctpr3;
+ u8 ps_bounds;
+ u8 pcs_bounds;
+ int trap_num;
+ e2k_psp_hi_t psp_hi_to_done;
+ e2k_pshtp_t pshtp_to_done;
+ e2k_pcsp_hi_t pcsp_hi_to_done;
+ u64 ctpr1_to_done;
+ u64 ctpr2_to_done;
+ u64 ctpr3_to_done;
+} trap_times_t;
+
+typedef struct kernel_times {
+ times_type_t type;
+ union {
+ scall_times_t syscall; /* system calls */
+ trap_times_t trap; /* traps */
+ } of;
+} kernel_times_t;
+
+#ifdef CONFIG_MAX_KERNEL_TIMES_NUM
+#define MAX_KERNEL_TIMES_NUM CONFIG_MAX_KERNEL_TIMES_NUM
+#else
+#define MAX_KERNEL_TIMES_NUM 20
+#endif /* CONFIG_MAX_KERNEL_TIMES_NUM */
+
+#define INCR_KERNEL_TIMES_COUNT(ti) { \
+ (ti)->times_index ++; \
+ (ti)->times_num ++; \
+ if ((ti)->times_index >= MAX_KERNEL_TIMES_NUM) \
+ (ti)->times_index = 0; \
+ }
+#define GET_DECR_KERNEL_TIMES_COUNT(ti, count) { \
+ (count) = (ti)->times_index; \
+ if ((ti)->times_num == 0) \
+ (ti)->times_num = 1; \
+ (count) --; \
+ if ((count) < 0) \
+ (count) = MAX_KERNEL_TIMES_NUM - 1; \
+ }
+#define E2K_SAVE_CLOCK_REG(clock) { \
+ (clock) = E2K_GET_DSREG(clkr); \
+ }
+#define CALCULATE_CLOCK_TIME(start_clock, end_clock) \
+ ((end_clock) - (start_clock))
+
+extern void sys_e2k_print_kernel_times(struct task_struct *task,
+ kernel_times_t *times, long times_num, int times_index);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _E2K_CLOCK_INFO_H */ diff --git a/arch/e2k/include/asm/cmos.h b/arch/e2k/include/asm/cmos.h new file mode 100644 index 0000000..a6b204a --- /dev/null +++ b/arch/e2k/include/asm/cmos.h @@ -0,0 +1,37 @@
+#ifndef _ASM_CMOS_H
+#define _ASM_CMOS_H
+
+#include
+#include
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
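+
+/* The mc146818 RTC is programmed through an index/data port pair:
+ * the register index is written to RTC_PORT(0), and the value is
+ * then read or written through RTC_PORT(1), as the accessors
+ * below do. */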
+#endif
+
+static inline char mc146818_cmos_read(char addr)
+{
+ if (HAS_MACHINE_E2K_IOHUB) {
+ WARN_ONCE(1, "Warning: CMOS_READ attempted on a machine without a functioning CMOS\n");
+ return 0;
+ }
+
+ outb_p(addr, RTC_PORT(0));
+ return inb_p(RTC_PORT(1));
+}
+
+static inline void mc146818_cmos_write(char val, char addr)
+{
+ if (HAS_MACHINE_E2K_IOHUB) {
+ WARN_ONCE(1, "Warning: CMOS_WRITE attempted on a machine without a functioning CMOS\n");
+ return;
+ }
+
+ outb_p(addr, RTC_PORT(0));
+ outb_p(val, RTC_PORT(1));
+}
+
+#define CMOS_READ(addr) mc146818_cmos_read(addr)
+#define CMOS_WRITE(val, addr) mc146818_cmos_write(val, addr)
+
+#endif diff --git a/arch/e2k/include/asm/cmpxchg.h b/arch/e2k/include/asm/cmpxchg.h new file mode 100644 index 0000000..77136d1 --- /dev/null +++ b/arch/e2k/include/asm/cmpxchg.h @@ -0,0 +1,102 @@
+#ifndef ASM_E2K_CMPXCHG_H
+#define ASM_E2K_CMPXCHG_H
+
+#include
+#include
+#include
+#include
+
+/*
+ * Non-existent functions to indicate usage errors at link time
+ * (or at compile time, if the compiler implements __compiletime_error()).
+ */
+extern void __xchg_wrong_size(void)
+ __compiletime_error("Bad argument size for xchg");
+extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+
+#define __xchg(ptr, val, mem_model) \
+({ \
+ volatile void *__x_ptr = (volatile void *) (ptr); \
+ u64 __x_ret, __x_val = (u64) (val); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __x_ret = __api_xchg_return(__x_val, (volatile u8 *) __x_ptr, \
+ b, mem_model); \
+ break; \
+ case 2: \
+ __x_ret = __api_xchg_return(__x_val, (volatile u16 *) __x_ptr, \
+ h, mem_model); \
+ break; \
+ case 4: \
+ __x_ret = __api_xchg_return(__x_val, (volatile u32 *) __x_ptr, \
+ w, mem_model); \
+ break; \
+ case 8: \
+ __x_ret = __api_xchg_return(__x_val, (volatile u64 *) __x_ptr, \
+ d, mem_model); \
+ break; \
+ default: \
+ __x_ret = 0; \
+ __xchg_wrong_size(); \
+ break; \
+ } \
+ (__typeof__(*(ptr))) __x_ret; \
+})
+#define xchg_relaxed(ptr, v) __xchg((ptr), (v), RELAXED_MB)
+#define xchg_acquire(ptr, v) __xchg((ptr), (v), ACQUIRE_MB)
+#define xchg_release(ptr, v) __xchg((ptr), (v), RELEASE_MB)
+#define xchg(ptr, v) __xchg((ptr), (v), STRONG_MB)
+
+#define __cmpxchg(ptr, old, new, mem_model) \
+({ \
+ volatile void *__x_ptr = (volatile void *) (ptr); \
+ u64 __x_ret, __x_old = (u64) (old), __x_new = (u64) (new); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __x_ret = __api_cmpxchg_return(__x_old, __x_new, \
+ (volatile u8 *) __x_ptr, b, 0x4, mem_model); \
+ break; \
+ case 2: \
+ __x_ret = __api_cmpxchg_return(__x_old, __x_new, \
+ (volatile u16 *) __x_ptr, h, 0x5, mem_model); \
+ break; \
+ case 4: \
+ __x_ret = __api_cmpxchg_word_return(__x_old, __x_new, \
+ (volatile u32 *) __x_ptr, mem_model); \
+ break; \
+ case 8: \
+ __x_ret = __api_cmpxchg_dword_return(__x_old, __x_new, \
+ (volatile u64 *) __x_ptr, mem_model); \
+ break; \
+ default: \
+ __x_ret = 0; \
+ __cmpxchg_wrong_size(); \
+ break; \
+ } \
+ (__typeof__(*(ptr))) __x_ret; \
+})
+#define cmpxchg_relaxed(ptr, o, n) __cmpxchg((ptr), (o), (n), RELAXED_MB)
+#define cmpxchg_acquire(ptr, o, n) __cmpxchg((ptr), (o), (n), ACQUIRE_MB)
+#define cmpxchg_release(ptr, o, n) __cmpxchg((ptr), (o), (n), RELEASE_MB)
+#define cmpxchg(ptr, o, n) __cmpxchg((ptr), (o), (n), STRONG_MB)
+#define cmpxchg_lock(ptr, o, n) __cmpxchg((ptr), (o), (n), LOCK_MB)
+
+#define __cmpxchg64(ptr, o, n, mem_model) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ (u64) __cmpxchg((ptr), (o), (n), mem_model); \
+})
+#define cmpxchg64_relaxed(ptr, o, n) __cmpxchg64((ptr), (o), (n), RELAXED_MB)
+#define cmpxchg64_acquire(ptr, o, n) __cmpxchg64((ptr), (o), (n), ACQUIRE_MB)
+#define cmpxchg64_release(ptr, o, n) __cmpxchg64((ptr), (o), (n), RELEASE_MB)
+#define cmpxchg64(ptr, o, n) __cmpxchg64((ptr), (o), (n), STRONG_MB)
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+ __api_cmpxchg_double(p1, p2, o1, o2, n1, n2)
+
+/* The *_local variants only need to be atomic with respect to the
+ * current CPU, so falling back to the fully ordered variants is
+ * always correct. */
+#define cmpxchg_local(ptr, o, n) cmpxchg((ptr), (o), (n))
+#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
+
+#define system_has_cmpxchg_double() 1
+#endif /* ASM_E2K_CMPXCHG_H */ diff --git a/arch/e2k/include/asm/cnt_point.h b/arch/e2k/include/asm/cnt_point.h new file mode 100644 index 0000000..bb4e365 --- /dev/null +++ b/arch/e2k/include/asm/cnt_point.h @@ -0,0 +1,359 @@
+/* $Id: cnt_point.h,v 1.3 2009/06/29 11:51:48 atic Exp $
+ *
+ * Recovery of the system from a control point.
+ */
+
+#ifndef _E2K_CNT_POINT_H
+#define _E2K_CNT_POINT_H
+
+#include
+#include
+#include
+
+/*
+ * Core dump header on the disk
+ * Total size of header should be one page of memory = one block on disk
+ * Note that the first kilobyte is reserved for boot loader or
+ * disk label stuff...
+ * The first bytes of the header should contain a signature and the last
+ * bytes of the header - a magic value to indicate dump header integrity
+ * Other structures are aligned to have constant offset in the header
+ * by adding zip areas in the structure end.
+ */
+#define TOTAL_DUMP_HEADER_SIZE PAGE_SIZE
+#define BOOTBITS_DUMP_HEADER_SIZE 0x400 /* offset 0x000 */
+#define DUMP_INFO_HEADER_SIZE 0x100 /* offset 0x400 */
+#define CORE_DUMP_HEADER_SIZE 0x500 /* offset 0x500 */
+ /* offset 0xa00 - gap */
+ /* offset 0xff8 - magic */
+
+/*
+ * Dump device and common dump state info
+ * Dump file space layout:
+ * block 0 dump file header
+ * block 1 core dump area start
+ * ---------------------------------
+ * | header | core dump area |
+ * ---------------------------------
+ * 0 block
+ * 1 block
+ */
+
+#define CORE_DUMP_AREA_OFFSET 1
+#define DEFAULT_CORE_AREA_MAX_SIZE (16 * 1024L) /* 16 Gb */
+
+typedef struct dump_desc {
+ u64 signature; /* signature to indicate dump */
+ /* header structure start */
+ /* should be first bytes of useful */
+ /* part of the header */
+ u8 cntp_valid; /* control points header of file */
+ /* is created and valid */
+ u8 core_valid; /* system core dump header of file */
+ /* is created and valid */
+ u64 file_size; /* total size of dump file */
+ /* in pages */
+ /* (page size = block size) */
+ u64 cntp_offset; /* offset (in blocks = page) */
+ /* of control points area in */
+ /* the dump file */
+ u64 cntp_size; /* size of control points area */
+ /* in blocks */
+ u64 core_offset; /* offset (in blocks = page) */
+ /* of core dump area in */
+ /* the dump file */
+ u64 core_size; /* size of core dump area */
+ /* in blocks */
+} dump_desc_t;
+
+/*
+ * System core dump state info
+ */
+typedef struct core_dump {
+} core_dump_t;
+
+/*
+ * Dump header on the disk structure
+ */
+typedef struct dump_header {
+ /* Space for disklabel etc.
*/ + u8 bootbits[BOOTBITS_DUMP_HEADER_SIZE]; + + dump_desc_t info; /* Device & dump state common info */ + u8 zip1[DUMP_INFO_HEADER_SIZE - sizeof (dump_desc_t)]; + + core_dump_t core; /* System core dump header stuff */ + u8 zip3[CORE_DUMP_HEADER_SIZE - sizeof (core_dump_t)]; + + /* zip area to make size of */ + /* header - constant == PAGE_SIZE */ + u8 gap[ TOTAL_DUMP_HEADER_SIZE - + BOOTBITS_DUMP_HEADER_SIZE - + DUMP_INFO_HEADER_SIZE - + CORE_DUMP_HEADER_SIZE - + 8]; /* u64 : magic */ + + u64 magic; /* magic value to indicate control */ + /* point header structure */ + /* should be last bytes of the */ + /* header */ +} dump_header_t; + +#define DUMP_HEADER_SIGNATURE 0xe2c0c0e226143210 +#define DUMP_HEADER_MAGIC 0xe2c0c0e22614cdef + +#define DUMP_BLOCK_TO_SECTOR(block) ((block) * (PAGE_SIZE >> 9)) +#define CORE_BLOCK_TO_SECTOR(block) DUMP_BLOCK_TO_SECTOR(block) + +/* + * Forwards of some functions to recover system state + */ + +extern struct vm_area_struct *cntp_find_vma(struct task_struct *ts, + unsigned long addr); +extern void dump_prepare(u16 dump_dev, u64 dump_sector); +extern void start_emergency_dump(void); +extern int create_dump_point(void); + +extern void init_dump_analyze_mode(void); +extern void start_dump_analyze(void); + +extern e2k_addr_t cntp_kernel_address_to_phys(e2k_addr_t address); +extern e2k_addr_t cntp_user_address_to_phys(struct task_struct *tsk, + e2k_addr_t address); + +extern int map_memory_region(e2k_addr_t mem_base, e2k_addr_t mem_end, + int *just_mapped_point); + +extern int run_init_process(const char *init_filename); + +#if defined(CONFIG_EMERGENCY_DUMP) +extern unsigned int nr_swapfiles; +extern struct swap_info_struct *swap_info[MAX_SWAPFILES]; +#endif + +extern e2k_addr_t cntp_kernel_base; + +extern int cur_cnt_point; +extern int cntp_small_kern_mem_div; +extern int dump_analyze_mode; +extern int dump_analyze_opt; +extern char dump_analyze_cmd[]; + +#define boot_cur_cnt_point \ + boot_get_vo_value(cur_cnt_point) +#define boot_cntp_small_kern_mem_div \ + boot_get_vo_value(cntp_small_kern_mem_div) +#define boot_dump_analyze_mode \ + boot_get_vo_value(dump_analyze_mode) +#define boot_dump_analyze_opt \ + boot_get_vo_value(dump_analyze_opt) +#define boot_dump_analyze_cmd \ + boot_vp_to_pp((char *)dump_analyze_cmd) + +extern inline e2k_size_t +get_dump_analyze_bank_size(e2k_phys_bank_t *phys_bank, int cntp_num) +{ + e2k_addr_t base, new_base; + e2k_size_t size, new_size; + + BUG_ON(cntp_num == 0 || cntp_num == 1); + + size = phys_bank->pages_num * PAGE_SIZE; + base = phys_bank->base_addr; + new_base = LARGE_PAGE_ALIGN_DOWN(base); + new_size = size - (new_base - base); + + return LARGE_PAGE_ALIGN_UP(new_size / cntp_num); +} + +extern inline e2k_size_t +get_dump_analyze_memory_len(e2k_phys_bank_t *phys_bank, int cntp, int cntp_num) +{ + e2k_size_t size = get_dump_analyze_bank_size(phys_bank, cntp_num); + e2k_size_t len = size; + e2k_addr_t base; + e2k_addr_t new_base; + + BUG_ON(cntp_num == 0 || cntp_num == 1); + BUG_ON(cntp != cntp_num - 1); + + + base = phys_bank->base_addr; + new_base = LARGE_PAGE_ALIGN_DOWN(base); + len += phys_bank->pages_num * PAGE_SIZE - + ((new_base - base) + size * cntp_num); + + return len; +} + +extern inline e2k_addr_t +get_dump_analyze_memory_offset(e2k_phys_bank_t *phys_bank, int cntp, + int cntp_num) +{ + e2k_size_t size; + e2k_addr_t offset = 0; + e2k_addr_t base; + e2k_addr_t new_base; + + BUG_ON(cntp_num == 0 || cntp_num == 1); + BUG_ON(cntp != cntp_num - 1); + + size = get_dump_analyze_bank_size(phys_bank, cntp_num); + base 
= phys_bank->base_addr; + new_base = LARGE_PAGE_ALIGN_DOWN(base); + offset = (new_base - base) + size * cntp; + return offset; +} + +extern inline e2k_addr_t +get_dump_analyze_memory_base(e2k_phys_bank_t *phys_bank, int cntp, int cntp_num) +{ + e2k_addr_t offset = get_dump_analyze_memory_offset( + phys_bank, cntp, cntp_num); + e2k_addr_t base = phys_bank->base_addr; + + base += offset; + return base; +} + +extern inline e2k_addr_t +boot_get_dump_analyze_kernel_base(void) +{ + e2k_phys_bank_t *phys_bank; + e2k_addr_t base; + e2k_addr_t new_base; + e2k_size_t cntp_size; + int node; + int bank; + + for (node = 0; node < L_MAX_MEM_NUMNODES; node ++) { + phys_bank = full_phys_mem[node].banks; + if (phys_bank->pages_num == 0) + continue; /* node has not memory */ + + for (bank = 0; bank < L_MAX_NODE_PHYS_BANKS; bank ++) { + if (phys_bank->pages_num == 0) + break; + + cntp_size = get_dump_analyze_memory_len( + phys_bank, + boot_cntp_small_kern_mem_div - 1, + boot_cntp_small_kern_mem_div); + if (cntp_size < boot_kernel_image_size) + goto next_bank; + + base = get_dump_analyze_memory_base( + phys_bank, + boot_cntp_small_kern_mem_div - 1, + boot_cntp_small_kern_mem_div); + + new_base = _PAGE_ALIGN_DOWN(base, E2K_KERNEL_PAGE_SIZE); + if (new_base - base + boot_kernel_image_size <= + cntp_size) + return new_base; +next_bank: + phys_bank ++; + } + } + + /* + * TODO: avoid this + */ + BUG(); + + return -1; +} + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on physical address + * bootblock virtual address can be only read + */ + +static inline u64 +read_bootblock_cur_cnt_point(bootblock_struct_t *bootblock) +{ + return READ_BOOTBLOCK_FIELD(bootblock, cur_cnt_point); +} + +extern inline void +write_bootblock_cur_cnt_point(bootblock_struct_t *bootblock, u64 new_cnt_point) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, cur_cnt_point, new_cnt_point); +} + +extern inline void +write_bootblock_mem_cnt_points(bootblock_struct_t *bootblock, u64 new_mem_points) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, mem_cnt_points, new_mem_points); +} + +extern inline void +write_bootblock_disk_cnt_points(bootblock_struct_t *bootblock, + u64 new_disk_points) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, disk_cnt_points, new_disk_points); +} + +extern inline void +write_bootblock_kernel_base(bootblock_struct_t *bootblock, + u64 new_kernel_base) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, info.kernel_base, new_kernel_base); +} + +extern inline u64 +read_bootblock_cntp_kernel_base(bootblock_struct_t *bootblock, int cntp) +{ + return READ_BOOTBLOCK_FIELD(bootblock, + info.cntp_info[cntp].kernel_base); +} + +extern inline void +write_bootblock_cntp_kernel_base(bootblock_struct_t *bootblock, int cntp, + u64 kernel_base) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, info.cntp_info[cntp].kernel_base, + kernel_base); +} + +extern inline void +set_bootblock_cntp_created(bootblock_struct_t *bootblock) +{ + WRITE_BOOTBLOCK_FIELD(bootblock, cnt_points_created, 1); +} + +/* + * Convert virtual address of kernel item in a control point context + * to the consistent physical address. 
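+ *
+ * The dispatch below is: user addresses (below PAGE_OFFSET) are
+ * translated through the control point's page tables, direct-mapped
+ * addresses (PAGE_OFFSET .. PAGE_OFFSET + MAX_PM_SIZE) are converted
+ * with __pa(), and kernel image addresses are rebased from
+ * KERNEL_BASE onto the control point's kernel physical base.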
+ */ +#define cntp_va_to_pa(virt_addr, cntp_kernel_phys_base, ts) \ +({ \ + e2k_addr_t phys = 0; \ + e2k_addr_t virt = (e2k_addr_t)virt_addr; \ + \ + if (virt > 0 && virt < PAGE_OFFSET) \ + phys = cntp_user_address_to_phys(ts, virt); \ + else if (virt >= PAGE_OFFSET && virt < PAGE_OFFSET + MAX_PM_SIZE) \ + phys = __pa(virt); \ + else if (virt >= KERNEL_BASE && virt <= KERNEL_END) \ + phys = virt - KERNEL_BASE + cntp_kernel_phys_base; \ + else if (virt != 0) \ + phys = cntp_kernel_address_to_phys(virt); \ + \ + phys; \ +}) + +#define cntp_va(virt_addr, ts) \ +({ \ + void *virt = (void*)0; \ + if ((e2k_addr_t)virt_addr != 0) { \ + virt = (void *) cntp_va_to_pa(virt_addr, cntp_kernel_base, ts);\ + if (((unsigned long) virt) != -1) \ + virt = __va(virt); \ + } \ + virt; \ +}) +#endif /* _E2K_CNT_POINT_H */ diff --git a/arch/e2k/include/asm/compat.h b/arch/e2k/include/asm/compat.h new file mode 100644 index 0000000..d48d19a --- /dev/null +++ b/arch/e2k/include/asm/compat.h @@ -0,0 +1,214 @@ +#ifndef _ASM_E2K_COMPAT_H +#define _ASM_E2K_COMPAT_H + +/* + * Architecture specific compatibility types + */ +#include + +#include +#include +#include + +#include + +#define COMPAT_USER_HZ 100 + +typedef u32 compat_size_t; +typedef s32 compat_ssize_t; +typedef s32 compat_clock_t; +typedef s32 compat_pid_t; +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; +typedef u32 __compat_uid32_t; +typedef u32 __compat_gid32_t; +typedef u16 compat_mode_t; +typedef u32 compat_ino_t; +typedef u16 compat_dev_t; +typedef s32 compat_off_t; +typedef s64 compat_loff_t; +typedef s16 compat_nlink_t; +typedef u16 compat_ipc_pid_t; +typedef s32 compat_daddr_t; +typedef u32 compat_caddr_t; +typedef __kernel_fsid_t compat_fsid_t; +typedef s32 compat_key_t; +typedef s32 compat_timer_t; + +typedef s32 compat_int_t; +typedef s32 compat_long_t; +typedef u32 compat_uint_t; +typedef u32 compat_ulong_t; +typedef u32 compat_uptr_t; + +typedef u64 compat_u64; +typedef s64 compat_s64; + +struct compat_stat { + compat_dev_t st_dev; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + compat_dev_t st_rdev; + compat_off_t st_size; + compat_time_t st_atime; + compat_ulong_t st_atime_nsec; + compat_time_t st_mtime; + compat_ulong_t st_mtime_nsec; + compat_time_t st_ctime; + compat_ulong_t st_ctime_nsec; + compat_off_t st_blksize; + compat_off_t st_blocks; + u32 __unused4[2]; +}; + +struct compat_flock { + short l_type; + short l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; + short __unused; +}; + +#define F_GETLK64 12 +#define F_SETLK64 13 +#define F_SETLKW64 14 + +struct compat_flock64 { + short l_type; + short l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; + short __unused; +}; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; + int f_frsize; + int f_flags; + int f_spare[4]; +}; + +#define COMPAT_RLIM_INFINITY 0x7fffffff + +typedef u32 compat_old_sigset_t; + +#undef DebugUS +#define DEBUG_US 0 /* Allocate User Space */ +#define DebugUS(...) 
DebugPrint(DEBUG_US ,##__VA_ARGS__) + + +#define _COMPAT_NSIG 64 +#define _COMPAT_NSIG_BPW 32 + +typedef u32 compat_sigset_word; + +typedef struct sigevent32 { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[SIGEV_PAD_SIZE32]; + + struct { + u32 _function; + u32 _attribute; /* really pthread_attr_t */ + } _sigev_thread; + } _sigev_un; +} sigevent_t32; + +#define COMPAT_OFF_T_MAX 0x7fffffff + +/* + * The type of struct elf_prstatus.pr_reg in compatible core dumps. + */ +typedef struct user_regs_struct compat_elf_gregset_t; + +static inline void __user *compat_ptr(compat_uptr_t uptr) +{ + return (void __user *)(unsigned long)uptr; +} + +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + +extern void __user *arch_compat_alloc_user_space(unsigned long len); + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + unsigned short __pad1; + compat_mode_t mode; + unsigned short __pad2; + unsigned short seq; + unsigned long __unused1; /* yes they really are 64bit pads */ + unsigned long __unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t __unused1; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_nsems; + compat_ulong_t sem_ctime_high; + compat_ulong_t __unused2; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t __unused1; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t msg_ctime_high; + compat_ulong_t __unused2; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_ulong_t __unused1; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_size_t shm_segsz; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t shm_ctime_high; + compat_ulong_t __unused2; +}; + +static inline int is_compat_task(void) +{ + return current->thread.flags & E2K_FLAG_32BIT; +} + +#endif /* _ASM_E2K_COMPAT_H */ diff --git a/arch/e2k/include/asm/compiler.h b/arch/e2k/include/asm/compiler.h new file mode 100644 index 0000000..38a5e2a --- /dev/null +++ b/arch/e2k/include/asm/compiler.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_COMPILER_H +#define _ASM_COMPILER_H + +#include + +#undef barrier +#undef barrier_data +#undef RELOC_HIDE + +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) +#if GCC_VERSION >= 40400 +/* builtin version has better throughput but worse latency */ +#undef __HAVE_BUILTIN_BSWAP32__ +#endif +#endif + +#define __PREEMPTION_CLOBBERS_1(cpu_greg, offset_greg) \ + "g" #cpu_greg, "g" #offset_greg +#define __PREEMPTION_CLOBBERS(cpu_greg, offset_greg) \ + __PREEMPTION_CLOBBERS_1(cpu_greg, offset_greg) +/* If a compiler barrier is used in loop, these clobbers will + * force the compiler to always access *current* per-cpu area + * instead of moving its address calculation out from the loop. 
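+ * For example, without these clobbers a loop that reads a per-cpu
+ * variable could have the per-cpu base address hoisted out of the
+ * loop, which is wrong if the task migrates to another CPU between
+ * iterations.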
+ * + * The same goes for preemption-disabled sections: these clobbers + * will forbid compiler to move per-cpu area address calculation out + * from them. Since disabling interrupts also disables preemption, + * we also need these clobbers when writing PSR/UPSR. */ +#define PREEMPTION_CLOBBERS __PREEMPTION_CLOBBERS(SMP_CPU_ID_GREG, MY_CPU_OFFSET_GREG) + +#ifdef CONFIG_DEBUG_LCC_VOLATILE_ATOMIC +#define NOT_VOLATILE volatile +#else +#define NOT_VOLATILE +#endif + +/* See bug #89623, bug #94946 */ +#define barrier() \ +do { \ + int unused; \ + __asm__ NOT_VOLATILE("" : "=r" (unused) : : "memory", PREEMPTION_CLOBBERS);\ +} while (0) + +/* See comment before PREEMPTION_CLOBBERS */ +#define barrier_preemption() \ +do { \ + int unused; \ + __asm__ NOT_VOLATILE("" : "=r" (unused) : : PREEMPTION_CLOBBERS);\ +} while (0) + +#define barrier_data(ptr) \ +do { \ + __asm__ NOT_VOLATILE("" : : "r"(ptr) : "memory", PREEMPTION_CLOBBERS); \ +} while (0) + +#define RELOC_HIDE(ptr, off) \ +({ \ + unsigned long __ptr; \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); \ +}) + +#if defined(__LCC__) && (__LCC__ > 125 || __LCC__ == 125 && __LCC_MINOR__ >= 9) +# define builtin_expect_wrapper(x, val) __builtin_expect_with_probability((x), (val), 0.9999) +#else +# define builtin_expect_wrapper(x, val) __builtin_expect((x), (val)) +#endif + +#endif /* _ASM_COMPILER_H */ diff --git a/arch/e2k/include/asm/console.h b/arch/e2k/include/asm/console.h new file mode 100644 index 0000000..e4740ae --- /dev/null +++ b/arch/e2k/include/asm/console.h @@ -0,0 +1,47 @@ + +#ifndef _E2K_CONSOLE_H_ +#define _E2K_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include + +static inline void +native_virt_console_dump_putc(char c) +{ +#ifdef CONFIG_EARLY_VIRTIO_CONSOLE + if (IS_HV_GM()) { + /* virtio console is actual only for guest mode */ + kvm_virt_console_dump_putc(c); + } +#endif /* CONFIG_EARLY_VIRTIO_CONSOLE */ +} + +extern void init_bug(const char *fmt_v, ...); +extern void init_warning(const char *fmt_v, ...); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* native kernel or native kernel with virtualization support */ +static inline void +virt_console_dump_putc(char c) +{ + native_virt_console_dump_putc(c); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/convert_array.h b/arch/e2k/include/asm/convert_array.h new file mode 100644 index 0000000..2f434f0 --- /dev/null +++ b/arch/e2k/include/asm/convert_array.h @@ -0,0 +1,114 @@ +/* + * convert_array.h - Linux syscall interfaces (arch-specific) + * + * Copyright (c) 2019 MCST + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
+ */
+
+#ifndef _ASM_E2K_UAPI_CONVERT_ARRAY_H
+#define _ASM_E2K_UAPI_CONVERT_ARRAY_H
+
+
+#ifdef CONFIG_PROTECTED_MODE
+
+#define convert_array(prot_array, new_array, max_prot_array_size, fields, \
+ items, mask_type, mask_align) \
+ convert_array_3(prot_array, new_array, max_prot_array_size, fields, \
+ items, mask_type, mask_align, 0, 0)
+
+extern int convert_array_3(long __user *prot_array, long *new_array,
+ const int max_prot_array_size, const int fields,
+ const int items, const long mask_type,
+ const long mask_align, const long mask_rw,
+ const int rval_mode);
+/*
+ * Converts the given array of structures, which can contain
+ * protected user pointers to memory, function descriptors, and int values.
+ * prot_array - pointer to the original (user-space) array
+ * new_array - pointer to area where to put converted array
+ * max_prot_array_size - the maximum size, which prot_array can occupy
+ * fields - number of entries in each element
+ * items - number of identical elements in the array to convert
+ * mask_type - mask for encoding of field type in each element:
+ * 2 bits per each entry:
+ * --- 00 (0x0) - int
+ * --- 01 (0x1) - long
+ * --- 10 (0x2) - pointer to function
+ * --- 11 (0x3) - pointer to memory (descriptor)
+ * mask_align - mask for encoding of alignment of the NEXT! field
+ * 2 bits per each entry:
+ * --- 00 (0x0) - next field aligned as int (to 4 bytes)
+ * --- 01 (0x1) - next field aligned as long (to 8 bytes)
+ * --- 10 (0x2) - not used yet
+ * --- 11 (0x3) - next field aligned as pointer (to 16 bytes)
+ * mask_rw - mask for encoding access type of the structure elements
+ * 2 bits per each entry:
+ * --- 01 (0x1) - the field's content gets read by syscall (READ-able)
+ * --- 02 (0x2) - the field's content gets updated by syscall (WRITE-able)
+ * --- 11 (0x3) - the field is both READ-able and WRITE-able
+ * --- 00 (0x0) - default type; the same as (READ-able)
+ * rval_mode - error (return value) reporting mode mask:
+ * 0 - report only critical problems in prot_array structure;
+ * 1 - return with -EFAULT if wrong tag in 'int' field;
+ * 2 - --'-- --'-- 'long' field;
+ * 4 - --'-- --'-- 'func' field;
+ * 8 - --'-- --'-- 'descr' field;
+ * 16 - ignore errors in 'int' field;
+ * 32 - --'-- --'-- 'long' field;
+ * 64 - --'-- --'-- 'func' field;
+ * 128 - --'-- --'-- 'descr' field.
+ * Returns: 0 - if converted OK;
+ * error number - otherwise.
+ */
+
+#define CONV_ARR_WRONG_INT_FLD 1
+#define CONV_ARR_WRONG_LONG_FLD 2
+#define CONV_ARR_WRONG_FUNC_FLD 4
+#define CONV_ARR_WRONG_DSCR_FLD 8
+#define CONV_ARR_WRONG_ANY_FLD 15 /* error if any field appeared bad */
+#define CONV_ARR_IGNORE_INT_FLD_ERR 16
+#define CONV_ARR_IGNORE_LONG_FLD_ERR 32
+#define CONV_ARR_IGNORE_FUNC_FLD_ERR 64
+#define CONV_ARR_IGNORE_DSCR_FLD_ERR 128
+
+
+extern int check_args_array(const long __user *args_array,
+ const long tags,
+ const int arg_num,
+ const long mask_type,
+ const int rval_mode,
+ const char *ErrMsgHeader);
+
+/*
+ * This function checks protected syscall arguments for correspondence with
+ * the given mask:
+ * args_array - pointer to argument array
+ * tags - argument tags (4 bits per arg; lower to higher bits ordered)
+ * arg_num - number of arguments
+ * mask_type - mask for encoding of field type in each element
+ * 2 bits per each entry:
+ * --- 00 (0x0) - int
+ * --- 01 (0x1) - long
+ * --- 10 (0x2) - pointer to function
+ * --- 11 (0x3) - pointer to memory.
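+ * (An illustrative example, assuming fields are packed from the
+ * low bits up, as with tags: for arguments (int, long, descriptor)
+ * mask_type = 0x0 | (0x1 << 2) | (0x3 << 4) = 0x34.)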
+ * rval_mode - error (return value) reporting mode mask: + * 0 - report only critical problems; + * 1 - return with -EFAULT if wrong tag in 'int' field; + * 2 - --'-- --'-- 'long' field; + * 4 - --'-- --'-- 'func' field; + * 8 - --'-- --'-- 'descr' field; + * 16 - ignore errors in 'int' field; + * 32 - --'-- --'-- 'long' field; + * 64 - --'-- --'-- 'func' field; + * 128 - --'-- --'-- 'descr' field. + * Returns: 0 - if converted OK; + * error number - otherwise. + */ + +#else +# define convert_array(...) 0 +#endif /* CONFIG_PROTECTED_MODE */ + +#endif /* _ASM_E2K_UAPI_CONVERT_ARRAY_H */ diff --git a/arch/e2k/include/asm/coredump.h b/arch/e2k/include/asm/coredump.h new file mode 100644 index 0000000..94abf5e --- /dev/null +++ b/arch/e2k/include/asm/coredump.h @@ -0,0 +1,10 @@ +#ifndef _E2K_COREDUMP_H +#define _E2K_COREDUMP_H +/* + * For coredump + */ +extern void clear_delayed_free_hw_stacks(struct mm_struct *mm); +extern void create_delayed_free_hw_stacks(void); + +#endif /* _E2K_COREDUMP_H */ + diff --git a/arch/e2k/include/asm/cpu.h b/arch/e2k/include/asm/cpu.h new file mode 100644 index 0000000..42ca8b9 --- /dev/null +++ b/arch/e2k/include/asm/cpu.h @@ -0,0 +1,36 @@ +#ifndef _ASM_E2K_CPU_H_ +#define _ASM_E2K_CPU_H_ + +#include + +extern int arch_register_cpu(int num); +#ifdef CONFIG_HOTPLUG_CPU +extern void arch_unregister_cpu(int); +#endif + +static inline unsigned long +native_get_cpu_running_cycles(void) +{ + /* native kernel is running always */ + return get_cycles(); +} + +extern void store_cpu_info(int cpuid); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized guest and host kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* native kernel or native kernel with virtualization support */ + +static inline unsigned long +get_cpu_running_cycles(void) +{ + return native_get_cpu_running_cycles(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_CPU_H_ */ diff --git a/arch/e2k/include/asm/cpu_regs.h b/arch/e2k/include/asm/cpu_regs.h new file mode 100644 index 0000000..52640e4 --- /dev/null +++ b/arch/e2k/include/asm/cpu_regs.h @@ -0,0 +1,3669 @@ + +#ifndef _E2K_CPU_REGS_H_ +#define _E2K_CPU_REGS_H_ + +#ifdef __KERNEL__ + +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include + +#define NATIVE_STRIP_PCSHTP_WINDOW() NATIVE_WRITE_PCSHTP_REG_SVALUE(0) +#define STRIP_PCSHTP_WINDOW() WRITE_PCSHTP_REG_SVALUE(0) + +/* + * Read low double-word OS Compilation Unit Register (OSCUD) + * from the low word structure + * Register fields access: fff = OSCUD_lo.OSCUD_lo_xxx; + * Register double-word half access: oscud_lo = OSCUD_lo.OSCUD_lo_half; + */ +#define NATIVE_READ_OSCUD_LO_REG() \ +({ \ + e2k_oscud_lo_t OSCUD_lo; \ + OSCUD_lo.OSCUD_lo_half = NATIVE_READ_OSCUD_LO_REG_VALUE(); \ + OSCUD_lo; \ +}) +#define READ_OSCUD_LO_REG() \ +({ \ + e2k_oscud_lo_t OSCUD_lo; \ + OSCUD_lo.OSCUD_lo_half = READ_OSCUD_LO_REG_VALUE(); \ + OSCUD_lo; \ +}) +#define BOOT_READ_OSCUD_LO_REG() \ +({ \ + e2k_oscud_lo_t OSCUD_lo; \ + OSCUD_lo.OSCUD_lo_half = BOOT_READ_OSCUD_LO_REG_VALUE(); \ + OSCUD_lo; \ +}) + +static inline e2k_oscud_lo_t +native_read_OSCUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_lo register 0x%lx\n", + NATIVE_READ_OSCUD_LO_REG().OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSCUD_LO_REG(); +} +static inline e2k_oscud_lo_t +read_OSCUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_lo register 0x%lx\n", + READ_OSCUD_LO_REG().OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSCUD_LO_REG(); +} +static inline e2k_oscud_lo_t +boot_read_OSCUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_lo register 0x%lx\n", + BOOT_READ_OSCUD_LO_REG().OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSCUD_LO_REG(); +} + +/* + * Read high double-word OS Compilation Unit Register (OSCUD) + * from the high word structure + * Register fields access: fff = OSCUD_hi.OSCUD_hi_xxx; + * Register double-word half access: oscud_lo = OSCUD_hi.OSCUD_hi_half; + */ +#define NATIVE_READ_OSCUD_HI_REG() \ +({ \ + e2k_oscud_hi_t OSCUD_hi; \ + OSCUD_hi.OSCUD_hi_half = NATIVE_READ_OSCUD_HI_REG_VALUE(); \ + OSCUD_hi; \ +}) +#define READ_OSCUD_HI_REG() \ +({ \ + e2k_oscud_hi_t OSCUD_hi; \ + OSCUD_hi.OSCUD_hi_half = READ_OSCUD_HI_REG_VALUE(); \ + OSCUD_hi; \ +}) +#define BOOT_READ_OSCUD_HI_REG() \ +({ \ + e2k_oscud_hi_t OSCUD_hi; \ + OSCUD_hi.OSCUD_hi_half = BOOT_READ_OSCUD_HI_REG_VALUE(); \ + OSCUD_hi; \ +}) + +static inline e2k_oscud_hi_t +native_read_OSCUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_hi register 0x%lx\n", + NATIVE_READ_OSCUD_HI_REG().OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSCUD_HI_REG(); +} +static inline e2k_oscud_hi_t +read_OSCUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_hi register 0x%lx\n", + READ_OSCUD_HI_REG().OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSCUD_HI_REG(); +} +static inline e2k_oscud_hi_t +boot_read_OSCUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSCUD_hi 
register 0x%lx\n", + BOOT_READ_OSCUD_HI_REG().OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSCUD_HI_REG(); +} + +/* + * Read quad-word OS Compilation Unit Register (OSCUD) to the structure + * Register fields access: fff = OSCUD -> OSCUD_xxx + * Register double-word halfs access: OSCUD_lo = OSCUD -> OSCUD_lo_reg + * OSCUD_hi = OSCUD -> OSCUD_hi_reg + */ +#define READ_OSCUD_REG() \ +({ \ + oscud_struct_t OSCUD; \ + OSCUD.OSCUD_hi_struct = READ_OSCUD_HI_REG(); \ + OSCUD.OSCUD_lo_struct = READ_OSCUD_LO_REG(); \ + OSCUD; \ +}) +#define READ_OSCUD_REG_TO(OSCUD) (*(OSCUD) = READ_OSCUD_REG()) +#define BOOT_READ_OSCUD_REG() \ +({ \ + oscud_struct_t OSCUD; \ + OSCUD.OSCUD_hi_struct = BOOT_READ_OSCUD_HI_REG(); \ + OSCUD.OSCUD_lo_struct = BOOT_READ_OSCUD_LO_REG(); \ + OSCUD; \ +}) +#define READ_OSCUD_REG_TO(OSCUD) (*(OSCUD) = READ_OSCUD_REG()) +#define BOOT_READ_OSCUD_REG_TO(OSCUD) (*(OSCUD) = BOOT_READ_OSCUD_REG()) + +static inline void +read_OSCUD_reg(oscud_struct_t *OSCUD) +{ + READ_OSCUD_REG_TO(OSCUD); +} +static inline void +boot_read_OSCUD_reg(oscud_struct_t *OSCUD) +{ + BOOT_READ_OSCUD_REG_TO(OSCUD); +} + +/* + * Write low double-word OS Compilation Unit Register (OSCUD) + * from the low word structure + * Register fields filling: OSCUD_lo.OSCUD_lo_xxx = fff; + * Register double-word half filling: OSCUD_lo.OSCUD_lo_half = oscud_lo; + */ +#define NATIVE_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define BOOT_WRITE_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) + +static inline void +native_write_OSCUD_lo_reg(e2k_oscud_lo_t OSCUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_lo register 0x%lx\n", OSCUD_lo.OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSCUD_LO_REG(OSCUD_lo); +} +static inline void +write_OSCUD_lo_reg(e2k_oscud_lo_t OSCUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_lo register 0x%lx\n", OSCUD_lo.OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSCUD_LO_REG(OSCUD_lo); +} + +/* + * Write high double-word OS Compilation Unit Register (OSCUD) + * from the high word structure + * Register fields filling: OSCUD_hi.OSCUD_hi_xxx = fff; + * Register double-word half filling: OSCUD_hi.OSCUD_hi_half = oscud_lo; + */ +#define NATIVE_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define BOOT_WRITE_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) + +static inline void +native_write_OSCUD_hi_reg(e2k_oscud_hi_t OSCUD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_hi register 0x%lx\n", OSCUD_hi.OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSCUD_HI_REG(OSCUD_hi); +} +static inline void +write_OSCUD_hi_reg(e2k_oscud_hi_t OSCUD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_hi register 0x%lx\n", OSCUD_hi.OSCUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSCUD_HI_REG(OSCUD_hi); +} + +/* + * Write high & low quad-word OS Compilation Unit Register (OSCUD) + * from the high & low word structure + */ + +#define WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + 
WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define BOOT_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) +#define BOOT_WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + BOOT_WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) + +static inline void +write_OSCUD_hi_lo_reg(e2k_oscud_hi_t OSCUD_hi, e2k_oscud_lo_t OSCUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSCUD_hi register 0x%lx\n", OSCUD_hi.OSCUD_hi_half); + boot_printk("Write OSCUD_lo register 0x%lx\n", OSCUD_lo.OSCUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo); +} + +/* + * Write quad-word OS Compilation Unit Register (OSCUD) from the structure + * Register fields filling: OSCUD.OSCUD_xxx = fff; + * Register double-word halfs filling: OSCUD.OSCUD_lo_reg = OSCUD_lo; + * OSCUD.OSCUD_hi_reg = OSCUD_hi; + */ +#define WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) +#define BOOT_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ + BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ +}) + +static inline void +write_OSCUD_reg(oscud_struct_t OSCUD) +{ + WRITE_OSCUD_REG(OSCUD.OSCUD_hi_struct, OSCUD.OSCUD_lo_struct); +} + +/* + * Read low double-word OS Globals Register (OSGD) + * from the low word structure + * Register fields access: fff = OSGD_lo.OSGD_lo_xxx; + * Register double-word half access: osgd_lo = OSGD_lo.OSGD_lo_half; + */ +#define NATIVE_READ_OSGD_LO_REG() \ +({ \ + e2k_osgd_lo_t OSGD_lo; \ + OSGD_lo.OSGD_lo_half = NATIVE_READ_OSGD_LO_REG_VALUE(); \ + OSGD_lo; \ +}) +#define READ_OSGD_LO_REG() \ +({ \ + e2k_osgd_lo_t OSGD_lo; \ + OSGD_lo.OSGD_lo_half = READ_OSGD_LO_REG_VALUE(); \ + OSGD_lo; \ +}) +#define BOOT_READ_OSGD_LO_REG() \ +({ \ + e2k_osgd_lo_t OSGD_lo; \ + OSGD_lo.OSGD_lo_half = BOOT_READ_OSGD_LO_REG_VALUE(); \ + OSGD_lo; \ +}) + +static inline e2k_osgd_lo_t +native_read_OSGD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_lo register 0x%lx\n", + NATIVE_READ_OSGD_LO_REG().OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSGD_LO_REG(); +} +static inline e2k_osgd_lo_t +read_OSGD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_lo register 0x%lx\n", + READ_OSGD_LO_REG().OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSGD_LO_REG(); +} +static inline e2k_osgd_lo_t +boot_read_OSGD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_lo register 0x%lx\n", + BOOT_READ_OSGD_LO_REG().OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSGD_LO_REG(); +} + +/* + * Read high double-word OS Globals Register (OSGD) + * from the high word structure + * Register fields access: fff = OSGD_hi.OSGD_hi_xxx; + * Register double-word half access: osgd_lo = OSGD_hi.OSGD_hi_half; + */ +#define NATIVE_READ_OSGD_HI_REG() \ +({ \ + e2k_osgd_hi_t OSGD_hi; \ + OSGD_hi.OSGD_hi_half = NATIVE_READ_OSGD_HI_REG_VALUE(); \ + OSGD_hi; \ +}) +#define READ_OSGD_HI_REG() \ +({ \ + e2k_osgd_hi_t OSGD_hi; \ + OSGD_hi.OSGD_hi_half = READ_OSGD_HI_REG_VALUE(); \ + 
OSGD_hi; \ +}) +#define BOOT_READ_OSGD_HI_REG() \ +({ \ + e2k_osgd_hi_t OSGD_hi; \ + OSGD_hi.OSGD_hi_half = BOOT_READ_OSGD_HI_REG_VALUE(); \ + OSGD_hi; \ +}) + +static inline e2k_osgd_hi_t +native_read_OSGD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_hi register 0x%lx\n", + NATIVE_READ_OSGD_HI_REG().OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_READ_OSGD_HI_REG(); +} +static inline e2k_osgd_hi_t +read_OSGD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_hi register 0x%lx\n", + READ_OSGD_HI_REG().OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_OSGD_HI_REG(); +} +static inline e2k_osgd_hi_t +boot_read_OSGD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read OSGD_hi register 0x%lx\n", + BOOT_READ_OSGD_HI_REG().OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_OSGD_HI_REG(); +} + +/* + * Read quad-word OS Globals Register (OSGD) to the structure + * Register fields access: fff = OSGD -> OSGD_xxx + * Register double-word halfs access: OSGD_lo = OSGD -> OSGD_lo_reg + * OSGD_hi = OSGD -> OSGD_hi_reg + */ +#define READ_OSGD_REG() \ +({ \ + osgd_struct_t OSGD; \ + OSGD.OSGD_hi_struct = READ_OSGD_HI_REG(); \ + OSGD.OSGD_lo_struct = READ_OSGD_LO_REG(); \ + OSGD; \ +}) +#define READ_OSGD_REG_TO(OSGD) (*(OSGD) = READ_OSGD_REG()) +#define BOOT_READ_OSGD_REG() \ +({ \ + osgd_struct_t OSGD; \ + OSGD.OSGD_hi_struct = BOOT_READ_OSGD_HI_REG(); \ + OSGD.OSGD_lo_struct = BOOT_READ_OSGD_LO_REG(); \ + OSGD; \ +}) +#define BOOT_READ_OSGD_REG_TO(OSGD) (*(OSGD) = BOOT_READ_OSGD_REG()) + +static inline void +read_OSGD_reg(osgd_struct_t *OSGD) +{ + READ_OSGD_REG_TO(OSGD); +} +static inline void +boot_read_OSGD_reg(osgd_struct_t *OSGD) +{ + BOOT_READ_OSGD_REG_TO(OSGD); +} + +/* + * Write low double-word OS Globals Register (OSGD) + * from the low word structure + * Register fields filling: OSGD_lo.OSGD_lo_xxx = fff; + * Register double-word half filling: OSGD_lo.OSGD_lo_half = gd_lo; + */ +#define NATIVE_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) + +static inline void +native_write_OSGD_lo_reg(e2k_osgd_lo_t OSGD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_lo register 0x%lx\n", OSGD_lo.OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSGD_LO_REG(OSGD_lo); +} +static inline void +write_OSGD_lo_reg(e2k_osgd_lo_t OSGD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_lo register 0x%lx\n", OSGD_lo.OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSGD_LO_REG(OSGD_lo); +} + +/* + * Write high double-word OS Globals Register (OSGD) + * from the high word structure + * Register fields filling: OSGD_hi.OSGD_hi_xxx = fff; + * Register double-word half filling: OSGD_hi.OSGD_hi_half = gd_lo; + */ +#define NATIVE_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define BOOT_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) + +static inline void +native_write_OSGD_hi_reg(e2k_osgd_hi_t OSGD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_hi register 
0x%lx\n", OSGD_hi.OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_WRITE_OSGD_HI_REG(OSGD_hi); +} +static inline void +write_OSGD_hi_reg(e2k_osgd_hi_t OSGD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_hi register 0x%lx\n", OSGD_hi.OSGD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSGD_HI_REG(OSGD_hi); +} + +/* + * Write high & low quad-word OS Globals Register (OSGD) + * from the high & low word structure + */ + +#define WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +({ \ + WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define BOOT_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +({ \ + BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define WRITE_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + WRITE_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_WRITE_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + BOOT_WRITE_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, \ + OSGD_lo.OSGD_lo_half); \ +}) + +static inline void +write_OSGD_hi_lo_reg(e2k_osgd_hi_t OSGD_hi, e2k_osgd_lo_t OSGD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write OSGD_hi register 0x%lx\n", OSGD_hi.OSGD_hi_half); + boot_printk("Write OSGD_lo register 0x%lx\n", OSGD_lo.OSGD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_OSGD_REG(OSGD_hi, OSGD_lo); +} + +/* + * Write quad-word OS Globals Register (OSGD) from the structure + * Register fields filling: OSGD.OSGD_xxx = fff; + * Register double-word halfs filling: OSGD.OSGD_lo_reg = OSGD_lo; + * OSGD.OSGD_hi_reg = OSGD_hi; + */ +static inline void +write_OSGD_reg(osgd_struct_t OSGD) +{ + WRITE_OSGD_REG(OSGD.OSGD_hi_struct, OSGD.OSGD_lo_struct); +} + +/* + * Read low double-word Compilation Unit Register (CUD) + * from the low word structure + * Register fields access: fff = CUD_lo.CUD_lo_xxx; + * Register double-word half access: cud_lo = CUD_lo.CUD_lo_half; + */ +#define NATIVE_READ_CUD_LO_REG() \ +({ \ + e2k_cud_lo_t CUD_lo; \ + CUD_lo.CUD_lo_half = NATIVE_READ_CUD_LO_REG_VALUE(); \ + CUD_lo; \ +}) +#define READ_CUD_LO_REG() \ +({ \ + e2k_cud_lo_t CUD_lo; \ + CUD_lo.CUD_lo_half = READ_CUD_LO_REG_VALUE(); \ + CUD_lo; \ +}) +#define BOOT_READ_CUD_LO_REG() \ +({ \ + e2k_cud_lo_t CUD_lo; \ + CUD_lo.CUD_lo_half = BOOT_READ_CUD_LO_REG_VALUE(); \ + CUD_lo; \ +}) + +static inline e2k_cud_lo_t +read_CUD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CUD_lo register 0x%lx\n", + READ_CUD_LO_REG().CUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CUD_LO_REG(); +} + +/* + * Read high double-word Compilation Unit Register (CUD) + * from the high word structure + * Register fields access: fff = CUD_hi.CUD_hi_xxx; + * Register double-word half access: cud_lo = CUD_hi.CUD_hi_half; + */ +#define NATIVE_READ_CUD_HI_REG() \ +({ \ + e2k_cud_hi_t CUD_hi; \ + CUD_hi.CUD_hi_half = NATIVE_READ_CUD_HI_REG_VALUE(); \ + CUD_hi; \ +}) +#define READ_CUD_HI_REG() \ +({ \ + e2k_cud_hi_t CUD_hi; \ + CUD_hi.CUD_hi_half = READ_CUD_HI_REG_VALUE(); \ + CUD_hi; \ +}) +#define BOOT_READ_CUD_HI_REG() \ +({ \ + e2k_cud_hi_t CUD_hi; \ + CUD_hi.CUD_hi_half = BOOT_READ_CUD_HI_REG_VALUE(); \ + CUD_hi; \ +}) + +static inline e2k_cud_hi_t +read_CUD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CUD_hi register 0x%lx\n", + READ_CUD_HI_REG().CUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CUD_HI_REG(); +} + +/* + * Read quad-word Compilation Unit Register (CUD) to the 
structure + * Register fields access: fff = CUD -> CUD_xxx + * Register double-word halfs access: CUD_lo = CUD -> CUD_lo_reg + * CUD_hi = CUD -> CUD_hi_reg + */ +#define READ_CUD_REG() \ +({ \ + cud_struct_t CUD; \ + CUD.CUD_hi_struct = READ_CUD_HI_REG(); \ + CUD.CUD_lo_struct = READ_CUD_LO_REG(); \ + CUD; \ +}) +#define READ_CUD_REG_TO(CUD) (*(CUD) = READ_CUD_REG()) +#define BOOT_READ_CUD_REG() \ +({ \ + cud_struct_t CUD; \ + CUD.CUD_hi_struct = BOOT_READ_CUD_HI_REG(); \ + CUD.CUD_lo_struct = BOOT_READ_CUD_LO_REG(); \ + CUD; \ +}) +#define BOOT_READ_CUD_REG_TO(CUD) (*(CUD) = BOOT_READ_CUD_REG()) + +static inline void +read_CUD_reg(cud_struct_t *CUD) +{ + READ_CUD_REG_TO(CUD); +} +static inline void +boot_read_CUD_reg(cud_struct_t *CUD) +{ + BOOT_READ_CUD_REG_TO(CUD); +} + +/* + * Write low double-word Compilation Unit Register (CUD) + * from the low word structure + * Register fields filling: CUD_lo.CUD_lo_xxx = fff; + * Register double-word half filling: CUD_lo.CUD_lo_half = cud_lo; + */ +#define WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define BOOT_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) + +static inline void +write_CUD_lo_reg(e2k_cud_lo_t CUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CUD_lo register 0x%lx\n", CUD_lo.CUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CUD_LO_REG(CUD_lo); +} + +/* + * Write high double-word Compilation Unit Register (CUD) + * from the high word structure + * Register fields filling: CUD_hi.CUD_hi_xxx = fff; + * Register double-word half filling: CUD_hi.CUD_hi_half = cud_lo; + */ +#define WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) +#define BOOT_WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) + +static inline void +write_CUD_hi_reg(e2k_cud_hi_t CUD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CUD_hi register 0x%lx\n", CUD_hi.CUD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CUD_HI_REG(CUD_hi); +} + +/* + * Write high & low quad-word Compilation Unit Register (CUD) + * from the high & low word structure + */ + +#define WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define BOOT_WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) +#define BOOT_WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + BOOT_WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) + +static inline void +write_CUD_hi_lo_reg(e2k_cud_hi_t CUD_hi, e2k_cud_lo_t CUD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CUD_hi register 0x%lx\n", CUD_hi.CUD_hi_half); + boot_printk("Write CUD_lo register 0x%lx\n", CUD_lo.CUD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CUD_REG(CUD_hi, CUD_lo); +} + +/* + * Write quad-word Compilation Unit Register (CUD) from the structure + * Register fields filling: CUD.CUD_xxx = fff; + * Register double-word halfs filling: CUD.CUD_lo_reg = CUD_lo; + * CUD.CUD_hi_reg = CUD_hi; + */ +static inline void +write_CUD_reg(cud_struct_t CUD) +{ + WRITE_CUD_REG(CUD.CUD_hi_struct, CUD.CUD_lo_struct); +} + +/* + * Read low double-word Globals Register (GD) + * from the low word structure + * Register fields 
access: fff = GD_lo.GD_lo_xxx; + * Register double-word half access: gd_lo = GD_lo.GD_lo_half; + */ +#define READ_GD_LO_REG() \ +({ \ + e2k_gd_lo_t GD_lo; \ + GD_lo.GD_lo_half = READ_GD_LO_REG_VALUE(); \ + GD_lo; \ +}) +#define BOOT_READ_GD_LO_REG() \ +({ \ + e2k_gd_lo_t GD_lo; \ + GD_lo.GD_lo_half = BOOT_READ_GD_LO_REG_VALUE(); \ + GD_lo; \ +}) + +static inline e2k_gd_lo_t +read_GD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_lo register 0x%lx\n", + READ_GD_LO_REG().GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_GD_LO_REG(); +} +static inline e2k_gd_lo_t +boot_read_GD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_lo register 0x%lx\n", + BOOT_READ_GD_LO_REG().GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_GD_LO_REG(); +} + +/* + * Read high double-word Globals Register (GD) + * from the high word structure + * Register fields access: fff = GD_hi.GD_hi_xxx; + * Register double-word half access: gd_lo = GD_hi.GD_hi_half; + */ +#define READ_GD_HI_REG() \ +({ \ + e2k_gd_hi_t GD_hi; \ + GD_hi.GD_hi_half = READ_GD_HI_REG_VALUE(); \ + GD_hi; \ +}) +#define BOOT_READ_GD_HI_REG() \ +({ \ + e2k_gd_hi_t GD_hi; \ + GD_hi.GD_hi_half = BOOT_READ_GD_HI_REG_VALUE(); \ + GD_hi; \ +}) + +static inline e2k_gd_hi_t +read_GD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_hi register 0x%lx\n", + READ_GD_HI_REG().GD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_GD_HI_REG(); +} +static inline e2k_gd_hi_t +boot_read_GD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read GD_hi register 0x%lx\n", + BOOT_READ_GD_HI_REG().GD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_GD_HI_REG(); +} + +/* + * Read quad-word Globals Register (GD) to the structure + * Register fields access: fff = GD -> GD_xxx + * Register double-word halfs access: GD_lo = GD -> GD_lo_reg + * GD_hi = GD -> GD_hi_reg + */ +#define READ_GD_REG() \ +({ \ + gd_struct_t GD; \ + GD.GD_hi_struct = READ_GD_HI_REG(); \ + GD.GD_lo_struct = READ_GD_LO_REG(); \ + GD; \ +}) +#define READ_GD_REG_TO(GD) (*(GD) = READ_GD_REG()) +#define BOOT_READ_GD_REG() \ +({ \ + gd_struct_t GD; \ + GD.GD_hi_struct = BOOT_READ_GD_HI_REG(); \ + GD.GD_lo_struct = BOOT_READ_GD_LO_REG(); \ + GD; \ +}) +#define BOOT_READ_GD_REG_TO(GD) (*(GD) = BOOT_READ_GD_REG()) + +static inline void +read_GD_reg(gd_struct_t *GD) +{ + READ_GD_REG_TO(GD); +} +static inline void +boot_read_GD_reg(gd_struct_t *GD) +{ + BOOT_READ_GD_REG_TO(GD); +} + +/* + * Write low double-word Globals Register (GD) + * from the low word structure + * Register fields filling: GD_lo.GD_lo_xxx = fff; + * Register double-word half filling: GD_lo.GD_lo_half = gd_lo; + */ +#define WRITE_GD_LO_REG(GD_lo) \ + WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half) +#define BOOT_WRITE_GD_LO_REG(GD_lo) \ + BOOT_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half) + +static inline void +write_GD_lo_reg(e2k_gd_lo_t GD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write GD_lo register 0x%lx\n", GD_lo.GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_GD_LO_REG(GD_lo); +} + +/* + * Write high double-word Globals Register (GD) + * from the high word structure + * Register fields filling: GD_hi.GD_hi_xxx = fff; + * Register double-word half filling: GD_hi.GD_hi_half = gd_lo; + */ +#define WRITE_GD_HI_REG(GD_hi) \ + WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half) +#define BOOT_WRITE_GD_HI_REG(GD_hi) \ + BOOT_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half) + +static inline void 
+write_GD_hi_reg(e2k_gd_hi_t GD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write GD_hi register 0x%lx\n", GD_hi.GD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_GD_HI_REG(GD_hi); +} + +/* + * Write high & low quad-word Globals Register (GD) + * from the high & low word structure + */ + +#define WRITE_GD_REG_VALUE(GD_hi_value, GD_lo_value) \ +({ \ + WRITE_GD_HI_REG_VALUE(GD_hi_value); \ + WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define BOOT_WRITE_GD_REG_VALUE(GD_hi_value, GD_lo_value) \ +({ \ + BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value); \ + BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value); \ +}) +#define WRITE_GD_REG(GD_hi, GD_lo) \ +({ \ + WRITE_GD_REG_VALUE(GD_hi.GD_hi_half, GD_lo.GD_lo_half); \ +}) +#define BOOT_WRITE_GD_REG(GD_hi, GD_lo) \ +({ \ + BOOT_WRITE_GD_REG_VALUE(GD_hi.GD_hi_half, GD_lo.GD_lo_half); \ +}) + +static inline void +write_GD_hi_lo_reg(e2k_gd_hi_t GD_hi, e2k_gd_lo_t GD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write GD_hi register 0x%lx\n", GD_hi.GD_hi_half); + boot_printk("Write GD_lo register 0x%lx\n", GD_lo.GD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_GD_REG(GD_hi, GD_lo); +} + +/* + * Write quad-word Globals Register (GD) from the structure + * Register fields filling: GD.GD_xxx = fff; + * Register double-word halfs filling: GD.GD_lo_reg = GD_lo; + * GD.GD_hi_reg = GD_hi; + */ +static inline void +write_GD_reg(gd_struct_t GD) +{ + WRITE_GD_REG(GD.GD_hi_struct, GD.GD_lo_struct); +} + +/* + * Read quad-word Procedure Stack Pointer Register (PSP) to the structure + * Register fields access: PSP_hi = READ_PSP_HI_REG(); + * fff = PSP_hi.PSP_hi_xxx; + * PSP_lo = READ_PSP_LO_REG(); + * fff = PSP_lo.PSP_lo_xxx; + */ + +#define NATIVE_NV_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = NATIVE_NV_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define NATIVE_NV_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = NATIVE_NV_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define NATIVE_NV_READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = NATIVE_NV_READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = NATIVE_NV_READ_PSP_LO_REG(); \ + PSP; \ +}) +#define NATIVE_NV_READ_PSP_REG_TO(PSP) \ +({ \ + *PSP = NATIVE_NV_READ_PSP_REG(); \ +}) + +#define READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = READ_PSP_LO_REG(); \ + PSP; \ +}) +#define READ_PSP_REG_TO(PSP) \ +({ \ + *PSP = READ_PSP_REG(); \ +}) + +#define BOOT_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = BOOT_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define BOOT_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = BOOT_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define BOOT_READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = BOOT_READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = BOOT_READ_PSP_LO_REG(); \ + PSP; \ +}) +#define BOOT_READ_PSP_REG_TO(PSP) \ +({ \ + *PSP = BOOT_READ_PSP_REG(); \ +}) + +/* + * Read low double-word Procedure Stack Pointer Register (PSP) + * from the low word structure + * Register fields access: fff = PSP_lo.PSP_lo_xxx; + * Register double-word half access: psp_lo = PSP_lo.PSP_lo_half; + */ +static inline e2k_psp_lo_t 
+native_nv_read_PSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_lo register 0x%lx\n", + NATIVE_NV_READ_PSP_LO_REG().PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PSP_LO_REG(); +} +static inline e2k_psp_lo_t +read_PSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_lo register 0x%lx\n", + READ_PSP_LO_REG().PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PSP_LO_REG(); +} +static inline e2k_psp_lo_t +boot_read_PSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_lo register 0x%lx\n", + BOOT_READ_PSP_LO_REG().PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PSP_LO_REG(); +} + +/* + * Read high double-word Procedure Stack Pointer Register (PSP) + * from the high word structure + * Register fields access: fff = PSP_hi.PSP_hi_xxx; + * Register double-word half access: psp_lo = PSP_hi.PSP_hi_half; + */ +static inline e2k_psp_hi_t +native_nv_read_PSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_hi register 0x%lx\n", + NATIVE_NV_READ_PSP_HI_REG().PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PSP_HI_REG(); +} +static inline e2k_psp_hi_t +read_PSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_hi register 0x%lx\n", + READ_PSP_HI_REG().PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PSP_HI_REG(); +} +static inline e2k_psp_hi_t +boot_read_PSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PSP_hi register 0x%lx\n", + BOOT_READ_PSP_HI_REG().PSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PSP_HI_REG(); +} + +/* + * Read quad-word Procedure Stack Pointer Register (PSP) to the structure + * Register fields access: fff = PSP -> PSP_xxx + * Register double-word halfs access: PSP_lo_word = PSP -> PSP_lo_reg + * PSP_hi_word = PSP -> PSP_hi_reg + */ +static inline void +native_nv_read_PSP_reg(psp_struct_t *PSP) +{ + NATIVE_NV_READ_PSP_REG_TO(PSP); +} +static inline void +read_PSP_reg(psp_struct_t *PSP) +{ + READ_PSP_REG_TO(PSP); +} +static inline void +boot_read_PSP_reg(psp_struct_t *PSP) +{ + BOOT_READ_PSP_REG_TO(PSP); +} + +/* + * Write low double-word Procedure Stack Pointer Register (PSP) + * from the low word structure + * Register fields filling: PSP_lo.PSP_lo_xxx = fff; + * Register double-word half filling: PSP_lo.PSP_lo_half = psp_lo; + */ +#define NATIVE_NV_WRITE_PSP_LO_REG(PSP_lo) \ +({ \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo.PSP_lo_half); \ +}) +#define WRITE_PSP_LO_REG(PSP_lo) \ +({ \ + WRITE_PSP_LO_REG_VALUE(PSP_lo.PSP_lo_half); \ +}) +#define BOOT_WRITE_PSP_LO_REG(PSP_lo) \ +({ \ + BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo.PSP_lo_half); \ +}) + +static inline void +native_nv_write_PSP_lo_reg(e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PSP_LO_REG(PSP_lo); +} +static inline void +write_PSP_lo_reg(e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PSP_LO_REG(PSP_lo); +} + +static inline void +boot_write_PSP_lo_reg(e2k_psp_lo_t PSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PSP_LO_REG(PSP_lo); +} + +/* + * Write high 
double-word Procedure Stack Pointer Register (PSP)
+ * from the high word structure
+ * Register fields filling:		PSP_hi.PSP_hi_xxx = fff;
+ * Register double-word half filling:	PSP_hi.PSP_hi_half = psp_hi;
+ */
+#define NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(PSP_hi) \
+({ \
+	NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi.PSP_hi_half); \
+})
+#define WRITE_PSP_HI_REG(PSP_hi) \
+({ \
+	WRITE_PSP_HI_REG_VALUE(PSP_hi.PSP_hi_half); \
+})
+#define BOOT_WRITE_PSP_HI_REG(PSP_hi) \
+({ \
+	BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi.PSP_hi_half); \
+})
+
+static inline void
+native_nv_noirq_write_PSP_hi_reg(e2k_psp_hi_t PSP_hi)
+{
+#ifdef	_E2K_BOOT_CPU_REGS_TRACE_
+	boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half);
+#endif	/* _E2K_BOOT_CPU_REGS_TRACE_ */
+	NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(PSP_hi);
+}
+static inline void
+write_PSP_hi_reg(e2k_psp_hi_t PSP_hi)
+{
+#ifdef	_E2K_BOOT_CPU_REGS_TRACE_
+	boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half);
+#endif	/* _E2K_BOOT_CPU_REGS_TRACE_ */
+	WRITE_PSP_HI_REG(PSP_hi);
+}
+
+static inline void
+boot_write_PSP_hi_reg(e2k_psp_hi_t PSP_hi)
+{
+#ifdef	_E2K_BOOT_CPU_REGS_TRACE_
+	boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half);
+#endif	/* _E2K_BOOT_CPU_REGS_TRACE_ */
+	BOOT_WRITE_PSP_HI_REG(PSP_hi);
+}
+
+/*
+ * Write high & low quad-word Procedure Stack Pointer Register (PSP)
+ * from the high & low word structure
+ */
+
+#define NATIVE_NV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \
+({ \
+	NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \
+	NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \
+})
+#define NATIVE_NV_WRITE_PSP_REG(PSP_hi, PSP_lo) \
+({ \
+	NATIVE_NV_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \
+})
+#define WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \
+({ \
+	WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \
+	WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \
+})
+#define WRITE_PSP_REG(PSP_hi, PSP_lo) \
+({ \
+	WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \
+})
+#define BOOT_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \
+({ \
+	BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \
+	BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \
+})
+#define BOOT_WRITE_PSP_REG(PSP_hi, PSP_lo) \
+({ \
+	BOOT_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \
+})
+
+static inline void
+native_nv_write_PSP_hi_lo_reg(e2k_psp_hi_t PSP_hi, e2k_psp_lo_t PSP_lo)
+{
+#ifdef	_E2K_BOOT_CPU_REGS_TRACE_
+	boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half);
+	boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half);
+#endif	/* _E2K_BOOT_CPU_REGS_TRACE_ */
+	NATIVE_NV_WRITE_PSP_REG(PSP_hi, PSP_lo);
+}
+static inline void
+write_PSP_hi_lo_reg(e2k_psp_hi_t PSP_hi, e2k_psp_lo_t PSP_lo)
+{
+#ifdef	_E2K_BOOT_CPU_REGS_TRACE_
+	boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half);
+	boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half);
+#endif	/* _E2K_BOOT_CPU_REGS_TRACE_ */
+	WRITE_PSP_REG(PSP_hi, PSP_lo);
+}
+static inline void
+boot_write_PSP_hi_lo_reg(e2k_psp_hi_t PSP_hi, e2k_psp_lo_t PSP_lo)
+{
+#ifdef	_E2K_BOOT_CPU_REGS_TRACE_
+	boot_printk("Write PSP_hi register 0x%lx\n", PSP_hi.PSP_hi_half);
+	boot_printk("Write PSP_lo register 0x%lx\n", PSP_lo.PSP_lo_half);
+#endif	/* _E2K_BOOT_CPU_REGS_TRACE_ */
+	BOOT_WRITE_PSP_REG(PSP_hi, PSP_lo);
+}
+
+/*
+ * Write quad-word Procedure Stack Pointer Register (PSP) from the structure
+ * Register fields filling:		PSP.PSP_xxx = fff;
+ * Register double-word halfs filling:	PSP.PSP_lo_reg = PSP_lo;
+ *					PSP.PSP_hi_reg = PSP_hi;
+ */
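+/*
+ * Usage sketch (illustrative only, not part of the original interface):
+ * a read-modify-write of the quad-word PSP built from the accessors
+ * defined above. The helper name and the unsigned long type of the
+ * register half are assumptions made for this example.
+ */
+static inline void
+example_update_PSP_hi_half(unsigned long new_PSP_hi_half)
+{
+	psp_struct_t PSP;
+
+	READ_PSP_REG_TO(&PSP);		/* read PSP_hi, then PSP_lo */
+	PSP.PSP_hi_struct.PSP_hi_half = new_PSP_hi_half;
+	/* write back: high half first, then low half */
+	WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct);
+}
+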
+static inline void +native_nv_write_PSP_reg(psp_struct_t PSP) +{ + NATIVE_NV_WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct); +} +static inline void +write_PSP_reg(psp_struct_t PSP) +{ + WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct); +} +static inline void +boot_write_PSP_reg(psp_struct_t PSP) +{ + BOOT_WRITE_PSP_REG(PSP.PSP_hi_struct, PSP.PSP_lo_struct); +} + + +/* + * Read quad-word Procedure Chain Stack Pointer Register (PCSP) to the structure + * Register fields access: PCSP_hi = READ_PCSP_HI_REG(); + * fff = PCSP_hi.PCSP_hi_xxx; + * PCSP_lo = READ_PCSP_LO_REG(); + * fff = PCSP_lo.PCSP_lo_xxx; + */ + +#define NATIVE_NV_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = NATIVE_NV_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define NATIVE_NV_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = NATIVE_NV_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define NATIVE_NV_READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = NATIVE_NV_READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = NATIVE_NV_READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define NATIVE_NV_READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = NATIVE_NV_READ_PCSP_REG(); \ +}) +#define READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = READ_PCSP_REG(); \ +}) +#define BOOT_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = BOOT_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define BOOT_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = BOOT_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define BOOT_READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = BOOT_READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = BOOT_READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define BOOT_READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = BOOT_READ_PCSP_REG(); \ +}) + +/* + * Read low double-word Procedure Chain Stack Pointer Register (PCSP) + * from the low word structure + * Register fields access: fff = PCSP_lo.PCSP_lo_xxx; + * Register double-word half access: pcsp_lo = PCSP_lo.PCSP_lo_half; + */ +static inline e2k_pcsp_lo_t +native_nv_read_PCSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_lo register 0x%lx\n", + NATIVE_NV_READ_PCSP_LO_REG().PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PCSP_LO_REG(); +} +static inline e2k_pcsp_lo_t +read_PCSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_lo register 0x%lx\n", + READ_PCSP_LO_REG().PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PCSP_LO_REG(); +} +static inline e2k_pcsp_lo_t +boot_read_PCSP_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_lo register 0x%lx\n", + BOOT_READ_PCSP_LO_REG().PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PCSP_LO_REG(); +} + +/* + * Read high double-word Procedure Chain Stack Pointer Register (PCSP) + * from the high word structure + * Register fields access: fff = PCSP_hi.PCSP_hi_xxx; + * Register double-word half access: pcsp_lo = PCSP_hi.PCSP_hi_half; + */ +static inline e2k_pcsp_hi_t 
+native_nv_read_PCSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_hi register 0x%lx\n", + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PCSP_HI_REG(); +} +static inline e2k_pcsp_hi_t +read_PCSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_hi register 0x%lx\n", + READ_PCSP_HI_REG().PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PCSP_HI_REG(); +} +static inline e2k_pcsp_hi_t +boot_read_PCSP_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PCSP_hi register 0x%lx\n", + BOOT_READ_PCSP_HI_REG().PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_PCSP_HI_REG(); +} + +/* + * Read quad-word Procedure Chain Stack Pointer Register (PCSP) to the structure + * Register fields access: fff = PCSP -> PCSP_xxx + * Register double-word halfs access: PCSP_lo_word = PCSP -> PCSP_lo_reg + * PCSP_hi_word = PCSP -> PCSP_hi_reg + */ + +static inline void +native_nv_read_PCSP_reg(pcsp_struct_t *PCSP) +{ + NATIVE_NV_READ_PCSP_REG_TO(PCSP); +} +static inline void +read_PCSP_reg(pcsp_struct_t *PCSP) +{ + READ_PCSP_REG_TO(PCSP); +} +static inline void +boot_read_PCSP_reg(pcsp_struct_t *PCSP) +{ + BOOT_READ_PCSP_REG_TO(PCSP); +} + +/* + * Write low double-word Procedure Chain Stack Pointer Register (PCSP) + * from the low word structure + * Register fields filling: PCSP_lo.PCSP_lo_xxx = fff; + * Register double-word half filling: PCSP_lo.PCSP_lo_half = pcsp_lo; + */ +#define NATIVE_NV_WRITE_PCSP_LO_REG(PCSP_lo) \ +({ \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo.PCSP_lo_half); \ +}) +#define WRITE_PCSP_LO_REG(PCSP_lo) \ +({ \ + WRITE_PCSP_LO_REG_VALUE(PCSP_lo.PCSP_lo_half); \ +}) +#define BOOT_WRITE_PCSP_LO_REG(PCSP_lo) \ +({ \ + BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo.PCSP_lo_half); \ +}) + +static inline void +native_nv_write_PCSP_lo_reg(e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PCSP_LO_REG(PCSP_lo); +} +static inline void +write_PCSP_lo_reg(e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PCSP_LO_REG(PCSP_lo); +} +static inline void +boot_write_PCSP_lo_reg(e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PCSP_LO_REG(PCSP_lo); +} + +/* + * Write high double-word Procedure Chain Stack Pointer Register (PCSP) + * from the high word structure + * Register fields filling: PCSP_hi.PCSP_hi_xxx = fff; + * Register double-word half filling: PCSP_hi.PCSP_hi_half = pcsp_lo; + */ +#define NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(PCSP_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi.PCSP_hi_half); \ +}) +#define WRITE_PCSP_HI_REG(PCSP_hi) \ +({ \ + WRITE_PCSP_HI_REG_VALUE(PCSP_hi.PCSP_hi_half); \ +}) +#define BOOT_WRITE_PCSP_HI_REG(PCSP_hi) \ +({ \ + BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi.PCSP_hi_half); \ +}) + +static inline void +native_nv_noirq_write_PCSP_hi_reg(e2k_pcsp_hi_t PCSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(PCSP_hi); +} +static inline void +write_PCSP_hi_reg(e2k_pcsp_hi_t 
PCSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PCSP_HI_REG(PCSP_hi); +} +static inline void +boot_write_PCSP_hi_reg(e2k_pcsp_hi_t PCSP_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PCSP_HI_REG(PCSP_hi); +} + +/* + * Write high & low quad-word Procedure Chain Stack Pointer Register (PCSP) + * from the high & low word structure + */ + +#define NATIVE_NV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define BOOT_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define NATIVE_NV_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + NATIVE_NV_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half); \ +}) +#define WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half); \ +}) +#define BOOT_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + BOOT_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half); \ +}) + +static inline void +native_nv_write_PCSP_hi_lo_reg(e2k_pcsp_hi_t PCSP_hi, e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PCSP_REG(PCSP_hi, PCSP_lo); +} +static inline void +write_PCSP_hi_lo_reg(e2k_pcsp_hi_t PCSP_hi, e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PCSP_REG(PCSP_hi, PCSP_lo); +} +static inline void +boot_write_PCSP_hi_lo_reg(e2k_pcsp_hi_t PCSP_hi, e2k_pcsp_lo_t PCSP_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PCSP_hi register 0x%lx\n", PCSP_hi.PCSP_hi_half); + boot_printk("Write PCSP_lo register 0x%lx\n", PCSP_lo.PCSP_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_PCSP_REG(PCSP_hi, PCSP_lo); +} + +/* + * Write quad-word Procedure Chain Stack Pointer Register (PCSP) from the + * structure + * Register fields filling: PCSP.PCSP_xxx = fff; + * Register double-word halfs filling: PCSP.PCSP_lo_reg = PCSP_lo; + * PCSP.PCSP_hi_reg = PCSP_hi; + */ +static inline void +native_nv_write_PCSP_reg(pcsp_struct_t PCSP) +{ + NATIVE_NV_WRITE_PCSP_REG(PCSP.PCSP_hi_struct, PCSP.PCSP_lo_struct); +} +static inline void +write_PCSP_reg(pcsp_struct_t PCSP) +{ + WRITE_PCSP_REG(PCSP.PCSP_hi_struct, PCSP.PCSP_lo_struct); +} +static inline void +boot_write_PCSP_reg(pcsp_struct_t PCSP) +{ + BOOT_WRITE_PCSP_REG(PCSP.PCSP_hi_struct, PCSP.PCSP_lo_struct); +} + +/* + * Read Current Chain Register (CR0/CR1) to the structure + * Register fields access: crX_hi = READ_CRx_HI_REG(); + * fff = CRx_hi.CRx_hi_xxx; + * CRx_lo = READ_CRx_LO_REG(); + * fff = CRx_lo.CRx_lo_xxx; + */ + +#define NATIVE_NV_READ_CR0_LO_REG() \ +({ \ + e2k_cr0_lo_t CR0_lo; \ + CR0_lo.CR0_lo_half = 
NATIVE_NV_READ_CR0_LO_REG_VALUE(); \ + CR0_lo; \ +}) +#define NATIVE_NV_READ_CR0_HI_REG() \ +({ \ + e2k_cr0_hi_t CR0_hi; \ + CR0_hi.CR0_hi_half = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + CR0_hi; \ +}) +#define NATIVE_NV_READ_CR1_LO_REG() \ +({ \ + e2k_cr1_lo_t CR1_lo; \ + CR1_lo.CR1_lo_half = NATIVE_NV_READ_CR1_LO_REG_VALUE(); \ + CR1_lo; \ +}) +#define NATIVE_NV_READ_CR1_HI_REG() \ +({ \ + e2k_cr1_hi_t CR1_hi; \ + CR1_hi.CR1_hi_half = NATIVE_NV_READ_CR1_HI_REG_VALUE(); \ + CR1_hi; \ +}) +#define READ_CR0_LO_REG() \ +({ \ + e2k_cr0_lo_t CR0_lo; \ + CR0_lo.CR0_lo_half = READ_CR0_LO_REG_VALUE(); \ + CR0_lo; \ +}) +#define READ_CR0_HI_REG() \ +({ \ + e2k_cr0_hi_t CR0_hi; \ + CR0_hi.CR0_hi_half = READ_CR0_HI_REG_VALUE(); \ + CR0_hi; \ +}) +#define READ_CR1_LO_REG() \ +({ \ + e2k_cr1_lo_t CR1_lo; \ + CR1_lo.CR1_lo_half = READ_CR1_LO_REG_VALUE(); \ + CR1_lo; \ +}) +#define READ_CR1_HI_REG() \ +({ \ + e2k_cr1_hi_t CR1_hi; \ + CR1_hi.CR1_hi_half = READ_CR1_HI_REG_VALUE(); \ + CR1_hi; \ +}) + +static inline e2k_cr0_lo_t +native_nv_read_CR0_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_lo register 0x%lx\n", + NATIVE_NV_READ_CR0_LO_REG().CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR0_LO_REG(); +} +static inline e2k_cr0_hi_t +native_nv_read_CR0_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_hi register 0x%lx\n", + NATIVE_NV_READ_CR0_HI_REG().CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR0_HI_REG(); +} + +static inline e2k_cr1_lo_t +native_nv_read_CR1_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_lo register 0x%lx\n", + NATIVE_NV_READ_CR1_LO_REG().CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR1_LO_REG(); +} +static inline e2k_cr1_hi_t +native_nv_read_CR1_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_hi register 0x%lx\n", + NATIVE_NV_READ_CR1_HI_REG().CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_CR1_HI_REG(); +} +static inline e2k_cr0_lo_t +read_CR0_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_lo register 0x%lx\n", + READ_CR0_LO_REG().CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR0_LO_REG(); +} +static inline e2k_cr0_hi_t +read_CR0_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR0_hi register 0x%lx\n", + READ_CR0_HI_REG().CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR0_HI_REG(); +} + +static inline e2k_cr1_lo_t +read_CR1_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_lo register 0x%lx\n", + READ_CR1_LO_REG().CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR1_LO_REG(); +} +static inline e2k_cr1_hi_t +read_CR1_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read CR1_hi register 0x%lx\n", + READ_CR1_HI_REG().CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_CR1_HI_REG(); +} + +/* + * Write Current Chain Register (CR0/CR1) + * from the low word structure + * Register fields filling: CRx_lo.CRx_lo_xxx = fff; + * Register double-word half filling: CRx_lo.CRx_lo_half = crX_lo; + */ +#define NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(CR0_lo) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo.CR0_lo_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(CR0_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi.CR0_hi_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(CR1_lo) \ +({ \ + 
NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo.CR1_lo_half); \ +}) +#define NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(CR1_hi) \ +({ \ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi.CR1_hi_half); \ +}) +#define WRITE_CR0_LO_REG(CR0_lo) \ +({ \ + WRITE_CR0_LO_REG_VALUE(CR0_lo.CR0_lo_half); \ +}) +#define WRITE_CR0_HI_REG(CR0_hi) \ +({ \ + WRITE_CR0_HI_REG_VALUE(CR0_hi.CR0_hi_half); \ +}) +#define WRITE_CR1_LO_REG(CR1_lo) \ +({ \ + WRITE_CR1_LO_REG_VALUE(CR1_lo.CR1_lo_half); \ +}) +#define WRITE_CR1_HI_REG(CR1_hi) \ +({ \ + WRITE_CR1_HI_REG_VALUE(CR1_hi.CR1_hi_half); \ +}) +static inline void +native_nv_noirq_write_CR0_lo_reg(e2k_cr0_lo_t CR0_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_lo register 0x%lx\n", CR0_lo.CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(CR0_lo); +} +static inline void +native_nv_noirq_write_CR0_hi_reg(e2k_cr0_hi_t CR0_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_hi register 0x%lx\n", CR0_hi.CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(CR0_hi); +} +static inline void +native_nv_noirq_write_CR1_lo_reg(e2k_cr1_lo_t CR1_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_lo register 0x%lx\n", CR1_lo.CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(CR1_lo); +} +static inline void +native_nv_noirq_write_CR1_hi_reg(e2k_cr1_hi_t CR1_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_hi register 0x%lx\n", CR1_hi.CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(CR1_hi); +} +static inline void +write_CR0_lo_reg(e2k_cr0_lo_t CR0_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_lo register 0x%lx\n", CR0_lo.CR0_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR0_LO_REG(CR0_lo); +} +static inline void +write_CR0_hi_reg(e2k_cr0_hi_t CR0_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR0_hi register 0x%lx\n", CR0_hi.CR0_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR0_HI_REG(CR0_hi); +} +static inline void +write_CR1_lo_reg(e2k_cr1_lo_t CR1_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_lo register 0x%lx\n", CR1_lo.CR1_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR1_LO_REG(CR1_lo); +} +static inline void +write_CR1_hi_reg(e2k_cr1_hi_t CR1_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write CR1_hi register 0x%lx\n", CR1_hi.CR1_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_CR1_HI_REG(CR1_hi); +} + +/* + * Read double-word Control Transfer Preparation Registers (CTPR1/CTPR2/CTPR3) + * to the structure + * Register fields access: fff = CTPRn -> CTPRn_xxx + * Register entire access: CTPRn_entire = CTPRn -> CTPRn_reg + */ +#define NATIVE_NV_READ_CTPR1_REG_VALUE() NATIVE_NV_READ_CTPR_REG_VALUE(1) +#define NATIVE_NV_READ_CTPR2_REG_VALUE() NATIVE_NV_READ_CTPR_REG_VALUE(2) +#define NATIVE_NV_READ_CTPR3_REG_VALUE() NATIVE_NV_READ_CTPR_REG_VALUE(3) +#define READ_CTPR1_REG_VALUE() READ_CTPR_REG_VALUE(1) +#define READ_CTPR2_REG_VALUE() READ_CTPR_REG_VALUE(2) +#define READ_CTPR3_REG_VALUE() READ_CTPR_REG_VALUE(3) +#define NATIVE_NV_READ_CTPR_REG(reg_no) \ +({ \ + e2k_ctpr_t CTPR; \ + CTPR.CTPR_reg = NATIVE_NV_READ_CTPR_REG_VALUE(reg_no); \ + CTPR; \ +}) +#define NATIVE_NV_READ_CTPR1_REG() NATIVE_NV_READ_CTPR_REG(1) +#define NATIVE_NV_READ_CTPR2_REG() NATIVE_NV_READ_CTPR_REG(2) +#define NATIVE_NV_READ_CTPR3_REG() NATIVE_NV_READ_CTPR_REG(3) +#define 
READ_CTPR_REG(reg_no) \ +({ \ + e2k_ctpr_t CTPR; \ + CTPR.CTPR_reg = READ_CTPR_REG_VALUE(reg_no); \ + CTPR; \ +}) +#define READ_CTPR1_REG() READ_CTPR_REG(1) +#define READ_CTPR2_REG() READ_CTPR_REG(2) +#define READ_CTPR3_REG() READ_CTPR_REG(3) +static inline e2k_ctpr_t +read_CTPR_reg(int reg_no) +{ + switch (reg_no) { + case 1: return READ_CTPR1_REG(); + case 2: return READ_CTPR2_REG(); + case 3: return READ_CTPR3_REG(); + default: + return READ_CTPR1_REG(); + } +} +static inline e2k_ctpr_t +read_CTPR1_reg(void) +{ + return read_CTPR_reg(1); +} +static inline e2k_ctpr_t +read_CTPR2_reg(void) +{ + return read_CTPR_reg(2); +} +static inline e2k_ctpr_t +read_CTPR3_reg(void) +{ + return read_CTPR_reg(3); +} + +#define NATIVE_READ_CTPR1_HI_REG_VALUE() NATIVE_READ_CTPR_HI_REG_VALUE(1) +#define NATIVE_READ_CTPR2_HI_REG_VALUE() NATIVE_READ_CTPR_HI_REG_VALUE(2) +#define NATIVE_READ_CTPR3_HI_REG_VALUE() NATIVE_READ_CTPR_HI_REG_VALUE(3) + +/* + * Write double-word Control Transfer Preparation Registers (CTPR1/CTPR2/CTPR3) + * from the structure + * Register fields filling: CTPRn.CTPR_xxx = fff; + * Register entire filling: CTPRn.CTPR_reg = CTPRn_value; + */ +#define NATIVE_WRITE_CTPR1_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(1, CTPR_value) +#define NATIVE_WRITE_CTPR2_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(2, CTPR_value) +#define NATIVE_WRITE_CTPR3_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(3, CTPR_value) +#define NATIVE_WRITE_CTPR_REG(reg_no, CTPR) \ + NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR.CTPR_reg) +#define NATIVE_WRITE_CTPR1_REG(CTPR) NATIVE_WRITE_CTPR_REG(1, CTPR) +#define NATIVE_WRITE_CTPR2_REG(CTPR) NATIVE_WRITE_CTPR_REG(2, CTPR) +#define NATIVE_WRITE_CTPR3_REG(CTPR) NATIVE_WRITE_CTPR_REG(3, CTPR) +#define WRITE_CTPR1_REG_VALUE(CTPR_value) \ + WRITE_CTPR_REG_VALUE(1, CTPR_value) +#define WRITE_CTPR2_REG_VALUE(CTPR_value) \ + WRITE_CTPR_REG_VALUE(2, CTPR_value) +#define WRITE_CTPR3_REG_VALUE(CTPR_value) \ + WRITE_CTPR_REG_VALUE(3, CTPR_value) +#define WRITE_CTPR_REG(reg_no, CTPR) \ + WRITE_CTPR_REG_VALUE(reg_no, CTPR.CTPR_reg) +#define WRITE_CTPR1_REG(CTPR) WRITE_CTPR_REG(1, CTPR) +#define WRITE_CTPR2_REG(CTPR) WRITE_CTPR_REG(2, CTPR) +#define WRITE_CTPR3_REG(CTPR) WRITE_CTPR_REG(3, CTPR) + +static inline void +write_CTPR_reg(int reg_no, e2k_ctpr_t CTPR) +{ + switch (reg_no) { + case 1: + WRITE_CTPR1_REG(CTPR); + break; + case 2: + WRITE_CTPR2_REG(CTPR); + break; + case 3: + WRITE_CTPR3_REG(CTPR); + break; + default: + break; + } +} +static inline void +write_CTPR1_reg(e2k_ctpr_t CTPR) +{ + write_CTPR_reg(1, CTPR); +} +static inline void +write_CTPR2_reg(e2k_ctpr_t CTPR) +{ + write_CTPR_reg(2, CTPR); +} +static inline void +write_CTPR3_reg(e2k_ctpr_t CTPR) +{ + write_CTPR_reg(3, CTPR); +} + +#define NATIVE_WRITE_CTPR1_HI_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_HI_REG_VALUE(1, CTPR_value) +#define NATIVE_WRITE_CTPR2_HI_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_HI_REG_VALUE(2, CTPR_value) +#define NATIVE_WRITE_CTPR3_HI_REG_VALUE(CTPR_value) \ + NATIVE_WRITE_CTPR_HI_REG_VALUE(3, CTPR_value) + +/* + * Read signed word-register Procedure Chain Stack Hardware + * Top Pointer (PCSHTP) + */ + +static inline e2k_pcshtp_t +read_PCSHTP_reg(void) +{ + return READ_PCSHTP_REG_SVALUE(); +} + +/* + * Write signed word-register Procedure Chain Stack Hardware + * Top Pointer (PCSHTP) + */ + +static inline void +write_PCSHTP_reg(e2k_pcshtp_t PCSHTP) +{ + WRITE_PCSHTP_REG_SVALUE(PCSHTP); +} + + +/* + * Read low double-word Non-Protected User Stack Descriptor Register 
(USD) + * as the low word structure + * Register fields access: USD_lo = READ_USD_LO_REG(); + * fff = USD_lo.USD_lo_xxx; + */ +#define NATIVE_NV_READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = NATIVE_NV_READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) +#define READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) +#define BOOT_READ_USD_LO_REG() \ +({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = BOOT_READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) + +static inline e2k_usd_lo_t +native_nv_read_USD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_lo register 0x%lx\n", + NATIVE_NV_READ_USD_LO_REG().USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_USD_LO_REG(); +} +static inline e2k_usd_lo_t +read_USD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_lo register 0x%lx\n", + READ_USD_LO_REG().USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_USD_LO_REG(); +} +static inline e2k_usd_lo_t +boot_read_USD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_lo register 0x%lx\n", + BOOT_READ_USD_LO_REG().USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_USD_LO_REG(); +} + +/* + * Read high double-word Non-Protected User Stack Descriptor Register (USD) + * as the high word structure + * Register fields access: USD_hi = READ_USD_HI_REG(); + * fff = USD_hi.USD_hi_xxx; + */ +#define NATIVE_NV_READ_USD_HI_REG() \ +({ \ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = NATIVE_NV_READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) +#define READ_USD_HI_REG() \ +({ \ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) +#define BOOT_READ_USD_HI_REG() \ +({ \ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = BOOT_READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) + +static inline e2k_usd_hi_t +native_nv_read_USD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_hi register 0x%lx\n", + NATIVE_NV_READ_USD_HI_REG().USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_USD_HI_REG(); +} +static inline e2k_usd_hi_t +read_USD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_hi register 0x%lx\n", + READ_USD_HI_REG().USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_USD_HI_REG(); +} +static inline e2k_usd_hi_t +boot_read_USD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read USD_hi register 0x%lx\n", + BOOT_READ_USD_HI_REG().USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return BOOT_READ_USD_HI_REG(); +} + +/* + * Read quad-word Non-Protected User Stack Descriptor Register (USD) + * to the structure + * Register fields access: fff = USD -> USD_xxx + * Register double-word halfs access: USD_lo = USD -> USD_lo_reg + * USD_hi = USD -> USD_hi_reg + */ +#define READ_USD_REG() \ +({ \ + usd_struct_t USD; \ + USD.USD_hi_struct = READ_USD_HI_REG(); \ + USD.USD_lo_struct = READ_USD_LO_REG(); \ + USD; \ +}) +#define READ_USD_REG_TO(USD) \ +({ \ + *USD = READ_USD_REG(); \ +}) +#define BOOT_READ_USD_REG() \ +({ \ + usd_struct_t USD; \ + USD.USD_hi_struct = BOOT_READ_USD_HI_REG(); \ + USD.USD_lo_struct = BOOT_READ_USD_LO_REG(); \ + USD; \ +}) +#define BOOT_READ_USD_REG_TO(USD) \ +({ \ + *USD = BOOT_READ_USD_REG(); \ +}) + +static inline void +read_USD_reg(usd_struct_t *USD) +{ + READ_USD_REG_TO(USD); +} +static inline void +boot_read_USD_reg(usd_struct_t *USD) +{ + 
BOOT_READ_USD_REG_TO(USD); +} + +/* + * Write low double-word Non-Protected User Stack Descriptor Register (USD) + * from the low word structure + * Register fields filling: USD_lo.USD_lo_xxx = fff; + * Register double-word half filling: USD_lo.USD_lo_half = usd_lo; + */ +#define NATIVE_NV_WRITE_USD_LO_REG(USD_lo) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) +#define WRITE_USD_LO_REG(USD_lo) \ + WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) +#define BOOT_WRITE_USD_LO_REG(USD_lo) \ + BOOT_WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) + +static inline void +native_nv_write_USD_lo_reg(e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_USD_LO_REG(USD_lo); +} +static inline void +write_USD_lo_reg(e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_USD_LO_REG(USD_lo); +} +static inline void +boot_write_USD_lo_reg(e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_USD_LO_REG(USD_lo); +} + +/* + * Write high double-word Non-Protected User Stack Descriptor Register (USD) + * from the high word structure + * Register fields filling: USD_hi.USD_hi_xxx = fff; + * Register double-word half filling: USD_hi.USD_hi_half = usd_hi; + */ +#define NATIVE_NV_WRITE_USD_HI_REG(USD_hi) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) +#define WRITE_USD_HI_REG(USD_hi) \ + WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) +#define BOOT_WRITE_USD_HI_REG(USD_hi) \ + BOOT_WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) + +static inline void +native_nv_write_USD_hi_reg(e2k_usd_hi_t USD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_USD_HI_REG(USD_hi); +} +static inline void +write_USD_hi_reg(e2k_usd_hi_t USD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_USD_HI_REG(USD_hi); +} +static inline void +boot_write_USD_hi_reg(e2k_usd_hi_t USD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_USD_HI_REG(USD_hi); +} + +/* + * Write high & low quad-word Non-Protected User Stack Descriptor Register (USD) + * from the high & low word structure + */ +#define WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) +#define BOOT_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define BOOT_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + BOOT_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +#define WRITE_USBR_USD_REG_VALUE(usbr, USD_hi, USD_lo) \ +do { \ + WRITE_USBR_REG_VALUE(usbr); \ + WRITE_USD_REG_VALUE(USD_hi, USD_lo); \ +} while (0) + +#define WRITE_USBR_USD_REG(usbr, USD_hi, USD_lo) \ +do { \ + WRITE_USBR_REG(usbr); \ + WRITE_USD_REG(USD_hi, USD_lo); \ +} while (0) + + +static inline void 
+write_USD_hi_lo_reg(e2k_usd_hi_t USD_hi, e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_USD_REG(USD_hi, USD_lo); +} +static inline void +boot_write_USD_hi_lo_reg(e2k_usd_hi_t USD_hi, e2k_usd_lo_t USD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write USD_hi register 0x%lx\n", USD_hi.USD_hi_half); + boot_printk("Write USD_lo register 0x%lx\n", USD_lo.USD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + BOOT_WRITE_USD_REG(USD_hi, USD_lo); +} + +/* + * Write quad-word Non-Protected User Stack Descriptor Register (USD) + * from the structure + * Register fields filling: USD.USD_xxx = fff; + * Register double-word halfs filling: USD.USD_lo_reg = USD_lo; + * USD.USD_hi_reg = USD_hi; + */ +static inline void +write_USD_reg(usd_struct_t USD) +{ + WRITE_USD_REG(USD.USD_hi_struct, USD.USD_lo_struct); +} +static inline void +boot_write_USD_reg(usd_struct_t USD) +{ + BOOT_WRITE_USD_REG(USD.USD_hi_struct, USD.USD_lo_struct); +} + +/* + * Read low double-word Protected User Stack Descriptor Register (PUSD) + * as the low word structure + * Register fields access: PUSD_lo = READ_PUSD_LO_REG(); + * fff = PUSD_lo.PUSD_lo_xxx; + */ +#define NATIVE_NV_READ_PUSD_LO_REG() \ +({ \ + e2k_pusd_lo_t PUSD_lo; \ + PUSD_lo.PUSD_lo_half = NATIVE_NV_READ_PUSD_LO_REG_VALUE(); \ + PUSD_lo; \ +}) +#define READ_PUSD_LO_REG() \ +({ \ + e2k_pusd_lo_t PUSD_lo; \ + PUSD_lo.PUSD_lo_half = READ_PUSD_LO_REG_VALUE(); \ + PUSD_lo; \ +}) +static inline e2k_pusd_lo_t +native_nv_read_PUSD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_lo register 0x%lx\n", + NATIVE_NV_READ_PUSD_LO_REG().PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PUSD_LO_REG(); +} +static inline e2k_pusd_lo_t +read_PUSD_lo_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_lo register 0x%lx\n", + READ_PUSD_LO_REG().PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PUSD_LO_REG(); +} + +/* + * Read high double-word Protected User Stack Descriptor Register (PUSD) + * as the high word structure + * Register fields access: PUSD_hi = READ_PUSD_HI_REG(); + * fff = PUSD_hi.PUSD_hi_xxx; + */ +#define NATIVE_NV_READ_PUSD_HI_REG() \ +({ \ + e2k_pusd_hi_t PUSD_hi; \ + PUSD_hi.PUSD_hi_half = NATIVE_NV_READ_PUSD_HI_REG_VALUE(); \ + PUSD_hi; \ +}) +#define READ_PUSD_HI_REG() \ +({ \ + e2k_pusd_hi_t PUSD_hi; \ + PUSD_hi.PUSD_hi_half = READ_PUSD_HI_REG_VALUE(); \ + PUSD_hi; \ +}) +static inline e2k_pusd_hi_t +native_nv_read_PUSD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_hi register 0x%lx\n", + NATIVE_NV_READ_PUSD_HI_REG().PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return NATIVE_NV_READ_PUSD_HI_REG(); +} +static inline e2k_pusd_hi_t +read_PUSD_hi_reg(void) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Read PUSD_hi register 0x%lx\n", + READ_PUSD_HI_REG().PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + return READ_PUSD_HI_REG(); +} + +/* + * Read quad-word User Protected Stack Descriptor Register (PUSD) + * to the structure + * Register fields access: fff = PUSD -> PUSD_xxx + * Register double-word halfs access: PUSD_lo = PUSD -> PUSD_lo_reg + * PUSD_hi = PUSD -> PUSD_hi_reg + */ +#define NATIVE_NV_READ_PUSD_REG() \ +({ \ + pusd_struct_t PUSD; \ + PUSD.PUSD_hi_struct = NATIVE_NV_READ_PUSD_HI_REG(); \ + 
PUSD.PUSD_lo_struct = NATIVE_NV_READ_PUSD_LO_REG(); \ + PUSD; \ +}) +#define NATIVE_NV_READ_PUSD_REG_TO(PUSD) \ +({ \ + *PUSD = NATIVE_NV_READ_PUSD_REG(); \ +}) +#define READ_PUSD_REG() \ +({ \ + pusd_struct_t PUSD; \ + PUSD.PUSD_hi_struct = READ_PUSD_HI_REG(); \ + PUSD.PUSD_lo_struct = READ_PUSD_LO_REG(); \ + PUSD; \ +}) +#define READ_PUSD_REG_TO(PUSD) \ +({ \ + *PUSD = READ_PUSD_REG(); \ +}) +static inline void +native_nv_read_PUSD_reg(pusd_struct_t *PUSD) +{ + NATIVE_NV_READ_PUSD_REG_TO(PUSD); +} +static inline void +read_PUSD_reg(pusd_struct_t *PUSD) +{ + READ_PUSD_REG_TO(PUSD); +} + +/* + * Write low double-word Protected User Stack Descriptor Register (PUSD) + * from the low word structure + * Register fields filling: PUSD_lo.PUSD_lo_xxx = fff; + * Register double-word half filling: PUSD_lo.PUSD_lo_half = pusd_lo; + */ +#define NATIVE_NV_WRITE_PUSD_LO_REG(PUSD_lo) \ + NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo.PUSD_lo_half) +#define WRITE_PUSD_LO_REG(PUSD_lo) \ + WRITE_PUSD_LO_REG_VALUE(PUSD_lo.PUSD_lo_half) + +static inline void +native_nv_write_PUSD_lo_reg(e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PUSD_LO_REG(PUSD_lo); +} +static inline void +write_PUSD_lo_reg(e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PUSD_LO_REG(PUSD_lo); +} + +/* + * Write high double-word Protected User Stack Descriptor Register (PUSD) + * from the high word structure + * Register fields filling: PUSD_hi.PUSD_hi_xxx = fff; + * Register double-word half filling: PUSD_hi.PUSD_hi_half = pusd_hi; + */ +#define NATIVE_NV_WRITE_PUSD_HI_REG(PUSD_hi) \ + NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi.PUSD_hi_half) +#define WRITE_PUSD_HI_REG(PUSD_hi) \ + WRITE_PUSD_HI_REG_VALUE(PUSD_hi.PUSD_hi_half) + +static inline void +native_nv_write_PUSD_hi_reg(e2k_pusd_hi_t PUSD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + NATIVE_NV_WRITE_PUSD_HI_REG(PUSD_hi); +} +static inline void +write_PUSD_hi_reg(e2k_pusd_hi_t PUSD_hi) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PUSD_HI_REG(PUSD_hi); +} + +/* + * Write high & low quad-word Protected User Stack Descriptor Register (PUSD) + * from the high & low word structure + */ +#define NATIVE_NV_WRITE_PUSD_REG_VALUE(PUSD_hi_value, PUSD_lo_value) \ +({ \ + NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value); \ + NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value); \ +}) +#define NATIVE_NV_WRITE_PUSD_REG(PUSD_hi, PUSD_lo) \ +({ \ + NATIVE_NV_WRITE_PUSD_REG_VALUE(PUSD_hi.PUSD_hi_half, \ + PUSD_lo.PUSD_lo_half); \ +}) +#define WRITE_PUSD_REG_VALUE(PUSD_hi_value, PUSD_lo_value) \ +({ \ + WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value); \ + WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value); \ +}) +#define WRITE_PUSD_REG(PUSD_hi, PUSD_lo) \ +({ \ + WRITE_PUSD_REG_VALUE(PUSD_hi.PUSD_hi_half, PUSD_lo.PUSD_lo_half); \ +}) +static inline void +native_nv_write_PUSD_hi_lo_reg(e2k_pusd_hi_t PUSD_hi, e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + 
NATIVE_NV_WRITE_PUSD_REG(PUSD_hi, PUSD_lo); +} +static inline void +write_PUSD_hi_lo_reg(e2k_pusd_hi_t PUSD_hi, e2k_pusd_lo_t PUSD_lo) +{ +#ifdef _E2K_BOOT_CPU_REGS_TRACE_ + boot_printk("Write PUSD_hi register 0x%lx\n", PUSD_hi.PUSD_hi_half); + boot_printk("Write PUSD_lo register 0x%lx\n", PUSD_lo.PUSD_lo_half); +#endif /* _E2K_BOOT_CPU_REGS_TRACE_ */ + WRITE_PUSD_REG(PUSD_hi, PUSD_lo); +} + +/* + * Write quad-word User Protected Stack Descriptor Register (PUSD) + * from the structure + * Register fields filling: PUSD.PUSD_xxx = fff; + * Register double-word halfs filling: PUSD.PUSD_lo_reg = PUSD_lo; + * PUSD.PUSD_hi_reg = PUSD_hi; + */ +static inline void +write_PUSD_reg(pusd_struct_t PUSD) +{ + WRITE_PUSD_REG(PUSD.PUSD_hi_struct, PUSD.PUSD_lo_struct); +} + +/* + * Read double-word User Stacks Base Register (USBR) to the structure + * Register fields access: fff = USBR -> USBR_xxx + * Register entire access: USBR_entire = USBR -> USBR_reg + */ +#define READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = READ_USBR_REG_VALUE(); \ + USBR; \ +}) +#define NATIVE_NV_READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = NATIVE_NV_READ_USBR_REG_VALUE(); \ + USBR; \ +}) +#define BOOT_READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = BOOT_READ_USBR_REG_VALUE(); \ + USBR; \ +}) +static inline e2k_usbr_t +read_USBR_reg(void) +{ + return READ_USBR_REG(); +} +static inline e2k_usbr_t +boot_read_USBR_reg(void) +{ + return BOOT_READ_USBR_REG(); +} + +/* + * Write double-word User Stacks Base Register (USBR) from the structure + * Register fields filling: USBR.USBR_xxx = fff; + * Register entire filling: USBR.USBR_reg = USBR_value; + */ +#define WRITE_USBR_REG(USBR) WRITE_USBR_REG_VALUE(USBR.USBR_reg) +#define BOOT_WRITE_USBR_REG(USBR) \ + BOOT_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +static inline void +write_USBR_reg(e2k_usbr_t USBR) +{ + WRITE_USBR_REG(USBR); +} +static inline void +boot_write_USBR_reg(e2k_usbr_t USBR) +{ + BOOT_WRITE_USBR_REG(USBR); +} + +/* + * Read double-word Stacks Base Register (SBR) to the structure + * Register fields access: fff = SBR -> SBR_xxx + * Register entire access: SBR_entire = SBR -> SBR_reg + */ +#define NATIVE_NV_READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = NATIVE_NV_READ_SBR_REG_VALUE(); \ + SBR; \ +}) +#define READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = READ_SBR_REG_VALUE(); \ + SBR; \ +}) +#define BOOT_READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = BOOT_READ_SBR_REG_VALUE(); \ + SBR; \ +}) + +static inline e2k_sbr_t +native_nv_read_SBR_reg(void) +{ + return NATIVE_NV_READ_SBR_REG(); +} +static inline e2k_sbr_t +read_SBR_reg(void) +{ + return READ_SBR_REG(); +} +static inline e2k_sbr_t +boot_read_SBR_reg(void) +{ + return BOOT_READ_SBR_REG(); +} + +/* + * Write double-word Stacks Base Register (SBR) from the structure + * Register fields filling: SBR.SBR_xxx = fff; + * Register entire filling: SBR.SBR_reg = SBR_value; + */ +#define NATIVE_NV_WRITE_SBR_REG(SBR) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(SBR.SBR_reg) +#define WRITE_SBR_REG(SBR) \ + WRITE_SBR_REG_VALUE(SBR.SBR_reg) +#define BOOT_WRITE_SBR_REG(SBR) \ + BOOT_WRITE_SBR_REG_VALUE(SBR.SBR_reg) + +static inline void +native_nv_write_SBR_reg(e2k_sbr_t SBR) +{ + NATIVE_NV_WRITE_SBR_REG(SBR); +} +static inline void +write_SBR_reg(e2k_sbr_t SBR) +{ + WRITE_SBR_REG(SBR); +} +static inline void +boot_write_SBR_reg(e2k_sbr_t SBR) +{ + BOOT_WRITE_SBR_REG(SBR); +} + +#define NATIVE_NV_READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP_reg; \ + PSHTP_reg.word = 
NATIVE_NV_READ_PSHTP_REG_VALUE(); \ + PSHTP_reg; \ +}) +#define READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP_reg; \ + PSHTP_reg.word = READ_PSHTP_REG_VALUE(); \ + PSHTP_reg; \ +}) + +#define NATIVE_WRITE_PSHTP_REG(PSHTP_reg) \ +({ \ + NATIVE_WRITE_PSHTP_REG_VALUE(AS_WORD(PSHTP_reg)); \ +}) +#define NATIVE_STRIP_PSHTP_WINDOW() NATIVE_WRITE_PSHTP_REG_VALUE(0) +#define WRITE_PSHTP_REG(PSHTP_reg) \ +({ \ + WRITE_PSHTP_REG_VALUE(AS_WORD(PSHTP_reg)); \ +}) +#define STRIP_PSHTP_WINDOW() WRITE_PSHTP_REG_VALUE(0) + +/* + * Read double-word Window Descriptor Register (WD) to the structure + * Register fields access: fff = WD -> WD_xxx + * Register entire access: WD_entire = WD -> WD_reg + */ +#define NATIVE_READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = NATIVE_READ_WD_REG_VALUE(); \ + WD; \ +}) +#define READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = READ_WD_REG_VALUE(); \ + WD; \ +}) +static inline e2k_wd_t +native_read_WD_reg(void) +{ + return NATIVE_READ_WD_REG(); +} +static inline e2k_wd_t +read_WD_reg(void) +{ + return READ_WD_REG(); +} + +/* + * Write double-word Window Descriptor Register (WD) from the structure + * Register fields filling: WD.WD_xxx = fff; + * Register entire filling: WD.WD_reg = WD_value; + */ +#define NATIVE_WRITE_WD_REG(WD) NATIVE_WRITE_WD_REG_VALUE(WD.WD_reg) +#define WRITE_WD_REG(WD) WRITE_WD_REG_VALUE(WD.WD_reg) +static inline void +native_write_WD_reg(e2k_wd_t WD) +{ + NATIVE_WRITE_WD_REG(WD); +} +static inline void +write_WD_reg(e2k_wd_t WD) +{ + WRITE_WD_REG(WD); +} + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS + +/* + * Read double-word Loop Status Register (LSR) to the structure + * Register fields access: fff = LSR -> LSR_xxx + * Register entire access: LSR_entire = LSR -> LSR_reg + */ +#define READ_LSR_REG() \ +({ \ + e2k_lsr_t LSR; \ + LSR.LSR_reg = READ_LSR_REG_VALUE(); \ + LSR; \ +}) +static inline e2k_lsr_t +read_LSR_reg(void) +{ + return READ_LSR_REG(); +} + +/* + * Write double-word Loop Status Register (LSR) from the structure + * Register fields filling: LSR.LSR_xxx = fff; + * Register entire filling: LSR.LSR_reg = LSR_value; + */ +#define WRITE_LSR_REG(LSR) WRITE_LSR_REG_VALUE(LSR.LSR_reg) +static inline void +write_LSR_reg(e2k_lsr_t LSR) +{ + WRITE_LSR_REG(LSR); +} + +/* + * Read double-word Initial Loop Counters Register (ILCR) to the structure + * Register fields access: fff = ILCR -> ILCR_xxx + * Register entire access: ILCR_entire = ILCR -> ILCR_reg + */ +#define READ_ILCR_REG() \ +({ \ + e2k_ilcr_t ILCR; \ + ILCR.ILCR_reg = READ_ILCR_REG_VALUE(); \ + ILCR; \ +}) +static inline e2k_ilcr_t +read_ILCR_reg(void) +{ + return READ_ILCR_REG(); +} + +/* + * Write double-word Initial Loop Counters Register (ILCR) from the structure + * Register fields filling: ILCR.ILCR_xxx = fff; + * Register entire filling: ILCR.ILCR_reg = ILCR_value; + */ +#define WRITE_ILCR_REG(ILCR) WRITE_ILCR_REG_VALUE(ILCR.ILCR_reg) +static inline void +write_ILCR_reg(e2k_ilcr_t ILCR) +{ + WRITE_ILCR_REG(ILCR); +} + +/* + * Write double-word LSR/ILCR registers in complex + */ +#define WRITE_LSR_LSR1_ILCR_ILCR1_REGS(lsr, lsr1, ilcr, ilcr1) \ + WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr.LSR_reg, \ + lsr1.LSR1_reg, ilcr.ILCR_reg, ilcr1.ILCR1_reg) +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +/* + * Read/write OS register which points to current process thread info + * structure (OSR0) + */ +#define NATIVE_GET_OSR0_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() +#define NATIVE_READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() +#define NATIVE_READ_CURRENT_REG() \ +({ \ + struct 
thread_info *TI; \ + TI = (struct thread_info *)NATIVE_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define BOOT_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)BOOT_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) + +static inline struct thread_info * +read_current_reg(void) +{ + return READ_CURRENT_REG(); +} +static inline struct thread_info * +boot_read_current_reg(void) +{ + return BOOT_READ_CURRENT_REG(); +} + +#define NATIVE_SET_OSR0_REG_VALUE(TI) \ + NATIVE_NV_WRITE_OSR0_REG_VALUE(TI) +#define NATIVE_WRITE_CURRENT_REG(TI) \ + NATIVE_SET_OSR0_REG_VALUE((unsigned long)(TI)) +#define WRITE_CURRENT_REG(TI) \ + WRITE_CURRENT_REG_VALUE((unsigned long)(TI)) +#define BOOT_WRITE_CURRENT_REG(TI) \ + BOOT_WRITE_CURRENT_REG_VALUE((unsigned long)(TI)) +static inline void +native_write_current_reg(struct thread_info *TI) +{ + NATIVE_WRITE_CURRENT_REG(TI); +} +static inline void +write_current_reg(struct thread_info *TI) +{ + WRITE_CURRENT_REG(TI); +} + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define READ_OSEM_REG() READ_OSEM_REG_VALUE() +static inline unsigned int +read_OSEM_reg(void) +{ + return READ_OSEM_REG(); +} +#define WRITE_OSEM_REG(OSEM) WRITE_OSEM_REG_VALUE(OSEM) +static inline void +write_OSEM_reg(unsigned int OSEM) +{ + WRITE_OSEM_REG(OSEM); +} + +#define READ_HCEM_REG() NATIVE_GET_SREG_CLOSED(hcem) +#define WRITE_HCEM_REG(value) NATIVE_SET_SREG_CLOSED_NOEXC(hcem, (value), 5) + +#define READ_HCEB_REG() NATIVE_GET_DSREG_CLOSED(hceb) +#define WRITE_HCEB_REG(value) NATIVE_SET_DSREG_CLOSED_NOEXC(hceb, (value), 5) + +/* + * Read/write word Base Global Register (BGR) to the structure + * Register fields access: fff = BGR.xxx + * Register entire access: BGR_entire = BGR.BGR_reg + */ +#define NATIVE_READ_BGR_REG() \ +({ \ + e2k_bgr_t BGR; \ + BGR.BGR_reg = NATIVE_READ_BGR_REG_VALUE(); \ + BGR; \ +}) +#define READ_BGR_REG() \ +({ \ + e2k_bgr_t BGR; \ + BGR.BGR_reg = READ_BGR_REG_VALUE(); \ + BGR; \ +}) +static inline e2k_bgr_t +native_read_BGR_reg(void) +{ + return NATIVE_READ_BGR_REG(); +} +static inline e2k_bgr_t +read_BGR_reg(void) +{ + return READ_BGR_REG(); +} + +/* + * Write word Base Global Register (BGR) from the structure + * Register fields filling: BGR.BGR_xxx = fff + * Register entire filling: BGR.BGR_reg = BGR_value + */ + +#define NATIVE_WRITE_BGR_REG(BGR) NATIVE_WRITE_BGR_REG_VALUE(BGR.BGR_reg) +#define WRITE_BGR_REG(BGR) WRITE_BGR_REG_VALUE(BGR.BGR_reg) +#define BOOT_WRITE_BGR_REG(BGR) BOOT_WRITE_BGR_REG_VALUE(BGR.BGR_reg) + +static inline void +native_write_BGR_reg(e2k_bgr_t bgr) +{ + NATIVE_WRITE_BGR_REG(bgr); +} +static inline void +write_BGR_reg(e2k_bgr_t bgr) +{ + WRITE_BGR_REG(bgr); +} + +#define NATIVE_INIT_BGR_REG() NATIVE_WRITE_BGR_REG(E2K_INITIAL_BGR) +#define NATIVE_BOOT_INIT_BGR_REG() NATIVE_INIT_BGR_REG() +#define INIT_BGR_REG() WRITE_BGR_REG(E2K_INITIAL_BGR) +#define BOOT_INIT_BGR_REG() BOOT_WRITE_BGR_REG(E2K_INITIAL_BGR) + +static inline void +native_init_BGR_reg(void) +{ + NATIVE_INIT_BGR_REG(); +} +static inline void +init_BGR_reg(void) +{ + INIT_BGR_REG(); +} +static inline void +native_boot_init_BGR_reg(void) +{ + NATIVE_BOOT_INIT_BGR_REG(); +} + +/* + * Read CPU current clock register (CLKR) + */ +#define READ_CLKR_REG() READ_CLKR_REG_VALUE() + +/* + * Read/Write system clock registers (SCLKM) + */ +#define READ_SCLKR_REG() READ_SCLKR_REG_VALUE() +#define READ_SCLKM1_REG() ((e2k_sclkm1_t) 
READ_SCLKM1_REG_VALUE()) +#define READ_SCLKM2_REG() READ_SCLKM2_REG_VALUE() +#define READ_SCLKM3_REG() READ_SCLKM3_REG_VALUE() + +#define WRITE_SCLKR_REG(reg_value) WRITE_SCLKR_REG_VALUE(reg_value) +#define WRITE_SCLKM1_REG(reg) WRITE_SCLKM1_REG_VALUE(AW(reg)) +#define WRITE_SCLKM2_REG(reg_value) WRITE_SCLKM2_REG_VALUE(reg_value) +#define WRITE_SCLKM3_REG(reg_value) WRITE_SCLKM3_REG_VALUE(reg_value) + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define READ_CU_HW0_REG() READ_CU_HW0_REG_VALUE() +#define READ_CU_HW1_REG() READ_CU_HW1_REG_VALUE() + +#define WRITE_CU_HW0_REG(reg) WRITE_CU_HW0_REG_VALUE(reg) +#define WRITE_CU_HW1_REG(reg) WRITE_CU_HW1_REG_VALUE(reg) + +/* + * Read low/high double-word Recovery point register (RPR) + */ +#define NATIVE_READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = NATIVE_READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define NATIVE_READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = NATIVE_READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +#define NATIVE_CL_READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = NATIVE_CL_READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define NATIVE_CL_READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = NATIVE_CL_READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +#define READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +static inline e2k_rpr_lo_t +native_read_RPR_lo_reg(void) +{ + return NATIVE_READ_RPR_LO_REG(); +} +static inline e2k_rpr_hi_t +native_read_RPR_hi_reg(void) +{ + return NATIVE_READ_RPR_HI_REG(); +} +static inline e2k_rpr_lo_t +read_RPR_lo_reg(void) +{ + return READ_RPR_LO_REG(); +} +static inline e2k_rpr_hi_t +read_RPR_hi_reg(void) +{ + return READ_RPR_HI_REG(); +} + +/* + * Write low/high double-word Recovery point register (RPR) + */ +#define NATIVE_WRITE_RPR_LO_REG(RPR_lo) \ + NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define NATIVE_WRITE_RPR_HI_REG(RPR_hi) \ + NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) +#define WRITE_RPR_LO_REG(RPR_lo) \ + WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define WRITE_RPR_HI_REG(RPR_hi) \ + WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) + +static inline void +native_write_RPR_lo_reg(e2k_rpr_lo_t RPR_lo) +{ + NATIVE_WRITE_RPR_LO_REG(RPR_lo); +} +static inline void +native_write_RPR_hi_reg(e2k_rpr_hi_t RPR_hi) +{ + NATIVE_WRITE_RPR_HI_REG(RPR_hi); +} +static inline void +write_RPR_lo_reg(e2k_rpr_lo_t RPR_lo) +{ + WRITE_RPR_LO_REG(RPR_lo); +} +static inline void +write_RPR_hi_reg(e2k_rpr_hi_t RPR_hi) +{ + WRITE_RPR_HI_REG(RPR_hi); +} + +/* + * Read CPU current Instruction Pointer register (IP) + */ +#define READ_IP_REG() READ_IP_REG_VALUE() +static inline unsigned long +read_ip_reg(void) +{ + return READ_IP_REG(); +} + +/* + * Read debug and monitors registers + */ +#define NATIVE_READ_DIBCR_REG() \ +({ \ + e2k_dibcr_t DIBCR; \ + AS_WORD(DIBCR) = NATIVE_READ_DIBCR_REG_VALUE(); \ + DIBCR; \ +}) +#define READ_DIBCR_REG() \ +({ \ + e2k_dibcr_t DIBCR; \ + AS_WORD(DIBCR) = READ_DIBCR_REG_VALUE(); \ + DIBCR; \ +}) +static inline e2k_dibcr_t +read_DIBCR_reg(void) +{ + return READ_DIBCR_REG(); +} + +#define NATIVE_READ_DIBSR_REG() \ +({ \ + e2k_dibsr_t DIBSR; \ + AS_WORD(DIBSR) = NATIVE_READ_DIBSR_REG_VALUE(); \ + DIBSR; \ +}) +#define READ_DIBSR_REG() \ +({ \ + e2k_dibsr_t DIBSR; \ + 
AS_WORD(DIBSR) = READ_DIBSR_REG_VALUE(); \ + DIBSR; \ +}) +static inline e2k_dibsr_t +read_DIBSR_reg(void) +{ + return READ_DIBSR_REG(); +} + +#define NATIVE_READ_DIMCR_REG() \ +({ \ + e2k_dimcr_t DIMCR; \ + AS_WORD(DIMCR) = NATIVE_READ_DIMCR_REG_VALUE(); \ + DIMCR; \ +}) +#define READ_DIMCR_REG() \ +({ \ + e2k_dimcr_t DIMCR; \ + AS_WORD(DIMCR) = READ_DIMCR_REG_VALUE(); \ + DIMCR; \ +}) +static inline e2k_dimcr_t +read_DIMCR_reg(void) +{ + return READ_DIMCR_REG(); +} + +#define NATIVE_READ_DIBAR0_REG() NATIVE_READ_DIBAR0_REG_VALUE() +#define NATIVE_READ_DIBAR1_REG() NATIVE_READ_DIBAR1_REG_VALUE() +#define NATIVE_READ_DIBAR2_REG() NATIVE_READ_DIBAR2_REG_VALUE() +#define NATIVE_READ_DIBAR3_REG() NATIVE_READ_DIBAR3_REG_VALUE() +#define READ_DIBAR0_REG() READ_DIBAR0_REG_VALUE() +#define READ_DIBAR1_REG() READ_DIBAR1_REG_VALUE() +#define READ_DIBAR2_REG() READ_DIBAR2_REG_VALUE() +#define READ_DIBAR3_REG() READ_DIBAR3_REG_VALUE() +static inline unsigned long +read_DIBAR0_reg(void) +{ + return READ_DIBAR0_REG(); +} +static inline unsigned long +read_DIBAR1_reg(void) +{ + return READ_DIBAR1_REG(); +} +static inline unsigned long +read_DIBAR2_reg(void) +{ + return READ_DIBAR2_REG(); +} +static inline unsigned long +read_DIBAR3_reg(void) +{ + return READ_DIBAR3_REG(); +} + +#define NATIVE_READ_DIMAR0_REG() NATIVE_READ_DIMAR0_REG_VALUE() +#define NATIVE_READ_DIMAR1_REG() NATIVE_READ_DIMAR1_REG_VALUE() +#define READ_DIMAR0_REG() READ_DIMAR0_REG_VALUE() +#define READ_DIMAR1_REG() READ_DIMAR1_REG_VALUE() +static inline unsigned long +read_DIMAR0_reg(void) +{ + return READ_DIMAR0_REG(); +} +static inline unsigned long +read_DIMAR1_reg(void) +{ + return READ_DIMAR1_REG(); +} + +#define NATIVE_WRITE_DIBCR_REG(DIBCR) \ + NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR.DIBCR_reg) +#define WRITE_DIBCR_REG(DIBCR) WRITE_DIBCR_REG_VALUE(DIBCR.DIBCR_reg) +static inline void +write_DIBCR_reg(e2k_dibcr_t DIBCR) +{ + WRITE_DIBCR_REG(DIBCR); +} +#define NATIVE_WRITE_DIBSR_REG(DIBSR) \ + NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR.DIBSR_reg) +#define WRITE_DIBSR_REG(DIBSR) WRITE_DIBSR_REG_VALUE(DIBSR.DIBSR_reg) +static inline void +write_DIBSR_reg(e2k_dibsr_t DIBSR) +{ + WRITE_DIBSR_REG(DIBSR); +} +#define NATIVE_WRITE_DIMCR_REG(DIMCR) \ + NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR.DIMCR_reg) +#define WRITE_DIMCR_REG(DIMCR) WRITE_DIMCR_REG_VALUE(DIMCR.DIMCR_reg) +static inline void +write_DIMCR_reg(e2k_dimcr_t DIMCR) +{ + WRITE_DIMCR_REG(DIMCR); +} +#define NATIVE_WRITE_DIBAR0_REG(DIBAR0) NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0) +#define NATIVE_WRITE_DIBAR1_REG(DIBAR1) NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1) +#define NATIVE_WRITE_DIBAR2_REG(DIBAR2) NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2) +#define NATIVE_WRITE_DIBAR3_REG(DIBAR3) NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3) +#define WRITE_DIBAR0_REG(DIBAR0) WRITE_DIBAR0_REG_VALUE(DIBAR0) +#define WRITE_DIBAR1_REG(DIBAR1) WRITE_DIBAR1_REG_VALUE(DIBAR1) +#define WRITE_DIBAR2_REG(DIBAR2) WRITE_DIBAR2_REG_VALUE(DIBAR2) +#define WRITE_DIBAR3_REG(DIBAR3) WRITE_DIBAR3_REG_VALUE(DIBAR3) +static inline void +write_DIBAR0_reg(unsigned long DIBAR0) +{ + WRITE_DIBAR0_REG(DIBAR0); +} +static inline void +write_DIBAR1_reg(unsigned long DIBAR1) +{ + WRITE_DIBAR1_REG(DIBAR1); +} +static inline void +write_DIBAR2_reg(unsigned long DIBAR2) +{ + WRITE_DIBAR2_REG(DIBAR2); +} +static inline void +write_DIBAR3_reg(unsigned long DIBAR3) +{ + WRITE_DIBAR3_REG(DIBAR3); +} + +#define NATIVE_WRITE_DIMAR0_REG(DIMAR0) NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0) +#define NATIVE_WRITE_DIMAR1_REG(DIMAR1) NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1) +#define 
WRITE_DIMAR0_REG(DIMAR0) WRITE_DIMAR0_REG_VALUE(DIMAR0) +#define WRITE_DIMAR1_REG(DIMAR1) WRITE_DIMAR1_REG_VALUE(DIMAR1) +static inline void +write_DIMAR0_reg(unsigned long DIMAR0) +{ + WRITE_DIMAR0_REG(DIMAR0); +} +static inline void +write_DIMAR1_reg(unsigned long DIMAR1) +{ + WRITE_DIMAR1_REG(DIMAR1); +} + +/* + * Read double-word Compilation Unit Table Register (CUTD) to the structure + * Register fields access: fff = CUTD.CUTD_xxx or + * fff = CUTD->CUTD_xxx + * Register entire access: CUTD_entire = CUTD.CUTD_reg or + * CUTD_entire = CUTD->CUTD_reg + */ +#define NATIVE_NV_READ_CUTD_REG() \ +({ \ + e2k_cutd_t CUTD; \ + CUTD.CUTD_reg = NATIVE_NV_READ_CUTD_REG_VALUE(); \ + CUTD; \ +}) +#define READ_CUTD_REG() \ +({ \ + e2k_cutd_t CUTD; \ + CUTD.CUTD_reg = READ_CUTD_REG_VALUE(); \ + CUTD; \ +}) +static inline e2k_cutd_t +native_nv_read_CUTD_reg(void) +{ + return NATIVE_NV_READ_CUTD_REG(); +} +static inline e2k_cutd_t +read_CUTD_reg(void) +{ + return READ_CUTD_REG(); +} + +/* + * Write double-word Compilation Unit Table Register (CUTD) from the structure + * Register fields filling: CUTD.CUTD_xxx = fff or + * CUTD->CUTD_xxx = fff + * Register entire filling: CUTD.CUTD_reg = CUTD_value or + * CUTD->CUTD_reg = CUTD_value + */ +#define NATIVE_NV_NOIRQ_WRITE_CUTD_REG(CUTD) \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg) +#define WRITE_CUTD_REG(CUTD) \ + WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg) +static inline void +native_nv_noirq_write_CUTD_reg(e2k_cutd_t CUTD) +{ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG(CUTD); +} +static inline void +write_CUTD_reg(e2k_cutd_t CUTD) +{ + WRITE_CUTD_REG(CUTD); +} + +/* + * Read word Compilation Unit Index Register (CUIR) to the structure + * Register fields access: fff = CUIR.CUIR_xxx or + * fff = CUIR->CUIR_xxx + * Register entire access: CUIR_entire = CUIR.CUIR_reg or + * CUIR_entire = CUIR->CUIR_reg + */ +#define READ_CUIR_REG() \ +({ \ + e2k_cuir_t CUIR; \ + CUIR.CUIR_reg = READ_CUIR_REG_VALUE(); \ + CUIR; \ +}) +static inline e2k_cuir_t +read_CUIR_reg(void) +{ + return READ_CUIR_REG(); +} + +/* + * Read Core Mode Register (CORE_MODE) to the structure + * Register fields access: fff = AS_STRACT(CORE_MODE).xxx + * Register entire access: reg_entire = AS_WORD(CORE_MODE) + */ +#define NATIVE_READ_CORE_MODE_REG() \ +({ \ + e2k_core_mode_t CORE_MODE; \ + CORE_MODE.CORE_MODE_reg = NATIVE_READ_CORE_MODE_REG_VALUE(); \ + CORE_MODE; \ +}) +#define READ_CORE_MODE_REG() \ +({ \ + e2k_core_mode_t CORE_MODE; \ + CORE_MODE.CORE_MODE_reg = READ_CORE_MODE_REG_VALUE(); \ + CORE_MODE; \ +}) +#define BOOT_READ_CORE_MODE_REG() \ +({ \ + e2k_core_mode_t CORE_MODE; \ + CORE_MODE.CORE_MODE_reg = BOOT_READ_CORE_MODE_REG_VALUE(); \ + CORE_MODE; \ +}) + +/* + * Write Core Mode Register (CORE_MODE) from the structure + * Register fields filling: AS_STRACT(CORE_MODE).xxx = fff + * Register entire filling: AS_WORD(CORE_MODE) = CORE_MODE_value + */ +#define NATIVE_WRITE_CORE_MODE_REG(CORE_MODE) \ + NATIVE_WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) +#define BOOT_NATIVE_WRITE_CORE_MODE_REG(CORE_MODE) \ + BOOT_NATIVE_WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) +#define WRITE_CORE_MODE_REG(CORE_MODE) \ + WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) +#define BOOT_WRITE_CORE_MODE_REG(CORE_MODE) \ + BOOT_WRITE_CORE_MODE_REG_VALUE(CORE_MODE.CORE_MODE_reg) + +/* + * Read word Processor State Register (PSR) to the structure + * Register fields access: fff = AS_STRACT(PSR).xxx + * Register entire access: PSR_entire = AS_WORD(PSR) + */ +#define BOOT_NATIVE_NV_READ_PSR_REG_VALUE() \ 
+ NATIVE_NV_READ_PSR_REG_VALUE() +#define NATIVE_NV_READ_PSR_REG() \ +({ \ + e2k_psr_t PSR; \ + PSR.PSR_reg = NATIVE_NV_READ_PSR_REG_VALUE(); \ + PSR; \ +}) +#define READ_PSR_REG() \ +({ \ + e2k_psr_t PSR; \ + PSR.PSR_reg = READ_PSR_REG_VALUE(); \ + PSR; \ +}) +#define BOOT_READ_PSR_REG() \ +({ \ + e2k_psr_t PSR; \ + PSR.PSR_reg = BOOT_READ_PSR_REG_VALUE(); \ + PSR; \ +}) + +static inline e2k_psr_t +read_PSR_reg(void) +{ + return READ_PSR_REG(); +} + +/* + * Write word Processor State Register (PSR) from the structure + * Register fields filling: AS_STRACT(PSR).xxx = fff + * Register entire filling: AS_WORD(PSR) = PSR_value + */ +#define BOOT_NATIVE_WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value) +#define NATIVE_WRITE_PSR_REG(PSR) NATIVE_WRITE_PSR_REG_VALUE(PSR.PSR_reg) +#define BOOT_NATIVE_WRITE_PSR_REG(PSR) \ + BOOT_NATIVE_WRITE_PSR_REG_VALUE(PSR.PSR_reg) +#define WRITE_PSR_REG(PSR) WRITE_PSR_REG_VALUE(PSR.PSR_reg) +#define BOOT_WRITE_PSR_REG(PSR) BOOT_WRITE_PSR_REG_VALUE(PSR.PSR_reg) + +static inline void +write_PSR_reg(e2k_psr_t PSR) +{ + WRITE_PSR_REG(PSR); +} + +/* + * Read word User Processor State Register (UPSR) to the structure + * Register fields access: fff = AS_STRACT(UPSR).xxx + * Register entire access: UPSR_entire = AS_WORD(UPSR) + */ +#define BOOT_NATIVE_NV_READ_UPSR_REG_VALUE() \ + NATIVE_NV_READ_UPSR_REG_VALUE() +#define NATIVE_NV_READ_UPSR_REG() \ +({ \ + e2k_upsr_t UPSR; \ + UPSR.UPSR_reg = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + UPSR; \ +}) +#define READ_UPSR_REG() \ +({ \ + e2k_upsr_t UPSR; \ + UPSR.UPSR_reg = READ_UPSR_REG_VALUE(); \ + UPSR; \ +}) + +static inline e2k_upsr_t +read_UPSR_reg(void) +{ + return READ_UPSR_REG(); +} + +/* + * Write word User Processor State Register (UPSR) from the structure + * Register fields filling: AS_STRACT(UPSR).xxx = fff + * Register entire filling: AS_WORD(UPSR) = UPSR_value + */ +#define BOOT_NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) +#define NATIVE_WRITE_UPSR_REG(UPSR) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg) +#define BOOT_NATIVE_WRITE_UPSR_REG(UPSR) \ + NATIVE_WRITE_UPSR_REG(UPSR) +#define WRITE_UPSR_REG(UPSR) \ + WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg) +#define BOOT_WRITE_UPSR_REG(UPSR) \ + BOOT_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg) + +static inline void +native_write_UPSR_reg(e2k_upsr_t UPSR) +{ + NATIVE_WRITE_UPSR_REG(UPSR); +} +static inline void +boot_native_write_UPSR_reg(e2k_upsr_t UPSR) +{ + BOOT_NATIVE_WRITE_UPSR_REG(UPSR); +} +static inline void +write_UPSR_reg(e2k_upsr_t UPSR) +{ + WRITE_UPSR_REG(UPSR); +} + +/* + * Read word floating point control registers (PFPFR/FPCR/FPSR) to the structure + * Register fields access: fff = PFnnn.yyy + * Register entire access: PFnnn_entire = PFnnn_value + */ +#define NATIVE_NV_READ_PFPFR_REG() \ +({ \ + e2k_pfpfr_t PFPFR; \ + PFPFR.PFPFR_reg = NATIVE_NV_READ_PFPFR_REG_VALUE(); \ + PFPFR; \ +}) +#define NATIVE_NV_READ_FPCR_REG() \ +({ \ + e2k_fpcr_t FPCR; \ + FPCR.FPCR_reg = NATIVE_NV_READ_FPCR_REG_VALUE(); \ + FPCR; \ +}) +#define NATIVE_NV_READ_FPSR_REG() \ +({ \ + e2k_fpsr_t FPSR; \ + FPSR.FPSR_reg = NATIVE_NV_READ_FPSR_REG_VALUE(); \ + FPSR; \ +}) +#define READ_PFPFR_REG() \ +({ \ + e2k_pfpfr_t PFPFR; \ + PFPFR.PFPFR_reg = READ_PFPFR_REG_VALUE(); \ + PFPFR; \ +}) +#define READ_FPCR_REG() \ +({ \ + e2k_fpcr_t FPCR; \ + FPCR.FPCR_reg = READ_FPCR_REG_VALUE(); \ + FPCR; \ +}) +#define READ_FPSR_REG() \ +({ \ + e2k_fpsr_t FPSR; \ + FPSR.FPSR_reg = READ_FPSR_REG_VALUE(); \ + FPSR; \ +}) +static inline e2k_pfpfr_t 
+native_nv_read_PFPFR_reg(void) +{ + return NATIVE_NV_READ_PFPFR_REG(); +} +static inline e2k_fpcr_t +native_nv_read_FPCR_reg(void) +{ + return NATIVE_NV_READ_FPCR_REG(); +} +static inline e2k_fpsr_t +native_nv_read_FPSR_reg(void) +{ + return NATIVE_NV_READ_FPSR_REG(); +} +static inline e2k_pfpfr_t +read_PFPFR_reg(void) +{ + return READ_PFPFR_REG(); +} +static inline e2k_fpcr_t +read_FPCR_reg(void) +{ + return READ_FPCR_REG(); +} +static inline e2k_fpsr_t +read_FPSR_reg(void) +{ + return READ_FPSR_REG(); +} + +/* + * Write word floating point control registers (PFPFR/FPCR/FPSR) + * from the structure + * Register fields filling: PFnnn.xxx = fff + * Register entire filling: PFnnn = PFnnn_value + */ +#define NATIVE_NV_WRITE_PFPFR_REG(PFPFR) \ + NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR.PFPFR_reg) +#define NATIVE_NV_WRITE_FPCR_REG(FPCR) \ + NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR.FPCR_reg) +#define NATIVE_NV_WRITE_FPSR_REG(FPSR) \ + NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR.FPSR_reg) +#define WRITE_PFPFR_REG(PFPFR) \ + WRITE_PFPFR_REG_VALUE(PFPFR.PFPFR_reg) +#define WRITE_FPCR_REG(FPCR) WRITE_FPCR_REG_VALUE(FPCR.FPCR_reg) +#define WRITE_FPSR_REG(FPSR) WRITE_FPSR_REG_VALUE(FPSR.FPSR_reg) +static inline void +native_nv_write_PFPFR_reg(e2k_pfpfr_t PFPFR) +{ + NATIVE_NV_WRITE_PFPFR_REG(PFPFR); +} +static inline void +native_nv_write_FPCR_reg(e2k_fpcr_t FPCR) +{ + NATIVE_NV_WRITE_FPCR_REG(FPCR); +} +static inline void +native_nv_write_FPSR_reg(e2k_fpsr_t FPSR) +{ + NATIVE_NV_WRITE_FPSR_REG(FPSR); +} +static inline void +write_PFPFR_reg(e2k_pfpfr_t PFPFR) +{ + WRITE_PFPFR_REG(PFPFR); +} +static inline void +write_FPCR_reg(e2k_fpcr_t FPCR) +{ + WRITE_FPCR_REG(FPCR); +} +static inline void +write_FPSR_reg(e2k_fpsr_t FPSR) +{ + WRITE_FPSR_REG(FPSR); +} + +/* + * Read doubleword Processor Identification Register (IDR) to the structure + * Register fields access: fff = AS_STRACT(IDR).xxx or + * fff = IDR.IDR_xxx + * Register entire access: IDR_entire = AS_WORD(IDR) or + * IDR_entire = IDR.IDR_reg + */ +#define NATIVE_READ_IDR_REG() \ +({ \ + e2k_idr_t IDR; \ + AS_WORD(IDR) = NATIVE_READ_IDR_REG_VALUE(); \ + IDR; \ +}) +#define BOOT_NATIVE_READ_IDR_REG() NATIVE_READ_IDR_REG() +static inline e2k_idr_t +native_read_IDR_reg(void) +{ + return NATIVE_READ_IDR_REG(); +} +static inline e2k_idr_t +boot_native_read_IDR_reg(void) +{ + return BOOT_NATIVE_READ_IDR_REG(); +} +#define READ_IDR_REG() \ +({ \ + e2k_idr_t IDR; \ + AS_WORD(IDR) = READ_IDR_REG_VALUE(); \ + IDR; \ +}) +#define BOOT_READ_IDR_REG() \ +({ \ + e2k_idr_t IDR; \ + AS_WORD(IDR) = BOOT_READ_IDR_REG_VALUE(); \ + IDR; \ +}) +static inline e2k_idr_t +read_IDR_reg(void) +{ + return READ_IDR_REG(); +} +static inline e2k_idr_t +boot_read_IDR_reg(void) +{ + return BOOT_READ_IDR_REG(); +} + +static inline instr_cs0_t *find_cs0(void *ip) +{ + instr_hs_t *hs; + + hs = (instr_hs_t *) &E2K_GET_INSTR_HS(ip); + if (!hs->c0) + return NULL; + + return (instr_cs0_t *) (hs + hs->s + hweight32(hs->al) + 1); +} + +static inline instr_cs1_t *find_cs1(void *ip) +{ + instr_hs_t *hs; + + hs = (instr_hs_t *) &E2K_GET_INSTR_HS(ip); + if (!hs->c1) + return NULL; + + return (instr_cs1_t *) (hs + hs->mdl); +} + +static inline int get_instr_size_by_vaddr(unsigned long addr) +{ + int instr_size; + instr_syl_t *syl; + instr_hs_t hs; + + syl = &E2K_GET_INSTR_HS((e2k_addr_t)addr); + hs.word = *syl; + instr_size = E2K_GET_INSTR_SIZE(hs); + + return instr_size; +} + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_CPU_REGS_H_ */ diff --git a/arch/e2k/include/asm/cpu_regs_access.h b/arch/e2k/include/asm/cpu_regs_access.h new file mode 100644 index 0000000..81e49e1 --- /dev/null +++ b/arch/e2k/include/asm/cpu_regs_access.h @@ -0,0 +1,549 @@ + +#ifndef _E2K_CPU_REGS_ACCESS_H_ +#define _E2K_CPU_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +#if CONFIG_CPU_ISET >= 3 +# define native_read_CORE_MODE_reg_value() \ + NATIVE_READ_CORE_MODE_REG_VALUE() +# define native_write_CORE_MODE_reg_value(modes) \ + NATIVE_WRITE_CORE_MODE_REG_VALUE((modes)) +#else +# define native_read_CORE_MODE_reg_value() \ + (machine.rrd(E2K_REG_CORE_MODE)) +# define native_write_CORE_MODE_reg_value(modes) \ + (machine.rwd(E2K_REG_CORE_MODE, modes)) +#endif +#define native_read_OSCUTD_reg_value() \ + (machine.rrd(E2K_REG_OSCUTD)) +#define native_write_OSCUTD_reg_value(modes) \ + (machine.rwd(E2K_REG_OSCUTD, modes)) +#define native_read_OSCUIR_reg_value() \ + (machine.rrd(E2K_REG_OSCUIR)) +#define native_write_OSCUIR_reg_value(v) \ + (machine.rwd(E2K_REG_OSCUIR, v)) + +#define boot_native_read_CORE_MODE_reg_value() \ +({ \ + typeof(boot_machine.boot_rrd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rrd); \ + func(E2K_REG_CORE_MODE); \ +}) +#define boot_native_write_CORE_MODE_reg_value(modes) \ +({ \ + typeof(boot_machine.boot_rwd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rwd); \ + func(E2K_REG_CORE_MODE, modes); \ +}) +#define boot_native_read_OSCUTD_reg_value() \ +({ \ + typeof(boot_machine.boot_rrd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rrd); \ + func(E2K_REG_OSCUTD); \ +}) +#define boot_native_write_OSCUTD_reg_value(v) \ +({ \ + typeof(boot_machine.boot_rwd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rwd); \ + func(E2K_REG_OSCUTD, (v)); \ +}) +#define boot_native_read_OSCUIR_reg_value() \ +({ \ + typeof(boot_machine.boot_rrd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rrd); \ + func(E2K_REG_OSCUIR); \ +}) +#define boot_native_write_OSCUIR_reg_value(v) \ +({ \ + typeof(boot_machine.boot_rwd) func; \ + func = boot_native_vp_to_pp(boot_machine.boot_rwd); \ + func(E2K_REG_OSCUIR, v); \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +/* + * Set flags of updated VCPU registers + */ +#define PUT_UPDATED_CPU_REGS_FLAGS(flags) + +/* + * Read/write word Procedure Stack Hardware Top Pointer (PSHTP) + */ +#define READ_PSHTP_REG_VALUE() NATIVE_NV_READ_PSHTP_REG_VALUE() + +#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \ + NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value) + +/* + * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP) + */ +#define READ_PCSHTP_REG_SVALUE() NATIVE_READ_PCSHTP_REG_SVALUE() + +#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \ + NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD) + */ + +#define READ_OSCUD_LO_REG_VALUE() NATIVE_READ_OSCUD_LO_REG_VALUE() +#define READ_OSCUD_HI_REG_VALUE() NATIVE_READ_OSCUD_HI_REG_VALUE() +#define BOOT_READ_OSCUD_LO_REG_VALUE() NATIVE_READ_OSCUD_LO_REG_VALUE() +#define BOOT_READ_OSCUD_HI_REG_VALUE() NATIVE_READ_OSCUD_HI_REG_VALUE() 
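+ +/* + * Usage sketch (illustrative only, compiled out): with the native + * mappings above, generic code can read a register value and view it + * through its structure type from cpu_regs_types.h. The helper name + * example_read_oscud_base() is hypothetical and not part of this file. + */ +#if 0 +static inline unsigned long example_read_oscud_base(void) +{ + e2k_oscud_lo_t OSCUD_lo; + + OSCUD_lo.OSCUD_lo_half = READ_OSCUD_LO_REG_VALUE(); + return OSCUD_lo.OSCUD_lo_base; /* bits [47:0] - base address */ +} +#endif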
 + +#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) +#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) +#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) + +/* + * Read/write low/high double-word OS Globals Register (OSGD) + */ + +#define READ_OSGD_LO_REG_VALUE() NATIVE_READ_OSGD_LO_REG_VALUE() +#define READ_OSGD_HI_REG_VALUE() NATIVE_READ_OSGD_HI_REG_VALUE() +#define BOOT_READ_OSGD_LO_REG_VALUE() NATIVE_READ_OSGD_LO_REG_VALUE() +#define BOOT_READ_OSGD_HI_REG_VALUE() NATIVE_READ_OSGD_HI_REG_VALUE() + +#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) +#define BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) + +/* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define READ_CUD_LO_REG_VALUE() NATIVE_READ_CUD_LO_REG_VALUE() +#define READ_CUD_HI_REG_VALUE() NATIVE_READ_CUD_HI_REG_VALUE() +#define BOOT_READ_CUD_LO_REG_VALUE() NATIVE_READ_CUD_LO_REG_VALUE() +#define BOOT_READ_CUD_HI_REG_VALUE() NATIVE_READ_CUD_HI_REG_VALUE() + +#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) +#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define READ_GD_LO_REG_VALUE() NATIVE_READ_GD_LO_REG_VALUE() +#define READ_GD_HI_REG_VALUE() NATIVE_READ_GD_HI_REG_VALUE() +#define BOOT_READ_GD_LO_REG_VALUE() NATIVE_READ_GD_LO_REG_VALUE() +#define BOOT_READ_GD_HI_REG_VALUE() NATIVE_READ_GD_HI_REG_VALUE() + +#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value) +#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define READ_PSP_LO_REG_VALUE() NATIVE_NV_READ_PSP_LO_REG_VALUE() +#define READ_PSP_HI_REG_VALUE() NATIVE_NV_READ_PSP_HI_REG_VALUE() +#define BOOT_READ_PSP_LO_REG_VALUE() NATIVE_NV_READ_PSP_LO_REG_VALUE() +#define BOOT_READ_PSP_HI_REG_VALUE() NATIVE_NV_READ_PSP_HI_REG_VALUE() + +#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define READ_PCSP_LO_REG_VALUE() NATIVE_NV_READ_PCSP_LO_REG_VALUE() +#define 
READ_PCSP_HI_REG_VALUE() NATIVE_NV_READ_PCSP_HI_REG_VALUE() +#define BOOT_READ_PCSP_LO_REG_VALUE() NATIVE_NV_READ_PCSP_LO_REG_VALUE() +#define BOOT_READ_PCSP_HI_REG_VALUE() NATIVE_NV_READ_PCSP_HI_REG_VALUE() + +#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define READ_CR0_LO_REG_VALUE() NATIVE_NV_READ_CR0_LO_REG_VALUE() +#define READ_CR0_HI_REG_VALUE() NATIVE_NV_READ_CR0_HI_REG_VALUE() +#define READ_CR1_LO_REG_VALUE() NATIVE_NV_READ_CR1_LO_REG_VALUE() +#define READ_CR1_HI_REG_VALUE() NATIVE_NV_READ_CR1_HI_REG_VALUE() + +#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define READ_CTPR_REG_VALUE(reg_no) NATIVE_NV_READ_CTPR_REG_VALUE(reg_no) + +#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define READ_USD_LO_REG_VALUE() NATIVE_NV_READ_USD_LO_REG_VALUE() +#define READ_USD_HI_REG_VALUE() NATIVE_NV_READ_USD_HI_REG_VALUE() +#define BOOT_READ_USD_LO_REG_VALUE() NATIVE_NV_READ_USD_LO_REG_VALUE() +#define BOOT_READ_USD_HI_REG_VALUE() NATIVE_NV_READ_USD_HI_REG_VALUE() + +#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define READ_PUSD_LO_REG_VALUE() NATIVE_NV_READ_PUSD_LO_REG_VALUE() +#define READ_PUSD_HI_REG_VALUE() NATIVE_NV_READ_PUSD_HI_REG_VALUE() + +#define WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) +#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define READ_USBR_REG_VALUE() NATIVE_NV_READ_USBR_REG_VALUE() +#define READ_SBR_REG_VALUE() NATIVE_NV_READ_SBR_REG_VALUE() +#define BOOT_READ_USBR_REG_VALUE() NATIVE_NV_READ_USBR_REG_VALUE() +#define BOOT_READ_SBR_REG_VALUE() NATIVE_NV_READ_SBR_REG_VALUE() + +#define WRITE_USBR_REG_VALUE(USBR_value) \ + NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value) +#define WRITE_SBR_REG_VALUE(SBR_value) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value) +#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \ + NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \ + 
NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define READ_WD_REG_VALUE() NATIVE_READ_WD_REG_VALUE() + +#define WRITE_WD_REG_VALUE(WD_value) \ + NATIVE_WRITE_WD_REG_VALUE(WD_value) + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define READ_LSR_REG_VALUE() NATIVE_READ_LSR_REG_VALUE() + +#define WRITE_LSR_REG_VALUE(LSR_value) \ + NATIVE_WRITE_LSR_REG_VALUE(LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) + */ +#define READ_ILCR_REG_VALUE() NATIVE_READ_ILCR_REG_VALUE() + +#define WRITE_ILCR_REG_VALUE(ILCR_value) \ + NATIVE_WRITE_ILCR_REG_VALUE(ILCR_value) + +/* + * Write double-word LSR/ILCR registers in complex + */ +#define WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr, lsr1, ilcr, ilcr1) \ + NATIVE_WRITE_LSR_LSR1_ILCR_ILCR1_REGS_VALUE(lsr, lsr1, \ + ilcr, ilcr1) +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +/* + * Read/write OS register which points to current process thread info + * structure (OSR0) + */ +#define READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() +#define BOOT_READ_CURRENT_REG_VALUE() NATIVE_NV_READ_OSR0_REG_VALUE() + +#define WRITE_CURRENT_REG_VALUE(osr0_value) \ + NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value) +#define BOOT_WRITE_CURRENT_REG_VALUE(osr0_value) \ + NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define READ_OSEM_REG_VALUE() NATIVE_READ_OSEM_REG_VALUE() + +#define WRITE_OSEM_REG_VALUE(osem_value) \ + NATIVE_WRITE_OSEM_REG_VALUE(osem_value) + +/* + * Read/write word Base Global Register (BGR) + */ +#define READ_BGR_REG_VALUE() NATIVE_READ_BGR_REG_VALUE() +#define BOOT_READ_BGR_REG_VALUE() NATIVE_READ_BGR_REG_VALUE() + +#define WRITE_BGR_REG_VALUE(BGR_value) \ + NATIVE_WRITE_BGR_REG_VALUE(BGR_value) +#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \ + NATIVE_WRITE_BGR_REG_VALUE(BGR_value) + +/* + * Read CPU current clock register (CLKR) + */ +#define READ_CLKR_REG_VALUE() NATIVE_READ_CLKR_REG_VALUE() + +/* + * Read/Write system clock registers (SCLKM) + */ +#define READ_SCLKR_REG_VALUE() NATIVE_READ_SCLKR_REG_VALUE() +#define READ_SCLKM1_REG_VALUE() NATIVE_READ_SCLKM1_REG_VALUE() +#define READ_SCLKM2_REG_VALUE() NATIVE_READ_SCLKM2_REG_VALUE() +#define READ_SCLKM3_REG_VALUE() NATIVE_READ_SCLKM3_REG_VALUE() + +#define WRITE_SCLKR_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKR_REG_VALUE(reg_value) +#define WRITE_SCLKM1_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKM1_REG_VALUE(reg_value) +#define WRITE_SCLKM2_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKM2_REG_VALUE(reg_value) +#define WRITE_SCLKM3_REG_VALUE(reg_value) \ + NATIVE_WRITE_SCLKM3_REG_VALUE(reg_value) + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +#define READ_CU_HW0_REG_VALUE() NATIVE_READ_CU_HW0_REG_VALUE() +#define READ_CU_HW1_REG_VALUE() NATIVE_READ_CU_HW1_REG_VALUE() + +#define WRITE_CU_HW0_REG_VALUE(reg) NATIVE_WRITE_CU_HW0_REG_VALUE(reg) +#define WRITE_CU_HW1_REG_VALUE(reg) NATIVE_WRITE_CU_HW1_REG_VALUE(reg) + +/* + * Read/write low/high double-word Recovery point register (RPR) + */ +#define READ_RPR_LO_REG_VALUE() NATIVE_READ_RPR_LO_REG_VALUE() +#define READ_RPR_HI_REG_VALUE() NATIVE_READ_RPR_HI_REG_VALUE() +#define READ_SBBP_REG_VALUE() NATIVE_READ_SBBP_REG_VALUE() + +#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \ + NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) +#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \ + NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) + +/* + * Read double-word 
CPU current Instruction Pointer register (IP) + */ +#define READ_IP_REG_VALUE() NATIVE_NV_READ_IP_REG_VALUE() + +/* + * Read debug and monitors registers + */ +#define READ_DIBCR_REG_VALUE() NATIVE_READ_DIBCR_REG_VALUE() +#define READ_DIBSR_REG_VALUE() NATIVE_READ_DIBSR_REG_VALUE() +#define READ_DIMCR_REG_VALUE() NATIVE_READ_DIMCR_REG_VALUE() +#define READ_DIBAR0_REG_VALUE() NATIVE_READ_DIBAR0_REG_VALUE() +#define READ_DIBAR1_REG_VALUE() NATIVE_READ_DIBAR1_REG_VALUE() +#define READ_DIBAR2_REG_VALUE() NATIVE_READ_DIBAR2_REG_VALUE() +#define READ_DIBAR3_REG_VALUE() NATIVE_READ_DIBAR3_REG_VALUE() +#define READ_DIMAR0_REG_VALUE() NATIVE_READ_DIMAR0_REG_VALUE() +#define READ_DIMAR1_REG_VALUE() NATIVE_READ_DIMAR1_REG_VALUE() + +#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \ + NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR_value) +#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \ + NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR_value) +#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \ + NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR_value) +#define WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \ + NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) +#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \ + NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) +#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \ + NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) +#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) +#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) +#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) + +/* + * Read/write double-word Compilation Unit Table Register (CUTD) + */ +#define READ_CUTD_REG_VALUE() NATIVE_NV_READ_CUTD_REG_VALUE() + +#define WRITE_CUTD_REG_VALUE(CUTD_value) \ + NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value) + +/* + * Read word Compilation Unit Index Register (CUIR) + */ +#define READ_CUIR_REG_VALUE() NATIVE_READ_CUIR_REG_VALUE() + +/* + * Read/write word Processor State Register (PSR) + */ +#define READ_PSR_REG_VALUE() NATIVE_NV_READ_PSR_REG_VALUE() +#define BOOT_READ_PSR_REG_VALUE() NATIVE_NV_READ_PSR_REG_VALUE() + +#define WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value) +#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value) +#define WRITE_PSR_IRQ_BARRIER(PSR_value) \ + NATIVE_WRITE_PSR_IRQ_BARRIER(PSR_value) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define READ_UPSR_REG_VALUE() NATIVE_NV_READ_UPSR_REG_VALUE() +#define BOOT_READ_UPSR_REG_VALUE() NATIVE_NV_READ_UPSR_REG_VALUE() + +#define WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) +#define WRITE_UPSR_IRQ_BARRIER(UPSR_value) \ + NATIVE_WRITE_UPSR_IRQ_BARRIER(UPSR_value) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define READ_PFPFR_REG_VALUE() NATIVE_NV_READ_PFPFR_REG_VALUE() +#define READ_FPCR_REG_VALUE() NATIVE_NV_READ_FPCR_REG_VALUE() +#define READ_FPSR_REG_VALUE() NATIVE_NV_READ_FPSR_REG_VALUE() + +#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR_value) +#define WRITE_FPCR_REG_VALUE(FPCR_value) \ + NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR_value) +#define WRITE_FPSR_REG_VALUE(FPSR_value) \ + NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR_value) + +/* + * Read/write low/high double-word Intel segment registers (xS) + */ + +#define READ_CS_LO_REG_VALUE() NATIVE_READ_CS_LO_REG_VALUE() +#define 
READ_CS_HI_REG_VALUE() NATIVE_READ_CS_HI_REG_VALUE() +#define READ_DS_LO_REG_VALUE() NATIVE_READ_DS_LO_REG_VALUE() +#define READ_DS_HI_REG_VALUE() NATIVE_READ_DS_HI_REG_VALUE() +#define READ_ES_LO_REG_VALUE() NATIVE_READ_ES_LO_REG_VALUE() +#define READ_ES_HI_REG_VALUE() NATIVE_READ_ES_HI_REG_VALUE() +#define READ_FS_LO_REG_VALUE() NATIVE_READ_FS_LO_REG_VALUE() +#define READ_FS_HI_REG_VALUE() NATIVE_READ_FS_HI_REG_VALUE() +#define READ_GS_LO_REG_VALUE() NATIVE_READ_GS_LO_REG_VALUE() +#define READ_GS_HI_REG_VALUE() NATIVE_READ_GS_HI_REG_VALUE() +#define READ_SS_LO_REG_VALUE() NATIVE_READ_SS_LO_REG_VALUE() +#define READ_SS_HI_REG_VALUE() NATIVE_READ_SS_HI_REG_VALUE() + +#define WRITE_CS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_CS_LO_REG_VALUE(sd) +#define WRITE_CS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_CS_HI_REG_VALUE(sd) +#define WRITE_DS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_DS_LO_REG_VALUE(sd) +#define WRITE_DS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_DS_HI_REG_VALUE(sd) +#define WRITE_ES_LO_REG_VALUE(sd) NATIVE_CL_WRITE_ES_LO_REG_VALUE(sd) +#define WRITE_ES_HI_REG_VALUE(sd) NATIVE_CL_WRITE_ES_HI_REG_VALUE(sd) +#define WRITE_FS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_FS_LO_REG_VALUE(sd) +#define WRITE_FS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_FS_HI_REG_VALUE(sd) +#define WRITE_GS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_GS_LO_REG_VALUE(sd) +#define WRITE_GS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_GS_HI_REG_VALUE(sd) +#define WRITE_SS_LO_REG_VALUE(sd) NATIVE_CL_WRITE_SS_LO_REG_VALUE(sd) +#define WRITE_SS_HI_REG_VALUE(sd) NATIVE_CL_WRITE_SS_HI_REG_VALUE(sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define READ_IDR_REG_VALUE() NATIVE_READ_IDR_REG_VALUE() +#define BOOT_READ_IDR_REG_VALUE() NATIVE_READ_IDR_REG_VALUE() + +/* + * Processor Core Mode Register (CORE_MODE) + */ +#define READ_CORE_MODE_REG_VALUE() native_read_CORE_MODE_reg_value() +#define BOOT_READ_CORE_MODE_REG_VALUE() boot_native_read_CORE_MODE_reg_value() +#define WRITE_CORE_MODE_REG_VALUE(modes) \ + native_write_CORE_MODE_reg_value(modes) +#define BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \ + boot_native_write_CORE_MODE_reg_value(modes) + +/* + * OS Compilation Unit Table Descriptor Register (OSCUTD) + */ +#define READ_OSCUTD_REG_VALUE() native_read_OSCUTD_reg_value() +#define BOOT_READ_OSCUTD_REG_VALUE() boot_native_read_OSCUTD_reg_value() +#define WRITE_OSCUTD_REG_VALUE(desc) \ + native_write_OSCUTD_reg_value(desc) +#define BOOT_WRITE_OSCUTD_REG_VALUE(desc) \ + boot_native_write_OSCUTD_reg_value((desc)) + +/* + * OS Compilation Unit Index Register (OSCUIR) + */ +#define READ_OSCUIR_REG_VALUE() native_read_OSCUIR_reg_value() +#define WRITE_OSCUIR_REG_VALUE(v) native_write_OSCUIR_reg_value((v)) +#define BOOT_READ_OSCUIR_REG_VALUE() boot_native_read_OSCUIR_reg_value() +#define BOOT_WRITE_OSCUIR_REG_VALUE(v) boot_native_write_OSCUIR_reg_value((v)) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_CPU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/cpu_regs_types.h b/arch/e2k/include/asm/cpu_regs_types.h new file mode 100644 index 0000000..398a05e --- /dev/null +++ b/arch/e2k/include/asm/cpu_regs_types.h @@ -0,0 +1,2435 @@ + +#ifndef _E2K_CPU_REGS_TYPES_H_ +#define _E2K_CPU_REGS_TYPES_H_ + +#ifdef __KERNEL__ + +#include + +#ifndef __ASSEMBLY__ + +/* E2K physical address definitions */ +#define MAX_PA_SIZE 40 /* E2K physical address size */ + /* (bits number) */ +#define MAX_PA_MSB (MAX_PA_SIZE - 1) /* The number of the most */ + /* significant bit of E2K */ + /* physical address */ 
+#define MAX_PA_MASK ((1UL << MAX_PA_SIZE) - 1) +#define MAX_PM_SIZE (1UL << MAX_PA_SIZE) + +/* E2K virtual address definitions */ +#define MAX_VA_SIZE 59 /* Virtual address maximum */ + /* size (bits number) */ +#define MAX_VA_MSB (MAX_VA_SIZE -1) /* The maximum number of the */ + /* most significant bit of */ + /* virtual address */ +#define MAX_VA_MASK ((1UL << MAX_VA_SIZE) - 1) + +#define E2K_VA_SIZE 48 /* E2K Virtual address size */ + /* (bits number) */ +#define E2K_VA_MSB (E2K_VA_SIZE - 1) /* The number of the most */ + /* significant bit of E2K */ + /* virtual address */ +#define E2K_VA_MASK ((1UL << E2K_VA_SIZE) - 1) + +#define E2K_VA_PAGE_MASK (E2K_VA_MASK & PAGE_MASK) + + +/* + * Read/Write Pointer (RWP) (64 bits) + */ +typedef struct e2k_rwp_fields { /* Structure of Read/write pointer */ + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused2 : 53 - E2K_VA_MSB; /* [53:48] */ + u64 stub5 : 1; /* [54] */ + u64 stub4 : 1; /* [55] */ + u64 stub3 : 1; /* [56] */ + u64 stub2 : 1; /* [57] */ + u64 stub1 : 1; /* [58] */ + u64 unused : 5; /* [63:59] */ +} e2k_rwp_fields_t; +typedef union e2k_rwp_struct { /* Structure of lower word */ + e2k_rwp_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rwp_struct_t; +#define E2K_RWP_stub1 fields.stub1 +#define E2K_RWP_stub2 fields.stub2 +#define E2K_RWP_stub3 fields.stub3 +#define E2K_RWP_stub4 fields.stub4 +#define E2K_RWP_stub5 fields.stub5 +#define E2K_RWP_base fields.base +#define E2K_RWP_reg word + +/* + * Read/Write Array Pointer (RWAP) + */ +typedef struct e2k_rwap_lo_fields { /* Fields of lower word */ + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused2 : 55 - E2K_VA_MSB; /* [55:48] */ + u64 stub3 : 1; /* [56] */ + u64 stub2 : 1; /* [57] */ + u64 stub1 : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ +} e2k_rwap_lo_fields_t; +typedef struct e2k_rusd_lo_fields { /* Fields of lower word */ + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused2 : 57 - E2K_VA_MSB; /* [57:48] */ + u64 p : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 unused : 3; /* [63:61] */ +} e2k_rusd_lo_fields_t; +typedef union e2k_rwap_lo_struct { /* Structure of lower word */ + e2k_rwap_lo_fields_t ap_fields; /* as AP fields */ + e2k_rusd_lo_fields_t fields; /* as USD fields */ + u64 word; /* as entire register */ +} e2k_rwap_lo_struct_t; +#define E2K_RWAP_lo_itag ap_fields.itag +#define E2K_RWAP_lo_rw ap_fields.rw +#define E2K_RWAP_lo_stub1 ap_fields.stub1 +#define E2K_RWAP_lo_stub2 ap_fields.stub2 +#define E2K_RWAP_lo_stub3 ap_fields.stub3 +#define E2K_RWAP_lo_base ap_fields.base +#define E2K_RUSD_lo_rw fields.rw +#define E2K_RUSD_lo_p fields.p +#define E2K_RUSD_lo_p_bit 58 /* do not forget to modify if changed */ +#define E2K_RUSD_lo_base fields.base +#define E2K_RWAP_lo_half word +#define E2K_RUSD_lo_half word + +typedef struct e2k_rwap_hi_fields { /* Fields of high word */ + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ +} e2k_rwap_hi_fields_t; +typedef struct e2k_rpsp_hi_fields { /* Fields of high word */ + u64 ind : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ +} e2k_rpsp_hi_fields_t; +typedef union e2k_rwap_hi_struct { /* Structure of high word */ + e2k_rwap_hi_fields_t ap_fields; /* as AP fields */ + e2k_rpsp_hi_fields_t fields; /* as PSP fields */ + u64 word; /* as entire register */ +} e2k_rwap_hi_struct_t; +#define E2K_RWAP_hi_size ap_fields.size +#define E2K_RWAP_hi_curptr ap_fields.curptr +#define E2K_RWAP_hi_half word +#define E2K_RPSP_hi_size fields.size +#define E2K_RPSP_hi_ind 
fields.ind +#define E2K_RPSP_hi_half word + +typedef struct e2k_rwap_struct { /* quad-word register */ + e2k_rwap_lo_struct_t lo; + e2k_rwap_hi_struct_t hi; +} e2k_rwap_struct_t; +#define E2K_RWAP_lo_struct lo +#define E2K_RUSD_lo_struct lo +#define E2K_RWAP_hi_struct hi +#define E2K_RPSP_hi_struct hi +#define E2K_RWAP_itag lo.E2K_RWAP_lo_itag +#define E2K_RWAP_rw lo.E2K_RWAP_lo_rw +#define E2K_RWAP_stub1 lo.E2K_RWAP_lo_stub1 +#define E2K_RWAP_stub2 lo.E2K_RWAP_lo_stub2 +#define E2K_RWAP_stub3 lo.E2K_RWAP_lo_stub3 +#define E2K_RWAP_base lo.E2K_RWAP_lo_base +#define E2K_RUSD_rw lo.E2K_RUSD_lo_rw +#define E2K_RUSD_p lo.E2K_RUSD_lo_p +#define E2K_RUSD_p_bit E2K_RUSD_lo_p_bit /* protected flag */ +#define E2K_RUSD_p_flag (1 << E2K_RUSD_p_bit) /* as value */ +#define E2K_RUSD_base lo.E2K_RUSD_lo_base +#define E2K_RWAP_size hi.E2K_RWAP_hi_size +#define E2K_RWAP_curptr hi.E2K_RWAP_hi_curptr +#define E2K_RPSP_size hi.E2K_RPSP_hi_size +#define E2K_RPSP_ind hi.E2K_RPSP_hi_ind +#define E2K_RWAP_lo_reg lo.E2K_RWAP_lo_half +#define E2K_RUSD_lo_reg lo.E2K_RUSD_lo_half +#define E2K_RWAP_hi_reg hi.E2K_RWAP_hi_half +#define E2K_RPSP_hi_reg hi.E2K_RPSP_hi_half + +#define E2_RWAR_R_ENABLE 0x1 +#define E2_RWAR_W_ENABLE 0x2 +#define E2_RWAR_RW_ENABLE (E2_RWAR_R_ENABLE | E2_RWAR_W_ENABLE) +#define E2_RWAR_C_TRUE 0x1 + +#define R_ENABLE 0x1 +#define W_ENABLE 0x2 +#define RW_ENABLE 0x3 + +/* + * Read/Write Stack Array Pointer (RWSAP) + */ +typedef struct e2k_rwsap_lo_fields { /* Fields of lower word */ + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused2 : 8; /* [55:48] */ + u64 stub3 : 1; /* [56] */ + u64 stub2 : 1; /* [57] */ + u64 stub1 : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ +} e2k_rwsap_lo_fields_t; +typedef struct e2k_rpusd_lo_fields { /* Fields of lower word */ + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused2 : 10; /* [57:48] */ + u64 p : 1; /* [58] */ + u64 rw : 2; /* [60:59] */ + u64 unused : 3; /* [63:61] */ +} e2k_rpusd_lo_fields_t; +typedef union e2k_rwsap_lo_struct { /* Structure of lower word */ + e2k_rwsap_lo_fields_t sap_fields; /* as SAP fields */ + e2k_rpusd_lo_fields_t fields; /* as PUSD fields */ + u64 word; /* as entire register */ +} e2k_rwsap_lo_struct_t; +#define E2K_RWSAP_lo_itag sap_fields.itag +#define E2K_RWSAP_lo_rw sap_fields.rw +#define E2K_RWSAP_lo_stub1 sap_fields.stub1 +#define E2K_RWSAP_lo_stub2 sap_fields.stub2 +#define E2K_RWSAP_lo_stub3 sap_fields.stub3 +#define E2K_RWSAP_lo_psl sap_fields.psl +#define E2K_RWSAP_lo_base sap_fields.base +#define E2K_RPUSD_lo_rw fields.rw +#define E2K_RPUSD_lo_p fields.p +#define E2K_RPUSD_lo_psl fields.psl +#define E2K_RPUSD_lo_base fields.base +#define E2K_RWSAP_lo_half word +#define E2K_RPUSD_lo_half word + +typedef struct e2k_rwsap_hi_fields { /* Fields of high word */ + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ +} e2k_rwsap_hi_fields_t; +typedef union e2k_rwsap_hi_struct { /* Structure of high word */ + e2k_rwsap_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rwsap_hi_struct_t; +#define E2K_RWSAP_hi_size fields.size +#define E2K_RWSAP_hi_curptr fields.curptr +#define E2K_RWSAP_hi_half word + +typedef struct e2k_rwsap_struct { /* quad-word register */ + e2k_rwsap_lo_struct_t lo; + e2k_rwsap_hi_struct_t hi; +} e2k_rwsap_struct_t; +#define E2K_RWSAP_lo_struct lo +#define E2K_RPUSD_lo_struct lo +#define E2K_RWSAP_hi_struct hi +#define E2K_RWSAP_itag lo.E2K_RWSAP_lo_itag +#define E2K_RWSAP_rw lo.E2K_RWSAP_lo_rw 
+#define E2K_RWSAP_stub1 lo.E2K_RWSAP_lo_stub1 +#define E2K_RWSAP_stub2 lo.E2K_RWSAP_lo_stub2 +#define E2K_RWSAP_stub3 lo.E2K_RWSAP_lo_stub3 +#define E2K_RWSAP_psl lo.E2K_RWSAP_lo_psl +#define E2K_RWSAP_base lo.E2K_RWSAP_lo_base +#define E2K_RPUSD_rw lo.E2K_RPUSD_lo_rw +#define E2K_RPUSD_p lo.E2K_RPUSD_lo_p +#define E2K_RPUSD_psl lo.E2K_RPUSD_lo_psl +#define E2K_RPUSD_base lo.E2K_RPUSD_lo_base +#define E2K_RWSAP_size hi.E2K_RWSAP_hi_size +#define E2K_RWSAP_curptr hi.E2K_RWSAP_hi_curptr +#define E2K_RWSAP_lo_reg lo.E2K_RWSAP_lo_half +#define E2K_RPUSD_lo_reg lo.E2K_RPUSD_lo_half +#define E2K_RWSAP_hi_reg hi.E2K_RWSAP_hi_half + +/* + * Compilation Unit Descriptor (CUD) + * describes the memory containing codes of the current compilation unit + */ + + /* + * Structure of lower word + * access CUD.lo.CUD_lo_xxx or CUD -> lo.CUD_lo_xxx + * or CUD_lo.CUD_lo_xxx or CUD_lo -> CUD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_cud_lo_t; +#define _CUD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define E2K_CUD_RW_PROTECTIONS E2_RWAR_R_ENABLE +#define CUD_lo_c E2K_RWAP_lo_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define E2K_CUD_CHECKED_FLAG E2_RWAR_C_TRUE +#define CUD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define CUD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + /* + * Structure of high word + * access CUD.hi.CUD_hi_xxx or CUD -> hi.CUD_hi_xxx + * or CUD_hi.CUD_hi_xxx or CUD_hi -> CUD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_cud_hi_t; +#define CUD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _CUD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define CUD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access CUD.CUD_xxx or CUD -> CUD_xxx + */ +typedef e2k_rwap_struct_t cud_struct_t; +#define _CUD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define CUD_c E2K_RWAP_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define CUD_base E2K_RWAP_base /* [47: 0] - base address */ +#define CUD_size E2K_RWAP_size /* [63:32] - size */ +#define _CUD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define CUD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define CUD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define CUD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define CUD_hi_struct E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_CODES 12 /* Codes area boundaries */ + /* alignment (2's exponent */ + /* value) */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_CODES_MASK ((1UL << E2K_ALIGN_CODES) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_CODES_MASK ((1 << E2K_ALIGN_CODES) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * Compilation Unit Globals Descriptor (GD) + * describes the global variables memory of the current compilation unit + */ + + /* + * Structure of lower word + * access GD.lo.GD_lo_xxx or GD -> lo.GD_lo_xxx + * or GD_lo.GD_lo_xxx or GD_lo -> GD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_gd_lo_t; +#define _GD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define E2K_GD_RW_PROTECTIONS E2_RWAR_RW_ENABLE +#define GD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define GD_lo_half E2K_RWAP_lo_half 
/* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access GD.hi.GD_hi_xxx or GD -> hi.GD_hi_xxx + * or GD_hi.GD_hi_xxx or GD_hi -> GD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_gd_hi_t; +#define GD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _GD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define GD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access GD.GD_xxx or GD -> GD_xxx + */ +typedef e2k_rwap_struct_t gd_struct_t; +#define _GD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define GD_base E2K_RWAP_base /* [47: 0] - base address */ +#define GD_size E2K_RWAP_size /* [63:32] - size */ +#define _GD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define GD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define GD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define GD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define GD_hi_struct E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_GLOBALS 12 /* Globals area boundaries */ + /* alignment (2's exponent */ + /* value) */ +#define E2K_ALIGN_GLOBALS_SZ _BITUL(E2K_ALIGN_GLOBALS) +#define E2K_ALIGN_GLOBALS_MASK (_BITUL(E2K_ALIGN_GLOBALS) - 1) + +#ifndef __ASSEMBLY__ +/* + * OS Compilation Unit Descriptor (OSCUD) + * describes the memory containing interface codes of the OS + */ + + /* + * Structure of lower word + * access OSCUD.lo.OSCUD_xxx or OSCUD -> lo.OSCUD_xxx + * or OSCUD_lo.OSCUD_xxx or OSCUD_lo -> OSCUD_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_oscud_lo_t; +#define _OSCUD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define E2K_OSCUD_RW_PROTECTIONS E2_RWAR_R_ENABLE +#define OSCUD_lo_c E2K_RWAP_lo_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define OSCUD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define OSCUD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + +#define OSCUD_lo_base_mask E2K_VA_MASK + + /* + * Structure of high word + * access OSCUD.hi.OSCUD_xxx or OSCUD -> hi.OSCUD_xxx + * or OSCUD_hi.OSCUD_xxx or OSCUD_hi -> OSCUD_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_oscud_hi_t; +#define OSCUD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _OSCUD_hi_curptr \ + E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define OSCUD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access OSCUD.OSCUD_xxx or OSCUD -> OSCUD_xxx + */ +typedef e2k_rwap_struct_t oscud_struct_t; +#define _OSCUD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "R" */ +#define OSCUD_c E2K_RWAP_stub1 /* [58] - checked flag, */ + /* if set then literal CT */ + /* is correct */ +#define OSCUD_base E2K_RWAP_base /* [47: 0] - base address */ +#define OSCUD_size E2K_RWAP_size /* [63:32] - size */ +#define _OSCUD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define OSCUD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define OSCUD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define OSCUD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define OSCUD_hi_struct E2K_RWAP_hi_struct /* high register structure */ 
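+ +/* + * Usage sketch (illustrative only, compiled out): the same storage can + * be addressed as whole double-words or as bit-fields through the + * accessors above. example_fill_oscud() is a hypothetical helper and + * not part of this file. + */ +#if 0 +static inline void example_fill_oscud(oscud_struct_t *oscud, + u64 base, u64 size) +{ + oscud->OSCUD_lo_reg = 0; /* clear entire lower double-word */ + oscud->OSCUD_hi_reg = 0; /* clear entire high double-word */ + oscud->OSCUD_base = base & OSCUD_lo_base_mask; /* bits [47:0] */ + oscud->OSCUD_size = size; /* bits [63:32] of the high word */ + oscud->_OSCUD_rw = E2K_OSCUD_RW_PROTECTIONS; /* "R" protections */ +} +#endif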
+#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_OSCU 12 /* OS codes area boundaries */ + /* alignment (2's exponent */ + /* value */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_OSCU_MASK ((1UL << E2K_ALIGN_OSCU) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_OSCU_MASK ((1 << E2K_ALIGN_OSCU) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * OS Compilation Unit Globals Descriptor (OSGD) + * describes the OS global variables memory + */ + + /* + * Structure of lower word + * access OSGD.lo.OSGD_lo_xxx or OSGD -> lo.OSGD_lo_xxx + * or OSGD_lo.OSGD_lo_xxx or OSGD_lo -> OSGD_lo_xxx + */ +typedef e2k_rwap_lo_struct_t e2k_osgd_lo_t; +#define _OSGD_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define E2K_OSGD_RW_PROTECTIONS E2_RWAR_RW_ENABLE; +#define OSGD_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */ +#define OSGD_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */ + /* double-word of register */ + + /* + * Structure of high word + * access OSGD.hi.OSGD_hi_xxx or OSGD -> hi.OSGD_hi_xxx + * or OSGD_hi.OSGD_hi_xxx or OSGD_hi -> OSGD_hi_xxx + */ +typedef e2k_rwap_hi_struct_t e2k_osgd_hi_t; +#define OSGD_hi_size E2K_RWAP_hi_size /* [63:32] - size */ +#define _OSGD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */ +#define OSGD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */ + /* double-word of register */ + + /* + * Structure of quad-word register + * access OSGD.OSGD_xxx or OSGD -> OSGD_xxx + */ +typedef e2k_rwap_struct_t osgd_struct_t; +#define _OSGD_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define OSGD_base E2K_RWAP_base /* [47: 0] - base address */ +#define OSGD_size E2K_RWAP_size /* [63:32] - size */ +#define _OSGD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */ +#define OSGD_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define OSGD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define OSGD_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define OSGD_hi_struct E2K_RWAP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_OS_GLOBALS 12 /* OS Globals area boundaries */ + /* alignment (2's exponent */ + /* value */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_OS_GLOBALS_MASK ((1UL << E2K_ALIGN_OS_GLOBALS) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_OS_GLOBALS_MASK ((1 << E2K_ALIGN_OS_GLOBALS) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ +/* + * Procedure Stack Pointer (PSP) + * describes the full procedure stack memory as well as the current pointer + * to the top of a procedure stack memory part. 
+ */
+
+ /*
+ * Structure of lower word
+ * access PSP.lo.PSP_lo_xxx or PSP -> lo.PSP_lo_xxx
+ * or PSP_lo.PSP_lo_xxx or PSP_lo -> PSP_lo_xxx
+ */
+typedef e2k_rwap_lo_struct_t e2k_psp_lo_t;
+#define _PSP_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define E2K_PSP_RW_PROTECTIONS E2_RWAR_RW_ENABLE
+#define PSP_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */
+#define PSP_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */
+ /* double-word of register */
+
+ /*
+ * Structure of high word
+ * access PSP.hi.PSP_hi_xxx or PSP -> hi.PSP_hi_xxx
+ * or PSP_hi.PSP_hi_xxx or PSP_hi -> PSP_hi_xxx
+ */
+typedef e2k_rwap_hi_struct_t e2k_psp_hi_t;
+#define PSP_hi_size E2K_RPSP_hi_size /* [63:32] - size */
+#define PSP_hi_ind E2K_RPSP_hi_ind /* [31: 0] - index for SPILL */
+ /* and FILL */
+#define PSP_hi_half E2K_RPSP_hi_half /* [63: 0] - entire high */
+ /* double-word of register */
+ /*
+ * Structure of LSR - Loop status register
+ */
+
+typedef struct e2k_lsr_fields {
+ u64 lcnt : 32; /* [31: 0] (loop counter) */
+ u64 ecnt : 5; /* [36:32] (epilogue counter)*/
+ u64 vlc : 1; /* [37] (loop counter valid bit) */
+ u64 over : 1; /* [38] */
+ u64 ldmc : 1; /* [39] (loads manual control)*/
+ u64 ldovl : 8; /* [47:40] (load overlap)*/
+ u64 pcnt : 5; /* [52:48] (prologue counter)*/
+ u64 strmd : 7; /* [59:53] (store remainder counter)*/
+ u64 semc : 1; /* [60] (side effects manual control) */
+ u64 unused : 3; /* [63:61] */
+} e2k_lsr_fields_t;
+
+typedef union e2k_lsr_struct_t { /* double-word register */
+ e2k_lsr_fields_t fields; /* as fields */
+ u64 word; /* as entire register */
+} e2k_lsr_t;
+
+#define LSR_lcnt fields.lcnt
+#define LSR_ecnt fields.ecnt
+#define LSR_vlc fields.vlc
+#define LSR_over fields.over
+#define LSR_ldmc fields.ldmc
+#define LSR_ldovl fields.ldovl
+#define LSR_pcnt fields.pcnt
+#define LSR_strmd fields.strmd
+#define LSR_semc fields.semc
+#define LSR_reg word
+
+/* see C.19.1. */
+#define ls_prlg(x) ((x).LSR_pcnt != 0)
+#define ls_lst_itr(x) ((x).LSR_vlc && ((x).LSR_lcnt < 2))
+#define ls_loop_end(x) (ls_lst_itr(x) && ((x).LSR_ecnt == 0))
+
+#define E2K_LSR_VLC (1UL << 37)
+
+ /*
+ * Structure of ILCR - Initial loop counters register
+ */
+
+typedef struct e2k_ilcr_fields {
+ u64 lcnt : 32; /* [31: 0] (loop counter) */
+ u64 ecnt : 5; /* [36:32] (epilogue counter)*/
+ u64 unused1 : 11; /* [47:37] unused */
+ u64 pcnt : 5; /* [52:48] (prologue counter)*/
+ u64 unused2 : 11; /* [63:53] unused */
+} e2k_ilcr_fields_t;
+
+typedef union e2k_ilcr { /* double-word register */
+ e2k_ilcr_fields_t fields; /* as fields */
+ u64 word; /* as entire register */
+} e2k_ilcr_t;
+
+#define ILCR_lcnt fields.lcnt
+#define ILCR_ecnt fields.ecnt
+#define ILCR_pcnt fields.pcnt
+#define ILCR_reg word
+
+/* see C.17.1.2.
*/ +typedef struct e2k_ct_op_fields { + u64 psrc : 5; /* [4:0] (pointer to condition)*/ + u64 ct : 4; /* [8:5] (condition type) */ +} e2k_ct_op_fields_t; + +typedef union e2k_ct_struct_t { + e2k_ct_op_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_ct_t; + +#define CT_reg(x) ((x).word) +#define CT_PSRC(x) ((x).fields.psrc) +#define CT_CT(x) ((x).fields.ct) + + /* + * Structure of quad-word register + * access PSP.PSP_xxx or PSP -> PSP_xxx + */ +typedef e2k_rwap_struct_t psp_struct_t; +#define _PSP_rw E2K_RWAP_rw /* [60:59] - read/write flags */ + /* should be "RW" */ +#define PSP_base E2K_RWAP_base /* [47: 0] - base address */ +#define PSP_size E2K_RPSP_size /* [63:32] - size */ +#define PSP_ind E2K_RPSP_ind /* [31: 0] - index for SPILL */ + /* and FILL */ +#define PSP_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */ + /* double-word of register */ +#define PSP_hi_reg E2K_RPSP_hi_reg /* [63: 0] - entire high */ + /* double-word of register */ +#define PSP_lo_struct E2K_RWAP_lo_struct /* low register structure */ +#define PSP_hi_struct E2K_RPSP_hi_struct /* high register structure */ +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_PSTACK 12 /* Procedure stack boundaries */ + /* alignment (2's exponent */ + /* value) */ +#define E2K_ALIGN_PSTACK_TOP 5 /* Procedure stack top */ + /* boundaries alignment */ + /* (2's exponent value) */ +#ifndef __ASSEMBLY__ +# define ALIGN_PSTACK_SIZE (1ULL << E2K_ALIGN_PSTACK) +# define ALIGN_PSTACK_TOP_SIZE (1ULL << E2K_ALIGN_PSTACK_TOP) +#else /* __ASSEMBLY__ */ +# define ALIGN_PSTACK_SIZE (1 << E2K_ALIGN_PSTACK) +# define ALIGN_PSTACK_TOP_SIZE (1 << E2K_ALIGN_PSTACK_TOP) +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_PSTACK_MASK (ALIGN_PSTACK_SIZE - 1) +#define E2K_ALIGN_PSTACK_TOP_MASK (ALIGN_PSTACK_TOP_SIZE - 1) + +#ifndef __ASSEMBLY__ +/* + * Procedure Chain Stack Pointer (PCSP) + * describes the full procedure chain stack memory as well as the current + * pointer to the top of a procedure chain stack memory part. 
+ */
+
+ /*
+ * Structure of lower word
+ * access PCSP.lo.PCSP_lo_xxx or PCSP -> lo.PCSP_lo_xxx
+ * or PCSP_lo.PCSP_lo_xxx or PCSP_lo -> PCSP_lo_xxx
+ */
+typedef e2k_rwap_lo_struct_t e2k_pcsp_lo_t;
+#define _PCSP_lo_rw E2K_RWAP_lo_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define E2K_PCSR_RW_PROTECTIONS E2_RWAR_RW_ENABLE
+#define PCSP_lo_base E2K_RWAP_lo_base /* [47: 0] - base address */
+#define PCSP_lo_half E2K_RWAP_lo_half /* [63: 0] - entire lower */
+ /* double-word of register */
+ /*
+ * Structure of high word
+ * access PCSP.hi.PCSP_hi_xxx or PCSP -> hi.PCSP_hi_xxx
+ * or PCSP_hi.PCSP_hi_xxx or PCSP_hi -> PCSP_hi_xxx
+ */
+typedef e2k_rwap_hi_struct_t e2k_pcsp_hi_t;
+#define PCSP_hi_size E2K_RPSP_hi_size /* [63:32] - size */
+#define PCSP_hi_ind E2K_RPSP_hi_ind /* [31: 0] - index for SPILL */
+ /* and FILL */
+#define PCSP_hi_half E2K_RPSP_hi_half /* [63: 0] - entire high */
+ /* double-word of register */
+
+ /*
+ * Structure of quad-word register
+ * access PCSP.PCSP_xxx or PCSP -> PCSP_xxx
+ */
+typedef e2k_rwap_struct_t pcsp_struct_t;
+#define _PCSP_rw E2K_RWAP_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define PCSP_base E2K_RWAP_base /* [47: 0] - base address */
+#define PCSP_size E2K_RPSP_size /* [63:32] - size */
+#define PCSP_ind E2K_RPSP_ind /* [31: 0] - index for SPILL */
+ /* and FILL */
+#define PCSP_lo_reg E2K_RWAP_lo_reg /* [63: 0] - entire lower */
+ /* double-word of register */
+#define PCSP_hi_reg E2K_RPSP_hi_reg /* [63: 0] - entire high */
+ /* double-word of register */
+#define PCSP_lo_struct E2K_RWAP_lo_struct /* low register structure */
+#define PCSP_hi_struct E2K_RPSP_hi_struct /* high register structure */
+#endif /* !(__ASSEMBLY__) */
+
+#define E2K_ALIGN_PCSTACK 12 /* Procedure chain stack */
+ /* boundaries alignment */
+ /* (2's exponent value) */
+#define E2K_ALIGN_PCSTACK_TOP 5 /* Procedure chain stack top */
+ /* boundaries alignment */
+ /* (2's exponent value) */
+
+#ifndef __ASSEMBLY__
+# define ALIGN_PCSTACK_SIZE (1ULL << E2K_ALIGN_PCSTACK)
+# define ALIGN_PCSTACK_TOP_SIZE (1ULL << E2K_ALIGN_PCSTACK_TOP)
+#else
+# define ALIGN_PCSTACK_SIZE (1 << E2K_ALIGN_PCSTACK)
+# define ALIGN_PCSTACK_TOP_SIZE (1 << E2K_ALIGN_PCSTACK_TOP)
+#endif
+
+#define E2K_ALIGN_PCSTACK_MASK (ALIGN_PCSTACK_SIZE - 1)
+#define E2K_ALIGN_PCSTACK_TOP_MASK (ALIGN_PCSTACK_TOP_SIZE - 1)
+
+
+/*
+ * ========== chain registers ===========
+ */
+
+#define E2K_MAXCR 64 /* The total number of */
+ /* chain registers */
+#define E2K_MAXCR_q E2K_MAXCR /* The total number of */
+ /* chain quad-registers */
+#define E2K_ALIGN_CHAIN_WINDOW 5 /* Chain registers Window */
+ /* boundaries alignment */
+#define E2K_CWD_MSB 9 /* The number of the */
+ /* most significant bit */
+ /* of CWD_base */
+#define E2K_CWD_SIZE (E2K_CWD_MSB + 1) /* The number of bits in */
+ /* CWD_base field */
+#define E2K_PCSHTP_MSB (E2K_CWD_MSB + 1) /* The number of the */
+ /* most significant bit */
+ /* of PCSHTP */
+#define E2K_PCSHTP_SIZE (E2K_PCSHTP_MSB + 1) /* The number of bits in */
+ /* PCSHTP */
+
+/* Maximum size to be filled by hardware */
+#define E2K_CF_MAX_FILL_FILLC_q (E2K_MAXCR_q - 6)
+
+
+#ifndef __ASSEMBLY__
+
+/* Current chain registers window descriptor (CWD) */
+
+typedef unsigned int e2k_cwd_t;
+
+/*
+ * Structure of procedure chain stack hardware top register PCSHTP
+ * The register holds a signed value, so reads return a signed value
+ * and writes store one.
+ */
+
+typedef unsigned int e2k_pcshtp_t;
+
+#define PCSHTP_SIGN_EXTEND(pcshtp) \
+ (((s64) (pcshtp) << (s64) (64 - E2K_PCSHTP_SIZE)) \
+ >> (s64) (64 - E2K_PCSHTP_SIZE))
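+
+/*
+ * Illustrative sketch (editor's addition, compiled out): the macro
+ * above is the usual shift-up/arithmetic-shift-down sign extension
+ * idiom.  E2K_PCSHTP_SIZE is 11 as derived above, so an all-ones raw
+ * field extends to -1.
+ */
+#if 0
+static inline s64 pcshtp_sign_extend_example(void)
+{
+ e2k_pcshtp_t raw = 0x7ff; /* all ones in the 11-bit field */
+ return PCSHTP_SIGN_EXTEND(raw); /* == -1 */
+}
+#endif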
+
+#endif /* !(__ASSEMBLY__) */
+
+#ifndef __ASSEMBLY__
+/*
+ * User Stack Base Register (USBR/SBR)
+ * USBR - contains the base virtual address of the current User Stack area.
+ * SBR - contains the base virtual address of an area dedicated for all user
+ * stacks of the current task
+ */
+typedef e2k_rwp_struct_t e2k_usbr_t;
+typedef e2k_rwp_struct_t e2k_sbr_t;
+
+ /*
+ * Structure of double-word register
+ * access USBR.USBR_xxx or USBR -> USBR_xxx
+ * access SBR.SBR_xxx or SBR -> SBR_xxx
+ */
+#define USBR_base E2K_RWP_base /* [47: 0] - base address */
+#define USBR_reg E2K_RWP_reg /* [63: 0] - entire */
+ /* double-word register */
+#define SBR_base USBR_base /* [47: 0] - base address */
+#define SBR_reg USBR_reg /* [63: 0] - entire */
+ /* double-word register */
+#endif /* !(__ASSEMBLY__) */
+
+#define E2K_ALIGN_STACKS_BASE 12 /* User stacks boundaries */
+ /* alignment */
+ /* (2's exponent value) */
+#define E2K_ALIGN_ALL_STACKS_BASE 37 /* All User stacks area */
+ /* boundaries alignment */
+ /* (2's exponent value) */
+#define E2K_PROTECTED_STACK_BASE_BITS 32 /* Protected mode stack */
+ /* does not cross 4 Gb */
+ /* boundary. */
+
+#define E2K_ALIGN_STACK_BASE_REG (1UL << E2K_ALIGN_STACKS_BASE)
+#define E2K_ALIGN_STACKS_BASE_MASK ((1UL << E2K_ALIGN_STACKS_BASE) - 1)
+#define E2K_ALL_STACKS_MAX_SIZE (1UL << E2K_ALIGN_ALL_STACKS_BASE)
+#define E2K_PROTECTED_STACK_BASE_MASK \
+ ((1UL << E2K_PROTECTED_STACK_BASE_BITS) - 1)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Non-Protected User Stack Descriptor (USD)
+ * contains free memory space dedicated for user stack data and
+ * is supposed to grow from higher memory addresses to lower ones
+ */
+
+ /*
+ * Structure of lower word
+ * access USD.lo.USD_lo_xxx or USD -> lo.USD_lo_xxx
+ * or USD.USD_lo_xxx or USD -> USD_lo_xxx
+ */
+typedef e2k_rwap_lo_struct_t e2k_usd_lo_t;
+#define _USD_lo_rw E2K_RUSD_lo_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define USD_lo_p E2K_RUSD_lo_p /* [58] - flag of "protected" */
+ /* mode: should be */
+ /* 0 - non-protected */
+#define USD_lo_p_bit E2K_RUSD_lo_p_bit /* protected flag as value */
+#define USD_lo_p_flag (1UL << USD_lo_p_bit)
+
+#define USD_lo_base E2K_RUSD_lo_base /* [47: 0] - base address */
+#define USD_lo_half E2K_RUSD_lo_half /* [63: 0] - entire lower */
+ /* double-word of register */
+
+ /*
+ * Structure of high word
+ * access USD.hi.USD_hi_xxx or USD -> hi.USD_hi_xxx
+ * or USD_hi.USD_hi_xxx or USD_hi -> USD_hi_xxx
+ */
+typedef e2k_rwap_hi_struct_t e2k_usd_hi_t;
+#define USD_hi_size E2K_RWAP_hi_size /* [63:32] - size */
+#define _USD_hi_curptr E2K_RWAP_hi_curptr /* [31: 0] - should be 0 */
+#define USD_hi_half E2K_RWAP_hi_half /* [63: 0] - entire high */
+ /* double-word of register */
+
+#define MAX_USD_HI_SIZE (4ULL * 1024 * 1024 * 1024 - 1ULL)
+
+ /*
+ * Structure of quad-word register
+ * access USD.USD_xxx or USD -> USD_xxx
+ */
+typedef e2k_rwap_struct_t usd_struct_t;
+#define _USD_rw E2K_RUSD_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define USD_p E2K_RUSD_p /* [58] - flag of "protected" */
+ /* mode: 1 - protected */
+#define USD_base E2K_RUSD_base /* [31: 0] - base address */
+#define USD_size E2K_RWAP_size /* [63:32] - size */
+#define _USD_curptr E2K_RWAP_curptr /* [31: 0] - should be 0 */
+#define USD_lo_reg E2K_RUSD_lo_reg /* [63: 0] - entire lower */
+ /* double-word of register */
+#define USD_hi_reg E2K_RWAP_hi_reg /* [63: 0] - entire high */
+ /* double-word of register */
+#define USD_lo_struct E2K_RUSD_lo_struct /* low register structure */
+#define USD_hi_struct E2K_RWAP_hi_struct /* high register structure */
+
+/*
+ * Protected User Stack Descriptor (PUSD)
+ * contains free memory space dedicated for user stack data and
+ * is supposed to grow from higher memory addresses to lower ones
+ */
+
+ /*
+ * Structure of lower word
+ * access PUSD.lo.PUSD_lo_xxx or PUSD -> lo.PUSD_lo_xxx
+ * or PUSD.PUSD_lo_xxx or PUSD -> PUSD_lo_xxx
+ */
+typedef e2k_rwsap_lo_struct_t e2k_pusd_lo_t;
+#define _PUSD_lo_rw E2K_RPUSD_lo_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define PUSD_lo_p E2K_RPUSD_lo_p /* [58] - flag of "protected" */
+ /* mode: should be */
+ /* 1 - protected */
+#define PUSD_lo_psl E2K_RPUSD_lo_psl /* [47:32] - dynamic level of */
+ /* the current procedure in a */
+ /* stack of called procedures */
+#define PUSD_lo_base E2K_RPUSD_lo_base /* [31: 0] - base address */
+#define PUSD_lo_half E2K_RPUSD_lo_half /* [63: 0] - entire lower */
+ /* double-word of register */
+
+ /*
+ * Structure of high word
+ * access PUSD.hi.PUSD_hi_xxx or PUSD -> hi.PUSD_hi_xxx
+ * or PUSD_hi.PUSD_hi_xxx or PUSD_hi -> PUSD_hi_xxx
+ */
+typedef e2k_rwsap_hi_struct_t e2k_pusd_hi_t;
+#define PUSD_hi_size E2K_RWSAP_hi_size /* [63:32] - size */
+#define _PUSD_hi_curptr E2K_RWSAP_hi_curptr /* [31: 0] - should be 0 */
+#define PUSD_hi_half E2K_RWSAP_hi_half /* [63: 0] - entire high */
+ /* double-word of register */
+
+ /*
+ * Structure of quad-word register
+ * access PUSD.PUSD_xxx or PUSD -> PUSD_xxx
+ */
+typedef e2k_rwsap_struct_t pusd_struct_t;
+#define _PUSD_rw E2K_RPUSD_rw /* [60:59] - read/write flags */
+ /* should be "RW" */
+#define PUSD_p E2K_RPUSD_p /* [58] - flag of "protected" */
+ /* mode: should be */
+ /* 1 - protected */
+#define PUSD_psl E2K_RPUSD_psl /* [47:32] - dynamic level of */
+ /* the current procedure in a */
+ /* stack of called procedures */
+#define PUSD_base E2K_RUSD_base /* [31: 0] - base address */
+#define PUSD_size E2K_RWSAP_size /* [63:32] - size */
+#define _PUSD_curptr E2K_RWSAP_curptr /* [31: 0] - should be 0 */
+#define PUSD_lo_reg E2K_RPUSD_lo_reg /* [63: 0] - entire lower */
+ /* double-word of register */
+#define PUSD_hi_reg E2K_RWSAP_hi_reg /* [63: 0] - entire high */
+ /* double-word of register */
+#define PUSD_lo_struct E2K_RUSD_lo_struct /* low register structure */
+#define PUSD_hi_struct E2K_RWSAP_hi_struct /* high register structure */
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define E2K_ALIGN_USTACK 4 /* Non-Protected User Stack */
+ /* boundaries alignment */
+ /* (2's exponent value) */
+#define E2K_ALIGN_PUSTACK 5 /* Protected User Stack */
+ /* boundaries alignment */
+ /* (2's exponent value) */
+
+#define E2K_ALIGN_USTACK_SIZE (1UL << E2K_ALIGN_USTACK)
+#define E2K_ALIGN_PUSTACK_SIZE (1UL << E2K_ALIGN_PUSTACK)
+
+/*
+ * This should be
+ * max(E2K_ALIGN_USTACK_SIZE, E2K_ALIGN_PUSTACK_SIZE)
+ * but we want it to be constant
+ */
+#define E2K_ALIGN_STACK 32UL
+
+#ifndef __ASSEMBLY__
+#define E2K_ALIGN_USTACK_MASK ((1UL << E2K_ALIGN_USTACK) - 1)
+#define E2K_ALIGN_PUSTACK_MASK ((1UL << E2K_ALIGN_PUSTACK) - 1)
+#else /* __ASSEMBLY__ */
+#define E2K_ALIGN_USTACK_MASK ((1 << E2K_ALIGN_USTACK) - 1)
+#define E2K_ALIGN_PUSTACK_MASK ((1 << E2K_ALIGN_PUSTACK) - 1)
+#endif /* !(__ASSEMBLY__) */
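+
+/*
+ * Illustrative sketch (editor's addition, compiled out): unlike the
+ * E2K_ALIGN_* 2's-exponent values, E2K_ALIGN_STACK is a byte count,
+ * so a data stack pointer is aligned by masking low bits directly.
+ */
+#if 0
+static inline unsigned long e2k_align_stack_example(unsigned long sp)
+{
+ return sp & ~(E2K_ALIGN_STACK - 1); /* round down to 32 bytes */
+}
+#endif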
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Instruction structure
+ */
+
+typedef u64 instr_item_t; /* min. item of instruction */
+ /* is double-word */
+
+#define E2K_INSTR_MAX_SYLLABLES_NUM 8 /* max length of instruction */
+ /* in terms of min item of */
+ /* instruction */
+#define E2K_INSTR_MAX_SIZE (E2K_INSTR_MAX_SYLLABLES_NUM * \
+ sizeof(instr_item_t))
+
+/* Asynchronous program instruction 'fapb' is always 16 bytes long */
+#define E2K_ASYNC_INSTR_SIZE 16
+/* Asynchronous program can contain maximum 32 instructions */
+#define MAX_ASYNC_PROGRAM_INSTRUCTIONS 32
+
+typedef u16 instr_semisyl_t; /* instruction semi-syllable */
+ /* is short */
+
+typedef u32 instr_syl_t; /* instruction syllable */
+ /* is word */
+
+/*
+ * Order of fixed syllables of instruction
+ */
+#define E2K_INSTR_HS_NO 0 /* header syllable */
+#define E2K_INSTR_SS_NO 1 /* stubs syllable (if present) */
+
+#define E2K_GET_INSTR_SEMISYL(instr_addr, semisyl_no) \
+ (((instr_semisyl_t *)(instr_addr)) \
+ [((semisyl_no) & 0x1) ? ((semisyl_no) - 1) : \
+ ((semisyl_no) + 1)])
+#define E2K_GET_INSTR_SYL(instr_addr, syl_no) \
+ (((instr_syl_t *)(instr_addr))[syl_no])
+
+#define E2K_GET_INSTR_HS(instr_addr) E2K_GET_INSTR_SYL(instr_addr, \
+ E2K_INSTR_HS_NO)
+#define E2K_GET_INSTR_SS(instr_addr) E2K_GET_INSTR_SYL(instr_addr, \
+ E2K_INSTR_SS_NO)
+#define E2K_GET_INSTR_ALS0(instr_addr, ss_flag) \
+ E2K_GET_INSTR_SYL(instr_addr, \
+ (ss_flag) ? E2K_INSTR_SS_NO + 1 \
+ : \
+ E2K_INSTR_SS_NO)
+#define E2K_GET_INSTR_ALES0(instr_addr, mdl) \
+ E2K_GET_INSTR_SEMISYL(instr_addr, ((mdl) + 1) * 2)
+
+/*
+ * Header syllable structure
+ */
+
+typedef union instr_hs {
+ struct {
+ u32 mdl : 4; /* [ 3: 0] middle pointer in terms of */
+ /* syllables - 1 */
+ u32 lng : 3; /* [ 6: 4] length of instruction in */
+ /* terms of double-words - 1 */
+ u32 nop : 3; /* [ 9: 7] no operation code */
+ u32 lm : 1; /* [10] loop mode flag */
+ u32 x : 1; /* [11] unused field */
+ u32 s : 1; /* [12] Stubs syllable presence bit */
+ u32 sw : 1; /* [13] bit used by software */
+ u32 c : 2; /* [15:14] Control syllables presence */
+ /* mask */
+ u32 cd : 2; /* [17:16] Conditional execution */
+ /* syllables number */
+ u32 pl : 2; /* [19:18] Predicate logic channel */
+ /* syllables number */
+ u32 ale : 6; /* [25:20] Arithmetic-logic channel */
+ /* syllable extensions */
+ /* presence mask */
+ u32 al : 6; /* [31:26] Arithmetic-logic channel */
+ /* syllables presence mask */
+ };
+ struct {
+ u32 __pad : 14;
+ u32 c0 : 1; /* CS0 */
+ u32 c1 : 1; /* CS1 */
+ u32 __pad2 : 16;
+ };
+ struct {
+ u32 mdl : 4;
+ u32 lng : 3;
+ u32 nop : 3;
+ u32 lm : 1;
+ u32 x : 1;
+ u32 s : 1;
+ u32 sw : 1;
+ u32 c : 2;
+ u32 cd : 2;
+ u32 pl : 2;
+ u32 ale : 6;
+ u32 al : 6;
+ } fields;
+ instr_syl_t word; /* as entire syllable */
+} instr_hs_t;
+
+#define E2K_INSTR_HS_LNG_MASK 0x70
+
+#define E2K_GET_INSTR_SIZE(hs) \
+ ((AS_STRUCT(hs).lng + 1) * sizeof(instr_item_t))
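+
+/*
+ * Illustrative sketch (editor's addition, compiled out): the byte
+ * length of a wide instruction comes from the 'lng' field of its
+ * header syllable, which is what E2K_GET_INSTR_SIZE() encodes.
+ */
+#if 0
+static inline unsigned long instr_size_example(void *ip)
+{
+ instr_hs_t hs;
+
+ hs.word = E2K_GET_INSTR_HS(ip);
+ /* lng is "length in terms of double-words - 1" */
+ return (hs.lng + 1) * sizeof(instr_item_t);
+}
+#endif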
+
+/*
+ * Stubs syllable structure
+ */
+
+typedef union instr_ss {
+ struct {
+ u32 ctcond : 9; /* [ 8: 0] control transfer condition */
+ u32 x : 1; /* [ 9] unused field */
+ u32 ctop : 2; /* [11:10] control transfer opcode */
+ u32 aa : 4; /* [15:12] mask of AAS */
+ u32 alc : 2; /* [17:16] advance loop counters */
+ u32 abp : 2; /* [19:18] advance predicate base */
+ u32 xx : 1; /* [20] unused field */
+ u32 abn : 2; /* [22:21] advance numeric base */
+ u32 abg : 2; /* [24:23] advance global base */
+ u32 xxx : 1; /* [25] unused field */
+ u32 vfdi : 1; /* [26] verify deferred interrupt */
+ u32 srp : 1; /* [27] store recovery point */
+ u32 bap : 1; /* [28] begin array prefetch */
+ u32 eap : 1; /* [29] end array prefetch */
+ u32 ipd : 2; /* [31:30] instruction prefetch depth */
+ };
+ struct {
+ u32 ctcond : 9;
+ u32 x : 1;
+ u32 ctop : 2;
+ u32 aa : 4;
+ u32 alc : 2;
+ u32 abp : 2;
+ u32 xx : 1;
+ u32 abn : 2;
+ u32 abg : 2;
+ u32 xxx : 1;
+ u32 vfdi : 1;
+ u32 srp : 1;
+ u32 bap : 1;
+ u32 eap : 1;
+ u32 ipd : 2;
+ } fields;
+ instr_syl_t word; /* as entire syllable */
+} instr_ss_t;
+
+/*
+ * ALU syllables structure
+ */
+
+typedef struct instr_alsf2_fields {
+ u32 dst : 8; /* [ 7: 0] destination */
+ u32 src2 : 8; /* [15: 8] source register #2 */
+ u32 opce : 8; /* [23:16] opcode extension */
+ u32 cop : 7; /* [30:24] code of operation */
+ u32 spec : 1; /* [31] speculative mode */
+} instr_alsf2_fields_t;
+
+typedef union instr_alsf2 {
+ instr_alsf2_fields_t fields; /* as fields */
+ instr_syl_t word; /* as entire syllable */
+} instr_alsf2_t;
+
+typedef union instr_als {
+ instr_alsf2_fields_t f2; /* as fields */
+ instr_syl_t word; /* as entire syllable */
+} instr_als_t;
+
+typedef struct instr_alesf2_fields {
+ u32 opce : 8; /* [ 7: 0] opcode 2 extension */
+ u32 opc2 : 8; /* [15: 8] opcode 2 */
+} instr_alesf2_fields_t;
+
+typedef union instr_alesf2 {
+ instr_alesf2_fields_t fields; /* as fields */
+ instr_semisyl_t word; /* as entire syllable */
+} instr_alesf2_t;
+
+typedef union instr_ales {
+ instr_alesf2_fields_t f2; /* as fields */
+ instr_semisyl_t word; /* as entire syllable */
+} instr_ales_t;
+
+#define INSTR_SRC2_GREG_VALUE 0xe0
+#define INSTR_SRC2_GREG_MASK 0xe0
+#define INSTR_SRC2_GREG_NUM_MASK 0x1f
+#define INSTR_SRC2_16BIT_VALUE 0xd0
+#define INSTR_SRC2_32BIT_VALUE 0xd8
+#define INSTR_SRC2_BIT_MASK 0xf8
+#define INSTR_SRC2_LTS_NUM_MASK 0x03
+#define INSTR_SRC2_LTS_SHIFT_MASK 0x04
+#define INSTR_LTS_32BIT_SHIFT 0
+#define INSTR_LTS_16BIT_SHIFT 16
+#define INSTR_LTS_16BIT_NOSHIFT 0
+#define INSTR_LTS_32BIT_MASK 0xffffffff
+#define INSTR_LTS_16BIT_SHIFT_MASK 0xffff0000
+#define INSTR_LTS_16BIT_NOSHIFT_MASK 0x0000ffff
+
+/*
+ * ALU syllable code of operations and opcode extensions
+ */
+#define DRTOAP_ALS_COP 0x62 /* DRTOAP */
+#define GETSP_ALS_COP 0x58 /* GETSP */
+#define GETSOD_ALS_COP 0x5a /* GETSOP */
+#define EXT_ALES_OPC2 0x01 /* EXTension */
+#define USD_ALS_OPCE 0xec /* USD */
+
+/*
+ * CS0 syllable structure
+ */
+
+typedef union {
+ struct {
+ u32 prefr : 3;
+ u32 ipd : 1;
+ u32 pdisp : 24;
+ u32 __pad : 4;
+ } pref;
+ struct {
+ u32 param : 28;
+ u32 ctp_opc : 2;
+ u32 ctpr : 2;
+ } cof1;
+ struct {
+ u32 disp : 28;
+ u32 ctp_opc : 2;
+ u32 ctpr : 2;
+ } cof2;
+ struct {
+ u32 __pad1 : 28;
+ u32 opc : 4;
+ };
+ struct {
+ u32 __pad2 : 28;
+ u32 ctp_opc : 2;
+ u32 ctpr : 2;
+ };
+ instr_syl_t word;
+} instr_cs0_t;
+
+#define CS0_CTP_OPC_IBRANCH 0
+#define CS0_CTP_OPC_DISP 0
+#define CS0_CTP_OPC_LDISP 1
+#define CS0_CTP_OPC_PREF 1
+#define CS0_CTP_OPC_PUTTSD 2
+
+
+/*
+ * CS1 syllable structure
+ */
+
+typedef union {
+ struct {
+ u32 __pad1 : 27;
+ u32 sft : 1;
+ u32 __pad2 : 4;
+ };
+ struct {
+ u32 param : 28;
+ u32 opc : 4;
+ };
+ instr_syl_t word;
+} instr_cs1_t;
+
+#define CS1_OPC_SETEI 2
+#define CS1_OPC_CALL 5
+
+
+/*
+ * ========== numeric registers (register file) ===========
+ */
+
+#define E2K_MAXNR 128 /* The total number of */
+ /* quad-NRs */
+#define E2K_MAXGR 16 /* The total number of global */
+ /* quad-NRs */
+#define E2K_MAXSR (E2K_MAXNR - E2K_MAXGR) /* The total number of stack */
+ /* quad-NRs */
+#define E2K_MAXNR_d (E2K_MAXNR * 2) /* The total number of */
+ /* double-NRs */
+#define E2K_MAXGR_d (E2K_MAXGR * 2) /* The total number of global */
+ /* double-NRs */
+#define E2K_MAXSR_d (E2K_MAXSR * 2) /* The total number of stack */
+ /* double-NRs */
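+
+/*
+ * Editor's note (compiled out): with 128 quad-NRs of which 16 are
+ * global, the stack part of the register file is (128 - 16) * 2 = 224
+ * double-NRs.
+ */
+#if 0
+_Static_assert(E2K_MAXSR_d == 224, "e2k stack register file size");
+#endif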
+#define E2K_ALIGN_WINDOW 4 /* Window boundaries */
+ /* alignment */
+#define E2K_WD_MSB 10 /* The number of the most */
+ /* significant bit of WD */
+ /* fields */
+#define E2K_WD_SIZE (E2K_WD_MSB + 1) /* The number of bits in WD */
+ /* fields */
+#define E2K_NR_SIZE 16 /* Byte size of quad-NR */
+
+/* Total size of registers file (local stack registers + global registers) */
+#define MAX_NRF_SIZE (E2K_MAXNR * E2K_NR_SIZE)
+/* Size of local stack registers file */
+#define MAX_SRF_SIZE (E2K_MAXSR * E2K_NR_SIZE)
+
+struct e2k_wd_fields {
+ u64 base : E2K_WD_SIZE; /* [10: 0] window base: */
+ /* %r0 physical address */
+ u64 unused1 : 16 - E2K_WD_SIZE; /* [15:11] */
+ u64 size : E2K_WD_SIZE; /* [26:16] window size */
+ u64 unused2 : 16 - E2K_WD_SIZE; /* [31:27] */
+ u64 psize : E2K_WD_SIZE; /* [42:32] parameters area */
+ /* size */
+ u64 unused3 : 16 - E2K_WD_SIZE; /* [47:43] */
+ u64 fx : 1; /* [48] spill/fill */
+ /* extended flag; indicates */
+ /* that the current procedure */
+ /* has variables of FX type */
+ u64 unused4 : 15; /* [63:49] unused field */
+};
+
+/* Current window descriptor (WD) */
+typedef union e2k_wd {
+ struct {
+ u64 base : E2K_WD_SIZE; /* [10: 0] window base: */
+ /* %r0 physical address */
+ u64 unused1 : 16 - E2K_WD_SIZE; /* [15:11] */
+ u64 size : E2K_WD_SIZE; /* [26:16] window size */
+ u64 unused2 : 16 - E2K_WD_SIZE; /* [31:27] */
+ u64 psize : E2K_WD_SIZE; /* [42:32] parameters area */
+ /* size */
+ u64 unused3 : 16 - E2K_WD_SIZE; /* [47:43] */
+ u64 fx : 1; /* [48] spill/fill */
+ /* extended flag; indicates */
+ /* that the current procedure */
+ /* has variables of FX type */
+ u64 dbl : 1; /* [49] */
+ u64 unused4 : 14; /* [63:50] unused field */
+ };
+ struct e2k_wd_fields fields;
+ u64 word; /* as entire opcode */
+} e2k_wd_t;
+
+#define WD_base base
+#define WD_size size
+#define WD_psize psize
+#define WD_fx fx
+#define WD_reg word
+
+/* Structure of dword register PSHTP */
+typedef struct e2k_pshtp_fields { /* PSHTP fields */
+ u64 ind : E2K_WD_SIZE + 1; /* [WD_MSB + 1 : 0] */
+ u64 unused1 : 16 - E2K_WD_SIZE - 1; /* [15: WD_MSB + 2] */
+ u64 fxind : E2K_WD_SIZE; /* [16 + WD_MSB : 16] */
+ u64 unused2 : 32 - E2K_WD_SIZE - 16;/* [31: 16+ WD_MSB + 1] */
+ u64 tind : E2K_WD_SIZE; /* [32 + WD_MSB : 32] */
+ u64 unused3 : 48 - E2K_WD_SIZE - 32;/* [47: 32+ WD_MSB + 1] */
+ u64 fx : 1; /* [48 : 48] */
+ u64 unused4 : 15; /* [63 : 49] */
+} e2k_pshtp_fields_t;
+
+typedef union e2k_pshtp_struct { /* Register */
+ e2k_pshtp_fields_t fields; /* as fields */
+ u64 word; /* as entire register */
+} e2k_pshtp_t;
+
+#define PSHTP_ind fields.ind
+#define PSHTP_tind fields.tind
+#define PSHTP_fxind fields.fxind
+#define PSHTP_fx fields.fx
+#define PSHTP_reg word
+
+/*
+ * The PSHTP register contains an index in terms of double-numeric
+ * registers, while the PSP register contains an index in terms of
+ * extended double-numeric registers spilled into memory - each
+ * double-numeric register occupies two double-words: one for the main
+ * part and a second for the extension.
+ * So some conversion is needed to operate with PSP_ind and PSHTP_ind
+ * in common terms.
+ */
+#define PSHTP_IND_TO_PSP(nr_nums) ((nr_nums) * 2)
+#define PSP_IND_TO_PSHTP(mem_ind) ((mem_ind) / 2)
+#define PSHTP_IND_SIGN_EXTEND(pshtp) \
+ (((s64) (pshtp) << (s64) (64 - (E2K_WD_SIZE + 1))) \
+ >> (s64) (64 - (E2K_WD_SIZE + 1)))
+#define PSHTP_MEM_SIGN_EXTEND(pshtp) \
+ (((s64) (pshtp) << (s64) (64 - (E2K_WD_SIZE + 1))) \
+ >> (s64) (64 - (E2K_WD_SIZE + 1) - 1))
+#define PSHTP_Q_SIGN_EXTEND(pshtp) \
+ (((s64) (pshtp) << (s64) (64 - (E2K_WD_SIZE + 1))) \
+ >> (s64) (64 - (E2K_WD_SIZE + 1) + 4))
+#define GET_PSHTP_NR_INDEX(pshtp) ((u64) PSHTP_IND_SIGN_EXTEND(AW(pshtp)))
+#define SET_PSHTP_NR_INDEX(pshtp, signed_nr_nums) \
+ ((pshtp).PSHTP_ind = (signed_nr_nums))
+#define GET_PSHTP_MEM_INDEX(pshtp) ((u64) PSHTP_MEM_SIGN_EXTEND(AW(pshtp)))
+#define SET_PSHTP_MEM_INDEX(pshtp, mem_ind) \
+ SET_PSHTP_NR_INDEX(pshtp, PSP_IND_TO_PSHTP(mem_ind))
+#define GET_PSHTP_Q_INDEX(pshtp) ((u64) PSHTP_Q_SIGN_EXTEND(AW(pshtp)))
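+
+/*
+ * Illustrative sketch (editor's addition, compiled out): each
+ * double-numeric register spilled to memory occupies two double-words
+ * (main part plus extension), which is what the conversion macros
+ * above encode.
+ */
+#if 0
+static inline u64 pshtp_spill_bytes_example(void)
+{
+ /* 8 registers -> 16 memory double-words -> 128 bytes */
+ return PSHTP_IND_TO_PSP(8) * sizeof(u64);
+}
+#endif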
+
+
+/* Numeric Register in a rotatable area: %br# or %dbr# (OPCODE) */
+typedef struct e2k_nbr_fields {
+ u8 index : 7; /* [ 6: 0] NR index in a */
+ /* rotatable area */
+ u8 rt7 : 1; /* [ 7] should be 0 */
+} e2k_nbr_fields_t;
+typedef union e2k_nbr {
+ e2k_nbr_fields_t fields; /* as fields */
+ u8 word; /* as entire opcode */
+} e2k_nbr_t;
+
+/* Numeric Register in a window: %r# or %dr# (OPCODE) */
+typedef struct e2k_nr_fields {
+ u8 index : 6; /* [ 5: 0] NR index in a */
+ /* window */
+ u8 rt6 : 1; /* [ 6] should be 0 */
+ u8 rt7 : 1; /* [ 7] should be 1 */
+} e2k_nr_fields_t;
+typedef union e2k_nr {
+ e2k_nr_fields_t fields; /* as fields */
+ u8 word; /* as entire opcode */
+} e2k_nr_t;
+
+/* Numeric results */
+/* Result destination (destination(ALS.dst)) is encoded in dst fields */
+/* of ALS or AAS syllables as follows: */
+
+typedef union e2k_dst {
+ e2k_nbr_t nbr; /* as rotatable register */
+ e2k_nr_t nr; /* as window register */
+ u8 word; /* as entire opcode */
+} e2k_dst_t;
+
+#define DST_IS_NBR(dst) (AS_STRUCT(dst.nbr).rt7 == 0)
+#define DST_IS_NR(dst) (AS_STRUCT(dst.nr).rt7 == 1 && \
+ AS_STRUCT(dst.nr).rt6 == 0)
+#define DST_NBR_INDEX(dst) (AS_STRUCT(dst.nbr).index)
+#define DST_NR_INDEX(dst) (AS_STRUCT(dst.nr).index)
+#define DST_NBR_RNUM_d(dst) DST_NBR_INDEX(dst)
+#define DST_NR_RNUM_d(dst) DST_NR_INDEX(dst)
+
+/* The effective address of NR in a rotatable area (in terms of double-NR) */
+#define NBR_IND_d(BR, rnum_d) (AS_STRUCT(BR).rbs * 2 + \
+ (AS_STRUCT(BR).rcur * 2 + rnum_d) % \
+ (AS_STRUCT(BR).rsz * 2 + 2))
+#define NBR_REA_d(WD, ind_d) ((AS_STRUCT(WD).base / 8 + ind_d) % \
+ E2K_MAXSR_d)
+
+/* The effective address of NR in a window (in terms of double-NR) */
+#define NR_REA_d(WD, rnum_d) ((AS_STRUCT(WD).base / 8 + rnum_d) % \
+ E2K_MAXSR_d)
+
+
+/*
+ * ========== chain regs & usd regs ===========
+ * To work with reg as with word use AS_WORD
+ * To work with reg as with struct use AS_STRUCT
+ */
+
+
+#define AS_WORD(x) ((x).word)
+#define AS_STRUCT(x) ((x).fields)
+#define AS_V2_STRUCT(x) ((x).v2_fields)
+#define AS_V6_STRUCT(x) ((x).v6_fields)
+#define AS_SAP_STRUCT(x) ((x).sap_fields)
+#define AS_AP_STRUCT(x) ((x).ap_fields)
+#define AS_WORD_P(xp) ((xp)->word)
+#define AS_STRUCT_P(xp) ((xp)->fields)
+#define AS_SAP_STRUCT_P(xp) ((xp)->sap_fields)
+#define AS_AP_STRUCT_P(xp) ((xp)->ap_fields)
+
+#define AW(x) AS_WORD(x)
+#define AS(x) AS_STRUCT(x)
+#define AWP(xp) AS_WORD_P(xp)
+#define ASP(xp) AS_STRUCT_P(xp)
+
+/* BR */
+typedef struct e2k_br_fields { /* Structure of br reg */
+ u32 rbs : 6; /* [ 5: 0] */
+ u32 rsz : 6; /* [11: 6] */
+ u32
rcur : 6; /* [17:12] */ + u32 psz : 5; /* [22:18] */ + u32 pcur : 5; /* [27:23] */ +} e2k_br_fields_t; +typedef union e2k_br { + e2k_br_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_br_t; +#define BR_rbs fields.rbs +#define BR_rsz fields.rsz +#define BR_rcur fields.rcur +#define BR_psz fields.psz +#define BR_pcur fields.pcur +#define BR_reg word + +/* see 5.25.1. */ + +typedef union e2k_rpr_lo_struct { + e2k_rwp_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rpr_lo_t; + +#define RPR_lo_reg(rpr_lo) ((rpr_lo).word) +#define RPR_lo_ip E2K_RWP_base /* [47: 0] - IP of trap */ +#define RPR_lo_stp E2K_RWP_stub1 /* [58] - store pointer */ +#define RPR_lo_half BR_reg + +typedef union e2k_rpr_hi_struct { + e2k_br_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_rpr_hi_t; + +#define RPR_hi_reg(rpr_hi) ((rpr_hi).word) +#define RPR_hi_rbs BR_rbs +#define RPR_hi_rsz BR_rsz +#define RPR_hi_rcur BR_rcur +#define RPR_hi_psz BR_psz +#define RPR_hi_pcur BR_pcur +#define RPR_hi_half BR_reg + +#define RPR_IP(x) ((x).RPR_lo_ip) +#define RPR_STP(x) ((x).RPR_lo_stp) +#define RPR_BR_CUR(x) ((x).RPR_hi_rcur) +#define RPR_BR_PCUR(x) ((x).RPR_hi_pcur) + +/* + * BGR. Rotation base of global registers. + * 11 bits wide. Rounded to 32-bit, because 16-bit memory & sysreg access + * makes no sense in this case + */ +typedef struct e2k_bgr_fields { /* Structure of bgr reg */ + u32 val : 8; /* [ 7: 0] */ + u32 cur : 3; /* [10: 8] */ +} e2k_bgr_fields_t; +typedef union e2k_bgr { + e2k_bgr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_bgr_t; + +#define BGR_val fields.val +#define BGR_cur fields.cur +#define BGR_reg word + +#define E2K_INITIAL_BGR_VAL 0xff +#define E2K_INITIAL_BGR ((e2k_bgr_t) { {cur : 0, val : 0xff} }) + + +#define E2K_GB_START_REG_NO_d 24 +#define E2K_GB_REGS_NUM_d (E2K_MAXGR_d - E2K_GB_START_REG_NO_d) + + +/* CR0 */ + +typedef struct e2k_cr0_hi_fields { /* Structure of cr0_hi chain reg */ + u64 unused : 3; /* [ 2: 0] */ + u64 ip : 61; /* [63: 3] */ +} e2k_cr0_hi_fields_t; +typedef union e2k_cr0_hi { + e2k_cr0_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr0_hi_t; +#define CR0_hi_ip fields.ip /* [63: 3] - IP >> 3 */ +#define CR0_hi_half word /* [63: 0] - entire high */ +#define CR0_hi_IP CR0_hi_half /* [63: 0] - IP */ + +typedef struct e2k_cr0_lo_fields { /* Structure of cr0_lo chain reg */ + u64 pf : 64; /* [63: 0] */ +} e2k_cr0_lo_fields_t; +typedef union e2k_cr0_lo { + e2k_cr0_lo_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr0_lo_t; +#define CR0_lo_pf fields.pf /* [63: 0] - predicates file */ +#define CR0_lo_half word /* [63: 0] - entire high */ + +/* CR1 */ + +typedef union e2k_cr1_hi_fields { /* Structure of cr1_hi chain reg */ + struct { + u64 br : 28; /* [27: 0] */ + u64 unused : 7; /* [34:28] */ + u64 wdbl : 1; /* [35:35] */ + u64 ussz : 28; /* [63:36] */ + }; + struct { + u64 rbs : 6; /* [5 :0 ] */ + u64 rsz : 6; /* [11:6 ] */ + u64 rcur : 6; /* [17:12] */ + u64 psz : 5; /* [22:18] */ + u64 pcur : 5; /* [27:23] */ + u64 __x1 : 36; /* [63:28] */ + }; +} e2k_cr1_hi_fields_t; +typedef union e2k_cr1_hi { + e2k_cr1_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cr1_hi_t; +#define CR1_hi_br fields.br /* [27: 0] - base of rotate regs */ +#define CR1_hi_wdbl fields.wdbl /* [63:36] - ??? 
*/
+#define CR1_hi_ussz fields.ussz /* [63:36] - user stack size */
+#define CR1_hi_rbs fields.rbs /* [ 5: 0] - base of rotate regs */
+#define CR1_hi_rsz fields.rsz /* [11: 6] - size of rotate regs */
+#define CR1_hi_rcur fields.rcur /* [17:12] - current of rotate regs */
+#define CR1_hi_psz fields.psz /* [22:18] - size of rotate preds */
+#define CR1_hi_pcur fields.pcur /* [27:23] - current of rotate preds */
+#define CR1_hi_half word /* [63: 0] - entire high */
+
+typedef union e2k_cr1_lo_fields { /* Structure of cr1_lo chain reg */
+ struct {
+ u64 unused1 : 16; /* [15:0] */
+ u64 ein : 8; /* [23:16] */
+ u64 ss : 1; /* [24] */
+ u64 wfx : 1; /* [25] */
+ u64 wpsz : 7; /* [32:26] */
+ u64 wbs : 7; /* [39:33] */
+ u64 cuir : 17; /* [56:40] */
+ u64 psr : 7; /* [63:57] */
+ };
+ struct {
+ u64 __x1 : 40; /* [39:0] */
+ u64 cui : 16; /* [55:40] */
+ u64 ic : 1; /* [56] */
+ u64 pm : 1; /* [57] privileged mode */
+ u64 ie : 1; /* [58] interrupt enable */
+ u64 sge : 1; /* [59] stack guard control enable */
+ u64 lw : 1; /* [60] last wish */
+ u64 uie : 1; /* [61] user interrupts enable */
+ u64 nmie : 1; /* [62] not masked interrupts enable */
+ u64 unmie : 1; /* [63] user not masked interrupts */
+ /* enable */
+ };
+} e2k_cr1_lo_fields_t;
+typedef union e2k_cr1_lo {
+ e2k_cr1_lo_fields_t fields; /* as fields */
+ u64 word; /* as entire register */
+} e2k_cr1_lo_t;
+#define CR1_lo_tr fields.tr /* [14: 0] - ??? */
+#define CR1_lo_ein fields.ein /* [23:16] - ??? */
+#define CR1_lo_wfx fields.wfx /* [25] - ??? */
+#define CR1_lo_wpsz fields.wpsz /* [32:26] - regs window parameter */
+#define CR1_lo_wbs fields.wbs /* [39:33] - regs window base */
+#define CR1_lo_cuir fields.cuir /* [56:40] - compilation unit index */
+#define CR1_lo_cui fields.cui /* [55:40] - compilation unit index */
+ /* new release field */
+#define CR1_lo_ic fields.ic /* [56] - index checkup flag */
+#define CR1_lo_psr fields.psr /* [63:57] - processor state reg */
+#define CR1_lo_pm fields.pm /* [57] - privileged mode */
+#define CR1_lo_ie fields.ie /* [58] - interrupt enable */
+#define CR1_lo_sge fields.sge /* [59] - stack guard control */
+ /* enable */
+#define CR1_lo_lw fields.lw /* [60] - last wish */
+#define CR1_lo_uie fields.uie /* [61] - user interrupts enable */
+#define CR1_lo_nmie fields.nmie /* [62] - not masked interrupts */
+ /* enable */
+#define CR1_lo_unmie fields.unmie /* [63] - user not masked */
+ /* interrupts enable */
+#define CR1_lo_half word /* [63: 0] - entire lower */
+#endif /* !(__ASSEMBLY__) */
+
+#ifdef __ASSEMBLY__
+/* for assembly only */
+#define CR1_lo_psr_shift 57 /* [63:57] - processor state reg */
+#define CR1_LO_PSR_PM_BIT (PSR_PM_BIT + CR1_lo_psr_shift)
+#define CR1_LO_PSR_IE_BIT (PSR_IE_BIT + CR1_lo_psr_shift)
+#define CR1_LO_PSR_SGE_BIT (PSR_SGE_BIT + CR1_lo_psr_shift)
+#define CR1_LO_PSR_LW_BIT (PSR_LW_BIT + CR1_lo_psr_shift)
+#define CR1_LO_PSR_UIE_BIT (PSR_UIE_BIT + CR1_lo_psr_shift)
+#define CR1_LO_PSR_NMIE_BIT (PSR_NMIE_BIT + CR1_lo_psr_shift)
+#define CR1_LO_PSR_UNMIE_BIT (PSR_UNMIE_BIT + CR1_lo_psr_shift)
+
+#define CR1_LO_PSR_PM_MASK (0x1UL << CR1_LO_PSR_PM_BIT)
+#define CR1_LO_PSR_IE_MASK (0x1UL << CR1_LO_PSR_IE_BIT)
+#define CR1_LO_PSR_SGE_MASK (0x1UL << CR1_LO_PSR_SGE_BIT)
+#define CR1_LO_PSR_LW_MASK (0x1UL << CR1_LO_PSR_LW_BIT)
+#define CR1_LO_PSR_UIE_MASK (0x1UL << CR1_LO_PSR_UIE_BIT)
+#define CR1_LO_PSR_NMIE_MASK (0x1UL << CR1_LO_PSR_NMIE_BIT)
+#define CR1_LO_PSR_UNMIE_MASK (0x1UL << CR1_LO_PSR_UNMIE_BIT)
+
+#define CR1_lo_cuir_shift 40 /* [55:40] - CUIR value */
+#define CR1_lo_cuir_mask (CUIR_mask << CR1_lo_cuir_shift)
+
+#endif /* __ASSEMBLY__ */
+
+#define CR1_lo_cuir_size 16 /* size in bits */
+#define CUIR_mask ((1UL << CR1_lo_cuir_size) - 1)
+
+#ifndef __ASSEMBLY__
+
+#define E2K_ALIGN_INS 3 /* number of least */
+ /* significant bits of IP */
+ /* are zeroed */
+
+/*
+ * Control Transfer Preparation Register (CTPR)
+ */
+
+ /*
+ * Structure of double-word register
+ * access CTPR.CTPR_xxx or CTPR -> CTPR_xxx
+ */
+typedef union {
+ struct {
+ u64 ta_base : E2K_VA_SIZE; /* [47: 0] */
+ u64 __pad1 : 53 - E2K_VA_MSB; /* [53:48] */
+ u64 ta_tag : 3; /* [56:54] */
+ u64 opc : 2; /* [58:57] */
+ u64 ipd : 2; /* [60:59] */
+ u64 __pad2 : 3; /* [63:61] */
+ } fields;
+ struct {
+ u64 ta_base : E2K_VA_SIZE;
+ u64 __pad1 : 53 - E2K_VA_MSB;
+ u64 ta_tag : 3;
+ u64 opc : 2;
+ u64 ipd : 2;
+ u64 __pad2 : 3;
+ };
+ u64 word;
+} e2k_ctpr_t;
+#define CTPR_ta_base ta_base /* [47: 0] - transfer address */
+#define CTPR_ta_tag ta_tag /* [56:54] - tag */
+#define CTPR_opc opc /* [58:57] - opcode */
+#define CTPR_ipd ipd /* [60:59] - prefetch level */
+#define CTPR_reg word /* [63: 0] - entire register */
+/* Control Transfer Opcodes */
+#define DISP_CT_OPC 0
+#define LDISP_CT_OPC 1
+#define RETURN_CT_OPC 3
+
+/* Control Transfer Tag */
+#define CTPEW_CT_TAG 0 /* empty word */
+#define CTPDW_CT_TAG 1 /* diagnostic word */
+#define CTPPL_CT_TAG 2 /* procedure label */
+#define CTPLL_CT_TAG 3 /* local label */
+#define CTPNL_CT_TAG 4 /* numeric label */
+#define CTPSL_CT_TAG 5 /* system label */
+
+/* Control Transfer Prefetch Level */
+#define NONE_CT_IPD 0 /* no prefetching */
+#define ONE_IP_CT_IPD 1 /* only one instruction on 'ta_base' IP */
+#define TWO_IP_CT_IPD 2 /* two instructions on 'ta_base' and next IP */
+
+typedef union {
+ struct {
+ u64 cui : 16;
+ u64 __pad : 48;
+ };
+ u64 word;
+} e2k_ctpr_hi_t;
+
+
+/* PSR */
+typedef struct e2k_psr_fields { /* Structure of psr reg */
+ u32 pm : 1; /* [ 0] */
+ u32 ie : 1; /* [ 1] */
+ u32 sge : 1; /* [ 2] */
+ u32 lw : 1; /* [ 3] last wish */
+ u32 uie : 1; /* [ 4] user interrupts enable */
+ u32 nmie : 1; /* [ 5] not masked interrupts enable */
+ u32 unmie : 1; /* [ 6] user not masked interrupts */
+ /* enable */
+ u32 unused : 25; /* [31: 7] */
+} e2k_psr_fields_t;
+typedef union e2k_psr {
+ e2k_psr_fields_t fields; /* as fields */
+ u32 word; /* as entire register */
+} e2k_psr_t;
+
+#define PSR_pm fields.pm /* [ 0] */
+#define PSR_ie fields.ie /* [ 1] */
+#define PSR_sge fields.sge /* [ 2] */
+#define PSR_lw fields.lw /* [ 3] */
+#define PSR_uie fields.uie /* [ 4] */
+#define PSR_nmie fields.nmie /* [ 5] */
+#define PSR_unmie fields.unmie /* [ 6] */
+#define PSR_reg word /* [31: 0] - entire */
+ /* single-word register */
+
+#define PSR_PM 0x01U
+#define PSR_IE 0x02U
+#define PSR_SGE 0x04U
+#define PSR_LW 0x08U
+#define PSR_UIE 0x10U
+#define PSR_NMIE 0x20U
+#define PSR_UNMIE 0x40U
+#define PSR_DISABLE 0xff8dU /* ~(PSR_IE|PSR_NMIE|PSR_UIE|PSR_UNMIE) */
+#define PSR_PM_DISABLE 0xfffeU /* ~PSR_PM_AS */
+#endif /* !(__ASSEMBLY__) */
+
+#ifdef __ASSEMBLY__
+/* for assembly only */
+#define PSR_PM_BIT 0
+#define PSR_IE_BIT 1
+#define PSR_SGE_BIT 2
+#define PSR_LW_BIT 3
+#define PSR_UIE_BIT 4
+#define PSR_NMIE_BIT 5
+#define PSR_UNMIE_BIT 6
+
+#define PSR_PM_AS (0x1 << PSR_PM_BIT)
+#define PSR_IE_AS (0x1 << PSR_IE_BIT)
+#define PSR_SGE_AS (0x1 << PSR_SGE_BIT)
+#define PSR_LW_AS (0x1 << PSR_LW_BIT)
+#define PSR_UIE_AS (0x1 << PSR_UIE_BIT)
+#define PSR_NMIE_AS (0x1 << PSR_NMIE_BIT)
+#define PSR_UNMIE_AS (0x1
<< PSR_UNMIE_BIT) +#define PSR_DISABLE (~(PSR_IE_AS | PSR_NMIE_AS | PSR_UIE_AS | PSR_UNMIE_AS)) +#define PSR_PM_DISABLE (~PSR_PM_AS) + +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +/* CUT entry */ + +typedef struct e2k_cute_dw0_fields { /* Structure of the first d-word */ + /* of CUT entry */ + u64 cud_base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 57 - E2K_VA_MSB; /* [57:48] */ + u64 cud_c : 1; /* [58:58] */ + u64 unused2 : 5; /* [63:59] */ +} e2k_cute_dw0_fields_t; + +typedef union e2k_cute_dw0 { + e2k_cute_dw0_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw0_t; + + +typedef struct e2k_cute_dw1_fields { /* Structure of the second d-word */ + /* of CUT entry */ + u64 unused1 : 32; /* [31: 0] */ + u64 cud_size : 32; /* [63:32] */ +} e2k_cute_dw1_fields_t; + +typedef union e2k_cute_dw1 { + e2k_cute_dw1_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw1_t; + +typedef struct e2k_cute_dw2_fields { /* Structure of the third d-word */ + /* of CUT entry */ + u64 gd_base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 63 - E2K_VA_MSB; /* [63:48] */ +} e2k_cute_dw2_fields_t; + +typedef union e2k_cute_dw2 { + e2k_cute_dw2_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw2_t; + +typedef struct e2k_cute_dw3_fields { /* Structure of the fourth d-word */ + /* of CUT entry */ + u64 tsd_base : 15; /* [14: 0] */ + u64 unused1 : 1; /* [15:15] */ + u64 tsd_size : 15; /* [30:16] */ + u64 unused2 : 1; /* [31:31] */ + u64 gd_size : 32; /* [63:32] */ +} e2k_cute_dw3_fields_t; + +typedef union e2k_cute_dw3 { + e2k_cute_dw3_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_cute_dw3_t; + +/* Structure of entire CUT entry */ +typedef struct e2k_cute { + e2k_cute_dw0_t dw0; + e2k_cute_dw1_t dw1; + e2k_cute_dw2_t dw2; + e2k_cute_dw3_t dw3; +} e2k_cute_t; + +#define CUTE_CUD_BASE(p) AS_STRUCT(p->dw0).cud_base +#define CUTE_CUD_SIZE(p) AS_STRUCT(p->dw1).cud_size +#define CUTE_CUD_C(p) AS_STRUCT(p->dw0).cud_c + +#define CUTE_GD_BASE(p) AS_STRUCT(p->dw2).gd_base +#define CUTE_GD_SIZE(p) AS_STRUCT(p->dw3).gd_size + +#define CUTE_TSD_BASE(p) AS_STRUCT(p->dw3).tsd_base +#define CUTE_TSD_SIZE(p) AS_STRUCT(p->dw3).tsd_size + +#endif /* !(__ASSEMBLY__) */ + +#define E2K_ALIGN_CUT 5 /* Compilation units table */ + /* boundaries alignment */ + /* (2's exponent value */ +#ifndef __ASSEMBLY__ +#define E2K_ALIGN_CUT_MASK ((1UL << E2K_ALIGN_CUT) - 1) +#else /* __ASSEMBLY__ */ +#define E2K_ALIGN_CUT_MASK ((1 << E2K_ALIGN_CUT) - 1) +#endif /* !(__ASSEMBLY__) */ + +#ifndef __ASSEMBLY__ + +/* CUTD */ + +typedef e2k_rwp_struct_t e2k_cutd_t; +#define CUTD_base E2K_RWP_base /* [47: 0] - base address */ +#define CUTD_reg E2K_RWP_reg /* [63: 0] - entire double- */ + /* word register */ + +/* CUIR */ + +typedef struct e2k_cuir_fields { /* Structure of the CUIR reg */ + u32 index : 16; /* [15: 0] */ + u32 checkup : 1; /* [16:16] */ + u32 unused1 : 15; /* [31:17] */ +} e2k_cuir_fields_t; + +typedef union e2k_cuir { + e2k_cuir_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_cuir_t; +#define CUIR_index fields.index +#define CUIR_checkup fields.checkup +#define CUIR_reg word + +/* TSD */ + +typedef struct e2k_tsd_fields { /* Structure of the TSD reg */ + u64 base : 15; /* [14: 0] */ + u64 unused1 : 17; /* [31:15] */ + u64 size : 15; /* [46:32] */ + u64 unused2 : 17; /* [63:47] */ +} e2k_tsd_fields_t; + +typedef union e2k_tsd { + e2k_tsd_fields_t fields; /* as fields */ + u64 word; 
/* as entire register */
+} e2k_tsd_t;
+#define TSD_base fields.base
+#define TSD_size fields.size
+#define TSD_reg word
+
+#define CUD_CFLAG_CEARED 0 /* intermodule security verification */
+ /* (ISV) has not passed */
+#define CUD_CFLAG_SET 1 /* ISV has passed */
+
+/* Hardware procedure stack memory mapping (one quad-register record, LE) */
+/* Instruction sets from V2 to V4 */
+typedef struct e2k_mem_ps_v2 {
+ unsigned long word_lo; /* low word value */
+ unsigned long word_hi; /* high word value */
+ unsigned long ext_lo; /* extension of low word */
+ unsigned long ext_hi; /* extension of high word */
+} e2k_mem_ps_v2_t;
+/* Instruction sets from V5 to V6 */
+typedef struct e2k_mem_ps_v5 {
+ unsigned long word_lo; /* low word value */
+ unsigned long ext_lo; /* extension of low word */
+ unsigned long word_hi; /* high word value */
+ unsigned long ext_hi; /* extension of high word */
+} e2k_mem_ps_v5_t;
+typedef union e2k_mem_ps {
+ e2k_mem_ps_v2_t v2;
+ e2k_mem_ps_v5_t v5;
+} e2k_mem_ps_t;
+
+/* interkernel hardware-independent representation */
+typedef struct kernel_mem_ps {
+ unsigned long word_lo; /* low word value */
+ unsigned long word_hi; /* high word value */
+ unsigned long ext_lo; /* extension of low word */
+ unsigned long ext_hi; /* extension of high word */
+} kernel_mem_ps_t;
+
+/* Chain stack memory mapping (one record, LE) */
+
+typedef struct e2k_mem_crstack {
+ e2k_cr0_lo_t cr0_lo;
+ e2k_cr0_hi_t cr0_hi;
+ e2k_cr1_lo_t cr1_lo;
+ e2k_cr1_hi_t cr1_hi;
+} e2k_mem_crs_t;
+
+#endif /* !(__ASSEMBLY__) */
+
+/*
+ * chain stack items relative offset from cr_ind for pcsp
+ */
+
+#define CR0_LO_I (0 * 8)
+#define CR0_HI_I (1 * 8)
+#define CR1_LO_I (2 * 8)
+#define CR1_HI_I (3 * 8)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * cr1.lo.wbs is the size of the previous procedure's window in terms
+ * of 4 32-bit regs.
+ * But in hard stack these regs are in extended format (*2) + */ +#define EXT_4_NR_SZ ((4 * 4) * 2) +#define SZ_OF_CR sizeof(e2k_mem_crs_t) + + +/* + * Trap Info Registers + */ + +typedef e2k_rwp_struct_t e2k_tir_lo_t; + +typedef struct tir_hi_fields { /* Structure of the TIR_hi reg */ + u64 exc : 44; /* exceptions mask [43: 0] */ + u64 al : 6; /* ALS mask [49:44] */ + u64 unused1 : 2; /* unused bits [51:50] */ + u64 aa : 4; /* MOVA mask [55:52] */ + u64 j : 8; /* # of TIR [63:56] */ +} tir_hi_fields_t; + +typedef union tir_hi_struct { + struct { /* Structure of the TIR_hi reg */ + u64 exc : 44; /* exceptions mask [43: 0] */ + u64 al : 6; /* ALS mask [49:44] */ + u64 unused1 : 2; /* unused bits [51:50] */ + u64 aa : 4; /* MOVA mask [55:52] */ + u64 j : 8; /* # of TIR [63:56] */ + }; + tir_hi_fields_t fields; /* as fields */ + u64 word; /* as entire register */ +} e2k_tir_hi_t; + +typedef struct e2k_tir_reg { /* simple TIRj register desc */ + e2k_tir_lo_t TIR_lo; + e2k_tir_hi_t TIR_hi; +} e2k_tir_t; + + /* + * Structure of low word of the register + * access TIR_lo.TIR_lo_xxx or TIR_lo -> TIR_lo_xxx + */ +#define TIR_lo_ip E2K_RWP_base /* [47: 0] - IP of trap */ +#define TIR_lo_reg E2K_RWP_reg /* [63: 0] - entire */ + /* double-word register */ + + /* + * Structure of hi word of the register + * access TIR_hi.TIR_hi_xxx or TIR_hi -> TIR_hi_xxx + */ +#define TIR_hi_reg word /* [63: 0] - entire */ + +#define TIR_hi_exc fields.exc +#define TIR_hi_al fields.al +#define TIR_hi_aa fields.aa +#define TIR_hi_j fields.j + +/* ALS mask structure */ +#define ALS0_mask 0x01 +#define ALS1_mask 0x02 +#define ALS2_mask 0x04 +#define ALS3_mask 0x08 +#define ALS4_mask 0x10 +#define ALS5_mask 0x20 + +#define MAX_TIRs_NUM 19 + +/* + * User processor status register (UPSR) + */ +typedef struct e2k_upsr_fields { + u32 fe : 1; /* float-pointing enable */ + u32 se : 1; /* supervisor mode enable (only for Intel) */ + u32 ac : 1; /* not-aligned access control */ + u32 di : 1; /* delayed interrupt (only for Intel) */ + u32 wp : 1; /* write protection (only for Intel) */ + u32 ie : 1; /* interrupt enable */ + u32 a20 : 1; /* emulation of 1 Mb memory (only for Intel) */ + /* should be 0 for Elbrus */ + u32 nmie : 1; /* not masked interrupt enable */ + /* next field of register exist only on ES2/E2S/E8C/E1C+ CPUs */ + u32 fsm : 1; /* floating comparison mode flag */ + /* 1 - compatible with x86/x87 */ + u32 impt : 1; /* ignore Memory Protection Table flag */ + u32 iuc : 1; /* ignore access right for uncached pages */ + +} e2k_upsr_fields_t; +typedef union e2k_upsr { + e2k_upsr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_upsr_t; +#define UPSR_fe fields.fe +#define UPSR_se fields.se +#define UPSR_ac fields.ac +#define UPSR_di fields.di +#define UPSR_wp fields.wp +#define UPSR_ie fields.ie +#define UPSR_a20 fields.a20 +#define UPSR_nmie fields.nmie +#define UPSR_fsm fields.fsm +#define UPSR_impt fields.impt +#define UPSR_iuc fields.iuc +#define UPSR_reg word + +#endif /* !(__ASSEMBLY__) */ + +#define UPSR_FE 0x01U +#define UPSR_SE 0x02U +#define UPSR_AC 0x04U +#define UPSR_DI 0x08U +#define UPSR_WP 0x10U +#define UPSR_IE 0x20U +#define UPSR_A20 0x40U +#define UPSR_NMIE 0x80U +/* next field of register exist only on ES2/E2S/E8C/E1C+ CPUs */ +#define UPSR_FSM 0x100U +#define UPSR_IMPT 0x200U +#define UPSR_IUC 0x400U +#define UPSR_DISABLE (0xff5f) /* ~(UPSR_IE_AS|UPSR_NMIE_AS) */ + +/* (IS_UPT_E3S ? 
0 : UPSR_SE_AS) */
+#ifndef IS_UPT_E3S
+ #define KERNEL_UPSR_SE_INIT 0
+#else
+ #define KERNEL_UPSR_SE_INIT UPSR_SE
+#endif /* IS_UPT_E3S */
+#ifndef CONFIG_ACCESS_CONTROL
+ #define KERNEL_UPSR_ALL_INIT (UPSR_FE | KERNEL_UPSR_SE_INIT)
+#else
+ #define KERNEL_UPSR_ALL_INIT (UPSR_FE | KERNEL_UPSR_SE_INIT | UPSR_AC)
+#endif /* CONFIG_ACCESS_CONTROL */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Processor Identification Register (IDR)
+ */
+typedef union e2k_idr {
+ struct {
+ u64 mdl : 8; /* CPU model number */
+ u64 rev : 4; /* revision number */
+ u64 wbl : 3; /* write back length of L2 */
+ u64 core : 5; /* number of the core within the node */
+ u64 pn : 4; /* node number from RT_LCFG0.pn */
+ u64 hw_virt : 1; /* hardware virtualization enabled */
+ u64 hw_virt_ver : 4; /* hardware virtualization revision */
+ /* number */
+ u64 reserve : 35; /* reserved */
+ };
+ struct {
+ u64 __pad : 12;
+ u64 ms : 52; /* model specific info */
+ };
+ u64 word; /* as entire register */
+} e2k_idr_t;
+
+#define IDR_reg word /* [63: 0] - entire */
+
+#define IDR_mdl mdl
+#define IDR_rev rev
+#define IDR_ms ms
+#define IDR_wbl wbl
+#define IDR_ms_core core
+#define IDR_ms_pn pn
+#define IDR_ms_hw_virt hw_virt
+#define IDR_ms_hw_virt_ver hw_virt_ver
+
+/* CPU model numbers */
+#define IDR_NONE 0x00 /* No such hardware exists */
+#define IDR_E2S_MDL 0x03 /* Elbrus-4C (Elbrus-2S) */
+#define IDR_ES2_DSP_MDL 0x04 /* Elbrus-2C+ */
+#define IDR_E4S_MDL 0x05 /* reserved */
+#define IDR_ES2_RU_MDL 0x06 /* Elbrus-2CM (without DSP) */
+ /* russian MICRON release */
+#define IDR_E8C_MDL 0x07 /* Elbrus-8C */
+#define IDR_E1CP_MDL 0x08 /* Elbrus-1C+ one processor e2s */
+ /* + graphic */
+#define IDR_E8C2_MDL 0x09 /* Elbrus-8C2 */
+#define IDR_E12C_MDL 0x0a /* Elbrus-12C */
+#define IDR_E16C_MDL 0x0b /* Elbrus-16C */
+#define IDR_E2C3_MDL 0x0c /* Elbrus-2C3 */
+
+/* Convert IDR register write back length code to number of bytes */
+/* using current WBL code presentation */
+#define IDR_WBL_TO_BYTES(wbl) ((wbl) ?
(1 << (wbl + 4)) : 1) + +/* + * Processor Core Mode Register (CORE_MODE) + */ +typedef union e2k_core_mode { + struct { + u32 reserve0 : 1; /* bit #0 reserved */ + u32 no_stack_prot : 1; /* no check stack pointers */ + u32 sep_virt_space : 1; /* separate page tables for kernel */ + /* and users */ + u32 gmi : 1; /* indicator of guest mode */ + /* actual only in guest mode */ + u32 hci : 1; /* indicator of hypercalls enabled */ + /* actual only in guest mode */ + u32 pt_v6 : 1; /* new Page Tables structures mode */ + /* only for ISET >= V6 */ + u32 sp_rollback_en : 1; /* hardware rollback PSP/PCSP stack */ + /* pointers is enabled */ + u32 reserve6 : 25; /* other bits reserved */ + }; + u32 word; /* as entire register */ +} e2k_core_mode_t; + +#define CORE_MODE_reg word /* [31: 0] - entire */ + +#define CORE_MODE_no_stack_prot no_stack_prot +#define CORE_MODE_sep_virt_space sep_virt_space +#define CORE_MODE_gmi gmi +#define CORE_MODE_hci hci +#define CORE_MODE_pt_v6 pt_v6 +#define CORE_MODE_sp_rollback_en sp_rollback_en + +/* + * Packed Floating Point Flag Register (PFPFR) + */ +typedef struct e2k_pfpfr_fields { + u32 ie : 1; /* [0] */ + u32 de : 1; /* [1] */ + u32 ze : 1; /* [2] */ + u32 oe : 1; /* [3] */ + u32 ue : 1; /* [4] */ + u32 pe : 1; /* [5] */ + u32 zero1 : 1; /* [6] */ + u32 im : 1; /* [7] */ + u32 dm : 1; /* [8] */ + u32 zm : 1; /* [9] */ + u32 om : 1; /* [10] */ + u32 um : 1; /* [11] */ + u32 pm : 1; /* [12] */ + u32 rc : 2; /* [14:13] */ + u32 fz : 1; /* [15] */ + u32 zero2 : 10; /* [25:16] */ + u32 die : 1; /* [26] */ + u32 dde : 1; /* [27] */ + u32 dze : 1; /* [28] */ + u32 doe : 1; /* [29] */ + u32 due : 1; /* [30] */ + u32 dpe : 1; /* [31] */ +} e2k_pfpfr_fields_t; +typedef union e2k_pfpfr { + e2k_pfpfr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_pfpfr_t; + +#define PFPFR_ie fields.ie +#define PFPFR_de fields.de +#define PFPFR_ze fields.ze +#define PFPFR_oe fields.oe +#define PFPFR_ue fields.ue +#define PFPFR_pe fields.pe +#define PFPFR_zero1 fields.zero1 +#define PFPFR_im fields.im +#define PFPFR_dm fields.dm +#define PFPFR_zm fields.zm +#define PFPFR_om fields.om +#define PFPFR_um fields.um +#define PFPFR_pm fields.pm +#define PFPFR_rc fields.rc +#define PFPFR_fz fields.fz +#define PFPFR_zero2 fields.zero2 +#define PFPFR_die fields.die +#define PFPFR_dde fields.dde +#define PFPFR_dze fields.dze +#define PFPFR_doe fields.doe +#define PFPFR_due fields.due +#define PFPFR_dpe fields.dpe +#define PFPFR_reg word + +/* + * Floating point control register (FPCR) + */ +typedef struct e2k_fpcr_fields { + u32 im : 1; /* [0] */ + u32 dm : 1; /* [1] */ + u32 zm : 1; /* [2] */ + u32 om : 1; /* [3] */ + u32 um : 1; /* [4] */ + u32 pm : 1; /* [5] */ + u32 one1 : 1; /* [6] */ + u32 zero1 : 1; /* [7] */ + u32 pc : 2; /* [9:8] */ + u32 rc : 2; /* [11:10] */ + u32 ic : 1; /* [12] */ + u32 zero2 : 3; /* [15:13] */ +} e2k_fpcr_fields_t; +typedef union e2k_fpcr { + e2k_fpcr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_fpcr_t; + +#define FPCR_im fields.im +#define FPCR_dm fields.dm +#define FPCR_zm fields.zm +#define FPCR_om fields.om +#define FPCR_um fields.um +#define FPCR_pm fields.pm +#define FPCR_one1 fields.one1 +#define FPCR_zero1 fields.zero1 +#define FPCR_pc fields.pc +#define FPCR_rc fields.rc +#define FPCR_ic fields.ic +#define FPCR_zero2 fields.zero2 +#define FPCR_reg word + + +/* + * Floating point status register (FPSR) + */ +typedef struct e2k_fpsr_fields { + u32 ie : 1; /* [0] */ + u32 de : 1; /* [1] */ + u32 ze : 
1; /* [2] */ + u32 oe : 1; /* [3] */ + u32 ue : 1; /* [4] */ + u32 pe : 1; /* [5] */ + u32 zero1 : 1; /* [6] */ + u32 es : 1; /* [7] */ + u32 zero2 : 1; /* [8] */ + u32 c1 : 1; /* [9] */ + u32 zero3 : 5; /* [14:10] */ + u32 bf : 1; /* [15] */ +} e2k_fpsr_fields_t; +typedef union e2k_fpsr { + e2k_fpsr_fields_t fields; /* as fields */ + u32 word; /* as entire register */ +} e2k_fpsr_t; + +#define FPSR_ie fields.ie +#define FPSR_de fields.de +#define FPSR_ze fields.ze +#define FPSR_oe fields.oe +#define FPSR_ue fields.ue +#define FPSR_pe fields.pe +#define FPSR_zero1 fields.zero1 +#define FPSR_es fields.es +#define FPSR_zero2 fields.zero2 +#define FPSR_c1 fields.c1 +#define FPSR_zero3 fields.zero3 +#define FPSR_bf fields.bf +#define FPSR_reg word + +typedef union { + struct { + u32 user : 1; + u32 system : 1; + u32 trap : 1; + u32 unused1 : 13; + u32 event : 7; + u32 unused2 : 9; + } fields[2]; + struct { + u64 __pad1 : 11; + u64 u_m_en : 1; + u64 mode : 4; + u64 __pad2 : 48; + }; + u64 word; +} e2k_dimcr_t; +#define DIMCR_reg word + +typedef union { + struct { + u32 b0 : 1; + u32 b1 : 1; + u32 b2 : 1; + u32 b3 : 1; + u32 bt : 1; + u32 m0 : 1; + u32 m1 : 1; + u32 ss : 1; + u32 btf : 1; + }; + struct { /* structure of register */ + u32 b0 : 1; /* [0] */ + u32 b1 : 1; /* */ + u32 b2 : 1; /* */ + u32 b3 : 1; /* */ + u32 bt : 1; /* [4] */ + u32 m0 : 1; /* [5] */ + u32 m1 : 1; /* [6] */ + u32 ss : 1; /* [7] */ + u32 btf : 1; /* [8] */ + } fields; + u32 word; +} e2k_dibsr_t; +#define DIBSR_reg word + +#define E2K_DIBSR_MASK(cp_num) (0x1ULL << (cp_num)) +#define E2K_DIBSR_MASK_ALL_BP 0xfULL + +typedef union { + struct { + u32 v0 : 1; + u32 t0 : 1; + u32 v1 : 1; + u32 t1 : 1; + u32 v2 : 1; + u32 t2 : 1; + u32 v3 : 1; + u32 t3 : 1; + u32 bt : 1; + u32 stop : 1; + u32 btf : 1; + u32 gm : 1; + }; + struct { + u32 v0 : 1; + u32 t0 : 1; + u32 v1 : 1; + u32 t1 : 1; + u32 v2 : 1; + u32 t2 : 1; + u32 v3 : 1; + u32 t3 : 1; + u32 bt : 1; + u32 stop : 1; + u32 btf : 1; + u32 gm : 1; + } fields; + u32 word; +} e2k_dibcr_t; +#define DIBCR_reg word + +#define E2K_DIBCR_MASK(cp_num) (0x3ULL << ((cp_num) * 2)) + +typedef union { + struct { + struct { + u64 base : E2K_VA_SIZE; + u64 __pad1 : 59 - E2K_VA_SIZE; + u64 rw : 2; + u64 __pad2 : 3; + }; + struct { + u64 ind : 32; + u64 size : 32; + }; + }; + struct { + u64 lo; + u64 hi; + }; +} e2k_dimtp_t; + +#define E2K_DIMTP_ALIGN 32 + + +/* + * Global registers (saved state) definition + */ +typedef struct e2k_svd_gregs_struct { + u64 base; /* exists any time */ + u32 extension; /* when holds an FP value */ + u8 tag; /* any time too */ +} e2k_svd_gregs_t; + +/* CU_HW0 register */ +#define _CU_HW0_TRWM_ITAG_MASK 0x00000007 /* IB tag */ +#define _CU_HW0_TRWM_IDATA_MASK 0x00000038 /* IB data */ +#define _CU_HW0_TRWM_CF_MASK 0x000001c0 /* Chain File */ +/* Disable IB snooping */ +#define _CU_HW0_IB_SNOOP_DISABLE_MASK 0x00000200 +#define _CU_HW0_BIST_CF_MASK 0x00000400 /* Chain File */ +#define _CU_HW0_BIST_TU_MASK 0x00000800 /* Trap Unit */ +#define _CU_HW0_BIST_ITAG_MASK 0x00001000 /* IB tag */ +#define _CU_HW0_BIST_ITLB_TAG_MASK 0x00002000 /* ITLB tag */ +#define _CU_HW0_BIST_ITLB_DATA_MASK 0x00004000 /* ITLB data */ +#define _CU_HW0_BIST_IDATA_NM_MASK 0x00078000 /* IB data */ +#define _CU_HW0_BIST_IDATA_CNT_MASK 0x1ff80000 /* IB tag */ +#define _CU_HW0_PIPE_FROST_DISABLE_MASK 0x20000000 /* Instruction pipe */ +#define _CU_HW0_RF_CLEAN_DISABLE_MASK 0x40000000 /* Register File */ +/* Disable hardware virtualization support */ +#define _CU_HW0_VIRT_DISABLE_MASK 0x80000000 + 
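+
+/*
+ * Illustrative sketch (editor's addition, compiled out): the
+ * _CU_HW0_* values above are plain bit masks over a CU_HW0 register
+ * value.
+ */
+#if 0
+static inline int cu_hw0_ib_snoop_disabled(u64 cu_hw0)
+{
+ return (cu_hw0 & _CU_HW0_IB_SNOOP_DISABLE_MASK) != 0;
+}
+#endif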
+ +struct hw_stacks { + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pshtp_t pshtp; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcshtp_t pcshtp; +}; + + +typedef union { + struct { + u64 div : 32; + u64 mdiv : 1; + u64 mode : 1; + u64 trn : 1; + u64 sw : 1; + u64 wsclkr : 1; + u64 __pad1 : 19; + u64 ver : 8; + }; + struct { + u64 __pad2 : 56; + u64 w_sclkr_hi : 1; + u64 sclkm3 : 1; + u64 __pad3 : 6; + }; + u64 word; +} e2k_sclkm1_t; + +typedef enum cu_reg_no { + undef_cu_reg_no = -1, + SCLKM1_cu_reg_no = 0x70, + SCLKM2_cu_reg_no = 0x71, + SCLKM3_cu_reg_no = 0x72, + IDR_cu_reg_no = 0x8a, + CLKR_cu_reg_no = 0x90, + SCLKR_cu_reg_no = 0x92, + DIBCR_cu_reg_no = 0x40, + DIMCR_cu_reg_no = 0x41, + DIBSR_cu_reg_no = 0x42, + DTCR_cu_reg_no = 0x43, + DIMTP_hi_cu_reg_no = 0x46, + DIMTP_lo_cu_reg_no = 0x47, + DIBAR0_cu_reg_no = 0x48, + DIBAR1_cu_reg_no = 0x49, + DIBAR2_cu_reg_no = 0x4a, + DIBAR3_cu_reg_no = 0x4b, + DIMAR0_cu_reg_no = 0x4c, + DIMAR1_cu_reg_no = 0x4d, + DTARF_cu_reg_no = 0x4e, + DTART_cu_reg_no = 0x4f, +} cu_reg_no_t; + +#endif /* ! __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _E2K_CPU_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/current.h b/arch/e2k/include/asm/current.h new file mode 100644 index 0000000..c201880 --- /dev/null +++ b/arch/e2k/include/asm/current.h @@ -0,0 +1,12 @@ +#ifndef _E2K_CURRENT_H +#define _E2K_CURRENT_H + +#include +#include + +struct task_struct; +register struct task_struct *current DO_ASM_GET_GREG_MEMONIC( + CURRENT_TASK_GREG); +#define native_current() current + +#endif /* _E2K_CURRENT_H */ diff --git a/arch/e2k/include/asm/debug_print.h b/arch/e2k/include/asm/debug_print.h new file mode 100644 index 0000000..e31830c --- /dev/null +++ b/arch/e2k/include/asm/debug_print.h @@ -0,0 +1,46 @@ +#ifndef _DEBUG_PRINT_H_ +#define _DEBUG_PRINT_H_ + +#include +#include + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#ifdef E2K_P2V + +#include + +# define DebugPrint(condition, fmt, ...) \ +do { \ + if (condition) \ + do_boot_printk("%s: " fmt, __func__ ,##__VA_ARGS__); \ +} while (0) + +# define DebugPrintCont(condition, fmt, ...) \ +do { \ + if (condition) \ + do_boot_printk(fmt, ##__VA_ARGS__); \ +} while (0) + +#else + +# define DebugPrint(condition, fmt, ...) \ +do { \ + if (condition) \ + printk(KERN_DEBUG "%d %d %s: " fmt, \ + raw_smp_processor_id(), current->pid, __func__ , \ + ##__VA_ARGS__); \ +} while (0) + +# define DebugPrintCont(condition, fmt, ...) \ +do { \ + if (condition) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ +} while (0) + +#endif + +#endif +#endif +#endif /* _DEBUG_PRINT_H_ */ diff --git a/arch/e2k/include/asm/delay.h b/arch/e2k/include/asm/delay.h new file mode 100644 index 0000000..9900932 --- /dev/null +++ b/arch/e2k/include/asm/delay.h @@ -0,0 +1,7 @@ +#ifndef _E2K_DELAY_H_ +#define _E2K_DELAY_H_ + +extern void udelay(unsigned long usecs); +extern void __delay(unsigned long loops); + +#endif /* _E2K_DELAY_H_ */ diff --git a/arch/e2k/include/asm/device.h b/arch/e2k/include/asm/device.h new file mode 100644 index 0000000..710ad02 --- /dev/null +++ b/arch/e2k/include/asm/device.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_DEVICE_H +#define _ASM_E2K_DEVICE_H +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include + +struct dev_archdata { + unsigned int link; + struct e2k_iommu_dev_data iommu; +}; + +struct pdev_archdata { +}; + +#define dev_to_link(__dev) (__dev ? 
(__dev)->archdata.link : 0) +#define set_dev_link(__dev, __link) do { \ + (__dev)->archdata.link = __link; \ + } while(0) + +#endif /* _ASM_E2K_DEVICE_H */ diff --git a/arch/e2k/include/asm/dma-direct.h b/arch/e2k/include/asm/dma-direct.h new file mode 100644 index 0000000..e200ef9 --- /dev/null +++ b/arch/e2k/include/asm/dma-direct.h @@ -0,0 +1,6 @@ +#ifndef _ASM_E2K_DMA_DIRECT_H +#define _ASM_E2K_DMA_DIRECT_H + +#include + +#endif /* _ASM_E2K_DMA_DIRECT_H */ diff --git a/arch/e2k/include/asm/dma-mapping.h b/arch/e2k/include/asm/dma-mapping.h new file mode 100644 index 0000000..2e568dc --- /dev/null +++ b/arch/e2k/include/asm/dma-mapping.h @@ -0,0 +1,6 @@ +#ifndef _ASM_E2K_DMA_MAPPING_H +#define _ASM_E2K_DMA_MAPPING_H + +#include + +#endif /* _ASM_E2K_DMA_MAPPING_H */ diff --git a/arch/e2k/include/asm/dma.h b/arch/e2k/include/asm/dma.h new file mode 100644 index 0000000..80ffae3 --- /dev/null +++ b/arch/e2k/include/asm/dma.h @@ -0,0 +1,297 @@ +/* $Id: dma.h,v 1.4 2006/02/02 14:25:30 atic Exp $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_DMA_H +#define _ASM_DMA_H + +#include /* And spinlocks */ +#include /* need byte IO */ +#include + + +#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER +#define dma_outb outb_p +#else +#define dma_outb outb +#endif + +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
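+ * A worked example (editor's illustration, not part of the original
+ * note): to transfer 4096 bytes from physical address 0x123456 on
+ * channel 5, the page register takes (0x123456 >> 16) & 0xfe = 0x12,
+ * the base address register takes (0x123456 >> 1) & 0xffff = 0x1a2b
+ * (a word address), and the count register takes 4096/2 - 1 = 2047
+ * (0x7ff) - the same arithmetic that set_dma_addr() and set_dma_count()
+ * below perform.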
+ * + */ + +#define MAX_DMA_CHANNELS 8 + +/* The maximum address that we can perform a DMA transfer to on this platform */ +#define MAX_DMA_ADDRESS (PAGE_OFFSET + (1UL << ARCH_ZONE_DMA_BITS)) + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + + +extern spinlock_t dma_spin_lock; + +static __inline__ unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static __inline__ void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static __inline__ void enable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static __inline__ void disable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while holding the DMA lock ! 
--- + */ +static __inline__ void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr<=3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr&3), DMA2_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register, but a 64k boundary + * may have been crossed. + */ +static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) +{ + switch(dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + set_dma_page(dmanr, a>>16); + if (dmanr <= 3) { + dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + } else { + dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + } +} + + +/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + } else { + dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE + : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr<=3)? 
count : (count<<1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* _ASM_DMA_H */ diff --git a/arch/e2k/include/asm/e12c.h b/arch/e2k/include/asm/e12c.h new file mode 100644 index 0000000..918c746 --- /dev/null +++ b/arch/e2k/include/asm/e12c.h @@ -0,0 +1,62 @@ +#ifndef _ASM_E12C_H_ +#define _ASM_E12C_H_ + +/* + * Machine (based on E12C processor) topology: + * E12C is NUMA system on distributed memory and can have several nodes. + * Each node can have some memory (faster to access) and max 12 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e12c_setup_arch(void); +extern void e12c_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +#endif + +#define E12C_CPU_VENDOR ES2_CPU_VENDOR +#define E12C_CPU_FAMILY E16C_CPU_FAMILY + +#define E12C_NR_NODE_CPUS 12 +#define E12C_MAX_NR_NODE_CPUS 16 + +#define E12C_NODE_IOLINKS 1 + +#define E12C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E12C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E12C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E12C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E12C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E12C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E12C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E12C_MLT_SIZE ES2_MLT_SIZE + +#define E12C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E12C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E12C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E12C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E12C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E12C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E12C_SIC_MC_COUNT E8C_SIC_MC_COUNT +#define E12C_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E12C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E12C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E12C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E12C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E12C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E12C_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +#define E12C_L3_CACHE_BYTES E8C_L3_CACHE_BYTES + +#endif /* _ASM_E12C_H_ */ diff --git a/arch/e2k/include/asm/e16c.h b/arch/e2k/include/asm/e16c.h new file mode 100644 index 0000000..f27e6eb --- /dev/null +++ b/arch/e2k/include/asm/e16c.h @@ -0,0 +1,62 @@ +#ifndef _ASM_E16C_H_ +#define _ASM_E16C_H_ + +/* + * Machine (based on E16C processor) topology: + * E16C is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 16 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e16c_setup_arch(void); +extern void e16c_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +#endif + +#define E16C_CPU_VENDOR ES2_CPU_VENDOR +#define E16C_CPU_FAMILY 6 + +#define E16C_NR_NODE_CPUS 16 +#define E16C_MAX_NR_NODE_CPUS 16 + +#define E16C_NODE_IOLINKS 1 + +#define E16C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E16C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E16C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E16C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E16C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E16C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E16C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E16C_MLT_SIZE ES2_MLT_SIZE + +#define E16C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E16C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E16C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E16C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E16C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E16C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E16C_SIC_MC_COUNT E8C_SIC_MC_COUNT +#define E16C_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E16C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E16C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E16C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E16C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E16C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E16C_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +#define E16C_L3_CACHE_BYTES E8C_L3_CACHE_BYTES + +#endif /* _ASM_E16C_H_ */ diff --git a/arch/e2k/include/asm/e1cp.h b/arch/e2k/include/asm/e1cp.h new file mode 100644 index 0000000..5c4204d --- /dev/null +++ b/arch/e2k/include/asm/e1cp.h @@ -0,0 +1,49 @@ +#ifndef _ASM_E1CP_H_ +#define _ASM_E1CP_H_ + +/* + * Machine (based on E1C+ processor) topology: + * E1C+ is one core CPU + graphical processor to support 3D, so + * - is not NUMA system + * - is not SMP system + */ + +#ifndef __ASSEMBLY__ +extern void boot_e1cp_setup_arch(void); +extern void e1cp_setup_machine(void); +#endif + +#define E1CP_CPU_VENDOR ES2_CPU_VENDOR +#define E1CP_CPU_FAMILY ES2_CPU_FAMILY + +#define E1CP_NR_NODE_CPUS 1 +#define E1CP_MAX_NR_NODE_CPUS E1CP_NR_NODE_CPUS + +#define E1CP_NODE_IOLINKS 2 + +#define E1CP_PCICFG_AREA_PHYS_BASE 0x000000ff10000000UL +#define E1CP_PCICFG_AREA_SIZE 0x0000000010000000UL + +#define E1CP_NBSR_AREA_OFFSET E2S_NBSR_AREA_OFFSET +#define E1CP_NBSR_AREA_SIZE E2S_NBSR_AREA_SIZE + +#define E1CP_MLT_SIZE ES2_MLT_SIZE + +#define E1CP_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E1CP_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E1CP_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E1CP_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E1CP_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E1CP_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E1CP_SIC_MC_COUNT ES2_SIC_MC_COUNT +#define E1CP_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E1CP_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E1CP_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E1CP_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E1CP_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E1CP_L2_CACHE_BYTES ES2_L2_CACHE_BYTES + +#endif /* _ASM_E1CP_H_ */ diff 
--git a/arch/e2k/include/asm/e2c3.h b/arch/e2k/include/asm/e2c3.h new file mode 100644 index 0000000..d75a717 --- /dev/null +++ b/arch/e2k/include/asm/e2c3.h @@ -0,0 +1,60 @@ +#ifndef _ASM_E2C3_H_ +#define _ASM_E2C3_H_ + +/* + * Machine (based on E2C3 processor) topology: + * E2C3 is NUMA system on distributed memory and can have several nodes. + * Each node can have some memory (faster to access) and max 2 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e2c3_setup_arch(void); +extern void e2c3_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +#endif + +#define E2C3_CPU_VENDOR ES2_CPU_VENDOR +#define E2C3_CPU_FAMILY E16C_CPU_FAMILY + +#define E2C3_NR_NODE_CPUS 2 +#define E2C3_MAX_NR_NODE_CPUS 16 + +#define E2C3_NODE_IOLINKS 1 + +#define E2C3_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E2C3_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E2C3_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E2C3_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E2C3_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E2C3_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E2C3_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E2C3_MLT_SIZE ES2_MLT_SIZE + +#define E2C3_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E2C3_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E2C3_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E2C3_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E2C3_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E2C3_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E2C3_SIC_MC_COUNT E8C_SIC_MC_COUNT +#define E2C3_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E2C3_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E2C3_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E2C3_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E2C3_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E2C3_L2_CACHE_BYTES ES2_L2_CACHE_BYTES + +#endif /* _ASM_E2C3_H_ */ diff --git a/arch/e2k/include/asm/e2k-iommu.h b/arch/e2k/include/asm/e2k-iommu.h new file mode 100644 index 0000000..c54bb90 --- /dev/null +++ b/arch/e2k/include/asm/e2k-iommu.h @@ -0,0 +1,19 @@ +#ifndef __ASM_E2K_IOMMU_H +#define __ASM_E2K_IOMMU_H + +/* + * This struct contains device specific data for the IOMMU + */ +struct e2k_iommu_dev_data { + struct e2k_iommu_domain *domain; /* Domain the device is bound to */ + struct kvm *kvm; /* Virtual machine, to which device is + * passed */ +}; + +extern int iommu_panic_off; +extern void e2k_iommu_error_interrupt(void); +extern void e2k_iommu_guest_write_ctrl(u32 reg_value); +extern void e2k_iommu_setup_guest_2d_dte(struct kvm *kvm, u64 g_page_table); +extern void e2k_iommu_flush_guest(struct kvm *kvm, u64 command); + +#endif /* __ASM_E2K_IOMMU_H */ diff --git a/arch/e2k/include/asm/e2k.h b/arch/e2k/include/asm/e2k.h new file mode 100644 index 0000000..9a7308b --- /dev/null +++ b/arch/e2k/include/asm/e2k.h @@ -0,0 +1,422 @@ +#ifndef _ASM_E2K_H_ +#define _ASM_E2K_H_ + +#include +#include + +#include /* E2K_VA_MSB */ +#include +#include +#include +#include +#include +#include +#include + +/* CPU model numbers */ +#define IDR_E2K_VIRT_MDL 0x00 /* machine is virtual, so CPUs also */ + +#define MACHINE_ID_NONE 0x0000 +#define MACHINE_ID_CPU_TYPE_MASK 0x000f +#define MACHINE_ID_SIMUL 0x0010 +#define MACHINE_ID_E2K_FULL_SIC 0x0020 +#define 
MACHINE_ID_E2K_IOHUB 0x0040 +#define MACHINE_ID_L_IOMMU 0x0080 +#define MACHINE_ID_E2K_LEGACY_SIC 0x0100 /* host bridge & legacy NBSR */ +#define MACHINE_ID_E2K_VIRT_IO 0x0400 /* machine is virtual and */ + /* IO simulates on user level */ + /* (for example by QEMU) */ +#define MACHINE_ID_HW_VIRT 0x4000 /* hardware virtualized VM */ +#define MACHINE_ID_VIRT 0x8000 /* soft paravirtualized VM */ +#define MACHINE_ID_E2K_IOMMU 0x10000 + +#define MACHINE_ID_ES2_DSP (IDR_ES2_DSP_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB) +#define MACHINE_ID_ES2_RU (IDR_ES2_RU_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB) +#define MACHINE_ID_E2S (IDR_E2S_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +#define MACHINE_ID_E8C (IDR_E8C_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +#define MACHINE_ID_E1CP (IDR_E1CP_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +#define MACHINE_ID_E8C2 (IDR_E8C2_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_L_IOMMU) +/* + * IO_* NBSRs are absent in models with EIOHub. Using LEGACY_SIC with FULL_SIC + * helps to avoid reading those NBSRs while still using IO_AREA_PHYS_BASE + * defined for FULL_SIC + */ +#define MACHINE_ID_E12C (IDR_E12C_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU) +#define MACHINE_ID_E16C (IDR_E16C_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU) +#define MACHINE_ID_E2C3 (IDR_E2C3_MDL | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU) +#define MACHINE_ID_E2K_VIRT (IDR_E2K_VIRT_MDL | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_VIRT_IO) +#define MACHINE_ID_E2K_HW_VIRT (IDR_E2K_VIRT_MDL | \ + MACHINE_ID_HW_VIRT | \ + MACHINE_ID_E2K_LEGACY_SIC | \ + MACHINE_ID_E2K_FULL_SIC | \ + MACHINE_ID_E2K_IOHUB | \ + MACHINE_ID_E2K_IOMMU | \ + MACHINE_ID_E2K_VIRT_IO) + +#define MACHINE_ID_ES2_DSP_LMS (MACHINE_ID_ES2_DSP | \ + MACHINE_ID_SIMUL) +#define MACHINE_ID_ES2_RU_LMS (MACHINE_ID_ES2_RU | MACHINE_ID_SIMUL) +#define MACHINE_ID_E2S_LMS (MACHINE_ID_E2S | MACHINE_ID_SIMUL) +#define MACHINE_ID_E8C_LMS (MACHINE_ID_E8C | MACHINE_ID_SIMUL) +#define MACHINE_ID_E1CP_LMS (MACHINE_ID_E1CP | MACHINE_ID_SIMUL) +#define MACHINE_ID_E8C2_LMS (MACHINE_ID_E8C2 | MACHINE_ID_SIMUL) +#define MACHINE_ID_E12C_LMS (MACHINE_ID_E12C | MACHINE_ID_SIMUL) +#define MACHINE_ID_E16C_LMS (MACHINE_ID_E16C | MACHINE_ID_SIMUL) +#define MACHINE_ID_E2C3_LMS (MACHINE_ID_E2C3 | MACHINE_ID_SIMUL) + +#define MACHINE_ID_VIRT_ES2_DSP (MACHINE_ID_ES2_DSP | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_ES2_RU (MACHINE_ID_ES2_RU | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E2S (MACHINE_ID_E2S | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E8C (MACHINE_ID_E8C | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E1CP (MACHINE_ID_E1CP | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E8C2 (MACHINE_ID_E8C2 | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E12C (MACHINE_ID_E12C | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E16C (MACHINE_ID_E16C | MACHINE_ID_VIRT) +#define MACHINE_ID_VIRT_E2C3 (MACHINE_ID_E2C3 | MACHINE_ID_VIRT) + +#ifdef CONFIG_E2K_SIMULATOR +# define MACHINE_SIMUL_FLAG MACHINE_ID_SIMUL +#else +# define MACHINE_SIMUL_FLAG 0 +#endif + +#ifdef CONFIG_E2K_MACHINE + #if defined(CONFIG_E2K_ES2_DSP) + #define 
native_machine_id (MACHINE_ID_ES2_DSP | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_ES2_RU) + #define native_machine_id (MACHINE_ID_ES2_RU | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E2S) + #define native_machine_id (MACHINE_ID_E2S | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E8C) + #define native_machine_id (MACHINE_ID_E8C | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E1CP) + #define native_machine_id (MACHINE_ID_E1CP | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E8C2) + #define native_machine_id (MACHINE_ID_E8C2 | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E12C) + #define native_machine_id (MACHINE_ID_E12C | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E16C) + #define native_machine_id (MACHINE_ID_E16C | MACHINE_SIMUL_FLAG) + #elif defined(CONFIG_E2K_E2C3) + #define native_machine_id (MACHINE_ID_E2C3 | MACHINE_SIMUL_FLAG) + #else + # error "E2K MACHINE type is not defined" + #endif +#elif defined(CONFIG_ES2) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_ES2_DSP_LMS +#elif defined(CONFIG_E2S) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E2S_LMS +#elif defined(CONFIG_E8C) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E8C_LMS +#elif defined(CONFIG_E1CP) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E1CP_LMS +#elif defined(CONFIG_E8C2) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E8C2_LMS +#elif defined(CONFIG_E12C) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E12C_LMS +#elif defined(CONFIG_E16C) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E16C_LMS +#elif defined(CONFIG_E2C3) /* can be defined only for tiny boot on lms */ + #define native_machine_id MACHINE_ID_E2C3_LMS +#else /* ! CONFIG_E2K_MACHINE && ! 
our boot on lms */ +extern unsigned int __nodedata native_machine_id; +#endif /* CONFIG_E2K_MACHINE */ + +extern const char *e2k_get_cpu_type_name(int mach_type_id); +extern const char *e2k_get_mach_type_name(int mach_type_id); +extern int e2k_get_machine_type_name(int mach_id); +extern void __init native_setup_machine(void); +extern void native_set_mach_type_id(void); +extern const char *native_get_mach_type_name(void); + +extern void e2k_init_IRQ(void); + +#define IS_THE_MACHINE_ES2_DSP(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_ES2_DSP_MDL) +#define IS_THE_MACHINE_ES2_RU(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_ES2_RU_MDL) +#define IS_THE_MACHINE_ES2(mach_id) \ + ((IS_THE_MACHINE_ES2_DSP(mach_id)) || \ + (IS_THE_MACHINE_ES2_RU(mach_id))) +#define IS_THE_MACHINE_E2S(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2S_MDL) +#define IS_THE_MACHINE_E8C(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E8C_MDL) +#define IS_THE_MACHINE_E1CP(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E1CP_MDL) +#define IS_THE_MACHINE_E8C2(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E8C2_MDL) +#define IS_THE_MACHINE_E12C(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E12C_MDL) +#define IS_THE_MACHINE_E16C(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E16C_MDL) +#define IS_THE_MACHINE_E2C3(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2C3_MDL) +#define IS_THE_MACHINE_E2K_VIRT(mach_id) \ + (((mach_id) & MACHINE_ID_CPU_TYPE_MASK) == IDR_E2K_VIRT_MDL) + +#define IS_THE_MACHINE_SIM(mach_id) \ + (((mach_id) & MACHINE_ID_SIMUL) != 0) + +#define HAS_THE_MACHINE_E2K_DSP(mach_id) \ + (IS_THE_MACHINE_ES2_DSP(mach_id)) +#define HAS_THE_MACHINE_E2K_FULL_SIC(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_FULL_SIC) != 0) +#define HAS_THE_MACHINE_E2K_IOHUB(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_IOHUB) != 0) +#define HAS_THE_MACHINE_L_IOMMU(mach_id) \ + (((mach_id) & MACHINE_ID_L_IOMMU) != 0) +#define HAS_THE_MACHINE_E2K_IOMMU(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_IOMMU) != 0) +#define HAS_THE_MACHINE_E2K_LEGACY_SIC(mach_id) \ + (((mach_id) & MACHINE_ID_E2K_LEGACY_SIC) != 0) +#define HAS_THE_MACHINE_L_SIC(mach_id) \ + (HAS_THE_MACHINE_E2K_FULL_SIC(mach_id) || \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(mach_id)) + +#define NATIVE_IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(native_machine_id) +#define NATIVE_IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(native_machine_id) +#define NATIVE_IS_MACHINE_ES2 \ + IS_THE_MACHINE_ES2(native_machine_id) +#define NATIVE_IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(native_machine_id) +#define NATIVE_IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(native_machine_id) +#define NATIVE_IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(native_machine_id) +#define NATIVE_IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(native_machine_id) +#define NATIVE_IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(native_machine_id) +#define NATIVE_IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(native_machine_id) +#define NATIVE_IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(native_machine_id) +#define NATIVE_IS_MACHINE_E2K_VIRT (false) + +#define BOOT_NATIVE_IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_ES2 \ + ((BOOT_NATIVE_IS_MACHINE_ES2_DSP) || \ + (BOOT_NATIVE_IS_MACHINE_ES2_RU)) +#define BOOT_NATIVE_IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E8C 
\ + IS_THE_MACHINE_E8C(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(boot_native_machine_id) +#define BOOT_NATIVE_IS_MACHINE_E2K_VIRT false + +#define NATIVE_IS_MACHINE_SIM \ + IS_THE_MACHINE_SIM(native_machine_id) + +#define NATIVE_HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_IOMMU \ + HAS_THE_MACHINE_E2K_IOMMU(native_machine_id) +#define NATIVE_HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(native_machine_id) +#define NATIVE_HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(native_machine_id) + +#define BOOT_NATIVE_IS_MACHINE_SIM \ + IS_THE_MACHINE_SIM(boot_native_machine_id) + +#define BOOT_NATIVE_HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(boot_native_machine_id) +#define BOOT_NATIVE_HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(boot_native_machine_id) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ +/* only the native machine exists and should be examined */ +#define machine_id native_machine_id +#define boot_machine_id boot_native_machine_id + +#define get_machine_id() machine_id +#define boot_get_machine_id() boot_machine_id +#define set_machine_id(mach_id) (machine_id = (mach_id)) +#define boot_set_machine_id(mach_id) (boot_machine_id = (mach_id)) + +static inline void set_mach_type_id(void) +{ + native_set_mach_type_id(); +} + +#define boot_panic(fmt, args...) boot_native_panic(fmt, ##args) + +#endif /* ! 
CONFIG_KVM_GUEST_KERNEL */ + +#define IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(get_machine_id()) +#define IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(get_machine_id()) +#define IS_MACHINE_ES2 \ + IS_THE_MACHINE_ES2(get_machine_id()) +#define IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(get_machine_id()) +#define IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(get_machine_id()) +#define IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(get_machine_id()) +#define IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(get_machine_id()) +#define IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(get_machine_id()) +#define IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(get_machine_id()) +#define IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(get_machine_id()) +#define IS_MACHINE_E2K_VIRT \ + IS_THE_MACHINE_E2K_VIRT(get_machine_id()) + +#define HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(get_machine_id()) +#define HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(get_machine_id()) +#define HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(get_machine_id()) +#define HAS_MACHINE_L_IOMMU \ + HAS_THE_MACHINE_L_IOMMU(get_machine_id()) +#define HAS_MACHINE_E2K_IOMMU \ + HAS_THE_MACHINE_E2K_IOMMU(get_machine_id()) +#define HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(get_machine_id()) +#define HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(get_machine_id()) + +#define BOOT_IS_MACHINE_ES2_DSP \ + IS_THE_MACHINE_ES2_DSP(boot_get_machine_id()) +#define BOOT_IS_MACHINE_ES2_RU \ + IS_THE_MACHINE_ES2_RU(boot_get_machine_id()) +#define BOOT_IS_MACHINE_ES2 \ + IS_THE_MACHINE_ES2(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E2S \ + IS_THE_MACHINE_E2S(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E8C \ + IS_THE_MACHINE_E8C(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E1CP \ + IS_THE_MACHINE_E1CP(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E8C2 \ + IS_THE_MACHINE_E8C2(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E12C \ + IS_THE_MACHINE_E12C(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E16C \ + IS_THE_MACHINE_E16C(boot_get_machine_id()) +#define BOOT_IS_MACHINE_E2C3 \ + IS_THE_MACHINE_E2C3(boot_get_machine_id()) +#define BOOT_IS_MACHINE_VIRT \ + IS_THE_MACHINE_VIRT(boot_get_machine_id()) + +#define BOOT_HAS_MACHINE_E2K_DSP \ + HAS_THE_MACHINE_E2K_DSP(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_FULL_SIC \ + HAS_THE_MACHINE_E2K_FULL_SIC(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_IOHUB \ + HAS_THE_MACHINE_E2K_IOHUB(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_L_IOMMU \ + HAS_THE_MACHINE_L_IOMMU(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_IOMMU \ + HAS_THE_MACHINE_E2K_IOMMU(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_E2K_LEGACY_SIC \ + HAS_THE_MACHINE_E2K_LEGACY_SIC(boot_get_machine_id()) +#define BOOT_HAS_MACHINE_L_SIC \ + HAS_THE_MACHINE_L_SIC(boot_get_machine_id()) + +#define MAX_NODE_CPUS 16 /* all 16 CPU cores on a node */ + +#define E2K_MAX_NODE_IOLINKS 2 /* each node can have max 2 IO links */ + /* connected to IOHUB or RDMA */ +#define MACH_MAX_NUMIOLINKS (E2K_MAX_NODE_IOLINKS * MAX_NUMNODES) + +#define LMS_CONS_DATA_PORT 0x300UL /* On READ - data from keyboard */ + /* On WRITE - data to debug output */ + /* port (console/journal) */ + +#define LMS_CONS_STATUS_PORT 0x301UL /* On READ - data available on 0x300 */ + /* On WRITE - shift count for 0x304 */ + +#define LMS_NSOCK_BADDR_PORT 0x302UL /* On READ - network socket base addr */ + /* On WRITE - the same. 
*/ + +#define LMS_NSOCK_DATA_PORT 0x303UL /* On READ - data from network socket */ + /* On WRITE - data to network socket */ + +#define LMS_TRACE_CNTL_PORT 0x304UL /* On READ - state of the instruction */ + /* counter */ + /* On WRITE - LMS tracer control */ + /* (1 - start, 0 - stop) */ + +#define LMS_RAM_ADDR_PORT 0x309UL /* On WRITE - RAM address to load */ + /* kernel image by simulator */ + +#define LMS_TRACE_CNTL_OFF 0 +#define LMS_TRACE_CNTL_ON 1 + +#define LMS_LOAD_IMAGE_TO_RAM 5 /* Load the 'romimage' file to RAM */ + /* at the address previously written */ + /* to the 'LMS_RAM_ADDR_PORT' port */ + +extern unsigned long machine_serial_num; + +#endif /* _ASM_E2K_H_ */ diff --git a/arch/e2k/include/asm/e2k_api.h b/arch/e2k/include/asm/e2k_api.h new file mode 100644 index 0000000..0e8e5f6 --- /dev/null +++ b/arch/e2k/include/asm/e2k_api.h @@ -0,0 +1,7029 @@ +#ifndef _E2K_API_H_ +#define _E2K_API_H_ + +#include +#include + + +#ifndef __ASSEMBLY__ +typedef unsigned char __e2k_u8_t; +typedef unsigned short int __e2k_u16_t; +typedef unsigned int __e2k_u32_t; +typedef unsigned long long __e2k_u64_t; +typedef void *__e2k_ptr_t; +#endif + + +/* + * FIXME: the following header include is commented out: +#include <asm/mmu_types.h> + * because this header 'asm/e2k_api.h' is included in the arch-independent + * header 'linux/compiler-gcc.h' (see that header to know why). + * So the header 'asm/mmu_types.h' cannot contain any types from + * 'linux/types.h', and one needs to include it directly before + * 'asm/e2k_api.h' in the following files: + * kernel/trap_table.S + * kernel/page_tables.S + */ + +/* + * Used to separate one wide instruction from another + */ +#define E2K_CMD_SEPARATOR asm volatile ("{nop}" ::: "memory") + +/* To avoid header dependencies use this define + * instead of BUILD_BUG_ON() from . */ +#define E2K_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + +#ifndef E2K_BIN_VER +#define E2K_BIN_VER 8 +#endif + +#define EI_SEMANTIC 7 + +#define ELF_CODE_UNKNOWN 0 +#define ELF_CODE_32_UNPROTECTED 1 +#define ELF_CODE_64_UNPROTECTED 2 +#define ELF_CODE_NEW_PROTECTED 5 +#define ELF_CODE_NEW_PROTECTED_CXX 6 + +#define ELF_BIN_COMP 0x4 +/* + * If (x->e_flags & ELF_E2K_INCOMPAT) != 0 + * the code can be executed only on the exact machine type: (mtype==0) - any + * ==2 es2 + * ==3 e2s + * ==4 e8c + */ +#define ELF_E2K_INCOMPAT 0x10 + +#define IS_INCOMPAT(x) ((x)->e_machine == ELF_ARCH && \ + ((x)->e_flags & ELF_E2K_INCOMPAT)) + +/* protected mode flag */ +#define ELF_E2K_PM 0x20 +/* ELF segments are to be mapped in a packed way. */ +#define ELF_E2K_PACK_SEGMENTS 0x40 + +#define EM_E2KL (33 + (E2K_BIN_VER << 1)) /* Little endian */ +#define EM_E2KB (34 + (E2K_BIN_VER << 1)) /* Big endian */ +#define EM_E2K_FAKE EM_E2KL +#define EM_E2K 175 +/* Compilation unit number for all memory allocations in 32-bit comp. mode */ +#define E2K_ELF32_COMP_UNIT 1 + +/* + * Machine type checker. It is to be used for 64-bit and 32-bit ELF + * and for protected mode. The result depends on machine type and binary type. 
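+ * For example (editor's reading of the switch below, not original text):
+ * a binary stamped with mt == 4 (e8c) is accepted when the running
+ * instruction set is exactly ELBRUS_8C_ISET, or any newer one provided
+ * ELF_E2K_INCOMPAT is clear.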
+ */ + +#define elf_check_e2k_mtype(x) \ +({ \ + unsigned long mt; \ + int _res = 0; \ + int _iset = machine.native_iset_ver; \ + \ + if ((x)->e_machine == ELF_ARCH) { \ + mt = (unsigned long)((x)->e_flags >> 24); \ + } else { \ + mt = (unsigned long)((x)->e_flags >> 28) & 0x7; \ + } \ + \ + switch (mt) { \ + case 0: \ + if (!IS_INCOMPAT(x) || _iset == ELBRUS_S_ISET) \ + _res = 1; \ + break; \ + case 2: \ + if (!IS_INCOMPAT(x) && _iset > ELBRUS_S_ISET \ + || _iset == ELBRUS_S_ISET) \ + _res = 1; \ + break; \ + case 3: \ + if (!IS_INCOMPAT(x) && _iset > ELBRUS_2S_ISET \ + || _iset == ELBRUS_2S_ISET) \ + _res = 1; \ + break; \ + case 4: \ + if (!IS_INCOMPAT(x) && _iset > ELBRUS_8C_ISET \ + || _iset == ELBRUS_8C_ISET) \ + _res = 1; \ + break; \ + case 5: \ + if (!IS_INCOMPAT(x) && _iset > ELBRUS_8C2_ISET \ + || _iset == ELBRUS_8C2_ISET) \ + _res = 1; \ + break; \ + case 6: \ + if (_iset == E2K_ISET_V6) \ + _res = 1; \ + break; \ + case 19: \ + if (IS_MACHINE_E8C) \ + _res = 1; \ + break; \ + case 20: \ + if (IS_MACHINE_E1CP) \ + _res = 1; \ + break; \ + case 21: \ + if (IS_MACHINE_E12C) \ + _res = 1; \ + break; \ + case 22: \ + if (IS_MACHINE_E16C) \ + _res = 1; \ + break; \ + case 23: \ + if (IS_MACHINE_E2C3) \ + _res = 1; \ + break; \ + default: \ + break; \ + } \ + \ + _res; \ +}) + +/* + * Normal simulator termination + */ +#define E2K_LMS_HALT_OK \ +({ \ + _Pragma("no_asm_inline") \ + asm volatile (".word \t0x00008001\n\t" \ + ".word \t0x60000000"); \ +}) + +/* + * Simulator termination on error + */ +#define E2K_LMS_HALT_ERROR(err_no) \ +({ \ + _Pragma("no_asm_inline") \ + asm volatile (".word \t0x00008001\n\t" \ + ".word \t0x60000000 | %0" \ + : \ + : "i" (err_no)); \ +}) + +/* + * Kprobes breakpoint instruction + */ +#define E2K_KPROBES_BREAKPOINT \ +({ \ + _Pragma("no_asm_inline") \ + asm volatile (".word \t0x04000001\n\t" \ + ".word \t0x0dc0c040"); \ +}) + +#define ASM_GET_GREG_MEMONIC(greg_no) __asm__("%g" #greg_no) +#define DO_ASM_GET_GREG_MEMONIC(greg_no) ASM_GET_GREG_MEMONIC(greg_no) + +#define E2K_GET_REG(reg_no) \ +({ \ + register __e2k_u32_t res; \ + asm volatile ("adds \t0x0, %%r" #reg_no ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_GET_DREG(reg_no) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("addd \t0x0, %%dr" #reg_no ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define ASM_GET_DGREG(reg_no) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("addd \t0x0, %%dg" #reg_no ", %0" \ + : "=r" (res)); \ + res; \ +}) +#define DO_ASM_GET_DGREG(greg_no) \ + ASM_GET_DGREG(greg_no) +#define E2K_GET_DGREG(greg_no) \ + DO_ASM_GET_DGREG(greg_no) +#define NATIVE_GET_DGREG(greg_no) \ + DO_ASM_GET_DGREG(greg_no) + + +#define ASM_GET_UNTEGGED_DGREG(reg_no) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("addd,s \t0x0, %%dg" #reg_no ", %0\n" \ + "puttagd,s \t%0, 0, %0" \ + : "=r" (res)); \ + res; \ +}) +#define DO_ASM_GET_UNTEGGED_DGREG(greg_no) \ + ASM_GET_UNTEGGED_DGREG(greg_no) +#define E2K_GET_UNTEGGED_DGREG(greg_no) \ + DO_ASM_GET_UNTEGGED_DGREG(greg_no) +#define NATIVE_GET_UNTEGGED_DGREG(greg_no) \ + DO_ASM_GET_UNTEGGED_DGREG(greg_no) + +#define ASM_SET_DGREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%dg" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) +#define DO_ASM_SET_DGREG(greg_no, val) \ + ASM_SET_DGREG(greg_no, val) +#define E2K_SET_DGREG(greg_no, val) \ + DO_ASM_SET_DGREG(greg_no, val) +#define NATIVE_SET_DGREG(greg_no, val) \ + DO_ASM_SET_DGREG(greg_no, val) + +#define ASM_SET_DGREG_NV(greg_no, _val) \ +({ \ + register u64 _greg 
asm("g" #greg_no); \ + asm ("addd 0, %[val], %[greg]" \ + : [greg] "=r" (_greg) \ + : [val] "ri" ((__e2k_u64_t) (_val))); \ +}) +#define DO_ASM_SET_DGREG_NV(greg_no, val) \ + ASM_SET_DGREG_NV(greg_no, val) +#define E2K_SET_DGREG_NV(greg_no, val) \ + DO_ASM_SET_DGREG_NV(greg_no, val) +#define NATIVE_SET_DGREG_NV(greg_no, val) \ + DO_ASM_SET_DGREG_NV(greg_no, val) + + +#define __E2K_QPSWITCHD_SM_GREG(num) \ +do { \ + asm ("qpswitchd,sm %%dg" #num ", %%dg" #num \ + ::: "%g" #num); \ +} while (0) + +#define E2K_QPSWITCHD_SM_GREG(greg_num) \ +do { \ + switch (greg_num) { \ + case 0: __E2K_QPSWITCHD_SM_GREG(0); break; \ + case 1: __E2K_QPSWITCHD_SM_GREG(1); break; \ + case 2: __E2K_QPSWITCHD_SM_GREG(2); break; \ + case 3: __E2K_QPSWITCHD_SM_GREG(3); break; \ + case 4: __E2K_QPSWITCHD_SM_GREG(4); break; \ + case 5: __E2K_QPSWITCHD_SM_GREG(5); break; \ + case 6: __E2K_QPSWITCHD_SM_GREG(6); break; \ + case 7: __E2K_QPSWITCHD_SM_GREG(7); break; \ + case 8: __E2K_QPSWITCHD_SM_GREG(8); break; \ + case 9: __E2K_QPSWITCHD_SM_GREG(9); break; \ + case 10: __E2K_QPSWITCHD_SM_GREG(10); break; \ + case 11: __E2K_QPSWITCHD_SM_GREG(11); break; \ + case 12: __E2K_QPSWITCHD_SM_GREG(12); break; \ + case 13: __E2K_QPSWITCHD_SM_GREG(13); break; \ + case 14: __E2K_QPSWITCHD_SM_GREG(14); break; \ + case 15: __E2K_QPSWITCHD_SM_GREG(15); break; \ + case 16: __E2K_QPSWITCHD_SM_GREG(16); break; \ + case 17: __E2K_QPSWITCHD_SM_GREG(17); break; \ + case 18: __E2K_QPSWITCHD_SM_GREG(18); break; \ + case 19: __E2K_QPSWITCHD_SM_GREG(19); break; \ + case 20: __E2K_QPSWITCHD_SM_GREG(20); break; \ + case 21: __E2K_QPSWITCHD_SM_GREG(21); break; \ + case 22: __E2K_QPSWITCHD_SM_GREG(22); break; \ + case 23: __E2K_QPSWITCHD_SM_GREG(23); break; \ + case 24: __E2K_QPSWITCHD_SM_GREG(24); break; \ + case 25: __E2K_QPSWITCHD_SM_GREG(25); break; \ + case 26: __E2K_QPSWITCHD_SM_GREG(26); break; \ + case 27: __E2K_QPSWITCHD_SM_GREG(27); break; \ + case 28: __E2K_QPSWITCHD_SM_GREG(28); break; \ + case 29: __E2K_QPSWITCHD_SM_GREG(29); break; \ + case 30: __E2K_QPSWITCHD_SM_GREG(30); break; \ + case 31: __E2K_QPSWITCHD_SM_GREG(31); break; \ + default: panic("Invalid global register # %d\n", greg_num); \ + } \ +} while (0) + +/* + * Copy single register tagged value to single register + * src_reg - local value of type single register to copy from + * dst_reg - local value of type single register to copy to + */ +#define E2K_MOVE_TAGGED_REG(src_reg, dst_reg) \ +({ \ + asm volatile ("movts \t%1, %0" \ + : "=r" (dst_reg) \ + : "r" ((__e2k_u32_t) (src_reg))); \ +}) + +#define _E2K_GET_DGREG_VAL_AND_TAG(greg_no, dst_reg, tag) \ +({ \ + u32 __dtag; \ + asm volatile ("{gettagd %%dg" #greg_no ", %0\n\t" \ + "puttagd %%dg" #greg_no ", 0, %1}" \ + : "=r" (__dtag), "=r" (dst_reg) \ + : ); \ + tag = __dtag; \ +}) + +#define E2K_GET_DGREG_VAL_AND_TAG(greg_num, dst_reg, tag) \ +({ \ + switch (greg_num) { \ + case 0: _E2K_GET_DGREG_VAL_AND_TAG(0, dst_reg, tag); break; \ + case 1: _E2K_GET_DGREG_VAL_AND_TAG(1, dst_reg, tag); break; \ + case 2: _E2K_GET_DGREG_VAL_AND_TAG(2, dst_reg, tag); break; \ + case 3: _E2K_GET_DGREG_VAL_AND_TAG(3, dst_reg, tag); break; \ + case 4: _E2K_GET_DGREG_VAL_AND_TAG(4, dst_reg, tag); break; \ + case 5: _E2K_GET_DGREG_VAL_AND_TAG(5, dst_reg, tag); break; \ + case 6: _E2K_GET_DGREG_VAL_AND_TAG(6, dst_reg, tag); break; \ + case 7: _E2K_GET_DGREG_VAL_AND_TAG(7, dst_reg, tag); break; \ + case 8: _E2K_GET_DGREG_VAL_AND_TAG(8, dst_reg, tag); break; \ + case 9: _E2K_GET_DGREG_VAL_AND_TAG(9, dst_reg, tag); break; \ + case 10: 
_E2K_GET_DGREG_VAL_AND_TAG(10, dst_reg, tag); break; \ + case 11: _E2K_GET_DGREG_VAL_AND_TAG(11, dst_reg, tag); break; \ + case 12: _E2K_GET_DGREG_VAL_AND_TAG(12, dst_reg, tag); break; \ + case 13: _E2K_GET_DGREG_VAL_AND_TAG(13, dst_reg, tag); break; \ + case 14: _E2K_GET_DGREG_VAL_AND_TAG(14, dst_reg, tag); break; \ + case 15: _E2K_GET_DGREG_VAL_AND_TAG(15, dst_reg, tag); break; \ + case 16: _E2K_GET_DGREG_VAL_AND_TAG(16, dst_reg, tag); break; \ + case 17: _E2K_GET_DGREG_VAL_AND_TAG(17, dst_reg, tag); break; \ + case 18: _E2K_GET_DGREG_VAL_AND_TAG(18, dst_reg, tag); break; \ + case 19: _E2K_GET_DGREG_VAL_AND_TAG(19, dst_reg, tag); break; \ + case 20: _E2K_GET_DGREG_VAL_AND_TAG(20, dst_reg, tag); break; \ + case 21: _E2K_GET_DGREG_VAL_AND_TAG(21, dst_reg, tag); break; \ + case 22: _E2K_GET_DGREG_VAL_AND_TAG(22, dst_reg, tag); break; \ + case 23: _E2K_GET_DGREG_VAL_AND_TAG(23, dst_reg, tag); break; \ + case 24: _E2K_GET_DGREG_VAL_AND_TAG(24, dst_reg, tag); break; \ + case 25: _E2K_GET_DGREG_VAL_AND_TAG(25, dst_reg, tag); break; \ + case 26: _E2K_GET_DGREG_VAL_AND_TAG(26, dst_reg, tag); break; \ + case 27: _E2K_GET_DGREG_VAL_AND_TAG(27, dst_reg, tag); break; \ + case 28: _E2K_GET_DGREG_VAL_AND_TAG(28, dst_reg, tag); break; \ + case 29: _E2K_GET_DGREG_VAL_AND_TAG(29, dst_reg, tag); break; \ + case 30: _E2K_GET_DGREG_VAL_AND_TAG(30, dst_reg, tag); break; \ + case 31: _E2K_GET_DGREG_VAL_AND_TAG(31, dst_reg, tag); break; \ + default: panic("Invalid global register # %d\n", greg_num); \ + } \ +}) + +#define _E2K_SET_DGREG_VAL_AND_TAG(greg_no, val, tag) \ +do { \ + asm volatile ("puttagd %0, %1, %%dg" #greg_no \ + : \ + : "r" (val), "r" (tag)); \ +} while (0) + +#define E2K_SET_DGREG_VAL_AND_TAG(greg_num, val, tag) \ +do { \ + switch (greg_num) { \ + case 0: _E2K_SET_DGREG_VAL_AND_TAG(0, val, tag); break; \ + case 1: _E2K_SET_DGREG_VAL_AND_TAG(1, val, tag); break; \ + case 2: _E2K_SET_DGREG_VAL_AND_TAG(2, val, tag); break; \ + case 3: _E2K_SET_DGREG_VAL_AND_TAG(3, val, tag); break; \ + case 4: _E2K_SET_DGREG_VAL_AND_TAG(4, val, tag); break; \ + case 5: _E2K_SET_DGREG_VAL_AND_TAG(5, val, tag); break; \ + case 6: _E2K_SET_DGREG_VAL_AND_TAG(6, val, tag); break; \ + case 7: _E2K_SET_DGREG_VAL_AND_TAG(7, val, tag); break; \ + case 8: _E2K_SET_DGREG_VAL_AND_TAG(8, val, tag); break; \ + case 9: _E2K_SET_DGREG_VAL_AND_TAG(9, val, tag); break; \ + case 10: _E2K_SET_DGREG_VAL_AND_TAG(10, val, tag); break; \ + case 11: _E2K_SET_DGREG_VAL_AND_TAG(11, val, tag); break; \ + case 12: _E2K_SET_DGREG_VAL_AND_TAG(12, val, tag); break; \ + case 13: _E2K_SET_DGREG_VAL_AND_TAG(13, val, tag); break; \ + case 14: _E2K_SET_DGREG_VAL_AND_TAG(14, val, tag); break; \ + case 15: _E2K_SET_DGREG_VAL_AND_TAG(15, val, tag); break; \ + case 16: _E2K_SET_DGREG_VAL_AND_TAG(16, val, tag); break; \ + case 17: _E2K_SET_DGREG_VAL_AND_TAG(17, val, tag); break; \ + case 18: _E2K_SET_DGREG_VAL_AND_TAG(18, val, tag); break; \ + case 19: _E2K_SET_DGREG_VAL_AND_TAG(19, val, tag); break; \ + case 20: _E2K_SET_DGREG_VAL_AND_TAG(20, val, tag); break; \ + case 21: _E2K_SET_DGREG_VAL_AND_TAG(21, val, tag); break; \ + case 22: _E2K_SET_DGREG_VAL_AND_TAG(22, val, tag); break; \ + case 23: _E2K_SET_DGREG_VAL_AND_TAG(23, val, tag); break; \ + case 24: _E2K_SET_DGREG_VAL_AND_TAG(24, val, tag); break; \ + case 25: _E2K_SET_DGREG_VAL_AND_TAG(25, val, tag); break; \ + case 26: _E2K_SET_DGREG_VAL_AND_TAG(26, val, tag); break; \ + case 27: _E2K_SET_DGREG_VAL_AND_TAG(27, val, tag); break; \ + case 28: _E2K_SET_DGREG_VAL_AND_TAG(28, val, tag); break; \ + 
case 29: _E2K_SET_DGREG_VAL_AND_TAG(29, val, tag); break; \ + case 30: _E2K_SET_DGREG_VAL_AND_TAG(30, val, tag); break; \ + case 31: _E2K_SET_DGREG_VAL_AND_TAG(31, val, tag); break; \ + default: panic("Invalid global register # %d\n", greg_num); \ + } \ +} while (0) + +#define _E2K_GET_GREG_VAL_AND_TAG(greg_no, dst_reg, tag) \ +({ \ + u32 __tag; \ + asm volatile ("{gettags %%g" #greg_no ", %0\n\t" \ + "puttags %%g" #greg_no ", 0, %1}" \ + : "=r" (__tag), "=r" (dst_reg) \ + : ); \ + tag = __tag; \ +}) + +#define E2K_GET_GREG_VAL_AND_TAG(greg_num, dst_reg, tag) \ +({ \ + switch (greg_num) { \ + case 0: _E2K_GET_GREG_VAL_AND_TAG(0, dst_reg, tag); break; \ + case 1: _E2K_GET_GREG_VAL_AND_TAG(1, dst_reg, tag); break; \ + case 2: _E2K_GET_GREG_VAL_AND_TAG(2, dst_reg, tag); break; \ + case 3: _E2K_GET_GREG_VAL_AND_TAG(3, dst_reg, tag); break; \ + case 4: _E2K_GET_GREG_VAL_AND_TAG(4, dst_reg, tag); break; \ + case 5: _E2K_GET_GREG_VAL_AND_TAG(5, dst_reg, tag); break; \ + case 6: _E2K_GET_GREG_VAL_AND_TAG(6, dst_reg, tag); break; \ + case 7: _E2K_GET_GREG_VAL_AND_TAG(7, dst_reg, tag); break; \ + case 8: _E2K_GET_GREG_VAL_AND_TAG(8, dst_reg, tag); break; \ + case 9: _E2K_GET_GREG_VAL_AND_TAG(9, dst_reg, tag); break; \ + case 10: _E2K_GET_GREG_VAL_AND_TAG(10, dst_reg, tag); break; \ + case 11: _E2K_GET_GREG_VAL_AND_TAG(11, dst_reg, tag); break; \ + case 12: _E2K_GET_GREG_VAL_AND_TAG(12, dst_reg, tag); break; \ + case 13: _E2K_GET_GREG_VAL_AND_TAG(13, dst_reg, tag); break; \ + case 14: _E2K_GET_GREG_VAL_AND_TAG(14, dst_reg, tag); break; \ + case 15: _E2K_GET_GREG_VAL_AND_TAG(15, dst_reg, tag); break; \ + case 16: _E2K_GET_GREG_VAL_AND_TAG(16, dst_reg, tag); break; \ + case 17: _E2K_GET_GREG_VAL_AND_TAG(17, dst_reg, tag); break; \ + case 18: _E2K_GET_GREG_VAL_AND_TAG(18, dst_reg, tag); break; \ + case 19: _E2K_GET_GREG_VAL_AND_TAG(19, dst_reg, tag); break; \ + case 20: _E2K_GET_GREG_VAL_AND_TAG(20, dst_reg, tag); break; \ + case 21: _E2K_GET_GREG_VAL_AND_TAG(21, dst_reg, tag); break; \ + case 22: _E2K_GET_GREG_VAL_AND_TAG(22, dst_reg, tag); break; \ + case 23: _E2K_GET_GREG_VAL_AND_TAG(23, dst_reg, tag); break; \ + case 24: _E2K_GET_GREG_VAL_AND_TAG(24, dst_reg, tag); break; \ + case 25: _E2K_GET_GREG_VAL_AND_TAG(25, dst_reg, tag); break; \ + case 26: _E2K_GET_GREG_VAL_AND_TAG(26, dst_reg, tag); break; \ + case 27: _E2K_GET_GREG_VAL_AND_TAG(27, dst_reg, tag); break; \ + case 28: _E2K_GET_GREG_VAL_AND_TAG(28, dst_reg, tag); break; \ + case 29: _E2K_GET_GREG_VAL_AND_TAG(29, dst_reg, tag); break; \ + case 30: _E2K_GET_GREG_VAL_AND_TAG(30, dst_reg, tag); break; \ + case 31: _E2K_GET_GREG_VAL_AND_TAG(31, dst_reg, tag); break; \ + default: panic("Invalid global register # %d\n", greg_num); \ + } \ +}) + +#define ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) \ +({ \ + u64 reg0, reg1; \ + BUILD_BUG_ON(iset != E2K_ISET_V2); \ + \ + asm ( \ + "strd,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \ + "strd,5 [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \ + "movfi %%dg" #numlo ", %[reg0]\n" \ + "movfi %%dg" #numhi ", %[reg1]\n" \ + "sth [ %[addr_lo] + 8 ], %[reg0]\n" \ + "sth [ %[addr_hi] + 8 ], %[reg1]\n" \ + : [reg0] "=&r" (reg0), [reg1] "=&r" (reg1) \ + : [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \ + [opc_0] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +}) + +#define ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) \ +({ \ + u64 reg0, reg1, reg2, reg3; \ + BUILD_BUG_ON(iset != E2K_ISET_V2); \ + \ + asm ( \ + "ldrd,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \ + 
"ldrd,5 [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \ + "ldh [ %[addr_lo] + 8 ], %[reg0]\n" \ + "ldh [ %[addr_hi] + 8 ], %[reg1]\n" \ + "gettagd %%dg" #numlo ", %[reg2]\n" \ + "gettagd %%dg" #numhi ", %[reg3]\n" \ + "cmpesb 0, %[reg2], %%pred2\n" \ + "cmpesb 0, %[reg3], %%pred3\n" \ + "movif %%dg" #numlo ", %[reg0], %%dg" #numlo " ? %%pred2\n" \ + "movif %%dg" #numhi ", %[reg1], %%dg" #numhi " ? %%pred3\n" \ + : [reg0] "=&r" (reg0), [reg1] "=&r" (reg1), \ + [reg2] "=&r" (reg2), [reg3] "=&r" (reg3) \ + : [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \ + [opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC) \ + :"%g" #numlo, "%g" #numhi, "%pred2", "%pred3"); \ +}) + +#define ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset) \ +({ \ + u64 unused; \ + BUILD_BUG_ON(iset != E2K_ISET_V5); \ + \ + asm ( \ + /* Bug 116851 - all strqp must be speculative \ + * if dealing with tags */ \ + "{\n" /* Close this asm because 'sm' for 'strqp' \ + is not supported by lcc */ \ + "strqp,2,sm [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \ + "strqp,5,sm [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \ + "}\n" \ + : [unused] "=r" (unused) \ + : [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \ + [opc_0] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +}) + +#define ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset) \ +({ \ + u64 reg0, reg1; \ + BUILD_BUG_ON(iset != E2K_ISET_V5); \ + \ + asm ( \ + "ldrqp,2 [ %[addr_lo] + %[opc_0] ], %%dg" #numlo "\n" \ + "ldrqp,5 [ %[addr_hi] + %[opc_0] ], %%dg" #numhi "\n" \ + : [reg0] "=&r" (reg0), [reg1] "=&r" (reg1) \ + : [addr_lo] "r" (__addr_lo), [addr_hi] "r" (__addr_hi), \ + [opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC) \ + : "%g" #numlo, "%g" #numhi); \ +}) + +#if __iset__ == 2 + +#define ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \ + ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) + +#define ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \ + ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, iset) + +#elif __iset__ == 5 + +#define ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \ + ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset) + +#define ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \ + ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, iset) + +#else +/* IMPORTANT: Do NOT use these macros directly, use + * machine.save_gregs()/machine.restore_gregs() instead */ +#endif + +#define NATIVE_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \ + ASM_SAVE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) +#define NATIVE_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi) \ + ASM_SAVE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V2) +#define NATIVE_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi) \ + ASM_SAVE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V5) +#define NATIVE_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) \ + ASM_RESTORE_GREG(__addr_lo, __addr_hi, numlo, numhi, iset) +#define NATIVE_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi) \ + ASM_RESTORE_GREG_V2(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V2) +#define NATIVE_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi) \ + ASM_RESTORE_GREG_V5(__addr_lo, __addr_hi, numlo, numhi, E2K_ISET_V5) + +#define ASM_SAVE_THE_KERNEL_GREG(greg_no, _base, ind) \ +({ \ + u64 reg0, reg1; \ + BUILD_BUG_ON(iset != E2K_ISET_V2); \ + \ + asm ( \ + "strd [ %[base] + %[opc] ], %%dg" #greg_no "\n" \ + : \ + : [base] "r" (_base), \ + [opc] "i" (TAGGED_MEM_STORE_REC_OPC | ind * 16UL) \ + : "%g" #greg_no, "memory"); \ +}) 
+#define NATIVE_SAVE_THE_KERNEL_GREG(greg_no, _base, ind) \ + ASM_SAVE_THE_KERNEL_GREG(greg_no, _base, ind) + +#define ASM_RESTORE_THE_KERNEL_GREG(greg_no, _base, ind) \ +do { \ + asm ( \ + "ldrd [ %[base] + %[opc] ], %%dg" #greg_no "\n" \ + : \ + : [base] "r" (_base), \ + [opc] "i" (TAGGED_MEM_LOAD_REC_OPC | ind * 16UL) \ + : "%g" #greg_no, "memory"); \ +} while (false) +#define NATIVE_RESTORE_THE_KERNEL_GREG(greg_no, _base, ind) \ + ASM_RESTORE_THE_KERNEL_GREG(greg_no, _base, ind) + +#define ASM_RESTORE_KERNEL_GREG(__base, \ + indlo1, indhi1, indlo2, indhi2, \ + numlo1, numhi1, numlo2, numhi2) \ +do { \ + u64 _base = (u64) (__base); \ + register u64 g##numlo1 asm("g" #numlo1); \ + register u64 g##numhi1 asm("g" #numhi1); \ + register u64 g##numlo2 asm("g" #numlo2); \ + register u64 g##numhi2 asm("g" #numhi2); \ + asm ( "ldrd [ %[base] + %[opc_lo1] ], %[g_lo1]\n" \ + "ldrd [ %[base] + %[opc_hi1] ], %[g_hi1]\n" \ + "ldrd [ %[base] + %[opc_lo2] ], %[g_lo2]\n" \ + "ldrd [ %[base] + %[opc_hi2] ], %[g_hi2]\n" \ + : [g_lo1] "=r" (g##numlo1), [g_hi1] "=r" (g##numhi1), \ + [g_lo2] "=r" (g##numlo2), [g_hi2] "=r" (g##numhi2) \ + : [base] "r" (_base), \ + [opc_lo1] "i" (TAGGED_MEM_LOAD_REC_OPC | indlo1 * 16UL), \ + [opc_hi1] "i" (TAGGED_MEM_LOAD_REC_OPC | indhi1 * 16UL), \ + [opc_lo2] "i" (TAGGED_MEM_LOAD_REC_OPC | indlo2 * 16UL), \ + [opc_hi2] "i" (TAGGED_MEM_LOAD_REC_OPC | indhi2 * 16UL)); \ +} while (false) +#define NATIVE_RESTORE_KERNEL_GREG(base, \ + indlo1, indhi1, indlo2, indhi2, \ + numlo1, numhi1, numlo2, numhi2) \ + ASM_RESTORE_KERNEL_GREG(base, \ + indlo1, indhi1, indlo2, indhi2, \ + numlo1, numhi1, numlo2, numhi2) + +#define E2K_ALL_GREGS_SET_EMPTY() \ +({ \ + register char tag; \ + tag = ETAGEWD; \ + asm ("puttagd 0, %0, %%dg0\n" \ + "puttagd 0, %0, %%dg1\n" \ + "puttagd 0, %0, %%dg2\n" \ + "puttagd 0, %0, %%dg3\n" \ + "puttagd 0, %0, %%dg4\n" \ + "puttagd 0, %0, %%dg5\n" \ + "puttagd 0, %0, %%dg6\n" \ + "puttagd 0, %0, %%dg7\n" \ + "puttagd 0, %0, %%dg8\n" \ + "puttagd 0, %0, %%dg9\n" \ + "puttagd 0, %0, %%dg10\n" \ + "puttagd 0, %0, %%dg11\n" \ + "puttagd 0, %0, %%dg12\n" \ + "puttagd 0, %0, %%dg13\n" \ + "puttagd 0, %0, %%dg14\n" \ + "puttagd 0, %0, %%dg15\n" \ + "puttagd 0, %0, %%dg16\n" \ + "puttagd 0, %0, %%dg17\n" \ + "puttagd 0, %0, %%dg18\n" \ + "puttagd 0, %0, %%dg19\n" \ + "puttagd 0, %0, %%dg20\n" \ + "puttagd 0, %0, %%dg21\n" \ + "puttagd 0, %0, %%dg22\n" \ + "puttagd 0, %0, %%dg23\n" \ + "puttagd 0, %0, %%dg24\n" \ + "puttagd 0, %0, %%dg25\n" \ + "puttagd 0, %0, %%dg26\n" \ + "puttagd 0, %0, %%dg27\n" \ + "puttagd 0, %0, %%dg28\n" \ + "puttagd 0, %0, %%dg29\n" \ + "puttagd 0, %0, %%dg30\n" \ + "puttagd 0, %0, %%dg31\n" \ + : \ + : "ri" ((char) (tag)) \ + : "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", \ + "%g6", "%g7", "%g8", "%g9", "%g10", "%g11", \ + "%g12", "%g13", "%g14", "%g15", "%g16", \ + "%g17", "%g18", "%g19", "%g20", "%g21", \ + "%g22", "%g23", "%g24", "%g25", "%g26", \ + "%g27", "%g28", "%g29", "%g30", "%g31"); \ +}) + +#define NATIVE_GREGS_SET_EMPTY() \ +({ \ + register char tag; \ + tag = ETAGEWD; \ + asm ("puttagd 0, %0, %%dg0\n" \ + "puttagd 0, %0, %%dg1\n" \ + "puttagd 0, %0, %%dg2\n" \ + "puttagd 0, %0, %%dg3\n" \ + "puttagd 0, %0, %%dg4\n" \ + "puttagd 0, %0, %%dg5\n" \ + "puttagd 0, %0, %%dg6\n" \ + "puttagd 0, %0, %%dg7\n" \ + "puttagd 0, %0, %%dg8\n" \ + "puttagd 0, %0, %%dg9\n" \ + "puttagd 0, %0, %%dg10\n" \ + "puttagd 0, %0, %%dg11\n" \ + "puttagd 0, %0, %%dg12\n" \ + "puttagd 0, %0, %%dg13\n" \ + "puttagd 0, %0, %%dg14\n" \ + "puttagd 0, %0, %%dg15\n" 
\ + /* g16-g19 are used by kernel */ \ + /*"puttagd 0, %0, %%dg16\n"*/ \ + /*"puttagd 0, %0, %%dg17\n"*/ \ + /*"puttagd 0, %0, %%dg18\n"*/ \ + /*"puttagd 0, %0, %%dg19\n"*/ \ + "puttagd 0, %0, %%dg20\n" \ + "puttagd 0, %0, %%dg21\n" \ + "puttagd 0, %0, %%dg22\n" \ + "puttagd 0, %0, %%dg23\n" \ + "puttagd 0, %0, %%dg24\n" \ + "puttagd 0, %0, %%dg25\n" \ + "puttagd 0, %0, %%dg26\n" \ + "puttagd 0, %0, %%dg27\n" \ + "puttagd 0, %0, %%dg28\n" \ + "puttagd 0, %0, %%dg29\n" \ + "puttagd 0, %0, %%dg30\n" \ + "puttagd 0, %0, %%dg31\n" \ + : \ + : "ri" ((char) (tag)) \ + : "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", \ + "%g6", "%g7", "%g8", "%g9", "%g10", "%g11", \ + "%g12", "%g13", "%g14", "%g15", /*"%g16",*/ \ + /*"%g17", "%g18", "%g19",*/ "%g20", "%g21", \ + "%g22", "%g23", "%g24", "%g25", "%g26", \ + "%g27", "%g28", "%g29", "%g30", "%g31"); \ +}) + +/* + * We copy the value,tag and extension for all global regs + * (we must copy all componets of register with bad tags too) + */ +#define E2K_GET_GREGS_FROM_THREAD(_g_u, _gt_u, _base) \ +({ \ + u64 reg0, reg1, reg2, reg3, reg6, reg7, reg8; \ + \ + asm ( \ + "addd %[base], 0x0, %[r6]\n" \ + "addd 0, 0x0, %[r7]\n" \ + "addd 0, 0x0, %[r8]\n" \ + \ + "1:\n" \ + "ldrd,2 [%[r6] + %[opc_0]], %[val_lo]\n" \ + "ldrd,5 [%[r6] + %[opc_16]], %[val_hi]\n" \ + "addd %[r6], 32, %[r6]\n" \ + \ + "gettagd,2 %[val_lo], %[tag_lo]\n" \ + "gettagd,5 %[val_hi], %[tag_hi]\n" \ + "shls %[tag_hi], 8, %[tag_hi]\n" \ + "ors %[tag_lo], %[tag_hi], %[tag_lo]\n" \ + "sth [%[gt_u], %[r8]], %[tag_lo]\n" \ + "addd %[r8], 2, %[r8]\n" \ + \ + "puttagd,2 %[val_lo], 0, %[val_lo]\n" \ + "std [%[g_u], %[r7]], %[val_lo]\n" \ + "addd %[r7], 8, %[r7]\n" \ + \ + "puttagd,5 %[val_hi], 0, %[val_hi]\n" \ + "std [%[g_u], %[r7]], %[val_hi]\n" \ + "addd %[r7], 8, %[r7]\n" \ + \ + "disp %%ctpr3, 1b\n" \ + "cmpedb %[r8], 32, %%pred2\n" \ + "ct %%ctpr3 ? ~ %%pred2\n" \ + \ + : [val_lo] "=&r"(reg0), [val_hi] "=&r"(reg1), \ + [tag_lo] "=&r"(reg2), [tag_hi] "=&r"(reg3), \ + [r6] "=&r"(reg6), [r7] "=&r"(reg7), [r8] "=&r"(reg8) \ + : [g_u] "r"(_g_u), [gt_u] "r"(_gt_u), [base] "r"(_base),\ + [opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16UL) \ + : "%ctpr3", "%pred1", "%pred2", "memory"); \ +}) + +#define E2K_SET_GREGS_TO_THREAD(_base, _g_u, _gt_u) \ +({ \ + u64 reg0, reg1, reg2, reg3, reg6, reg7, reg8; \ + \ + asm ( \ + "addd 0, 0x0, %[r6]\n" \ + "addd 0, 0x0, %[r7]\n" \ + "addd %[base], 0x0, %[r8]\n" \ + \ + "2:\n" \ + "ldd [%[g_u], %[r6]], %[val_lo]\n" \ + "addd %[r6], 8, %[r6]\n" \ + "ldd [%[g_u], %[r6]], %[val_hi]\n" \ + "addd %[r6], 8, %[r6]\n" \ + \ + "ldb [%[gt_u], %[r7]], %[tag_lo]\n" \ + "addd %[r7], 1, %[r7]\n" \ + "ldb [%[gt_u], %[r7]], %[tag_hi]\n" \ + "addd %[r7], 1, %[r7]\n" \ + \ + "puttagd,2 %[val_lo], %[tag_lo], %[val_lo]\n" \ + "puttagd,5 %[val_hi], %[tag_hi], %[val_hi]\n" \ + \ + "strd,2 [%[r8] + %[opc_0]], %[val_lo]\n" \ + "strd,5 [%[r8] + %[opc_16]], %[val_hi]\n" \ + "addd %[r8], 32, %[r8]\n" \ + \ + "disp %%ctpr3, 2b\n" \ + \ + "cmpedb %[r7], 32, %%pred2\n" \ + "ct %%ctpr3 ? 
~ %%pred2\n" \ + \ + : [val_lo] "=&r"(reg0), [val_hi] "=&r"(reg1), \ + [tag_lo] "=&r"(reg2), [tag_hi] "=&r"(reg3), \ + [r6] "=&r"(reg6), [r7] "=&r"(reg7), [r8] "=&r"(reg8) \ + : [base] "r"(_base), [g_u] "r"(_g_u), [gt_u] "r"(_gt_u),\ + [opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16UL) \ + : "%ctpr3", "%pred2", "memory"); \ +}) + +#define E2K_MOVE_GREG_TO_REG(greg_no, local_reg) \ +({ \ +_Pragma("no_asm_inline") \ + asm volatile ("\nmovts \t%%g" #greg_no ", %0" \ + : "=&r" (local_reg)); \ +}) +#define E2K_MOVE_DGREG_TO_DREG(greg_no, local_reg) \ +({ \ +_Pragma("no_asm_inline") \ + asm volatile ("\nmovtd \t%%dg" #greg_no ", %0" \ + : "=&r" (local_reg)); \ +}) + +#define E2K_MOVE_REG_TO_GREG(greg_no, local_reg) \ +({ \ +_Pragma("no_asm_inline") \ + asm volatile ("\nmovts \t%0, %%g" #greg_no \ + : \ + : "&r" ((__e2k_u32_t) (local_reg))); \ +}) +#define E2K_MOVE_DREG_TO_DGREG(greg_no, local_reg) \ +({ \ +_Pragma("no_asm_inline") \ + asm volatile ("\nmovtd \t%0, %%dg" #greg_no \ + : \ + : "r" ((__e2k_u64_t) (local_reg))); \ +}) +#define E2K_ADD_DREGS_TO_DGREG(greg_no, local_reg1, local_reg2) \ +({ \ +_Pragma("no_asm_inline") \ + asm volatile ("\naddd \t%0, %1, %%dg" #greg_no \ + : \ + : "ri" ((__e2k_u64_t) (local_reg1)), \ + "r" ((__e2k_u64_t) (local_reg2))); \ +}) + +/* + * bug #97048 + * + * We have following macros for registers reading/writing + * depending on whether lcc supports the register in question: + * + * NATIVE_GET_[D]SREG_OPEN() - read supported register + * NATIVE_GET_[D]SREG_CLOSED() - read unsupported register + * + * NATIVE_SET_[D]SREG_OPEN() - write supported register + * NATIVE_SET_[D]SREG_OPEN_NOIRQ() - write supported register when + * it must be done under closed interrupts (for psp.hi/pcsp.hi/cr/cutd) + * NATIVE_SET_[D]SREG_CLOSED_NOEXC() - write unsupported register when + * it is _not_ listed in exceptions list in 1.1.1 1) of "Scheduling" + * NATIVE_SET_[D]SREG_CLOSED_EXC() - write unsupported register when + * it _is_ listed in exceptions list in 1.1.1 1) of "Scheduling" + */ + +/* + * bug #60599, #97048 + * Allow for lcc optimizations of registers reads and writes + * (when lcc supports the registers in question) + */ +#if defined CONFIG_OPTIMIZE_REGISTERS_ACCESS +# define NATIVE_GET_SREG_OPEN(reg_mnemonic) \ +({ \ + register __e2k_u32_t res; \ + asm ("rrs %%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +# define NATIVE_GET_DSREG_OPEN(reg_mnemonic) \ +({ \ + register __e2k_u64_t res; \ + asm ("rrd %%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +# define NATIVE_SET_SREG_OPEN(reg_mnemonic, val) \ +({ \ + /* Fake return value is needed for lcc to optimize inline asm... */ \ + register __e2k_u32_t res; \ + asm ("rws %1, %%" #reg_mnemonic \ + : "=r" (res) \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +# define NATIVE_SET_DSREG_OPEN(reg_mnemonic, val) \ +({ \ + /* Fake return value is needed for lcc to optimize inline asm... */ \ + register __e2k_u64_t res; \ + asm ("rwd %1, %%" #reg_mnemonic \ + : "=r" (res) \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +/* + * *_NOIRQ version is for psp.hi/pcsp.hi/cr/cutd + * + * Rules for writing: + * 1) There must be NO exceptions and interrupts + * 2) As a consequence of 1), instructions that are placed _later_ + * than "rw" should not generate exceptions too because compiler + * can reorder them before the "rw" instruction. + * + * IOW in the whole area covered by all_irq_save()/all_irq_restore() + * there must not be any exception-generating instructions. 
+ */ + +# define NATIVE_SET_DSREG_OPEN_NOIRQ(reg_mnemonic, val) \ +({ \ + register __e2k_u64_t res; \ + asm ("rwd %1, %%" #reg_mnemonic \ + : "=r" (res) \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#else +# define NATIVE_GET_SREG_OPEN NATIVE_GET_SREG_CLOSED +# define NATIVE_GET_DSREG_OPEN NATIVE_GET_DSREG_CLOSED +# define NATIVE_SET_SREG_OPEN(reg, val) \ + NATIVE_SET_SREG_CLOSED_NOEXC(reg, (val), 7) +# define NATIVE_SET_DSREG_OPEN(reg, val) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(reg, (val), 7) +# define NATIVE_SET_DSREG_OPEN_NOIRQ(reg, val) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(reg, (val), 7) +#endif + + +/* + * bug #97048 + * Closed GNU asm is used for rarely read registers. + * Keep "volatile" since some of those registers can have side effects + * (for example, see %dibsr reading in arch/e2k/kernel/perf_event.c - + * it must be done before reading %dimar; or look at %clkr). + */ +#define NATIVE_GET_SREG_CLOSED(reg_mnemonic) \ +({ \ + register __e2k_u32_t res; \ + _Pragma("no_asm_inline") \ + asm volatile ("rrs %%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define NATIVE_GET_DSREG_CLOSED(reg_mnemonic) \ +({ \ + register __e2k_u64_t res; \ + _Pragma("no_asm_inline") \ + asm volatile ("rrd %%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define NATIVE_GET_DSREG_CLOSED_CLOBBERS(reg_mnemonic, clobbers) \ +({ \ + register __e2k_u64_t res; \ + _Pragma("no_asm_inline") \ + asm volatile ("rrd %%" #reg_mnemonic ", %0" \ + : "=r" (res) :: clobbers); \ + res; \ +}) + +/* + * These macros will insert real wide instructions + * instead of doing all nops with "nop x" field in HS. + * This is needed, for example, when writing %wd. + */ +#define NOP_0_MINUS_4 0 +#define NOP_1_MINUS_4 0 +#define NOP_2_MINUS_4 0 +#define NOP_3_MINUS_4 0 +#define NOP_4_MINUS_4 0 +#define NOP_5_MINUS_4 1 +#define NOP_6_MINUS_4 2 +#define NOP_7_MINUS_4 3 + +/* + * bug #97048 + * + * For closed writes we have to manually check how many NOPs are needed + * for this register. If we try to use _Pragma("no_asm_inline"), then + * lcc will use its default value of 5 nops which is not always enough. + * + * Also, according to "Scheduling 1.1.1", the next 3 long instructions + * after the write must not generate delayed exceptions, and the next + * 4 long instruction must not generate exact exceptions. So add 4 nops + * after the write. + * + * This is slow but this version is used only for rarely written registers. + * %usd/%psp/etc registers are supported by lcc and are written with an + * open GNU asm. + */ +#define NATIVE_SET_SREG_CLOSED_NOEXC(reg_mnemonic, val, nop) \ +({ \ + asm volatile ("{nop " __stringify(NOP_##nop##_MINUS_4) "\n" \ + " rws %0, %%" #reg_mnemonic "}\n" \ + "{nop} {nop} {nop} {nop}" \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define NATIVE_SET_DSREG_CLOSED_NOEXC(reg_mnemonic, val, nop) \ +({ \ + asm volatile ("{nop " __stringify(NOP_##nop##_MINUS_4) "\n" \ + " rwd %0, %%" #reg_mnemonic "}" \ + "{nop} {nop} {nop} {nop}" \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +/* + * For some registers (see "Scheduling 1.1.1") there is no requirement + * of avoiding deferred and exact exception after the long instruction. + * But some registers (e.g. %wd, %bgr) still require at least 1 real + * instruction after the write. 
+ */ +#define NATIVE_SET_SREG_CLOSED_EXC(reg_mnemonic, val, nop) \ +({ \ + asm volatile ("{nop " __stringify(nop) "\n" \ + " rws %0, %%" #reg_mnemonic "}\n" \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define NATIVE_SET_DSREG_CLOSED_EXC(reg_mnemonic, val, nop) \ +({ \ + asm volatile ("{nop " __stringify(nop) "\n" \ + " rwd %0, %%" #reg_mnemonic "}" \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#define NATIVE_SET_DSREG_CLOSED_EXC_CLOBBERS(reg_mnemonic, val, nop, clobbers) \ +({ \ + asm volatile ("{nop " __stringify(nop) "\n" \ + " rwd %0, %%" #reg_mnemonic "}" \ + : \ + : "ri" ((__e2k_u64_t) (val)) \ + : clobbers); \ +}) + + +#define NATIVE_EXIT_HANDLE_SYSCALL(sbr, usd_hi, usd_lo, upsr) \ +({ \ + asm volatile ("{rwd %0, %%sbr}" \ + "{rwd %1, %%usd.hi}" \ + "{rwd %2, %%usd.lo}" \ + "{rws %3, %%upsr;" \ + " nop 4}\n" \ + : \ + : "ri" ((__e2k_u64_t) (sbr)), \ + "ri" ((__e2k_u64_t) (usd_hi)), \ + "ri" ((__e2k_u64_t) (usd_lo)), \ + "ri" ((__e2k_u32_t) (upsr))); \ +}) + + +/* lcc ignores manually specified clobbers for opened GNU asm, + * so use closed version (bug #69565, bug #60599) */ +#define NATIVE_SET_PSR_IRQ_BARRIER(val) \ +({ \ + asm volatile ("{\n" \ + "nop 5\n" \ + "rwd %0, %%psr" \ + "}" \ + : \ + : "ri" ((__e2k_u64_t) (val)) \ + : "memory", PREEMPTION_CLOBBERS); \ +}) +#define NATIVE_SET_UPSR_IRQ_BARRIER(val) \ +({ \ + asm volatile ("{\n" \ + "nop 4\n" \ + "rwd %0, %%upsr" \ + "}" \ + : \ + : "ri" ((__e2k_u64_t) (val)) \ + : "memory", PREEMPTION_CLOBBERS); \ +}) + + +#define NATIVE_GET_MMUREG(reg_mnemonic) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("mmurr \t%%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define NATIVE_SET_MMUREG(reg_mnemonic, val) \ +({ \ + asm volatile ("mmurw \t%0, %%" #reg_mnemonic \ + : \ + : "r" ((__e2k_u64_t) (val))); \ +}) + +#define NATIVE_TAGGED_LOAD_TO_MMUREG(reg_mnemonic, _addr) \ +do { \ + unsigned long long _tmp; \ + asm volatile ("ldrd [ %[addr] + %[opc] ], %[tmp]\n" \ + "mmurw,s %[tmp], %%" #reg_mnemonic "\n" \ + : [tmp] "=r" (_tmp) \ + : [addr] "m" (*((unsigned long long *) (_addr))), \ + [opc] "i" (TAGGED_MEM_LOAD_REC_OPC)); \ +} while (0) + +#define NATIVE_STORE_TAGGED_MMUREG(_addr, reg_mnemonic) \ +do { \ + unsigned long long _tmp; \ + asm volatile ("mmurr %%" #reg_mnemonic ", %[tmp]\n" \ + "strd [ %[addr] + %[opc] ], %[tmp]\n" \ + : [tmp] "=r" (_tmp) \ + : [addr] "m" (*((unsigned long long *) (_addr))), \ + [opc] "i" (TAGGED_MEM_STORE_REC_OPC)); \ +} while (0) + +#define NATIVE_GET_MMU_DEBUG_REG(reg_no) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("ldd,5 \t[%1 + 0] %2, %0" \ + : "=r" (res) \ + : "ri" ((__e2k_ptr_t) _DEBUG_REG_NO_TO_MMU_ADDR(reg_no)), \ + "i" MAS_MMU_DEBUG_REG); \ + res; \ +}) +#define NATIVE_SET_MMU_DEBUG_REG(reg_no, val) \ +({ \ + asm volatile ("std,2 \t[%0 + 0] %1, %2" \ + : \ + : "ri" ((__e2k_ptr_t) _DEBUG_REG_NO_TO_MMU_ADDR(reg_no)), \ + "i" MAS_MMU_DEBUG_REG, \ + "ri" ((__e2k_u64_t) (val))); \ +}) + +#define NATIVE_GET_AAUREG(reg_mnemonic, chan_letter) \ +({ \ + register __e2k_u32_t res; \ + asm ("aaurr," #chan_letter " \t%%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +/* This macro is used to pack two 'aaurr' into one long instruction */ +#define NATIVE_GET_AAUREGS(l_reg, r_reg, lval, rval) \ +({ \ + asm ("aaurr,2 \t%%" #l_reg ", %0\n" \ + "aaurr,5 \t%%" #r_reg ", %1" \ + : "=r" (lval), "=r" (rval)); \ +}) + +#define NATIVE_SET_AAUREG(reg_mnemonic, val, chan_letter) \ +({ \ + int unused; \ + asm ("aaurw," #chan_letter " %1, %%" #reg_mnemonic \ + : "=r" (unused) \ + : 
"r" ((__e2k_u32_t) (val))); \ +}) + +/* This macro is used to pack two 'aaurr' into one long instruction */ +#define NATIVE_SET_AAUREGS(l_reg, r_reg, lval, rval) \ +do { \ + int unused; \ + asm ("aaurw,2 %1, %%" #l_reg "\n" \ + "aaurw,5 %2, %%" #r_reg \ + : "=r" (unused) \ + : "r" ((__e2k_u32_t) (lval)), "r" ((__e2k_u32_t) (rval))); \ +} while (0) + +#define NATIVE_GET_AAUDREG(reg_mnemonic, chan_letter) \ +({ \ + register __e2k_u64_t res; \ + asm ("aaurrd," #chan_letter " %%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define NATIVE_GET_AAUDREGS(l_reg, r_reg, lval, rval) \ +({ \ + asm ("aaurrd,2 %%" #l_reg ", %0\n" \ + "aaurrd,5 %%" #r_reg ", %1" \ + : "=r" (lval), "=r" (rval)); \ +}) + + +#define NATIVE_SET_AAUDREG(reg_mnemonic, val, chan_letter) \ +do { \ + int unused; \ + asm ("aaurwd," #chan_letter " %1, %%" #reg_mnemonic \ + : "=r" (unused) \ + : "r" (val)); \ +} while (0) + +#define NATIVE_SET_AAUDREGS(l_reg, r_reg, lval, rval) \ +do { \ + int unused; \ + asm ("aaurwd,2 %1, %%" #l_reg "\n" \ + "aaurwd,5 %2, %%" #r_reg \ + : "=r" (unused) \ + : "r" (lval), "r" (rval)); \ +} while (0) + + +#define NATIVE_GET_AAUQREG(mem_p, reg_mnemonic) \ +({ \ + register __e2k_u64_t lo asm ("%b[0]"); \ + register __e2k_u64_t hi asm ("%b[1]"); \ + asm volatile ("aaurrq \t%%" #reg_mnemonic ", %%qb[0]" \ + : \ + : \ + : "%b[0]", "%b[1]"); \ + (mem_p)->lo = lo; \ + (mem_p)->hi = hi; \ +}) + +#define NATIVE_GET_AAUQREGS(mem_p, reg1, reg2, reg3, reg4) \ +({ \ + register __e2k_u64_t lo1 asm ("%b[0]"); \ + register __e2k_u64_t hi1 asm ("%b[1]"); \ + register __e2k_u64_t lo2 asm ("%b[2]"); \ + register __e2k_u64_t hi2 asm ("%b[3]"); \ + register __e2k_u64_t lo3 asm ("%b[4]"); \ + register __e2k_u64_t hi3 asm ("%b[5]"); \ + register __e2k_u64_t lo4 asm ("%b[6]"); \ + register __e2k_u64_t hi4 asm ("%b[7]"); \ + asm volatile ("aaurrq \t%%" #reg1 ", %%qb[0]\n" \ + "aaurrq \t%%" #reg2 ", %%qb[2]\n" \ + "aaurrq \t%%" #reg3 ", %%qb[4]\n" \ + "aaurrq \t%%" #reg4 ", %%qb[6]\n" \ + : \ + : \ + : "%b[0]", "%b[1]", "%b[2]", "%b[3]", \ + "%b[4]", "%b[5]", "%b[6]", "%b[7]"); \ + (mem_p)->lo = lo1; \ + (mem_p)->hi = hi1; \ + (mem_p + 1)->lo = lo2; \ + (mem_p + 1)->hi = hi2; \ + (mem_p + 2)->lo = lo3; \ + (mem_p + 2)->hi = hi3; \ + (mem_p + 3)->lo = lo4; \ + (mem_p + 3)->hi = hi4; \ +}) + +#define NATIVE_SET_AAUQREG(reg_mnemonic, mem_p) \ +do { \ + register u64 lo asm ("%b[0]"); \ + register u64 hi asm ("%b[1]"); \ + int unused; \ + lo = (mem_p)->lo; \ + hi = (mem_p)->hi; \ + asm ("aaurwq %%r0, %%" #reg_mnemonic \ + : "=r" (unused) \ + : "r" (lo), "r" (hi)); \ +} while (0) + +#define NATIVE_SET_AAUQREGS(mem_p, reg1, reg2, reg3, reg4) \ +do { \ + asm volatile ("{ldd,0 [ %0 + 0x0 ], %%db[0]\n" \ + " ldd,2 [ %0 + 0x8 ], %%db[1]\n" \ + " ldd,3 [ %0 + 0x10 ], %%db[2]\n" \ + " ldd,5 [ %0 + 0x18 ], %%db[3]}\n" \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_ISET_V6 version */ \ + "{nop 3\n" \ + " ldd,0 [ %0 + 0x20 ], %%db[4]\n" \ + " ldd,2 [ %0 + 0x28 ], %%db[5]\n" \ + " ldd,3 [ %0 + 0x30 ], %%db[6]\n" \ + " ldd,5 [ %0 + 0x38 ], %%db[7]}\n" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{nop 1\n" \ + " ldd,0 [ %0 + 0x20 ], %%db[4]\n" \ + " ldd,2 [ %0 + 0x28 ], %%db[5]\n" \ + " ldd,3 [ %0 + 0x30 ], %%db[6]\n" \ + " ldd,5 [ %0 + 0x38 ], %%db[7]}\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + "aaurwq,2 %%qb[0], %%" #reg1 "\n" \ + "aaurwq,2 %%qb[2], %%" #reg2 "\n" \ + "aaurwq,2 %%qb[4], %%" #reg3 "\n" \ + "aaurwq,2 %%qb[6], %%" #reg4 "\n" \ + : \ + : "r" (mem_p), [facility] "i" (CPU_FEAT_ISET_V6) \ + : "%b[0]", 
"%b[1]", "%b[2]", "%b[3]", \ + "%b[4]", "%b[5]", "%b[6]", "%b[7]"); \ +} while (0) + +/* Clear AAU to prepare it for restoring */ +#define NATIVE_CLEAR_APB() \ +do { \ + asm volatile ("1:\n" \ + "{ipd 0; disp %%ctpr2, 1b}" \ + : \ + : \ + : "ctpr2"); \ +} while (0) + +/* Do "disp" for all %ctpr's */ +#define E2K_DISP_CTPRS() \ + asm volatile ("1:\n" \ + "{ipd 0; disp %%ctpr1, 1b}" \ + "{ipd 0; disp %%ctpr2, 1b}" \ + "{ipd 0; disp %%ctpr3, 1b}" \ + : \ + : \ + : "ctpr1", "ctpr2", "ctpr3") + +#define LOAD_NV_MAS(_addr, _val, _mas, size_letter, clobber) \ +do { \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE ("ld" #size_letter" %[addr], %[val], mas=%[mas]" \ + : [val] "=r" (_val) \ + : [addr] "m" (*(_addr)), \ + [mas] "i" (_mas) \ + : clobber); \ +} while (0) + +#define STORE_NV_MAS(_addr, _val, _mas, size_letter, clobber) \ +do { \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE ("st" #size_letter" %[addr], %[val], mas=%[mas]" \ + : [addr] "=m" (*(_addr)) \ + : [val] "r" (_val), \ + [mas] "i" (_mas) \ + : clobber); \ +} while (0) + +/* + * Do load with specified MAS + */ + +/* + * After iset v6 these loads are not ordered with regards to RAM accesses. + * so add barriers manually. Driver writers who want control over barriers + * should use readX_relaxed()/writeX_relaxed() anyway. + */ +#if CONFIG_CPU_ISET >= 6 + +# define READ_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + E2K_WAIT_LD_C_LAL_SAL(); \ +} while (0) +# define WRITE_MAS_BARRIER_BEFORE(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + E2K_WAIT_ST_C_SAS_LD_C_SAL(); \ +} while (0) +/* + * Not required by documentation, but this is how + * x86 works and how most of the drivers are tested. + */ +# define WRITE_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + E2K_WAIT_ST_C_SAS(); \ +} while (0) + +#elif CONFIG_CPU_ISET == 0 + +# define READ_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_ld_c); \ +} while (0) +# define WRITE_MAS_BARRIER_BEFORE(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_st_c | _ld_c); \ +} while (0) +/* + * Not required by documentation, but this is how + * x86 works and how most of the drivers are tested. 
+ */ +# define WRITE_MAS_BARRIER_AFTER(mas) \ +do { \ + if ((mas) == MAS_IOADDR) \ + __E2K_WAIT(_st_c); \ +} while (0) + +#else + +# define READ_MAS_BARRIER_AFTER(mas) +# define WRITE_MAS_BARRIER_BEFORE(mas) +# define WRITE_MAS_BARRIER_AFTER(mas) +#endif + +#define NATIVE_DO_READ_MAS_TO(addr, val, mas, size_letter, chan_letter) \ +({ \ + int __mas = (mas); \ + asm volatile ("ld" #size_letter "," #chan_letter " \t0x0, [%1] %2, %0" \ + : "=r" (val) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "i" (__mas)); \ + READ_MAS_BARRIER_AFTER(__mas); \ +}) + +#define NATIVE_DO_READ_MAS(addr, mas, type, size_letter, chan_letter) \ +({ \ + register type res; \ + int __mas = (mas); \ + asm volatile ("ld" #size_letter "," #chan_letter " \t0x0, [%1] %2, %0" \ + : "=r" (res) \ + : "r" ((u64) (addr)), \ + "i" (__mas)); \ + READ_MAS_BARRIER_AFTER(__mas); \ + res; \ +}) + +#define NATIVE_DO_WRITE_MAS(addr, val, mas, type, size_letter, chan_letter) \ +({ \ + int __mas = (mas); \ + WRITE_MAS_BARRIER_BEFORE(__mas); \ + asm volatile ("st" #size_letter "," #chan_letter " \t0x0, [%0] %2, %1" \ + : \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((type) (val)), \ + "i" (__mas)); \ + WRITE_MAS_BARRIER_AFTER(__mas); \ +}) + +#define NATIVE_DO_WRITE_TAGGED(addr, val, type, size_letter, chan_letter) \ +({ \ + asm volatile ("st" #size_letter ",sm," #chan_letter " \t0x0, [%0], %1" \ + : \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((type) (val))); \ +}) + +#define NATIVE_READ_MAS_B_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), b, chan_letter) +#define NATIVE_READ_MAS_H_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), h, chan_letter) +#define NATIVE_READ_MAS_W_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), w, chan_letter) +#define NATIVE_READ_MAS_D_CH_TO(addr, val, mas, chan_letter) \ + NATIVE_DO_READ_MAS_TO((addr), (val), (mas), d, chan_letter) + +#define NATIVE_READ_MAS_B_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u8_t, b, chan_letter) +#define NATIVE_READ_MAS_H_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u16_t, h, chan_letter) +#define NATIVE_READ_MAS_W_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u32_t, w, chan_letter) +#define NATIVE_READ_MAS_D_CH(addr, mas, chan_letter) \ + NATIVE_DO_READ_MAS((addr), (mas), __e2k_u64_t, d, chan_letter) + +#define NATIVE_READ_MAS_B(addr, mas) NATIVE_READ_MAS_B_CH((addr), (mas), 2) +#define NATIVE_READ_MAS_H(addr, mas) NATIVE_READ_MAS_H_CH((addr), (mas), 2) +#define NATIVE_READ_MAS_W(addr, mas) NATIVE_READ_MAS_W_CH((addr), (mas), 2) +#define NATIVE_READ_MAS_D(addr, mas) NATIVE_READ_MAS_D_CH((addr), (mas), 2) + +#define NATIVE_READ_MAS_B_5(addr, mas) NATIVE_READ_MAS_B_CH((addr), (mas), 5) +#define NATIVE_READ_MAS_H_5(addr, mas) NATIVE_READ_MAS_H_CH((addr), (mas), 5) +#define NATIVE_READ_MAS_W_5(addr, mas) NATIVE_READ_MAS_W_CH((addr), (mas), 5) +#define NATIVE_READ_MAS_D_5(addr, mas) NATIVE_READ_MAS_D_CH((addr), (mas), 5) + +#define NATIVE_WRITE_MAS_B_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u8_t, b, \ + chan_letter) +#define NATIVE_WRITE_MAS_H_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u16_t, h, \ + chan_letter) +#define NATIVE_WRITE_MAS_W_CH(addr, val, mas, chan_letter) \ + NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u32_t, w, \ + chan_letter) +#define NATIVE_WRITE_MAS_D_CH(addr, val, mas, chan_letter) \ + 
NATIVE_DO_WRITE_MAS((addr), (val), (mas), __e2k_u64_t, d, \ + chan_letter) +#define NATIVE_WRITE_TAGGED_D_CH(addr, val, chan_letter) \ + NATIVE_DO_WRITE_TAGGED((addr), (val), __e2k_u64_t, d, \ + chan_letter) +#define NATIVE_WRITE_MAS_B(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u8_t, b, 2) +#define NATIVE_WRITE_MAS_H(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u16_t, h, 2) +#define NATIVE_WRITE_MAS_W(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u32_t, w, 2) +#define NATIVE_WRITE_MAS_D(addr, val, mas) \ + NATIVE_DO_WRITE_MAS(addr, val, mas, __e2k_u64_t, d, 2) + + +/* + * Relaxed IO read/write + * + * bug #81369: put every UC access into a separate + * wide instruction to avoid reorderings possible if + * one access hits in DTLB and another one misses. + */ +#define IO_READ(_addr, type, size_letter) \ +({ \ + type __ior_val; \ + asm ("{ld" #size_letter " %[addr], %[val]}" \ + : [val] "=r" (__ior_val) \ + : [addr] "m" (*((volatile type *) (_addr))) \ + : "memory"); \ + __ior_val; \ +}) + +#define IO_WRITE(_addr, _val, type, size_letter) \ +do { \ + asm ("{st" #size_letter " %[addr], %[val]}" \ + : [addr] "=m" (*((volatile type *) (_addr))) \ + : [val] "r" ((type) (_val)) \ + : "memory"); \ +} while (0) + +#define IO_READ_B(addr) IO_READ((addr), u8, b) +#define IO_READ_H(addr) IO_READ((addr), u16, h) +#define IO_READ_W(addr) IO_READ((addr), u32, w) +#define IO_READ_D(addr) IO_READ((addr), u64, d) + +#define IO_WRITE_B(addr, val) IO_WRITE((addr), (val), u8, b) +#define IO_WRITE_H(addr, val) IO_WRITE((addr), (val), u16, h) +#define IO_WRITE_W(addr, val) IO_WRITE((addr), (val), u32, w) +#define IO_WRITE_D(addr, val) IO_WRITE((addr), (val), u64, d) + + +/* + * Read from and write to system configuration registers SIC + * Now SIC is the same as NBSRs registers + */ + +#define NATIVE_SET_SICREG(reg_mnemonic, val, cln, pln) \ +({ \ + register __e2k_u64_t addr; \ + register __e2k_u64_t node_id = (cln) << 2; \ + node_id = node_id + ((pln)&0x3); \ + addr = (__e2k_u64_t) THE_NODE_NBSR_PHYS_BASE(node_id); \ + addr = addr + SIC_##reg_mnemonic; \ + NATIVE_WRITE_MAS_W(addr, val, MAS_IOADDR); \ +}) +#define NATIVE_GET_SICREG(reg_mnemonic, cln, pln) \ +({ \ + register __e2k_u32_t res; \ + register __e2k_u64_t addr; \ + register __e2k_u64_t node_id = (cln) << 2; \ + node_id = node_id + ((pln)&0x3); \ + addr = (__e2k_u64_t) THE_NODE_NBSR_PHYS_BASE(node_id); \ + addr = addr + SIC_##reg_mnemonic; \ + res = NATIVE_READ_MAS_W(addr, MAS_IOADDR); \ + res; \ +}) + + +/* + * Prefetching with fully speculative load is + * needed when the passed address can be invalid. 
+ */ +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +# define E2K_PREFETCH_L2_SPEC(addr) \ +do { \ + int unused; \ + asm ("ldb,sm %1, 0, %%empty, mas=%2\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L2_SPEC_OFFSET(addr, offset) \ +do { \ + int unused; \ + asm ("ldb,sm %1, %2, %%empty, mas=%3\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (offset), \ + "i" (MAS_LOAD_SPEC | MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L2_OFFSET(addr, offset) \ +do { \ + int unused; \ + asm ("ldb,sm %1, %2, %%empty, mas=%3\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (offset), \ + "i" (MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L2_256(addr) \ +do { \ + int unused; \ + asm ( "ldb,0,sm %1, 0, %%empty, mas=%2\n" \ + "ldb,2,sm %1, 64, %%empty, mas=%2\n" \ + "ldb,3,sm %1, 128, %%empty, mas=%2\n" \ + "ldb,5,sm %1, 192, %%empty, mas=%2" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L2(addr) \ +do { \ + int unused; \ + asm ("ldb,sm %1, 0, %%empty, mas=%2" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_BYPASS_L1_CACHE)); \ +} while (0) + +# define E2K_PREFETCH_L1_SPEC(addr) \ +do { \ + int unused; \ + asm ("ldb,sm %1, 0, %%empty, mas=%2\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (MAS_LOAD_SPEC)); \ +} while (0) + +# define E2K_PREFETCH_L1_SPEC_OFFSET(addr, offset) \ +do { \ + int unused; \ + asm ("ldb,sm %1, %2, %%empty, mas=%3\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (offset), \ + "i" (MAS_LOAD_SPEC)); \ +} while (0) + +# define E2K_PREFETCH_L1_OFFSET(addr, offset) \ +do { \ + int unused; \ + asm ("ldb,sm %1, %2, %%empty\n" \ + : "=r" (unused) \ + : "r" (addr), \ + "i" (offset)); \ +} while (0) + +# define E2K_PREFETCH_L1_256(addr) \ +do { \ + int unused; \ + asm ( "ldb,0,sm %1, 0, %%empty\n" \ + "ldb,2,sm %1, 64, %%empty\n" \ + "ldb,3,sm %1, 128, %%empty\n" \ + "ldb,5,sm %1, 192, %%empty" \ + : "=r" (unused) \ + : "r" (addr)); \ +} while (0) + +# define E2K_PREFETCH_L1(addr) \ +do { \ + int unused; \ + asm ("ldb,3 %1, 0, %%empty" \ + : "=r" (unused) \ + : "r" (addr)); \ +} while (0) +#else +# define E2K_PREFETCH_L2_SPEC_OFFSET(addr, offset) \ + do { (void) (addr); (void) (offset); } while (0) +# define E2K_PREFETCH_L2_OFFSET(addr, offset) \ + do { (void) (addr); (void) (offset); } while (0) +# define E2K_PREFETCH_L1_SPEC_OFFSET(addr, offset) \ + do { (void) (addr); (void) (offset); } while (0) +# define E2K_PREFETCH_L1_OFFSET(addr, offset) \ + do { (void) (addr); (void) (offset); } while (0) +# define E2K_PREFETCH_L2_SPEC(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L2_256(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L2(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L1_SPEC(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L1_256(addr) do { (void) (addr); } while (0) +# define E2K_PREFETCH_L1(addr) do { (void) (addr); } while (0) +#endif + +/* + * Recovery operations + * chan: 0, 1, 2 or 3 + */ +#define NATIVE_RECOVERY_TAGGED_LOAD_TO(_addr, _opc, _val, _tag, _chan) \ +do { \ + asm ( "{nop 1\n" \ + " cmpesb,0 %[chan], 0, %%pred20\n" \ + " cmpesb,1 %[chan], 1, %%pred21\n" \ + " cmpesb,3 %[chan], 2, %%pred22\n" \ + " cmpesb,4 %[chan], 3, %%pred23}\n" \ + "{nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val] ? %%pred20\n" \ + " ldrd,2 [ %[addr] + %[opc] ], %[val] ? %%pred21\n" \ + " ldrd,3 [ %[addr] + %[opc] ], %[val] ? 
%%pred22\n" \ + " ldrd,5 [ %[addr] + %[opc] ], %[val] ? %%pred23}\n" \ + "{gettagd,2 %[val], %[tag]\n" \ + " puttagd,5 %[val], 0, %[val]}\n" \ + : [val] "=r"(_val), [tag] "=r"(_tag) \ + : [addr] "r" (_addr), [opc] "r" (_opc), \ + [chan] "r" ((u32) (_chan)) \ + : "memory", "pred20", "pred21", "pred22", "pred23"); \ +} while (0) + +#define NATIVE_RECOVERY_LOAD_TO(addr, opc, val, chan_letter) \ +({ \ + asm volatile ("ldrd," #chan_letter "\t[%1 + %2], %0" \ + : "=r"(val) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((__e2k_u64_t) (opc))); \ +}) + +#define NATIVE_LOAD_TAGGED_DGREGS(addr, numlo, numhi) \ +do { \ + asm ("ldrd,2 [%0 + %1], %%dg" #numlo "\n" \ + "ldrd,5 [%0 + %2], %%dg" #numhi "\n" \ + : \ + : "r" (addr), \ + "i" (TAGGED_MEM_LOAD_REC_OPC), \ + "i" (TAGGED_MEM_LOAD_REC_OPC | 8UL) \ + : "%g" #numlo, "%g" #numhi); \ +} while (0) + +#define NATIVE_STORE_TAGGED_DGREG(addr, greg_no) \ +do { \ + asm ("strd [%0 + %1], %%dg" #greg_no \ + : \ + : "r" (addr), "i" (TAGGED_MEM_STORE_REC_OPC)); \ +} while (0) + +/* + * chan: 0, 1, 2 or 3 + * vr: set to 0 if we want to preserve the lower 4-byte word + * (same as vr in cellar) + */ +#define NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(_addr, _opc, greg_no, \ + _chan, _vr, _quadro) \ +do { \ + u64 val, val_8; \ + u32 __chan = (u32) (_chan); \ + u32 __quadro = (u32) (_quadro); \ + u32 __chan_q = (__quadro) ? __chan : -1; \ + u64 __opc = (_opc); \ + asm volatile ( \ + "{disp %%ctpr1, qpswitchd_sm\n" \ + " cmpesb,0 %[chan], 0, %%pred20\n" \ + " cmpesb,1 %[chan], 1, %%pred21\n" \ + " cmpesb,3 %[chan], 2, %%pred22\n" \ + " cmpesb,4 %[chan], 3, %%pred23}\n" \ + "{cmpesb,0 %[chan_q], 0, %%pred24\n" \ + " cmpesb,1 %[chan_q], 1, %%pred25\n" \ + " cmpesb,3 %[chan_q], 2, %%pred26\n" \ + " cmpesb,4 %[chan_q], 3, %%pred27}\n" \ + "{ldrd,0 [ %[addr] + %[opc] ], %[val] ? %%pred20\n" \ + " ldrd,2 [ %[addr] + %[opc] ], %[val] ? %%pred21\n" \ + " ldrd,3 [ %[addr] + %[opc] ], %[val] ? %%pred22\n" \ + " ldrd,5 [ %[addr] + %[opc] ], %[val] ? %%pred23\n" \ + " cmpesb,1 %[quadro], 0, %%pred18\n" \ + " cmpesb,4 %[vr], 0, %%pred19}\n" \ + "{nop 3\n" \ + " ldrd,0 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred24\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred25\n" \ + " ldrd,3 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred26\n" \ + " ldrd,5 [ %[addr] + %[opc_8] ], %[val_8] ? %%pred27}\n" \ + "{movts %%g" #greg_no ", %[val] ? %%pred19}\n" \ + "{movtd %[val_8], %%dg" #greg_no " ? ~ %%pred18\n" \ + " addd %[greg], 0, %%db[0] ? ~ %%pred18\n" \ + " call %%ctpr1, wbs=%# ? ~ %%pred18}\n" \ + "{movtd %[val], %%dg" #greg_no "}\n" \ + : [val] "=&r" (val), [val_8] "=&r" (val_8) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [chan] "ir" (__chan), [chan_q] "ir" (__chan_q), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [quadro] "r" (__quadro), [greg] "i" ((u64) (greg_no)) \ + : "call", "memory", "pred18", "pred19", "pred20", "pred21", \ + "pred22", "pred23", "pred24", "pred25", "pred26", "pred27", \ + "g" #greg_no); \ +} while (0) + +/* + * As NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR but repeats from cellar + * an aligned atomic 16-bytes load. + */ +#define NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(_addr, _opc, \ + greg_no, _vr) \ +do { \ + u64 val; \ + u64 __opc = (_opc); \ + asm ( "{disp %%ctpr1, qpswitchd_sm\n" \ + " nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %%g" #greg_no "\n" \ + " cmpesb,1 %[vr], 0, %%pred19}\n" \ + "{movts,0 %%g" #greg_no ", %[val] ? 
%%pred19\n" \ + " addd,2 %[greg], 0, %%db[0]\n" \ + " call %%ctpr1, wbs=%#}\n" \ + "{movtd %[val], %%dg" #greg_no "}\n" \ + : [val] "=&r" (val) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [greg] "i" ((u64) (greg_no)) \ + : "call", "memory", "pred19", "g" #greg_no); \ +} while (false) + +#define NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(_addr, _opc, \ + greg_no_lo, greg_no_hi, _vr, _qp_load) \ +do { \ + u64 val; \ + u64 __opc = (_opc); \ + if (_qp_load) { \ + asm ( "{disp %%ctpr1, qpswitchd_sm\n" \ + " nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %%g" #greg_no_lo "\n" \ + " cmpesb,1 %[vr], 0, %%pred19}\n" \ + "{movts,0 %%g" #greg_no_lo ", %[val] ? %%pred19\n" \ + " addd,2 %[greg], 0, %%db[0]\n" \ + " call %%ctpr1, wbs=%#}\n" \ + "{movtd %[val], %%dg" #greg_no_lo "}\n" \ + : [val] "=&r" (val) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [greg] "i" ((u64) (greg_no_lo)) \ + : "call", "memory", "pred19", "g" #greg_no_lo); \ + } else { \ + asm ( "{nop 4\n" \ + " ldrd,0 [ %[addr] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[addr] + %[opc_8] ], %%g" #greg_no_hi "\n" \ + " cmpesb,1 %[vr], 0, %%pred19}\n" \ + "{nop 1\n" \ + " movts,0 %%g" #greg_no_lo ", %[val] ? %%pred19}\n" \ + "{movtd,0 %[val], %%dg" #greg_no_lo "}\n" \ + : [val] "=&r" (val) \ + : [addr] "r" (_addr), [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [greg] "i" ((u64) (greg_no_lo)) \ + : "call", "memory", "pred19", "g" #greg_no_lo); \ + } \ +} while (false) + +#define NATIVE_RECOVERY_LOAD_TO_A_GREG_CH_VR(addr, opc, greg_num, \ + chan_opc, vr, quadro) \ +do { \ + switch (greg_num) { \ + case 0: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 0, \ + chan_opc, vr, quadro); \ + break; \ + case 1: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 1, \ + chan_opc, vr, quadro); \ + break; \ + case 2: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 2, \ + chan_opc, vr, quadro); \ + break; \ + case 3: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 3, \ + chan_opc, vr, quadro); \ + break; \ + case 4: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 4, \ + chan_opc, vr, quadro); \ + break; \ + case 5: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 5, \ + chan_opc, vr, quadro); \ + break; \ + case 6: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 6, \ + chan_opc, vr, quadro); \ + break; \ + case 7: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 7, \ + chan_opc, vr, quadro); \ + break; \ + case 8: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 8, \ + chan_opc, vr, quadro); \ + break; \ + case 9: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 9, \ + chan_opc, vr, quadro); \ + break; \ + case 10: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 10, \ + chan_opc, vr, quadro); \ + break; \ + case 11: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 11, \ + chan_opc, vr, quadro); \ + break; \ + case 12: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 12, \ + chan_opc, vr, quadro); \ + break; \ + case 13: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 13, \ + chan_opc, vr, quadro); \ + break; \ + case 14: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 14, \ + chan_opc, vr, quadro); \ + break; \ + case 15: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 15, \ + chan_opc, vr, quadro); \ + break; \ + /* Do not load g16-g19 as they are used by kernel */ \ + 
case 16: \ + case 17: \ + case 18: \ + case 19: \ + break; \ + case 20: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 20, \ + chan_opc, vr, quadro); \ + break; \ + case 21: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 21, \ + chan_opc, vr, quadro); \ + break; \ + case 22: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 22, \ + chan_opc, vr, quadro); \ + break; \ + case 23: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 23, \ + chan_opc, vr, quadro); \ + break; \ + case 24: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 24, \ + chan_opc, vr, quadro); \ + break; \ + case 25: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 25, \ + chan_opc, vr, quadro); \ + break; \ + case 26: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 26, \ + chan_opc, vr, quadro); \ + break; \ + case 27: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 27, \ + chan_opc, vr, quadro); \ + break; \ + case 28: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 28, \ + chan_opc, vr, quadro); \ + break; \ + case 29: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 29, \ + chan_opc, vr, quadro); \ + break; \ + case 30: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 30, \ + chan_opc, vr, quadro); \ + break; \ + case 31: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_CH_VR(addr, opc, 31, \ + chan_opc, vr, quadro); \ + break; \ + default: \ + panic("Invalid global register # %d\n", greg_num); \ + } \ +} while (0) + +#define NATIVE_RECOVERY_LOAD_TO_A_GREG_VR_ATOMIC(addr, opc, greg_num, \ + vr, qp_load) \ +do { \ + switch (greg_num) { \ + case 0: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 0, 1, vr, qp_load); \ + break; \ + case 1: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 1, \ + vr); \ + break; \ + case 2: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 2, 3, vr, qp_load); \ + break; \ + case 3: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 3, \ + vr); \ + break; \ + case 4: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 4, 5, vr, qp_load); \ + break; \ + case 5: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 5, \ + vr); \ + break; \ + case 6: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 6, 7, vr, qp_load); \ + break; \ + case 7: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 7, \ + vr); \ + break; \ + case 8: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 8, 9, vr, qp_load); \ + break; \ + case 9: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 9, \ + vr); \ + break; \ + case 10: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 10, 11, vr, qp_load); \ + break; \ + case 11: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 11, \ + vr); \ + break; \ + case 12: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 12, 13, vr, qp_load); \ + break; \ + case 13: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 13, \ + vr); \ + break; \ + case 14: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 14, 15, vr, qp_load); \ + break; \ + case 15: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 15, \ + vr); \ + break; \ + /* Do not load g16-g19 as they are used by kernel */ \ + case 16: \ + case 17: \ + case 18: \ + case 19: \ + break; \ + case 20: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 20, 21, vr, qp_load); \ + break; \ + case 21: \ + 
NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 21, \ + vr); \ + break; \ + case 22: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 22, 23, vr, qp_load); \ + break; \ + case 23: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 23, \ + vr); \ + break; \ + case 24: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 24, 25, vr, qp_load); \ + break; \ + case 25: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 25, \ + vr); \ + break; \ + case 26: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 26, 27, vr, qp_load); \ + break; \ + case 27: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 27, \ + vr); \ + break; \ + case 28: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 28, 29, vr, qp_load); \ + break; \ + case 29: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 29, \ + vr); \ + break; \ + case 30: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP_OR_Q(addr, opc, \ + 30, 31, vr, qp_load); \ + break; \ + case 31: \ + NATIVE_RECOVERY_LOAD_TO_THE_GREG_VR_ATOMIC_QP(addr, opc, 31, \ + vr); \ + break; \ + default: \ + panic("Invalid global register # %d\n", greg_num); \ + } \ +} while (0) + +#define NATIVE_RECOVERY_STORE(_addr, _val, _opc, _chan) \ +do { \ + asm volatile ("strd," #_chan " [ %[addr] + %[opc] ], %[val]" \ + : \ + : [addr] "r" ((u64) (_addr)), \ + [opc] "ir" ((u64) (_opc)), \ + [val] "r" ((u64) (_val)) \ + : "memory"); \ +} while (0) + +#define NATIVE_RECOVERY_TAGGED_STORE_ATOMIC(_addr, _val, _tag, _opc, \ + _val_ext, _tag_ext, _opc_ext) \ +({ \ + u64 tmp, tmp_ext; \ + asm ( "{puttagd,2 %[val], %[tag], %[tmp]\n" \ + " puttagd,5 %[val_ext], %[tag_ext], %[tmp_ext]}\n" \ + "{strd,2 [ %[addr] + %[opc] ], %[tmp]\n" \ + " strd,5 [ %[addr] + %[opc_ext] ], %[tmp_ext]}\n" \ + : [tmp] "=&r" (tmp), [tmp_ext] "=&r" (tmp_ext) \ + : [addr] "r" (_addr), \ + [val] "r" ((u64) (_val)), [val_ext] "r" ((u64) (_val_ext)), \ + [tag] "r" ((u32) (_tag)), [tag_ext] "r" ((u32) (_tag_ext)), \ + [opc] "ir" (_opc), [opc_ext] "ir" (_opc_ext) \ + : "memory"); \ +}) + +#define NATIVE_RECOVERY_TAGGED_STORE(_addr, _val, _tag, _opc, \ + _val_ext, _tag_ext, _opc_ext, _chan, _quadro) \ +({ \ + u64 tmp, tmp_ext; \ + u32 __chan = (u32) (_chan); \ + u32 __chan_q = (_quadro) ? __chan : -1; \ + asm ( "{nop 1\n" \ + " puttagd,2 %[val], %[tag], %[tmp]\n" \ + " puttagd,5,sm %[val_ext], %[tag_ext], %[tmp_ext]\n" \ + " cmpesb,0 %[chan], 1, %%pred20\n" \ + " cmpesb,3 %[chan], 3, %%pred21\n" \ + " cmpesb,1 %[chan_q], 1, %%pred22\n" \ + " cmpesb,4 %[chan_q], 3, %%pred23}\n" \ + "{strd,2 [ %[addr] + %[opc] ], %[tmp] ? %%pred20\n" \ + " strd,5 [ %[addr] + %[opc] ], %[tmp] ? %%pred21}\n" \ + "{strd,2 [ %[addr] + %[opc_ext] ], %[tmp_ext] ? %%pred22\n" \ + " strd,5 [ %[addr] + %[opc_ext] ], %[tmp_ext] ? 
%%pred23}\n" \ + : [tmp] "=&r" (tmp), [tmp_ext] "=&r" (tmp_ext) \ + : [addr] "r" (_addr), \ + [val] "r" ((u64) (_val)), [val_ext] "r" ((u64) (_val_ext)), \ + [tag] "r" ((u32) (_tag)), [tag_ext] "r" ((u32) (_tag_ext)), \ + [opc] "ir" (_opc), [opc_ext] "ir" (_opc_ext), \ + [chan] "r" ((u32) (__chan)), [chan_q] "r" ((u32) (__chan_q)) \ + : "memory", "pred20", "pred21", "pred22", "pred23"); \ +}) + + +/* + * #58441 - work with taged value (compiler problem) + * store tag and store taged word must be in common asm code + * (cloused asm code) + */ +#define NATIVE_STORE_VALUE_WITH_TAG(addr, val, tag) \ + NATIVE_STORE_TAGGED_WORD(addr, val, tag, \ + TAGGED_MEM_STORE_REC_OPC, 2) + +#define NATIVE_STORE_TAGGED_WORD(addr, val, tag, opc, chan_letter) \ +do { \ + u64 __tmp_reg = val; \ + E2K_BUILD_BUG_ON(sizeof(val) != 8); \ + asm volatile ("{puttagd \t%0, %2, %0\n}" \ + " strd," #chan_letter " \t[%1 + %3], %0\n" \ + : "+r" (__tmp_reg) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "ri" ((__e2k_u32_t) (tag)), \ + "ri" ((opc)) \ + : "memory"); \ +} while (0) + +#define NATIVE_STORE_TAGGED_WORD_CH(addr, val, tag, opc, trap_cellar_chan) \ +do { \ + switch (trap_cellar_chan) { \ + case 1: \ + NATIVE_STORE_TAGGED_WORD(addr, val, tag, opc, 2); \ + break; \ + case 3: \ + NATIVE_STORE_TAGGED_WORD(addr, val, tag, opc, 5); \ + break; \ + } \ +} while (0) + + +#define __NATIVE_STORE_TAGGED_QWORD(addr, val_lo, val_hi, \ + tag_lo, tag_hi, offset) \ +({ \ + u64 reg1, reg2; \ + E2K_BUILD_BUG_ON(sizeof(val_hi) != 8); \ + E2K_BUILD_BUG_ON(sizeof(val_lo) != 8); \ + asm volatile ( "{puttagd %3, %5, %0\n" \ + " puttagd %4, %6, %1}\n" \ + "{strd,2 [%2 + %7], %0\n" \ + " strd,5 [%2 + %8], %1}\n" \ + : "=&r" (reg1), "=&r" (reg2) \ + : "r" (addr), \ + "r" (val_lo), \ + "r" (val_hi), \ + "ri" (tag_lo), \ + "ri" (tag_hi), \ + "i" (TAGGED_MEM_STORE_REC_OPC), \ + "ri" (TAGGED_MEM_STORE_REC_OPC | offset) \ + : "memory"); \ +}) +#define NATIVE_STORE_TAGGED_QWORD(addr, val_lo, val_hi, tag_lo, tag_hi) \ + __NATIVE_STORE_TAGGED_QWORD((addr), (val_lo), (val_hi), \ + (tag_lo), (tag_hi), 8UL) + +#define E2K_STORE_NULLPTR_QWORD(addr) \ + _E2K_STORE_NULLPTR_QWORD(addr, TAGGED_MEM_STORE_REC_OPC) + + +#define _E2K_STORE_NULLPTR_QWORD(addr, opc) \ +({ \ + e2k_addr_t addr_hi = (e2k_addr_t)addr + 8; \ + unsigned long np = 0UL; \ + asm volatile ("{puttagd \t%0, %3, %0}\n" \ + " {strd, 2 \t[%1 + %4], %0\n" \ + " strd, 5 \t[%2 + %4], %0}\n" \ + : "+r" (np) \ + : "r" ((__e2k_ptr_t) (addr)), \ + "r" ((__e2k_ptr_t) (addr_hi)), \ + "i" (E2K_NULLPTR_ETAG), \ + "i" ( (opc)) \ + : "memory" \ + ); \ +}) + +#define NATIVE_MOVE_TAGGED_QWORD(_from_lo, _from_hi, _to_lo, _to_hi) \ +({ \ + u64 __val_lo, __val_hi; \ + asm ("{nop 4\n" \ + " ldrd,2 [ %[from_lo] + %[opc_ld] ], %[val_lo]\n" \ + " ldrd,5 [ %[from_hi] + %[opc_ld] ], %[val_hi]}\n" \ + "{strd,2 [ %[to_lo] + %[opc_st] ], %[val_lo]\n" \ + " strd,5 [ %[to_hi] + %[opc_st] ], %[val_hi]}\n" \ + : [val_lo] "=&r" (__val_lo), [val_hi] "=&r" (__val_hi) \ + : [from_lo] "r" (_from_lo), [from_hi] "r" (_from_hi), \ + [to_lo] "r" (_to_lo), [to_hi] "r" (_to_hi), \ + [opc_ld] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +}) + +#define NATIVE_MOVE_TAGGED_DWORD(_from, _to) \ +do { \ + long _tmp; \ + asm ( "ldrd [ %[from] + %[opc] ], %[tmp]\n" \ + "strd [ %[to] + %[opc_st] ], %[tmp]\n" \ + : [tmp] "=&r" (_tmp) \ + : [from] "r" (_from), [to] "r" (_to), \ + [opc] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +} while (false) + +#define 
NATIVE_MOVE_TAGGED_WORD(_from, _to) \ +do { \ + long _tmp; \ + asm ( "ldrd [ %[from] + %[opc] ], %[tmp]\n" \ + "strd [ %[to] + %[opc_st] ], %[tmp]\n" \ + : [tmp] "=&r" (_tmp) \ + : [from] "r" (_from), [to] "r" (_to), \ + [opc] "i" (TAGGED_MEM_LOAD_REC_OPC_W), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC_W) \ + : "memory"); \ +} while (false) + +/* + * Repeat memory load from cellar. + * chan: 0, 1, 2 or 3 - channel for operation + * quadro: set if this is a non-atomic quadro operation to move 16 bytes + * vr: set to 0 if we want to preserve the lower 4-byte word + * (same as vr in cellar) + */ +#define NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR(_from, _to, _to_hi, _vr, _opc, \ + _chan, _quadro) \ +do { \ + u64 prev, val, val_8; \ + u32 __chan = (u32) (_chan); \ + u32 __quadro = (u32) (_quadro); \ + u32 __chan_q = (__quadro) ? __chan : -1; \ + u64 __opc = (_opc); \ + asm ( "{cmpesb %[quadro], 0, %%pred18\n" \ + " cmpesb %[vr], 0, %%pred19}\n" \ + "{cmpesb,0 %[chan], 0, %%pred20\n" \ + " cmpesb,1 %[chan], 1, %%pred21\n" \ + " cmpesb,3 %[chan], 2, %%pred22\n" \ + " cmpesb,4 %[chan], 3, %%pred23}\n" \ + "{cmpesb,0 %[chan_q], 0, %%pred24\n" \ + " cmpesb,1 %[chan_q], 1, %%pred25\n" \ + " cmpesb,3 %[chan_q], 2, %%pred26\n" \ + " cmpesb,4 %[chan_q], 3, %%pred27\n" \ + " ldrd [ %[to] + %[opc_ld] ], %[prev] ? %%pred19}\n" \ + "{ldrd,0 [ %[from] + %[opc] ], %[val] ? %%pred20\n" \ + " ldrd,2 [ %[from] + %[opc] ], %[val] ? %%pred21\n" \ + " ldrd,3 [ %[from] + %[opc] ], %[val] ? %%pred22\n" \ + " ldrd,5 [ %[from] + %[opc] ], %[val] ? %%pred23}\n" \ + "{nop 3\n" \ + " ldrd,0 [ %[from] + %[opc_8] ], %[val_8] ? %%pred24\n" \ + " ldrd,2 [ %[from] + %[opc_8] ], %[val_8] ? %%pred25\n" \ + " ldrd,3 [ %[from] + %[opc_8] ], %[val_8] ? %%pred26\n" \ + " ldrd,5 [ %[from] + %[opc_8] ], %[val_8] ? %%pred27}\n" \ + "{movts,1 %[prev], %[val] ? %%pred19}\n" \ + "{strd,2 [ %[to] + %[opc_st] ], %[val]\n" \ + " strd,5 [ %[to_hi] + %[opc_st] ], %[val_8] ? ~ %%pred18}\n" \ + : [prev] "=&r" (prev), [val] "=&r" (val), \ + [val_8] "=&r" (val_8) \ + : [from] "r" (_from), [to] "r" (_to), [to_hi] "r" (_to_hi), \ + [vr] "ir" ((u32) (_vr)), [quadro] "r" (__quadro), \ + [chan] "ir" (__chan), [chan_q] "ir" (__chan_q), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [opc_ld] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory", "pred18", "pred19", "pred20", "pred21", "pred22", \ + "pred23", "pred24", "pred25", "pred26", "pred27"); \ +} while (false) + +/* + * As NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR but repeats from cellar + * an aligned atomic 16-bytes load. + */ +#define NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_VR_ATOMIC(_from, _to, _to_hi, \ + _vr, _opc) \ +do { \ + u64 prev, val, val_8; \ + u64 __opc = (_opc); \ + asm ( "{cmpesb %[vr], 0, %%pred19}\n" \ + "{ldrd,0 [ %[from] + %[opc] ], %[val]\n" \ + " ldrd,2 [ %[from] + %[opc_8] ], %[val_8]}\n" \ + "{nop 4\n" \ + " ldrd [ %[to] + %[opc_ld] ], %[prev] ? %%pred19}\n" \ + "{movts,1 %[prev], %[val] ? 
%%pred19}\n" \ + "{strd,2 [ %[to] + %[opc_st] ], %[val]\n" \ + " strd,5 [ %[to_hi] + %[opc_st] ], %[val_8]}\n" \ + : [prev] "=&r" (prev), [val] "=&r" (val), \ + [val_8] "=&r" (val_8) \ + : [from] "r" (_from), [to] "r" (_to), [to_hi] "r" (_to_hi), \ + [vr] "ir" ((u32) (_vr)), \ + [opc] "r" (__opc), [opc_8] "r" (__opc | 8ull), \ + [opc_ld] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [opc_st] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory", "pred19"); \ +} while (false) + +#define E2K_TAGGED_MEMMOVE_8(__dst, __src) \ +({ \ + u64 __tmp1; \ + asm ( \ + "{\n" \ + "nop 4\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_16(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2; \ + asm ( \ + "{\n" \ + "nop 4\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_24(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "nop 3\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_32(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "nop 3\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + 
[st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_40(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "nop 2\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_48(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "nop 2\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_40] ], %[tmp6]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_40] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_40] "i" (TAGGED_MEM_STORE_REC_OPC | 40) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_56(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + 
"}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldrd,2 [ %[src] + %[ld_opc_48] ], %[tmp7]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_48] ], %[tmp7]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6), \ + [tmp7] "=&r" (__tmp7) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_40] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [ld_opc_48] "i" (TAGGED_MEM_LOAD_REC_OPC | 48), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" (TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_40] "i" (TAGGED_MEM_STORE_REC_OPC | 40), \ + [st_opc_48] "i" (TAGGED_MEM_STORE_REC_OPC | 48) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_64(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7, __tmp8; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_16] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_32] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldrd,2 [ %[src] + %[ld_opc_48] ], %[tmp7]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_56] ], %[tmp8]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_8] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_16] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_24] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_32] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_40] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_48] ], %[tmp7]\n" \ + "strd,5 [ %[dst] + %[st_opc_56] ], %[tmp8]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6), \ + [tmp7] "=&r" (__tmp7), [tmp8] "=&r" (__tmp8) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_8] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_16] "i" (TAGGED_MEM_LOAD_REC_OPC | 16), \ + [ld_opc_24] "i" (TAGGED_MEM_LOAD_REC_OPC | 24), \ + [ld_opc_32] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_40] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [ld_opc_48] "i" (TAGGED_MEM_LOAD_REC_OPC | 48), \ + [ld_opc_56] "i" (TAGGED_MEM_LOAD_REC_OPC | 56), \ + [st_opc_0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_8] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_16] "i" (TAGGED_MEM_STORE_REC_OPC | 16), \ + [st_opc_24] "i" 
(TAGGED_MEM_STORE_REC_OPC | 24), \ + [st_opc_32] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_40] "i" (TAGGED_MEM_STORE_REC_OPC | 40), \ + [st_opc_48] "i" (TAGGED_MEM_STORE_REC_OPC | 48), \ + [st_opc_56] "i" (TAGGED_MEM_STORE_REC_OPC | 56) \ + : "memory"); \ +}) + +#define E2K_TAGGED_MEMMOVE_128_RF_V2(__dst, __src) \ +({ \ + u64 __tmp1, __tmp2, __tmp3, __tmp4, __tmp5, __tmp6, __tmp7, __tmp8; \ + asm ( \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r0] ], %[tmp1]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r1] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r2] ], %[tmp3]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r3] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r4] ], %[tmp5]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r5] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldrd,2 [ %[src] + %[ld_opc_r6] ], %[tmp7]\n" \ + "ldrd,5 [ %[src] + %[ld_opc_r7] ], %[tmp8]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r0] ], %[tmp1]\n" \ + "strd,5 [ %[dst] + %[st_opc_r1] ], %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r2] ], %[tmp3]\n" \ + "strd,5 [ %[dst] + %[st_opc_r3] ], %[tmp4]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r4] ], %[tmp5]\n" \ + "strd,5 [ %[dst] + %[st_opc_r5] ], %[tmp6]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %[dst] + %[st_opc_r6] ], %[tmp7]\n" \ + "strd,5 [ %[dst] + %[st_opc_r7] ], %[tmp8]\n" \ + "}\n" \ + : [tmp1] "=&r" (__tmp1), [tmp2] "=&r" (__tmp2), \ + [tmp3] "=&r" (__tmp3), [tmp4] "=&r" (__tmp4), \ + [tmp5] "=&r" (__tmp5), [tmp6] "=&r" (__tmp6), \ + [tmp7] "=&r" (__tmp7), [tmp8] "=&r" (__tmp8) \ + : [src] "r" (__src), [dst] "r" (__dst), \ + [ld_opc_r0] "i" (TAGGED_MEM_LOAD_REC_OPC), \ + [ld_opc_r1] "i" (TAGGED_MEM_LOAD_REC_OPC | 8), \ + [ld_opc_r2] "i" (TAGGED_MEM_LOAD_REC_OPC | 32), \ + [ld_opc_r3] "i" (TAGGED_MEM_LOAD_REC_OPC | 40), \ + [ld_opc_r4] "i" (TAGGED_MEM_LOAD_REC_OPC | 64), \ + [ld_opc_r5] "i" (TAGGED_MEM_LOAD_REC_OPC | 72), \ + [ld_opc_r6] "i" (TAGGED_MEM_LOAD_REC_OPC | 96), \ + [ld_opc_r7] "i" (TAGGED_MEM_LOAD_REC_OPC | 104), \ + [st_opc_r0] "i" (TAGGED_MEM_STORE_REC_OPC), \ + [st_opc_r1] "i" (TAGGED_MEM_STORE_REC_OPC | 8), \ + [st_opc_r2] "i" (TAGGED_MEM_STORE_REC_OPC | 32), \ + [st_opc_r3] "i" (TAGGED_MEM_STORE_REC_OPC | 40), \ + [st_opc_r4] "i" (TAGGED_MEM_STORE_REC_OPC | 64), \ + [st_opc_r5] "i" (TAGGED_MEM_STORE_REC_OPC | 72), \ + [st_opc_r6] "i" (TAGGED_MEM_STORE_REC_OPC | 96), \ + [st_opc_r7] "i" (TAGGED_MEM_STORE_REC_OPC | 104) \ + : "memory"); \ +}) + +/* Store quadro pointer "ptr" at address "addr" */ +#define E2K_SET_TAGS_AND_STORE_QUADRO(ptr, addr) \ +do { \ + asm ("{\n" \ + "puttagd %0, 15, %%db[0]\n" \ + "puttagd %1, 12, %%db[1]\n" \ + "}\n" \ + "{\n" \ + "strd,2 [ %2 + %3 ], %%db[0]\n" \ + "strd,5 [ %2 + %4 ], %%db[1]\n" \ + "}\n" \ + : \ + : "r" (AW(ptr).lo), "r" (AW(ptr).hi), \ + "r" ((unsigned long) addr), \ + "i" (TAGGED_MEM_STORE_REC_OPC), \ + "i" (TAGGED_MEM_STORE_REC_OPC | 8UL) \ + : "%b[0]", "%b[1]"); \ +} while (0) + + +/* + * Read tags at @src and pack them at @dst. 
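+ *
+ * (a descriptive note inferred from the asm below: each output byte packs
+ * two 4-bit dword tags - bits 0..3 from the first dword of a pair, bits
+ * 4..7 from the second - so 32 bytes of tagged data yield two packed-tag
+ * bytes at @dst and @dst + 1)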
+ */ +#define NATIVE_EXTRACT_TAGS_32(dst, src) \ +do { \ + register u64 __opc0 = TAGGED_MEM_LOAD_REC_OPC; \ + register u64 __opc8 = TAGGED_MEM_LOAD_REC_OPC | 8; \ + register u64 __opc16 = TAGGED_MEM_LOAD_REC_OPC | 16; \ + register u64 __opc24 = TAGGED_MEM_LOAD_REC_OPC | 24; \ + register u64 __tmp0, __tmp8, __tmp16, __tmp24; \ + \ + asm volatile ( "{\n" \ + "nop 4\n" \ + "ldrd,0 [%5 + %6], %0\n" \ + "ldrd,2 [%5 + %7], %1\n" \ + "ldrd,3 [%5 + %8], %2\n" \ + "ldrd,5 [%5 + %9], %3\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %1, %1\n" \ + "gettagd,5 %3, %3\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %0, %0\n" \ + "gettagd,5 %2, %2\n" \ + "shls,0 %1, 4, %1\n" \ + "shls,3 %3, 4, %3\n" \ + "}\n" \ + "{\n" \ + "ors,0 %0, %1, %0\n" \ + "ors,3 %2, %3, %2\n" \ + "}\n" \ + "{\n" \ + "stb,2 [ %4 + 0 ], %0\n" \ + "stb,5 [ %4 + 1 ], %2\n" \ + "}\n" \ + : "=&r" (__tmp0), "=&r" (__tmp8), \ + "=&r" (__tmp16), "=&r" (__tmp24) \ + : "r" (dst), "r" (src), \ + "r" (__opc0), "r" (__opc8), \ + "r" (__opc16), "r" (__opc24)); \ +} while (0) + +#define NATIVE_LOAD_TAGD(addr) \ +({ \ + u32 __dtag; \ + asm ("ldrd [%1 + %2], %0\n" \ + "gettagd %0, %0\n" \ + : "=r"(__dtag) \ + : "m" (*(unsigned long long *) (addr)), \ + "i"(TAGGED_MEM_LOAD_REC_OPC)); \ + __dtag; \ +}) + +#define NATIVE_LOAD_VAL_AND_TAGD(addr, val, tag) \ +do { \ + BUILD_BUG_ON(sizeof(tag) > 4); \ + asm ("ldrd [%2 + %3], %1\n" \ + "gettagd %1, %0\n" \ + "puttagd %1, 0, %1\n" \ + : "=r" (tag), "=r" (val) \ + : "m" (*((unsigned long long *) (addr))), \ + "i" (TAGGED_MEM_LOAD_REC_OPC)); \ +} while (0) + +#define NATIVE_LOAD_VAL_AND_TAGW(addr, val, tag) \ +({ \ + register int __tag; \ + register long __word; \ + asm ("{ldrd [%2 + %3], %1\n}" \ + "{gettagd \t%1, %0\n" \ + " puttagd \t%1, 0, %1}\n" \ + : "=r"(__tag), "=r"(__word) \ + : "m" (*((unsigned long long *) (addr))), \ + "i"(TAGGED_MEM_LOAD_REC_OPC_W)); \ + val = __word; \ + tag = __tag; \ +}) + +#define NATIVE_LOAD_TAGGED_QWORD_AND_TAGS(addr, lo, hi, tag_lo, tag_hi) \ +{ \ + NATIVE_LOAD_VAL_AND_TAGD(addr, lo, tag_lo); \ + NATIVE_LOAD_VAL_AND_TAGD(((__e2k_u64_t *) (addr)) + 1, \ + hi, tag_hi); \ +} + +#define E2K_LOAD_VAL_AND_TAG(addr, val, tag) \ +({ \ + register int __tag; \ + register long __dword; \ + asm ("{ldrd [%2 + %3], %1\n}" \ + "{gettags \t%1, %0\n" \ + " puttags \t%1, 0, %1}\n" \ + : "=r"(__tag), "=r"(__dword) \ + : "m" (*((unsigned long long *) (addr))), \ + "i"(TAGGED_MEM_LOAD_REC_OPC)); \ + val = __dword; \ + tag = __tag; \ +}) + +/** + * Load/stote based data operations + */ +#define E2K_LD_GREG_BASED_B(greg_no, offset, chan_letter) \ +({ \ + register unsigned long res; \ + asm volatile ("ldb," #chan_letter "\t%%dg" #greg_no ", [%1], %0" \ + : "=r"(res) \ + : "ri" ((__e2k_u64_t) (offset))); \ + res; \ +}) +#define E2K_LD_GREG_BASED_H(greg_no, offset, chan_letter) \ +({ \ + register unsigned long res; \ + asm volatile ("ldh," #chan_letter "\t%%dg" #greg_no ", [%1], %0" \ + : "=r"(res) \ + : "ri" ((__e2k_u64_t) (offset))); \ + res; \ +}) +#define E2K_LD_GREG_BASED_W(greg_no, offset, chan_letter) \ +({ \ + register unsigned long res; \ + asm volatile ("ldw," #chan_letter "\t%%dg" #greg_no ", [%1], %0" \ + : "=r"(res) \ + : "ri" ((__e2k_u64_t) (offset))); \ + res; \ +}) +#define E2K_LD_GREG_BASED_D(greg_no, offset, chan_letter) \ +({ \ + register unsigned long long res; \ + asm volatile ("ldd," #chan_letter "\t%%dg" #greg_no ", [%1], %0" \ + : "=r"(res) \ + : "ri" ((__e2k_u64_t) (offset))); \ + res; \ +}) +#define E2K_ST_GREG_BASED_B(greg_no, offset, value, chan_letter) \ +({ \ + asm volatile ("stb," 
#chan_letter "\t%%dg" #greg_no ", [%0], %1" \ + : \ + : "ri" ((__e2k_u64_t) (offset)), \ + "r" ((__e2k_u8_t) (value))); \ +}) +#define E2K_ST_GREG_BASED_H(greg_no, offset, value, chan_letter) \ +({ \ + asm volatile ("sth," #chan_letter "\t%%dg" #greg_no ", [%0], %1" \ + : \ + : "ri" ((__e2k_u64_t) (offset)), \ + "r" ((__e2k_u16_t) (value))); \ +}) +#define E2K_ST_GREG_BASED_W(greg_no, offset, value, chan_letter) \ +({ \ + asm volatile ("stw," #chan_letter "\t%%dg" #greg_no ", [%0], %1" \ + : \ + : "ri" ((__e2k_u64_t) (offset)), \ + "r" ((__e2k_u32_t) (value))); \ +}) +#define E2K_ST_GREG_BASED_D(greg_no, offset, value, chan_letter) \ +({ \ + asm volatile ("std," #chan_letter "\t%%dg" #greg_no ", [%0], %1" \ + : \ + : "ri" ((__e2k_u64_t) (offset)), \ + "r" ((__e2k_u64_t) (value))); \ +}) + +#define E2K_LOAD_GREG_BASED_B(greg_no, offset) \ + E2K_LD_GREG_BASED_B(greg_no, offset, 0) +#define E2K_LOAD_GREG_BASED_H(greg_no, offset) \ + E2K_LD_GREG_BASED_H(greg_no, offset, 0) +#define E2K_LOAD_GREG_BASED_W(greg_no, offset) \ + E2K_LD_GREG_BASED_W(greg_no, offset, 0) +#define E2K_LOAD_GREG_BASED_D(greg_no, offset) \ + E2K_LD_GREG_BASED_D(greg_no, offset, 0) + +#define E2K_STORE_GREG_BASED_B(greg_no, offset, value) \ + E2K_ST_GREG_BASED_B(greg_no, offset, value, 2) +#define E2K_STORE_GREG_BASED_H(greg_no, offset, value) \ + E2K_ST_GREG_BASED_H(greg_no, offset, value, 2) +#define E2K_STORE_GREG_BASED_W(greg_no, offset, value) \ + E2K_ST_GREG_BASED_W(greg_no, offset, value, 2) +#define E2K_STORE_GREG_BASED_D(greg_no, offset, value) \ + E2K_ST_GREG_BASED_D(greg_no, offset, value, 2) + +/* + * Bytes swapping + */ + +#define E2K_SWAPB_16(addr) E2K_READ_MAS_H(addr, MAS_BIGENDIAN) +#define E2K_SWAPB_32(addr) E2K_READ_MAS_W(addr, MAS_BIGENDIAN) +#define E2K_SWAPB_64(addr) E2K_READ_MAS_D(addr, MAS_BIGENDIAN) + +#define _E2K_GEN_LABEL(label_name, label_no) #label_name #label_no + +#define _E2K_ASM_LABEL_L(label_name, label_no) \ + asm volatile ("\n" _E2K_GEN_LABEL(label_name, label_no) ":"); + +#define _E2K_ASM_LABEL_R(label_name, label_no) \ + _E2K_GEN_LABEL(label_name, label_no) + + +/* + * Atomic read hardware stacks (procedure and chain) registers + * in coordinated state. + * Any interrupt inside registers reading sequence can update + * some fields of registers and them can be at miscoordinated state + * So use "wait lock" and "wait unlock" load/store to avoid interrupts + * Argument 'lock_addr' is used only to provide lock/unlock, so it can be + * any unused local variable of caller + */ +#define ATOMIC_READ_P_STACK_REGS(psp_lo, psp_hi, pshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%3] 7, %0\n" \ + \ + "\t rrd \t %%psp.lo, %0\n" \ + "\t rrd \t %%psp.hi, %1\n" \ + "\t rrd \t %%pshtp, %2\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%3] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_lo), \ + "=&r" (psp_hi), \ + "=&r" (pshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_PC_STACK_REGS(pcsp_lo, pcsp_hi, pcshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%3] 7, %0\n" \ + \ + "\t rrd \t %%pcsp.lo, %0\n" \ + "\t rrd \t %%pcsp.hi, %1\n" \ + "\t rrd \t %%pcshtp, %2\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%3] 2, %0\n" \ + "\t ibranch \t 1b ? 
%%MLOCK\n" \ + "}\n" \ + : "=&r" (pcsp_lo), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_HW_PS_SIZES(psp_hi, pshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%2] 7, %0\n" \ + \ + "\t rrd \t %%psp.hi, %0\n" \ + "\t rrd \t %%pshtp, %1\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%2] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_hi), \ + "=&r" (pshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_HW_PCS_SIZES(pcsp_hi, pcshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%2] 7, %0\n" \ + \ + "\t rrd \t %%pcsp.hi, %0\n" \ + "\t rrs \t %%pcshtp, %1\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%2] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_HW_STACKS_SIZES(psp_hi, pshtp, pcsp_hi, pcshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%4] 7, %0\n" \ + \ + "\t rrd \t %%psp.hi, %0\n" \ + "\t rrd \t %%pshtp, %1\n" \ + "\t rrd \t %%pcsp.hi, %2\n" \ + "\t rrs \t %%pcshtp, %3\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%4] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_hi), \ + "=&r" (pshtp), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +#define ATOMIC_READ_HW_STACKS_REGS(psp_lo, psp_hi, pshtp, \ + pcsp_lo, pcsp_hi, pcshtp) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%6] 7, %0\n" \ + \ + "\t rrd \t %%psp.lo, %0\n" \ + "\t rrd \t %%psp.hi, %1\n" \ + "\t rrd \t %%pshtp, %2\n" \ + "\t rrd \t %%pcsp.lo, %3\n" \ + "\t rrd \t %%pcsp.hi, %4\n" \ + "\t rrs \t %%pcshtp, %5\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%6] 2, %0\n" \ + "\t ibranch \t 1b ? %%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_lo), \ + "=&r" (psp_hi), \ + "=&r" (pshtp), \ + "=&r" (pcsp_lo), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) +/* + * Atomic read all stacks hardware (procedure and chain) and data stack + * registers in coordinated state. + */ +#define ATOMIC_READ_ALL_STACKS_REGS(psp_lo, psp_hi, pshtp, \ + pcsp_lo, pcsp_hi, pcshtp, \ + usd_lo, usd_hi, cr1_hi) \ +({ \ + unsigned long lock_addr; \ + asm volatile ( \ + "\n" \ + "1:\n" \ + "\t ldd,0 \t 0, [%9] 7, %0\n" \ + \ + "\t rrd \t %%psp.lo, %0\n" \ + "\t rrd \t %%psp.hi, %1\n" \ + "\t rrd \t %%pshtp, %2\n" \ + "\t rrd \t %%pcsp.lo, %3\n" \ + "\t rrd \t %%pcsp.hi, %4\n" \ + "\t rrs \t %%pcshtp, %5\n" \ + "\t rrd \t %%usd.lo, %6\n" \ + "\t rrd \t %%usd.hi, %7\n" \ + "\t rrd \t %%cr1.hi, %8\n" \ + \ + "{\n" \ + "\t std,2 \t 0, [%9] 2, %0\n" \ + "\t ibranch \t 1b ? 
%%MLOCK\n" \ + "}\n" \ + : "=&r" (psp_lo), \ + "=&r" (psp_hi), \ + "=&r" (pshtp), \ + "=&r" (pcsp_lo), \ + "=&r" (pcsp_hi), \ + "=&r" (pcshtp), \ + "=&r" (usd_lo), \ + "=&r" (usd_hi), \ + "=&r" (cr1_hi) \ + : "r" ((__e2k_ptr_t) (&lock_addr)) \ + : "memory"); \ +}) + +#define NATIVE_ASM_FLUSH_DCACHE_LINE(addr) \ +do { \ + asm volatile("{wait st_c=1}\n" \ + "{std,2 [ %0 + 0 ] %2, %1}\n" \ + "{wait fl_c=1}\n" \ + : \ + : "r" (addr), "r" (0), "i" (MAS_DCACHE_LINE_FLUSH));\ +} while (0) + +#define NATIVE_CLEAN_LD_ACQ_ADDRESS(_reg1, _reg2, _hwbug_address) \ +({ \ + asm volatile ( \ + "{\n" \ + "ldb,0,sm %[addr], 0 * 4096 + 0 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 0 * 4096 + 4 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + "{\n" \ + "ldb,0,sm %[addr], 8 * 4096 + 1 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 8 * 4096 + 5 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + "{\n" \ + "ldb,0,sm %[addr], 16 * 4096 + 2 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 16 * 4096 + 6 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + "{\n" \ + "ldb,0,sm %[addr], 24 * 4096 + 3 * 64, %[reg1], mas=%[mas]\n" \ + "ldb,3,sm %[addr], 24 * 4096 + 7 * 64, %[reg2], mas=%[mas]\n" \ + "}\n" \ + : [reg1] "=&r" (_reg1), [reg2] "=&r" (_reg2) \ + : [addr] "r" (__hwbug_address), \ + [mas] "i" (MAS_BYPASS_ALL_CACHES | \ + MAS_MODE_LOAD_OP_LOCK_CHECK)); \ +}) + + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) && defined(CONFIG_CPU_ES2) + +# define HWBUG_ATOMIC_BEGIN(addr) \ + unsigned long __hwbug_atomic_flags = 0; \ + bool __hwbug_atomic_possible = cpu_has(CPU_HWBUG_ATOMIC); \ + if (__hwbug_atomic_possible) { \ + __hwbug_atomic_flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_SET_UPSR_IRQ_BARRIER( \ + __hwbug_atomic_flags & ~(UPSR_IE | UPSR_NMIE)); \ + NATIVE_FLUSH_DCACHE_LINE_UNPRIV((unsigned long) (addr)); \ + } +# define HWBUG_ATOMIC_END() \ + if (__hwbug_atomic_possible) \ + NATIVE_SET_UPSR_IRQ_BARRIER(__hwbug_atomic_flags) +#else +# define HWBUG_ATOMIC_BEGIN(addr) +# define HWBUG_ATOMIC_END() +#endif + +/* + * On E2C+ atomic operations have relaxed memory ordering: + * _st_unlock can be reordered with subsequent loads and stores. + * Issue an explicit memory barrier if atomic operation returns a value. + * + * On E4C with multiple nodes and E2C+ atomic operations have fully + * relaxed memory ordering because of a hardware bug, must add "wait ma_c". 
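+ *
+ * For example, a sketch using the NATIVE_ATOMIC_OP() macro defined below
+ * (the local variables "u32 v" and "u32 rval" are assumed here and are not
+ * part of this header):
+ *
+ *	NATIVE_ATOMIC_OP(1, &v, rval, w, "adds", RELAXED_MB); // no barriers
+ *	NATIVE_ATOMIC_OP(1, &v, rval, w, "adds", STRONG_MB);  // barriers added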
+ */ +#if !defined CONFIG_E2K_MACHINE +# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC_LOCK_MB /* E2K_WAIT_ST_C_SAS() */ \ + ".word 0x00008001\n" \ + ".word 0x30000084\n" +#elif defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU +# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC_LOCK_MB /* E2K_WAIT_ST_C_SAS() */ \ + ".word 0x00008001\n" \ + ".word 0x30000084\n" +#elif defined CONFIG_E2K_E2S && defined CONFIG_NUMA +# define MB_BEFORE_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC "{wait st_c=1, ma_c=1}\n" +# define MB_AFTER_ATOMIC_LOCK_MB +#else +# define MB_BEFORE_ATOMIC +# define MB_AFTER_ATOMIC +# define MB_AFTER_ATOMIC_LOCK_MB +#endif + +#define MB_BEFORE_ATOMIC_LOCK_MB + +#define MB_BEFORE_ATOMIC_STRONG_MB MB_BEFORE_ATOMIC +#define MB_AFTER_ATOMIC_STRONG_MB MB_AFTER_ATOMIC + +#define MB_BEFORE_ATOMIC_RELEASE_MB MB_BEFORE_ATOMIC +#define MB_AFTER_ATOMIC_RELEASE_MB + +#define MB_BEFORE_ATOMIC_ACQUIRE_MB +#define MB_AFTER_ATOMIC_ACQUIRE_MB MB_AFTER_ATOMIC + +#define MB_BEFORE_ATOMIC_RELAXED_MB +#define MB_AFTER_ATOMIC_RELAXED_MB + +#ifdef CONFIG_DEBUG_LCC_VOLATILE_ATOMIC +# define NOT_VOLATILE volatile +#else +# define NOT_VOLATILE +#endif + +#if CONFIG_CPU_ISET >= 5 +# define ACQUIRE_MB_ATOMIC_CHANNEL "5" +# define RELAXED_MB_ATOMIC_CHANNEL "5" +#else /* CONFIG_CPU_ISET < 5 */ +# define ACQUIRE_MB_ATOMIC_CHANNEL "2" +# define RELAXED_MB_ATOMIC_CHANNEL "2" +#endif /* CONFIG_CPU_ISET >= 5 */ +#define RELEASE_MB_ATOMIC_CHANNEL "2" +#define STRONG_MB_ATOMIC_CHANNEL "2" +#define LOCK_MB_ATOMIC_CHANNEL ACQUIRE_MB_ATOMIC_CHANNEL + +#if CONFIG_CPU_ISET >= 6 +# define LOCK_MB_ATOMIC_MAS "0x2" +# define ACQUIRE_MB_ATOMIC_MAS "0x2" +# define RELEASE_MB_ATOMIC_MAS "0x73" +# define STRONG_MB_ATOMIC_MAS "0x73" +# define RELAXED_MB_ATOMIC_MAS "0x2" +#else +# define LOCK_MB_ATOMIC_MAS "0x2" +# define ACQUIRE_MB_ATOMIC_MAS "0x2" +# define RELEASE_MB_ATOMIC_MAS "0x2" +# define STRONG_MB_ATOMIC_MAS "0x2" +# define RELAXED_MB_ATOMIC_MAS "0x2" +#endif + +#define CLOBBERS_LOCK_MB : "memory" +#define CLOBBERS_ACQUIRE_MB : "memory" +#define CLOBBERS_RELEASE_MB : "memory" +#define CLOBBERS_STRONG_MB : "memory" +#define CLOBBERS_RELAXED_MB + +/* + * mem_model - one of the following: + * LOCK_MB + * ACQUIRE_MB + * RELEASE_MB + * STRONG_MB + * RELAXED_MB + */ +#define NATIVE_ATOMIC_OP(__val, __addr, __rval, \ + size_letter, op, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld" #size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\n" op " %[rval], %[val], %[rval]" \ + "\n}" \ + "\n{"\ + "\nst" #size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[rval], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_FETCH_OP(__val, __addr, __rval, __tmp, \ + size_letter, op, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld" #size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\n" op " %[rval], %[val], %[tmp]" \ + "\n}" \ + "\n{"\ + "\nst" #size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[tmp], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [tmp] "=&r" (__tmp), [addr] "+m" (*(__addr)), \ + [rval] "=&r" (__rval) \ + : [val] "ir" (__val) \ + CLOBBERS_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC32_ADD_IF_NOT_NEGATIVE(__val, __addr, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\ncmplsb %[rval], 0, %%pred2" \ + "\n}" \ + "\n{"\ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\nadds %[rval], %[val], %[rval] ? ~ %%pred2" \ + "\n}" \ + "\n{"\ + "\nstw," mem_model##_ATOMIC_CHANNEL " %[addr], %[rval], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC64_ADD_IF_NOT_NEGATIVE(__val, __addr, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n1:" \ + "\n{" \ + "\nnop 4" \ + "\nldd,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1" \ + "\ncmpldb %[rval], 0, %%pred2" \ + "\n}" \ + "\n{"\ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\naddd %[rval], %[val], %[rval] ? ~ %%pred2" \ + "\n}" \ + "\n{"\ + "\nstd," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[rval], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +/* Atomically add to 16 low bits and return the new 32 bits value */ +#define NATIVE_ATOMIC16_ADD_RETURN32_LOCK(val, addr, rval, tmp) \ +({ \ + HWBUG_ATOMIC_BEGIN(addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0\t0x0, [%3] 0x7, %0" \ + "\n}" \ + "\n{"\ + "\nadds %0, %2, %1" \ + "\nands %0, 0xffff0000, %0" \ + "\n}" \ + "\nands %1, 0x0000ffff, %1" \ + "\nadds %0, %1, %0" \ + "\n{"\ + "\nstw," ACQUIRE_MB_ATOMIC_CHANNEL " 0x0, [%3] 0x2, %0" \ + "\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : "=&r" (rval), "=&r" (tmp) \ + : "i" (val), "r" ((__e2k_ptr_t) (addr)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* Atomically add two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define NATIVE_ATOMIC32_PAIR_ADD_RETURN64_LOCK(val_lo, val_hi, addr, rval, \ + tmp1, tmp2, tmp3) \ +({ \ + HWBUG_ATOMIC_BEGIN(addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{"\ + "\n\tnop 4"\ + "\n\tldd,0\t0x0, [%6] 0x7, %0" \ + "\n\t}"\ + "\n\t{"\ + "\n\tsard %0, 32, %1" \ + "\n\tadds %4, 0, %2" \ + "\n\tadds %5, 0, %3" \ + "\n\t}" \ + "\n\t{"\ + "\n\tadds %1, %3, %1" \ + "\n\tadds %0, %2, %0" \ + "\n\t}" \ + "\n\t{"\ + "\n\tsxt 6, %1, %1" \ + "\n\tsxt 6, %0, %0" \ + "\n\t}" \ + "\n\tshld %1, 32, %1" \ + "\n\tord %1, %0, %0" \ + "\n\t{"\ + "\n\tstd," ACQUIRE_MB_ATOMIC_CHANNEL "0x0, [%6] 0x2, %0" \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : "=&r" (rval), \ + "=&r" (tmp1), \ + "=&r" (tmp2), \ + "=&r" (tmp3) \ + : "ri" (val_lo), \ + "ri" (val_hi), \ + "r" ((__e2k_ptr_t) (addr)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* Atomically sub two 32 bits values packed into one 64 bits value */ +/* and return the new 64 bits value */ +#define NATIVE_ATOMIC32_PAIR_SUB_RETURN64_LOCK(val_lo, val_hi, addr, rval, \ + tmp1, tmp2, tmp3) \ +({ \ + HWBUG_ATOMIC_BEGIN(addr); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n\t{"\ + "\n\tnop 4"\ + "\n\tldd,0\t0x0, [%6] 0x7, %0" \ + "\n\t}"\ + "\n\t{"\ + "\n\tsard %0, 32, %1" \ + "\n\tadds %4, 0, %2" \ + "\n\tadds %5, 0, %3" \ + "\n\t}" \ + "\n\t{"\ + "\n\tsubs %1, %3, %1" \ + "\n\tsubs %0, %2, %0" \ + "\n\t}" \ + "\n\t{"\ + "\n\tsxt 6, %1, %1" \ + "\n\tsxt 6, %0, %0" \ + "\n\t}" \ + "\n\tshld %1, 32, %1" \ + "\n\tord %1, %0, %0" \ + "\n\t{"\ + "\n\tstd," ACQUIRE_MB_ATOMIC_CHANNEL "0x0, [%6] 0x2, %0" \ + "\n\tibranch 1b ? %%MLOCK" \ + "\n\t}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : "=&r" (rval), \ + "=&r" (tmp1), \ + "=&r" (tmp2), \ + "=&r" (tmp3) \ + : "ri" (val_lo), \ + "ri" (val_hi), \ + "r" ((__e2k_ptr_t) (addr)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ +}) + +/* + * C equivalent: + * + * boot_spinlock_t oldval, newval; + * oldval.lock = ACCESS_ONCE(lock->lock); + * if (oldval.head == oldval.tail) { + * newval.lock = oldval.lock + (1 << BOOT_SPINLOCK_TAIL_SHIFT); + * if (cmpxchg(&lock->lock, oldval.lock, newval.lock) == + * oldval.lock) + * return 1; + * } + * return 0; + */ +#define NATIVE_ATOMIC_TICKET_TRYLOCK(spinlock, tail_shift, \ + __val, __head, __tail, __rval) \ +do { \ + HWBUG_ATOMIC_BEGIN(spinlock); \ + asm NOT_VOLATILE ( \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %[addr], %[val], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nshrs,0 %[val], 0x10, %[tail]" \ + "\ngetfs,1 %[val], 0x400, %[head]" \ + "\n}" \ + "\n{" \ + "\nnop" \ + "\ncmpesb,0 %[tail], %[head], %%pred2" \ + "\nadds 0, 0, %[rval]" \ + "\n}" \ + "\n{" \ + "\nnop 3" /* bug 92891 - optimize for performance */ \ + "\nadds,0 0, 1, %[rval] ? %%pred2" \ + "\nadds,2 %[val], %[incr], %[val] ? %%pred2" \ + "\n}" \ + "\n{" \ + "\nstw," ACQUIRE_MB_ATOMIC_CHANNEL " %[addr], %[val], mas=" LOCK_MB_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_LOCK_MB \ + : [rval] "=&r" (__rval), [val] "=&r" (__val), \ + [head] "=&r" (__head), [tail] "=&r" (__tail), \ + [addr] "+m" (*(spinlock)) \ + : [incr] "i" (1 << tail_shift) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +/* + * Atomic support of new read/write spinlock mechanism. 
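+ *
+ * The 64-bit lock word layout assumed by the macros below, as a sketch
+ * inferred from their getfd/sard/shld field accesses (not a definition
+ * taken from this header):
+ *
+ *	union {
+ *		u64 lock;
+ *		struct {
+ *			u16 head;	// bits  0..15: ticket being served
+ *			u16 ticket;	// bits 16..31: next coupon to hand out
+ *			s32 count;	// bits 32..63: < 0 - active readers,
+ *					//	1 - active writer, 0 - free
+ *		};
+ *	};
+ *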
+ * Locking is ordered: later readers cannot outrun earlier writers.
+ * The locking order is based on coupons (tickets) received on the first
+ * attempt to take the lock, if the lock is already taken by somebody else.
+ *
+ * The read/write spinlock's initial state allows 2^32 active readers and
+ * only one active writer, but the coupon discipline allows only 2^16
+ * simultaneously registered users of the lock: active + waiters
+ */
+
+/*
+ * A test: can the read/write lock be taken by a reader right now?
+ * The macro returns the source state of the read/write lock and sets the
+ * passed-by-reference boolean value 'success' - locking can be successful
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_can_lock_reader(arch_rwlock_t *rw, bool success // bypassed)
+{
+	arch_rwlock_t src_lock;
+	u16 ticket;
+	u16 head;
+	s32 count;
+
+	src_lock.lock = rw->lock;
+	ticket = src_lock.ticket;
+	head = src_lock.head;
+	count = src_lock.count;
+	// can lock: no waiters and no active writers
+	success = (ticket == head) && (count-1 < 0);
+	return src_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_CAN_LOCK_READER(__rw_addr, __success, \
+		__head, __ticket, __count, __src) \
+({ \
+	asm ( \
+		"\n\tldd,0 %[addr], %[src]" \
+		"\n\t{" \
+		"\n\tsard %[src], 32, %[count]" \
+		"\n\tgetfd %[src], 0x400, %[head]" \
+		"\n\tgetfd %[src], 0x410, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsubs %[count], 1, %[count]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmplsb %[count], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		: [success] "=&r" (__success), \
+		  [src] "=&r" (__src), \
+		  [head] "=&r" (__head), \
+		  [ticket] "=&r" (__ticket), \
+		  [count] "=&r" (__count) \
+		: [addr] "m" (*(__rw_addr)) \
+		: "memory", "pred2", "pred3"); \
+})
+
+/*
+ * A test: can the read/write lock be taken by a writer right now?
+ * The macro returns the source state of the read/write lock and sets the
+ * passed-by-reference boolean value 'success' - locking can be successful
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_can_lock_writer(arch_rwlock_t *rw, bool success // bypassed)
+{
+	arch_rwlock_t src_lock;
+	u16 ticket;
+	u16 head;
+	s32 count;
+
+	src_lock.lock = rw->lock;
+	ticket = src_lock.ticket;
+	head = src_lock.head;
+	count = src_lock.count;
+	// can lock: no waiters and no active readers or writers
+	success = (ticket == head) && (count == 0);
+	return src_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_CAN_LOCK_WRITER(__rw_addr, __success, \
+		__head, __ticket, __count, __src) \
+({ \
+	asm ( \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[src]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[src], 32, %[count]" \
+		"\n\tgetfd %[src], 0x400, %[head]" \
+		"\n\tgetfd %[src], 0x410, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmpesb %[count], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		: [success] "=&r" (__success), \
+		  [src] "=&r" (__src), \
+		  [head] "=&r" (__head), \
+		  [ticket] "=&r" (__ticket), \
+		  [count] "=&r" (__count) \
+		: [addr] "m" (*(__rw_addr)) \
+		: "memory", "pred2", "pred3"); \
+})
+
+/*
+ * The first try to take the read spinlock.
+ * Successful locking increments the ticket and head numbers and decrements
+ * the active readers counter (a negative counter).
+ * The macro returns the source state of the read/write lock and sets the
+ * passed-by-reference boolean value 'success' - locking is successful;
+ * otherwise the reader receives a coupon and should be queued as a waiter,
+ * similar to the mutex implementation
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
+{
+	arch_rwlock_t src_lock;
+	arch_rwlock_t dst_lock;
+	u16 ticket;
+	u16 head;
+	s32 count;
+
+	src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	ticket = src_lock.ticket;
+	head = src_lock.head;
+	count = src_lock.count;
+	// can lock: no waiters and no active writers
+	success = (ticket == head) && (count-1 < 0);
+	dst_lock.ticket = ticket + 1;
+	if (success) {
+		// take lock: increment readers (negative value),
+		// increment head to enable following readers
+		count = count - 1;
+		head = head + 1;
+	}
+	dst_lock.count = count;
+	dst_lock.head = head;
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return src_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_ADD_NEW_READER(__rw_addr, __success, \
+		__head, __ticket, __count, __src, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[src], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[src], 32, %[count]" \
+		"\n\tgetfd %[src], 0x400, %[head]" \
+		"\n\tgetfd %[src], 0x410, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsubs %[count], 1, %[tmp]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmplsb %[tmp], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\tadds %[ticket], 1, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[ticket], %[ticket]" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[head], 1, %[head] ? %%pred2" \
+		"\n\tsubs %[count], 1, %[count] ? %%pred2" \
+		"\n\tshld %[ticket], 16, %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[head], %[head] ? %%pred2" \
+		"\n\tsxt 2, %[count], %[count] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp]" \
+		"\n\tord %[dst], %[head], %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst]" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [success] "=&r" (__success), \
+		  [src] "=&r" (__src), \
+		  [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [ticket] "=&r" (__ticket), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		:: "memory", "pred2", "pred3"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * Only try to take the read spinlock.
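+ * (trylock flavour of NATIVE_ATOMIC_ADD_NEW_READER() above: on failure the
+ * unchanged source lock word is stored back and 'success' stays zero)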
+ * Successful locking increments the ticket and head numbers and decrements
+ * the active readers counter (a negative counter).
+ * The macro returns the source state of the read/write lock and sets the
+ * passed-by-reference boolean value 'success' - locking is successful;
+ * otherwise 'success' is false and nothing is changed
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_try_add_new_reader(arch_rwlock_t *rw, bool success // bypassed)
+{
+	arch_rwlock_t src_lock;
+	arch_rwlock_t dst_lock;
+	u16 ticket;
+	u16 head;
+	s32 count;
+
+	src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	ticket = src_lock.ticket;
+	head = src_lock.head;
+	count = src_lock.count;
+	// can lock: no waiters and no active writers
+	success = (ticket == head) && (count-1 < 0);
+	if (success) {
+		// take lock: increment readers (negative value),
+		// increment head to enable following readers,
+		// increment ticket number for next users
+		dst_lock.ticket = ticket + 1;
+		dst_lock.count = count - 1;
+		dst_lock.head = head + 1;
+	} else {
+		dst_lock.lock = src_lock.lock;
+	}
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return src_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_TRY_ADD_NEW_READER(__rw_addr, __success, \
+		__head, __ticket, __count, __src, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[src], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[src], 32, %[count]" \
+		"\n\tgetfd %[src], 0x400, %[head]" \
+		"\n\tgetfd %[src], 0x410, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsubs %[count], 1, %[tmp]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmplsb %[tmp], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\tadds %[ticket], 1, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[ticket], %[ticket]" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[head], 1, %[head] ? %%pred2" \
+		"\n\tsubs %[count], 1, %[count] ? %%pred2" \
+		"\n\tshld %[ticket], 16, %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[head], %[head] ? %%pred2" \
+		"\n\tsxt 2, %[count], %[count] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp] ? %%pred2" \
+		"\n\tord %[dst], %[head], %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\taddd %[src], 0, %[dst] ? ~%%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [success] "=&r" (__success), \
+		  [src] "=&r" (__src), \
+		  [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [ticket] "=&r" (__ticket), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		:: "memory", "pred2", "pred3"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * The slow try to take the read spinlock according to the earlier received
+ * coupon number.
+ * Successful locking increments the head number and decrements the active
+ * readers counter (a negative counter).
+ * The macro returns the current updated state of the read/write lock and
+ * sets the passed-by-reference boolean value 'success' - locking is
+ * successful; otherwise the reader should be queued again
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_slow_reader(arch_rwlock_t *rw, u16 ticket, bool success)
+{
+	arch_rwlock_t dst_lock;
+	u16 head;
+	s32 count;
+
+	dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	head = dst_lock.head;
+	count = dst_lock.count;
+	// can lock: no waiters and no active writers
+	success = (ticket == head) && (count-1 < 0);
+	if (success) {
+		// take lock: increment readers (negative value),
+		// increment head to enable following readers
+		count = count - 1;
+		head = head + 1;
+		dst_lock.count = count;
+		dst_lock.head = head;
+	}
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return dst_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_ADD_SLOW_READER(__rw_addr, __success, \
+		__head, __ticket, __count, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[dst], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[dst], 32, %[count]" \
+		"\n\tgetfd %[dst], 0x400, %[head]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsubs %[count], 1, %[tmp]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmplsb %[tmp], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[head], 1, %[head] ? %%pred2" \
+		"\n\tsubs %[count], 1, %[count] ? %%pred2" \
+		"\n\tandd %[dst], 0xffff0000, %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[head], %[head] ? %%pred2" \
+		"\n\tsxt 2, %[count], %[count] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp] ? %%pred2" \
+		"\n\tord %[dst], %[head], %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [success] "=&r" (__success), \
+		  [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		: [ticket] "r" (__ticket) \
+		: "memory", "pred2", "pred3"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * Unlocking of the read spinlock.
+ * Only the active readers counter (a negative counter) needs to be
+ * incremented.
+ * The macro returns the current updated state of the read/write lock.
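+ * (the readers counter occupies the upper 32 bits of the lock word, so the
+ * unlock below is a single "addd 0x100000000" performed under the same
+ * load-locked/store-unlocked discipline as the other macros)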
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_free_lock_reader(arch_rwlock_t *rw)
+{
+	arch_rwlock_t dst_lock;
+
+	dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	dst_lock.count++;
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return dst_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_FREE_LOCK_READER(__rw_addr, __dst) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[dst], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tnop 2" \
+		"\n\taddd %[dst], 0x100000000, %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [dst] "=&r" (__dst), \
+		  [addr] "+m" (*(__rw_addr)) \
+		:: "memory"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * The first try to take the write spinlock.
+ * Successful locking increments the ticket number and the active writers
+ * counter (a positive value - there can be only one active writer, so the
+ * counter is set to 1).
+ * The macro returns the source state of the read/write lock and sets the
+ * passed-by-reference boolean value 'success' - locking is successful;
+ * otherwise the writer receives a coupon and should be queued as a waiter,
+ * similar to the mutex implementation
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
+{
+	arch_rwlock_t src_lock;
+	arch_rwlock_t dst_lock;
+	u16 ticket;
+	u16 head;
+	s32 count;
+
+	src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	ticket = src_lock.ticket;
+	head = src_lock.head;
+	count = src_lock.count;
+	// can lock: no waiters and no active readers or writers
+	success = (ticket == head) && (count == 0);
+	dst_lock.head = head;
+	dst_lock.ticket = ticket + 1;
+	if (success) {
+		// take lock: increment writers
+		count = count + 1;
+	}
+	dst_lock.count = count;
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return src_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_ADD_NEW_WRITER(__rw_addr, __success, \
+		__head, __ticket, __count, __src, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[src], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[src], 32, %[count]" \
+		"\n\tgetfd %[src], 0x400, %[head]" \
+		"\n\tgetfd %[src], 0x410, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmpesb %[count], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\tadds %[ticket], 1, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[ticket], %[ticket]" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[count], 1, %[count] ? %%pred2" \
+		"\n\tshld %[ticket], 16, %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 2, %[count], %[count] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp]" \
+		"\n\tord %[dst], %[head], %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst]" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [success] "=&r" (__success), \
+		  [src] "=&r" (__src), \
+		  [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [ticket] "=&r" (__ticket), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		:: "memory", "pred2", "pred3"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * Only try to take the write spinlock.
+ * Successful locking increments the ticket number and the active writers
+ * counter (a positive value - there can be only one active writer, so the
+ * counter is set to 1).
+ * The macro returns the source state of the read/write lock and sets the
+ * passed-by-reference boolean value 'success' - locking is successful;
+ * otherwise 'success' is set to false and nothing is changed
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_try_add_new_writer(arch_rwlock_t *rw, bool success // bypassed)
+{
+	arch_rwlock_t src_lock;
+	arch_rwlock_t dst_lock;
+	u16 ticket;
+	u16 head;
+	s32 count;
+
+	src_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	ticket = src_lock.ticket;
+	head = src_lock.head;
+	count = src_lock.count;
+	// can lock: no waiters and no active readers or writers
+	success = (ticket == head) && (count == 0);
+	if (success) {
+		// take lock: increment writers counter,
+		// increment ticket number for next readers/writers
+		dst_lock.head = head;
+		dst_lock.ticket = ticket + 1;
+		dst_lock.count = count + 1;
+	}
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return src_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_TRY_ADD_NEW_WRITER(__rw_addr, __success, \
+		__head, __ticket, __count, __src, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[src], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[src], 32, %[count]" \
+		"\n\tgetfd %[src], 0x400, %[head]" \
+		"\n\tgetfd %[src], 0x410, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmpesb %[count], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\tadds %[ticket], 1, %[ticket]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[ticket], %[ticket]" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[count], 1, %[count] ? %%pred2" \
+		"\n\tshld %[ticket], 16, %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 2, %[count], %[count] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp] ? %%pred2" \
+		"\n\tord %[dst], %[head], %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\taddd %[src], 0, %[dst] ? ~%%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [success] "=&r" (__success), \
+		  [src] "=&r" (__src), \
+		  [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [ticket] "=&r" (__ticket), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		:: "memory", "pred2", "pred3"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * The slow try to take the write spinlock according to the earlier received
+ * coupon number.
+ * Successful locking increments the active writers counter (a positive
+ * counter - there can be only one active writer, so the counter is set to 1).
+ * The macro returns the current updated state of the read/write lock and
+ * sets the passed-by-reference boolean value 'success' - locking is
+ * successful; otherwise the writer should be queued again
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_add_slow_writer(arch_rwlock_t *rw, u16 ticket, bool success)
+{
+	arch_rwlock_t dst_lock;
+	u16 head;
+	s32 count;
+
+	dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	head = dst_lock.head;
+	count = dst_lock.count;
+	// can lock: no waiters and no active readers or writers
+	success = (ticket == head) && (count == 0);
+	if (success) {
+		// take lock: increment writers
+		count = count + 1;
+		dst_lock.count = count;
+	}
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return dst_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_ADD_SLOW_WRITER(__rw_addr, __success, \
+		__head, __ticket, __count, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[dst], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[dst], 32, %[count]" \
+		"\n\tgetfd %[dst], 0x400, %[head]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tcmpesb %[count], 0, %%pred3" \
+		"\n\tcmpesb %[head], %[ticket], %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tpass %%pred2, @p0" \
+		"\n\tpass %%pred3, @p1" \
+		"\n\tlandp @p0, @p1, @p4"\
+		"\n\tpass @p4, %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[count], 1, %[count] ? %%pred2" \
+		"\n\tandd %[dst], 0xffffffff, %[dst] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 2, %[count], %[count] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp] ? %%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst] ? %%pred2" \
+		"\n\tadds 1, 0, %[success] ? %%pred2" \
+		"\n\tadds 0, 0, %[success] ? ~%%pred2" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [success] "=&r" (__success), \
+		  [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		: [ticket] "r" (__ticket) \
+		: "memory", "pred2", "pred3"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+/*
+ * Unlocking of the write spinlock.
+ * Only the queue head number needs to be incremented and the active writers
+ * counter decremented (a positive counter - there can be only one writer,
+ * so the counter is set back to 0).
+ * The macro returns the current updated state of the read/write lock.
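+ * (the head number advances by one so that the next coupon holder may
+ * proceed, and the writers counter drops from 1 back to 0 in the same
+ * atomic store)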
+ *
+ * C equivalent:
+ *
+static rwlock_val_t
+atomic_free_lock_writer(arch_rwlock_t *rw)
+{
+	arch_rwlock_t dst_lock;
+
+	dst_lock.lock = E2K_ATOMIC_LBRACKET_D(rw, MAS_WAIT_LOCK, 0);
+	dst_lock.count--;
+	dst_lock.head++;
+	E2K_ATOMIC_RBRACKET_D(rw, dst_lock.lock, MAS_WAIT_UNLOCK, 0);
+	return dst_lock.lock;
+}
+ */
+#define NATIVE_ATOMIC_FREE_LOCK_WRITER(__rw_addr, \
+		__head, __count, __dst, __tmp) \
+({ \
+	HWBUG_ATOMIC_BEGIN(__rw_addr); \
+	asm NOT_VOLATILE ( \
+		"\n1:" \
+		"\n\t{" \
+		"\n\tnop 4" \
+		"\n\tldd,0 %[addr], %[dst], mas=0x7" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsard %[dst], 32, %[count]" \
+		"\n\tgetfd %[dst], 0x400, %[head]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tadds %[head], 1, %[head]" \
+		"\n\tsubs %[count], 1, %[count]" \
+		"\n\tandd %[dst], 0xffff0000, %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tsxt 5, %[head], %[head]" \
+		"\n\tsxt 2, %[count], %[count]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tshld %[count], 32, %[tmp]" \
+		"\n\tord %[dst], %[head], %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tord %[dst], %[tmp], %[dst]" \
+		"\n\t}" \
+		"\n\t{" \
+		"\n\tstd,2 %[addr], %[dst], mas=" LOCK_MB_ATOMIC_MAS \
+		"\n\tibranch 1b ? %%MLOCK" \
+		"\n\t}" \
+		MB_AFTER_ATOMIC_LOCK_MB \
+		: [dst] "=&r" (__dst), \
+		  [head] "=&r" (__head), \
+		  [count] "=&r" (__count), \
+		  [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__rw_addr)) \
+		:: "memory"); \
+	HWBUG_ATOMIC_END(); \
+})
+
+
+/*
+ * Atomic operations with return value and acquire/release semantics
+ */
+
+#define NATIVE_ATOMIC_FETCH_OP_UNLESS(__val, __addr, __unless, __tmp, __rval, \
+		size_letter, op, op_pred, add_op, add_op_pred, cmp_op, mem_model) \
+do { \
+	HWBUG_ATOMIC_BEGIN(__addr); \
+	asm NOT_VOLATILE ( \
+		MB_BEFORE_ATOMIC_##mem_model \
+		"\n1:" \
+		"\n{" \
+		"\nnop 4" \
+		"\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \
+		"\n}" \
+		"\n{" \
+		"\nnop" \
+		"\n" cmp_op " %[rval], %[unless], %%pred2" \
+		"\n}" \
+		"\n{" \
+		"\nnop 2" /* bug 92891 - optimize for performance */ \
+		"\n" op " %[rval], %[val], %[tmp] ? " op_pred "%%pred2" \
+		"\n" add_op " %[rval], 0, %[tmp] ? " add_op_pred "%%pred2" \
+		"\n}" \
+		"\n{" \
+		"\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \
+		" %[addr], %[tmp], mas=" mem_model##_ATOMIC_MAS \
+		"\nibranch 1b ? %%MLOCK" \
+		"\n}" \
+		MB_AFTER_ATOMIC_##mem_model \
+		: [rval] "=&r" (__rval), [tmp] "=&r" (__tmp), \
+		  [addr] "+m" (*(__addr)) \
+		: [val] "ir" (__val), [unless] "ir" (__unless) \
+		CLOBBERS_PRED2_##mem_model); \
+	HWBUG_ATOMIC_END(); \
+} while (0)
+
+#define NATIVE_ATOMIC_FETCH_XCHG_UNLESS(__val, __addr, __tmp, __rval, \
+		size_letter, merge_op, cmp_op, mem_model) \
+do { \
+	HWBUG_ATOMIC_BEGIN(__addr); \
+	asm NOT_VOLATILE ( \
+		MB_BEFORE_ATOMIC_##mem_model \
+		"\n1:" \
+		"\n{" \
+		"\nnop 4" \
+		"\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \
+		"\n}" \
+		"\n{" \
+		"\nnop" \
+		"\n" cmp_op " %[rval], %[val], %%pred2" \
+		"\n}" \
+		"\n{" \
+		"\nnop 2" /* bug 92891 - optimize for performance */ \
+		"\n" merge_op " %[rval], %[val], %[tmp], %%pred2" \
+		"\n}" \
+		"\n{" \
+		"\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \
+		" %[addr], %[tmp], mas=" mem_model##_ATOMIC_MAS \
+		"\nibranch 1b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [tmp] "=&r" (__tmp), \ + [addr] "+m" (*(__addr)) \ + : [val] "ir" (__val) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_XCHG_RETURN(__val, __addr, __rval, \ + size_letter, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n2:" \ + "\n{"\ + "\nnop 5" /* bug 92891 - optimize for performance */ \ + "\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{"\ + "\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 2b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [val] "r" (__val) \ + CLOBBERS_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define CLOBBERS_PRED2_LOCK_MB : "memory", "pred2" +#define CLOBBERS_PRED2_ACQUIRE_MB : "memory", "pred2" +#define CLOBBERS_PRED2_RELEASE_MB : "memory", "pred2" +#define CLOBBERS_PRED2_STRONG_MB : "memory", "pred2" +#define CLOBBERS_PRED2_RELAXED_MB : "pred2" + + +#define CLOBBERS_PRED2_3_R16_17_LOCK_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_ACQUIRE_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_RELEASE_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_STRONG_MB : "memory", "pred2", "pred3",\ + "r16", "r17" +#define CLOBBERS_PRED2_3_R16_17_RELAXED_MB :"pred2", "pred3", "r16", "r17" + + +#define NATIVE_ATOMIC_CMPXCHG_RETURN(__old, __new, __addr, __stored_val, \ + __rval, size_letter, sxt_size, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nld"#size_letter ",0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nsxt\t"#sxt_size", %[rval], %[rval]" \ + "\naddd 0x0, %[new], %[stored_val]" \ + "\n}" \ + "\n{" \ + "\nnop 1" \ + "\ncmpedb %[rval], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 1" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[rval], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nst"#size_letter "," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[stored_val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [stored_val] "=&r" (__stored_val), \ + [addr] "+m" (*(__addr)) \ + : [new] "ir" (__new), [old] "ir" (__old) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_CMPXCHG_WORD_RETURN(__old, __new, __addr, \ + __stored_val, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\nadds 0x0, %[new], %[stored_val]" \ + "\ncmpesb %[rval], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\nadds 0x0, %[rval], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstw," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[stored_val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [stored_val] "=&r" (__stored_val), \ + [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [new] "ir" (__new), [old] "ir" (__old) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#define NATIVE_ATOMIC_CMPXCHG_DWORD_RETURN(__old, __new, __addr, \ + __stored_val, __rval, mem_model) \ +do { \ + HWBUG_ATOMIC_BEGIN(__addr); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldd,0 %[addr], %[rval], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\naddd 0x0, %[new], %[stored_val]" \ + "\ncmpedb %[rval], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[rval], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstd," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %[stored_val], mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [stored_val] "=&r" (__stored_val), \ + [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [new] "ir" (__new), [old] "ir" (__old) \ + CLOBBERS_PRED2_##mem_model); \ + HWBUG_ATOMIC_END(); \ +} while (0) + +#ifdef CONFIG_HAVE_CMPXCHG_DOUBLE +/* + * Some problem to use ldq/stq operations + * C language don't use quadro operands + * To avoid some changes of compiler we use fixed register for those operations + * r16 r17 + * + * C equivalent: + if (page->freelist == freelist_old && + page->counters == counters_old) { + page->freelist = freelist_new; + page->counters = counters_new; + */ +#define NATIVE_ATOMIC_CMPXCHG_DWORD_PAIRS(__addr, __old1, __old2, \ + __new1, __new2, __rval, mem_model) \ +do { \ + asm NOT_VOLATILE( \ + MB_BEFORE_ATOMIC_##mem_model \ + "\n3:" \ + "\n{"\ + "\nnop 5" /* bug 92891 - optimize for performance */ \ + "\nldq,0 %[addr], %%r16, mas=0x5" \ + "\naddd 0x1, 0x0, %[rval]" \ + "\n}" \ + "\n{" \ + "\ncmpedb %[old1], %%r16, %%pred2" \ + "\ncmpedb %[old2], %%r17, %%pred3" \ + "\n}" \ + "\n\t{" \ + "\nnop 1"\ + "\n\tpass %%pred2, @p0" \ + "\n\tpass %%pred3, @p1" \ + "\n\tlandp @p0, @p1, @p4"\ + "\n\tpass @p4, %%pred2" \ + "\n\t}" \ + "\n{" \ + "\naddd 0x0, %[new1], %%r16 ? %%pred2" \ + "\naddd 0x0, %[new2], %%r17 ? %%pred2" \ + "\naddd 0x0, 0x0, %[rval]? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstq," mem_model##_ATOMIC_CHANNEL \ + " %[addr], %%r16, mas=" mem_model##_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_##mem_model \ + : [rval] "=&r" (__rval), [addr] "+m" (*(__addr)) \ + : [new1] "ir" (__new1), [old1] "ir" (__old1), \ + [new2] "ir" (__new2), [old2] "ir" (__old2) \ + CLOBBERS_PRED2_3_R16_17_##mem_model); \ +} while (0) +#endif /* CONFIG_HAVE_CMPXCHG_DOUBLE */ + +/* Get number of leading zeroes */ +#define E2K_LZCNTS(val) \ +({ \ + register __e2k_u32_t __res; \ + asm ("lzcnts %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +#define E2K_LZCNTD(val) \ +({ \ + register __e2k_u64_t __res; \ + asm ("lzcntd %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +/* Get number of 1's */ +#define E2K_POPCNTS(val) \ +({ \ + register __e2k_u32_t __res; \ + asm ("popcnts %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +#define E2K_POPCNTD(val) \ +({ \ + register __e2k_u64_t __res; \ + asm ("popcntd %1, %0" : "=r" (__res) : "r" (val)); \ + __res; \ +}) + +#if !defined CONFIG_E2K_MACHINE || \ + defined CONFIG_E2K_ES2_DSP || defined CONFIG_E2K_ES2_RU || \ + (defined CONFIG_E2K_E2S && defined CONFIG_NUMA) + +# define WORKAROUND_WAIT_HWBUG(num) (((num) & (_st_c | _all_c | _sas)) ? 
\ + ((num) | _ma_c) : (num)) +# define E2K_WAIT_ST_C_SAS() E2K_WAIT(_st_c) +# define E2K_WAIT_ST_C_SAS_MT() E2K_WAIT(_st_c) +# define E2K_WAIT_LD_C_LAL() E2K_WAIT(_ld_c) +# define E2K_WAIT_LD_C_LAL_MT() E2K_WAIT(_ld_c) +# define E2K_WAIT_LD_C_LAL_SAL() E2K_WAIT(_ld_c) +# define E2K_WAIT_ST_C_SAS_LD_C_SAL() E2K_WAIT(_st_c | _ld_c) +# define E2K_WAIT_ST_C_SAS_LD_C_SAL_MT() E2K_WAIT(_st_c | _ld_c) + +#else + +# define WORKAROUND_WAIT_HWBUG(num) num + +/* BUG 79245 - use .word to encode relaxed barriers */ +# define E2K_WAIT_ST_C_SAS() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x30000084\n" \ + : "=r" (unused) :: "memory"); \ +}) +# define E2K_WAIT_LD_C_LAL() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x30000408\n" \ + : "=r" (unused) :: "memory"); \ +}) +# define E2K_WAIT_ST_C_SAS_MT() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x30000884\n" \ + : "=r" (unused) :: "memory"); \ +}) +# define E2K_WAIT_LD_C_LAL_SAL() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x30000508\n" \ + : "=r" (unused) :: "memory"); \ +}) +# define E2K_WAIT_LD_C_LAL_MT() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x30000c08\n" \ + : "=r" (unused) :: "memory"); \ +}) +# define E2K_WAIT_ST_C_SAS_LD_C_SAL() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x3000018c\n" \ + : "=r" (unused) :: "memory"); \ +}) +# define E2K_WAIT_ST_C_SAS_LD_C_SAL_MT() \ +({ \ + int unused; \ + _Pragma("no_asm_inline") \ + asm NOT_VOLATILE (".word 0x00008001\n" \ + ".word 0x3000098c\n" \ + : "=r" (unused) :: "memory"); \ +}) +#endif + +#define E2K_WAIT_V6(_num) \ +({ \ + int unused, num = WORKAROUND_WAIT_HWBUG(_num); \ + /* "trap=1" requires special handling, see C1_wait_trap() */ \ + asm NOT_VOLATILE("{wait mem_mod=%[mem_mod], int=%[intr], mt=%[mt], " \ + " lal=%[lal], las=%[las], sal=%[sal], sas=%[sas], " \ + " ma_c=%[ma_c], fl_c=%[fl_c], ld_c = %[ld_c], " \ + " st_c=%[st_c], all_e=%[all_e], all_c=%[all_c]}"\ + : "=r" (unused) \ + : [all_c] "i" (((num) & 0x1)), \ + [all_e] "i" (((num) & 0x2) >> 1), \ + [st_c] "i" (((num) & 0x4) >> 2), \ + [ld_c] "i" (((num) & 0x8) >> 3), \ + [fl_c] "i" (((num) & 0x10) >> 4), \ + [ma_c] "i" (((num) & 0x20) >> 5), \ + [sas] "i" (((num) & 0x80) >> 7), \ + [sal] "i" (((num) & 0x100) >> 8), \ + [las] "i" (((num) & 0x200) >> 9), \ + [lal] "i" (((num) & 0x400) >> 10), \ + [mt] "i" (((num) & 0x800) >> 11), \ + [intr] "i" (((num) & 0x1000) >> 12), \ + [mem_mod] "i" (((num) & 0x2000) >> 13) \ + : "memory" ); \ + if ((num & (_all_c | _ma_c | _lal | _las)) || \ + (num & _ld_c) && !(num & _sal) || \ + (num & _st_c) && !(num & _sas)) \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +}) + + +#define E2K_WAIT_V5(_num) \ +({ \ + int unused, num = WORKAROUND_WAIT_HWBUG(_num); \ + /* "trap=1" requires special handling, see C1_wait_trap() */ \ + asm NOT_VOLATILE ("{wait sal=%[sal], sas=%[sas], ma_c=%[ma_c], " \ + " fl_c=%[fl_c], ld_c=%[ld_c], st_c=%[st_c], " \ + " all_e=%[all_e], all_c=%[all_c]}" \ + : "=r" (unused) \ + : [all_c] "i" (((num) & 0x1)), \ + [all_e] "i" (((num) & 0x2) >> 1), \ + [st_c] "i" (((num) & 0x4) >> 2), \ + [ld_c] "i" (((num) & 0x8) >> 3), \ + [fl_c] "i" (((num) & 0x10) >> 4), \ + [ma_c] "i" (((num) & 0x20) >> 5), \ + [sas] "i" (((num) & 0x80) >> 7), \ + [sal] 
"i" (((num) & 0x100) >> 8) \ + : "memory" ); \ + if ((num & (_all_c | _ma_c)) || \ + (num & _ld_c) && !(num & _sal) || \ + (num & _st_c) && !(num & _sas)) \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +}) + +#define __E2K_WAIT(_num) \ +({ \ + int unused, num = WORKAROUND_WAIT_HWBUG(_num); \ + if ((_num) & ~(_st_c | _ld_c)) \ + asm volatile ("" ::: "memory"); \ + asm NOT_VOLATILE ("{wait ma_c=%6, fl_c=%5, " \ + "ld_c = %4, st_c=%3, all_e=%2, all_c=%1}" \ + : "=r" (unused) \ + : "i" (((num) & 0x1)), \ + "i" (((num) & 0x2) >> 1), \ + "i" (((num) & 0x4) >> 2), \ + "i" (((num) & 0x8) >> 3), \ + "i" (((num) & 0x10) >> 4), \ + "i" (((num) & 0x20) >> 5) \ + : "memory" ); \ + if ((_num) & ~(_st_c | _ld_c)) \ + asm volatile ("" ::: "memory"); \ +}) + +#define E2K_WAIT(num) \ +({ \ + __E2K_WAIT(num); \ + if (num & (_st_c | _ld_c | _all_c | _ma_c)) \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +}) + +/* Wait for the load to finish before issuing + * next memory loads/stores. */ +#define E2K_RF_WAIT_LOAD(reg) \ +do { \ + int unused; \ + asm NOT_VOLATILE ("{adds %1, 0, %%empty}" \ + : "=r" (unused) \ + : "r" (reg) \ + : "memory"); \ + NATIVE_HWBUG_AFTER_LD_ACQ(); \ +} while (0) + +/* + * CPU 'WAIT' operation fields structure + */ +#define E2K_WAIT_OP_MA_C_MASK 0x20 /* wait for all previous memory */ + /* access operatons complete */ +#define E2K_WAIT_OP_FL_C_MASK 0x10 /* wait for all previous flush */ + /* cache operatons complete */ +#define E2K_WAIT_OP_LD_C_MASK 0x08 /* wait for all previous load */ + /* operatons complete */ +#define E2K_WAIT_OP_ST_C_MASK 0x04 /* wait for all previous store */ + /* operatons complete */ +#define E2K_WAIT_OP_ALL_E_MASK 0x02 /* wait for all previous operatons */ + /* issue all possible exceptions */ +#define E2K_WAIT_OP_ALL_C_MASK 0x01 /* wait for all previous operatons */ + /* complete */ +#define E2K_WAIT_OP_ALL_MASK (E2K_WAIT_OP_MA_C_MASK | \ + E2K_WAIT_OP_FL_C_MASK | \ + E2K_WAIT_OP_LD_C_MASK | \ + E2K_WAIT_OP_ST_C_MASK | \ + E2K_WAIT_OP_ALL_C_MASK | \ + E2K_WAIT_OP_ALL_E_MASK) + +#define E2K_WAIT_MA E2K_WAIT(E2K_WAIT_OP_MA_C_MASK) +#define E2K_WAIT_FLUSH E2K_WAIT(E2K_WAIT_OP_FL_C_MASK) +#define E2K_WAIT_LD E2K_WAIT(E2K_WAIT_OP_LD_C_MASK) +#define E2K_WAIT_ST E2K_WAIT(E2K_WAIT_OP_ST_C_MASK) +#define E2K_WAIT_ALL_OP E2K_WAIT(E2K_WAIT_OP_ALL_C_MASK) +#define E2K_WAIT_ALL_EX E2K_WAIT(E2K_WAIT_OP_ALL_E_MASK) +#define E2K_WAIT_ALL E2K_WAIT(E2K_WAIT_OP_ALL_MASK) +#define __E2K_WAIT_ALL __E2K_WAIT(E2K_WAIT_OP_ALL_MASK) + +/* + * Force strict CPU ordering. + * And yes, this is required on UP too when we're talking + * to devices. + * + * For now, "wmb()" doesn't actually do anything, as all + * Intel CPU's follow what Intel calls a *Processor Order*, + * in which all writes are seen in the program order even + * outside the CPU. 
+ +#define _mem_mod 0x2000 /* watch for modification */ +#define _int 0x1000 /* stop the pipeline until interrupt */ +#define _mt 0x800 +#define _lal 0x400 /* load-after-load modifier for _ld_c */ +#define _las 0x200 /* load-after-store modifier for _st_c */ +#define _sal 0x100 /* store-after-load modifier for _ld_c */ +#define _sas 0x80 /* store-after-store modifier for _st_c */ +#define _trap 0x40 /* stop the pipeline until interrupt */ +#define _ma_c 0x20 +#define _fl_c 0x10 /* stop until TLB/cache flush operations complete */ +#define _ld_c 0x8 /* stop until all load operations complete */ +#define _st_c 0x4 /* stop until store operations complete */ +#define _all_e 0x2 +#define _all_c 0x1 + +#define E2K_FLUSHTS \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("flushts"); \ +} while (0) + +/* + * Hardware stacks flush rules for e2k: + * + * 1) PSP/PCSP/PSHTP/PCSHTP reads wait for the corresponding SPILL/FILL + * to finish (whatever the reason for SPILL/FILL is - "flushc", "flushr", + * register file overflow, etc). "rr" must not be in the same wide + * instruction as "flushc"/"flushr". + * + * 2) CWD reads wait for the chain stack SPILL/FILL to finish. + * + * 3) On e3m SPILL/FILL were asynchronous and "wait all_e=1" should have + * been used between SPILL/FILL operations and memory accesses. This is + * not needed anymore. + * + * 4) PSP/PCSP writes wait _only_ for SPILL. So if we do not know whether + * there can be a FILL going right now then some form of wait must be + * inserted before the write. Also writing PSHTP/PCSHTP has undefined + * behavior in the instruction set, so using it is not recommended because + * of compatibility with future processors. + * + * 5) "wait ma_c=1" waits for all memory accesses including those issued + * by SPILL/FILL operations. It does _not_ wait for SPILL/FILL itself. + * + * 6) Because of hardware bug #102582 "flushr" shouldn't be in the first + * command after "call". 
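+ * + * As an illustration of rule 1, NATIVE_FLUSHC below pads "flushc" with + * "nop" so that any following "rr"/"rrd" of the stack registers lands in + * a separate wide instruction: + * + *	{nop 2} {flushc; nop 3} + *	{rrd %pcsp.hi, %dr0}	<- must not share a wide instruction with "flushc"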
+ */ + +#define NATIVE_FLUSHR \ +do { \ + asm volatile ("{nop} {flushr}" ::: "memory"); \ +} while (0) + +#define NATIVE_FLUSHC \ +do { \ + asm volatile ("{nop 2} {flushc; nop 3}" ::: "memory"); \ +} while (0) + +#define NATIVE_FLUSHCPU \ +do { \ + asm volatile ("{nop 2} {flushc; nop 3} {flushr}" ::: "memory"); \ +} while (0) + +#define NATIVE_FLUSH_ALL_TC \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("{nop 3; invtc 0x0, %0}" \ + : "=r" (res)); \ + res; \ +}) + +#define DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \ + under_upsr_off, under_upsr_bool) \ +({ \ + asm volatile ( \ + "{\n\t" \ + " stw %%dg" #greg_no ", [%0], %2\n\t" \ + " stb %%dg" #greg_no ", [%1], %3\n\t" \ + "}" \ + : \ + : "ri" ((__e2k_u64_t)(psr_off)), \ + "ri" ((__e2k_u64_t)(under_upsr_off)), \ + "r" ((__e2k_u32_t)(psr_value)), \ + "r" ((__e2k_u8_t)(under_upsr_bool))); \ +}) +#define KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \ + under_upsr_off, under_upsr_bool) \ + DO_ATOMIC_WRITE_PSR_REG_VALUE(greg_no, psr_off, psr_value, \ + under_upsr_off, under_upsr_bool) \ + +#define NATIVE_GET_TCD() \ +({ \ + register __e2k_u64_t res; \ + asm volatile ( \ + "\n\t{gettc \t0x1 , %%ctpr1; nop 5}" \ + "\n\trrd \t%%ctpr1, %0" \ + : "=r" (res) : : "ctpr1" ); \ + res; \ +}) + +#define NATIVE_SET_TCD(val) \ +({ \ + asm volatile ("{puttc %0, 0x0 , %%tcd}" \ + : \ + :"r" (val)); \ +}) + +#define E2K_BUBBLE(num) \ +do { \ + asm volatile ("{nop %0}" \ + : \ + : "i" (num & 0x7) \ + : "memory"); \ +} while (0) + +/* Add ctpr3 to clobbers to explain to lcc that this + * GNU asm does a return. */ +#define E2K_DONE \ +do { \ + /* #80747: must repeat interrupted barriers */ \ + asm volatile ("{nop 3; wait st_c=1} {done}" ::: "ctpr3"); \ +} while (0) + +#define E2K_SYSCALL_RETURN E2K_RETURN +#define E2K_RETURN(rval) \ +do { \ + asm volatile( "{\n" \ + "return %%ctpr3\n" \ + "addd %[r0], 0, %%dr0\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + :: [r0] "ir" (rval) \ + : "ctpr3"); \ +} while (0) + +#define E2K_EMPTY_CMD(input...) 
\ +do { \ + asm volatile ("{nop}" :: input); \ +} while (0) + +#define E2K_PSYSCALL_RETURN(r0, r1, r2, r3, tag2, tag3) \ +do { \ + asm volatile ( "{\n" \ + "return %%ctpr3\n" \ + "puttagd %[_r2], %[_tag2], %%dr2\n" \ + "puttagd %[_r3], %[_tag3], %%dr3\n" \ + "addd %[_r0], 0, %%dr0\n" \ + "addd %[_r1], 0, %%dr1\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + :: [_r0] "ir" (r0), [_r1] "ir" (r1), \ + [_r2] "ir" (r2), [_r3] "ir" (r3), \ + [_tag2] "ir" (tag2), [_tag3] "ir" (tag3) \ + : "ctpr3"); \ +} while (0) + + +#define GET_USER_ASM(_x, _addr, fmt, __ret_gu) \ + asm ( \ + "1:\n" \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_ISET_V6 version */ \ + "{ld" #fmt "[ %[addr] + 0 ], %[x]\n" \ + " adds 0, 0, %[ret]\n" \ + " nop 4}\n" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{ld" #fmt "[ %[addr] + 0 ], %[x]\n" \ + " adds 0, 0, %[ret]\n" \ + " nop 2}\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:{adds 0, %[efault], %[ret]\n" \ + " ibranch 2b}\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".dword 1b, 3b\n" \ + ".previous\n" \ + : [ret] "=r" (__ret_gu), [x] "=r"(_x) \ + : [addr] "m" (*(_addr)), [efault] "i" (-EFAULT), \ + [facility] "i" (CPU_FEAT_ISET_V6)) \ + +#define PUT_USER_ASM(x, ptr, fmt, retval) \ + asm ("1:{st" #fmt "%1, %2\n" \ + " adds 0, 0, %0}\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n{adds 0, %3, %0\n" \ + " ibranch 2b}\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".dword 1b, 3b\n" \ + ".previous\n" \ + : "=r" (retval), "=m" (*ptr) \ + : "r" (x), "i" (-EFAULT)) + +#define LOAD_UNALIGNED_ZEROPAD(_addr) \ +({ \ + u64 *__addr = (u64 *) (_addr); \ + u64 _ret, _aligned_addr, _offset; \ + asm ( "1:\n" \ + " ldd [ %[addr] + 0 ], %[ret]\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\n" \ + "{\n" \ + " andnd %[addr_val], 7, %[aligned_addr]\n" \ + " andd %[addr_val], 7, %[offset]\n" \ + "}\n" \ + "{\n" \ + " nop 4\n" \ + " ldd [ %[aligned_addr] + 0 ], %[ret]\n" \ + " shld %[offset], 3, %[offset]\n" \ + "}\n" \ + "{\n" \ + " shrd %[ret], %[offset], %[ret]\n" \ + " ibranch 2b\n" \ + "}\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".dword 1b, 3b\n" \ + ".previous\n" \ + : [ret] "=&r" (_ret), [offset] "=&r" (_offset), \ + [aligned_addr] "=&r" (_aligned_addr) \ + : [addr] "m" (*__addr), \ + [addr_val] "r" (__addr)); \ + _ret; \ +}) + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +# define __EMIT_BUG(_flags) \ + asm ("1:\n" \ + "{.word 0x00008001\n" /* SETSFT */ \ + " .word 0x28000000}\n" \ + ".section .rodata.str,\"aMS\",@progbits,1\n" \ + "2: .asciz \""__FILE__"\"\n" \ + ".previous\n" \ + ".section __bug_table,\"aw\"\n" \ + "3:\n" \ + ".word 1b - 3b\n" /* bug_entry:bug_addr_disp */ \ + ".word 2b - 3b\n" /* bug_entry:file_disp */ \ + ".short %[line]\n" /* bug_entry:line */ \ + ".short %[flags]\n" /* bug_entry:flags */ \ + ".org 3b + %[entry_size]\n" \ + ".previous\n" \ + :: [line] "i" (__LINE__), [flags] "i" (_flags), \ + [entry_size] "i" (sizeof(struct bug_entry))) + +#else + +# define __EMIT_BUG(_flags) \ + asm ("1:\n" \ + "{.word 0x00008001\n" /* SETSFT */ \ + " .word 0x28000000}\n" \ + ".section __bug_table,\"aw\"\n" \ + "3:\n" \ + ".word 1b - 3b\n" /* bug_entry:bug_addr_disp */ \ + ".short %[flags]\n" /* bug_entry:flags */ \ + ".org 3b + %[entry_size]\n" \ + ".previous\n" \ + :: [flags] "i" (_flags), \ + [entry_size] "i" (sizeof(struct bug_entry))) + +#endif + 
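+/* + * Usage sketch for GET_USER_ASM()/PUT_USER_ASM() above (illustrative; + * "uptr" stands for some user-space u64 pointer). The "d" format picks + * the 64-bit ldd/std forms; a faulting access is redirected through + * .fixup/__ex_table so that "ret" becomes -EFAULT instead of an oops: + * + *	u64 val; + *	int ret; + *	GET_USER_ASM(val, uptr, d, ret); + *	if (ret) + *		return ret; + */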
+#ifndef __ASSEMBLY__ +/* new version */ +/* + * This code is used before calling printk in special procedures; + * the sp register is used to pass parameters to printk. + */ +static inline void E2K_SET_USER_STACK(int x) +{ + register __e2k_ptr_t sp asm ("%SP"); + if (__builtin_constant_p(x) ) { + if (x) { + asm volatile ("{getsp -1024, %0\n\t}" + : "=r" (sp)); + } + } else { + /* special case for a compiler error */ + /* works around a gcc warning */ +#ifdef __LCC__ + asm ("" : : "i"(x)); /* hook: the parameter must be constant */ +#endif /* __LCC__ */ + } +} +#endif /* __ASSEMBLY__ */ + + +#define E2K_GET_FP() \ +({ \ + register __e2k_ptr_t res; \ + asm volatile ("addd \t0x0, %F0, %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_FP( val) \ +({ \ + asm volatile ("addd \t0x0, %0, %F0" \ + : \ + : "ri" ((__e2k_ptr_t) val)); \ +}) + +#define E2K_GET_SP() \ +({ \ + register __e2k_ptr_t res; \ + asm volatile ("addd \t0x0, %S0, %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_SP( val) \ +({ \ + asm volatile ("addd \t0x0, %0, %S0" \ + : \ + : "ri" ((__e2k_ptr_t) val)); \ +}) + +#define E2K_NOP(nr) __asm__ __volatile__("{nop " #nr "}" ::: "memory") + +#ifdef CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) +#endif + +#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION +# define NATIVE_FILL_HARDWARE_STACKS() \ + asm volatile ("{fillc; fillr}" ::: "memory") +#else +# define NATIVE_FILL_HARDWARE_STACKS() \ +do { \ + asm volatile ( \ + "{\n" \ + "nop 4\n" \ + "return %%ctpr3\n" \ + "movtd [ 0f ], %%dg" __stringify(GUEST_VCPU_STATE_GREG) "\n" \ + "}\n" \ + "{\n" \ + "rrd %%wd, %%dg" __stringify(CURRENT_TASK_GREG) "\n" \ + "}\n" \ + "{\n" \ + "rrd %%br, %%dg" __stringify(SMP_CPU_ID_GREG) "\n" \ + "ct %%ctpr3\n" \ + "}\n" \ + "0:\n" \ + "{\n" \ + "rwd %%dg" __stringify(CURRENT_TASK_GREG) ", %%wd\n" \ + "}\n" \ + "{\n" \ + "rwd %%dg" __stringify(SMP_CPU_ID_GREG) ", %%br\n" \ + "}\n" \ + "{\n" \ + "nop 3\n" \ + SMP_ONLY("ldw %%dg" __stringify(GUEST_VCPU_STATE_GREG) ", " \ + "%[task_ti_cpu_delta], " \ + "%%dg" __stringify(SMP_CPU_ID_GREG) "\n") \ + "subd %%dg" __stringify(GUEST_VCPU_STATE_GREG) ", " \ + "%[task_ti_offset], " \ + "%%dg" __stringify(CURRENT_TASK_GREG) "\n" \ + "}\n" \ + "{\n" \ + "nop\n" /* For "rwd %wd" */ \ + "}\n" \ + :: SMP_ONLY([task_ti_cpu_delta] "i" (offsetof(struct task_struct, cpu) - \ + offsetof(struct task_struct, thread_info)),) \ + [task_ti_offset] "i" (offsetof(struct task_struct, thread_info)) \ + : "ctpr1", "ctpr3", "memory"); \ +} while (0) +#endif + +#ifndef __ASSEMBLY__ + +#define E2K_PARALLEL_WRITE(addr1, val1, addr2, val2) \ +{ \ + asm volatile ("{\n\t" \ + " std 0x0, %2, %4\n\t" \ + " std 0x0, %3, %5\n\t" \ + "}" \ + : "=m" (*(addr1)), "=m" (*(addr2)) \ + : "r" (addr1), "r" (addr2), "r" (val1), "r" (val2)); \ +} + +/* + * Macros to construct an alternative return point from a trap + */ + +#define STICK_ON_REG(reg) asm( #reg ) + +#define SAVE_CURRENT_ADDR(_ptr) \ +do { \ + unsigned long long _tmp; \ + _Pragma("no_asm_inline") \ + asm volatile ("movtd [ 0f ], %[tmp]\n" \ + "std [ %[ptr] ], %[tmp]\n" \ + "0:" \ + : [ptr] "=m" (*(_ptr)), [tmp] "=&r" (_tmp)); \ +} while (0) + +#define DO_FUNC_TO_NAME(func) #func +#define FUNC_TO_NAME(func) DO_FUNC_TO_NAME(func) + +#define GET_LBL_ADDR(name, where) \ + _Pragma("no_asm_inline") \ + asm ("movtd [" name "], %0" : "=r" (where)) + +#define E2K_JUMP(func) E2K_JUMP_WITH_ARGUMENTS(func, 0) + +#define E2K_JUMP_WITH_ARGUMENTS(func, num_args, ...) 
\ + __E2K_JUMP_WITH_ARGUMENTS_##num_args(func, ##__VA_ARGS__) + +#define __E2K_JUMP_WITH_ARGUMENTS_0(func) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %0\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + :: "i" (&(func)) : "ctpr1"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_1(func, arg1) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %1\n" \ + "addd %0, 0, %%dr0\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "i" (&(func)) \ + : "ctpr1", "r0"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_2(func, arg1, arg2) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %2\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), "i" (&(func)) \ + : "ctpr1", "r0", "r1"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_3(func, arg1, arg2, arg3) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %3\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_4(func, arg1, arg2, arg3, arg4) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %4\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_5(func, arg1, arg2, arg3, arg4, arg5) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %5\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_WITH_ARGUMENTS_6(func, \ + arg1, arg2, arg3, arg4, arg5, arg6) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %6\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_FUNC_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, %7\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "{\n" \ + "addd %6, 0, %%dr6\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), \ + "ri" ((u64) (arg7)), "i" (&(func)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6"); \ + unreachable(); \ +} while (0) + +#define __E2K_JUMP_FUNC_ADDR_WITH_ARGUMENTS_7(_func_addr, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ +do { \ + asm 
volatile ("{\n" \ + "movtd,0,sm %[func_addr], %%ctpr1\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "}\n" \ + "{\n" \ + "addd %5, 0, %%dr5\n" \ + "addd %6, 0, %%dr6\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : [func_addr] "r" (_func_addr), \ + "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), \ + "ri" ((u64) (arg7)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6"); \ + unreachable(); \ +} while (false) +#define __E2K_JUMP_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, is_name) \ +do { \ + if (is_name) { \ + __E2K_JUMP_FUNC_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7); \ + } else { \ + __E2K_JUMP_FUNC_ADDR_WITH_ARGUMENTS_7(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7); \ + } \ +} while (false) + +#define __E2K_JUMP_FUNC_WITH_ARGUMENTS_8(func_name, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, " func_name "\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "{\n" \ + "addd %6, 0, %%dr6\n" \ + "addd %7, 0, %%dr7\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : "ri" ((u64) (arg1)), "ri" ((u64) (arg2)), \ + "ri" ((u64) (arg3)), "ri" ((u64) (arg4)), \ + "ri" ((u64) (arg5)), "ri" ((u64) (arg6)), \ + "ri" ((u64) (arg7)), "ri" ((u64) (arg8)) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6", \ + "r7"); \ + unreachable(); \ +} while (0) +#define __E2K_JUMP_WITH_ARGUMENTS_8(func, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \ + __E2K_JUMP_FUNC_WITH_ARGUMENTS_8(FUNC_TO_NAME(func), \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + +#define E2K_GOTO_ARG0(func) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("ibranch " #func "\n" :: ); \ +} while (0) +#define E2K_GOTO_ARG1(label, arg1) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + : \ + : "ri" ((__e2k_u64_t) (arg1)) \ + ); \ +} while (false) +#define E2K_GOTO_ARG2(label, arg1, arg2) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "addd \t 0, %1, %%dr1\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + : \ + : "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)) \ + ); \ +} while (false) +#define E2K_GOTO_ARG3(label, arg1, arg2, arg3) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "addd \t 0, %1, %%dr1\n" \ + "addd \t 0, %2, %%dr2\n" \ + "ibranch \t" #label "\n" \ + "}\n" \ + : \ + : "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)), \ + "ri" ((__e2k_u64_t) (arg3)) \ + ); \ +} while (false) +#define E2K_GOTO_AND_RETURN_ARG6(label, \ + arg1, arg2, arg3, arg4, arg5, arg6) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ("\n" \ + "{\n" \ + "addd \t 0, %0, %%dr0\n" \ + "addd \t 0, %1, %%dr1\n" \ + "addd \t 0, %2, %%dr2\n" \ + "addd \t 0, %3, %%dr3\n" \ + "addd \t 0, %4, %%dr4\n" \ + "addd \t 0, %5, %%dr5\n" \ + "}\n" \ + "{\n" \ + "rrd \t %%nip, %%dr6\n" \ + "ibranch \t" #label \ + "}\n" \ + : \ + : "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)), \ + "ri" ((__e2k_u64_t) (arg3)), \ + "ri" ((__e2k_u64_t) (arg4)), \ + "ri" ((__e2k_u64_t) (arg5)), \ + "ri" ((__e2k_u64_t) (arg6)) \ + ); \ +} while (false) 
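+/* + * Usage sketch for the jump macros above (illustrative; "my_handler" is + * a hypothetical target): the argument count selects the matching + * __E2K_JUMP_WITH_ARGUMENTS_<N> expansion, which loads %dr0..%dr<N-1> + * and transfers control through %ctpr1 without returning: + * + *	E2K_JUMP_WITH_ARGUMENTS(my_handler, 2, arg0, arg1); + */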
+#define E2K_COND_GOTO(label, cond, pred_no) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "\ncmpesb \t0, %0, %%pred" #pred_no \ + "\n{" \ + "\nibranch \t" #label " ? ~%%pred" #pred_no \ + "\n}" \ + : \ + : "ri" ((__e2k_u32_t) (cond)) \ + : "pred" #pred_no \ + ); \ +} while (false) +#define E2K_COND_GOTO_ARG1(label, cond, pred_no, arg1) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "\ncmpesb \t0, %0, %%pred" #pred_no \ + "\n{" \ + "\naddd \t 0, %1, %%dr0 ? ~%%pred" #pred_no \ + "\nibranch \t" #label " ? ~%%pred" #pred_no \ + "\n}" \ + : \ + : "ri" ((__e2k_u32_t) (cond)), \ + "ri" ((__e2k_u64_t) (arg1)) \ + : "pred" #pred_no \ + ); \ +} while (false) +#define E2K_COND_GOTO_ARG2(label, cond, pred_no, arg1, arg2) \ +do { \ +_Pragma("no_asm_inline") \ + asm volatile ( \ + "\ncmpesb \t0, %0, %%pred" #pred_no \ + "\n{" \ + "\naddd \t 0, %1, %%dr0 ? ~%%pred" #pred_no \ + "\naddd \t 0, %2, %%dr1 ? ~%%pred" #pred_no \ + "\nibranch \t" #label " ? ~%%pred" #pred_no \ + "\n}" \ + : \ + : "ri" ((__e2k_u32_t) (cond)), \ + "ri" ((__e2k_u64_t) (arg1)), \ + "ri" ((__e2k_u64_t) (arg2)) \ + : "pred" #pred_no \ + ); \ +} while (false) +#define DEF_COND_GOTO(label, cond) \ + E2K_COND_GOTO(label, cond, 0) +#define DEF_COND_GOTO_ARG1(label, cond, arg1) \ + E2K_COND_GOTO_ARG1(label, cond, 0, arg1) +#define DEF_COND_GOTO_ARG2(label, cond, arg1, arg2) \ + E2K_COND_GOTO_ARG2(label, cond, 0, arg1, arg2) + +#define E2K_JUMP_ABSOLUTE_WITH_ARGUMENTS_1(func, arg1) \ +do { \ + asm volatile ("{\n" \ + "movtd %[_func], %%ctpr1\n" \ + "addd %[_arg1], 0, %%dr0\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : [_func] "ir" (func), \ + [_arg1] "ri" (arg1) \ + : "ctpr1", "r0"); \ + unreachable(); \ +} while (0) + +#define E2K_JUMP_ABSOLUTE_WITH_ARGUMENTS_2(func, arg1, arg2) \ +do { \ + asm volatile ("{\n" \ + "movtd %[_func], %%ctpr1\n" \ + "addd %[_arg1], 0, %%dr0\n" \ + "addd %[_arg2], 0, %%dr1\n" \ + "}\n" \ + "ct %%ctpr1\n" \ + : \ + : [_func] "ir" (func), \ + [_arg1] "ri" (arg1), [_arg2] "ri" (arg2) \ + : "ctpr1", "r0", "r1"); \ + unreachable(); \ +} while (0) + +#define E2K_GOTO_INTEGER_LABEL_ARGS_0(_func) \ +({ \ + asm volatile ( \ + "{\n" \ + "movtd %[func], %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : [func] "r" (_func) \ + : "ctpr1"); \ +}) + +#define __E2K_RESTART_TTABLE_ENTRY10_C(func, arg0, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7, tags) \ +do { \ + asm volatile ("{\n" \ + "disp %%ctpr1, " #func "\n" \ + "addd %0, 0, %%dr0\n" \ + "addd %1, 0, %%dr1\n" \ + "addd %2, 0, %%dr2\n" \ + "addd %3, 0, %%dr3\n" \ + "addd %4, 0, %%dr4\n" \ + "addd %5, 0, %%dr5\n" \ + "}\n" \ + "{\n" \ + "addd %6, 0, %%dr6\n" \ + "addd %7, 0, %%dr7\n" \ + "addd %8, 0, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr0, %%dr8, %%dr0\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr1, %%dr8, %%dr1\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr2, %%dr8, %%dr2\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr3, %%dr8, %%dr3\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr4, %%dr8, %%dr4\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr5, %%dr8, %%dr5\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr6, %%dr8, %%dr6\n" \ + "shrs %%dr8, 4, %%dr8\n" \ + "}\n" \ + "{\n" \ + "puttagd %%dr7, %%dr8, %%dr7\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : \ + : "ri" (arg0), "ri" (arg1), "ri" (arg2), "ri" (arg3), \ + "ri" (arg4), "ri" (arg5), "ri" (arg6), "ri" (arg7), \ + "ri" (tags) \ + : "ctpr1", 
"r0", "r1", "r2", "r3", "r4", "r5", "r6", \ + "r7", "r8"); \ + unreachable(); \ +} while (0) + +#define __E2K_RESTART_TTABLE_ENTRY8_C(func, _sys_num, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, _tags) \ +do { \ + u64 tag_lo, tag_hi; \ + asm volatile ( \ + "{\n" \ + "disp %%ctpr1, " #func "\n" \ + "shrd,1 %[tags], 8, %[tag_lo]\n" \ + "shrd,4 %[tags], 12, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a1], %[tag_lo], %%dr2\n" \ + "puttagd,5 %[a2], %[tag_hi], %%dr3\n" \ + "shrd,1 %[tags], 16, %[tag_lo]\n" \ + "shrd,4 %[tags], 20, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a3], %[tag_lo], %%dr4\n" \ + "puttagd,5 %[a4], %[tag_hi], %%dr5\n" \ + "shrd,1 %[tags], 24, %[tag_lo]\n" \ + "shrd,4 %[tags], 28, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a5], %[tag_lo], %%dr6\n" \ + "puttagd,5 %[a6], %[tag_hi], %%dr7\n" \ + "shrd,1 %[tags], 32, %[tag_lo]\n" \ + "shrd,4 %[tags], 36, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a7], %[tag_lo], %%dr8\n" \ + "puttagd,5 %[a8], %[tag_hi], %%dr9\n" \ + "shrd,1 %[tags], 40, %[tag_lo]\n" \ + "shrd,4 %[tags], 44, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a9], %[tag_lo], %%dr10\n" \ + "puttagd,5 %[a10], %[tag_hi], %%dr11\n" \ + "shrd,1 %[tags], 48, %[tag_lo]\n" \ + "shrd,4 %[tags], 52, %[tag_hi]\n" \ + "}\n" \ + "{\n" \ + "puttagd,2 %[a11], %[tag_lo], %%dr12\n" \ + "puttagd,5 %[a12], %[tag_hi], %%dr13\n" \ + "adds 0, %[sys_num], %%r0\n" \ + "ct %%ctpr1\n" \ + "}\n" \ + : [tag_lo] "=&r" (tag_lo), [tag_hi] "=&r" (tag_hi) \ + : [sys_num] "ri" (_sys_num), [a1] "ri" (arg1), \ + [a2] "ri" (arg2), [a3] "ri" (arg3), [a4] "ri" (arg4), \ + [a5] "ri" (arg5), [a6] "ri" (arg6), [a7] "ri" (arg7), \ + [a8] "ri" (arg8), [a9] "ri" (arg9), [a10] "ri" (arg10), \ + [a11] "ri" (arg11), [a12] "ri" (arg12), [tags] "ri" (_tags) \ + : "ctpr1", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ + "r8", "r9", "r10", "r11", "r12", "r13"); \ + unreachable(); \ +} while (0) + +/* Important: delay after FPU reading is 9 cycles for 0 cluster + * and 11 for 1 cluster, thus the NOPs. */ +#define E2K_GETCONTEXT(fpcr, fpsr, pfpfr, pcsp_lo, pcsp_hi) \ +do { \ + u64 __pcshtp; \ + asm volatile ("rrs %%fpcr, %0\n" \ + "rrs %%fpsr, %1\n" \ + "rrs %%pfpfr, %2\n" \ + "rrd %%pcshtp, %5\n" \ + "rrd %%pcsp.lo, %3\n" \ + "{rrd %%pcsp.hi, %4\n" \ + "shld %5, 53, %5}\n" \ + "sard %5, 53, %5\n" \ + "{addd %4, %5, %4\n" \ + "nop 5}\n" \ + : "=r" (fpcr), "=r" (fpsr), "=r" (pfpfr), \ + "=r" (pcsp_lo), "=r" (pcsp_hi), "=r" (__pcshtp) \ + : ); \ +} while (0) + +#define E2K_CLEAR_RF_108() \ +do { \ + asm volatile ( \ + "{\n" \ + "nop 3\n" \ + "disp %%ctpr1, 1f\n" \ + "setwd wsz=108\n" \ + "setbn rbs=0, rsz=62, rcur=0\n" \ + "rwd 21UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "{\n" \ + "disp %%ctpr2, 2f\n" \ + "}\n" \ + "1:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[42]\n" \ + "addd 0, 0, %%db[43]\n" \ + "addd 0, 0, %%db[84]\n" \ + "addd 0, 0, %%db[85]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr1 ? %%NOT_LOOP_END\n" \ + "}\n" \ + "{\n" \ + "nop 4\n" \ + "setbn rbs=63, rsz=44, rcur=0\n" \ + "rwd 15UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "2:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[32]\n" \ + "addd 0, 0, %%db[33]\n" \ + "addd 0, 0, %%db[64]\n" \ + "addd 0, 0, %%db[65]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr2 ? 
%%NOT_LOOP_END\n" \ + "}\n" \ + ::: "ctpr1", "ctpr2"); \ +} while (0) + +#define E2K_CLEAR_RF_112() \ +do { \ + asm volatile ( \ + "{\n" \ + "nop 3\n" \ + "disp %%ctpr1, 1f\n" \ + "setwd wsz=112\n" \ + "setbn rbs=0, rsz=62, rcur=0\n" \ + "rwd 21UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "{\n" \ + "disp %%ctpr2, 2f\n" \ + "}\n" \ + "1:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[42]\n" \ + "addd 0, 0, %%db[43]\n" \ + "addd 0, 0, %%db[84]\n" \ + "addd 0, 0, %%db[85]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr1 ? %%NOT_LOOP_END\n" \ + "}\n" \ + "{\n" \ + "nop 4\n" \ + "setbn rbs=63, rsz=48, rcur=0\n" \ + "rwd 16UL | (1UL << 37), %%lsr\n" \ + "}\n" \ + "2:" \ + "{\n" \ + "loop_mode\n" \ + "addd 0, 0, %%db[0]\n" \ + "addd 0, 0, %%db[1]\n" \ + "addd 0, 0, %%db[32]\n" \ + "addd 0, 0, %%db[33]\n" \ + "addd 0, 0, %%db[64]\n" \ + "addd 0, 0, %%db[65]\n" \ + "alc alcf = 1, alct = 1\n" \ + "abn abnf = 1, abnt = 1\n" \ + "ct %%ctpr2 ? %%NOT_LOOP_END\n" \ + "}\n" \ + "{\n" \ + "addd 0, 0, %%db[64]\n" \ + "addd 0, 0, %%db[65]\n" \ + "}\n" \ + ::: "ctpr1", "ctpr2"); \ +} while (0) + +#define E2K_CLEAR_CTPRS() \ +do { \ + __e2k_u64_t reg; \ + asm volatile ( \ + "{\n" \ + "puttagd 0, 5, %0\n" \ + "}\n" \ + "{\n" \ + "movtd,s %0, %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "movtd,s %0, %%ctpr2\n" \ + "}\n" \ + "{\n" \ + "movtd,s %0, %%ctpr3\n" \ + "}\n" \ + : "=r" (reg) \ + : \ + : "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + +#define NATIVE_RESTORE_COMMON_REGS_VALUES(_ctpr1, _ctpr2, _ctpr3, _ctpr1_hi, \ + _ctpr2_hi, _ctpr3_hi, _lsr, _lsr1, _ilcr, _ilcr1) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ( \ + "{\n" \ + "rwd %[ctpr2], %%ctpr2\n" \ + "}\n" \ + \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_TRAP_V5 version */ \ + \ + "{\n" \ + "rwd %[ctpr3], %%ctpr3\n" \ + "}\n" \ + "{\n" \ + "rwd %[ctpr1], %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "addd %[lsr1], 0, %%db[1]\n" \ + "addd %[ilcr1], 0, %%db[3]\n" \ + "}\n" \ + /* rwd %db[1], %%lsr1 */ \ + ".word 0x04100011; .word 0x3dc001c3\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + /* rwd %db[3], %%ilcr1 */ \ + ".word 0x04100011; .word 0x3dc003c7\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + \ + ALTERNATIVE_2_ALTINSTR2 \ + /* CPU_FEAT_TRAP_V6 version */ \ + \ + "{\n" \ + "rwd %[ctpr3], %%ctpr3\n" \ + "addd %[ctpr1_hi], %%db[0]\n" \ + "addd %[ctpr2_hi], %%db[2]\n" \ + "}\n" \ + "{\n" \ + "rwd %[ctpr1], %%ctpr1\n" \ + "addd %[ctpr3_hi], %%db[4]\n" \ + "addd %[lsr1], 0, %%db[1]\n" \ + "}\n" \ + /* rwd %db[0], %%ctpr1.hi */ \ + ".word 0x04100011; .word 0x3dc00019\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + /* rwd %db[2], %%ctpr2.hi */ \ + ".word 0x04100011; .word 0x3dc0021a\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + /* rwd %db[4], %%ctpr3.hi */ \ + ".word 0x04100011; .word 0x3dc0041b\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "addd %[ilcr1], 0, %%db[3]\n" \ + "}\n" \ + /* rwd %db[1], %%lsr1 */ \ + ".word 0x04100011; .word 0x3dc001c3\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + /* rwd %db[3], %%ilcr1 */ \ + ".word 0x04100011; .word 0x3dc003c7\n" \ + ".word 0x01c00000; .word 0x00000000\n" \ + \ + ALTERNATIVE_3_OLDINSTR2 \ + \ + "{\n" \ + "rwd %[ctpr3], %%ctpr3\n" \ + "}\n" \ + "{\n" \ + "rwd %[ctpr1], %%ctpr1\n" \ + "}\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "}\n" \ + "{\n" \ + "rwd %[ilcr], 
%%ilcr\n" \ + "}\n" \ + \ + ALTERNATIVE_4_FEATURE2(%[facility1], %[facility2]) \ + :: [ctpr1] "r" (_ctpr1), [ctpr2] "r" (_ctpr2), \ + [ctpr3] "r" (_ctpr3), [ctpr1_hi] "r" (_ctpr1_hi), \ + [ctpr2_hi] "r" (_ctpr2_hi), [ctpr3_hi] "r" (_ctpr3_hi), \ + [lsr] "r" (_lsr), [lsr1] "r" (_lsr1), \ + [ilcr] "r" (_ilcr), [ilcr1] "r" (_ilcr1), \ + [facility1] "i" (CPU_FEAT_TRAP_V5), \ + [facility2] "i" (CPU_FEAT_TRAP_V6) \ + : "memory", "b[0]", "b[1]", "b[2]", "b[3]", "b[4]"); \ +} while (0) + +#define NATIVE_RESTORE_KERNEL_GREGS(_k_gregs) \ +do { \ + u64 f16, f17, f18, f19, tmp1, tmp2; \ + _Pragma("no_asm_inline") \ + asm volatile ( \ + ALTERNATIVE_1_ALTINSTR \ + /* iset v5 version - restore qp registers extended part */ \ + \ + "{\n" \ + "addd,2 %[k_gregs], %%db[0]\n" \ + "addd,5 %[k_gregs], %%db[1]\n" \ + "}\n" \ + /* "{ldrqp,2 [ %%db[0] + 0x50400000000 ], %%g16\n" \ + " ldrqp,5 [ %%db[1] + 0x50400000010 ], %%g17}\n" */ \ + ".word 0x92400033\n" \ + ".word 0x6b00dcf0\n" \ + ".word 0x6b01def1\n" \ + ".word 0x02c002c0\n" \ + ".word 0x00000504\n" \ + ".word 0x00000010\n" \ + ".word 0x00000504\n" \ + ".word 0x00000000\n" \ + /* "{ldrqp,2 [ %%db[0] + 0x50400000020 ], %%g18\n" \ + " ldrqp,5 [ %%db[1] + 0x50400000030 ], %%g19}\n" */ \ + ".word 0x92400033\n" \ + ".word 0x6b00dcf2\n" \ + ".word 0x6b01def3\n" \ + ".word 0x02c002c0\n" \ + ".word 0x00000504\n" \ + ".word 0x00000030\n" \ + ".word 0x00000504\n" \ + ".word 0x00000020\n" \ + \ + ALTERNATIVE_2_OLDINSTR \ + /* Original instruction - restore only 16 bits */ \ + \ + "{\n" \ + "ldrd,2 [ %[k_gregs] + 0x50400000000 ], %%g16\n" \ + "ldrd,5 [ %[k_gregs] + 0x50400000010 ], %%g17\n" \ + "}\n" \ + "{\n" \ + "ldrd,2 [ %[k_gregs] + 0x50400000020 ], %%g18\n" \ + "ldrd,5 [ %[k_gregs] + 0x50400000030 ], %%g19\n" \ + "}\n" \ + "{\n" \ + "ldh,0 [ %[k_gregs] + 0x8 ], %[f16]\n" \ + "ldh,3 [ %[k_gregs] + 0x18 ], %[f17]\n" \ + "ldh,2 [ %[k_gregs] + 0x28 ], %[f18]\n" \ + "ldh,5 [ %[k_gregs] + 0x38 ], %[f19]\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %%g16, %[tmp1]\n" \ + "gettagd,5 %%g17, %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "cmpesb,0 0x0, %[tmp1], %%pred16\n" \ + "cmpesb,3 0x0, %[tmp2], %%pred17\n" \ + "gettagd,2 %%g18, %[tmp1]\n" \ + "gettagd,5 %%g19, %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "cmpesb,0 0x0, %[tmp1], %%pred18\n" \ + "cmpesb,3 0x0, %[tmp2], %%pred19\n" \ + "}\n" \ + "{\n" \ + "movif,0 %%g16, %[f16], %%g16 ? %%pred16\n" \ + "movif,3 %%g17, %[f17], %%g17 ? %%pred17\n" \ + "}\n" \ + "{\n" \ + "movif,0 %%g18, %[f18], %%g18 ? %%pred18\n" \ + "movif,3 %%g19, %[f19], %%g19 ? 
%%pred19\n" \ + "}\n" \ + \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + : [f16] "=&r" (f16), [f17] "=&r" (f17), [f18] "=&r" (f18), \ + [f19] "=&r" (f19), [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \ + : [k_gregs] "m" (*(_k_gregs)), [facility] "i" (CPU_FEAT_QPREG) \ + : "g16", "g17", "g18", "g19", \ + "pred16", "pred17", "pred18", "pred19"); \ +} while (0) + +#define NATIVE_RESTORE_HOST_GREGS(_h_gregs) \ +do { \ + u64 f20, f21, tmp1, tmp2; \ + _Pragma("no_asm_inline") \ + asm volatile ( \ + ALTERNATIVE_1_ALTINSTR \ + /* iset v5 version - restore qp registers extended part */ \ + \ + "{\n" \ + "addd,2 %[h_gregs], %%db[0]\n" \ + "addd,5 %[h_gregs], %%db[1]\n" \ + "}\n" \ + /* "{ldrqp,2 [ %%db[0] + 0x50400000000 ], %%g20\n" \ + "ldrqp,5 [ %%db[1] + 0x50400000010 ], %%g21}\n" */ \ + ".word 0x92400033\n" \ + ".word 0x6b00dcf4\n" \ + ".word 0x6b01def5\n" \ + ".word 0x02c002c0\n" \ + ".word 0x00000504\n" \ + ".word 0x00000010\n" \ + ".word 0x00000504\n" \ + ".word 0x00000000\n" \ + \ + ALTERNATIVE_2_OLDINSTR \ + /* Original instruction - restore only 16 bits */ \ + \ + "{\n" \ + "ldrd,2 [ %[h_gregs] + 0x50400000000 ], %%g20\n" \ + "ldrd,5 [ %[h_gregs] + 0x50400000010 ], %%g21\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "ldh,0 [ %[h_gregs] + 0x8 ], %[f20]\n" \ + "ldh,3 [ %[h_gregs] + 0x18 ], %[f21]\n" \ + "}\n" \ + "{\n" \ + "gettagd,2 %%g20, %[tmp1]\n" \ + "gettagd,5 %%g21, %[tmp2]\n" \ + "}\n" \ + "{\n" \ + "nop 1\n" \ + "cmpesb,0 0x0, %[tmp1], %%pred20\n" \ + "cmpesb,3 0x0, %[tmp2], %%pred21\n" \ + "}\n" \ + "{\n" \ + "movif,0 %%g20, %[f20], %%g20 ? %%pred20\n" \ + "movif,3 %%g21, %[f21], %%g21 ? %%pred21\n" \ + "}\n" \ + \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + : [f20] "=&r" (f20), [f21] "=&r" (f21), \ + [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \ + : [h_gregs] "m" (*(_h_gregs)), [facility] "i" (CPU_FEAT_QPREG) \ + : "g20", "g21", "pred20", "pred21"); \ +} while (0) + + +#define LDRD(addr) \ +({ \ + register long __dres; \ + asm volatile ("{ldrd [%1], %0\n}" \ + : "=r"(__dres) \ + : "m" (*((unsigned long long *)(addr)))); \ + __dres; \ +}) + +/* Since v6 this got replaced with "wait int=1,mem_mod=1" */ +#define C1_WAIT_TRAP_V3() \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("wait trap=1" ::: "memory"); \ +} while (0) + +#define C3_WAIT_TRAP_V3(__val, __phys_addr) \ +do { \ + u64 _reg; \ + asm volatile ( \ + /* 1) Disable instruction prefetch */ \ + "mmurr %%mmu_cr, %[reg]\n" \ + "andnd %[reg], 0x800, %[reg]\n" /* clear mmu_cr.ipd */ \ + "{nop 3\n" \ + " mmurw %[reg], %%mmu_cr}\n" \ + "disp %%ctpr1, 1f\n" \ + "{wait all_c=1\n" \ + " ct %%ctpr1}\n" /* force Instruction Buffer to use new ipd */ \ + "1:\n" \ + /* 2) Disable %ctpr's */ \ + "rwd 0, %%ctpr1\n" \ + "rwd 0, %%ctpr2\n" \ + "rwd 0, %%ctpr3\n" \ + "wait all_c=1\n" \ + /* 3) Flush TLB and instruction cache (wait only for L1I \ + * flush so that it does not flush stw + wait from under us) */ \ + "wait ma_c=1\n" \ + "std,2 0x0, %[addr_flush_icache], %[val_icache], mas=%[mas_icache]\n" \ + "std,2 0x0, %[addr_flush_tlb], %[val_tlb], mas=%[mas_tlb]\n" \ + "{wait fl_c=1\n" \ + /* 4) Make sure the actual disabling code lies in the same cache line */ \ + " ibranch 2f}\n" \ + ".align 256\n" \ + "2:\n" \ + /* 5) Flush data cache (except L3 which is shared) */ \ + "std,2 0x0, %[addr_flush_cache], %[val_cache], mas=%[mas_cache]\n" \ + "wait fl_c=1, ma_c=1\n" \ + /* 6) Disable the clock. 
We access the SIC register by physical address \ + * because we've just flushed TLB, and accessing by virtual address \ + * would stall until all 4 page table levels are read into TLB. */ \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_HWBUG_C3_WAIT_MA_C version */ \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 7\n" \ + "nop 1\n" \ + "wait ma_c=1\n" \ + "stw %[phys_addr], 0, %[val], mas=%[mas_ioaddr]\n" \ + "wait trap=1\n" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "stw %[phys_addr], 0, %[val], mas=%[mas_ioaddr]\n" \ + "wait trap=1\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + /* Will never get here */ \ + : [reg] "=&r" (_reg) \ + : [val] "r" ((u32) (__val)), \ + [phys_addr] "r" ((u64) (__phys_addr)), \ + [addr_flush_cache] "r" ((u64) (_FLUSH_WRITE_BACK_CACHE_L12_OP)), \ + [val_cache] "r" (0ULL), \ + [mas_cache] "i" (MAS_CACHE_FLUSH), \ + [addr_flush_icache] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \ + [val_icache] "r" (0ULL), \ + [mas_icache] "i" (MAS_ICACHE_FLUSH), \ + [addr_flush_tlb] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \ + [val_tlb] "r" (0ULL), \ + [mas_tlb] "i" (MAS_TLB_FLUSH), \ + [mas_ioaddr] "i" (MAS_IOADDR), \ + [facility] "i" (CPU_HWBUG_C3_WAIT_MA_C) \ + : "memory", "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + +/* Preparing to turn the synchronization clock off + * by writing the value __val to the PMC register pointed to by __phys_addr */ +#define C3_WAIT_INT_V6(__val, __phys_addr) \ +do { \ + u64 _reg; \ + asm volatile ( \ + /* 1) Disable instruction prefetch */ \ + "mmurr %%mmu_cr, %[reg]\n" \ + "andnd %[reg], 0x800, %[reg]\n" /* clear mmu_cr.ipd */ \ + "{nop 3\n" \ + " mmurw %[reg], %%mmu_cr}\n" \ + "disp %%ctpr1, 1f\n" \ + "{wait all_c=1\n" \ + " ct %%ctpr1}\n" /* force Instruction Buffer to use new ipd */ \ + "1:\n" \ + /* 2) Disable %ctpr's */ \ + "rwd 0, %%ctpr1\n" \ + "rwd 0, %%ctpr2\n" \ + "rwd 0, %%ctpr3\n" \ + "wait all_c=1\n" \ + /* 3) Flush TLB and instruction cache */ \ + "wait ma_c=1\n" \ + "std,2 0x0, %[addr_flush_icache], %[val_icache], mas=%[mas_icache]\n" \ + "std,2 0x0, %[addr_flush_tlb], %[val_tlb], mas=%[mas_tlb]\n" \ + "{wait fl_c=1, ma_c=1\n" \ + /* 4) Make sure the actual disabling code lies in the same cache line */ \ + " ibranch 2f}\n" \ + ".align 256\n" \ + "2:\n" \ + /* 5) Flush data cache (except L3 which is shared) */ \ + "std,2 0x0, %[addr_flush_cache], %[val_cache], mas=%[mas_cache]\n" \ + "wait fl_c=1, ma_c=1\n" \ + /* 6) Disable the clock. We access the SIC register by physical address \ + * because we've just flushed TLB, and accessing by virtual address \ + * would stall until all 4 page table levels are read into TLB. 
*/ \ + "stw %[phys_addr], 0, %[val], mas=%[mas_ioaddr]\n" \ + "wait st_c=1, int=1\n" \ + /* 7) We are woken, reenable instruction prefetch */ \ + "mmurr %%mmu_cr, %[reg]\n" \ + "ord %[reg], 0x800, %[reg]\n" /* set mmu_cr.ipd */ \ + "mmurw %[reg], %%mmu_cr\n" \ + "disp %%ctpr1, 3f\n" \ + "{wait all_c=1\n" \ + " ct %%ctpr1}\n" /* force Instruction Buffer to use new ipd */ \ + "3:\n" \ + : [reg] "=&r" (_reg) \ + : [val] "r" ((u32) (__val)), \ + [phys_addr] "r" ((u64) (__phys_addr)), \ + [addr_flush_cache] "r" ((u64) (_FLUSH_WRITE_BACK_CACHE_L12_OP)), \ + [val_cache] "r" (0ULL), \ + [mas_cache] "i" (MAS_CACHE_FLUSH), \ + [addr_flush_icache] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \ + [val_icache] "r" (0ULL), \ + [mas_icache] "i" (MAS_ICACHE_FLUSH), \ + [addr_flush_tlb] "r" ((u64) (_FLUSH_ICACHE_ALL_OP)), \ + [val_tlb] "r" (0ULL), \ + [mas_tlb] "i" (MAS_TLB_FLUSH), \ + [mas_ioaddr] "i" (MAS_IOADDR) \ + : "memory", "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + +/* Hardware virtualized extensions support */ + +#define E2K_GLAUNCH(_ctpr1, _ctpr1_hi, _ctpr2, _ctpr2_hi, _ctpr3, _ctpr3_hi, \ + _lsr, _lsr1, _ilcr, _ilcr1) \ +do { \ + _Pragma("no_asm_inline") \ + asm volatile ("{rwd %[ctpr1], %%ctpr1}\n" \ + "{rwd %[ctpr1_hi], %%ctpr1.hi}\n" \ + "{rwd %[ctpr3], %%ctpr3}\n" \ + "{rwd %[ctpr3_hi], %%ctpr3.hi}\n" \ + "{\n" \ + "rwd %[lsr], %%lsr\n" \ + "addd %[lsr1], 0, %%db[1]\n" \ + "addd %[ilcr1], 0, %%db[3]\n" \ + "}\n" \ + /* rwd %db[1], %%lsr1 */ \ + ".word 0x04100011\n" \ + ".word 0x3dc001c3\n" \ + ".word 0x01c00000\n" \ + ".word 0x00000000\n" \ + "{\n" \ + "rwd %[ilcr], %%ilcr\n" \ + "}\n" \ + /* rwd %db[3], %%ilcr1 */ \ + ".word 0x04100011\n" \ + ".word 0x3dc003c7\n" \ + ".word 0x01c00000\n" \ + ".word 0x00000000\n" \ + /* #80747: must repeat interrupted barriers */ \ + "{nop 3; wait st_c=1}\n" \ + "{glaunch}\n" \ + "{wait fl_c=1\n" \ + " rrd %%lsr, %[lsr]}\n" \ + "{rrd %%ilcr, %[ilcr]}\n" \ + "{rrd %%lsr1, %[lsr1]}\n" \ + "{rrd %%ilcr1, %[ilcr1]}\n" \ + "{rrd %%ctpr1, %[ctpr1]}\n" \ + "{rrd %%ctpr1.hi, %[ctpr1_hi]}\n" \ + "{rrd %%ctpr2, %[ctpr2]}\n" \ + "{rrd %%ctpr2.hi, %[ctpr2_hi]}\n" \ + "{rrd %%ctpr3, %[ctpr3]}\n" \ + "{rrd %%ctpr3.hi, %[ctpr3_hi]}\n" \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_HWBUG_L1I_STOPS_WORKING version */ \ + "1:\n" \ + "{ipd 0; disp %%ctpr1, 1b}" \ + /* ctpr2 will be cleared after saving AAU */ \ + "{ipd 0; disp %%ctpr3, 1b}" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{nop}" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + : [lsr] "+r" (_lsr), [lsr1] "+r" (_lsr1), \ + [ilcr] "+r" (_ilcr), [ilcr1] "+r" (_ilcr1), \ + [ctpr1] "+r" (_ctpr1), [ctpr1_hi] "+r" (_ctpr1_hi), \ + [ctpr2] "+r" (_ctpr2), [ctpr2_hi] "+r" (_ctpr2_hi), \ + [ctpr3] "+r" (_ctpr3), [ctpr3_hi] "+r" (_ctpr3_hi) \ + : [facility] "i" (CPU_HWBUG_L1I_STOPS_WORKING) \ + : "memory", "b[1]", "b[3]", "ctpr1", "ctpr2", "ctpr3"); \ +} while (0) + + +/* Clobbers "ctpr" are here to tell lcc that there is a call inside */ +#define E2K_HCALL_CLOBBERS \ + "ctpr1", "ctpr2", "ctpr3", \ + "b[0]", "b[1]", "b[2]", "b[3]", \ + "b[4]", "b[5]", "b[6]", "b[7]" + +#define __E2K_HCALL_0(_trap, _sys_num, _arg1) \ +({ \ + register u64 __res; \ + asm volatile ( \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_1(_trap, _sys_num, _arg1) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 
0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_2(_trap, _sys_num, _arg1, _arg2) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_3(_trap, _sys_num, _arg1, _arg2, _arg3) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_4(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_5(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4, _arg5) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "addd 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)), \ + [arg5] "ri" ((u64) (_arg5)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_6(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6) \ +({ \ + register u64 __res; \ + asm volatile ( \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "{\n" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "addd 0x0, %[arg5], %%b[5]\n\t" \ + "addd 0x0, %[arg6], %%b[6]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] 
"ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)), \ + [arg5] "ri" ((u64) (_arg5)), \ + [arg6] "ri" ((u64) (_arg6)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_HCALL_7(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6, _arg7) \ +({ \ + register u64 __res; \ + asm volatile ("{\n" \ + "addd 0x0, %[sys_num], %%b[0]\n\t" \ + "addd 0x0, %[arg1], %%b[1]\n\t" \ + "addd 0x0, %[arg2], %%b[2]\n\t" \ + "addd 0x0, %[arg3], %%b[3]\n\t" \ + "addd 0x0, %[arg4], %%b[4]\n\t" \ + "addd 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + "addd 0x0, %[arg6], %%b[6]\n\t" \ + "addd 0x0, %[arg7], %%b[7]\n\t" \ + "}\n" \ + "{\n" \ + "hcall %[trap], wbs = %#\n\t" \ + "}\n" \ + "addd 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((u64) (_sys_num)), \ + [arg1] "ri" ((u64) (_arg1)), \ + [arg2] "ri" ((u64) (_arg2)), \ + [arg3] "ri" ((u64) (_arg3)), \ + [arg4] "ri" ((u64) (_arg4)), \ + [arg5] "ri" ((u64) (_arg5)), \ + [arg6] "ri" ((u64) (_arg6)), \ + [arg7] "ri" ((u64) (_arg7)) \ + : E2K_HCALL_CLOBBERS); \ + __res; \ +}) + +#define E2K_HCALL(trap, sys_num, num_args, args...) \ + __E2K_HCALL_##num_args(trap, sys_num, args) + + +/* Clobbers "ctpr" are here to tell lcc that there is a return inside */ +#define E2K_HRET_CLOBBERS "ctpr1", "ctpr2", "ctpr3" + +#define E2K_HRET(_ret) \ +do { \ + asm volatile ( \ + "addd 0x0, %[ret], %%r0\n" \ + "{.word 0x00005012\n" /* HRET */ \ + " .word 0xc0000020\n" \ + " .word 0x30000003\n" \ + " .word 0x00000000}\n" \ + : \ + : [ret] "ir" (_ret) \ + : E2K_HRET_CLOBBERS); \ + unreachable(); \ +} while (0) + + +typedef unsigned long long __e2k_syscall_arg_t; + +#define E2K_SYSCALL_CLOBBERS \ + "ctpr1", "ctpr2", "ctpr3", \ + "b[0]", "b[1]", "b[2]", "b[3]", \ + "b[4]", "b[5]", "b[6]", "b[7]" + +/* Transaction operation transaction of argument type + * __e2k_syscall_arg_t */ +#ifdef __ptr64__ +#define __E2K_SYSCAL_ARG_ADD "addd,s" +#else +#define __E2K_SYSCAL_ARG_ADD "adds,s" +#endif + +#define __E2K_SYSCALL_0(_trap, _sys_num, _arg1) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_1(_trap, _sys_num, _arg1) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_2(_trap, _sys_num, _arg1, _arg2) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + 
[arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_3(_trap, _sys_num, _arg1, _arg2, _arg3) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_4(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_5(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4, _arg5) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_6(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + 
[sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_7(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6, _arg7) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg7], %%b[7]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)), \ + [arg7] "ri" ((__e2k_syscall_arg_t) (_arg7)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define E2K_SYSCALL(trap, sys_num, num_args, args...) \ + __E2K_SYSCALL_##num_args(trap, sys_num, args) + +#define ASM_CALL_8_ARGS(func_name_to_call, _arg0, _arg1, _arg2, _arg3, \ + _arg4, _arg5, _arg6, _arg7) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ( \ + "{\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg0], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + "disp %%ctpr1, " #func_name_to_call "\n\t" \ + "}\n\t" \ + "{\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg7], %%b[7]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]\n\t" \ + : \ + [res] "=r" (__res) \ + : \ + [arg0] "ri" ((__e2k_syscall_arg_t) (_arg0)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)), \ + [arg7] "ri" ((__e2k_syscall_arg_t) (_arg7)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __arch_this_cpu_read(_var, size) \ +({ \ + typeof(_var) __ret; \ + _Pragma("no_asm_inline") \ + asm ("ld" size " %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret]" \ + : [ret] "=r" (__ret) \ + : [var] "r" (&(_var)) \ + : "memory"); \ + __ret; \ +}) + +#define __arch_this_cpu_write(_var, _val, size) \ +do { \ + _Pragma("no_asm_inline") \ + asm ("st" size " %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[val]" \ + :: [var] "r" (&(_var)), [val] "r" (_val) \ + : "memory"); \ +} while (0) + +/* Use relaxed atomics for percpu if they are available */ 
+#if CONFIG_CPU_ISET >= 5 + +# define __arch_pcpu_atomic_xchg(_val, _var, size) \ +({ \ + typeof(_var) __ret; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n2:" \ + "\n{"\ + "\nnop 5" /* bug 92891 - optimize for performance */ \ + "\nld" size ",0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{"\ + "\nst" size "," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 2b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret) \ + : [var] "r" (&(_var)), [val] "r" ((u64) (_val)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +# define __arch_pcpu_atomic_cmpxchg(_old, _new, _var, size, sxt_size) \ +({ \ + typeof(_var) __ret, __stored_val; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n3:" \ + "\n{" \ + "\nnop 4" \ + "\nld" size ",0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nsxt "#sxt_size", %[ret], %[ret]" \ + "\naddd 0x0, %[new], %[stored_val]" \ + "\n}" \ + "\n{" \ + "\nnop 1" \ + "\ncmpedb %[ret], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 1" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[ret], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nst" size "," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[stored_val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \ + : [var] "r" (&(_var)), [new] "ir" (_new), [old] "ir" (_old) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +# define __arch_pcpu_atomic_cmpxchg_word(_old, _new, _var) \ +({ \ + typeof(_var) __ret, __stored_val; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldw,0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\nadds 0x0, %[new], %[stored_val]" \ + "\ncmpesb %[ret], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\nadds 0x0, %[ret], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstw," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[stored_val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 3b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \ + : [var] "r" (&(_var)), [new] "ir" (_new), [old] "ir" (_old) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +# define __arch_pcpu_atomic_cmpxchg_dword(_old, _new, _var) \ +({ \ + typeof(_var) __ret, __stored_val; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n3:" \ + "\n{"\ + "\nnop 4"\ + "\nldd,0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n{" \ + "\nnop 1"\ + "\naddd 0x0, %[new], %[stored_val]" \ + "\ncmpedb %[ret], %[old], %%pred2" \ + "\n}" \ + "\n{" \ + "\nnop 2" /* bug 92891 - optimize for performance */ \ + "\naddd 0x0, %[ret], %[stored_val] ? ~ %%pred2" \ + "\n}" \ + "\n{" \ + "\nstd," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[stored_val], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 3b ? 
%%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret), [stored_val] "=&r" (__stored_val) \ + : [var] "r" (&(_var)), [new] "ir" ((u64) (_new)), [old] "ir" ((u64) (_old)) \ + : "memory", "pred2"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +#define __arch_pcpu_atomic_op(_val, _var, size, op) \ +({ \ + typeof(_var) __ret; \ + HWBUG_ATOMIC_BEGIN(__my_cpu_offset + &(_var)); \ + asm NOT_VOLATILE ( \ + MB_BEFORE_ATOMIC_RELAXED_MB \ + "\n1:" \ + "\n{"\ + "\nnop 4"\ + "\nld" size ",0 %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=0x7" \ + "\n}" \ + "\n" op " %[ret], %[val], %[ret]" \ + "\n{"\ + "\nst" size "," RELAXED_MB_ATOMIC_CHANNEL " \ + %%dg" __stringify(MY_CPU_OFFSET_GREG) ", %[var], %[ret], mas=" RELAXED_MB_ATOMIC_MAS \ + "\nibranch 1b ? %%MLOCK" \ + "\n}" \ + MB_AFTER_ATOMIC_RELAXED_MB \ + : [ret] "=&r" (__ret) \ + : [var] "r" (&(_var)), [val] "ir" ((u64) (_val)) \ + : "memory"); \ + HWBUG_ATOMIC_END(); \ + __ret; \ +}) + +#endif /* #ifndef CONFIG_CPU_ES2 */ + +/* Disable %aalda writes on iset v6 (iset correction v6.107). + * Use alternatives since we cannot do jumps at this point + * (%ctpr's have been restored already). */ +#define NATIVE_SET_ALL_AALDAS(aaldas_p) \ +do { \ + u32 *aaldas = (u32 *)(aaldas_p); \ + asm ( \ + ALTERNATIVE_1_ALTINSTR \ + /* CPU_FEAT_ISET_V6 version */ \ + "{nop}" \ + ALTERNATIVE_2_OLDINSTR \ + /* Default version */ \ + "{aaurws,2 %[aalda0], %%aalda0\n" \ + " aaurws,5 %[aalda32], %%aalda0}\n" \ + "{aaurws,2 %[aalda4], %%aalda4\n" \ + " aaurws,5 %[aalda36], %%aalda4}\n" \ + "{aaurws,2 %[aalda8], %%aalda8\n" \ + " aaurws,5 %[aalda40], %%aalda8}\n" \ + "{aaurws,2 %[aalda12], %%aalda12\n" \ + " aaurws,5 %[aalda44], %%aalda12}\n" \ + "{aaurws,2 %[aalda16], %%aalda16\n" \ + " aaurws,5 %[aalda48], %%aalda16}\n" \ + "{aaurws,2 %[aalda20], %%aalda20\n" \ + " aaurws,5 %[aalda52], %%aalda20}\n" \ + "{aaurws,2 %[aalda24], %%aalda24\n" \ + " aaurws,5 %[aalda56], %%aalda24}\n" \ + "{aaurws,2 %[aalda28], %%aalda28\n" \ + " aaurws,5 %[aalda60], %%aalda28}\n" \ + ALTERNATIVE_3_FEATURE(%[facility]) \ + :: [aalda0] "r" (aaldas[0]), [aalda32] "r" (aaldas[8]), \ + [aalda4] "r" (aaldas[1]), [aalda36] "r" (aaldas[9]), \ + [aalda8] "r" (aaldas[2]), [aalda40] "r" (aaldas[10]), \ + [aalda12] "r" (aaldas[3]), [aalda44] "r" (aaldas[11]), \ + [aalda16] "r" (aaldas[4]), [aalda48] "r" (aaldas[12]), \ + [aalda20] "r" (aaldas[5]), [aalda52] "r" (aaldas[13]), \ + [aalda24] "r" (aaldas[6]), [aalda56] "r" (aaldas[14]), \ + [aalda28] "r" (aaldas[7]), [aalda60] "r" (aaldas[15]), \ + [facility] "i" (CPU_FEAT_ISET_V6)); \ +} while (0) + +/* Force load OSGD->GD */ +#define E2K_LOAD_OSGD_TO_GD() \ +do { \ + asm volatile ("{nop; sdisp %%ctpr2, 11}\n" \ + "{call %%ctpr2, wbs=%#}\n" \ + ::: "call"); \ +} while (0) +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_API_H_ */ diff --git a/arch/e2k/include/asm/e2k_debug.h b/arch/e2k/include/asm/e2k_debug.h new file mode 100644 index 0000000..ce10667 --- /dev/null +++ b/arch/e2k/include/asm/e2k_debug.h @@ -0,0 +1,823 @@ +/* + * asm-e2k/e2k_debug.h + */ +#ifndef _E2K_DEBUG_H_ +#define _E2K_DEBUG_H_ + +#ifndef __ASSEMBLY__ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CHK_DEBUGGER(trapnr, signr, error_code, address, regs, after) + +extern void print_stack_frames(struct task_struct *task, + struct pt_regs *pt_regs, int show_reg_window) __cold; +extern void print_mmap(struct task_struct *task) __cold; +extern 
void print_va_tlb(e2k_addr_t addr, int large_page) __cold; +extern void print_all_TC(const trap_cellar_t *TC, int TC_count) __cold; +extern void print_tc_record(const trap_cellar_t *tcellar, int num) __cold; +extern u64 print_all_TIRs(const e2k_tir_t *TIRs, u64 nr_TIRs) __cold; +extern void print_address_page_tables(unsigned long address, + int last_level_only) __cold; +extern void print_pt_regs(const pt_regs_t *regs) __cold; + +__init extern void setup_stack_print(void); + +static inline void print_address_tlb(unsigned long address) +{ + print_va_tlb(address, 0); + print_va_tlb(pte_virt_offset(round_down(address, PTE_SIZE)), 0); + print_va_tlb(pmd_virt_offset(round_down(address, PMD_SIZE)), 0); + print_va_tlb(pud_virt_offset(round_down(address, PUD_SIZE)), 0); +} + +/** + * *parse_chain_fn_t - function to be called on every frame in chain stack + * @crs - contents of current frame in chain stack + * @real_frame_addr - real address of current frame, can be used to modify frame + * @corrected_frame_addr - address of current frame where it would be in stack + * @flags - PCF_FLUSH_NEEDED if chain stack flush is needed before modifying, + * PCF_IRQS_CLOSE_NEEDED if irqs should be closed before modifying + * @arg - passed argument from parse_chain_stack() + * + * The distinction between @real_frame_addr and @corrected_frame_addr is + * important. Normally the top of the user chain stack is spilled to the + * kernel chain stack, in which case @real_frame_addr points to the spilled + * frame in the kernel stack and @corrected_frame_addr holds the address in + * userspace where the frame _would_ be if it were spilled to userspace. + * In all other cases these two variables are equal. + * + * Generally @corrected_frame_addr is used in comparisons and + * @real_frame_addr is used for modifying the stack in memory. + * + * IMPORTANT: if the function wants to modify frame contents, it must flush + * the chain stack if PCF_FLUSH_NEEDED is set in @flags.
+ */ +#define PCF_FLUSH_NEEDED 0x1 +#define PCF_IRQS_CLOSE_NEEDED 0x2 +typedef int (*parse_chain_fn_t)(e2k_mem_crs_t *crs, + unsigned long real_frame_addr, + unsigned long corrected_frame_addr, + int flags, void *arg); +#define PCS_USER 0x1 +#define PCS_OPEN_IRQS 0x2 +extern notrace int parse_chain_stack(int flags, struct task_struct *p, + parse_chain_fn_t func, void *arg); + + +extern void *kernel_symtab; +extern long kernel_symtab_size; +extern void *kernel_strtab; +extern long kernel_strtab_size; + +#define boot_kernel_symtab boot_get_vo_value(kernel_symtab) +#define boot_kernel_symtab_size boot_get_vo_value(kernel_symtab_size) +#define boot_kernel_strtab boot_get_vo_value(kernel_strtab) +#define boot_kernel_strtab_size boot_get_vo_value(kernel_strtab_size) + +#define NATIVE_IS_USER_ADDR(task, addr) \ + (((e2k_addr_t)(addr)) < NATIVE_TASK_SIZE) +#define NATIVE_GET_PHYS_ADDR(task, addr) \ +({ \ + e2k_addr_t phys; \ + if (NATIVE_IS_USER_ADDR(task, addr)) \ + phys = (unsigned long)user_address_to_pva(task, addr); \ + else \ + phys = (unsigned long)kernel_address_to_pva(addr); \ + phys; \ +}) + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +native_read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + return NATIVE_READ_MAS_D(phys_ip, MAS_LOAD_PA); +} +/* Write modified instruction word at IP address */ +static inline void +native_modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + NATIVE_WRITE_MAS_D(phys_ip, instr_word, MAS_STORE_PA); +} + +#define SIZE_PSP_STACK (16 * 4096) +#define DATA_STACK_PAGES 16 +#define SIZE_DATA_STACK (DATA_STACK_PAGES * PAGE_SIZE) + +#define SIZE_CHAIN_STACK KERNEL_PC_STACK_SIZE + +/* Maximum number of user windows where a trap occurred + * for which additional registers will be printed (ctpr's, lsr and ilcr). 
*/ +#define MAX_USER_TRAPS 12 + +/* Maximum number of pt_regs being marked as such + * when showing kernel data stack */ +#define MAX_PT_REGS_SHOWN 30 + +typedef struct printed_trap_regs { + bool valid; + u64 frame; + e2k_ctpr_t ctpr1; + e2k_ctpr_t ctpr2; + e2k_ctpr_t ctpr3; + e2k_ctpr_hi_t ctpr1_hi; + e2k_ctpr_hi_t ctpr2_hi; + e2k_ctpr_hi_t ctpr3_hi; + u64 lsr; + u64 ilcr; + u64 lsr1; + u64 ilcr1; + u64 sbbp[SBBP_ENTRIES_NUM]; +} printed_trap_regs_t; + +typedef struct stack_regs { + bool used; + bool valid; + bool ignore_banner; + struct task_struct *task; + e2k_mem_crs_t crs; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + void *base_psp_stack; + u64 user_size_psp_stack; + u64 orig_base_psp_stack_u; + u64 orig_base_psp_stack_k; + void *psp_stack_cache; + u64 size_psp_stack; + bool show_trap_regs; + bool show_user_regs; + struct printed_trap_regs trap[MAX_USER_TRAPS]; +#ifdef CONFIG_GREGS_CONTEXT + struct global_regs gregs; + bool gregs_valid; +#endif +#ifdef CONFIG_DATA_STACK_WINDOW + bool show_k_data_stack; + void *base_k_data_stack; + void *k_data_stack_cache; + u64 size_k_data_stack; + void *real_k_data_stack_addr; + struct { + unsigned long addr; + bool valid; + } pt_regs[MAX_PT_REGS_SHOWN]; +#endif + u64 size_chain_stack; + void *base_chain_stack; + u64 user_size_chain_stack; + u64 orig_base_chain_stack_u; + u64 orig_base_chain_stack_k; + void *chain_stack_cache; +} stack_regs_t; + +extern void print_chain_stack(struct stack_regs *regs, + int show_reg_window); + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#define GET_PHYS_ADDR(task, addr) NATIVE_GET_PHYS_ADDR(task, addr) +#define print_all_guest_stacks() /* nothing to do */ +#define print_guest_vcpu_stack(vcpu) /* nothing to do */ +#define debug_guest_regs(task) false /* none any guests */ +#define get_cpu_type_name() "CPU" /* real CPU */ + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + return native_read_instr_on_IP(ip, phys_ip); +} +/* Write modified instruction word at IP address */ +static inline void +modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + native_modify_instr_on_IP(ip, phys_ip, instr_word); +} +static inline void +print_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window) +{ + return; +} +static inline void +host_ftrace_stop(void) +{ + return; +} +static inline void +host_ftrace_dump(void) +{ + return; +} +#else /* CONFIG_VIRTUALIZATION */ +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host/guest kernel */ +/* or it is native guest kernel */ +#include +#endif /* ! 
CONFIG_VIRTUALIZATION */ + +/* + * Print Chain Regs CR0 and CR1 + */ +#undef DEBUG_CRs_MODE +#undef DebugCRs +#define DEBUG_CRs_MODE 0 +#define DebugCRs(POS) if (DEBUG_CRs_MODE) print_chain_stack_regs(POS) +extern inline void +print_chain_stack_regs(char *point) +{ + register e2k_cr0_hi_t cr0_hi = READ_CR0_HI_REG(); + register e2k_cr0_lo_t cr0_lo = READ_CR0_LO_REG(); + register e2k_cr1_hi_t cr1_hi = READ_CR1_HI_REG(); + register e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG(); + register e2k_psr_t psr; + + printk("Procedure chain registers state"); + if (point != NULL) + printk(" at %s :", point); + printk("\n"); + + printk(" CR0.hi ip 0x%lx\n", (long)AS_STRUCT(cr0_hi).ip << 3); + printk(" CR0.lo pf 0x%lx\n", (long)AS_STRUCT(cr0_lo).pf); + printk(" CR1.hi ussz 0x%x br 0x%x\n", + (int)AS_STRUCT(cr1_hi).ussz << 4, (int)AS_STRUCT(cr1_hi).br); + AS_WORD(psr) = AS_STRUCT(cr1_lo).psr; + printk(" CR1.lo: unmie %d nmie %d uie %d lw %d sge %d ie %d " + "pm %d\n", + (int)AS_STRUCT(psr).unmie, + (int)AS_STRUCT(psr).nmie, + (int)AS_STRUCT(psr).uie, + (int)AS_STRUCT(psr).lw, + (int)AS_STRUCT(psr).sge, + (int)AS_STRUCT(psr).ie, + (int)AS_STRUCT(psr).pm); + printk(" cuir 0x%x wbs 0x%x wpsz %d wfx %d ein %d\n", + (int)AS_STRUCT(cr1_lo).cuir, (int)AS_STRUCT(cr1_lo).wbs, + (int)AS_STRUCT(cr1_lo).wpsz, (int)AS_STRUCT(cr1_lo).wfx, + (int)AS_STRUCT(cr1_lo).ein); +} + +/* + * Registers CPU + */ + +#define DebugCpuR(str) if (DEBUG_CpuR_MODE) print_cpu_regs(str) +#define DebugSPRs(POS) if (DEBUG_SPRs_MODE) print_stack_pointers_reg(POS) +static inline void +print_cpu_regs(char *str) +{ + pr_info("%s\n %s", str, "CPU REGS value:\n"); + pr_info("usbr %llx\n", READ_SBR_REG_VALUE()); + pr_info("usd.hi.curptr %llx usd.hi.size %llx\n", + READ_USD_HI_REG_VALUE() & 0xffffffff, + (READ_USD_HI_REG_VALUE() >> 32) & 0xffffffff); + pr_info("usd.lo.base 0x%llx\n", + READ_USD_LO_REG_VALUE() & 0xffffffffffff); + pr_info("psp.hi.ind %llx psp.hi.size %llx\n", + READ_PSP_HI_REG_VALUE() & 0xffffffff, + (READ_PSP_HI_REG_VALUE() >> 32) & 0xffffffff); + pr_info("psp.lo %llx\n", READ_PSP_LO_REG_VALUE()); + pr_info("pcsp.hi.ind %llx pcsp.hi.size %llx\n", + READ_PCSP_HI_REG_VALUE() & 0xffffffff, + (READ_PCSP_HI_REG_VALUE() >> 32) & 0xffffffff); + pr_info("pcsp.lo %llx\n", READ_PCSP_LO_REG_VALUE()); + pr_info("cr0.hi.ip %llx\n", + READ_CR0_HI_REG_VALUE() & ~0x7UL); + pr_info("cr1.hi.rbs %llx cr1.hi.rsz %llx\ncr1.hi.rcur %llx " + "cr1.hi.psz %llx cr1.hi.pcur %llx\ncr1.hi.ussz %llx\n", + READ_CR1_HI_REG_VALUE() & 0x3f, + READ_CR1_HI_REG_VALUE() >> 6 & 0x3f, + READ_CR1_HI_REG_VALUE() >> 12 & 0x3f, + READ_CR1_HI_REG_VALUE() >> 18 & 0x1f, + READ_CR1_HI_REG_VALUE() >> 23 & 0x1f, + READ_CR1_HI_REG_VALUE() >> 36 & 0xfffffff); + pr_info("cr1.lo.wpsz %llx cr1.lo.wbs %llx cr1.lo.psr %llx\n", + (READ_CR1_LO_REG_VALUE() >> 26) & 0x7f, + (READ_CR1_LO_REG_VALUE() >> 33) & 0x7f, + (READ_CR1_LO_REG_VALUE() >> 57) & 0x7); + pr_info("wd %llx\n", READ_WD_REG_VALUE()); +} + +extern inline void +print_stack_pointers_reg(char *point) +{ + register e2k_psp_hi_t psp_hi = READ_PSP_HI_REG(); + register e2k_psp_lo_t psp_lo = READ_PSP_LO_REG(); + register e2k_pcsp_hi_t pcsp_hi = READ_PCSP_HI_REG(); + register e2k_pcsp_lo_t pcsp_lo = READ_PCSP_LO_REG(); + register long pshtp_reg = READ_PSHTP_REG_VALUE() & + 0xffffUL; + register long pcshtp_reg = READ_PCSHTP_REG_SVALUE() & + 0xffffUL; + + pr_info("Stack pointer registers state"); + if (point != NULL) + pr_info(" at %s :", point); + pr_info("\n"); + pr_info(" USBR_base 0x%llx\n", + READ_USBR_REG().USBR_base); + pr_info(" USD_size 
0x%x USD_p %d USD_base 0x%llx\n", + READ_USD_HI_REG().USD_hi_size, + READ_USD_LO_REG().USD_lo_p, + READ_USD_LO_REG().USD_lo_base); + + pr_info(" PSP_size 0x%x PSP_ind 0x%x PSP_base 0x%lx PSHTP " + "0x%llx (0x%lx)\n", + psp_hi.PSP_hi_size, + psp_hi.PSP_hi_ind, pshtp_reg, + psp_lo.PSP_lo_base, + (long)(psp_hi.PSP_hi_ind + pshtp_reg)); + if (psp_hi.PSP_hi_ind + pshtp_reg >= psp_hi.PSP_hi_size) { + pr_info("PROCEDURE STACK OVERFLOW 0x%lx > size 0x%x\n", + (long)(psp_hi.PSP_hi_ind + pshtp_reg), + psp_hi.PSP_hi_size); + } + pr_info(" PCSP_size 0x%x PCSP_ind 0x%x PCSP_base 0x%lx " + "PCSHTP 0x%llx (0x%lx)\n", + pcsp_hi.PCSP_hi_size, + pcsp_hi.PCSP_hi_ind, pcshtp_reg, + pcsp_lo.PCSP_lo_base, + (long)(pcsp_hi.PCSP_hi_ind + pcshtp_reg)); + + DebugCRs(point); + +} + +static inline int print_siginfo(siginfo_t *info, struct pt_regs *regs) +{ + pr_info("Signal #%d info structure:\n" + " errno %d code %d pid %d uid %d\n" + " trap #%d address 0x%px\n", + info->si_signo, info->si_errno, info->si_code, info->si_pid, + info->si_uid, info->si_trapno, info->si_addr); + + print_pt_regs(regs); + + return 1; +} + + +/* + * Print Switch Regs + */ +extern inline void +print_sw_regs(char *point, sw_regs_t *sw_regs) +{ + pr_info("%s\n", point); + pr_info("top: %lx\n", sw_regs->top); + pr_info("usd_lo: %llx\n", AS_WORD(sw_regs->usd_lo)); + pr_info("usd_hi: %llx\n", AS_WORD(sw_regs->usd_hi)); + pr_info("psp_lo: %llx\n", AS_WORD(sw_regs->psp_lo)); + pr_info("psp_hi: %llx\n", AS_WORD(sw_regs->psp_hi)); + pr_info("pcsp_lo: %llx\n", AS_WORD(sw_regs->pcsp_lo)); + pr_info("pcsp_hi: %llx\n", AS_WORD(sw_regs->pcsp_hi)); +} + +/* + * Print PAGE_FAULT (TC TRAP_CELLAR) + */ + +#define DebugTC(a, b) \ + if(DEBUG_PAGE_FAULT_MODE) print_tc_state(a, b); +#include +static inline void print_tc_state(const trap_cellar_t *tcellar, int num) +{ + tc_fault_type_t ftype; + tc_dst_t dst ; + tc_opcode_t opcode; + u64 data; + u8 data_tag; + + AW(dst) = AS(tcellar->condition).dst; + AW(opcode) = AS(tcellar->condition).opcode; + AW(ftype) = AS(tcellar->condition).fault_type; + + load_value_and_tagd(&tcellar->data, &data, &data_tag); + + printk("\n----------------------------" + "TRAP_CELLAR record #%d:" + "-----------------------------\n" + "address = 0x%016lx\n" + "data = 0x%016llx tag = 0x%x\n" + "condition = 0x%016llx:\n" + " dst = 0x%05x: address = 0x%04x, vl = 0x%x, vr = 0x%x\n" + " opcode = 0x%03x: fmt = 0x%02x, npsp = 0x%x\n\n" + " store = 0x%x, s_f = 0x%x, mas = 0x%x\n" + " root = 0x%x, scal = 0x%x, sru = 0x%x\n" + " chan = 0x%x, se = 0x%x, pm = 0x%x\n\n" + " fault_type = 0x%x:\n" + " intl_res_bits = %d MLT_trap = %d\n" + " ph_pr_page = %d page_bound = %d\n" + " io_page = %d isys_page = %d\n" + " prot_page = %d priv_page = %d\n" + " illegal_page = %d nwrite_page = %d\n" + " page_miss = %d ph_bound = %d\n" + " global_sp = %d\n\n" + " miss_lvl = 0x%x, num_align = 0x%x, empt = 0x%x\n" + " clw = 0x%x, rcv = 0x%x dst_rcv = 0x%x\n" + "----------------------------------------------------" + "---------------------------\n", num, + tcellar->address, + data, data_tag, + AW(tcellar->condition), + (u32)AW(dst),(u32)(AS(dst).address), (u32)(AS(dst).vl), + (u32)(AS(dst).vr), + (u32)AW(opcode), (u32)(AS(opcode).fmt),(u32)(AS(opcode).npsp), + (u32)AS(tcellar->condition).store, + (u32)AS(tcellar->condition).s_f, + (u32)AS(tcellar->condition).mas, + (u32)AS(tcellar->condition).root, + (u32)AS(tcellar->condition).scal, + (u32)AS(tcellar->condition).sru, + (u32)AS(tcellar->condition).chan, + (u32)AS(tcellar->condition).spec, + (u32)AS(tcellar->condition).pm, 
+ (u32)AS(tcellar->condition).fault_type, + (u32)AS(ftype).intl_res_bits, (u32)(AS(ftype).exc_mem_lock), + (u32)AS(ftype).ph_pr_page, (u32)AS(ftype).page_bound, + (u32)AS(ftype).io_page, (u32)AS(ftype).isys_page, + (u32)AS(ftype).prot_page, (u32)AS(ftype).priv_page, + (u32)AS(ftype).illegal_page, (u32)AS(ftype).nwrite_page, + (u32)AS(ftype).page_miss, (u32)AS(ftype).ph_bound, + (u32)AS(ftype).global_sp, + (u32)AS(tcellar->condition).miss_lvl, + (u32)AS(tcellar->condition).num_align, + (u32)AS(tcellar->condition).empt, + (u32)AS(tcellar->condition).clw, + (u32)AS(tcellar->condition).rcv, + (u32)AS(tcellar->condition).dst_rcv); + +} + + +/* + * Set instruction data breakpoint at virtual address @addr. + * + * NOTE: breakpoint is set only for the current thread! + * To set it for the whole system, remove restoring of + * debug registers on a task switch. + */ +static inline int set_hardware_instr_breakpoint(u64 addr, + const int stop, const int cp_num, const int v) +{ + u64 dibcr, dibsr, dibar = (u64) addr; + + switch (cp_num) { + case 0: WRITE_DIBAR0_REG_VALUE(dibar); break; + case 1: WRITE_DIBAR1_REG_VALUE(dibar); break; + case 2: WRITE_DIBAR2_REG_VALUE(dibar); break; + case 3: WRITE_DIBAR3_REG_VALUE(dibar); break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + + /* Rewrite only the requested breakpoint. */ + dibcr = ( + !!(v) /* enable*/ + | (1ULL << 1) /* generate exc_instr_debug */ + ) << (cp_num * 2); + dibcr |= (!!stop << 9); + dibcr |= READ_DIBCR_REG_VALUE() & ~E2K_DIBCR_MASK(cp_num); + + dibsr = READ_DIBSR_REG_VALUE() & ~E2K_DIBSR_MASK(cp_num); + + WRITE_DIBCR_REG_VALUE(dibcr); + WRITE_DIBSR_REG_VALUE(dibsr); + + return 0; +} + + +/* + * Set hardware data breakpoint at virtual address @addr. + * + * NOTE: breakpoint is set only for the current thread! + * To set it for the whole system, remove restoring of + * debug registers on a task switch. + */ +static inline int set_hardware_data_breakpoint(u64 addr, u64 size, + const int write, const int read, + const int stop, const int cp_num, const int v) +{ + u64 ddbcr, ddbsr, ddbar = (u64) addr; + + switch (size) { + case 1: + size = 1; + break; + case 2: + size = 2; + break; + case 4: + size = 3; + break; + case 8: + size = 4; + break; + case 16: + size = 5; + break; + default: + if (__builtin_constant_p((size))) + BUILD_BUG(); + return -EINVAL; + } + + switch (cp_num) { + case 0: + WRITE_DDBAR0_REG_VALUE(ddbar); + break; + case 1: + WRITE_DDBAR1_REG_VALUE(ddbar); + break; + case 2: + WRITE_DDBAR2_REG_VALUE(ddbar); + break; + case 3: + WRITE_DDBAR3_REG_VALUE(ddbar); + break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + + /* Rewrite only the requested breakpoint. 
*/ + ddbcr = ( + !!(v) /* enable*/ + | (0ULL << 1) /* primary space */ + | ((!!write) << 2) + | ((!!read) << 3) + | (size << 4) + | (1ULL << 7) /* sync */ + | (1ULL << 8) /* speculative */ + | (1ULL << 9) /* ap */ + | (1ULL << 10) /* spill/fill */ + | (1ULL << 11) /* hardware */ + | (1ULL << 12) /* generate exc_data_debug */ + ) << (cp_num * 14); + ddbcr |= READ_DDBCR_REG_VALUE() & ~E2K_DDBCR_MASK(cp_num); + + ddbsr = READ_DDBSR_REG_VALUE() & ~E2K_DDBSR_MASK(cp_num); + + WRITE_DDBCR_REG_VALUE(ddbcr); + WRITE_DDBSR_REG_VALUE(ddbsr); + if (stop) { + e2k_dibcr_t dibcr; + + dibcr = READ_DIBCR_REG(); + AS(dibcr).stop = 1; + WRITE_DIBCR_REG(dibcr); + } + + return 0; +} + +static inline int reset_hardware_data_breakpoint(void *addr) +{ + u64 ddbcr, ddbsr, ddbar; + int cp_num; + + ddbcr = READ_DDBCR_REG_VALUE(); + for (cp_num = 0; cp_num < 4; cp_num++, ddbcr >>= 14) { + if (!(ddbcr & 0x1)) /* valid */ + continue; + switch (cp_num) { + case 0: + ddbar = READ_DDBAR0_REG_VALUE(); + break; + case 1: + ddbar = READ_DDBAR1_REG_VALUE(); + break; + case 2: + ddbar = READ_DDBAR2_REG_VALUE(); + break; + case 3: + ddbar = READ_DDBAR3_REG_VALUE(); + break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + if ((ddbar & E2K_VA_MASK) == ((e2k_addr_t)addr & E2K_VA_MASK)) + break; + } + if (cp_num >= 4) + return cp_num; + + /* Reset only the requested breakpoint. */ + ddbcr = READ_DDBCR_REG_VALUE() & (~(0x3FFFULL << (cp_num * 14))); + ddbsr = READ_DDBSR_REG_VALUE() & (~(0x3FFFULL << (cp_num * 14))); + mb(); /* wait for completion of all load/store in progress */ + WRITE_DDBCR_REG_VALUE(ddbcr); + WRITE_DDBSR_REG_VALUE(ddbsr); + + switch (cp_num) { + case 0: + WRITE_DDBAR0_REG_VALUE(0); + break; + case 1: + WRITE_DDBAR1_REG_VALUE(0); + break; + case 2: + WRITE_DDBAR2_REG_VALUE(0); + break; + case 3: + WRITE_DDBAR3_REG_VALUE(0); + break; + default: + if (__builtin_constant_p((cp_num))) + BUILD_BUG(); + return -EINVAL; + } + + return cp_num; +} + +struct data_breakpoint_params { + void *address; + u64 size; + int write; + int read; + int stop; + int cp_num; +}; +extern void nmi_set_hardware_data_breakpoint( + struct data_breakpoint_params *params); +/** + * set_hardware_data_breakpoint_on_each_cpu() - set hardware data breakpoint + * on every online cpu. + * @addr: virtual address of the breakpoint. + * + * This uses non-maskable interrupts to set the breakpoint for the whole + * system atomically. That is, by the time this function returns the + * breakpoint will be set everywhere. + */ +#define set_hardware_data_breakpoint_on_each_cpu( \ + addr, sz, wr, rd, st, cp) \ +({ \ + struct data_breakpoint_params params; \ + MAYBE_BUILD_BUG_ON((sz) != 1 && (sz) != 2 && (sz) != 4 \ + && (sz) != 8 && (sz) != 16); \ + MAYBE_BUILD_BUG_ON((cp) != 0 && (cp) != 1 \ + && (cp) != 2 && (cp) != 3); \ + params.address = (addr); \ + params.size = (sz); \ + params.write = (wr); \ + params.read = (rd); \ + params.stop = (st); \ + params.cp_num = (cp); \ + nmi_on_each_cpu(nmi_set_hardware_data_breakpoint, ¶ms, 1, 0); \ +}) + + +extern int jtag_stop_var; +static inline void jtag_stop(void) +{ + set_hardware_data_breakpoint((u64) &jtag_stop_var, + sizeof(jtag_stop_var), 1, 0, 1, 3, 1); + + jtag_stop_var = 0; + + /* Wait for the hardware to stop us */ + wmb(); +} + + +#ifdef CONFIG_USE_AAU +#include + +/* print some aux. 
& AAU registers */ +static inline void +print_aau_regs(char *str, e2k_aau_t *context, struct pt_regs *regs, + struct thread_info *ti) +{ + int i; + bool old_iset; + + old_iset = (machine.native_iset_ver < E2K_ISET_V5); + + if (str) + pr_info("%s\n", str); + + pr_info("\naasr register = 0x%x (state: %s, iab: %d, stb: %d)\n" + "ctpr2 = 0x%llx\n" + "lsr = 0x%llx\n" + "ilcr = 0x%llx\n", + AW(context->aasr), + AAU_NULL(context->aasr) ? "NULL" : + AAU_READY(context->aasr) ? "READY" : + AAU_ACTIVE(context->aasr) ? "ACTIVE" : + AAU_STOPPED(context->aasr) ? "STOPPED": + "undefined", + AS(context->aasr).iab, + AS(context->aasr).stb, + AW(regs->ctpr2), regs->lsr, regs->ilcr); + + if (AAU_STOPPED(context->aasr)) { + pr_info("aaldv = 0x%llx\n" + "aaldm = 0x%llx\n", + AW(context->aaldv), AW(context->aaldm)); + } else { + /* AAU can be in active state in kernel - automatic + * stop by hardware upon trap enter does not work. */ + pr_info("AAU is not in STOPPED or ACTIVE states, AALDV and " + "AALDM will not be printed\n"); + } + + if (AS(context->aasr).iab) { + for (i = 0; i < 32; i++) { + pr_info("aad[%d].hi = 0x%llx ", i, + AW(context->aads[i]).hi); + pr_info("aad[%d].lo = 0x%llx\n", i, + AW(context->aads[i]).lo); + } + + for (i = 0; i < 8; i++) { + pr_info("aaincr[%d] = 0x%llx\n", i, (old_iset) ? + (u32) context->aaincrs[i] : + context->aaincrs[i]); + } + pr_info("aaincr_tags = 0x%x\n", context->aaincr_tags); + + for (i = 0; i < 16; i++) { + pr_info("aaind[%d] = 0x%llx\n", i, (old_iset) ? + (u64) (u32) context->aainds[i] : + context->aainds[i]); + } + pr_info("aaind_tags = 0x%x\n", context->aaind_tags); + } else { + pr_info("IAB flag in AASR is not set, following registers " + "will not be printed: AAD, AAIND, AAIND_TAGS, " + "AAINCR, AAINCR_TAGS\n"); + } + + if (AS(context->aasr).stb) { + for (i = 0; i < 16; i++) { + pr_info("aasti[%d] = 0x%llx\n", i, (old_iset) ? + (u64) (u32) context->aastis[i] : + context->aastis[i]); + } + pr_info("aasti_tags = 0x%x\n", context->aasti_tags); + } else { + pr_info("STB flag in AASR is not set, following registers " + "will not be printed: AASTI, AASTI_TAGS\n"); + } + + if (ti) { + for (i = 0; i < 32; i++) { + pr_info("aaldi[%d] = 0x%llx ", i, (old_iset) ? + (u64) (u32) context->aaldi[i] : + context->aaldi[i]); + pr_info("aaldi[%d] = 0x%llx\n", i+32, (old_iset) ? + (u64) (u32) context->aaldi[i+32] : + context->aaldi[i+32]); + } + + for (i = 0; i < 32; i++) { + pr_info("aalda[%d] = 0x%x ", i, AW(ti->aalda[i])); + pr_info("aalda[%d] = 0x%x\n", i + 32, + AW(ti->aalda[i+32])); + } + } + + pr_info("aafstr = 0x%x\n", read_aafstr_reg_value()); + pr_info("aafstr = 0x%x\n", context->aafstr); +} +#endif /* CONFIG_USE_AAU */ + +extern int debug_signal; +#define SIGDEBUG_PRINT(format, ...) 
\ +do { \ + if (debug_signal) \ + pr_info("%s (pid=%d): " format, \ + current->comm, current->pid, ##__VA_ARGS__); \ +} while (0) + +extern int debug_trap; + +#endif /* !(__ASSEMBLY__) */ + +#endif /* _E2K_DEBUG_H_ */ diff --git a/arch/e2k/include/asm/e2k_ptypes.h b/arch/e2k/include/asm/e2k_ptypes.h new file mode 100644 index 0000000..659063d --- /dev/null +++ b/arch/e2k/include/asm/e2k_ptypes.h @@ -0,0 +1,384 @@ + + +/* + * Descriptions of E2K tagged types + */ + +#ifndef _E2K_PTYPES_H_ +#define _E2K_PTYPES_H_ + + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include + + + + + /* + * Tagged values structures + */ + + + /* Address Pointers */ + +typedef union { /* High word of all pointers */ + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + } fields; + u64 word; +} e2k_ptr_hi_t; + +typedef union { + union { + struct { + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused : 59 - E2K_VA_SIZE; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + } ap; + struct { + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused : 11; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + } sap; + struct { + u64 unused1 : 59; /* [58: 0] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + } fields; + u64 word; +} e2k_ptr_lo_t; + +typedef union { /* Lower word of array pointer */ + union { + struct { + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused : 59 - E2K_VA_SIZE; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 __unused1 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 __unused2 : 3; /* [63:61] */ + }; + } fields; + u64 word; +} e2k_ap_lo_t; + +typedef union { /* Lower word of stack array pointer */ + union { + struct { + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused : 11; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 __unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 __unused3 : 3; /* [63:61] */ + }; + } fields; + u64 word; +} e2k_sap_lo_t; + +typedef struct { + union { + struct { + u64 base : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 59 - E2K_VA_SIZE; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + }; + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + }; +} e2k_ap_t; + +typedef struct { + union { + struct { + u64 base : 32; /* [31: 0] */ + u64 psl : 16; /* [47:32] */ + u64 unused1 : 11; /* [58:48] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + }; + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + }; +} e2k_sap_t; + +typedef union { /* Common array pointer */ + union { + e2k_ap_t ap; + e2k_sap_t sap; + struct { + /* Low word common fields */ + union { + struct { + u64 unused1 : 59; /* [58:0] */ + u64 rw : 2; /* [60:59] */ + u64 itag : 3; /* [63:61] */ + }; + struct { + u64 unused2 : 59; /* [58: 0] */ + u64 r : 1; /* [59:59] */ + u64 w : 1; /* [60:60] */ + u64 unused3 : 3; /* [63:61] */ + }; + }; + 
/* High word common fields */ + struct { + u64 curptr : 32; /* [31: 0] */ + u64 size : 32; /* [63:32] */ + }; + }; + } fields; + struct { + long lo; + long hi; + } word; +} e2k_ptr_t; + +#define R_ENABLE 0x1 +#define W_ENABLE 0x2 +#define RW_ENABLE 0x3 + +#define AP_ITAG_MASK 0xe000000000000000ULL +#define AP_ITAG_SHIFT 61 +#define AP_ITAG 0x0UL +#define SAP_ITAG 0x4UL + +#define __E2K_PTR_BASE(low, sbr_hi) \ +({ \ + e2k_ptr_lo_t lo; \ + AW(lo) = low; \ + (AS(lo).itag == AP_ITAG ? AS(lo).ap.base : (AS(lo).sap.base + (sbr_hi))); \ +}) +#define __E2K_PTR_PTR(low, hiw, sbr_hi) \ +({ \ + e2k_ptr_hi_t hi; \ + AW(hi) = hiw; \ + (__E2K_PTR_BASE(low, (sbr_hi)) + AS(hi).curptr); \ +}) + +#define E2K_PTR_BASE(p, sbr_hi) (AS(p).itag == AP_ITAG ? \ + AS(p).ap.base : (AS(p).sap.base + (sbr_hi))) + +#define E2K_PTR_PTR(p, sbr_hi) (E2K_PTR_BASE(p, (sbr_hi)) + AS(p).curptr) + +#define GET_SBR_HI() (current_thread_info()->u_stack.top & 0xffff00000000ULL) + + /* handling Address Pointers */ + +#define MAKE_AP_LO(area_base, area_size, off, access) \ +({ \ + e2k_ap_lo_t __lo; \ + AW(__lo) = 0UL; \ + AS(__lo).base = area_base; \ + AS(__lo).rw = access; \ + AS(__lo).itag = E2K_AP_ITAG; \ + AW(__lo); \ +}) + +#define MAKE_AP_HI(area_base, area_size, offs, access) \ +({ \ + union { \ + e2k_ptr_hi_t hi; \ + u64 w; \ + } u; \ + u.w = 0UL; \ + AS(u.hi).size = area_size; \ + AS(u.hi).curptr = offs; \ + u.w; \ +}) + +#define MAKE_SAP_LO(area_base, area_size, offs, access) \ +({ \ + e2k_rwsap_lo_struct_t sap_lo; \ + AS_WORD(sap_lo) = 0; \ + AS_SAP_STRUCT(sap_lo).base = area_base; \ + AS_SAP_STRUCT(sap_lo).rw = access; \ + AS_SAP_STRUCT(sap_lo).itag = E2K_SAP_ITAG; \ + AS_WORD(sap_lo); \ +}) + +#define MAKE_SAP_HI(area_base, area_size, offs, access) \ +({ \ + e2k_rwsap_hi_struct_t sap_hi; \ + AS_WORD(sap_hi) = 0; \ + AS_STRUCT(sap_hi).size = area_size; \ + AS_STRUCT(sap_hi).curptr = offs; \ + AS_WORD(sap_hi); \ +}) + +static inline e2k_ptr_t MAKE_AP(u64 base, u64 len) +{ + e2k_ptr_t ptr = {{0}}; + AW(ptr).lo = 0L | ((base & E2K_VA_MASK) | + ((u64)E2K_AP_ITAG << 61) | + ((u64)RW_ENABLE << 59)); + AW(ptr).hi = 0L | ((len & 0xFFFFFFFF) << 32); + return ptr; +} + + +/* + * Procedure Label (PL) + */ + +typedef struct pl_lo_fields { + u64 target : E2K_VA_SIZE; /* [47: 0] */ + u64 unused1 : 58 - E2K_VA_MSB; /* [58:48] */ + u64 pm : 1; /* [59] privileged mode */ + /* (affects only from v2) */ + u64 unused2 : 1; /* [60] */ + u64 itag : 3; /* [63:61] internal tag bits */ +} pl_lo_fields_t; +#define PL_PM_BIT 59 /* bit # of privileged label flag */ +#define PL_PM_MASK (1UL << PL_PM_BIT) +#define PL_ITAG_SHIFT 61 +#define PL_ITAG_NUM_BITS 3 /* size of field ITAG in bits */ +#define PL_ITAG_BITS_MASK ((1UL << PL_ITAG_NUM_BITS) - 1) +#define PL_ITAG_GET(pl_lo_word) (((pl_lo_word) >> PL_ITAG_SHIFT) & \ + ((1UL << PL_ITAG_NUM_BITS) - 1)) +#define PL_ITAG_SET(pl_lo_word, itag) \ + (((pl_lo_word) & ~(PL_ITAG_BITS_MASK << PL_ITAG_SHIFT)) | \ + (((itag) & PL_ITAG_BITS_MASK) << PL_ITAG_SHIFT)) + +typedef struct pl_hi_fields { + u64 cui : 16; /* [15: 0] compilation unit index */ + u64 unused3 : 48; /* [63:16] */ +} pl_hi_fields_t; + +typedef union e2k_pl_lo { + struct { + u64 target : E2K_VA_SIZE; + u64 unused1 : 58 - E2K_VA_MSB; + u64 pm : 1; + u64 unused2 : 1; + u64 itag : 3; + }; + pl_lo_fields_t fields; + u64 word; +} e2k_pl_lo_t; +#define PL_lo_target fields.target +#define PL_lo_itag fields.itag +#define PL_lo_pm fields.pm +#define PL_lo_value word + +typedef union e2k_pl_hi { + pl_hi_fields_t fields; + u64 word; +} e2k_pl_hi_t; 
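+
+/*
+ * Editorial sketch, not part of the original header: a minimal example of
+ * reading a procedure label through the e2k_pl_lo_t accessors defined
+ * above. The helper name is hypothetical and purely illustrative.
+ */
+static inline u64 example_pl_lo_target(u64 pl_lo_word)
+{
+	e2k_pl_lo_t pl_lo;
+
+	pl_lo.PL_lo_value = pl_lo_word;	/* view the raw word as bitfields */
+	return pl_lo.PL_lo_target;	/* bits [47:0]: transfer target IP */
+}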
+#define PL_hi_cui fields.cui +#define PL_hi_value word + +typedef struct e2k_pl { + e2k_pl_lo_t lo; + e2k_pl_hi_t hi; +} e2k_pl_t; + +#define PL_target lo.PL_lo_target +#define PL_itag lo.PL_lo_itag +#define PL_pm lo.PL_lo_pm +#define PL_cui hi.PL_hi_cui +#define PLLO_value lo.PL_lo_value +#define PLHI_value hi.PL_hi_value +#define PLLO_item lo +#define PLHI_item hi +#define IS_PL_ITAG(pl_lo_word) (PL_ITAG_GET(pl_lo_word) == E2K_PL_ITAG) + +static inline e2k_pl_t DO_MAKE_PL_V2(u64 addr, bool pm) +{ + e2k_pl_t p; + e2k_pl_lo_t pl; + + pl.PL_lo_value = 0; + pl.PL_lo_target = addr; + pl.PL_lo_pm = pm; + pl.PL_lo_itag = E2K_PL_V2_ITAG; + p.lo = pl; + p.hi.word = 0L; + return p; +} + +static inline e2k_pl_t DO_MAKE_PL_V6(u64 addr, bool pm, unsigned int cui) +{ + e2k_pl_t pl; + + pl = DO_MAKE_PL_V2(addr, pm); + pl.PL_itag = E2K_PL_ITAG; + pl.PLHI_value = 0; + pl.PL_cui = cui; + return pl; +} + +static inline e2k_pl_t MAKE_PL_V2(u64 addr) +{ + return DO_MAKE_PL_V2(addr, false); +} + +static inline e2k_pl_t MAKE_PL_V6(u64 addr, unsigned int cui) +{ + return DO_MAKE_PL_V6(addr, false, cui); +} + +static inline e2k_pl_t MAKE_PL(u64 addr, unsigned int cui) +{ + return MAKE_PL_V6(addr, cui); +} + +static inline e2k_pl_t MAKE_PRIV_PL(u64 addr, unsigned int cui) +{ + return DO_MAKE_PL_V6(addr, true, cui); +} + +static inline e2k_pl_lo_t DO_MAKE_INTEGER_PL(u64 addr) +{ + e2k_pl_lo_t pl_lo; + + pl_lo.PL_lo_value = 0; + pl_lo.PL_lo_target = addr; + return pl_lo; +} +#define MAKE_INTEGER_PL(func_p) \ + ((typeof(func_p))(DO_MAKE_INTEGER_PL((u64)func_p).PL_lo_value)) + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_PTYPES_H_ */ diff --git a/arch/e2k/include/asm/e2k_sic.h b/arch/e2k/include/asm/e2k_sic.h new file mode 100644 index 0000000..acfb459 --- /dev/null +++ b/arch/e2k/include/asm/e2k_sic.h @@ -0,0 +1,130 @@ +#ifndef _ASM_E2K_SIC_H_ +#define _ASM_E2K_SIC_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * NBR area configuration + */ +#define E2K_NSR_AREA_PHYS_BASE (machine.get_nsr_area_phys_base()) +#define E2K_NSR_AREA_SIZE (machine.nbsr_area_size) +#define E2K_NBSR_OFFSET (machine.nbsr_area_offset) +#define E2K_NBSR_SIZE (machine.nbsr_area_size) +#define E2K_COPSR_AREA_PHYS_BASE (machine.copsr_area_phys_base) +#define E2K_COPSR_AREA_SIZE (machine.copsr_area_size) + +#define BOOT_NSR_AREA_PHYS_BASE (boot_machine.nsr_area_phys_base) +#define BOOT_NSR_AREA_SIZE (boot_machine.nbsr_area_size) +#define BOOT_NBSR_OFFSET (boot_machine.nbsr_area_offset) +#define BOOT_NBSR_SIZE (boot_machine.nbsr_area_size) +#define BOOT_COPSR_AREA_PHYS_BASE (boot_machine.copsr_area_phys_base) +#define BOOT_COPSR_AREA_SIZE (boot_machine.copsr_area_size) + +/* + * Nodes system registers area - NSR = { NSR0 ... NSRj ... } + * NSR is some part of common system communicator area SR + */ +#define NODE_NSR_SIZE E2K_NSR_AREA_SIZE +#define THE_NODE_NSR_PHYS_BASE(node) \ + (E2K_NSR_AREA_PHYS_BASE + (node * NODE_NSR_SIZE)) + +#define BOOT_NODE_NSR_SIZE BOOT_NSR_AREA_SIZE +#define BOOT_THE_NODE_NSR_PHYS_BASE(node) \ + (BOOT_NSR_AREA_PHYS_BASE + (node * BOOT_NODE_NSR_SIZE)) + +/* + * Nodes processor system registers (north bridge) + * NBSR = { NBSR0 ... NBSRj ... 
} + * NBSR is some part of node system registers area NSR + */ +#define NODE_NBSR_SIZE E2K_NBSR_SIZE +#define NODE_NBSR_OFFSET E2K_NBSR_OFFSET +#define THE_NODE_NBSR_PHYS_BASE(node) \ + ((unsigned char *)(THE_NODE_NSR_PHYS_BASE(node) + \ + NODE_NBSR_OFFSET)) + +#define BOOT_NODE_NBSR_SIZE BOOT_NBSR_SIZE +#define BOOT_NODE_NBSR_OFFSET BOOT_NBSR_OFFSET +#define BOOT_THE_NODE_NBSR_PHYS_BASE(node) \ + ((unsigned char *)(BOOT_THE_NODE_NSR_PHYS_BASE(node) + \ + BOOT_NODE_NBSR_OFFSET)) + +/* + * Nodes system coprocessors registers area - COPSR = { COPSR0 ... COPSRj ... } + */ +#define NODE_COPSR_SIZE E2K_COPSR_AREA_SIZE +#define THE_NODE_COPSR_PHYS_BASE(node) \ + (E2K_COPSR_AREA_PHYS_BASE + (node * NODE_COPSR_SIZE)) + +extern unsigned char *nodes_nbsr_base[MAX_NUMNODES]; +extern phys_addr_t nodes_nbsr_phys_base[MAX_NUMNODES]; + +extern void boot_e2k_sic_setup_arch(void); +#ifndef CONFIG_E2K_MACHINE +extern int boot_get_e2k_machine_id(void); +#endif + +extern int __init e2k_sic_init(void); +extern int __init e2k_early_iohub_online(int node, int link); + +static inline e2k_addr_t sic_get_io_area_base(void) +{ + return machine.x86_io_area_base; +} + +static inline e2k_addr_t sic_get_io_area_size(void) +{ + return machine.x86_io_area_size; +} +extern e2k_addr_t sic_get_io_area_max_size(void); + +static inline unsigned char *sic_get_node_nbsr_base(int node_id) +{ + return nodes_nbsr_base[node_id]; +} + +static inline phys_addr_t sic_get_node_nbsr_phys_base(int node_id) +{ + phys_addr_t base = nodes_nbsr_phys_base[node_id]; + VM_BUG_ON(!base); + return base; +} + +#define sic_domain_pci_conf_size() (machine.pcicfg_area_size) +#define sic_domain_pci_conf_base(domain) \ + (machine.pcicfg_area_phys_base + \ + sic_domain_pci_conf_size() * ((unsigned long)domain)) + +#define boot_sic_domain_pci_conf_base(domain) \ + (boot_machine.pcicfg_area_phys_base + \ + boot_machine.pcicfg_area_size * ((unsigned long)domain)) + +extern unsigned long domain_to_pci_conf_base[]; + +static inline unsigned long +domain_pci_conf_base(unsigned int domain) +{ + return domain_to_pci_conf_base[domain]; +} + +static inline unsigned long +domain_pci_conf_size(unsigned int domain) +{ + return sic_domain_pci_conf_size(); +} + +#endif /* _ASM_E2K_SIC_H_ */ diff --git a/arch/e2k/include/asm/e2k_syswork.h b/arch/e2k/include/asm/e2k_syswork.h new file mode 100644 index 0000000..f02d7fb --- /dev/null +++ b/arch/e2k/include/asm/e2k_syswork.h @@ -0,0 +1,102 @@ +#ifndef _E2K_SYSWORK_H_ +#define _E2K_SYSWORK_H_ + +#include + + +/****************************/ +#define TIME_SHARE 1 +/****************************/ + +/* info_for_other for work_for_other_cpu() */ + +typedef struct info_for_other { + int work; + int wait; +} info_for_other_t; + +#define PRINT_STK_ON_OTHER 1 +#define PRINT_FUNCY_ON_OTHER 2 +#define WAIT_ON_OTHER 3 + +/****************************/ + +/* info_instr_exec for instr_exec() */ + +typedef struct info_instr_exec { + int instr_type; + long addr1; + long addr2; + long val_1; + long val_2; +} info_instr_exec_t; + +#define PAR_WRITE 1 +#define PAR_READ 2 +/****************************/ + +/****************************/ +/* get task pages info for PRINT_STATM syswork */ +typedef struct user_mm { + int size; + int resident; + int text; + int data; + int shared; +} task_pages_info_t; +/****************************/ + +extern void print_all_task(void); +extern long ide_info(long what); +#ifdef CONFIG_PAGE_DOUBLE_FREE_ENTRAP +extern void save_kernel_chine_stack(struct page *page); +#endif /* CONFIG_PAGE_DOUBLE_FREE_ENTRAP */ + 
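+
+/*
+ * Editorial sketch, not part of the original header: an illustrative way
+ * to fill the info_instr_exec_t request block declared above for a
+ * PAR_READ operation. The helper name is hypothetical; judging by the
+ * comment above, the structure is consumed by instr_exec().
+ */
+static inline void example_fill_instr_read(info_instr_exec_t *info, long addr)
+{
+	info->instr_type = PAR_READ;	/* read request */
+	info->addr1 = addr;		/* address to read from */
+	info->addr2 = 0;		/* unused for a single read */
+	info->val_1 = 0;		/* result slots for the callee */
+	info->val_2 = 0;
+}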
+extern int is_kernel_address_valid(e2k_addr_t address); + + +/* This macro fills missing arguments with "(u64) (0)". */ +#define EXPAND_ARGS_TO_8(...) \ + __EXPAND_ARGS_TO_8(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0) +#define __EXPAND_ARGS_TO_8(fmt, a1, a2, a3, a4, a5, a6, a7, ...) \ + fmt, (u64) (a1), (u64) (a2), (u64) (a3), \ + (u64) (a4), (u64) (a5), (u64) (a6), (u64) (a7) + +/* This macro is used to avoid printks with variable number of arguments + * inside of functions with __check_stack attribute. + * + * If a call to printk has less than 8 parameters the macro sets any missing + * arguments to (u64) (0). + * + * NOTE: maximum number of arguments that can be passed to a function + * from within an __interrupt function is 8! */ +#define printk_fixed_args(...) \ + __printk_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) +#define __trace_bprintk_fixed_args(...) \ + ____trace_bprintk_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) +#define panic_fixed_args(...) \ + __panic_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) +#define delay_printk_fixed_args(...) \ + __delay_printk_fixed_args(EXPAND_ARGS_TO_8(__VA_ARGS__)) + +extern void __printk_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7); +extern void __panic_fixed_args(char *fmt, + u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6, u64 a7) + __noreturn; +#ifdef CONFIG_TRACING +extern void ____trace_bprintk_fixed_args(unsigned long ip, + char *fmt, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); +#endif + +long do_longjmp(u64 retval, u64 jmp_sigmask, e2k_cr0_hi_t jmp_cr0_hi, + e2k_cr1_lo_t jmp_cr1_lo, e2k_pcsp_lo_t jmp_pcsp_lo, + e2k_pcsp_hi_t jmp_pcsp_hi, u32 jmp_br, u32 jmp_psize, + u32 fpcr, u32 fpsr, u32 pfpfr, bool restore_fpu); + +long write_current_chain_stack(unsigned long dst, void __user *buf, + unsigned long size); +long copy_current_proc_stack(void __user *buf, void __user *p_stack, + unsigned long size, int write, unsigned long ps_used_top); + +#endif /* _E2K_SYSWORK_H_ */ diff --git a/arch/e2k/include/asm/e2s.h b/arch/e2k/include/asm/e2s.h new file mode 100644 index 0000000..7557222 --- /dev/null +++ b/arch/e2k/include/asm/e2s.h @@ -0,0 +1,62 @@ +#ifndef _ASM_E2S_H_ +#define _ASM_E2S_H_ + +/* + * Machine (based on E4C processor) topology: + * E4C is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 4 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e2s_setup_arch(void); +extern void e2s_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +extern void sic_error_interrupt(struct pt_regs *regs); +#endif + +#define E2S_CPU_VENDOR ES2_CPU_VENDOR +#define E2S_CPU_FAMILY ES2_CPU_FAMILY + +#define E2S_NR_NODE_CPUS 4 +#define E2S_MAX_NR_NODE_CPUS E2S_NR_NODE_CPUS + +#define E2S_NODE_IOLINKS 1 + +#define E2S_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E2S_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E2S_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E2S_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E2S_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E2S_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E2S_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E2S_MLT_SIZE ES2_MLT_SIZE + +#define E2S_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E2S_TLB_ADDR_LINE_NUM ES2_TLB_ADDR_LINE_NUM +#define E2S_TLB_ADDR_LINE_NUM2 0x000000001fe00000 +#define E2S_TLB_ADDR_LINE_NUM_SHIFT2 21 +#define E2S_TLB_ADDR_SET_NUM ES2_TLB_ADDR_SET_NUM +#define E2S_TLB_ADDR_SET_NUM_SHIFT ES2_TLB_ADDR_SET_NUM_SHIFT + +#define E2S_SIC_MC_SIZE 0xa4 +#define E2S_SIC_MC_COUNT 3 +#define E2S_SIC_MC1_ECC 0x440 + +#define E2S_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E2S_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E2S_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E2S_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E2S_L2_CACHE_BYTES ES2_L2_CACHE_BYTES + +#endif /* _ASM_E2S_H_ */ diff --git a/arch/e2k/include/asm/e8c.h b/arch/e2k/include/asm/e8c.h new file mode 100644 index 0000000..b106ec7 --- /dev/null +++ b/arch/e2k/include/asm/e8c.h @@ -0,0 +1,64 @@ +#ifndef _ASM_E8C_H_ +#define _ASM_E8C_H_ + +/* + * Machine (based on E8C processor) topology: + * E8C is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 8 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e8c_setup_arch(void); +extern void e8c_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +extern void sic_error_interrupt(struct pt_regs *regs); +#endif + +#define E8C_CPU_VENDOR ES2_CPU_VENDOR +#define E8C_CPU_FAMILY ES2_CPU_FAMILY + +#define E8C_NR_NODE_CPUS 8 +#define E8C_MAX_NR_NODE_CPUS 16 + +#define E8C_NODE_IOLINKS 1 + +#define E8C_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E8C_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E8C_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E8C_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E8C_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E8C_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E8C_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E8C_MLT_SIZE ES2_MLT_SIZE + +#define E8C_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E8C_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E8C_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E8C_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E8C_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E8C_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E8C_SIC_MC_SIZE 0xe4 +#define E8C_SIC_MC_COUNT 4 +#define E8C_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E8C_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E8C_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E8C_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E8C_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E8C_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E8C_L3_CACHE_SHIFT 6 +#define E8C_L3_CACHE_BYTES (1 << E8C_L3_CACHE_SHIFT) + +#endif /* _ASM_E8C_H_ */ diff --git a/arch/e2k/include/asm/e8c2.h b/arch/e2k/include/asm/e8c2.h new file mode 100644 index 0000000..5101212 --- /dev/null +++ b/arch/e2k/include/asm/e8c2.h @@ -0,0 +1,64 @@ +#ifndef _ASM_E8C2_H_ +#define _ASM_E8C2_H_ + +/* + * Machine (based on E8C2 processor) topology: + * E8C2 is NUMA system on distributed memory and can have several nodes. 
+ * Each node can have some memory (faster to access) and max 8 CPUs (cores) + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_e8c2_setup_arch(void); +extern void e8c2_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +extern void sic_error_interrupt(struct pt_regs *regs); +#endif + +#define E8C2_CPU_VENDOR ES2_CPU_VENDOR +#define E8C2_CPU_FAMILY 5 + +#define E8C2_NR_NODE_CPUS E8C_NR_NODE_CPUS +#define E8C2_MAX_NR_NODE_CPUS E8C_MAX_NR_NODE_CPUS + +#define E8C2_NODE_IOLINKS E8C_NODE_IOLINKS + +#define E8C2_PCICFG_AREA_PHYS_BASE ES2_PCICFG_AREA_PHYS_BASE +#define E8C2_PCICFG_AREA_SIZE ES2_PCICFG_AREA_SIZE + +#define E8C2_NSR_AREA_PHYS_BASE ES2_NSR_AREA_PHYS_BASE + +#define E8C2_NBSR_AREA_OFFSET ES2_NBSR_AREA_OFFSET +#define E8C2_NBSR_AREA_SIZE ES2_NBSR_AREA_SIZE + +#define E8C2_COPSR_AREA_PHYS_BASE ES2_COPSR_AREA_PHYS_BASE +#define E8C2_COPSR_AREA_SIZE ES2_COPSR_AREA_SIZE + +#define E8C2_MLT_SIZE ES2_MLT_SIZE + +#define E8C2_TLB_LINES_BITS_NUM ES2_TLB_LINES_BITS_NUM +#define E8C2_TLB_ADDR_LINE_NUM E2S_TLB_ADDR_LINE_NUM +#define E8C2_TLB_ADDR_LINE_NUM2 E2S_TLB_ADDR_LINE_NUM2 +#define E8C2_TLB_ADDR_LINE_NUM_SHIFT2 E2S_TLB_ADDR_LINE_NUM_SHIFT2 +#define E8C2_TLB_ADDR_SET_NUM E2S_TLB_ADDR_SET_NUM +#define E8C2_TLB_ADDR_SET_NUM_SHIFT E2S_TLB_ADDR_SET_NUM_SHIFT + +#define E8C2_SIC_MC_SIZE 0xf4 +#define E8C2_SIC_MC_COUNT E8C_SIC_MC_COUNT +#define E8C2_SIC_MC1_ECC E2S_SIC_MC1_ECC + +#define E8C2_CLOCK_TICK_RATE ES2_CLOCK_TICK_RATE + +#define E8C2_L1_CACHE_SHIFT ES2_L1_CACHE_SHIFT +#define E8C2_L1_CACHE_BYTES ES2_L1_CACHE_BYTES +#define E8C2_L2_CACHE_SHIFT ES2_L2_CACHE_SHIFT +#define E8C2_L2_CACHE_BYTES ES2_L2_CACHE_BYTES +#define E8C2_L3_CACHE_SHIFT E8C_L3_CACHE_SHIFT +#define E8C2_L3_CACHE_BYTES E8C_L3_CACHE_BYTES + +#endif /* _ASM_E8C2_H_ */ diff --git a/arch/e2k/include/asm/el_posix.h b/arch/e2k/include/asm/el_posix.h new file mode 100644 index 0000000..c89e06a --- /dev/null +++ b/arch/e2k/include/asm/el_posix.h @@ -0,0 +1,59 @@ +#ifndef _ASM_EL_POSIX_ATOMIC_H +#define _ASM_EL_POSIX_ATOMIC_H + +#ifdef CONFIG_HAVE_EL_POSIX_SYSCALL +#ifdef __KERNEL__ +#include +#include + +#define ARCH_HAS_GET_CYCLES + +#define ARCH_HAS_ATOMIC_CMPXCHG + +static int __el_atomic_cmpxchg(int *x, int *uaddr, int oldval, int newval) +{ + int rval; + + TRY_USR_PFAULT { + *x = cmpxchg(uaddr, oldval, newval); + rval = 0; + } CATCH_USR_PFAULT { + DebugUAF("%s (%d) - %s : " + "el_atomic_cmpxchg data fault %px(%ld)\n", + __FILE__, __LINE__, __FUNCTION__, + (uaddr), (sizeof(*uaddr))); + rval = -EFAULT; + } END_USR_PFAULT + + return rval; +} + +#define el_atomic_cmpxchg_acq(x, uaddr, oldval, newval) \ + __el_atomic_cmpxchg(&x, uaddr, oldval, newval) +#define el_atomic_cmpxchg_rel(x, uaddr, oldval, newval) \ + __el_atomic_cmpxchg(&x, uaddr, oldval, newval) + +#define el_atomic_xchg_acq(x, uaddr, value) \ + __el_atomic_xchg_acq(&x, uaddr, value) + +static int __el_atomic_xchg_acq(int *x, int *uaddr, const int value) +{ + int rval; + + TRY_USR_PFAULT { + *x = xchg(uaddr, value); + rval = 0; + } CATCH_USR_PFAULT { + DebugUAF("%s (%d) - %s : " + "el_atomic_xchg data fault %px(%ld)\n", + __FILE__, __LINE__, __FUNCTION__, + (uaddr), (sizeof(*uaddr))); + rval = -EFAULT; + } END_USR_PFAULT + + return rval; +} + +#endif +#endif +#endif diff --git a/arch/e2k/include/asm/elf.h 
b/arch/e2k/include/asm/elf.h new file mode 100644 index 0000000..0b1d6a2 --- /dev/null +++ b/arch/e2k/include/asm/elf.h @@ -0,0 +1,172 @@ +#ifndef _E2K_ELF_H_ +#define _E2K_ELF_H_ + +/* + * ELF register definitions. + */ + +#include +#include +#include +#include + +#define PT_E2K_TAGS 0x70000000 + +/* + * e2k relocation types + */ +#define R_E2K_NONE 0 +#define R_E2K_32_PC 2 +#define R_E2K_64_ABS 50 /* Direct 64 bit */ +#define R_E2K_64_ABS_LIT 51 /* Direct 64 bit for LTS syllable */ +#define R_E2K_64_CALL 52 /* PC relative 64 bit for DISP */ +#define R_E2K_DISP 110 /* PC relative 28-bit for DISP */ + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_ARCH_FAKE EM_E2K_FAKE +#define ELF_ARCH EM_E2K +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB + +// #define CORE_DUMP_USE_REGSET !!!! + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ + +#define elf_check_arch(x) \ + ( (((x)->e_machine == ELF_ARCH && \ + ((x)->e_flags & ELF_E2K_PM) == 0) || \ + ((x)->e_machine == ELF_ARCH_FAKE && \ + (x)->e_ident[EI_SEMANTIC] == ELF_CODE_64_UNPROTECTED)) && \ + (x)->e_ident[EI_CLASS] == ELFCLASS64 && \ + elf_check_e2k_mtype(x) \ + ) + +#define compat_elf_check_arch(x) \ + ( (((x)->e_machine == ELF_ARCH && \ + ((x)->e_flags & ELF_E2K_PM) == 0) || \ + ((x)->e_machine == ELF_ARCH_FAKE && \ + (x)->e_ident[EI_SEMANTIC] == ELF_CODE_32_UNPROTECTED)) && \ + (x)->e_ident[EI_CLASS] == ELFCLASS32 && \ + elf_check_e2k_mtype(x) \ + ) + +/* General registers */ + +typedef unsigned long long elf_greg_t; + +typedef struct user_regs_struct elf_gregset_t; + +/* Floating point registers */ + +/* + * NEEDSWORK: Take care of floating point registers too! + */ + +/* just to get things compiled */ +#define ELF_NFPREG 32 + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* Additional symbol types. */ + +#define STT_PRIVATE 5 +#define STT_INIT_FUNC 6 +#define STT_FINI_FUNC 7 + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 +//#define CORE_DUMP_USE_REGSET + +#ifdef __KERNEL__ +/* #define ELF_CORE_COPY_REGS(gregs, regs) \ + memcpy(gregs, regs, sizeof(struct pt_regs)); */ + +/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is + now struct_user_regs, they are different) */ + +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ + core_pt_regs_to_user_regs(regs, (struct user_regs_struct*) (&pr_reg)); +extern void core_pt_regs_to_user_regs (struct pt_regs *pt_regs, + struct user_regs_struct *user_regs); +#endif /* __KERNEL__ */ + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (NULL) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 
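For illustration (hypothetical numbers, not from this header): if TASK_SIZE were 2^47 = 0x800000000000, the integer arithmetic below would put ELF_ET_DYN_BASE at 2 * 0x800000000000 / 3 = 0x555555555555. 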
*/ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) /* NEEDSWORK */ +#define COMPAT_ELF_ET_DYN_BASE (2 * TASK32_SIZE / 3) + +#ifdef __KERNEL__ +#define SET_PERSONALITY(ex) \ +do { \ + current->thread.flags &= ~E2K_FLAG_64BIT_BINCO; \ + if (((ex).e_machine == ELF_ARCH && \ + ((ex).e_flags & ELF_E2K_PM)) || \ + ((ex).e_machine == ELF_ARCH_FAKE && \ + ((ex).e_ident[EI_SEMANTIC] == ELF_CODE_NEW_PROTECTED || \ + (ex).e_ident[EI_SEMANTIC] == ELF_CODE_NEW_PROTECTED_CXX))) { \ + current->thread.flags |= E2K_FLAG_PROTECTED_MODE; \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ + current->thread.flags |= E2K_FLAG_3P_ELF32; \ + } else { \ + current->thread.flags &= ~ E2K_FLAG_3P_ELF32; \ + } \ + } else { \ + current->thread.flags &= ~(E2K_FLAG_PROTECTED_MODE | \ + E2K_FLAG_3P_ELF32); \ + } \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + current->thread.flags |= E2K_FLAG_32BIT; \ + else \ + current->thread.flags &= ~E2K_FLAG_32BIT; \ + if ((ex).e_flags & ELF_BIN_COMP) \ + current->thread.flags |= E2K_FLAG_BIN_COMP_CODE; \ + else \ + current->thread.flags &= ~E2K_FLAG_BIN_COMP_CODE; \ +} while (0) +#endif + +#define FAST_SYSCALLS_ENTRY 0x1f +/* + * SYSTEM_INFO_ENTRY: + * 0x1: vfork() supported + */ +#define SYSTEM_INFO_ENTRY 0x1 +#define E2K_DLINFO \ +do { \ + NEW_AUX_ENT(AT_FAST_SYSCALLS, FAST_SYSCALLS_ENTRY); \ + NEW_AUX_ENT(AT_SYSTEM_INFO, SYSTEM_INFO_ENTRY); \ +} while (0) + +#define ARCH_DLINFO E2K_DLINFO +#define COMPAT_ARCH_DLINFO E2K_DLINFO + +/* + * Support for tags dumping + */ +extern unsigned long vma_dump_size(struct vm_area_struct *vma, + unsigned long mm_flags); + +#endif /* _E2K_ELF_H_ */ diff --git a/arch/e2k/include/asm/epic.h b/arch/e2k/include/asm/epic.h new file mode 100644 index 0000000..88650d8 --- /dev/null +++ b/arch/e2k/include/asm/epic.h @@ -0,0 +1,95 @@ +#ifndef __ASM_E2K_EPIC_H +#define __ASM_E2K_EPIC_H + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +static inline bool cpu_has_epic(void) +{ + if (cpu_has(CPU_FEAT_EPIC)) + return true; + else + return false; +} + +static inline unsigned get_current_epic_core_priority(void) +{ +#ifdef CONFIG_EPIC + return current_thread_info()->pt_regs->epic_core_priority; +#else + return 0; +#endif +} + +static inline void set_current_epic_core_priority(unsigned p) +{ +#ifdef CONFIG_EPIC + current_thread_info()->pt_regs->epic_core_priority = p; +#endif +} + +/* + * Basic functions accessing EPICs. 
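+ * + * The _w/_d suffixes select 32-/64-bit MMIO at EPIC_DEFAULT_PHYS_BASE + reg, + * and the _guest_ variants below simply rebase reg by CEPIC_GUEST, e.g. + * epic_write_guest_w(r, v) expands to epic_write_w(CEPIC_GUEST + r, v). 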
+ */ +static inline void epic_write_w(unsigned int reg, u32 v) +{ + boot_writel(v, (void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline u32 epic_read_w(unsigned int reg) +{ + return boot_readl((void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline void epic_write_d(unsigned int reg, u64 v) +{ + boot_writeq(v, (void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline u64 epic_read_d(unsigned int reg) +{ + return boot_readq((void __iomem *) (EPIC_DEFAULT_PHYS_BASE + reg)); +} + +static inline void boot_epic_write_w(unsigned int reg, u32 v) +{ + epic_write_w(reg, v); +} + +static inline u32 boot_epic_read_w(unsigned int reg) +{ + return epic_read_w(reg); +} + +static inline void epic_write_guest_w(unsigned int reg, unsigned int v) +{ + epic_write_w(CEPIC_GUEST + reg, v); +} + +static inline unsigned int epic_read_guest_w(unsigned int reg) +{ + return epic_read_w(CEPIC_GUEST + reg); +} + +static inline void epic_write_guest_d(unsigned int reg, unsigned long v) +{ + epic_write_d(CEPIC_GUEST + reg, v); +} + +static inline unsigned long epic_read_guest_d(unsigned int reg) +{ + return epic_read_d(CEPIC_GUEST + reg); +} + +#include + +#endif /* !(__ASSEMBLY__) */ +#endif /* __KERNEL__ */ +#endif /* __ASM_E2K_EPIC_H */ diff --git a/arch/e2k/include/asm/epic_regs.h b/arch/e2k/include/asm/epic_regs.h new file mode 100644 index 0000000..d823ab9 --- /dev/null +++ b/arch/e2k/include/asm/epic_regs.h @@ -0,0 +1,4 @@ +#ifndef __ASM_E2K_EPIC_REGS_H +#define __ASM_E2K_EPIC_REGS_H +#include +#endif /* __ASM_E2K_EPIC_REGS_H */ diff --git a/arch/e2k/include/asm/epicdef.h b/arch/e2k/include/asm/epicdef.h new file mode 100644 index 0000000..94673c4 --- /dev/null +++ b/arch/e2k/include/asm/epicdef.h @@ -0,0 +1,13 @@ +#ifndef _ASM_E2K_EPICDEF_H +#define _ASM_E2K_EPICDEF_H + +/* + * Constants for e2k EPICs (CEPIC, IOEPIC) + */ + +#define IO_EPIC_DEFAULT_PHYS_BASE 0xfec00000 +#define EPIC_DEFAULT_PHYS_BASE 0xfee00000 + + +#include +#endif /* _ASM_E2K_EPICDEF_H */ diff --git a/arch/e2k/include/asm/errors_hndl.h b/arch/e2k/include/asm/errors_hndl.h new file mode 100644 index 0000000..d47653f --- /dev/null +++ b/arch/e2k/include/asm/errors_hndl.h @@ -0,0 +1,68 @@ +/* $Id: errors_hndl.h,v 1.6 2009/01/22 17:10:07 atic Exp $ + * + * Handling of errors of boot-time & initialization. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_ERRORS_HNDL_H +#define _E2K_ERRORS_HNDL_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +extern void init_bug(const char *fmt_v, ...) __noreturn __cold; +extern void init_warning(const char *fmt_v, ...) __cold; + +extern void boot_bug(const char *fmt_v, ...) __noreturn __cold; +extern void boot_warning(const char *fmt_v, ...) __cold; +#define BOOT_BUG_POINT(func_name) \ + do_boot_printk("kernel boot-time BUG at %s:%d:%s\n", __FILE__, \ + __LINE__, func_name) +#define BOOT_BUG_ON(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) { \ + do_boot_printk("kernel boot-time BUG at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + boot_bug(format); \ + } \ + unlikely(__ret_warn_on); \ +}) +#define BOOT_BUG(format...) \ +do { \ + do_boot_printk("kernel boot-time BUG at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + boot_bug(format); \ +} while (0) + +#define BOOT_WARNING(format...) 
\ +do { \ + do_boot_printk("kernel boot-time WARNING at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + boot_warning(format); \ +} while (0) + +#define boot_native_panic(fmt, args...) \ + do_boot_printk(fmt, ##args) + +#define init_printk dump_printk +#define init_vprintk dump_vprintk +#define INIT_BUG(format...) \ +do { \ + init_printk("kernel init-time BUG at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + init_bug(format); \ +} while (0) +#define INIT_WARNING(format...) \ +do { \ + init_printk("kernel init-time WARNING at %s:%d:%s\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + init_warning(format); \ +} while (0) + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(_E2K_ERRORS_HNDL_H) */ diff --git a/arch/e2k/include/asm/es2.h b/arch/e2k/include/asm/es2.h new file mode 100644 index 0000000..5da081d --- /dev/null +++ b/arch/e2k/include/asm/es2.h @@ -0,0 +1,63 @@ +#ifndef _ASM_ES2_H_ +#define _ASM_ES2_H_ + +/* + * Machine (based on E2C+ processor) topology: + * E2C+ is NUMA system on distributed memory and can have several nodes. + * Each node can have some memory (faster to access) and max 4 CPUs (cores), + * but real processor chip has only two cores (2 other should be considered + * as always disabled). So online CPU numbers will be 0, 1, 4, 5, 8, 9 ... + * Node number is the same as chip-processor number + * Some nodes (CPUs) can be without memory + * LAPIC cluster number is the same as node number + */ + +#ifndef __ASSEMBLY__ +struct pt_regs; + +extern void boot_es2_setup_arch(void); +extern void es2_setup_machine(void); +extern void setup_APIC_vector_handler(int vector, + void (*handler)(struct pt_regs *), bool system, char *name); +extern void eldsp_interrupt(struct pt_regs *regs); +#endif + +#define ES2_CPU_VENDOR "Elbrus-MCST" +#define ES2_CPU_FAMILY 4 + +#define ES2_NR_NODE_CPUS 2 +#define ES2_MAX_NR_NODE_CPUS 4 + +#define ES2_NODE_IOLINKS 2 + +#define ES2_PCICFG_AREA_PHYS_BASE 0x0000000200000000UL +#define ES2_PCICFG_AREA_SIZE 0x0000000010000000UL + +#define ES2_NSR_AREA_PHYS_BASE 0x0000000110000000UL + +#define ES2_NBSR_AREA_OFFSET 0x0000000000000000UL +#define ES2_NBSR_AREA_SIZE 0x0000000000100000UL + +#define ES2_COPSR_AREA_PHYS_BASE 0x00000001c0000000UL +#define ES2_COPSR_AREA_SIZE 0x0000000001000000UL + +#define ES2_MLT_SIZE 16 + +#define ES2_TLB_LINES_BITS_NUM 8 +#define ES2_TLB_ADDR_LINE_NUM 0x00000000000ff000 +#define ES2_TLB_ADDR_LINE_NUM2 0x000000003fc00000 +#define ES2_TLB_ADDR_LINE_NUM_SHIFT2 22 +#define ES2_TLB_ADDR_SET_NUM 0x0000000000000018 +#define ES2_TLB_ADDR_SET_NUM_SHIFT 3 + +#define ES2_SIC_MC_COUNT 2 +#define ES2_SIC_MC1_ECC 0x500 + +#define ES2_CLOCK_TICK_RATE 10000000 + +#define ES2_L1_CACHE_SHIFT 5 +#define ES2_L1_CACHE_BYTES (1 << ES2_L1_CACHE_SHIFT) +#define ES2_L2_CACHE_SHIFT 6 +#define ES2_L2_CACHE_BYTES (1 << ES2_L2_CACHE_SHIFT) + +#endif /* _ASM_ES2_H_ */ diff --git a/arch/e2k/include/asm/exec.h b/arch/e2k/include/asm/exec.h new file mode 100644 index 0000000..6bb2028 --- /dev/null +++ b/arch/e2k/include/asm/exec.h @@ -0,0 +1,3 @@ +#include + +#define arch_align_stack(x) (x) diff --git a/arch/e2k/include/asm/fast_syscalls.h b/arch/e2k/include/asm/fast_syscalls.h new file mode 100644 index 0000000..9dcab84 --- /dev/null +++ b/arch/e2k/include/asm/fast_syscalls.h @@ -0,0 +1,310 @@ +#ifndef _ASM_E2K_FAST_SYSCALLS_H +#define _ASM_E2K_FAST_SYSCALLS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct fast_syscalls_data { + struct timekeeper *tk; + u32 mult; + u32 shift; + struct clocksource *clock; + 
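/* coarse wall-clock snapshot, consumed by fast_get_time_coarse() below */ + 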
struct timespec wall_time_coarse; +}; + +extern struct fast_syscalls_data fsys_data; + +extern seqcount_t timekeeper_seq; + +typedef void (*fast_system_call_func)(u64 arg1, u64 arg2); + +extern const fast_system_call_func fast_sys_calls_table[NR_fast_syscalls]; +extern const fast_system_call_func fast_sys_calls_table_32[NR_fast_syscalls]; + +int fast_sys_ni_syscall(void); + +#define FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) sysname +#define COMPAT_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) compat_##sysname +#define PROTECTED_FAST_SYSTEM_CALL_TBL_ENTRY(sysname) \ + (fast_system_call_func) protected_##sysname + +int native_do_fast_clock_gettime(const clockid_t which_clock, + struct timespec *tp); +int native_fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp); +int native_do_fast_gettimeofday(struct timeval *tv); +int native_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is a pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is a paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is a native host kernel without virtualization */ +/* or a host kernel with virtualization support */ +static inline int +do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp) +{ + return native_do_fast_clock_gettime(which_clock, tp); +} + +static inline int +fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp) +{ + return native_fast_sys_clock_gettime(which_clock, tp); +} + +static inline int +do_fast_gettimeofday(struct timeval *tv) +{ + return native_do_fast_gettimeofday(tv); +} +static inline int +fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize) +{ + return native_fast_sys_siggetmask(oset, sigsetsize); +} +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +/* + * These have to be macros since there is no way to return two + * values (seconds and nanoseconds) to an __interrupt function + * without assembler magic. 
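+ * + * A usage sketch (illustrative; it mirrors DO_FAST_GETTIMEOFDAY() below — + * secs and nsecs are plain u64 lvalues that the macro assigns through): + * + *	u64 secs = 0, nsecs = 0; + *	if (fast_get_time(secs, nsecs, false) == FAST_SYS_OK) { + *		tv->tv_sec = secs; + *		tv->tv_usec = nsecs / 1000; + *	} 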
+ */ + +enum { + FAST_SYS_OK, + FAST_SYS_ERROR +}; + +#define fast_get_time(secs, nsecs, monotonic) \ +({ \ + struct clocksource *__clock; \ + struct timekeeper *__tk; \ + u64 __cycles = 0, __cycle_last = 0, __mask = 0; \ + u32 __mult, __shift; \ + unsigned __seq; \ + int __ret = FAST_SYS_ERROR; \ + long wall2mon_sec, wall2mon_nsec; \ + \ + do { \ + __seq = raw_read_seqcount_begin(&timekeeper_seq); \ + \ + __tk = fsys_data.tk; \ + __clock = fsys_data.clock; \ + __mult = fsys_data.mult; \ + __shift = fsys_data.shift; \ + \ + secs = __tk->xtime_sec; \ + nsecs = __tk->tkr_mono.xtime_nsec; \ + \ + if (monotonic) { \ + wall2mon_sec = __tk->wall_to_monotonic.tv_sec; \ + wall2mon_nsec = __tk->wall_to_monotonic.tv_nsec;\ + } \ + \ + if (likely(__clock == &clocksource_sclkr)) { \ + __cycle_last = __tk->tkr_mono.cycle_last; \ + __mask = __clock->mask; \ + __cycles = fast_syscall_read_sclkr(); \ + if (__cycles) \ + __ret = FAST_SYS_OK; \ + } else if (likely(__clock == &clocksource_clkr)) { \ + __cycle_last = __tk->tkr_mono.cycle_last; \ + __mask = __clock->mask; \ + __cycles = fast_syscall_read_clkr(); \ + __ret = FAST_SYS_OK; \ + } \ + } while (unlikely(read_seqcount_retry(&timekeeper_seq, __seq))); \ + \ + if (__ret == FAST_SYS_OK) { \ + nsecs = (((__cycles - __cycle_last) & __mask) \ + * __mult + nsecs) >> __shift; \ + \ + if (monotonic) { \ + secs += wall2mon_sec; \ + nsecs += wall2mon_nsec; \ + } \ + \ + while (nsecs >= NSEC_PER_SEC) { \ + ++secs; \ + nsecs -= NSEC_PER_SEC; \ + } \ + } \ + \ + __ret; \ +}) + +#define fast_get_time_coarse(secs, nsecs, monotonic) \ +({ \ + struct timekeeper *__tk; \ + unsigned __seq; \ + \ + do { \ + __seq = raw_read_seqcount_begin(&timekeeper_seq); \ + \ + secs = fsys_data.wall_time_coarse.tv_sec; \ + nsecs = fsys_data.wall_time_coarse.tv_nsec; \ + \ + if (monotonic) { \ + __tk = fsys_data.tk; \ + secs += __tk->wall_to_monotonic.tv_sec; \ + nsecs += __tk->wall_to_monotonic.tv_nsec; \ + } \ + } while (unlikely(read_seqcount_retry(&timekeeper_seq, __seq))); \ + \ + while (nsecs >= NSEC_PER_SEC) { \ + ++secs; \ + nsecs -= NSEC_PER_SEC; \ + } \ + \ + FAST_SYS_OK; \ +}) + +static inline int +DO_FAST_CLOCK_GETTIME(const clockid_t which_clock, struct timespec *tp) +{ + u64 secs = 0, nsecs = 0; + int ret; + + switch (which_clock) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + ret = fast_get_time(secs, nsecs, + which_clock == CLOCK_MONOTONIC); + break; + case CLOCK_REALTIME_COARSE: + case CLOCK_MONOTONIC_COARSE: + ret = fast_get_time_coarse(secs, nsecs, + which_clock == CLOCK_MONOTONIC_COARSE); + break; + default: + ret = FAST_SYS_ERROR; + break; + } + + if (likely(!ret)) { + tp->tv_sec = secs; + tp->tv_nsec = nsecs; + } + + return ret; +} + +/* trap table entry is called as a function (it is closer to hardware start) */ +typedef long (*ttable_entry_args3)(int sys_num, u64 arg1, u64 arg2); +#define ttable_entry3_args3(sys_num, arg1, arg2) \ + ((ttable_entry_args3)(get_ttable_entry3))(sys_num, arg1, arg2) + +/* trap table entry started by direct branch (it is closer to a fast system */ +/* call without a switch and uses the user local data stack) */ +#define goto_ttable_entry_args3(entry_label, sys_num, arg1, arg2) \ + E2K_GOTO_ARG3(entry_label, sys_num, arg1, arg2) +#define goto_ttable_entry3_args3(sys_num, arg1, arg2) \ + goto_ttable_entry_args3(ttable_entry3, sys_num, arg1, arg2) + +#define ttable_entry_clock_gettime(which, time) \ +/* ibranch */ goto_ttable_entry3_args3(__NR_clock_gettime, which, time) +/* call ttable_entry3_args3(__NR_clock_gettime, which, time) */ +#define 
ttable_entry_gettimeofday(tv, tz) \ +/* ibranch */ goto_ttable_entry3_args3(__NR_gettimeofday, tv, tz) +/* ttable_entry3_args3(__NR_gettimeofday, tv, tz) */ + +static inline int +FAST_SYS_CLOCK_GETTIME(const clockid_t which_clock, struct timespec __user *tp) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + int r; + + prefetchw(&fsys_data); + + tp = (typeof(tp)) ((u64) tp & E2K_VA_MASK); + if (unlikely((u64) tp + sizeof(struct timespec) > ti->addr_limit.seg)) + return -EFAULT; + + r = do_fast_clock_gettime(which_clock, tp); + if (unlikely(r)) +/* ibranch */ ttable_entry_clock_gettime((u64) which_clock, (u64) tp); +/* call r = ttable_entry_clock_gettime((u64) which_clock, (u64) tp); */ + return r; +} + +static inline int +DO_FAST_GETTIMEOFDAY(struct timeval *tv) +{ + u64 secs = 0, nsecs = 0; + int ret; + + ret = fast_get_time(secs, nsecs, false); + if (likely(!ret)) { + tv->tv_sec = secs; + tv->tv_usec = nsecs / 1000; + } + + return ret; +} + +static inline int +FAST_SYS_SIGGETMASK(u64 __user *oset, size_t sigsetsize) +{ + struct thread_info *const ti = READ_CURRENT_REG(); + struct task_struct *task = thread_info_task(ti); + u64 set; + + set = task->blocked.sig[0]; + + if (unlikely(sigsetsize != 8)) + return -EINVAL; + + oset = (typeof(oset)) ((u64) oset & E2K_VA_MASK); + if (unlikely((u64) oset + sizeof(sigset_t) > ti->addr_limit.seg)) + return -EFAULT; + + *oset = set; + + return 0; +} + +int fast_sys_gettimeofday(struct timeval __user *tv, + struct timezone __user *tz); +int fast_sys_clock_gettime(const clockid_t which_clock, + struct timespec __user *tp); +struct getcpu_cache; +int fast_sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, + struct getcpu_cache __user *unused); +int fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize); +struct ucontext; +int fast_sys_getcontext(struct ucontext __user *ucp, size_t sigsetsize); +int fast_sys_set_return(u64 ip, int flags); + +struct compat_timespec; +int compat_fast_sys_clock_gettime(const clockid_t which_clock, + struct compat_timespec __user *tp); +struct compat_timeval; +int compat_fast_sys_gettimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +int compat_fast_sys_siggetmask(u32 __user *oset, size_t sigsetsize); +struct ucontext_32; +int compat_fast_sys_getcontext(struct ucontext_32 __user *ucp, + size_t sigsetsize); +int compat_fast_sys_set_return(u32 ip, int flags); + +int protected_fast_sys_clock_gettime(u32 tags, clockid_t which_clock, + u64 arg3, u64 arg4, u64 arg5); +int protected_fast_sys_gettimeofday(u32 tags, + u64 arg2, u64 arg3, u64 arg4, u64 arg5); +int protected_fast_sys_getcpu(u32 tags, u64 arg2, u64 arg3, u64 arg4, u64 arg5); +int protected_fast_sys_siggetmask(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize); +int protected_fast_sys_getcontext(u32 tags, u64 arg2, u64 arg3, size_t sigsetsize); +#endif /* _ASM_E2K_FAST_SYSCALLS_H */ + diff --git a/arch/e2k/include/asm/fb.h b/arch/e2k/include/asm/fb.h new file mode 100644 index 0000000..129118f --- /dev/null +++ b/arch/e2k/include/asm/fb.h @@ -0,0 +1,19 @@ +#ifndef _ASM_E2K_FB_H +#define _ASM_E2K_FB_H + +#include +#include +#include +#include + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + vma->vm_page_prot = (cpu_has(CPU_FEAT_WC_PCI_PREFETCH) && + vma->vm_flags & VM_WRITECOMBINED) ? 
+ pgprot_writecombine(vma->vm_page_prot) : + pgprot_noncached(vma->vm_page_prot); +} + +extern int fb_is_primary_device(struct fb_info *info); +#endif /* _ASM_E2K_FB_H */ diff --git a/arch/e2k/include/asm/fcntl.h b/arch/e2k/include/asm/fcntl.h new file mode 100644 index 0000000..46ab12d --- /dev/null +++ b/arch/e2k/include/asm/fcntl.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/floppy.h b/arch/e2k/include/asm/floppy.h new file mode 100644 index 0000000..de7942e --- /dev/null +++ b/arch/e2k/include/asm/floppy.h @@ -0,0 +1,272 @@ +/* + * Architecture specific parts of the Floppy driver + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 + */ +#ifndef __ASM_E2K_FLOPPY_H +#define __ASM_E2K_FLOPPY_H + +#include + + +/* + * The DMA channel used by the floppy controller cannot access data at + * addresses >= 16MB + * + * Went back to the 1MB limit, as some people had problems with the floppy + * driver otherwise. It doesn't matter much for performance anyway, as most + * floppy accesses go through the track buffer. + */ +#define _CROSS_64KB(a,s,vdma) \ +(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) + +#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) + + +#define SW fd_routine[use_virtual_dma&1] +#define CSW fd_routine[can_use_virtual_dma & 1] + + +#define fd_inb(port) inb_p(port) +#define fd_outb(value,port) outb_p(value,port) + +#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") +#define fd_free_dma() CSW._free_dma(FLOPPY_DMA) +#define fd_enable_irq() enable_irq(FLOPPY_IRQ) +#define fd_disable_irq() disable_irq(FLOPPY_IRQ) +#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) +#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) + +/* E2K Use default memory allocation policy (DMA only), defined in floppy.c + * + * #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) + */ +#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) + +/* E2K Disable fallback on no-dma mode + * + * #define FLOPPY_CAN_FALLBACK_ON_NODMA + */ + +static int virtual_dma_count; +static int virtual_dma_residue; +static char *virtual_dma_addr; +static int virtual_dma_mode; +static int doing_pdma; + +static irqreturn_t floppy_hardint(int irq, void *dev_id) +{ + register unsigned char st; + +#undef TRACE_FLPY_INT + +#ifdef TRACE_FLPY_INT + static int calls=0; + static int bytes=0; + static int dma_wait=0; +#endif + if (!doing_pdma) + return floppy_interrupt(irq, dev_id); + +#ifdef TRACE_FLPY_INT + if(!calls) + bytes = virtual_dma_count; +#endif + + { + register int lcount; + register char *lptr; + + st = 1; + for(lcount=virtual_dma_count, lptr=virtual_dma_addr; + lcount; lcount--, lptr++) { + st=inb(virtual_dma_port+4) & 0xa0 ; + if(st != 0xa0) + break; + if(virtual_dma_mode) + outb_p(*lptr, virtual_dma_port+5); + else + *lptr = inb_p(virtual_dma_port+5); + } + virtual_dma_count = lcount; + virtual_dma_addr = lptr; + st = inb(virtual_dma_port+4); + } + +#ifdef TRACE_FLPY_INT + calls++; +#endif + if(st == 0x20) + return IRQ_HANDLED; + if(!(st & 0x20)) { + virtual_dma_residue += virtual_dma_count; + virtual_dma_count=0; +#ifdef TRACE_FLPY_INT + printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", + virtual_dma_count, virtual_dma_residue, calls, bytes, + dma_wait); + calls = 0; + dma_wait=0; +#endif + doing_pdma = 0; + floppy_interrupt(irq, dev_id); + return IRQ_HANDLED; 
+ } +#ifdef TRACE_FLPY_INT + if(!virtual_dma_count) + dma_wait++; +#endif + return IRQ_HANDLED; +} + +static void fd_disable_dma(void) +{ + if(! (can_use_virtual_dma & 1)) + disable_dma(FLOPPY_DMA); + doing_pdma = 0; + virtual_dma_residue += virtual_dma_count; + virtual_dma_count=0; +} + +static int vdma_request_dma(unsigned int dmanr, const char * device_id) +{ + return 0; +} + +static void vdma_nop(unsigned int dummy) +{ +} + + +static int vdma_get_dma_residue(unsigned int dummy) +{ + return virtual_dma_count + virtual_dma_residue; +} + + +static int fd_request_irq(void) +{ + if(can_use_virtual_dma) + return request_irq(FLOPPY_IRQ, floppy_hardint, IRQF_DISABLED, + "floppy", NULL); + else + return request_irq(FLOPPY_IRQ, floppy_interrupt, IRQF_DISABLED, + "floppy", NULL); + +} + +static unsigned long dma_mem_alloc(unsigned long size) +{ + return __get_dma_pages(GFP_KERNEL,get_order(size)); +} + + +static unsigned long vdma_mem_alloc(unsigned long size) +{ + return (unsigned long) vmalloc(size); + +} + +#define nodma_mem_alloc(size) vdma_mem_alloc(size) + +static void _fd_dma_mem_free(unsigned long addr, unsigned long size) +{ + if((unsigned long) addr >= (unsigned long) high_memory) + vfree((void *)addr); + else + free_pages(addr, get_order(size)); +} + +#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) + +static void _fd_chose_dma_mode(char *addr, unsigned long size) +{ + if(can_use_virtual_dma == 2) { + if((unsigned long) addr >= (unsigned long) high_memory || + isa_virt_to_bus(addr) >= 0x1000000 || + _CROSS_64KB(addr, size, 0)) + use_virtual_dma = 1; + else + use_virtual_dma = 0; + } else { + use_virtual_dma = can_use_virtual_dma & 1; + } +} + +#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) + + +static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) +{ + doing_pdma = 1; + virtual_dma_port = io; + virtual_dma_mode = (mode == DMA_MODE_WRITE); + virtual_dma_addr = addr; + virtual_dma_count = size; + virtual_dma_residue = 0; + return 0; +} + +static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) +{ +#ifdef FLOPPY_SANITY_CHECK + if (CROSS_64KB(addr, size)) { + printk("DMA crossing 64-K boundary %px-%px\n", addr, addr+size); + return -1; + } +#endif + /* actual, physical DMA */ + doing_pdma = 0; + clear_dma_ff(FLOPPY_DMA); + set_dma_mode(FLOPPY_DMA,mode); + set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); + set_dma_count(FLOPPY_DMA,size); + enable_dma(FLOPPY_DMA); + return 0; +} + +static struct fd_routine_l { + int (*_request_dma)(unsigned int dmanr, const char * device_id); + void (*_free_dma)(unsigned int dmanr); + int (*_get_dma_residue)(unsigned int dummy); + unsigned long (*_dma_mem_alloc) (unsigned long size); + int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); +} fd_routine[] = { + { + request_dma, + free_dma, + get_dma_residue, + dma_mem_alloc, + hard_dma_setup + }, + { + vdma_request_dma, + vdma_nop, + vdma_get_dma_residue, + vdma_mem_alloc, + vdma_dma_setup + } +}; + + +static int FDC1 = 0x3f0; +static int FDC2 = -1; + +/* 1.44 Mb */ +#define FLOPPY0_TYPE 4 +#define FLOPPY1_TYPE 4 + +#define N_FDC 2 +#define N_DRIVE 8 + +#define FLOPPY_MOTOR_MASK 0xf0 + +#define AUTO_DMA + +#define EXTRA_FLOPPY_PARAMS + +#endif /* __ASM_E2K_FLOPPY_H */ diff --git a/arch/e2k/include/asm/ftrace.h b/arch/e2k/include/asm/ftrace.h new file mode 100644 index 0000000..4af6f67 --- /dev/null +++ b/arch/e2k/include/asm/ftrace.h @@ -0,0 +1,48 @@ +#ifndef _ASM_E2K_FTRACE_H +#define _ASM_E2K_FTRACE_H + +static 
inline void return_to_handler(void){} + +extern struct ftrace_ops *function_trace_op; + +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST +# define HAVE_FUNCTION_GRAPH_FP_TEST +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +/* On e2k _mcount() is used for both dynamic and static cases. */ +# define FTRACE_ADDR ((unsigned long) _mcount) +# define MCOUNT_ADDR ((unsigned long) _mcount) +# define MCOUNT_INSN_SIZE 8 + +# define ARCH_SUPPORTS_FTRACE_OPS 1 + +extern void _mcount(e2k_cr0_hi_t frompc); + +struct dyn_arch_ftrace { + /* No extra data needed for e2k */ +}; + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer); +#endif + +#define ftrace_return_address(n) __e2k_kernel_return_address(n) + +#ifdef CONFIG_E2K_STACKS_TRACER +extern int stack_tracer_enabled; +extern int stack_tracer_kernel_only; +int +stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + +#endif /* _ASM_E2K_FTRACE_H */ + diff --git a/arch/e2k/include/asm/futex.h b/arch/e2k/include/asm/futex.h new file mode 100644 index 0000000..8c301d9 --- /dev/null +++ b/arch/e2k/include/asm/futex.h @@ -0,0 +1,77 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#ifdef __KERNEL__ + +#include + +#include +#include +#include +#include + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval, ret = 0; + + pagefault_disable(); + + TRY_USR_PFAULT { + switch (op) { + case FUTEX_OP_SET: + oldval = __api_xchg_return(oparg, uaddr, w, STRONG_MB); + break; + case FUTEX_OP_ADD: + oldval = __api_futex_atomic32_op("adds", oparg, uaddr); + break; + case FUTEX_OP_OR: + oldval = __api_futex_atomic32_op("ors", oparg, uaddr); + break; + case FUTEX_OP_ANDN: + oldval = __api_futex_atomic32_op("andns", oparg, uaddr); + break; + case FUTEX_OP_XOR: + oldval = __api_futex_atomic32_op("xors", oparg, uaddr); + break; + default: + oldval = 0; + ret = -ENOSYS; + break; + } + } CATCH_USR_PFAULT { + pagefault_enable(); + DebugUAF("%s (%d) - %s : futex_atomic_op data fault " + "%px(%ld)\n" , __FILE__, __LINE__, + __FUNCTION__, (uaddr), (sizeof(*uaddr))); + return -EFAULT; + } END_USR_PFAULT + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + if (!access_ok(uaddr, sizeof(int))) + return -EFAULT; + + TRY_USR_PFAULT { + *uval = cmpxchg(uaddr, oldval, newval); + } CATCH_USR_PFAULT { + DebugUAF("%s (%d) - %s : futex_atomic_cmpxchg data fault " + "%px(%ld)\n", __FILE__, __LINE__, + __FUNCTION__, (uaddr), (sizeof(*uaddr))); + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +#endif +#endif diff --git a/arch/e2k/include/asm/getsp_adj.h b/arch/e2k/include/asm/getsp_adj.h new file mode 100644 index 0000000..3b66fad --- /dev/null +++ b/arch/e2k/include/asm/getsp_adj.h @@ -0,0 +1,123 @@ +#ifndef _E2K_ASM_GETSP_ADJ_H +#define _E2K_ASM_GETSP_ADJ_H + +#include + +#include + +/* + * bug #101468: if user allocated more than 4Gb of stack then %cr1_hi.ussz + * field would overflow. In this case we remember all such over/underflows + * in software and apply corresponding corrections to %usd.lo.base manually + * from exc_last_wish handler. + * + * All under/overflows are kept in a single list. 
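+ * + * Concretely (see getsp_adj_apply() below): each entry keys on the frame's + * offset from CURRENT_PCS_BASE() and stores a signed correction counted in + * 4Gb steps; applying it adds correction * 0x100000000ULL to %usd.lo.base + * and removes the entry from the list. 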
+ */ +struct getsp_adj { + struct list_head list_entry; + unsigned long frame_index; + int correction; +}; + + +static inline int __copy_getsp_adj(struct list_head *dst, + const struct list_head *src) +{ + const struct getsp_adj *p; + struct getsp_adj *new; + + list_for_each_entry(p, src, list_entry) { + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + new->correction = p->correction; + new->frame_index = p->frame_index; + list_add_tail(&new->list_entry, dst); + } + + return 0; +} + +static inline int copy_getsp_adj(struct thread_info *new_ti, + const struct thread_info *old_ti) +{ + return __copy_getsp_adj(&new_ti->getsp_adj, &old_ti->getsp_adj); +} + +static inline void free_getsp_adj(struct list_head *getsp_adj_list) +{ + struct getsp_adj *p, *tmp; + + list_for_each_entry_safe(p, tmp, getsp_adj_list, list_entry) { + list_del(&p->list_entry); + kfree(p); + } +} + +static inline s64 getsp_adj_get_correction(unsigned long frame) +{ + unsigned long frame_index = frame - (u64) CURRENT_PCS_BASE(); + struct getsp_adj *p; + + list_for_each_entry(p, &current_thread_info()->getsp_adj, list_entry) { + if (p->frame_index == frame_index) + return (s64) p->correction; + } + + return 0; +} + +static inline int getsp_adj_set_correction(int correction, unsigned long frame) +{ + unsigned long frame_index = frame - (u64) CURRENT_PCS_BASE(); + struct getsp_adj *new, *p; + + list_for_each_entry(p, &current_thread_info()->getsp_adj, list_entry) { + if (p->frame_index == frame_index) { + if (correction) { + p->correction = correction; + } else { + list_del(&p->list_entry); + kfree(p); + } + return 0; + } + } + + if (!correction) + return 0; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + new->correction = correction; + new->frame_index = frame_index; + list_add(&new->list_entry, &current_thread_info()->getsp_adj); + + return 0; +} + +static inline void getsp_adj_apply(struct pt_regs *regs) +{ + unsigned long frame, frame_index; + struct getsp_adj *p; + + frame = AS(regs->stacks.pcsp_lo).base + AS(regs->stacks.pcsp_hi).ind; + frame_index = frame - (u64) CURRENT_PCS_BASE(); + + list_for_each_entry(p, &current_thread_info()->getsp_adj, list_entry) { + if (p->frame_index == frame_index) + goto found; + } + + return; + +found: + AS(regs->stacks.usd_lo).base += p->correction * 0x100000000ULL; + list_del(&p->list_entry); + kfree(p); +} + +#endif diff --git a/arch/e2k/include/asm/glob_regs.h b/arch/e2k/include/asm/glob_regs.h new file mode 100644 index 0000000..20b95df --- /dev/null +++ b/arch/e2k/include/asm/glob_regs.h @@ -0,0 +1,211 @@ +/* + * + * Copyright (C) 2014 MCST + * + * CPU global registers used by the kernel + */ +#ifndef _E2K_GLOB_REGS_H +#define _E2K_GLOB_REGS_H + +#ifdef __KERNEL__ + +#include + +/* + * Map of global registers used for user purposes + */ +#define E2K_GLOBAL_REGS_NUM E2K_MAXGR_d +#define GLOBAL_GREGS_START 0 +#define GLOBAL_GREGS_NUM (E2K_GLOBAL_REGS_NUM / 2) +#define LOCAL_GREGS_START (GLOBAL_GREGS_START + GLOBAL_GREGS_NUM) +#define LOCAL_GREGS_NUM (E2K_GLOBAL_REGS_NUM - GLOBAL_GREGS_NUM) + +/* The following global registers are global for user applications according to the ABI */ +#define GLOBAL_GREGS_USER_MASK \ +( \ + 1UL << 0 | 1UL << 1 | /* %dg0 - %dg1 */ \ + 1UL << 2 | 1UL << 3 | /* %dg2 - %dg3 */ \ + 1UL << 4 | 1UL << 5 | /* %dg4 - %dg5 */ \ + 1UL << 6 | 1UL << 7 | /* %dg6 - %dg7 */ \ + 1UL << 8 | 1UL << 9 | /* %dg8 - %dg9 */ \ + 1UL << 10 | 1UL << 11 | /* %dg10 - %dg11 */ \ + 1UL << 12 | 1UL << 13 | /* %dg12 - %dg13 */ \ + 1UL << 14 | 1UL << 15 | /* 
%dg14 - %dg15 */ \ + 0UL \ +) +/* The following global registers are local for user applications according to the ABI */ +#define LOCAL_GREGS_USER_MASK \ +( \ + 1UL << 16 | 1UL << 17 | /* %dg16 - %dg17 */ \ + 1UL << 18 | 1UL << 19 | /* %dg18 - %dg19 */ \ + 1UL << 20 | 1UL << 21 | /* %dg20 - %dg21 */ \ + 1UL << 22 | 1UL << 23 | /* %dg22 - %dg23 */ \ + 1UL << 24 | 1UL << 25 | /* %dg24 - %dg25 */ \ + 1UL << 26 | 1UL << 27 | /* %dg26 - %dg27 */ \ + 1UL << 28 | 1UL << 29 | /* %dg28 - %dg29 */ \ + 1UL << 30 | 1UL << 31 | /* %dg30 - %dg31 */ \ + 0UL \ +) + +#define USER_THREAD_TLS_GREG 13 /* TLS of user threads */ + +/* + * Map of global registers used for kernel purposes + */ + +/* The next register is used only in paravirtualization mode on host & guest */ +#define GUEST_VCPU_STATE_GREG 16 /* pointer to VCPU state structure */ +/* Global registers to point to current structure */ +#define CURRENT_TASK_GREG 17 /* pointer to current task structure */ +/* smp_processor_id() & per_cpu_offset */ +#define MY_CPU_OFFSET_GREG 18 /* offset of per CPU data */ +#define SMP_CPU_ID_GREG 19 /* CPU number */ +#define KERNEL_GREGS_MAX_NUM 4 /* kernel uses 4 global registers */ + +#ifdef CONFIG_VIRTUALIZATION +/* Global register to support virtualization */ +#define HOST_GREGS_MAX_NUM 1 /* 1 global register is used by host */ + /* kernel to support virtualization */ +#endif /* CONFIG_VIRTUALIZATION */ + +#define CURRENTS_GREGS_MASK ((1UL << GUEST_VCPU_STATE_GREG) | \ + (1UL << CURRENT_TASK_GREG)) +#define CPUS_GREGS_MASK ((1UL << MY_CPU_OFFSET_GREG) | \ + (1UL << SMP_CPU_ID_GREG)) +#define KERNEL_GREGS_MAX_MASK (CURRENTS_GREGS_MASK | CPUS_GREGS_MASK) +#ifdef CONFIG_GREGS_CONTEXT +#define NATIVE_KERNEL_GREGS_MAX_NUM KERNEL_GREGS_MAX_NUM +#define CURRENTS_GREGS_KERNEL_MASK CURRENTS_GREGS_MASK +#define CPUS_GREGS_KERNEL_MASK CPUS_GREGS_MASK +#else /* ! CONFIG_GREGS_CONTEXT */ +#define NATIVE_KERNEL_GREGS_MAX_NUM 0 +#define CURRENTS_GREGS_KERNEL_MASK 0UL +#define CPUS_GREGS_KERNEL_MASK 0UL +#endif /* CONFIG_GREGS_CONTEXT */ + +#define NATIVE_KERNEL_GREGS_MASK \ + (CURRENTS_GREGS_KERNEL_MASK | CPUS_GREGS_KERNEL_MASK) + +#ifdef CONFIG_VIRTUALIZATION +/* Global register to point to guest VCPU state */ +#define VCPU_STATE_GREGS_MASK (1UL << GUEST_VCPU_STATE_GREG) +#define VCPU_STATE_GREGS_PAIR_MASK \ + (VCPU_STATE_GREGS_MASK | (1UL << CURRENT_TASK_GREG)) + +#define HOST_KERNEL_GREGS_MAX_NUM HOST_GREGS_MAX_NUM +#define HOST_GREGS_KERNEL_MASK VCPU_STATE_GREGS_MASK +#define HOST_GREGS_PAIR_KERNEL_MASK VCPU_STATE_GREGS_PAIR_MASK +#else /* ! CONFIG_VIRTUALIZATION */ +#define HOST_KERNEL_GREGS_MAX_NUM 0 +#define HOST_GREGS_KERNEL_MASK 0UL +#define HOST_GREGS_PAIR_KERNEL_MASK 0UL +#endif /* CONFIG_VIRTUALIZATION */ + +#define HOST_KERNEL_GREGS_MASK HOST_GREGS_KERNEL_MASK +#define HOST_KERNEL_GREGS_PAIR_MASK HOST_GREGS_PAIR_KERNEL_MASK + +/* Guest kernel can use global registers too (now only the same as the native kernel) */ +/* and additional registers on the host to support virtualization (now only one */ +/* register as a pointer to the VCPU state structure). 
*/ +/* All guest global registers are saved/restored by the host */ +/* The host cannot know whether the guest uses its own global registers, so */ +/* it should save/restore the maximum set */ +#define GUEST_GREGS_NUM KERNEL_GREGS_MAX_NUM +#define GUEST_GREGS_MASK KERNEL_GREGS_MAX_MASK + +/* real number & mask of registers used by the kernel */ +/* in accordance with config variables and execution mode */ +#define KERNEL_GREGS_NUM NATIVE_KERNEL_GREGS_MAX_NUM +#define KERNEL_GREGS_MASK NATIVE_KERNEL_GREGS_MASK + +/* + * Global registers are used by both user and kernel, so some global register + * state needs to be saved/restored on entry to/return from the kernel. + * Global registers can contain tagged values and be used by protected + * processes. So the register state needs to be stored/restored in pairs + * so as not to destroy quad pointers. + */ + +#ifdef CONFIG_GREGS_CONTEXT +/* Pair of global registers used by kernel: */ +#define KERNEL_GREGS_PAIRS_START GUEST_VCPU_STATE_GREG /* %dg16 */ +/* Pair of global registers to point to current structures: */ +/* current & current_thread_info() */ +#define CURRENT_GREGS_PAIR_LO GUEST_VCPU_STATE_GREG /* %dg16 */ +#define CURRENT_GREGS_PAIR_HI CURRENT_TASK_GREG /* %dg17 */ +#define CURRENT_GREGS_PAIRS_NUM 1 /* one pair: */ + /* low: thread info */ + /* high: task structure */ +#define CURRENT_GREGS_PAIRS_SIZE /* one pair of */ \ + /* two registers */ \ + (CURRENT_GREGS_PAIRS_NUM * 2) +#define GUEST_VCPU_STATE_GREGS_PAIRS_INDEX /* g[0] */ \ + (GUEST_VCPU_STATE_GREG - KERNEL_GREGS_PAIRS_START) +#define CURRENT_TASK_GREGS_PAIRS_INDEX /* g[1] */ \ + (CURRENT_TASK_GREG - KERNEL_GREGS_PAIRS_START) +#define CURRENT_GREGS_PAIRS_INDEX_LO GUEST_VCPU_STATE_GREGS_PAIRS_INDEX +#define CURRENT_GREGS_PAIRS_INDEX_HI CURRENT_TASK_GREGS_PAIRS_INDEX +/* raw_smp_processor_id & __my_cpu_offset */ +#define CPU_GREGS_PAIR_LO MY_CPU_OFFSET_GREG /* %dg18 */ +#define CPU_GREGS_PAIR_HI SMP_CPU_ID_GREG /* %dg19 */ +#define CPU_GREGS_PAIRS_NUM 1 /* one pair: */ + /* low: my per cpu offset */ + /* high: CPU ID */ +#define CPU_GREGS_PAIRS_SIZE /* one pair of */ \ + /* two registers */ \ + (CPU_GREGS_PAIRS_NUM * 2) +#define MY_CPU_OFFSET_GREGS_PAIRS_INDEX /* g[2] */ \ + (MY_CPU_OFFSET_GREG - KERNEL_GREGS_PAIRS_START) +#define SMP_CPU_ID_GREGS_PAIRS_INDEX /* g[3] */ \ + (SMP_CPU_ID_GREG - KERNEL_GREGS_PAIRS_START) +#define CPU_GREGS_PAIRS_INDEX_LO MY_CPU_OFFSET_GREGS_PAIRS_INDEX +#define CPU_GREGS_PAIRS_INDEX_HI SMP_CPU_ID_GREGS_PAIRS_INDEX + +#define NATIVE_KERNEL_GREGS_PAIRS_NUM \ + (CURRENT_GREGS_PAIRS_NUM + CPU_GREGS_PAIRS_NUM) +#define NATIVE_KERNEL_GREGS_PAIRS_SIZE \ + (CURRENT_GREGS_PAIRS_SIZE + CPU_GREGS_PAIRS_SIZE) +#else /* ! 
CONFIG_GREGS_CONTEXT */ +#define NATIVE_KERNEL_GREGS_PAIRS_NUM 0 +#define NATIVE_KERNEL_GREGS_PAIRS_SIZE 0 +#endif /* CONFIG_GREGS_CONTEXT */ + +#ifdef CONFIG_VIRTUALIZATION +/* Pair of global registers used by the host to support virtualization */ +#define HOST_GREGS_PAIRS_START GUEST_VCPU_STATE_GREG /* %dg16 */ +/* The VCPU state pointer is always used for virtualization support, */ +/* so it needs to be in the starting pair */ +#define VCPU_STATE_GREGS_PAIR_LO GUEST_VCPU_STATE_GREG /* %dg16 */ +#define VCPU_STATE_GREGS_PAIR_HI CURRENT_TASK_GREG /* %dg17 */ +#define VCPU_STATE_GREGS_PAIRS_NUM 1 /* one pair: */ + /* low: kvm_vcpu_state */ + /* high: current task */ +#define VCPU_STATE_GREGS_PAIRS_SIZE /* one pair of */ \ + /* two registers */ \ + (VCPU_STATE_GREGS_PAIRS_NUM * 2) +#define VCPU_STATE_GREGS_PAIRS_INDEX /* g[0] */ \ + (GUEST_VCPU_STATE_GREG - HOST_GREGS_PAIRS_START) +#define VCPU_STATE_GREGS_PAIRS_HI_INDEX /* g[1] */ \ + (CURRENT_TASK_GREG - HOST_GREGS_PAIRS_START) + +/* indexes of global registers for virtualization in structure */ +/* guest_gregs->g[] */ +#define HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO /* g[0] */ \ + VCPU_STATE_GREGS_PAIRS_INDEX +#define HOST_VCPU_STATE_GREGS_PAIRS_INDEX_HI /* g[1] */ \ + VCPU_STATE_GREGS_PAIRS_HI_INDEX + +#define HOST_KERNEL_GREGS_PAIRS_NUM VCPU_STATE_GREGS_PAIRS_NUM +#define HOST_KERNEL_GREGS_PAIRS_SIZE VCPU_STATE_GREGS_PAIRS_SIZE +#else /* ! CONFIG_VIRTUALIZATION */ +#define HOST_KERNEL_GREGS_PAIRS_NUM 0 +#define HOST_KERNEL_GREGS_PAIRS_SIZE 0 +#endif /* CONFIG_VIRTUALIZATION */ + +/* real number & size of global register pairs used by the kernel */ +/* in accordance with config and execution mode */ +#define KERNEL_GREGS_PAIRS_NUM NATIVE_KERNEL_GREGS_PAIRS_NUM +#define KERNEL_GREGS_PAIRS_SIZE NATIVE_KERNEL_GREGS_PAIRS_SIZE + +#endif /* __KERNEL__ */ +#endif /* _E2K_GLOB_REGS_H */ diff --git a/arch/e2k/include/asm/gpio.h b/arch/e2k/include/asm/gpio.h new file mode 100644 index 0000000..9414a7c --- /dev/null +++ b/arch/e2k/include/asm/gpio.h @@ -0,0 +1,21 @@ +/* + * Generic GPIO API implementation for e2k. + * + * Derived from the generic GPIO API for x86: + * + * Copyright (c) 2012 MCST. + * + * Author: Evgeny Kravtsunov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ASM_X86_GPIO_H +#define _ASM_X86_GPIO_H + +#include + +#endif /* _ASM_X86_GPIO_H */ diff --git a/arch/e2k/include/asm/gregs.h b/arch/e2k/include/asm/gregs.h new file mode 100644 index 0000000..d5e9b11 --- /dev/null +++ b/arch/e2k/include/asm/gregs.h @@ -0,0 +1,219 @@ +#ifndef _E2K_GREGS_H +#define _E2K_GREGS_H + +#include +#include +#include +#include +#include + +/* + * Save the new value of gN and set the current pointer in these registers + * so that the current & current_thread_info() macros can be used + */ +#define SET_CURRENTS_GREGS(__task) \ +({ \ + E2K_SET_DGREG_NV(CURRENT_TASK_GREG, (__task)); \ +}) +#define SET_SMP_CPUS_GREGS(__cpu, __per_cpu_off) \ +({ \ + E2K_SET_DGREG_NV(SMP_CPU_ID_GREG, (__cpu)); \ + E2K_SET_DGREG_NV(MY_CPU_OFFSET_GREG, (__per_cpu_off)); \ +}) +#define SET_KERNEL_GREGS(__task, __cpu, __per_cpu_off) \ +({ \ + SET_CURRENTS_GREGS(__task); \ + SET_SMP_CPUS_GREGS(__cpu, __per_cpu_off); \ +}) +#define ONLY_SET_CURRENTS_GREGS(__ti) \ +({ \ + SET_CURRENTS_GREGS(thread_info_task(__ti)); \ +}) +#define ONLY_SAVE_KERNEL_CURRENTS_GREGS(task__) \ +({ \ + (task__) = NATIVE_GET_UNTEGGED_DGREG(CURRENT_TASK_GREG); \ +}) +#ifdef CONFIG_SMP +#define ONLY_SAVE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) \ +({ \ + (cpu_id__) = NATIVE_GET_UNTEGGED_DGREG(SMP_CPU_ID_GREG); \ + (cpu_off__) = NATIVE_GET_UNTEGGED_DGREG(MY_CPU_OFFSET_GREG); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_SAVE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_SAVE_KERNEL_GREGS(task__, cpu_id__, cpu_off__) \ +({ \ + ONLY_SAVE_KERNEL_CURRENTS_GREGS(task__); \ + ONLY_SAVE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__); \ +}) + +#define ONLY_RESTORE_KERNEL_CURRENTS_GREGS(task__) \ +({ \ + NATIVE_SET_DGREG(CURRENT_TASK_GREG, task__); \ +}) +#ifdef CONFIG_SMP +#define ONLY_RESTORE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) \ +({ \ + NATIVE_SET_DGREG(SMP_CPU_ID_GREG, cpu_id__); \ + NATIVE_SET_DGREG(MY_CPU_OFFSET_GREG, cpu_off__); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_RESTORE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_RESTORE_KERNEL_GREGS(task__, cpu_id__, cpu_off__)\ +({ \ + ONLY_RESTORE_KERNEL_CURRENTS_GREGS(task__); \ + ONLY_RESTORE_KERNEL_SMP_CPUS_GREGS(cpu_id__, cpu_off__); \ +}) + +#ifdef CONFIG_SMP +#define ONLY_SET_SMP_CPUS_GREGS(__ti) \ +({ \ + long __cpu = task_cpu(thread_info_task(__ti)); \ + \ + SET_SMP_CPUS_GREGS(__cpu, per_cpu_offset(__cpu)); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_SET_SMP_CPUS_GREGS(__ti) +#endif /* CONFIG_SMP */ + +#define ONLY_SET_KERNEL_GREGS(__ti) \ +({ \ + ONLY_SET_CURRENTS_GREGS(__ti); \ + ONLY_SET_SMP_CPUS_GREGS(__ti); \ +}) + +#define CLEAR_KERNEL_GREGS() \ +({ \ + SET_KERNEL_GREGS(0, 0, 0); \ +}) +#define NATIVE_SAVE_KERNEL_GREGS_AND_SET(__ti) \ +({ \ + machine.save_kernel_gregs(&(__ti)->k_gregs); \ + ONLY_SET_KERNEL_GREGS(__ti); \ +}) +/* + * global registers used as pointers to current task & thread info + * must be restored, and current & current_thread_info() cannot be + * used from now on + */ +#define ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(__k_gregs, task__) \ +({ \ + (task__) = (__k_gregs)->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base; \ +}) +#ifdef CONFIG_SMP +#define ONLY_COPY_FROM_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) \ +({ \ + (cpu_id__) = (__k_gregs)->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base; \ + (cpu_off__) = (__k_gregs)->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base; \ +}) +#else /* ! 
CONFIG_SMP */ +#define ONLY_COPY_FROM_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_COPY_FROM_KERNEL_GREGS(__k_gregs, task__, cpu_id__, cpu_off__) \ +({ \ + ONLY_COPY_FROM_KERNEL_CURRENT_GREGS(__k_gregs, task__); \ + ONLY_COPY_FROM_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__); \ +}) + +#define ONLY_COPY_TO_KERNEL_CURRENT_GREGS(__k_gregs, task__) \ +({ \ + (__k_gregs)->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base = (task__); \ +}) +#ifdef CONFIG_SMP +#define ONLY_COPY_TO_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) \ +({ \ + (__k_gregs)->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base = (cpu_id__); \ + (__k_gregs)->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base = (cpu_off__); \ +}) +#else /* ! CONFIG_SMP */ +#define ONLY_COPY_TO_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__) +#endif /* CONFIG_SMP */ +#define ONLY_COPY_TO_KERNEL_GREGS(__k_gregs, task__, cpu_id__, cpu_off__) \ +({ \ + ONLY_COPY_TO_KERNEL_CURRENT_GREGS(__k_gregs, task__); \ + ONLY_COPY_TO_KERNEL_SMP_CPUS_GREGS(__k_gregs, cpu_id__, cpu_off__); \ +}) +#define CLEAR_KERNEL_GREGS_COPY(__ti) \ + ONLY_COPY_TO_KERNEL_GREGS(&(__ti)->k_gregs, 0, 0, 0) + +#define NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + \ + NATIVE_RESTORE_KERNEL_GREG(__ti->k_gregs.g, \ + GUEST_VCPU_STATE_GREGS_PAIRS_INDEX, \ + CURRENT_TASK_GREGS_PAIRS_INDEX, \ + MY_CPU_OFFSET_GREGS_PAIRS_INDEX, \ + SMP_CPU_ID_GREGS_PAIRS_INDEX, \ + GUEST_VCPU_STATE_GREG, CURRENT_TASK_GREG, \ + MY_CPU_OFFSET_GREG, SMP_CPU_ID_GREG); \ +}) + +/* User global registers, used by kernel, keep into thread info structure */ +/* and save to/restore from while enter to/return from kernel */ +#define CLEAR_GREGS_COPY_FROM_CURRENTS(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + \ + __ti->k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base = 0; \ + __ti->k_gregs.g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].ext = 0; \ + __ti->k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].base = 0; \ + __ti->k_gregs.g[CURRENT_TASK_GREGS_PAIRS_INDEX].ext = 0; \ +}) + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ + +#define SAVE_KERNEL_GREGS_AND_SET(thread_info) \ + NATIVE_SAVE_KERNEL_GREGS_AND_SET(thread_info) +#define RESTORE_KERNEL_GREGS_AND_FREE(thread_info) \ + NATIVE_RESTORE_KERNEL_GREGS(&(thread_info)->k_gregs) +#define RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ + NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) + #ifdef CONFIG_VIRTUALIZATION + /* it is native host kernel with virtualization support */ + #include + #endif /* CONFIG_VIRTUALIZATION */ +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +static inline void copy_k_gregs_to_gregs(struct global_regs *dst, + const struct kernel_gregs *src) +{ + tagged_memcpy_8(&dst->g[KERNEL_GREGS_PAIRS_START], src->g, + sizeof(src->g)); +} + +static inline void copy_k_gregs_to_k_gregs(struct kernel_gregs *dst, + const struct kernel_gregs *src) +{ + tagged_memcpy_8(dst->g, src->g, sizeof(src->g)); +} + +static inline void get_k_gregs_from_gregs(struct kernel_gregs *dst, + const struct global_regs *src) +{ + tagged_memcpy_8(dst->g, &src->g[KERNEL_GREGS_PAIRS_START], + sizeof(dst->g)); +} + +static inline void copy_k_gregs_to_l_gregs(struct local_gregs *dst, + const struct kernel_gregs *src) +{ + BUG_ON(KERNEL_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(&dst->g[KERNEL_GREGS_PAIRS_START - LOCAL_GREGS_START], + src->g, sizeof(src->g)); +} + +static inline void get_k_gregs_from_l_regs(struct kernel_gregs *dst, + const struct local_gregs *src) +{ + BUG_ON(KERNEL_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(dst->g, + &src->g[KERNEL_GREGS_PAIRS_START - LOCAL_GREGS_START], + sizeof(dst->g)); +} + +#endif diff --git a/arch/e2k/include/asm/hardirq.h b/arch/e2k/include/asm/hardirq.h new file mode 100644 index 0000000..2f4ff34 --- /dev/null +++ b/arch/e2k/include/asm/hardirq.h @@ -0,0 +1,40 @@ +#pragma once +#include + +#define __ARCH_IRQ_EXIT_IRQS_DISABLED + +extern void irq_enter(void); +extern void irq_exit(void); + +#define l_irq_enter() irq_enter() +#define l_irq_exit() irq_exit() + +#include + +static inline bool is_from_C1_wait_trap(const struct pt_regs *regs) +{ + unsigned long ip = get_return_ip(regs); + + return unlikely(ip >= (unsigned long) __C1_wait_trap_start && + ip < (unsigned long) __C1_wait_trap_end); +} +static inline bool is_from_C3_wait_trap(const struct pt_regs *regs) +{ + unsigned long ip = get_return_ip(regs); + + return unlikely(ip >= (unsigned long) __C3_wait_trap_start && + ip < (unsigned long) __C3_wait_trap_end); +} +static inline bool is_from_wait_trap(const struct pt_regs *regs) +{ + return is_from_C1_wait_trap(regs) || is_from_C3_wait_trap(regs); +} +extern void handle_wtrap(struct pt_regs *regs); +#define arch_nmi_enter() \ +do { \ + inc_irq_stat(__nmi_count); \ + if (is_from_wait_trap(regs)) \ + handle_wtrap(regs); \ +} while (0) + +#define arch_nmi_exit() do { } while (0) diff --git a/arch/e2k/include/asm/hb_regs.h b/arch/e2k/include/asm/hb_regs.h new file mode 100644 index 0000000..319f943 --- /dev/null +++ b/arch/e2k/include/asm/hb_regs.h @@ -0,0 +1,437 @@ + +#ifndef _E2K_HB_REGS_H_ +#define _E2K_HB_REGS_H_ + +#ifdef __KERNEL__ + +#include +#ifndef __ASSEMBLY__ +#include +#include +#include +#endif /* __ASSEMBLY__ */ + +#undef DEBUG_EARLY_HB_MODE +#undef DebugEHB +#define DEBUG_EARLY_HB_MODE 0 /* early Host Bridge access */ +#undef DEBUG_BOOT_HB_MODE +#undef DebugBEHB +#define DEBUG_BOOT_HB_MODE 0 /* boot Host Bridge access */ +#ifndef CONFIG_BOOT_E2K +#define DebugEHB(fmt, args...) \ + ({ if (DEBUG_EARLY_HB_MODE) \ + printk(fmt, ##args); }) +#define DebugBEHB(fmt, args...) \ + ({ if (DEBUG_BOOT_HB_MODE) \ + do_boot_printk(fmt, ##args); }) +#else /* CONFIG_BOOT_E2K */ +#define DebugEHB(fmt, args...) \ + ({ if (DEBUG_EARLY_HB_MODE) \ + rom_printk(fmt, ##args); }) +#define DebugBEHB(fmt, args...) \ + ({ if (DEBUG_BOOT_HB_MODE) \ + rom_printk(fmt, ##args); }) +#endif /* ! CONFIG_BOOT_E2K */ + +#undef DEBUG_HB_MODE +#undef DebugHB +#define DEBUG_HB_MODE 0 /* Host Bridge access */ +#define DebugHB(fmt, args...) 
\ + ({ if (DEBUG_HB_MODE) \ + printk(fmt, ##args); }) + +/* + * The Host Bridge is a PCI device on root bus #0 and has common PCI + * configuration registers and some additional special registers + */ + +/* Host bridge is device 0x1f on root bus #0 */ +#define HB_PCI_BUS_NUM 0x00 +#define HB_PCI_SLOT 0x1f +#define HB_PCI_FUNC 0x00 +/* Embedded Graphics is device 0x1e on root bus #0 */ +#define EG_PCI_BUS_NUM 0x00 +#define EG_PCI_SLOT 0x1e +#define EG_PCI_FUNC 0x00 + +/* Base address of legacy NBSR registers */ +#define HB_PCI_LEGACY_BAR PCI_BASE_ADDRESS_0 /* 0x10 64 bits */ + #define HB_PCI_LEGACY_MEMORY_BAR 0x000000fffff00000 /* [39:20] */ + #define HB_PCI_LEGACY_ADDR_MASK 0x00000000000ffff0 /* [19: 4] 1M */ +/* Base address of Power Management Controller registers */ +#define HB_PCI_PMC_BAR PCI_BASE_ADDRESS_2 /* 0x18 64 bits */ + #define HB_PCI_PMC_MEMORY_BAR 0x000000ffffff0000 /* [39:16] */ + #define HB_PCI_PMC_ADDR_MASK 0x000000000000fff0 /* [15: 4] */ + /* 64K */ + +/* Additional special registers */ + +/* Host Bridge configuration register */ +#define HB_PCI_CFG 0x40 /* 32 bits */ + #define HB_CFG_MaskIntSic 0x00000080 /* SIC interrupts */ + /* to embedded IOAPIC */ + #define HB_CFG_MaskIntWlcc 0x00000040 /* WLCC interrupts */ + /* to embedded IOAPIC */ + #define HB_CFG_MaskIntIommu 0x00000020 /* IOMMU interrupts */ + /* to embedded IOAPIC */ + #define HB_CFG_ShareHostInterrupts 0x00000010 /* HB interrupts */ + /* are shared as IRQ2 */ + /* else IRQ2 & IRQ3 */ + #define HB_CFG_ShareGraphicsInterrupts 0x00000008 /* EG interrupts */ + /* are shared as IRQ0 */ + /* else IRQ0 & IRQ1 */ + #define HB_CFG_InternalIoApicEnable 0x00000004 /* embedded interrupts */ + /* to embedded IOAPIC */ + /* else to LAPIC LVT */ + #define HB_CFG_IntegratedVgaEnable 0x00000002 /* Legacy VGA access */ + /* to EG */ + /* else to IOHUB */ + #define HB_CFG_IntegratedGraphicsEnable 0x00000001 /* EG is on */ + +/* Top Of low Memory register */ +#define HB_PCI_TOM 0x44 /* 32 bits */ + #define HB_PCI_TOM_LOW_MASK 0x00000000fff00000 /* [31:20] */ + +/* Top Of high Memory register */ +#define HB_PCI_TOM2 0x48 /* 64 bits */ + #define HB_PCI_TOM2_HI_MASK 0x000000fffff00000 /* [39:20] */ + #define HB_PCI_HI_ADDR_BASE 0x0000000100000000 /* 4Gb */ + +/* Base address of high memory to which low memory is remapped */ +#define HB_PCI_REMAPBASE 0x50 /* 64 bits */ + #define HB_PCI_REMAPBASE_MASK 0x000000fffff00000 /* [39:20] */ + +/* Base address of embedded IO APIC */ +#define HB_PCI_IOAPICBASE 0x58 /* 64 bits */ + #define HB_PCI_IOAPICBASE_MASK 0x000000ffffffff00 /* [39: 8] */ + +/* PMC MSI configuration register */ +#define HB_PCI_PMC_MSGCTL 0x62 /* 16 bits */ +/* PMC MSI address register */ +#define HB_PCI_PMC_MSGADDR 0x64 /* 32 bits */ + #define HB_PCI_MESSADGEADDRESS 0xfffffffc /* [31: 2] */ + +/* + * Embedded Graphics controller registers + */ + +/* Base address of Video RAM */ +#define EG_PCI_VRAM_BAR PCI_BASE_ADDRESS_0 /* 0x10 64 bits */ + #define EG_PCI_VRAM_MEMORY_BAR 0x000000fff8000000 /* [39:30] */ + #define EG_PCI_VRAM_ADDRMASK1024 0x0000000020000000 /* [29] */ + #define EG_PCI_VRAM_ADDRMASK512 0x0000000010000000 /* [28] */ + #define EG_PCI_VRAM_ADDRMASK256 0x0000000008000000 /* [27] */ + #define EG_PCI_VRAM_ADDR_MASK 0x0000000007fffff0 /* [26: 4] 128M */ +/* Base address of MGA-2 registers */ +#define EG_PCI_MGA2_BAR PCI_BASE_ADDRESS_2 /* 0x18 32 bits */ + #define EG_PCI_MGA2_MEMORY_BAR 0xfffc0000 /* [31:18] */ + #define EG_PCI_MGA2_ADDR_MASK 0x0003ffff /* [17: 4] 256K */ +/* Base address of GC2500 registers */ +#define 
+
+/*
+ * Embedded Graphic controller registers
+ */
+
+/* Base address of Video RAM */
+#define EG_PCI_VRAM_BAR		PCI_BASE_ADDRESS_0	/* 0x10 64 bits */
+  #define EG_PCI_VRAM_MEMORY_BAR	0x000000fff8000000 /* [39:30] */
+  #define EG_PCI_VRAM_ADDRMASK1024	0x0000000020000000 /* [29] */
+  #define EG_PCI_VRAM_ADDRMASK512	0x0000000010000000 /* [28] */
+  #define EG_PCI_VRAM_ADDRMASK256	0x0000000008000000 /* [27] */
+  #define EG_PCI_VRAM_ADDR_MASK		0x0000000007fffff0 /* [26: 4] 128M */
+/* Base address of MGA-2 registers */
+#define EG_PCI_MGA2_BAR		PCI_BASE_ADDRESS_2	/* 0x18 32 bits */
+  #define EG_PCI_MGA2_MEMORY_BAR	0xfffc0000	/* [31:18] */
+  #define EG_PCI_MGA2_ADDR_MASK		0x0003ffff	/* [17: 4] 256K */
+/* Base address of GC2500 registers */
+#define EG_PCI_GC2500_BAR	PCI_BASE_ADDRESS_3	/* 0x1c 32 bits */
+  #define EG_PCI_GC2500_MEMORY_BAR	0xfffc0000	/* [31:18] */
+  #define EG_PCI_GC2500_ADDR_MASK	0x0003ffff	/* [17: 4] 256K */
+/* Embedded Graphic controller CFG register */
+#define EG_PCI_CFG		0x40		/* 32 bits */
+  #define EG_CFG_VRAM_SIZE_MASK		0x00000003	/* [ 1: 0] */
+  #define EG_CFG_VRAM_SIZE_128		0x0	/* 128 Mb */
+  #define EG_CFG_VRAM_SIZE_256		0x1	/* 256 Mb */
+  #define EG_CFG_VRAM_SIZE_512		0x2	/* 512 Mb */
+  #define EG_CFG_VRAM_SIZE_1024		0x3	/* 1 Gb */
+/* Embedded Graphic MSI configuration register */
+#define EG_PCI_MSGCTL		0x46		/* 16 bits */
+/* Embedded Graphic MSI address register */
+#define EG_PCI_MSGADDR		0x48		/* 32 bits */
+  #define EG_PCI_MESSADGEADDRESS	0xfffffffc /* [31: 2] */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The host bridge & embedded graphic are seen as PCI devices on bus #0
+ */
+#define hb_eg_early_readb(addr)		\
+		boot_readb((void *)(addr))
+#define hb_eg_early_readw(addr)		\
+		boot_readw((void *)(addr))
+#define hb_eg_early_readl(addr)		\
+		boot_readl((void *)(addr))
+#define hb_eg_early_writeb(value, addr)	\
+		boot_writeb((value), (void *)(addr))
+#define hb_eg_early_writew(value, addr)	\
+		boot_writew((value), (void *)(addr))
+#define hb_eg_early_writel(value, addr)	\
+		boot_writel((value), (void *)(addr))
+
+#define hb_eg_early_pci_conf_base()	sic_domain_pci_conf_base(0)
+
+#define boot_hb_eg_readl(addr)		boot_readl((void *)(addr))
+#define boot_hb_eg_pci_conf_base()	boot_sic_domain_pci_conf_base(0)
+
+static inline unsigned char
+early_readb_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned char reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value = hb_eg_early_readb(reg_addr);
+	DebugEHB("early_readb_hb_eg_reg() reg 0x%x read 0x%02hhx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline unsigned short
+early_readw_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned short reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value = hb_eg_early_readw(reg_addr);
+	DebugEHB("early_readw_hb_eg_reg() reg 0x%x read 0x%04hx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline unsigned int
+early_readl_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned int reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value = hb_eg_early_readl(reg_addr);
+	DebugEHB("early_readl_hb_eg_reg() reg 0x%x read 0x%08x from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline unsigned long
+early_readll_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned long reg_value_lo;
+	unsigned long reg_value_hi;
+	unsigned long reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value_lo = hb_eg_early_readl(reg_addr);
+	reg_value_hi = hb_eg_early_readl(reg_addr + sizeof(unsigned int));
+	reg_value = reg_value_lo | (reg_value_hi << sizeof(unsigned int) * 8);
+	DebugEHB("early_readll_hb_eg_reg() reg 0x%x read 0x%016lx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
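+
+/*
+ * Usage sketch (illustrative; the helper name is hypothetical and not part
+ * of the original interface): reading the embedded graphics VRAM size with
+ * the early accessor above.
+ */
+static inline unsigned int early_eg_vram_size_mb(void)
+{
+	unsigned int cfg = early_readl_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT,
+						EG_PCI_FUNC, EG_PCI_CFG);
+
+	/* EG_CFG_VRAM_SIZE_* encode 128 Mb << value */
+	return 128U << (cfg & EG_CFG_VRAM_SIZE_MASK);
+}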
+
+static inline unsigned long
+boot_readll_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_offset)
+{
+	unsigned long reg_addr = boot_hb_eg_pci_conf_base();
+	unsigned long reg_value_lo;
+	unsigned long reg_value_hi;
+	unsigned long reg_value;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value_lo = boot_hb_eg_readl(reg_addr);
+	reg_value_hi = boot_hb_eg_readl(reg_addr + sizeof(unsigned int));
+	reg_value = reg_value_lo | (reg_value_hi << sizeof(unsigned int) * 8);
+	DebugBEHB("boot_readll_hb_eg_reg() reg 0x%x read 0x%016lx from 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+	return reg_value;
+}
+
+static inline void
+early_writeb_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned char reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	hb_eg_early_writeb(reg_value, reg_addr);
+	DebugEHB("early_writeb_hb_eg_reg() reg 0x%x write 0x%02hhx to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline void
+early_writew_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned short reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	hb_eg_early_writew(reg_value, reg_addr);
+	DebugEHB("early_writew_hb_eg_reg() reg 0x%x write 0x%04hx to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline void
+early_writel_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned int reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	hb_eg_early_writel(reg_value, reg_addr);
+	DebugEHB("early_writel_hb_eg_reg() reg 0x%x write 0x%08x to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline void
+early_writell_hb_eg_reg(unsigned int bus, unsigned int slot, unsigned int func,
+			unsigned long reg_value, unsigned int reg_offset)
+{
+	unsigned long reg_addr = hb_eg_early_pci_conf_base();
+	unsigned int reg_value_lo;
+	unsigned int reg_value_hi;
+
+	reg_addr += CONFIG_CMD(bus, PCI_DEVFN(slot, func), reg_offset);
+	reg_value_lo = reg_value & 0x00000000ffffffff;
+	reg_value_hi = reg_value >> (sizeof(unsigned int) * 8);
+	hb_eg_early_writel(reg_value_lo, reg_addr);
+	hb_eg_early_writel(reg_value_hi, reg_addr + sizeof(unsigned int));
+	DebugEHB("early_writell_hb_eg_reg() reg 0x%x write 0x%016lx to 0x%lx\n",
+		reg_offset, reg_value, reg_addr);
+}
+
+static inline unsigned char
+early_readb_hb_reg(unsigned int reg_offset)
+{
+	return early_readb_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC,
+					reg_offset);
+}
+
+static inline unsigned short
+early_readw_hb_reg(unsigned int reg_offset)
+{
+	return early_readw_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC,
+					reg_offset);
+}
+
+static inline unsigned int
+early_readl_hb_reg(unsigned int reg_offset)
+{
+	return early_readl_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC,
+					reg_offset);
+}
+
+static inline unsigned long
+early_readll_hb_reg(unsigned int reg_offset)
+{
+	return early_readll_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC,
+					reg_offset);
+}
+
+static inline unsigned long
+boot_readll_hb_reg(unsigned int reg_offset)
+{
+	return boot_readll_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC,
+					reg_offset);
+}
+
+static inline void
+early_writeb_hb_reg(unsigned char reg_value, unsigned int reg_offset) +{ + early_writeb_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writew_hb_reg(unsigned short reg_value, unsigned int reg_offset) +{ + early_writew_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writel_hb_reg(unsigned int reg_value, unsigned int reg_offset) +{ + early_writel_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writell_hb_reg(unsigned long reg_value, unsigned int reg_offset) +{ + early_writell_hb_eg_reg(HB_PCI_BUS_NUM, HB_PCI_SLOT, HB_PCI_FUNC, + reg_value, reg_offset); +} + +static inline unsigned char +early_readb_eg_reg(unsigned int reg_offset) +{ + return early_readb_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline unsigned short +early_readw_eg_reg(unsigned int reg_offset) +{ + return early_readw_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline unsigned int +early_readl_eg_reg(unsigned int reg_offset) +{ + return early_readl_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline unsigned long +early_readll_eg_reg(unsigned int reg_offset) +{ + return early_readll_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_offset); +} + +static inline void +early_writeb_eg_reg(unsigned char reg_value, unsigned int reg_offset) +{ + early_writeb_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writew_eg_reg(unsigned short reg_value, unsigned int reg_offset) +{ + early_writew_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writel_eg_reg(unsigned int reg_value, unsigned int reg_offset) +{ + early_writel_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline void +early_writell_eg_reg(unsigned long reg_value, unsigned int reg_offset) +{ + early_writell_hb_eg_reg(EG_PCI_BUS_NUM, EG_PCI_SLOT, EG_PCI_FUNC, + reg_value, reg_offset); +} + +static inline unsigned long +early_get_legacy_nbsr_base(void) +{ + return early_readll_hb_reg(HB_PCI_LEGACY_BAR) & + HB_PCI_LEGACY_MEMORY_BAR; +} + +static inline unsigned long +boot_get_legacy_nbsr_base(void) +{ + return boot_readll_hb_reg(HB_PCI_LEGACY_BAR) & + HB_PCI_LEGACY_MEMORY_BAR; +} + +#endif /* ! 
__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_HB_REGS_H_ */
diff --git a/arch/e2k/include/asm/head.h b/arch/e2k/include/asm/head.h
new file mode 100644
index 0000000..629c82b
--- /dev/null
+++ b/arch/e2k/include/asm/head.h
@@ -0,0 +1,306 @@
+/* $Id: head.h,v 1.41 2009/10/27 10:14:51 atic Exp $ */
+#ifndef _E2K_HEAD_H
+#define _E2K_HEAD_H
+
+#include
+#include
+#include
+#include
+#ifndef __ASSEMBLY__
+#include
+#endif /* !__ASSEMBLY__ */
+
+#define EOS_RAM_BASE_LABEL	_data
+#define KERNEL_START_LABEL	_start	/* start label of Linux Image */
+#define KERNEL_END_LABEL	_end	/* end label of Linux Image */
+
+#define TTABLE_START_LABEL	__ttable_start	/* start label of kernel */
+						/* trap table */
+#define TTABLE_END_LABEL	__ttable_end	/* end label of kernel */
+						/* trap table */
+
+#ifdef __ASSEMBLY__
+
+#define KERNEL_BASE	[KERNEL_START_LABEL]	/* virtual address of Linux */
+						/* Image beginning */
+#define KERNEL_END	[KERNEL_END_LABEL]	/* virtual address of Linux */
+						/* Image end */
+#define EOS_RAM_BASE	[EOS_RAM_BASE_LABEL]
+
+#define KERNEL_TTABLE_BASE	[TTABLE_START_LABEL]	/* kernel trap table */
+							/* start address */
+#define KERNEL_TTABLE_END	[TTABLE_END_LABEL]	/* kernel trap table */
+							/* end address */
+
+#else /* !(__ASSEMBLY__) */
+
+#define EOS_RAM_BASE		((e2k_addr_t)&EOS_RAM_BASE_LABEL)
+
+#define KERNEL_BASE		((e2k_addr_t)&KERNEL_START_LABEL)
+#define KERNEL_END		((e2k_addr_t)&KERNEL_END_LABEL)
+
+#define KERNEL_TTABLE_BASE	((e2k_addr_t)&TTABLE_START_LABEL)
+#define KERNEL_TTABLE_END	((e2k_addr_t)&TTABLE_END_LABEL)
+
+#endif /* !(__ASSEMBLY__) */
+
+
+#define E2K_EOS_RAM_PAGE_SIZE	E2K_SMALL_PAGE_SIZE	/* Loader works with */
+							/* small pages */
+
+/* Size of pages where the kernel is loaded */
+#define E2K_KERNEL_PAGE_SIZE	(cpu_has(CPU_HWBUG_LARGE_PAGES) ? \
+				E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE)
+#define BOOT_E2K_KERNEL_PAGE_SIZE (boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ? \
+				E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE)
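+
+/*
+ * Illustrative sketch (an assumption, not taken from the original patch):
+ * a boot loader would map the image [KERNEL_BASE, KERNEL_END) with pages
+ * of the size selected above, e.g.
+ *
+ *	pages = (KERNEL_END - KERNEL_BASE + BOOT_E2K_KERNEL_PAGE_SIZE - 1) /
+ *			BOOT_E2K_KERNEL_PAGE_SIZE;
+ */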
+
+						/* The equal (identity) map */
+						/* of phys to virt addresses */
+						/* should be done with pages */
+						/* of a single size */
+#define BOOT_E2K_EQUAL_MAP_PAGE_SIZE	BOOT_E2K_KERNEL_PAGE_SIZE
+
+#define E2K_KERNEL_PS_PAGE_SIZE	E2K_SMALL_PAGE_SIZE	/* kernel procedure */
+							/* stack loads into */
+							/* the small pages */
+
+						/* kernel procedure */
+						/* stack size 16 * 4KB */
+						/* at boot-time */
+#define E2K_BOOT_KERNEL_PS_SIZE		(16 * E2K_KERNEL_PS_PAGE_SIZE)
+
+						/* kernel procedure */
+						/* chain stack loads */
+						/* into the small */
+						/* pages */
+#define E2K_KERNEL_PCS_PAGE_SIZE	E2K_SMALL_PAGE_SIZE
+
+						/* kernel procedure */
+						/* chain stack size */
+						/* at boot-time */
+						/* 4 * 4KB */
+#define E2K_BOOT_KERNEL_PCS_SIZE	(4 * E2K_KERNEL_PCS_PAGE_SIZE)
+
+						/* kernel stack loads */
+						/* into the small */
+						/* pages */
+#define E2K_KERNEL_US_PAGE_SIZE		E2K_SMALL_PAGE_SIZE
+
+						/* kernel stack size */
+						/* at boot-time */
+						/* 4 * 4KB */
+#define E2K_BOOT_KERNEL_US_SIZE		(4 * E2K_KERNEL_US_PAGE_SIZE)
+
+						/* map initrd using */
+						/* 4K pages (4Mb in */
+						/* the future) */
+#define E2K_INITRD_PAGE_SIZE		E2K_SMALL_PAGE_SIZE
+
+						/* map bootinfo data */
+						/* using 4K pages */
+#define E2K_BOOTINFO_PAGE_SIZE		E2K_SMALL_PAGE_SIZE
+
+						/* map MP tables */
+						/* using 4K pages */
+#define E2K_MPT_PAGE_SIZE		E2K_SMALL_PAGE_SIZE
+
+						/* map symbols & */
+						/* strings tables */
+						/* using 4K pages */
+#define E2K_NAMETAB_PAGE_SIZE		E2K_SMALL_PAGE_SIZE
+
+						/* map x86 HW area */
+						/* using 4K pages */
+#define E2K_X86_HW_PAGE_SIZE		E2K_SMALL_PAGE_SIZE
+
+/*
+ * All or some parts of physical memory pages are mapped to virtual
+ * space starting from 'PAGE_OFFSET'
+ */
+#define E2K_MAPPED_PHYS_MEM_SIZE	(0 * (1024 * 1024))
+						/* full physical */
+						/* memory */
+
+/* Size of pages to map physical memory */
+#define E2K_MAPPED_PHYS_MEM_PAGE_SIZE \
+		((cpu_has(CPU_HWBUG_LARGE_PAGES) || \
+				IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) ? \
+			E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE)
+#define BOOT_E2K_MAPPED_PHYS_MEM_PAGE_SIZE \
+		((boot_cpu_has(CPU_HWBUG_LARGE_PAGES) || \
+				IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) ? \
+			E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE)
+
+/*
+ * Size of the top of kernel stack to map to equal virtual addresses to ensure
+ * switching from physical to virtual addressing
+ */
+#ifndef __ASSEMBLY__
+#define E2K_KERNEL_US_PAGE_SWITCHING_SIZE	(128 * sizeof(long))
+#else
+#define E2K_KERNEL_US_PAGE_SWITCHING_SIZE	(128 * 8)
+#endif /* !(__ASSEMBLY__) */
+
+/*
+ * Kernel virtual memory layout
+ */
+
+/*
+ * The topmost virtual addresses are used to allocate Virtually Mapped
+ * Linear Page Tables (VM LPT).
+ * All page tables are virtually mapped into the same virtual space as the
+ * kernel.
+ * Definition of the Virtually Mapped Linear Page Table base address.
+ * The virtual page table lives at the end of the virtual address space
+ * 0x0000 ff80 0000 0000 - 0x0000 ffff ffff ffff	all PTs virtual space:
+ *
+ * 0x0000 ff80 0000 0000 - 0x0000 ffff bfff ffff	first-level PTs (PTEs)
+ * 0x0000 ffff c000 0000 - 0x0000 ffff ffdf ffff	second-level PTs (PMDs)
+ * 0x0000 ffff ffe0 0000 - 0x0000 ffff ffff efff	third-level PTs (PUDs)
+ * 0x0000 ffff ffff f000 - 0x0000 ffff ffff fffe	root-fourth-level PTs (PGD)
+ * 0x0000 ffff ffff ffff -			the root-fourth-level PGD itself
+ */
+
+#define KERNEL_VPTB_BASE_ADDR	0x0000ff8000000000UL
+#ifndef __ASSEMBLY__
+#define KERNEL_PPTB_BASE_ADDR	((e2k_addr_t)boot_root_pt)
+#else
+#define KERNEL_PPTB_BASE_ADDR	(boot_root_pt)
+#endif /* !(__ASSEMBLY__) */
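+
+/*
+ * Sketch of the self-map arithmetic (illustrative, assuming 4K pages and
+ * 8-byte ptes; not part of the original patch): with a virtually mapped
+ * linear page table, the pte that maps a virtual address 'vaddr' lives at
+ * a fixed offset inside the VPTB window above:
+ *
+ *	pte_va = KERNEL_VPTB_BASE_ADDR + (vaddr >> PAGE_SHIFT) * 8;
+ */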
+
+/*
+ * Area dedicated for I/O ports and BIOS physical memory
+ * 0x0000 00ff fe00 0000 - 0x0000 00ff ffff ffff	all I/O physical memory
+ * 0x0000 ff7b fc00 0000 - 0x0000 ff7b ffff ffff	all I/O virtual memory
+ */
+
+#define E2K_KERNEL_IO_BIOS_AREAS_BASE	0x0000ff7bfc000000UL
+#define E2K_KERNEL_IO_BIOS_AREAS_SIZE	0x0000000004000000UL
+
+/*
+ * See BUG in the pcim configuration block in the jump func:
+ * should be 0x00000000FEBFFFFFUL according to the specification
+ */
+#define E2K_PCI_MEM_AREA_PHYS_END	0x00000000F7FFFFFFUL
+#define E2K_SCRB_SIZE			0x0000000000001000UL
+
+#define E2K_FULL_SIC_IO_AREA_PHYS_BASE	0x0000000101000000UL
+#define E2K_FULL_SIC_IO_AREA_SIZE	0x0000000000010000UL	/* 64K */
+
+#define E2K_LEGACY_SIC_IO_AREA_PHYS_BASE	0x000000ff20000000UL
+#define E2K_LEGACY_SIC_IO_AREA_SIZE		0x0000000010000000UL /* 256M */
+
+#define E2K_VIRT_CPU_X86_IO_AREA_PHYS_BASE	0x000000fff0000000UL
+
+/*
+ * Area dedicated for I/O ports and BIOS physical memory
+ * Area size should be the max of
+ *	E2K_FULL_SIC_IO_AREA_SIZE	0x0000000001000000UL
+ *	E2K_LEGACY_SIC_BIOS_AREA_SIZE	0x0000000010000000UL 256 Mb	IGNORE
+ *
+ * 0x0000 00ff f000 0000 - 0x0000 00ff ffff ffff	all I/O physical memory
+ * 0x0000 ff7b fc00 0000 - 0x0000 ff7b ffff ffff	all I/O virtual memory
+ *
+ * see the area dedication above:
+ *
+ * #define E2K_KERNEL_IO_BIOS_AREAS_BASE	0x0000ff7bfc000000UL
+ * #define E2K_KERNEL_IO_BIOS_AREAS_SIZE	0x0000000004000000UL
+ */
+
+/*
+ * Area dedicated for the kernel resident image virtual space and the virtual
+ * space to allocate and load kernel modules.
+ * Both of these areas should be within 2**30 bytes of virtual addresses to
+ * allow calls of extern functions based on the literal displacement DISP
+ * 0x0000 e200 0000 0000 - 0x0000 e200 3fff ffff kernel image area with modules
+ * 0x0000 e200 0000 0000 - 0x0000 e200 0xxx x000 kernel image area
+ *					xxx x defined by kernel_image_size
+ * 0x0000 e200 0xxx x000 - 0x0000 e200 3fff ffff area to load modules
+ */
+#define NATIVE_KERNEL_IMAGE_AREA_BASE	0x0000e20000000000
+
+#define KERNEL_IMAGE_PGD_INDEX		pgd_index(E2K_KERNEL_IMAGE_AREA_BASE)
+
+#define E2K_KERNEL_IMAGE_AREA_SIZE	kernel_image_size
+#define E2K_MODULES_START	_PAGE_ALIGN_DOWN( \
+					(E2K_KERNEL_IMAGE_AREA_BASE + \
+						E2K_KERNEL_IMAGE_AREA_SIZE), \
+					E2K_KERNEL_PAGE_SIZE)
+#define E2K_MODULES_END		(E2K_KERNEL_IMAGE_AREA_BASE + (1 << 30))
+#define E2K_KERNEL_AREAS_SIZE	0x0000000040000000UL	/* 2 ** 30 */
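+
+/*
+ * Illustrative consistency check (not from the original patch): modules are
+ * loaded between the end of the kernel image and E2K_MODULES_END, so the
+ * whole range stays within DISP reach of the image base:
+ *
+ *	BUILD_BUG_ON(E2K_MODULES_END - NATIVE_KERNEL_IMAGE_AREA_BASE >
+ *			E2K_KERNEL_AREAS_SIZE);
+ */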
+
+#define	KERNEL_CODES_INDEX	0UL		/* kernel CUI */
+/* bug 114501: use 0 index for all unprotected executables */
+#define	USER_CODES_UNPROT_INDEX(p) \
+		((machine.native_iset_ver >= E2K_ISET_V6 || \
+		  !(current->thread.flags & E2K_FLAG_32BIT)) ? \
+			0UL : 1UL)		/* user CUI */
+#define	USER_CODES_PROT_INDEX	1UL		/* user protected codes */
+						/* index */
+#define	MAX_KERNEL_CODES_UNITS	(KERNEL_CODES_INDEX + 1)
+
+#define	GUEST_CODES_INDEX	0UL
+#define	HOST_CODES_INDEX	(KERNEL_CODES_INDEX)
+#define	MAX_GUEST_CODES_UNITS	(GUEST_CODES_INDEX + 1)
+#define	MAX_HOST_CODES_UNITS	(MAX_KERNEL_CODES_UNITS)
+
+/*
+ * Area dedicated for kernel symbols & strings tables
+ * 0x0000 e200 4000 0000 - 0x0000 e200 ffff ffff kernel symbols & strings tables
+ */
+#define E2K_KERNEL_NAMETAB_AREA_BASE	(E2K_KERNEL_IMAGE_AREA_BASE + \
+						E2K_KERNEL_AREAS_SIZE)
+
+/*
+ * Area dedicated for physical memory mapping to virtual space
+ * 0x0000 0000 0000 0000 - 0x0000 00ff ffff ffff	all physical memory
+ * 0x0000 d000 0000 0000 - 0x0000 d0ff ffff ffff	all virtual memory to map
+ *							all physical memory
+ */
+#define E2K_KERNEL_PHYS_MEM_VIRT_BASE	PAGE_OFFSET  /* 0x0000d00000000000 */
+#define E2K_KERNEL_PHYS_MEM_SIZE	MAX_PM_SIZE  /* == 2**40 - 2**48 */
+
+/* virtualization support */
+#include
+
+/*
+ * Kernel virtual memory context
+ */
+#define E2K_KERNEL_CONTEXT	0x000
+
+/*
+ * CPU 'WAIT' operation fields structure
+ */
+#define E2K_WAIT_OP_MA_C_MASK	0x20	/* wait for all previous memory */
+					/* access operations to complete */
+#define E2K_WAIT_OP_FL_C_MASK	0x10	/* wait for all previous flush */
+					/* cache operations to complete */
+#define E2K_WAIT_OP_LD_C_MASK	0x08	/* wait for all previous load */
+					/* operations to complete */
+#define E2K_WAIT_OP_ST_C_MASK	0x04	/* wait for all previous store */
+					/* operations to complete */
+#define E2K_WAIT_OP_ALL_E_MASK	0x02	/* wait for all previous operations */
+					/* to issue all possible exceptions */
+#define E2K_WAIT_OP_ALL_C_MASK	0x01	/* wait for all previous operations */
+					/* to complete */
+
+/*
+ * IMPORTANT NOTE!!!
+ * Do not add 'sas' and 'sal' here, as they are modifiers
+ * for st_c/ld_c which make them _less_ restrictive.
+ */
+#define E2K_WAIT_OP_ALL_MASK	(E2K_WAIT_OP_MA_C_MASK | \
+				 E2K_WAIT_OP_FL_C_MASK | \
+				 E2K_WAIT_OP_LD_C_MASK | \
+				 E2K_WAIT_OP_ST_C_MASK | \
+				 E2K_WAIT_OP_ALL_C_MASK | \
+				 E2K_WAIT_OP_ALL_E_MASK)
+
+#define E2K_WAIT_MA		E2K_WAIT(E2K_WAIT_OP_MA_C_MASK)
+#define E2K_WAIT_FLUSH		E2K_WAIT(E2K_WAIT_OP_FL_C_MASK)
+#define E2K_WAIT_LD		E2K_WAIT(E2K_WAIT_OP_LD_C_MASK)
+#define E2K_WAIT_ST		E2K_WAIT(E2K_WAIT_OP_ST_C_MASK)
+#define E2K_WAIT_ALL_OP		E2K_WAIT(E2K_WAIT_OP_ALL_C_MASK)
+#define E2K_WAIT_ALL_EX		E2K_WAIT(E2K_WAIT_OP_ALL_E_MASK)
+#define E2K_WAIT_ALL		E2K_WAIT(E2K_WAIT_OP_ALL_MASK)
+
+#endif /* !(_E2K_HEAD_H) */
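+
+/*
+ * Usage sketch (illustrative, not from the original patch): the masks above
+ * combine into a single 'wait' instruction, so e.g.
+ *
+ *	E2K_WAIT(E2K_WAIT_OP_ST_C_MASK | E2K_WAIT_OP_LD_C_MASK);
+ *
+ * orders both prior stores and prior loads at once; E2K_WAIT() itself is
+ * assumed to be provided by the instruction wrappers elsewhere.
+ */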
diff --git a/arch/e2k/include/asm/host_printk.h b/arch/e2k/include/asm/host_printk.h
new file mode 100644
index 0000000..b657387
--- /dev/null
+++ b/arch/e2k/include/asm/host_printk.h
@@ -0,0 +1,13 @@
+/*
+ * Guest VM printk() on host support
+ *
+ * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_HOST_PRINTK_H
+#define _E2K_HOST_PRINTK_H
+
+#include
+#include
+
+#endif /* !_E2K_HOST_PRINTK_H */
diff --git a/arch/e2k/include/asm/hugetlb.h b/arch/e2k/include/asm/hugetlb.h
new file mode 100644
index 0000000..f482b24
--- /dev/null
+++ b/arch/e2k/include/asm/hugetlb.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_E2K_HUGETLB_H_
+#define _ASM_E2K_HUGETLB_H_
+
+#include
+
+#include
+#include
+#include
+
+
+extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+			pte_t *ptep, pte_t pte);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			pte_t *ptep);
+
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					unsigned long addr,
+					unsigned long len)
+{
+	return 0;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					unsigned long addr, pte_t *ptep)
+{
+}
+
+#define __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+#ifndef CONFIG_MAKE_ALL_PAGES_VALID
+	return _PAGE_CLEAR(pte_val(pte), UNI_PAGE_HUGE) == 0;
+#else /* CONFIG_MAKE_ALL_PAGES_VALID */
+	return _PAGE_CLEAR(pte_val(pte), UNI_PAGE_VALID | UNI_PAGE_HUGE) == 0;
+#endif /* !CONFIG_MAKE_ALL_PAGES_VALID */
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+	if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
+		ptep_set_wrprotect(mm, addr, ++ptep);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					unsigned long addr, pte_t *ptep,
+					pte_t pte, int dirty)
+{
+	int changed = !pte_same(*ptep, pte);
+	if (changed) {
+		set_pte_at(vma->vm_mm, addr, ptep, pte);
+		if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
+			set_pte_at(vma->vm_mm, addr, ++ptep, pte);
+		flush_tlb_range(vma, addr, addr + PMD_SIZE);
+	}
+	return changed;
+}
+
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long address,
+				pte_t *page_table, unsigned long sz)
+{
+	/*
+	 * In this case the virtual page occupies two sequential entries in
+	 * the second-level page table (PMD).
+	 * Both pte's (pmd's) should be cleared.
+	 */
+	pte_clear(mm, address, page_table);
+	if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
+		pte_clear(mm, address, (++page_table));
+}
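+
+/*
+ * Layout note with a sketch (illustrative): when the large page size is 4M,
+ * one huge page is backed by two adjacent 2M PMD entries, which is why every
+ * helper above touches ptep[0] and conditionally ptep[1], e.g.:
+ *
+ *	set_pte_at(mm, addr, ptep, pte);
+ *	if (E2K_LARGE_PAGE_SIZE == E2K_4M_PAGE_SIZE)
+ *		set_pte_at(mm, addr, ptep + 1, pte);
+ */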
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+
+#include
+
+#endif /* _ASM_E2K_HUGETLB_H_ */
diff --git a/arch/e2k/include/asm/hw_breakpoint.h b/arch/e2k/include/asm/hw_breakpoint.h
new file mode 100644
index 0000000..c25bff8
--- /dev/null
+++ b/arch/e2k/include/asm/hw_breakpoint.h
@@ -0,0 +1,53 @@
+#ifndef _E2K_HW_BREAKPOINT_H
+#define _E2K_HW_BREAKPOINT_H
+
+#include
+
+struct arch_hw_breakpoint {
+	unsigned long address;
+	u8 len;
+	u8 type;
+	u8 ss;
+};
+
+#define HBP_NUM 4
+static inline int hw_breakpoint_slots(int type)
+{
+	return HBP_NUM;
+}
+
+struct perf_event;
+struct perf_event_attr;
+struct task_struct;
+
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+		const struct perf_event_attr *attr,
+		struct arch_hw_breakpoint *hw);
+
+extern int arch_install_hw_breakpoint(struct perf_event *bp);
+extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+
+struct notifier_block;
+extern int hw_breakpoint_exceptions_notify(
+		struct notifier_block *unused, unsigned long val, void *data);
+
+extern void hw_breakpoint_pmu_read(struct perf_event *bp);
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+extern int bp_data_overflow_handle(struct pt_regs *regs);
+extern int bp_instr_overflow_handle(struct pt_regs *regs);
+extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+#else /* ! CONFIG_HAVE_HW_BREAKPOINT */
+static inline int bp_data_overflow_handle(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline int bp_instr_overflow_handle(struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+#endif /* _E2K_HW_BREAKPOINT_H */
diff --git a/arch/e2k/include/asm/hw_irq.h b/arch/e2k/include/asm/hw_irq.h
new file mode 100644
index 0000000..2d885d4
--- /dev/null
+++ b/arch/e2k/include/asm/hw_irq.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_E2K_HW_IRQ_H
+#define _ASM_E2K_HW_IRQ_H
+
+/* required by linux/irq.h */
+
+#include
+
+#endif /* _ASM_E2K_HW_IRQ_H */
diff --git a/arch/e2k/include/asm/hw_stacks.h b/arch/e2k/include/asm/hw_stacks.h
new file mode 100644
index 0000000..a90abfb
--- /dev/null
+++ b/arch/e2k/include/asm/hw_stacks.h
@@ -0,0 +1,695 @@
+/*
+ * Hardware stacks support
+ *
+ * Copyright 2001-2015 Salavat S. Guilyazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_HW_STACKS_H
+#define _E2K_HW_STACKS_H
+
+#include
+#include
+#include
+
+typedef enum hw_stack_type {
+	HW_STACK_TYPE_PS,
+	HW_STACK_TYPE_PCS
+} hw_stack_type_t;
+
+/*
+ * Procedure chain stacks can be mapped to user space (user processes)
+ * or to kernel space (kernel threads). But the mapping is always to a
+ * privileged area and can be accessed directly only by the host kernel.
+ * SPECIAL CASE: access to the current procedure chain stack:
+ *	1. The current stack frame must be locked (resident), so access is
+ * safe and can use common load/store operations.
+ *	2. The top of the stack can be loaded into the special hardware
+ * register file and must be spilled to memory before any access.
+ *	3. If items of the chain stack are not updated, then spilling is
+ * enough for accessing them.
+ *	4. If items of the chain stack are updated, then interrupts and any
+ * function calls should be disabled in addition to spilling, because a
+ * return (done) will fill some part of the stack from memory, so there can
+ * be two copies of chain stack items: in memory and in the register file.
+ * If we updated only the copy in memory, a following spill would overwrite
+ * it with the not updated value from the register file.
+ */
+static inline unsigned long
+native_get_active_cr_mem_value(e2k_addr_t base,
+			e2k_addr_t cr_ind, e2k_addr_t cr_item)
+{
+	return *((unsigned long *)(base + cr_ind + cr_item));
+}
+static inline unsigned long
+native_get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr_mem_value(base, cr_ind, CR0_LO_I);
+}
+static inline unsigned long
+native_get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr_mem_value(base, cr_ind, CR0_HI_I);
+}
+static inline unsigned long
+native_get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr_mem_value(base, cr_ind, CR1_LO_I);
+}
+static inline unsigned long
+native_get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr_mem_value(base, cr_ind, CR1_HI_I);
+}
+static inline void
+native_put_active_cr_mem_value(unsigned long cr_value,
+			e2k_addr_t base, e2k_addr_t cr_ind, e2k_addr_t cr_item)
+{
+	*((unsigned long *)(base + cr_ind + cr_item)) = cr_value;
+}
+static inline void
+native_put_active_cr0_lo_value(unsigned long cr0_lo_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr_mem_value(cr0_lo_value, base, cr_ind, CR0_LO_I);
+}
+static inline void
+native_put_active_cr0_hi_value(unsigned long cr0_hi_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr_mem_value(cr0_hi_value, base, cr_ind, CR0_HI_I);
+}
+static inline void
+native_put_active_cr1_lo_value(unsigned long cr1_lo_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr_mem_value(cr1_lo_value, base, cr_ind, CR1_LO_I);
+}
+static inline void
+native_put_active_cr1_hi_value(unsigned long cr1_hi_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr_mem_value(cr1_hi_value, base, cr_ind, CR1_HI_I);
+}
+
+static inline e2k_cr0_lo_t
+native_get_active_cr0_lo(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr0_lo_t cr0_lo;
+
+	cr0_lo.CR0_lo_half = native_get_active_cr0_lo_value(base, cr_ind);
+	return cr0_lo;
+}
+static inline e2k_cr0_hi_t
+native_get_active_cr0_hi(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr0_hi_t cr0_hi;
+
+	cr0_hi.CR0_hi_half = native_get_active_cr0_hi_value(base, cr_ind);
+	return cr0_hi;
+}
+static inline e2k_cr1_lo_t
+native_get_active_cr1_lo(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr1_lo_t cr1_lo;
+
+	cr1_lo.CR1_lo_half = native_get_active_cr1_lo_value(base, cr_ind);
+	return cr1_lo;
+}
+static inline e2k_cr1_hi_t
+native_get_active_cr1_hi(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr1_hi_t cr1_hi;
+
+	cr1_hi.CR1_hi_half = native_get_active_cr1_hi_value(base, cr_ind);
+	return cr1_hi;
+}
+static inline void
+native_put_active_cr0_lo(e2k_cr0_lo_t cr0_lo,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr0_lo_value(cr0_lo.CR0_lo_half, base, cr_ind);
+}
+static inline void
+native_put_active_cr0_hi(e2k_cr0_hi_t cr0_hi,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr0_hi_value(cr0_hi.CR0_hi_half, base, cr_ind);
+}
+static inline void
+native_put_active_cr1_lo(e2k_cr1_lo_t cr1_lo,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+
native_put_active_cr1_lo_value(cr1_lo.CR1_lo_half, base, cr_ind); +} +static inline void +native_put_active_cr1_hi(e2k_cr1_hi_t cr1_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_active_cr1_hi_value(cr1_hi.CR1_hi_half, base, cr_ind); +} + +static inline int +native_get_user_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr0_lo), + (u64 __user *)(base + cr_ind + CR0_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_get_user_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr0_hi), + (u64 __user *)(base + cr_ind + CR0_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_get_user_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr1_lo), + (u64 __user *)(base + cr_ind + CR1_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_get_user_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __get_user(AS_WORD_P(cr1_hi), + (u64 __user *)(base + cr_ind + CR1_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr0_lo), + (u64 __user *)(base + cr_ind + CR0_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr0_hi), + (u64 __user *)(base + cr_ind + CR0_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr1_lo), + (u64 __user *)(base + cr_ind + CR1_LO_I)); + clear_ts_flag(ts_flag); + + return ret; +} +static inline int +native_put_user_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __put_user(AS_WORD(cr1_hi), + (u64 __user *)(base + cr_ind + CR1_HI_I)); + clear_ts_flag(ts_flag); + + return ret; +} + +static inline void +native_get_kernel_cr0_lo(e2k_cr0_lo_t *cr0_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr0_lo) = *((u64 *)(base + cr_ind + CR0_LO_I)); +} +static inline void +native_get_kernel_cr0_hi(e2k_cr0_hi_t *cr0_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr0_hi) = *((u64 *)(base + cr_ind + CR0_HI_I)); +} +static inline void +native_get_kernel_cr1_lo(e2k_cr1_lo_t *cr1_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr1_lo) = *((u64 *)(base + cr_ind + CR1_LO_I)); +} +static inline void +native_get_kernel_cr1_hi(e2k_cr1_hi_t *cr1_hi, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + AS_WORD_P(cr1_hi) = *((u64 *)(base + cr_ind + CR1_HI_I)); +} +static inline void +native_put_kernel_cr0_lo(e2k_cr0_lo_t cr0_lo, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + 
*((u64 *)(base + cr_ind + CR0_LO_I)) = AS_WORD(cr0_lo);
+}
+static inline void
+native_put_kernel_cr0_hi(e2k_cr0_hi_t cr0_hi,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	*((u64 *)(base + cr_ind + CR0_HI_I)) = AS_WORD(cr0_hi);
+}
+static inline void
+native_put_kernel_cr1_lo(e2k_cr1_lo_t cr1_lo,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	*((u64 *)(base + cr_ind + CR1_LO_I)) = AS_WORD(cr1_lo);
+}
+static inline void
+native_put_kernel_cr1_hi(e2k_cr1_hi_t cr1_hi,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	*((u64 *)(base + cr_ind + CR1_HI_I)) = AS_WORD(cr1_hi);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* pure guest kernel (not paravirtualized) */
+#include
+#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel with or without virtualization support */
+static inline unsigned long
+get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr0_lo_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr0_hi_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr1_lo_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return native_get_active_cr1_hi_value(base, cr_ind);
+}
+static inline void
+put_active_cr0_lo_value(unsigned long cr0_lo_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr0_lo_value(cr0_lo_value, base, cr_ind);
+}
+static inline void
+put_active_cr0_hi_value(unsigned long cr0_hi_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr0_hi_value(cr0_hi_value, base, cr_ind);
+}
+static inline void
+put_active_cr1_lo_value(unsigned long cr1_lo_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr1_lo_value(cr1_lo_value, base, cr_ind);
+}
+static inline void
+put_active_cr1_hi_value(unsigned long cr1_hi_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	native_put_active_cr1_hi_value(cr1_hi_value, base, cr_ind);
+}
+#endif /* !CONFIG_PARAVIRT_GUEST && !CONFIG_KVM_GUEST_KERNEL */
+
+static inline e2k_cr0_lo_t
+get_active_cr0_lo(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr0_lo_t cr0_lo;
+
+	cr0_lo.CR0_lo_half = get_active_cr0_lo_value(base, cr_ind);
+	return cr0_lo;
+}
+static inline e2k_cr0_hi_t
+get_active_cr0_hi(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr0_hi_t cr0_hi;
+
+	cr0_hi.CR0_hi_half = get_active_cr0_hi_value(base, cr_ind);
+	return cr0_hi;
+}
+static inline e2k_cr1_lo_t
+get_active_cr1_lo(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr1_lo_t cr1_lo;
+
+	cr1_lo.CR1_lo_half = get_active_cr1_lo_value(base, cr_ind);
+	return cr1_lo;
+}
+static inline e2k_cr1_hi_t
+get_active_cr1_hi(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	e2k_cr1_hi_t cr1_hi;
+
+	cr1_hi.CR1_hi_half = get_active_cr1_hi_value(base, cr_ind);
+	return cr1_hi;
+}
+static inline void
+put_active_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	put_active_cr0_lo_value(cr0_lo.CR0_lo_half, base, cr_ind);
+}
+static inline void
+put_active_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	put_active_cr0_hi_value(cr0_hi.CR0_hi_half, base, cr_ind);
+}
+static inline void
+put_active_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	put_active_cr1_lo_value(cr1_lo.CR1_lo_half, base, cr_ind);
+}
+static inline void
+put_active_cr1_hi(e2k_cr1_hi_t
cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_active_cr1_hi_value(cr1_hi.CR1_hi_half, base, cr_ind); +} + +static inline int +get_user_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr0_lo(cr0_lo, base, cr_ind); +} +static inline int +get_user_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr0_hi(cr0_hi, base, cr_ind); +} +static inline int +get_user_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr1_lo(cr1_lo, base, cr_ind); +} +static inline int +get_user_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_get_user_cr1_hi(cr1_hi, base, cr_ind); +} +static inline int +put_user_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr0_lo(cr0_lo, base, cr_ind); +} +static inline int +put_user_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr0_hi(cr0_hi, base, cr_ind); +} +static inline int +put_user_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr1_lo(cr1_lo, base, cr_ind); +} +static inline int +put_user_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + return native_put_user_cr1_hi(cr1_hi, base, cr_ind); +} + +static inline void +get_kernel_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr0_lo(cr0_lo, base, cr_ind); +} +static inline void +get_kernel_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr0_hi(cr0_hi, base, cr_ind); +} +static inline void +get_kernel_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr1_lo(cr1_lo, base, cr_ind); +} +static inline void +get_kernel_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_get_kernel_cr1_hi(cr1_hi, base, cr_ind); +} +static inline void +put_kernel_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr0_lo(cr0_lo, base, cr_ind); +} +static inline void +put_kernel_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr0_hi(cr0_hi, base, cr_ind); +} +static inline void +put_kernel_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr1_lo(cr1_lo, base, cr_ind); +} +static inline void +put_kernel_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, e2k_addr_t cr_ind) +{ + native_put_kernel_cr1_hi(cr1_hi, base, cr_ind); +} + +static inline int +get_cr0_lo(e2k_cr0_lo_t *cr0_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr0_lo(cr0_lo, base, cr_ind); + else + get_kernel_cr0_lo(cr0_lo, base, cr_ind); + return ret; +} + +static inline int +get_cr0_hi(e2k_cr0_hi_t *cr0_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr0_hi(cr0_hi, base, cr_ind); + else + get_kernel_cr0_hi(cr0_hi, base, cr_ind); + return ret; +} + +static inline int +get_cr1_lo(e2k_cr1_lo_t *cr1_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr1_lo(cr1_lo, base, cr_ind); + else + get_kernel_cr1_lo(cr1_lo, base, cr_ind); + return ret; +} + +static inline int +get_cr1_hi(e2k_cr1_hi_t *cr1_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_cr1_hi(cr1_hi, base, cr_ind); + else + get_kernel_cr1_hi(cr1_hi, base, cr_ind); + return ret; +} + +static 
inline int +put_cr0_lo(e2k_cr0_lo_t cr0_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr0_lo(cr0_lo, base, cr_ind); + else + put_kernel_cr0_lo(cr0_lo, base, cr_ind); + return ret; +} +static inline int +put_cr0_hi(e2k_cr0_hi_t cr0_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr0_hi(cr0_hi, base, cr_ind); + else + put_kernel_cr0_hi(cr0_hi, base, cr_ind); + return ret; +} + +static inline int +put_cr1_lo(e2k_cr1_lo_t cr1_lo, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr1_lo(cr1_lo, base, cr_ind); + else + put_kernel_cr1_lo(cr1_lo, base, cr_ind); + return ret; +} + +static inline int +put_cr1_hi(e2k_cr1_hi_t cr1_hi, e2k_addr_t base, u64 cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_cr1_hi(cr1_hi, base, cr_ind); + else + put_kernel_cr1_hi(cr1_hi, base, cr_ind); + return ret; +} + +static inline int +get_cr0(e2k_cr0_lo_t *cr0_lo, e2k_cr0_hi_t *cr0_hi, + e2k_pcsp_lo_t pcsp_lo, u64 cr_ind) +{ + u64 base = pcsp_lo.PCSP_lo_base; + int ret = 0; + + ret += get_cr0_lo(cr0_lo, base, cr_ind); + ret += get_cr0_hi(cr0_hi, base, cr_ind); + return ret; +} + +static inline int +get_cr1(e2k_cr1_lo_t *cr1_lo, e2k_cr1_hi_t *cr1_hi, + e2k_pcsp_lo_t pcsp_lo, u64 cr_ind) +{ + u64 base = pcsp_lo.PCSP_lo_base; + int ret = 0; + + ret += get_cr1_lo(cr1_lo, base, cr_ind); + ret += get_cr1_hi(cr1_hi, base, cr_ind); + return ret; +} + +static inline int +get_user_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_from_user(crs, (const char __user *)(base + cr_ind), + sizeof(*crs)); + clear_ts_flag(ts_flag); + + return ret; +} + +static inline int +put_user_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = __copy_to_user((char __user *)(base + cr_ind), crs, + sizeof(*crs)); + clear_ts_flag(ts_flag); + + return ret; +} + +static inline void +get_kernel_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + get_kernel_cr0_lo(&crs->cr0_lo, base, cr_ind); + get_kernel_cr0_hi(&crs->cr0_hi, base, cr_ind); + get_kernel_cr1_lo(&crs->cr1_lo, base, cr_ind); + get_kernel_cr1_hi(&crs->cr1_hi, base, cr_ind); +} + +static inline void +put_kernel_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + put_kernel_cr0_lo(crs->cr0_lo, base, cr_ind); + put_kernel_cr0_hi(crs->cr0_hi, base, cr_ind); + put_kernel_cr1_lo(crs->cr1_lo, base, cr_ind); + put_kernel_cr1_hi(crs->cr1_hi, base, cr_ind); +} + +static inline int +get_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = get_user_crs(crs, base, cr_ind); + else + get_kernel_crs(crs, base, cr_ind); + return ret; +} + +static inline int +put_crs(e2k_mem_crs_t *crs, e2k_addr_t base, e2k_addr_t cr_ind) +{ + int ret = 0; + + if (base < TASK_SIZE) + ret = put_user_crs(crs, base, cr_ind); + else + put_kernel_crs(crs, base, cr_ind); + return ret; +} + +extern void __update_psp_regs(unsigned long base, unsigned long size, + unsigned long new_fp, + e2k_psp_lo_t *psp_lo, e2k_psp_hi_t *psp_hi); +extern void update_psp_regs(unsigned long new_fp, + e2k_psp_lo_t *psp_lo, e2k_psp_hi_t *psp_hi); + +extern void __update_pcsp_regs(unsigned long base, unsigned long size, + unsigned long new_fp, + e2k_pcsp_lo_t *pcsp_lo, e2k_pcsp_hi_t *pcsp_hi); +extern void 
update_pcsp_regs(unsigned long new_fp,
+			e2k_pcsp_lo_t *pcsp_lo, e2k_pcsp_hi_t *pcsp_hi);
+
+#endif /* _E2K_HW_STACKS_H */
diff --git a/arch/e2k/include/asm/io.h b/arch/e2k/include/asm/io.h
new file mode 100644
index 0000000..0c6de82
--- /dev/null
+++ b/arch/e2k/include/asm/io.h
@@ -0,0 +1,616 @@
+#ifndef _E2K_IO_H_
+#define _E2K_IO_H_
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+extern int __init native_arch_pci_init(void);
+
+#define E2K_X86_IO_AREA_BASE	E2K_KERNEL_IO_BIOS_AREAS_BASE
+
+/* Size of pages for the IO area */
+#define E2K_X86_IO_PAGE_SIZE	(cpu_has(CPU_HWBUG_LARGE_PAGES) ? \
+				E2K_SMALL_PAGE_SIZE : E2K_LARGE_PAGE_SIZE)
+#define X86_IO_AREA_PHYS_BASE	(machine.x86_io_area_base)
+#define X86_IO_AREA_PHYS_SIZE	(machine.x86_io_area_size)
+
+
+/*
+ * We add all the necessary barriers manually
+ */
+#define __io_br()
+#define __io_ar(v)	do { (void) (v); } while (0)
+#define __io_bw()
+#define __io_aw()
+
+/*
+ * _relaxed() accessors.
+ */
+
+static inline u8 native_readb_relaxed(const volatile void __iomem *addr)
+{
+	u8 res = IO_READ_B(addr);
+	if (cpu_has(CPU_HWBUG_PIO_READS))
+		__E2K_WAIT(_ld_c);
+	return res;
+}
+
+static inline u16 native_readw_relaxed(const volatile void __iomem *addr)
+{
+	u16 res = IO_READ_H(addr);
+	if (cpu_has(CPU_HWBUG_PIO_READS))
+		__E2K_WAIT(_ld_c);
+	return res;
+}
+
+static inline u32 native_readl_relaxed(const volatile void __iomem *addr)
+{
+	u32 res = IO_READ_W(addr);
+	if (cpu_has(CPU_HWBUG_PIO_READS))
+		__E2K_WAIT(_ld_c);
+	return res;
+}
+
+static inline u64 native_readq_relaxed(const volatile void __iomem *addr)
+{
+	u64 res = IO_READ_D(addr);
+	if (cpu_has(CPU_HWBUG_PIO_READS))
+		__E2K_WAIT(_ld_c);
+	return res;
+}
+
+static inline void native_writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+	IO_WRITE_B(addr, value);
+}
+
+static inline void native_writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+	IO_WRITE_H(addr, value);
+}
+
+static inline void native_writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+	IO_WRITE_W(addr, value);
+}
+
+static inline void native_writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+	IO_WRITE_D(addr, value);
+}
+
+
+/*
+ * Strongly ordered accessors.
+ */ + +static inline u8 native_readb(const volatile void __iomem *addr) +{ + u8 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u8 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), b, "memory"); + } else { + res = native_readb_relaxed(addr); + } + return res; +} + +static inline u16 native_readw(const volatile void __iomem *addr) +{ + u16 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u16 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), h, "memory"); + } else { + res = native_readw_relaxed(addr); + } + return res; +} + +static inline u32 native_readl(const volatile void __iomem *addr) +{ + u32 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u32 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), w, "memory"); + } else { + res = native_readl_relaxed(addr); + } + return res; +} + +static inline u64 native_readq(const volatile void __iomem *addr) +{ + u64 res; + if (cpu_has(CPU_FEAT_ISET_V6)) { + LOAD_NV_MAS((volatile u64 __force *) addr, res, + MAS_LOAD_ACQUIRE_V6(MAS_MT_0), d, "memory"); + } else { + res = native_readq_relaxed(addr); + } + return res; +} + +static inline void native_writeb(u8 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u8 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), b, "memory"); + /* wmb() after MMIO writes is not required by documentation, but + * this is how x86 works and how most of the drivers are tested. */ + wmb(); + } else { + native_writeb_relaxed(value, addr); + } +} + +static inline void native_writew(u16 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u16 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), h, "memory"); + wmb(); + } else { + native_writew_relaxed(value, addr); + } +} + +static inline void native_writel(u32 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u32 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), w, "memory"); + wmb(); + } else { + native_writel_relaxed(value, addr); + } +} + +static inline void native_writeq(u64 value, volatile void __iomem *addr) +{ + if (cpu_has(CPU_FEAT_ISET_V6)) { + STORE_NV_MAS((volatile u64 __force *) addr, value, + MAS_STORE_RELEASE_V6(MAS_MT_0), d, "memory"); + wmb(); + } else { + native_writeq_relaxed(value, addr); + } +} + +/* + * Port accessors, also strongly ordered + */ + +#if CONFIG_CPU_ISET >= 6 +# define __io_par() E2K_WAIT_V6(_ld_c | _sal | _lal) +# define __io_pbw() E2K_WAIT_V6(_st_c | _sas | _ld_c | _sal) +/* Not required by documentation, but this is how + * x86 works and how most of the drivers are tested. 
*/ +# define __io_paw() E2K_WAIT_V6(_st_c | _sas) +#else +# define __io_par() \ +do { \ + if (cpu_has(CPU_HWBUG_PIO_READS)) \ + __E2K_WAIT(_ld_c); \ +} while (0) +# define __io_pbw() +# define __io_paw() +#endif + +static inline u8 native_inb(unsigned int port) +{ + u8 byte = NATIVE_READ_MAS_B(X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); + __io_par(); + return byte; +} +static inline u16 native_inw(unsigned int port) +{ + u16 hword = NATIVE_READ_MAS_H(X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); + __io_par(); + return hword; +} +static inline u32 native_inl(unsigned int port) +{ + u32 word = NATIVE_READ_MAS_W(X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); + __io_par(); + return word; +} +static inline void native_outb(u8 byte, unsigned int port) +{ + __io_pbw(); + NATIVE_WRITE_MAS_B(X86_IO_AREA_PHYS_BASE + port, byte, MAS_IOADDR); + __io_paw(); +} +static inline void native_outw(u16 halfword, unsigned int port) +{ + __io_pbw(); + NATIVE_WRITE_MAS_H(X86_IO_AREA_PHYS_BASE + port, halfword, MAS_IOADDR); + __io_paw(); +} +static inline void native_outl(u32 word, unsigned int port) +{ + __io_pbw(); + NATIVE_WRITE_MAS_W(X86_IO_AREA_PHYS_BASE + port, word, MAS_IOADDR); + __io_paw(); +} + + +/* + * Variants of inX/outX that repeatedly access the same port + */ + +static inline void native_insb(unsigned short port, void *dst, unsigned long count) +{ + u8 *b_p = dst; + while (count--) + *b_p++ = native_inb(port); +} +static inline void native_insw(unsigned short port, void *dst, unsigned long count) +{ + u16 *hw_p = dst; + while (count--) + *hw_p++ = native_inw(port); +} +static inline void native_insl(unsigned short port, void *dst, unsigned long count) +{ + u32 *l_p = dst; + while (count--) + *l_p++ = native_inl(port); +} + +static inline void native_outsb(unsigned short port, const void *src, unsigned long count) +{ + const u8 *b_p = src; + while (count--) + native_outb(*b_p++, port); +} +static inline void native_outsw(unsigned short port, const void *src, unsigned long count) +{ + const u16 *hw_p = src; + while (count--) + native_outw(*hw_p++, port); +} +static inline void native_outsl(unsigned short port, const void *src, unsigned long count) +{ + const u32 *l_p = src; + while (count--) + native_outl(*l_p++, port); +} + +/* + * And some e2k-specific accessors + */ +static inline void native_debug_cons_outb(u8 byte, u16 port) +{ + native_outb(byte, port); +} +static inline u8 native_debug_cons_inb(u16 port) +{ + return native_inb(port); +} +static inline u32 native_debug_cons_inl(u16 port) +{ + return native_inl(port); +} + +extern void native_conf_inb(unsigned int domain, unsigned int bus, + unsigned long port, u8 *byte); +extern void native_conf_inw(unsigned int domain, unsigned int bus, + unsigned long port, u16 *hword); +extern void native_conf_inl(unsigned int domain, unsigned int bus, + unsigned long port, u32 *word); +extern void native_conf_outb(unsigned int domain, unsigned int bus, + unsigned long port, u8 byte); +extern void native_conf_outw(unsigned int domain, unsigned int bus, + unsigned long port, u16 hword); +extern void native_conf_outl(unsigned int domain, unsigned int bus, + unsigned long port, u32 word); + + +#if defined CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +# include + +# define __raw_readb kvm_readb_relaxed +# define __raw_readw kvm_readw_relaxed +# define __raw_readl kvm_readl_relaxed +# define __raw_readq kvm_readq_relaxed +# define __raw_writeb kvm_writeb_relaxed +# define __raw_writew kvm_writew_relaxed +# define 
__raw_writel kvm_writel_relaxed +# define __raw_writeq kvm_writeq_relaxed +# define readb kvm_hv_readb +# define readw kvm_hv_readw +# define readl kvm_hv_readl +# define readq kvm_hv_readq +# define writeb kvm_hv_writeb +# define writew kvm_hv_writew +# define writel kvm_hv_writel +# define writeq kvm_hv_writeq +# define inb kvm_hv_inb +# define inw kvm_hv_inw +# define inl kvm_hv_inl +# define outb kvm_hv_outb +# define outw kvm_hv_outw +# define outl kvm_hv_outl +# define insb kvm_hv_insb +# define insw kvm_hv_insw +# define insl kvm_hv_insl +# define outsb kvm_hv_outsb +# define outsw kvm_hv_outsw +# define outsl kvm_hv_outsl +#elif defined CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +# include +# define __raw_readb pv_readb +# define __raw_readw pv_readw +# define __raw_readl pv_readl +# define __raw_readq pv_readq +# define __raw_writeb pv_writeb +# define __raw_writew pv_writew +# define __raw_writel pv_writel +# define __raw_writeq pv_writeq +# define inb pv_inb +# define inw pv_inw +# define inl pv_inl +# define outb pv_outb +# define outw pv_outw +# define outl pv_outl +# define insb pv_insb +# define insw pv_insw +# define insl pv_insl +# define outsb pv_outsb +# define outsw pv_outsw +# define outsl pv_outsl +#else +/* Native kernel - either host or without any virtualization at all */ +# define __raw_readb native_readb_relaxed +# define __raw_readw native_readw_relaxed +# define __raw_readl native_readl_relaxed +# define __raw_readq native_readq_relaxed +# define __raw_writeb native_writeb_relaxed +# define __raw_writew native_writew_relaxed +# define __raw_writel native_writel_relaxed +# define __raw_writeq native_writeq_relaxed +# define readb native_readb +# define readw native_readw +# define readl native_readl +# define readq native_readq +# define writeb native_writeb +# define writew native_writew +# define writel native_writel +# define writeq native_writeq +# define inb native_inb +# define inw native_inw +# define inl native_inl +# define outb native_outb +# define outw native_outw +# define outl native_outl +# define insb native_insb +# define insw native_insw +# define insl native_insl +# define outsb native_outsb +# define outsw native_outsw +# define outsl native_outsl + +static inline void boot_writeb(u8 b, void __iomem *addr) +{ + boot_native_writeb(b, addr); +} + +static inline void boot_writew(u16 w, void __iomem *addr) +{ + boot_native_writew(w, addr); +} + +static inline void boot_writel(u32 l, void __iomem *addr) +{ + boot_native_writel(l, addr); +} + +static inline void boot_writeq(u64 q, void __iomem *addr) +{ + boot_native_writeq(q, addr); +} + +static inline u8 boot_readb(void __iomem *addr) +{ + return boot_native_readb(addr); +} + +static inline u16 boot_readw(void __iomem *addr) +{ + return boot_native_readw(addr); +} + +static inline u32 boot_readl(void __iomem *addr) +{ + return boot_native_readl(addr); +} + +static inline u64 boot_readq(void __iomem *addr) +{ + return boot_native_readq(addr); +} + + +static inline void +conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + native_conf_inb(domain, bus, port, byte); +} +static inline void +conf_inw(unsigned int domain, unsigned int bus, unsigned long port, u16 *hword) +{ + native_conf_inw(domain, bus, port, hword); +} +static inline void +conf_inl(unsigned int domain, unsigned int bus, unsigned long port, u32 *word) +{ + native_conf_inl(domain, bus, port, word); +} +static inline void +conf_outb(unsigned int domain, unsigned int bus, 
unsigned long port, u8 byte) +{ + native_conf_outb(domain, bus, port, byte); +} +static inline void +conf_outw(unsigned int domain, unsigned int bus, unsigned long port, u16 hword) +{ + native_conf_outw(domain, bus, port, hword); +} +static inline void +conf_outl(unsigned int domain, unsigned int bus, unsigned long port, u32 word) +{ + native_conf_outl(domain, bus, port, word); +} + +static inline void debug_cons_outb(u8 byte, u16 port) +{ + native_debug_cons_outb(byte, port); +} +static inline void debug_cons_outb_p(u8 byte, u16 port) +{ + native_debug_cons_outb(byte, port); +} +static inline u8 debug_cons_inb(u16 port) +{ + return native_debug_cons_inb(port); +} +static inline u32 debug_cons_inl(u16 port) +{ + return native_debug_cons_inl(port); +} +static inline void boot_debug_cons_outb(u8 byte, u16 port) +{ + boot_native_outb(byte, port); +} +static inline u8 boot_debug_cons_inb(u16 port) +{ + return boot_native_inb(port); +} +static inline u32 boot_debug_cons_inl(u16 port) +{ + return boot_native_inl(port); +} + +static inline int __init arch_pci_init(void) +{ + return native_arch_pci_init(); +} + +#endif + +/* + * Map in an area of physical address space, for accessing + * I/O devices etc. + */ +#define ioremap_cache ioremap_cache +#define ioremap_wc ioremap_wc +#define ioremap_wt ioremap_wc +#define ioremap_nocache ioremap_nocache +#define ioremap_uc ioremap_nocache +#define ioremap ioremap_nocache +extern void __iomem *ioremap_cache(resource_size_t address, unsigned long size); +extern void __iomem *ioremap_wc(resource_size_t address, unsigned long size); +extern void __iomem *ioremap_nocache(resource_size_t address, unsigned long size); + +#define iounmap iounmap +extern void iounmap(volatile void __iomem *addr); + +#define ARCH_HAS_IOREMAP_WC +#define ARCH_HAS_IOREMAP_WT + + +extern void __memset_io(void *s, long c, size_t count); + +#define memset_io(dst, c, n) _memset_io(dst, c, n, __alignof(*(dst))) +static inline void _memset_io(volatile void __iomem *dst, int c, size_t n, + const unsigned long dst_align) +{ + long cc; + + cc = c & 0xff; + cc = cc | (cc << 8); + cc = cc | (cc << 16); + cc = cc | (cc << 32); + + if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) { + /* Inline small aligned memset's */ + volatile u64 __iomem *l_dst = dst; + + if (n >= 8) + l_dst[0] = cc; + if (n >= 16) + l_dst[1] = cc; + if (n >= 24) + l_dst[2] = cc; + if (n >= 32) + l_dst[3] = cc; + if (n >= 40) + l_dst[4] = cc; + if (n >= 48) + l_dst[5] = cc; + if (n >= 56) + l_dst[6] = cc; + if (n >= 64) + l_dst[7] = cc; + if (n >= 72) + l_dst[8] = cc; + if (n >= 80) + l_dst[9] = cc; + if (n >= 88) + l_dst[10] = cc; + if (n >= 96) + l_dst[11] = cc; + if (n >= 104) + l_dst[12] = cc; + if (n >= 112) + l_dst[13] = cc; + if (n >= 120) + l_dst[14] = cc; + if (n >= 128) + l_dst[15] = cc; + + /* Set the tail */ + if (n & 4) + *(u32 __iomem *) (dst + (n & ~0x7UL)) = cc; + if (n & 2) + *(u16 __iomem *) (dst + (n & ~0x3UL)) = cc; + if (n & 1) + *(u8 __iomem *) (dst + (n & ~0x1UL)) = cc; + } else { + __memset_io((void * __force) dst, cc, n); + } +} + +extern void __memcpy_fromio(void *dst, const void *src, size_t n); +extern void __memcpy_toio(void *dst, const void *src, size_t n); +#define memcpy_fromio(a, b, c) __memcpy_fromio((a), (void * __force) (b), (c)) +#define memcpy_toio(a, b, c) __memcpy_toio((void * __force) (a), (b), (c)) + + +#include +#undef PCI_IOBASE + + +extern unsigned long get_domain_pci_conf_base(unsigned int domain); +extern unsigned long get_domain_pci_conf_size(unsigned int domain); + 
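+/*
+ * Usage sketch (illustrative; 'bar_phys' and 'bar_len' are hypothetical
+ * values, not from the original patch): I/O memory is mapped with the
+ * ioremap*() variants above and then accessed only through the io helpers:
+ *
+ *	void __iomem *regs = ioremap_nocache(bar_phys, bar_len);
+ *
+ *	if (regs) {
+ *		memset_io(regs, 0, bar_len);
+ *		writel(1, regs);
+ *		iounmap(regs);
+ *	}
+ */
+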
+/* + * ISA I/O bus memory addresses are 1:1 with the physical address. + */ +#define isa_virt_to_bus virt_to_phys + +#endif /* _E2K_IO_H_ */ diff --git a/arch/e2k/include/asm/io_apic.h b/arch/e2k/include/asm/io_apic.h new file mode 100644 index 0000000..0cc55a6 --- /dev/null +++ b/arch/e2k/include/asm/io_apic.h @@ -0,0 +1,12 @@ +#ifndef __ASM_E2K_IO_APIC_H +#define __ASM_E2K_IO_APIC_H + +#ifdef __KERNEL__ + +#include +#include + +extern int e2k_msi_disabled; + +#endif /* __KERNEL__ */ +#endif /* __ASM_E2K_IO_APIC_H */ diff --git a/arch/e2k/include/asm/io_apic_regs.h b/arch/e2k/include/asm/io_apic_regs.h new file mode 100644 index 0000000..6d14039 --- /dev/null +++ b/arch/e2k/include/asm/io_apic_regs.h @@ -0,0 +1,81 @@ +#ifndef __ASM_IO_APIC_REGS_H +#define __ASM_IO_APIC_REGS_H + +#include + + +/* + * The structure of the IO-APIC: + */ +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14, + LTS : 1, + delivery_type : 1, + __reserved_1 : 8, + ID : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8, + __reserved_2 : 7, + PRQ : 1, + entries : 8, + __reserved_1 : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24, + arbitration : 4, + __reserved_1 : 4; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1, + __reserved_1 : 31; + } __attribute__ ((packed)) bits; +}; + +struct IO_APIC_route_entry { + __u32 vector : 8, + delivery_mode : 3, /* 000: FIXED + * 001: lowest prio + * 111: ExtINT + */ + dest_mode : 1, /* 0: physical, 1: logical */ + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, /* 0: edge, 1: level */ + mask : 1, /* 0: enabled, 1: disabled */ + __reserved_2 : 15; + + __u32 __reserved_3 : 24, + dest : 8; +} __attribute__ ((packed)); + +struct IR_IO_APIC_route_entry { + __u64 vector : 8, + zero : 3, + index2 : 1, + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, + mask : 1, + reserved : 31, + format : 1, + index : 15; +} __attribute__ ((packed)); + +#endif /* __ASM_IO_APIC_REGS_H */ diff --git a/arch/e2k/include/asm/io_epic.h b/arch/e2k/include/asm/io_epic.h new file mode 100644 index 0000000..2250062 --- /dev/null +++ b/arch/e2k/include/asm/io_epic.h @@ -0,0 +1,29 @@ +#ifndef _ASM_E2K_IO_EPIC_H +#define _ASM_E2K_IO_EPIC_H + +#include + +static inline void epic_ioapic_eoi(u8 vector) +{ + unsigned int value = vector << 8; + + value |= 0x5; + + sic_write_nbsr_reg(SIC_hc_ioapic_eoi, value); +} + +static inline void get_io_epic_msi(int node, u32 *lo, u32 *hi) +{ + if (node < 0) + node = 0; + /* FIXME SIC reads with mas 0x13 aren't supported by hypervisor */ + if (paravirt_enabled()) { + *lo = early_sic_read_node_nbsr_reg(node, SIC_rt_msi); + *hi = early_sic_read_node_nbsr_reg(node, SIC_rt_msi_h); + } else { + *lo = sic_read_node_nbsr_reg(node, SIC_rt_msi); + *hi = sic_read_node_nbsr_reg(node, SIC_rt_msi_h); + } +} +#include +#endif /* _ASM_E2K_IO_EPIC_H */ diff --git a/arch/e2k/include/asm/io_epic_regs.h b/arch/e2k/include/asm/io_epic_regs.h new file mode 100644 index 0000000..297de80 --- /dev/null +++ b/arch/e2k/include/asm/io_epic_regs.h @@ -0,0 +1,4 @@ +#ifndef __ASM_E2K_IO_EPIC_REGS_H +#define __ASM_E2K_IO_EPIC_REGS_H +#include +#endif /* __ASM_E2K_IO_EPIC_REGS_H */ diff --git a/arch/e2k/include/asm/ioctl.h b/arch/e2k/include/asm/ioctl.h new file mode 100644 index 0000000..b279fe0 --- /dev/null +++ b/arch/e2k/include/asm/ioctl.h @@ -0,0 +1 @@ +#include diff --git 
a/arch/e2k/include/asm/ioctls.h b/arch/e2k/include/asm/ioctls.h new file mode 100644 index 0000000..dd12291 --- /dev/null +++ b/arch/e2k/include/asm/ioctls.h @@ -0,0 +1,17 @@ +#ifndef _E2K_IOCTLS_H_ +#define _E2K_IOCTLS_H_ + +/* + * We are too far from real ioctl handling and it is difficult to predict + * any errors now. So I accept i386(ia64) ioctl's stuff as the basis. + */ + + +#include +#include + +#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define TIOCGDB 0x547F /* enable GDB stub mode on this tty */ + +#endif /* _E2K_IOCTLS_H_ */ diff --git a/arch/e2k/include/asm/iolinkmask.h b/arch/e2k/include/asm/iolinkmask.h new file mode 100644 index 0000000..fa0860f --- /dev/null +++ b/arch/e2k/include/asm/iolinkmask.h @@ -0,0 +1,6 @@ +#ifndef __ASM_IOHUBMASK_H +#define __ASM_IOHUBMASK_H + +#include + +#endif /* __LINUX_IOHUBMASK_H */ diff --git a/arch/e2k/include/asm/ipcbuf.h b/arch/e2k/include/asm/ipcbuf.h new file mode 100644 index 0000000..dc3376b --- /dev/null +++ b/arch/e2k/include/asm/ipcbuf.h @@ -0,0 +1,28 @@ +#ifndef _E2K_IPCBUF_H_ +#define _E2K_IPCBUF_H_ + +/* + * The ipc64_perm structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_IPCBUF_H_ */ diff --git a/arch/e2k/include/asm/irq.h b/arch/e2k/include/asm/irq.h new file mode 100644 index 0000000..d5879da --- /dev/null +++ b/arch/e2k/include/asm/irq.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_IRQ_H_ +#define _ASM_E2K_IRQ_H_ +/* + * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar + * + * IRQ/IPI changes taken from work by Thomas Radke + * + */ + +#include +#include +#include +#include + +#define irq_canonicalize(irq) (irq) + +extern int can_request_irq(unsigned int, unsigned long flags); +extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self) __cold; +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace + +#endif /* _ASM_E2K_IRQ_H_ */ diff --git a/arch/e2k/include/asm/irq_vectors.h b/arch/e2k/include/asm/irq_vectors.h new file mode 100644 index 0000000..71dbcc2 --- /dev/null +++ b/arch/e2k/include/asm/irq_vectors.h @@ -0,0 +1,56 @@ +#ifndef _ASM_E2K_IRQ_VECTORS_H +#define _ASM_E2K_IRQ_VECTORS_H + +#define ERROR_APIC_VECTOR 0xfe +#define RESCHEDULE_VECTOR 0xfd +#define CALL_FUNCTION_VECTOR 0xfc +#define CALL_FUNCTION_SINGLE_VECTOR 0xfb +#define RDMA_INTERRUPT_VECTOR 0xf9 +#define LVT3_INTERRUPT_VECTOR 0xf8 +#define LVT4_INTERRUPT_VECTOR 0xf7 +#define IRQ_WORK_VECTOR 0xf6 +#define NMI_PSEUDO_VECTOR 0x100 +/* + * Local APIC timer IRQ vector is on a different priority level, + * to work around the 'lost local interrupt if more than 2 IRQ + * sources per level' errata. 
+ */ +#define LOCAL_TIMER_VECTOR 0xef + +#ifdef CONFIG_VIRTUALIZATION +/* VIRQ vector to emulate SysRq on guest kernel */ +#define SYSRQ_SHOWSTATE_APIC_VECTOR 0xfa +/* VIRQ vector to emulate NMI on guest kernel */ +#define KVM_NMI_APIC_VECTOR 0xee + +#define SYSRQ_SHOWSTATE_EPIC_VECTOR 0x3fa +#define KVM_NMI_EPIC_VECTOR 0x3ee +#endif /* CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_EPIC +/* EPIC system vectors have the highest priority level of 3 (0x300 - 0x3ff) */ +#define LINP0_INTERRUPT_VECTOR 0x3e0 +#define LINP1_INTERRUPT_VECTOR 0x3e1 +#define LINP2_INTERRUPT_VECTOR 0x3e2 +#define LINP3_INTERRUPT_VECTOR 0x3e3 +#define LINP4_INTERRUPT_VECTOR 0x3e4 +#define LINP5_INTERRUPT_VECTOR 0x3e5 +#define CEPIC_TIMER_VECTOR 0x3ef +#define CEPIC_EPIC_INT_VECTOR 0x3f0 +#define EPIC_IRQ_WORK_VECTOR 0x3f6 +#define PREPIC_ERROR_VECTOR 0x3f9 +#define EPIC_CALL_FUNCTION_SINGLE_VECTOR 0x3fb +#define EPIC_CALL_FUNCTION_VECTOR 0x3fc +#define EPIC_RESCHEDULE_VECTOR 0x3fd +#define ERROR_EPIC_VECTOR 0x3fe +#define SPURIOUS_EPIC_VECTOR 0x3ff +#endif + +#ifdef CONFIG_KVM_ASYNC_PF +#define ASYNC_PF_WAKE_VECTOR 0x3f8 +#endif /* CONFIG_KVM_ASYNC_PF */ + +#include + +#endif /* _ASM_E2K_IRQ_VECTORS_H */ + diff --git a/arch/e2k/include/asm/irq_work.h b/arch/e2k/include/asm/irq_work.h new file mode 100644 index 0000000..ee424c3 --- /dev/null +++ b/arch/e2k/include/asm/irq_work.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/irqdomain.h b/arch/e2k/include/asm/irqdomain.h new file mode 100644 index 0000000..c81446b --- /dev/null +++ b/arch/e2k/include/asm/irqdomain.h @@ -0,0 +1,6 @@ +#ifndef __ASM_IRQDOMAIN_H +#define __ASM_IRQDOMAIN_H + +#include + +#endif diff --git a/arch/e2k/include/asm/irqflags.h b/arch/e2k/include/asm/irqflags.h new file mode 100644 index 0000000..363b17c --- /dev/null +++ b/arch/e2k/include/asm/irqflags.h @@ -0,0 +1,477 @@ +#ifndef _E2K_IRQFLAGS_H_ +#define _E2K_IRQFLAGS_H_ + +#ifndef __ASSEMBLY__ + +#ifndef _LINUX_TRACE_IRQFLAGS_H +# error "Do not include directly; use instead." +#endif + +#include + +/* + * There are two registers to control interrupts (enable/disable). + * + * The main register is the privileged register PSR, + * + * the second is the nonprivileged UPSR. + * + * PSR bits should enable interrupts and enable user interrupts to use UPSR + * as the interrupt control register. + * + * The principal difference between the two registers is scope. UPSR is a + * global register: its scope is all execution; if some function + * enables/disables interrupts in UPSR and returns to its caller, then the + * caller will have interrupts enabled/disabled as well. PSR is a local + * register: its scope is the current function, and all invoked functions + * inherit its PSR state, but if an invoked function changes PSR and returns, + * then the current function (the caller) will see its own unchanged PSR + * state. + * + * (PSR is saved by the call operation and is restored by the return + * operation from the chain registers.) + * + * So in the PSR case, in particular, if interrupts are enabled/disabled + * by some function call, then it is an error - the interrupts enable/disable + * state will be unchanged. But it is not an error in the UPSR case. + * + * Interrupt control using PSR requires a structured kernel organization: + * only inheritance of the interrupts enable/disable state (from caller to + * invoked function) can be permitted; returning the interrupts + * enable/disable state (to the caller) cannot be permitted. + * + * There is doubt that we should use interrupt control under UPSR. + * + * + * PSR and UPSR bits are used to enable and disable interrupts. 
+ * + * PSR bits are used while: + * - A user process executes; + * - A trap or interrupt occurs on a user or kernel process: the hardware + * disables the interrupt mask in PSR and PSR becomes the main register to + * control interrupts. The trap handler switches control from the PSR + * register to UPSR at the appropriate point and all following trap handling + * is done under UPSR control; + * - The trap handler returns control from UPSR to PSR at the appropriate + * point at the end of trap handling. Return from the trap handler (DONE) + * restores PSR from the CR register and recovers the interrupt control type + * at the trap point; + * - A system call is the same as a trap (see above); + * - A system call end is the same as a trap handler end (see above); + * - A switch from a kernel process to user (exec() and signal handler) + * is the same as a trap handler end. Before returning to the user function + * the kernel sets control under PSR and (only for the signal handler) after + * return from user recovers control under UPSR. + * + * The kernel cannot use the standard macros and functions to enable/disable + * interrupts, such as local_irq_xxx(), spin_lock_irq_xxx(), ..., while + * interrupts are controlled by PSR. + * + * UPSR bits are used by the kernel while: + * Kernel jumpstart (system call #12) sets the UPSR register to the + * initial state (where interrupts are disabled) and switches + * control from the PSR register to UPSR; from this point the kernel runs + * (except the cases listed above for PSR) under the UPSR interrupt bits. + */ +#define NATIVE_SWITCH_IRQ_TO_UPSR() \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)) + +#define NATIVE_RETURN_IRQ_TO_PSR() \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)) + +#define NATIVE_SET_USER_INITIAL_UPSR(upsr) \ +({ \ + NATIVE_RETURN_IRQ_TO_PSR(); \ + NATIVE_WRITE_UPSR_REG(upsr); \ +}) + +#define BOOT_NATIVE_SWITCH_IRQ_TO_UPSR() \ + BOOT_NATIVE_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define SWITCH_IRQ_TO_UPSR(set_cr1_lo) \ +do { \ + if (set_cr1_lo) { \ + e2k_cr1_lo_t cr1_lo = READ_CR1_LO_REG(); \ + AS(cr1_lo).ie = 1; \ + AS(cr1_lo).nmie = 1; \ + AS(cr1_lo).uie = 1; \ + AS(cr1_lo).unmie = 1; \ + WRITE_CR1_LO_REG(cr1_lo); \ + } \ + \ + WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_ENABLED)); \ +} while (0) + +#define BOOT_SWITCH_IRQ_TO_UPSR() \ + BOOT_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define UPSR_STI() \ +({ \ + condition_collect_disable_interrupt_ticks( \ + READ_UPSR_REG_VALUE() & ~UPSR_IE); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_ENABLED)); \ +}) + +#define UPSR_CLI() \ +({ \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) + +#define UPSR_SAVE_AND_CLI() \ +({ \ + unsigned long __flags = READ_UPSR_REG_VALUE(); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED)); \ + condition_mark_disable_interrupt_ticks(1); \ + __flags; \ +}) + +#define UPSR_SAVE() READ_UPSR_REG_VALUE() + 
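/*
 * Editor's note: a minimal sketch, not part of this patch, of the intended
 * save/disable/restore pattern built from the UPSR primitives above; the
 * generic arch_local_irq_save()/arch_local_irq_restore() macros defined
 * further down expand to this pair (UPSR_RESTORE() is defined elsewhere in
 * this file). The function name is hypothetical.
 */
static inline void example_upsr_critical_section(void)
{
	unsigned long flags;

	flags = UPSR_SAVE_AND_CLI();	/* save UPSR, disable maskable irqs */
	/* ... code that must not be preempted by maskable interrupts ... */
	UPSR_RESTORE(flags);		/* put UPSR back as it was */
}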
+/* + * nmi_* versions work only with non-maskable interrupts. + */ + +#define upsr_nmi_irqs_disabled() \ + ((READ_UPSR_REG_VALUE() & UPSR_NMIE) == 0) + +#define upsr_nmi_irqs_disabled_flags(flags) \ + ((flags & UPSR_NMIE) == 0) +#define NATIVE_UPSR_NMI_SAVE_AND_CLI(flags) \ +({ \ + flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_WRITE_UPSR_IRQ_BARRIER(flags & ~UPSR_NMIE); \ + condition_mark_disable_interrupt_ticks(1); \ +}) +#define NATIVE_UPSR_NMI_STI(flags) \ +({ \ + NATIVE_WRITE_UPSR_IRQ_BARRIER((flags) | UPSR_NMIE); \ + condition_mark_disable_interrupt_ticks(0); \ +}) +#define NATIVE_UPSR_ALL_SAVE_AND_CLI(flags) \ +({ \ + flags = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + NATIVE_WRITE_UPSR_IRQ_BARRIER( \ + AW(E2K_KERNEL_UPSR_DISABLED_ALL)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) + +/* + * all_* versions work on all interrupts including + * both maskable and non-maskable ones. + */ + +#define UPSR_ALL_STI() \ +({ \ + condition_collect_disable_interrupt_ticks( \ + READ_UPSR_REG_VALUE() & ~UPSR_IE & ~UPSR_NMIE); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_ENABLED)); \ +}) + +#define UPSR_ALL_CLI() \ +({ \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED_ALL)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) + +#define UPSR_ALL_SAVE_AND_CLI(flags) \ +({ \ + flags = READ_UPSR_REG_VALUE(); \ + WRITE_UPSR_IRQ_BARRIER(AW(E2K_KERNEL_UPSR_DISABLED_ALL)); \ + condition_mark_disable_interrupt_ticks(1); \ +}) +#define BOOT_UPSR_ALL_STI() \ +({ \ + unsigned long last_upsr = BOOT_READ_UPSR_REG_VALUE(); \ + unsigned long cur_upsr; \ + cur_upsr = last_upsr | (UPSR_IE | UPSR_NMIE); \ + BOOT_WRITE_UPSR_REG_VALUE(cur_upsr); \ +}) +#define BOOT_UPSR_ALL_CLI() \ +({ \ + unsigned long last_upsr = BOOT_READ_UPSR_REG_VALUE(); \ + unsigned long cur_upsr; \ + cur_upsr = last_upsr & ~(UPSR_IE | UPSR_NMIE); \ + BOOT_WRITE_UPSR_REG_VALUE(cur_upsr); \ +}) +#define BOOT_UPSR_ALL_SAVE_AND_CLI(flags) \ +({ \ + flags = BOOT_READ_UPSR_REG_VALUE(); \ + BOOT_WRITE_UPSR_REG_VALUE(flags & ~(UPSR_IE | UPSR_NMIE)); \ +}) +#define BOOT_UPSR_SAVE(src_upsr) \ + (src_upsr = BOOT_READ_UPSR_REG_VALUE()) +#define BOOT_UPSR_RESTORE(src_upsr) \ + BOOT_WRITE_UPSR_REG_VALUE(src_upsr) + +#define psr_irqs_disabled_flags(flags) (((flags) & PSR_IE) == 0) +#define upsr_irqs_disabled_flags(flags) (((flags) & UPSR_IE) == 0) +#define irqs_under_upsr_flags(psr_flags) (((psr_flags) & PSR_UIE) != 0) +#define psr_and_upsr_irqs_disabled_flags(psr_flags, upsr_flags) \ +({ \ + bool ret; \ + if (psr_irqs_disabled_flags(psr_flags)) { \ + ret = true; \ + } else if (irqs_under_upsr_flags(psr_flags)) { \ + ret = upsr_irqs_disabled_flags(upsr_flags); \ + } else { \ + ret = false; \ + } \ + ret; \ +}) + +#define upsr_all_irqs_disabled_flags(flags) \ + ((flags & (UPSR_IE | UPSR_NMIE)) == 0) +#define upsr_all_irqs_disabled() \ + upsr_all_irqs_disabled_flags(READ_UPSR_REG_VALUE()) + +#define psr_all_irqs_disabled_flags(flags) \ + ((flags & (PSR_IE | PSR_NMIE)) == 0) +#define psr_all_irqs_enabled_flags(flags) \ + ((flags & (PSR_IE | PSR_NMIE)) == (PSR_IE | PSR_NMIE)) +#define psr_all_irqs_disabled() \ + psr_all_irqs_disabled_flags(READ_PSR_REG_VALUE()) +#define all_irqs_under_upsr_flags(psr_flags) \ + (((psr_flags) & (PSR_UIE | PSR_UNMIE)) != 0) +#define psr_and_upsr_all_irqs_disabled_flags(psr_flags, upsr_flags) \ +({ \ + bool ret; \ + if (psr_all_irqs_disabled_flags(psr_flags)) { \ + ret = true; \ + } else if (all_irqs_under_upsr_flags(psr_flags)) { \ + ret = upsr_all_irqs_disabled_flags(upsr_flags); \ + } else { \ + ret = false; \ + } \ + ret; \ +}) + +#define psr_irqs_disabled() \ + 
psr_irqs_disabled_flags(READ_PSR_REG_VALUE()) +#define upsr_irqs_disabled() \ + upsr_irqs_disabled_flags(READ_UPSR_REG_VALUE()) + +#define psr_and_upsr_irqs_disabled() \ +({ \ + unsigned long psr = READ_PSR_REG_VALUE(); \ + unsigned long upsr = READ_UPSR_REG_VALUE(); \ + \ + psr_and_upsr_irqs_disabled_flags(psr, upsr); \ +}) + +#define psr_and_upsr_all_irqs_disabled() \ +({ \ + unsigned long psr = READ_PSR_REG_VALUE(); \ + unsigned long upsr = READ_UPSR_REG_VALUE(); \ + \ + psr_and_upsr_all_irqs_disabled_flags(psr, upsr); \ +}) +#define __raw_all_irqs_disabled() psr_and_upsr_all_irqs_disabled() + +#define native_psr_irqs_disabled() \ + psr_irqs_disabled_flags(NATIVE_NV_READ_PSR_REG_VALUE()) + +#define native_trap_irqs_disabled(regs) (regs->irqs_disabled) + +#define psr_and_upsr_nm_irqs_disabled() \ +({ \ + int ret; \ + unsigned long psr = READ_PSR_REG_VALUE(); \ + if ((psr & PSR_NMIE) == 0) { \ + ret = 1; \ + } else if (psr & PSR_UNMIE) { \ + ret = !(READ_UPSR_REG_VALUE() & UPSR_NMIE); \ + } else { \ + ret = 0; \ + } \ + ret; \ +}) + +#ifndef CONFIG_DEBUG_IRQ +#define __raw_irqs_disabled() upsr_irqs_disabled() +#else +#define __raw_irqs_disabled() psr_and_upsr_irqs_disabled() +#endif /* ! CONFIG_DEBUG_IRQ */ + +#define __raw_irqs_disabled_flags(flags) ((flags & UPSR_IE) == 0) + +#ifdef CONFIG_MCST_RT + +#define SAVE_CURR_TIME_SWITCH_TO \ +{ \ + cpu_times[raw_smp_processor_id()].curr_time_switch_to = \ + READ_CLKR_REG_VALUE(); \ +} + +#define CALCULATE_TIME_SWITCH_TO \ +{ \ + int cpu = raw_smp_processor_id(); \ + cpu_times[cpu].curr_time_switch_to = READ_CLKR_REG_VALUE() - \ + cpu_times[cpu].curr_time_switch_to; \ + if (cpu_times[cpu].curr_time_switch_to < \ + cpu_times[cpu].min_time_switch_to){ \ + cpu_times[cpu].min_time_switch_to = \ + cpu_times[cpu].curr_time_switch_to; \ + } \ + if (cpu_times[cpu].curr_time_switch_to > \ + cpu_times[cpu].max_time_switch_to){ \ + cpu_times[cpu].max_time_switch_to = \ + cpu_times[cpu].curr_time_switch_to; \ + } \ +} + +#else /* !CONFIG_MCST_RT */ + #define SAVE_CURR_TIME_SWITCH_TO + #define CALCULATE_TIME_SWITCH_TO +#endif /* CONFIG_MCST_RT */ + +#ifdef CONFIG_CLI_CHECK_TIME + +typedef struct cli_info { + long cli; + long max_cli; + long max_cli_cl; + long max_cli_ip; + + long gcli; + long max_gcli; + long max_gcli_cl; + long max_gcli_ip; + +} cli_info_t; + +typedef struct tt0_info { + long max_tt0_prolog; + long max_tt0_cl; +} tt0_info_t; + +extern cli_info_t cli_info[]; +extern tt0_info_t tt0_info[]; +extern int cli_info_needed; +extern void tt0_prolog_ticks(long ticks); + +#define Cli_cl cli_info[raw_smp_processor_id()].cli +#define Max_cli cli_info[raw_smp_processor_id()].max_cli +#define Max_cli_cl cli_info[raw_smp_processor_id()].max_cli_cl +#define Max_cli_ip cli_info[raw_smp_processor_id()].max_cli_ip +#define Cli_irq cli_info[raw_smp_processor_id()].irq + +#define Gcli_cl cli_info[raw_smp_processor_id()].gcli +#define Max_gcli cli_info[raw_smp_processor_id()].max_gcli +#define Max_gcli_cl cli_info[raw_smp_processor_id()].max_gcli_cl +#define Max_gcli_ip cli_info[raw_smp_processor_id()].max_gcli_ip + +#define Max_tt0_prolog tt0_info[raw_smp_processor_id()].max_tt0_prolog +#define Max_tt0_cl tt0_info[raw_smp_processor_id()].max_tt0_cl + +#define e2k_cli() \ +{ \ + bool __save_time = cli_info_needed && !__raw_irqs_disabled(); \ + UPSR_CLI(); \ + if (__save_time) \ + Cli_cl = READ_CLKR_REG_VALUE(); \ +} + +#define e2k_sti() \ +{ \ + if (Cli_cl && __raw_irqs_disabled() && \ + (Max_cli < READ_CLKR_REG_VALUE() - Cli_cl)) { \ + Max_cli = 
READ_CLKR_REG_VALUE() - Cli_cl; \ + Max_cli_cl = Cli_cl; \ + Max_cli_ip = READ_IP_REG_VALUE(); \ + } \ + UPSR_STI(); \ +} + +// check_cli() works under cli() but we want to check time of cli() + +#define check_cli() \ +{ \ + if (cli_info_needed) { \ + Cli_cl = READ_CLKR_REG_VALUE(); \ + } \ +} + +#define sti_return() \ +{ \ + if (cli_info_needed && __raw_irqs_disabled() && \ + (Max_cli < READ_CLKR_REG_VALUE() - Cli_cl)) { \ + Max_cli = READ_CLKR_REG_VALUE() - Cli_cl; \ + Max_cli_cl = Cli_cl; \ + Max_cli_ip = READ_IP_REG_VALUE(); \ + } \ +} +#else /* above CONFIG_CLI_CHECK_TIME */ +#define e2k_cli() UPSR_CLI() +#define e2k_sti() UPSR_STI() +#define check_cli() +#define sti_return() +#endif /* CONFIG_CLI_CHECK_TIME */ + +/* Normal irq operations: disable maskable interrupts only, + * but enable both maskable and non-maskable interrupts. */ + +#define arch_local_irq_enable() e2k_sti() +#define arch_local_irq_disable() e2k_cli() + +#define arch_local_irq_save() UPSR_SAVE_AND_CLI() +#define arch_local_irq_restore(x) UPSR_RESTORE(x) + +#define arch_local_save_flags() UPSR_SAVE() + +#define arch_irqs_disabled_flags(x) __raw_irqs_disabled_flags(x) +#define arch_irqs_disabled() __raw_irqs_disabled() + +/* nmi_irq_*() - the same as above, but checks only non-maskable interrupts. */ + +#define raw_nmi_irqs_disabled_flags(x) upsr_nmi_irqs_disabled_flags(x) +#define raw_nmi_irqs_disabled() upsr_nmi_irqs_disabled() + +/* all_irq_*() - the same as above, but enables, disables and checks + * both non-maskable and maskable interrupts. */ + +#define raw_all_irq_enable() UPSR_ALL_STI() +#define raw_all_irq_disable() UPSR_ALL_CLI() +#define boot_raw_all_irq_enable() BOOT_UPSR_ALL_STI() +#define boot_raw_all_irq_disable() BOOT_UPSR_ALL_CLI() + +#define raw_all_irq_save(x) UPSR_ALL_SAVE_AND_CLI(x) +#define raw_all_irq_restore(x) UPSR_RESTORE(x) +#define boot_raw_all_irq_save(x) BOOT_UPSR_ALL_SAVE_AND_CLI(x) +#define boot_raw_all_irq_restore(x) BOOT_UPSR_RESTORE(x) + +#define raw_all_irqs_disabled_flags(x) upsr_all_irqs_disabled_flags(x) +#define raw_all_irqs_disabled() upsr_all_irqs_disabled() + +#define all_irq_enable() \ + do { trace_hardirqs_on(); raw_all_irq_enable(); } while (0) + +#define all_irq_disable() \ + do { raw_all_irq_disable(); trace_hardirqs_off(); } while (0) + +#define all_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + raw_all_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) + +#define all_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + if (raw_all_irqs_disabled_flags(flags)) { \ + raw_all_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_all_irq_restore(flags); \ + } \ + } while (0) + +/* + * Used in the idle loop + */ +static inline void arch_safe_halt(void) +{ +} + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_IRQFLAGS_H_ */ diff --git a/arch/e2k/include/asm/kdebug.h b/arch/e2k/include/asm/kdebug.h new file mode 100644 index 0000000..5679fca --- /dev/null +++ b/arch/e2k/include/asm/kdebug.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_KDEBUG_H +#define _ASM_E2K_KDEBUG_H + +#include + +struct pt_regs; + +/* Grossly misnamed. 
*/ +enum die_val { + DIE_OOPS = 1, + DIE_BREAKPOINT +}; + +extern void printk_address(unsigned long address, int reliable); +extern void show_trace(struct task_struct *t, struct pt_regs *regs, + unsigned long *sp, unsigned long bp); +extern void __show_regs(struct pt_regs *regs, int all); +extern void show_regs(struct pt_regs *regs); +extern unsigned long oops_begin(void); +extern void oops_end(unsigned long, struct pt_regs *, int signr); + +#endif /* _ASM_E2K_KDEBUG_H */ diff --git a/arch/e2k/include/asm/keyboard.h b/arch/e2k/include/asm/keyboard.h new file mode 100644 index 0000000..33adac8 --- /dev/null +++ b/arch/e2k/include/asm/keyboard.h @@ -0,0 +1,70 @@ + +/* + * Assume that we have a generic PC-style keyboard controller + * in our E2K station built-in. + */ + +/* + * linux/include/asm-i386/keyboard.h + * + * Created 3 Nov 1996 by Geert Uytterhoeven + */ + +#ifndef _E2K_KEYBOARD_H_ +#define _E2K_KEYBOARD_H_ + +#ifdef __KERNEL__ + +#include +#include +#include + +#define KEYBOARD_IRQ 1 +#define DISABLE_KBD_DURING_INTERRUPTS 0 + +extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode); +extern int pckbd_getkeycode(unsigned int scancode); +extern int pckbd_translate(unsigned char scancode, unsigned char *keycode, + char raw_mode); +extern char pckbd_unexpected_up(unsigned char keycode); +extern void pckbd_leds(unsigned char leds); +extern void pckbd_init_hw(void); +extern unsigned char pckbd_sysrq_xlate[128]; + +#define kbd_setkeycode pckbd_setkeycode +#define kbd_getkeycode pckbd_getkeycode +#define kbd_translate pckbd_translate +#define kbd_unexpected_up pckbd_unexpected_up +#define kbd_leds pckbd_leds +#define kbd_init_hw pckbd_init_hw +#define kbd_sysrq_xlate pckbd_sysrq_xlate + +#define SYSRQ_KEY 0x54 + +/* resource allocation */ +#define kbd_request_region() +#define kbd_request_irq(handler) request_irq(KEYBOARD_IRQ, handler, 0, \ + "keyboard", NULL) + +/* How to access the keyboard macros on this platform. */ +#define kbd_read_input() inb(KBD_DATA_REG) +#define kbd_read_status() inb(KBD_STATUS_REG) +#define kbd_write_output(val) outb(val, KBD_DATA_REG) +#define kbd_write_command(val) outb(val, KBD_CNTL_REG) + +/* Some stoneage hardware needs delays after some operations. */ +#define kbd_pause() do { } while(0) + +/* + * Machine specific bits for the PS/2 driver + */ + +#define AUX_IRQ 12 + +#define aux_request_irq(hand, dev_id) \ + request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS/2 Mouse", dev_id) + +#define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id) + +#endif /* __KERNEL__ */ +#endif /* _E2K_KEYBOARD_H_ */ diff --git a/arch/e2k/include/asm/kprobes.h b/arch/e2k/include/asm/kprobes.h new file mode 100644 index 0000000..b45d43c --- /dev/null +++ b/arch/e2k/include/asm/kprobes.h @@ -0,0 +1,87 @@ +#ifndef __ASM_E2K_KPROBES_H +#define __ASM_E2K_KPROBES_H + +#include + +#ifdef CONFIG_KPROBES + +#include +#include + +#include +#include +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +typedef u8 kprobe_opcode_t; + +#define KPROBE_BREAK_1 0x0dc0c04004000001UL + +/* + * We need to store one additional instruction after the copied one + * to make sure processor won't generate exc_illegal_opcode instead + * of exc_last_wish/exc_instr_debug (exc_illegal_opcode has priority). 
+ */ +#define MAX_INSN_SIZE (E2K_INSTR_MAX_SIZE + sizeof(unsigned long)) + +struct arch_specific_insn { + kprobe_opcode_t *insn; +}; + +/* per-cpu kprobe control block */ +#define MAX_STACK_SIZE 256 +struct kprobe_ctlblk { + int kprobe_status; +}; + +#define kretprobe_blacklist_size 0 +#define arch_remove_kprobe(p) do { } while (0) + +#define flush_insn_slot(p) \ +do { \ + unsigned long slot = (unsigned long) p->ainsn.insn; \ + flush_icache_range(slot, slot + \ + MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); \ +} while (0) + +extern int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr); +extern int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); + +static inline int is_kprobe_break1_trap(struct pt_regs *regs) +{ + u64 *instr = (u64 *)GET_IP_CR0_HI(regs->crs.cr0_hi); + + if (cpu_has(CPU_HWBUG_BREAKPOINT_INSTR)) + return (*instr & ~E2K_INSTR_HS_LNG_MASK) == + (KPROBE_BREAK_1 & ~E2K_INSTR_HS_LNG_MASK); + else + return *instr == KPROBE_BREAK_1; +} + +extern int kprobe_instr_debug_handle(struct pt_regs *); + +#else +static inline int is_kprobe_break1_trap(struct pt_regs *regs) +{ + return false; +} + +static inline int kprobe_instr_debug_handle(struct pt_regs *regs) +{ + return 0; +} +#endif /* #ifdef CONFIG_KPROBES */ + +#ifdef CONFIG_KRETPROBES +extern int kretprobe_last_wish_handle(struct pt_regs *); +#else +static inline int kretprobe_last_wish_handle(struct pt_regs *regs) +{ + return 0; +} +#endif + +#endif /*__ASM_E2K_KPROBES_H */ diff --git a/arch/e2k/include/asm/kvm/Kbuild b/arch/e2k/include/asm/kvm/Kbuild new file mode 100644 index 0000000..4f1c6a5 --- /dev/null +++ b/arch/e2k/include/asm/kvm/Kbuild @@ -0,0 +1,8 @@ + +### e2k virtualization + +unifdef-y += guest/ + +unifdef-y += guest.h +unifdef-y += threads.h + diff --git a/arch/e2k/include/asm/kvm/aau_regs_access.h b/arch/e2k/include/asm/kvm/aau_regs_access.h new file mode 100644 index 0000000..62ac478 --- /dev/null +++ b/arch/e2k/include/asm/kvm/aau_regs_access.h @@ -0,0 +1,663 @@ +/* + * KVM AAU registers model access + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _KVM_AAU_REGS_ACCESS_H_ +#define _KVM_AAU_REGS_ACCESS_H_ + +#include +#include +#include + +/* + * Basic functions accessing virtual AAUs registers on guest. 
+ */ +#define GUEST_AAU_REGS_BASE offsetof(kvm_vcpu_state_t, cpu.aau) +#define GUEST_AAU_REG(reg_name) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, reg_name))) +#define GUEST_AAU_AAIND(AAIND_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aainds)) + \ + (sizeof(u64) * (AAIND_no))) +#define GUEST_AAU_AAINCR(AAINCR_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaincrs)) + \ + (sizeof(u64) * (AAINCR_no))) +#define GUEST_AAU_AASTI(AASTI_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aastis)) + \ + (sizeof(u64) * (AASTI_no))) +#define GUEST_AAU_AALDI(AALDI_no) (offsetof(kvm_vcpu_state_t, cpu.aaldi) + \ + (sizeof(u64) * (AALDI_no))) +#define GUEST_AAU_AALDA(AALDA_no) (offsetof(kvm_vcpu_state_t, cpu.aalda) + \ + (sizeof(e2k_aalda_t) * (AALDA_no))) +#define GUEST_AAU_AAD_lo(AAD_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aads)) + \ + (sizeof(e2k_aadj_t) * (AAD_no)) + \ + (offsetof(e2k_aadj_t, word.lo))) +#define GUEST_AAU_AAD_hi(AAD_no) (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aads)) + \ + (sizeof(e2k_aadj_t) * (AAD_no)) + \ + (offsetof(e2k_aadj_t, word.hi))) +#define GUEST_AAU_AALDM_lo() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldm)) + \ + (offsetof(e2k_aaldm_t, lo))) +#define GUEST_AAU_AALDM_hi() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldm)) + \ + (offsetof(e2k_aaldm_t, hi))) +#define GUEST_AAU_AALDV_lo() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldv)) + \ + (offsetof(e2k_aaldv_t, lo))) +#define GUEST_AAU_AALDV_hi() (GUEST_AAU_REGS_BASE + \ + (offsetof(e2k_aau_t, aaldv)) + \ + (offsetof(e2k_aaldv_t, hi))) +#define GUEST_GET_AAU_BREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_B(GUEST_AAU_REG(reg_name)) +#define GUEST_GET_AAU_SREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_REG(reg_name)) +#define GUEST_GET_AAU_DREG(reg_name) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_REG(reg_name)) +#define GUEST_SET_AAU_BREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_B(GUEST_AAU_REG(reg_name), value) +#define GUEST_SET_AAU_SREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_REG(reg_name), value) +#define GUEST_SET_AAU_DREG(reg_name, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_REG(reg_name), value) +#define GUEST_GET_AAU_AAIND(AAIND_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAIND(AAIND_no)) +#define GUEST_GET_AAU_AAINCR(AAINCR_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAINCR(AAINCR_no)) +#define GUEST_GET_AAU_AASTI(AASTI_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AASTI(AASTI_no)) +#define GUEST_GET_AAU_AALDI(AALDI_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AALDI(AALDI_no)) +#define GUEST_GET_AAU_AALDA(AALDA_no) \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDA(AALDA_no)) +#define GUEST_GET_AAU_AAD_lo(AAD_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_lo(AAD_no)) +#define GUEST_GET_AAU_AAD_hi(AAD_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_hi(AAD_no)) +#define GUEST_GET_AAU_AAD(AAD_no, mem_p) \ +({ \ + AWP(mem_p).lo = GUEST_GET_AAU_AAD_lo(AAD_no); \ + AWP(mem_p).hi = GUEST_GET_AAU_AAD_hi(AAD_no); \ +}) +#define GUEST_GET_AAU_AALDM_lo() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_lo()) +#define GUEST_GET_AAU_AALDM_hi() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_hi()) +#define GUEST_GET_AAU_AALDV_lo() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_lo()) +#define GUEST_GET_AAU_AALDV_hi() \ + E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_hi()) + +#define GUEST_SET_AAU_AAIND(AAIND_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAIND(AAIND_no), value) +#define GUEST_SET_AAU_AAINCR(AAINCR_no, value) 
\ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAINCR(AAINCR_no), value) +#define GUEST_SET_AAU_AASTI(AASTI_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AASTI(AASTI_no), value) +#define GUEST_SET_AAU_AALDI(AALDI_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AALDI(AALDI_no), value) +#define GUEST_SET_AAU_AALDA(AALDA_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDI(AALDA_no), value) +#define GUEST_SET_AAU_AAD_lo(AAD_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_lo(AAD_no), value) +#define GUEST_SET_AAU_AAD_hi(AAD_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_AAU_AAD_hi(AAD_no), value) +#define GUEST_SET_AAU_AAD(AAD_no, mem_p) \ +({ \ + GUEST_SET_AAU_AAD_lo(AAD_no, AWP(mem_p).lo); \ + GUEST_SET_AAU_AAD_hi(AAD_no, AWP(mem_p).hi); \ +}) +#define GUEST_SET_AAU_AALDM_lo(lo) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_lo(), lo) +#define GUEST_SET_AAU_AALDM_hi(hi) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDM_hi(), hi) +#define GUEST_SET_AAU_AALDV_lo(lo) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_lo(), lo) +#define GUEST_SET_AAU_AALDV_hi(hi) \ + E2K_STORE_GUEST_VCPU_STATE_W(GUEST_AAU_AALDV_hi(), hi) + +/* macros to deal with KVM AAU registers model */ + +#define KVM_GET_AAU_AAD(AAD_no, mem_p) \ + GUEST_GET_AAU_AAD(AAD_no, (mem_p)) +#define KVM_GET_AAU_4_AADs(AAD_4_no, mem_4_p) \ +({ \ + KVM_GET_AAU_AAD(((AAD_4_no) + 0), &(mem_4_p)[0]); \ + KVM_GET_AAU_AAD(((AAD_4_no) + 1), &(mem_4_p)[1]); \ + KVM_GET_AAU_AAD(((AAD_4_no) + 2), &(mem_4_p)[2]); \ + KVM_GET_AAU_AAD(((AAD_4_no) + 3), &(mem_4_p)[3]); \ +}) +#define KVM_GET_AAU_AAIND(AAIND_no) \ + GUEST_GET_AAU_AAIND(AAIND_no) +#define KVM_GET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + val1 = GUEST_GET_AAU_AAIND(AAIND1_no); \ + val2 = GUEST_GET_AAU_AAIND(AAIND2_no); \ +}) +#define KVM_GET_AAU_AAIND_TAG() \ + GUEST_GET_AAU_SREG(aaind_tags) +#define KVM_GET_AAU_AAINDS(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + if (((AAIND1_no) != AAINDS_TAG_no) && \ + ((AAIND2_no) != AAINDS_TAG_no)) { \ + KVM_GET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2); \ + } else if ((AAIND1_no) == AAINDS_TAG_no) { \ + val1 = KVM_GET_AAU_AAIND_TAG(); \ + val2 = GUEST_GET_AAU_AAIND(AAIND2_no); \ + } else { \ + val1 = GUEST_GET_AAU_AAIND(AAIND1_no); \ + val2 = KVM_GET_AAU_AAIND_TAG(); \ + } \ +}) +#define KVM_GET_AAU_AAINCR(AAINCR_no) \ + GUEST_GET_AAU_AAINCR(AAINCR_no) +#define KVM_GET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + val1 = GUEST_GET_AAU_AAINCR(AAINCR1_no); \ + val2 = GUEST_GET_AAU_AAINCR(AAINCR2_no); \ +}) +#define KVM_GET_AAU_AAINCR_TAG() \ + GUEST_GET_AAU_SREG(aaincr_tags) +#define KVM_GET_AAU_AAINCRS(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + if (((AAINCR1_no) != AAINCRS_TAG_no) && \ + ((AAINCR2_no) != AAINCRS_TAG_no)) { \ + KVM_GET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2); \ + } else if ((AAINCR1_no) == AAINCRS_TAG_no) { \ + val1 = KVM_GET_AAU_AAINCR_TAG(); \ + val2 = GUEST_GET_AAU_AAINCR(AAINCR2_no); \ + } else { \ + val1 = GUEST_GET_AAU_AAINCR(AAINCR1_no); \ + val2 = KVM_GET_AAU_AAINCR_TAG(); \ + } \ +}) +#define KVM_GET_AAU_AASTI(AASTI_no) \ + GUEST_GET_AAU_AASTI(AASTI_no) +#define KVM_GET_AAU_AASTIS(AASTI1_no, AASTI2_no, val1, val2) \ +({ \ + val1 = GUEST_GET_AAU_AASTI(AASTI1_no); \ + val2 = GUEST_GET_AAU_AASTI(AASTI2_no); \ +}) +#define KVM_GET_AAU_AASTI_TAG() \ + GUEST_GET_AAU_SREG(aasti_tags) +#define KVM_GET_AAU_AASR() \ + GUEST_GET_AAU_SREG(aasr) +#define KVM_GET_AAU_AAFSTR() \ + GUEST_GET_AAU_SREG(aafstr) +#define KVM_GET_AAU_AALDI(AALDI_no, lval, rval) 
\ +({ \ + lval = GUEST_GET_AAU_AALDI(((AALDI_no) + 0)); \ + rval = GUEST_GET_AAU_AALDI(((AALDI_no) + 32)); \ +}) +#define KVM_GET_AAU_AALDA(AALDA_no, lval, rval) \ +({ \ + lval = GUEST_GET_AAU_AALDA((AALDA_no) + 0); \ + rval = GUEST_GET_AAU_AALDA((AALDA_no) + 32); \ +}) +#define KVM_GET_AAU_AALDM_lo() GUEST_GET_AAU_AALDM_lo() +#define KVM_GET_AAU_AALDM_hi() GUEST_GET_AAU_AALDM_hi() +#define KVM_GET_AAU_AALDM(lo, hi) \ +({ \ + lo = KVM_GET_AAU_AALDM_lo(); \ + hi = KVM_GET_AAU_AALDM_hi(); \ +}) +#define KVM_GET_AAU_AALDV_lo() GUEST_GET_AAU_AALDV_lo() +#define KVM_GET_AAU_AALDV_hi() GUEST_GET_AAU_AALDV_hi() +#define KVM_GET_AAU_AALDV(lo, hi) \ +({ \ + lo = KVM_GET_AAU_AALDV_lo(); \ + hi = KVM_GET_AAU_AALDV_hi(); \ +}) + +#define KVM_SET_AAU_AAD(AAD_no, mem_p) \ + GUEST_SET_AAU_AAD(AAD_no, (mem_p)) +#define KVM_SET_AAU_4_AADs(AAD_4_no, mem_4_p) \ +({ \ + KVM_SET_AAU_AAD(((AAD_4_no) + 0), &(mem_4_p)[0]); \ + KVM_SET_AAU_AAD(((AAD_4_no) + 1), &(mem_4_p)[1]); \ + KVM_SET_AAU_AAD(((AAD_4_no) + 2), &(mem_4_p)[2]); \ + KVM_SET_AAU_AAD(((AAD_4_no) + 3), &(mem_4_p)[3]); \ +}) +#define KVM_SET_AAU_AAIND(AAIND_no, value) \ + GUEST_SET_AAU_AAIND(AAIND_no, value) +#define KVM_SET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + GUEST_SET_AAU_AAIND(AAIND1_no, val1); \ + GUEST_SET_AAU_AAIND(AAIND2_no, val2); \ +}) +#define KVM_SET_AAU_AAIND_TAG(val) \ + GUEST_SET_AAU_SREG(aaind_tags, val) +#define KVM_SET_AAU_AAINDS(AAIND1_no, AAIND2_no, val1, val2) \ +({ \ + if (((AAIND1_no) != AAINDS_TAG_no) && \ + ((AAIND2_no) != AAINDS_TAG_no)) { \ + KVM_SET_AAU_AAINDS_VAL(AAIND1_no, AAIND2_no, val1, val2); \ + } else if ((AAIND1_no) == AAINDS_TAG_no) { \ + KVM_SET_AAU_AAIND_TAG(val1); \ + GUEST_SET_AAU_AAIND(AAIND2_no, val2); \ + } else { \ + GUEST_SET_AAU_AAIND(AAIND1_no, val1); \ + KVM_SET_AAU_AAIND_TAG(val2); \ + } \ +}) +#define KVM_SET_AAU_AAINCR(AAINCR_no, val) \ + GUEST_SET_AAU_AAINCR(AAINCR_no, val) +#define KVM_SET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + GUEST_SET_AAU_AAINCR(AAINCR1_no, val1); \ + GUEST_SET_AAU_AAINCR(AAINCR2_no, val2); \ +}) +#define KVM_SET_AAU_AAINCR_TAG(val) \ + GUEST_SET_AAU_SREG(aaincr_tags, val) +#define KVM_SET_AAU_AAIND_AAINCR_TAGS(aaind, aaincr) \ +do { \ + GUEST_SET_AAU_SREG(aaincr_tags, aaincr); \ + GUEST_SET_AAU_SREG(aaind_tags, aaind); \ +} while (0) + +#define KVM_SET_AAU_AAINCRS(AAINCR1_no, AAINCR2_no, val1, val2) \ +({ \ + if (((AAINCR1_no) != AAINCRS_TAG_no) && \ + ((AAINCR2_no) != AAINCRS_TAG_no)) { \ + KVM_SET_AAU_AAINCRS_VAL(AAINCR1_no, AAINCR2_no, val1, val2); \ + } else if ((AAINCR1_no) == AAINCRS_TAG_no) { \ + KVM_SET_AAU_AAINCR_TAG(val1); \ + GUEST_SET_AAU_AAINCR(AAINCR2_no, val2); \ + } else { \ + GUEST_SET_AAU_AAINCR(AAINCR1_no, val1); \ + KVM_SET_AAU_AAINCR_TAG(val2); \ + } \ +}) +#define KVM_SET_AAU_AASTI(AASTI_no, val) \ + GUEST_SET_AAU_AASTI(AASTI_no, val) +#define KVM_SET_AAU_AASTIS(AASTI1_no, AASTI2_no, val1, val2) \ +({ \ + GUEST_SET_AAU_AASTI(AASTI1_no, val1); \ + GUEST_SET_AAU_AASTI(AASTI2_no, val2); \ +}) +#define KVM_SET_AAU_AASTI_TAG(val) \ + GUEST_SET_AAU_SREG(aasti_tags, val) +#define KVM_SET_AAU_AASR(val) \ + GUEST_SET_AAU_SREG(aasr, val) +#define KVM_SET_AAU_AAFSTR(val) \ + GUEST_SET_AAU_SREG(aafstr, val) +#define KVM_SET_AAU_AALDI(AALDI_no, lval, rval) \ +({ \ + GUEST_SET_AAU_AALDI(((AALDI_no) + 0), lval); \ + GUEST_SET_AAU_AALDI(((AALDI_no) + 32), rval); \ +}) +#define KVM_SET_AAU_AALDA(AALDA_no, lval, rval) \ +({ \ + GUEST_SET_AAU_AALDA(((AALDA_no) + 0), lval); \ + GUEST_SET_AAU_AALDA(((AALDA_no) + 32), rval); \ +}) 
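/*
 * Editor's note: a usage sketch, not part of this patch. The paired
 * accessors above special-case the register numbers AAINDS_TAG_no and
 * AAINCRS_TAG_no: when one of them appears in a pair, that slot is routed
 * to the corresponding tag register instead of a value register. For
 * example, reading index register 14 together with the index tags:
 */
static inline void example_read_aaind14_and_tags(u64 *ind14, u64 *tags)
{
	KVM_GET_AAU_AAINDS(14, AAINDS_TAG_no, *ind14, *tags);
}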
+#define KVM_SET_AAU_AALDM(lo, hi) \ +({ \ + GUEST_SET_AAU_AALDM_lo(lo); \ + GUEST_SET_AAU_AALDM_hi(hi); \ +}) +#define KVM_SET_AAU_AALDV(lo, hi) \ +({ \ + GUEST_SET_AAU_AALDV_lo(lo); \ + GUEST_SET_AAU_AALDV_hi(hi); \ +}) + +/* + * KVM virtual AAU registers access functions (can be paravirtualized) + */ +static __always_inline u32 +kvm_read_aasr_reg_value(void) +{ + return KVM_GET_AAU_AASR(); +} +static __always_inline void +kvm_write_aasr_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AASR(reg_value); +} +static inline u32 +kvm_read_aafstr_reg_value(void) +{ + return KVM_GET_AAU_AAFSTR(); +} +static inline void +kvm_write_aafstr_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AAFSTR(reg_value); +} + +static __always_inline e2k_aasr_t +kvm_read_aasr_reg(void) +{ + e2k_aasr_t aasr; + + AW(aasr) = kvm_read_aasr_reg_value(); + return aasr; +} +static __always_inline void +kvm_write_aasr_reg(e2k_aasr_t aasr) +{ + kvm_write_aasr_reg_value(AW(aasr)); +} + +static inline u64 +kvm_read_aaind_reg_value(int AAIND_no) +{ + return KVM_GET_AAU_AAIND(AAIND_no); +} +static inline void +kvm_write_aaind_reg_value(int AAIND_no, u64 reg_value) +{ + KVM_SET_AAU_AAIND(AAIND_no, reg_value); +} + +static inline void +kvm_read_aainds_pair_value(int AAINDs_pair, u64 *lo_value, u64 *hi_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AAINDS(AAINDs_pair, (AAINDs_pair + 1), value1, value2); + *lo_value = value1; + *hi_value = value2; +} +#define KVM_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, value1, value2) \ + KVM_GET_AAU_AAINDS(AAINDs_pair, ((AAINDs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, value1, value2) \ + KVM_GET_AAU_AAINDS(AAINDs_pair, ((AAINDs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAIND_REG15_AND_TAGS_VALUE_V5(value15, tags) \ +({ \ + value15 = kvm_read_aaind_reg_value(15); \ + tags = kvm_read_aaind_tags_reg_value(); \ +}) + +static inline void +kvm_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value) +{ + KVM_SET_AAU_AAINDS(AAINDs_pair, (AAINDs_pair + 1), lo_value, hi_value); +} +#define KVM_WRITE_AAINDS_PAIR_VALUE_V2(AAINDs_pair, lo_value, hi_value) \ + kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value) +#define KVM_WRITE_AAINDS_PAIR_VALUE_V5(AAINDs_pair, lo_value, hi_value) \ + kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value) + +static inline u32 +kvm_read_aaind_tags_reg_value(void) +{ + return KVM_GET_AAU_AAIND_TAG(); +} +static inline void +kvm_write_aaind_tags_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AAIND_TAG(reg_value); +} +static inline u64 +kvm_read_aaincr_reg_value(int AAINCR_no) +{ + return KVM_GET_AAU_AAINCR(AAINCR_no); +} +static inline void +kvm_write_aaincr_reg_value(int AAINCR_no, u64 reg_value) +{ + KVM_SET_AAU_AAINCR(AAINCR_no, reg_value); +} +static inline u32 +kvm_read_aaincr_tags_reg_value(void) +{ + return KVM_GET_AAU_AAINCR_TAG(); +} +static inline void +kvm_write_aaincr_tags_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AAINCR_TAG(reg_value); +} + +static inline void +kvm_read_aaincrs_pair_value(int AAINCRs_pair, u64 *lo_value, u64 *hi_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AAINCRS(AAINCRs_pair, (AAINCRs_pair + 1), value1, value2); + *lo_value = value1; + *hi_value = value2; +} +#define KVM_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, value1, value2) \ + KVM_GET_AAU_AAINCRS(AAINCRs_pair, ((AAINCRs_pair) + 1), \ + value1, value2) +#define KVM_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, value1, value2) \ + KVM_GET_AAU_AAINCRS(AAINCRs_pair, ((AAINCRs_pair) + 1), \ + value1, value2) +#define 
KVM_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(value7, tags) \ +({ \ + value7 = kvm_read_aaincr_reg_value(7); \ + tags = kvm_read_aaincr_tags_reg_value(); \ +}) + +static inline void +kvm_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value) +{ + KVM_SET_AAU_AAINCRS(AAINCRs_pair, (AAINCRs_pair + 1), + lo_value, hi_value); +} +#define KVM_WRITE_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, lo_value, hi_value) \ + kvm_write_aaincrs_pair_value(AAINCRs_pair, lo_value, hi_value) +#define KVM_WRITE_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, lo_value, hi_value) \ + kvm_write_aaincrs_pair_value(AAINCRs_pair, lo_value, hi_value) + +static inline u64 +kvm_read_aasti_reg_value(int AASTI_no) +{ + return KVM_GET_AAU_AASTI(AASTI_no); +} +static inline void +kvm_write_aasti_reg_value(int AASTI_no, u64 reg_value) +{ + KVM_SET_AAU_AASTI(AASTI_no, reg_value); +} +static inline u32 +kvm_read_aasti_tags_reg_value(void) +{ + return KVM_GET_AAU_AASTI_TAG(); +} +static inline void +kvm_write_aasti_tags_reg_value(u32 reg_value) +{ + KVM_SET_AAU_AASTI_TAG(reg_value); +} + +static inline void +kvm_read_aastis_pair_value(int AASTIs_pair, u64 *lo_value, u64 *hi_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AASTIS(AASTIs_pair, (AASTIs_pair + 1), value1, value2); + *lo_value = value1; + *hi_value = value2; +} +#define KVM_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, value1, value2) \ + KVM_GET_AAU_AASTIS(AASTIs_pair, ((AASTIs_pair) + 1), \ + value1, value2) +#define KVM_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, value1, value2) \ + KVM_GET_AAU_AASTIS(AASTIs_pair, ((AASTIs_pair) + 1), \ + value1, value2) + +static inline void +kvm_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value) +{ + KVM_SET_AAU_AASTIS(AASTIs_pair, (AASTIs_pair + 1), lo_value, hi_value); +} +#define KVM_WRITE_AASTIS_PAIR_VALUE_V2(AASTIs_pair, lo_value, hi_value) \ + kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value) +#define KVM_WRITE_AASTIS_PAIR_VALUE_V5(AASTIs_pair, lo_value, hi_value) \ + kvm_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value) + +static inline void +kvm_read_aaldi_reg_value(int AALDI_no, u64 *l_value, u64 *r_value) +{ + u64 value1, value2; + + KVM_GET_AAU_AALDI(AALDI_no, value1, value2); + *l_value = value1; + *r_value = value2; +} +#define KVM_READ_AALDI_REG_VALUE_V2(AALDI_no, value1, value2) \ + KVM_GET_AAU_AALDI(AALDI_no, value1, value2) +#define KVM_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2) \ + KVM_GET_AAU_AALDI(AALDI_no, value1, value2) + +static inline void +kvm_write_aaldi_reg_value(int AALDI_no, u64 l_value, u64 r_value) +{ + KVM_SET_AAU_AALDI(AALDI_no, l_value, r_value); +} + +static inline void +kvm_read_aaldas_reg_value(int AALDAs_no, u32 *l_value, u32 *r_value) +{ + u32 value1, value2; + + KVM_GET_AAU_AALDA(AALDAs_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +kvm_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value) +{ + KVM_SET_AAU_AALDA(AALDAs_no, l_value, r_value); +} +static inline void +kvm_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + KVM_GET_AAU_AALDM(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static inline void +kvm_write_aaldm_reg_value(u32 lo_value, u32 hi_value) +{ + KVM_SET_AAU_AALDM(lo_value, hi_value); +} +static inline void +kvm_read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static inline void +kvm_write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void 
+kvm_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + KVM_GET_AAU_AALDV(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static inline void +kvm_write_aaldv_reg_value(u32 lo_value, u32 hi_value) +{ + KVM_SET_AAU_AALDV(lo_value, hi_value); +} +static inline void +kvm_read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static inline void +kvm_write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_write_aaldv_reg_value(aaldv->lo, aaldv->hi); +} + +static inline void +kvm_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + KVM_GET_AAU_AAD(AAD_no, mem_p); +} + +static inline void +kvm_write_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + KVM_SET_AAU_AAD(AAD_no, mem_p); +} + +static inline void +kvm_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + KVM_GET_AAU_4_AADs(AADs_no, mem_p); +} + +static inline void +kvm_write_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + KVM_SET_AAU_4_AADs(AADs_no, mem_p); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure kvm kernel without paravirtualization */ + +#include + +static __always_inline u32 read_aasr_reg_value(void) +{ + return kvm_read_aasr_reg_value(); +} +static __always_inline void write_aasr_reg_value(u32 reg_value) +{ + kvm_write_aasr_reg_value(reg_value); +} +static __always_inline e2k_aasr_t read_aasr_reg(void) +{ + return kvm_read_aasr_reg(); +} +static __always_inline void write_aasr_reg(e2k_aasr_t aasr) +{ + kvm_write_aasr_reg(aasr); +} +static inline u32 read_aafstr_reg_value(void) +{ + return kvm_read_aafstr_reg_value(); +} +static inline void write_aafstr_reg_value(u32 reg_value) +{ + kvm_write_aafstr_reg_value(reg_value); +} +static inline void read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static inline void write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + kvm_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static inline void write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + kvm_write_aaldv_reg_value(aaldv->lo, aaldv->hi); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _KVM_AAU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/async_pf.h b/arch/e2k/include/asm/kvm/async_pf.h new file mode 100644 index 0000000..e6bdaf9 --- /dev/null +++ b/arch/e2k/include/asm/kvm/async_pf.h @@ -0,0 +1,33 @@ +#ifndef ASYNC_PF_E2K_H +#define ASYNC_PF_E2K_H + +#ifdef CONFIG_KVM_ASYNC_PF + +/* No async page fault occurred */ +#define KVM_APF_NO 0 +/* Physical page was swapped out by host */ +#define KVM_APF_PAGE_IN_SWAP 1 +/* Physical page is loaded from swap and ready for access */ +#define KVM_APF_PAGE_READY 2 + +#define KVM_APF_HASH_BITS 8 +/* Number of hash buckets in apf cache */ +#define KVM_APF_CACHE_SIZE (1 << KVM_APF_HASH_BITS) + +/* + * Type of irq controller which will be used + * by host to notify guest that page is ready + */ +enum { + EPIC_CONTROLLER = 1, + APIC_CONTROLLER = 2 +}; + +u32 pv_apf_read_and_reset_reason(void); + +void pv_apf_wait(void); +void pv_apf_wake(void); + +#endif /* CONFIG_KVM_ASYNC_PF */ + +#endif /* ASYNC_PF_E2K_H */
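/*
 * Editor's note: a sketch, not part of this patch, of how a guest fault
 * path is expected to consume the async-pf hooks above: the reason code
 * selects between parking the current task until the host swaps the page
 * back in and waking the tasks that wait for it. The function name is
 * hypothetical.
 */
static inline void example_handle_async_pf(void)
{
	switch (pv_apf_read_and_reset_reason()) {
	case KVM_APF_PAGE_IN_SWAP:
		pv_apf_wait();	/* page was swapped out by host: wait */
		break;
	case KVM_APF_PAGE_READY:
		pv_apf_wake();	/* page is back in memory: wake waiters */
		break;
	case KVM_APF_NO:
	default:
		break;		/* ordinary page fault, nothing to do */
	}
}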
diff --git a/arch/e2k/include/asm/kvm/boot.h b/arch/e2k/include/asm/kvm/boot.h new file mode 100644 index 0000000..2684553 --- /dev/null +++ b/arch/e2k/include/asm/kvm/boot.h @@ -0,0 +1,50 @@ +/* + * E2K boot-time initialization virtualization for KVM host + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_BOOT_H_ +#define _E2K_KVM_BOOT_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +#include +#include + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization support */ +#else /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL) +/* it is paravirtualized host and guest kernel */ +/* or pure guest kernel */ +#include +#endif /* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */ + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is native host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +static inline void +kvm_host_machine_setup(machdep_t *host_machine) +{ + machdep_t *node_mach; + int nid; + + for_each_node_has_dup_kernel(nid) { + node_mach = the_node_machine(nid); + if (host_machine->native_iset_ver < E2K_ISET_V5) { + } else { + } + } +} +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_KVM_BOOT_H_ */ diff --git a/arch/e2k/include/asm/kvm/boot_spinlock.h b/arch/e2k/include/asm/kvm/boot_spinlock.h new file mode 100644 index 0000000..977dc1a --- /dev/null +++ b/arch/e2k/include/asm/kvm/boot_spinlock.h @@ -0,0 +1,38 @@ +#ifndef __ASM_E2K_KVM_BOOT_SPINLOCK_H +#define __ASM_E2K_KVM_BOOT_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * boot-time spin_lock()/spin_unlock() slow part + * + * Copyright 2020 MCST + */ + +#include +#include + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#define arch_boot_spin_unlock kvm_boot_spin_unlock +static inline void kvm_boot_spin_unlock(boot_spinlock_t *lock) +{ + boot_spinlock_t val; + u16 ticket, ready; + + wmb(); /* wait for all stores to complete */ + val.lock = __api_atomic16_add_return32_lock( + 1 << BOOT_SPINLOCK_HEAD_SHIFT, &lock->lock); + ticket = val.tail; + ready = val.head; + + if (unlikely(ticket != ready)) { + /* spinlock has more user(s): so activate them */ + boot_arch_spin_unlock_slow(lock); + } +} + +#endif /* __ASM_E2K_KVM_BOOT_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/kvm/boot_spinlock_slow.h b/arch/e2k/include/asm/kvm/boot_spinlock_slow.h new file mode 100644 index 0000000..17bdaec --- /dev/null +++ b/arch/e2k/include/asm/kvm/boot_spinlock_slow.h @@ -0,0 +1,44 @@ +#ifndef __ASM_E2K_KVM_BOOT_SPINLOCK_SLOW_H +#define __ASM_E2K_KVM_BOOT_SPINLOCK_SLOW_H +/* + * This file implements on host the arch-dependent parts of kvm guest + * boot-time spin_lock()/spin_unlock() slow part + * + * Copyright 2020 MCST + */ + +#include +#include +#include +#include + +typedef struct boot_spinlock_unlocked { + struct list_head unlocked_list; + struct kvm_vcpu *vcpu; + struct list_head checked_unlocked; /* list of tasks */ + /* which already */ + /* checked spin */ + /* was unlocked */ + + void *lock; +} boot_spinlock_unlocked_t; + +#define BOOT_SPINLOCK_HASH_BITS 6 +#define BOOT_SPINLOCK_HASH_SHIFT 4 /* [9:4] hash bits */ +#define BOOT_SPINLOCK_HASH_SIZE (1 << BOOT_SPINLOCK_HASH_BITS) +#define boot_spinlock_hashfn(lockp) \ + hash_long(((unsigned long)(lockp)) >> \ + BOOT_SPINLOCK_HASH_SHIFT, \ + BOOT_SPINLOCK_HASH_BITS) +#define BOOT_SPINUNLOCKED_LIST_SIZE 32 + +extern int kvm_boot_spin_lock_slow(struct kvm_vcpu *vcpu, void *lock, + bool check_unlock); +extern int kvm_boot_spin_locked_slow(struct kvm_vcpu *vcpu, 
void *lock); +extern int kvm_boot_spin_unlock_slow(struct kvm_vcpu *vcpu, void *lock, + bool add_to_unlock); + +extern int kvm_boot_spinlock_init(struct kvm *kvm); +extern void kvm_boot_spinlock_destroy(struct kvm *kvm); + +#endif /* __ASM_E2K_KVM_BOOT_SPINLOCK_SLOW_H */ \ No newline at end of file diff --git a/arch/e2k/include/asm/kvm/cpu_hv_regs_access.h b/arch/e2k/include/asm/kvm/cpu_hv_regs_access.h new file mode 100644 index 0000000..fc93174 --- /dev/null +++ b/arch/e2k/include/asm/kvm/cpu_hv_regs_access.h @@ -0,0 +1,512 @@ +#ifndef _E2K_KVM_CPU_HV_REGS_ACCESS_H_ +#define _E2K_KVM_CPU_HV_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * Virtualization control registers + */ +#define READ_VIRT_CTRL_CU_REG_VALUE() NATIVE_GET_DSREG_CLOSED(virt_ctrl_cu) +/* Bug #127239: on some CPUs "rwd %virt_ctrl_cu" instruction must also + * contain a NOP. This is already accomplished by using delay "5" here. */ +#define WRITE_VIRT_CTRL_CU_REG_VALUE(virt_ctrl) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(virt_ctrl_cu, virt_ctrl, 5) +#define READ_VIRT_CTRL_CU_REG() read_VIRT_CTRL_CU_reg() +#define WRITE_VIRT_CTRL_CU_REG(virt_ctrl) \ + write_VIRT_CTRL_CU_reg(virt_ctrl) + +/* Shadow CPU registers */ + +/* + * Read/write low/high double-word OS Compilation Unit Descriptor (SH_OSCUD) + */ + +#define READ_SH_OSCUD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_oscud.lo) +#define READ_SH_OSCUD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_oscud.hi) + +#define WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscud.lo, OSCUD_lo_value, 5) +#define WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscud.hi, OSCUD_hi_value, 5) +#define WRITE_SH_OSCUD_LO_REG(OSCUD_lo) \ +({ \ + WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \ +}) +#define WRITE_SH_OSCUD_HI_REG(OSCUD_hi) \ +({ \ + WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \ +}) +#define WRITE_SH_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \ +({ \ + WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \ + WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \ +}) +#define WRITE_SH_OSCUD_REG(OSCUD_hi, OSCUD_lo) \ +({ \ + WRITE_SH_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \ + OSCUD_lo.OSCUD_lo_half); \ +}) + +/* + * Read/write low/high double-word OS Globals Register (SH_OSGD) + */ + +#define READ_SH_OSGD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_osgd.lo) +#define READ_SH_OSGD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_osgd.hi) + +#define WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osgd.lo, OSGD_lo_value, 5) +#define WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osgd.hi, OSGD_hi_value, 5) +#define WRITE_SH_OSGD_LO_REG(OSGD_lo) \ +({ \ + WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define WRITE_SH_OSGD_HI_REG(OSGD_hi) \ +({ \ + WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define WRITE_SH_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \ +({ \ + WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define WRITE_SH_OSGD_REG(OSGD_hi, OSGD_lo) \ +({ \ + WRITE_SH_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, \ + OSGD_lo.OSGD_lo_half); \ +}) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register + * (SH_PSP, backup BU_PSP) + */ + +#define READ_SH_PSP_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_psp.lo) +#define READ_SH_PSP_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sh_psp.hi) +#define READ_BU_PSP_LO_REG_VALUE() 
+
+/* Shadow CPU registers */
+
+/*
+ * Read/write low/high double-word OS Compilation Unit Descriptor (SH_OSCUD)
+ */
+
+#define READ_SH_OSCUD_LO_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_oscud.lo)
+#define READ_SH_OSCUD_HI_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_oscud.hi)
+
+#define WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscud.lo, OSCUD_lo_value, 5)
+#define WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscud.hi, OSCUD_hi_value, 5)
+#define WRITE_SH_OSCUD_LO_REG(OSCUD_lo)	\
+({	\
+	WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half);	\
+})
+#define WRITE_SH_OSCUD_HI_REG(OSCUD_hi)	\
+({	\
+	WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half);	\
+})
+#define WRITE_SH_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value)	\
+({	\
+	WRITE_SH_OSCUD_LO_REG_VALUE(OSCUD_lo_value);	\
+	WRITE_SH_OSCUD_HI_REG_VALUE(OSCUD_hi_value);	\
+})
+#define WRITE_SH_OSCUD_REG(OSCUD_hi, OSCUD_lo)	\
+({	\
+	WRITE_SH_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half,	\
+					OSCUD_lo.OSCUD_lo_half);	\
+})
+
+/*
+ * Read/write low/high double-word OS Globals Register (SH_OSGD)
+ */
+
+#define READ_SH_OSGD_LO_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_osgd.lo)
+#define READ_SH_OSGD_HI_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_osgd.hi)
+
+#define WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osgd.lo, OSGD_lo_value, 5)
+#define WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osgd.hi, OSGD_hi_value, 5)
+#define WRITE_SH_OSGD_LO_REG(OSGD_lo)	\
+({	\
+	WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half);	\
+})
+#define WRITE_SH_OSGD_HI_REG(OSGD_hi)	\
+({	\
+	WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half);	\
+})
+#define WRITE_SH_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value)	\
+({	\
+	WRITE_SH_OSGD_HI_REG_VALUE(OSGD_hi_value);	\
+	WRITE_SH_OSGD_LO_REG_VALUE(OSGD_lo_value);	\
+})
+#define WRITE_SH_OSGD_REG(OSGD_hi, OSGD_lo)	\
+({	\
+	WRITE_SH_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half,	\
+				OSGD_lo.OSGD_lo_half);	\
+})
+
+/*
+ * Read/write low/high quad-word Procedure Stack Pointer Register
+ * (SH_PSP, backup BU_PSP)
+ */
+
+#define READ_SH_PSP_LO_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_psp.lo)
+#define READ_SH_PSP_HI_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_psp.hi)
+#define READ_BU_PSP_LO_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(bu_psp.lo)
+#define READ_BU_PSP_HI_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(bu_psp.hi)
+
+#define WRITE_SH_PSP_LO_REG_VALUE(PSP_lo_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_psp.lo, PSP_lo_value, 5)
+#define WRITE_SH_PSP_HI_REG_VALUE(PSP_hi_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_psp.hi, PSP_hi_value, 5)
+#define WRITE_BU_PSP_LO_REG_VALUE(PSP_lo_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(bu_psp.lo, PSP_lo_value, 5)
+#define WRITE_BU_PSP_HI_REG_VALUE(PSP_hi_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(bu_psp.hi, PSP_hi_value, 5)
+
+/*
+ * Read/write word Procedure Stack Hardware Top Pointer (SH_PSHTP)
+ */
+#define READ_SH_PSHTP_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_pshtp)
+#define WRITE_SH_PSHTP_REG_VALUE(PSHTP_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_pshtp, PSHTP_value, 5)
+
+/*
+ * Read/write low/high quad-word Procedure Chain Stack Pointer Register
+ * (SH_PCSP, backup registers BU_PCSP)
+ */
+#define READ_SH_PCSP_LO_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_pcsp.lo)
+#define READ_SH_PCSP_HI_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_pcsp.hi)
+#define READ_BU_PCSP_LO_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(bu_pcsp.lo)
+#define READ_BU_PCSP_HI_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(bu_pcsp.hi)
+
+#define WRITE_SH_PCSP_LO_REG_VALUE(PCSP_lo_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_pcsp.lo, PCSP_lo_value, 5)
+#define WRITE_SH_PCSP_HI_REG_VALUE(PCSP_hi_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_pcsp.hi, PCSP_hi_value, 5)
+#define WRITE_BU_PCSP_LO_REG_VALUE(PCSP_lo_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(bu_pcsp.lo, PCSP_lo_value, 5)
+#define WRITE_BU_PCSP_HI_REG_VALUE(PCSP_hi_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(bu_pcsp.hi, PCSP_hi_value, 5)
+
+/*
+ * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP)
+ * and shadow pointer (SH_PCSHTP)
+ */
+#define READ_SH_PCSHTP_REG_VALUE()	\
+		NATIVE_GET_SREG_CLOSED(sh_pcshtp)
+#define READ_SH_PCSHTP_REG_SVALUE()	\
+		PCSHTP_SIGN_EXTEND(NATIVE_GET_SREG_CLOSED(sh_pcshtp))
+#define WRITE_SH_PCSHTP_REG_SVALUE(PCSHTP_svalue)	\
+		NATIVE_SET_SREG_CLOSED_NOEXC(sh_pcshtp, PCSHTP_svalue, 5)
+
+/*
+ * Read/write current window descriptor register (SH_WD)
+ */
+#define READ_SH_WD_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_wd)
+#define WRITE_SH_WD_REG_VALUE(WD_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_wd, WD_value, 5)
+
+/*
+ * Read/write OS register which points to current process thread info
+ * structure (SH_OSR0)
+ */
+#define READ_SH_OSR0_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_osr0)
+
+#define WRITE_SH_OSR0_REG_VALUE(osr0_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_osr0, osr0_value, 5)
+
+/*
+ * Read/Write system clock registers (SH_SCLKM3)
+ */
+#define READ_SH_SCLKM3_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_sclkm3)
+
+#define WRITE_SH_SCLKM3_REG_VALUE(reg_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_sclkm3, reg_value, 4)
+
+/*
+ * Read/write double-word Compilation Unit Table Register (SH_OSCUTD)
+ */
+#define READ_SH_OSCUTD_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sh_oscutd)
+
+#define WRITE_SH_OSCUTD_REG_VALUE(CUTD_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscutd, CUTD_value, 7)
+
+/*
+ * Read/write word Compilation Unit Index Register (SH_OSCUIR)
+ */
+#define READ_SH_OSCUIR_REG_VALUE()	NATIVE_GET_SREG_CLOSED(sh_oscuir)
+
+#define WRITE_SH_OSCUIR_REG_VALUE(CUIR_value)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(sh_oscuir, CUIR_value, 7)
+
+/*
+ * Read/Write Processor Core Mode Register (SH_CORE_MODE)
+ */
+#define READ_SH_CORE_MODE_REG_VALUE()	NATIVE_GET_SREG_CLOSED(sh_core_mode)
+#define WRITE_SH_CORE_MODE_REG_VALUE(modes)	\
NATIVE_SET_SREG_CLOSED_NOEXC(sh_core_mode, modes, 5) + +extern unsigned long read_VIRT_CTRL_CU_reg_value(void); +extern void write_VIRT_CTRL_CU_reg_value(unsigned long value); +extern unsigned int read_SH_CORE_MODE_reg_value(void); +extern void write_SH_CORE_MODE_reg_value(unsigned int value); +extern unsigned long read_SH_PSP_LO_reg_value(void); +extern unsigned long read_SH_PSP_HI_reg_value(void); +extern void write_SH_PSP_LO_reg_value(unsigned long value); +extern void write_SH_PSP_HI_reg_value(unsigned long value); +extern unsigned long read_BU_PSP_LO_reg_value(void); +extern unsigned long read_BU_PSP_HI_reg_value(void); +extern void write_BU_PSP_LO_reg_value(unsigned long value); +extern void write_BU_PSP_HI_reg_value(unsigned long value); +extern unsigned long read_SH_PSHTP_reg_value(void); +extern void write_SH_PSHTP_reg_value(unsigned long value); +extern unsigned long read_SH_PCSP_LO_reg_value(void); +extern unsigned long read_SH_PCSP_HI_reg_value(void); +extern void write_SH_PCSP_LO_reg_value(unsigned long value); +extern void write_SH_PCSP_HI_reg_value(unsigned long value); +extern unsigned long read_BU_PCSP_LO_reg_value(void); +extern unsigned long read_BU_PCSP_HI_reg_value(void); +extern void write_BU_PCSP_LO_reg_value(unsigned long value); +extern void write_BU_PCSP_HI_reg_value(unsigned long value); +extern int read_SH_PCSHTP_reg_value(void); +extern void write_SH_PCSHTP_reg_value(int value); +extern unsigned long read_SH_WD_reg_value(void); +extern void write_SH_WD_reg_value(unsigned long value); +extern unsigned long read_SH_OSCUD_LO_reg_value(void); +extern unsigned long read_SH_OSCUD_HI_reg_value(void); +extern void write_SH_OSCUD_LO_reg_value(unsigned long value); +extern void write_SH_OSCUD_HI_reg_value(unsigned long value); +extern unsigned long read_SH_OSGD_LO_reg_value(void); +extern unsigned long read_SH_OSGD_HI_reg_value(void); +extern void write_SH_OSGD_LO_reg_value(unsigned long value); +extern void write_SH_OSGD_HI_reg_value(unsigned long value); +extern unsigned long read_SH_OSCUTD_reg_value(void); +extern void write_SH_OSCUTD_reg_value(unsigned long value); +extern unsigned int read_SH_OSCUIR_reg_value(void); +extern void write_SH_OSCUIR_reg_value(unsigned int value); +extern unsigned long read_SH_OSR0_reg_value(void); +extern void write_SH_OSR0_reg_value(unsigned long value); + +static inline virt_ctrl_cu_t read_VIRT_CTRL_CU_reg(void) +{ + virt_ctrl_cu_t virt_ctrl; + + virt_ctrl.VIRT_CTRL_CU_reg = read_VIRT_CTRL_CU_reg_value(); + return virt_ctrl; +} +static inline void write_VIRT_CTRL_CU_reg(virt_ctrl_cu_t virt_ctrl) +{ + write_VIRT_CTRL_CU_reg_value(virt_ctrl.VIRT_CTRL_CU_reg); +} + +static inline e2k_psp_lo_t read_SH_PSP_LO_reg(void) +{ + e2k_psp_lo_t psp_lo; + + psp_lo.PSP_lo_half = read_SH_PSP_LO_reg_value(); + return psp_lo; +} +static inline e2k_psp_hi_t read_SH_PSP_HI_reg(void) +{ + e2k_psp_hi_t psp_hi; + + psp_hi.PSP_hi_half = read_SH_PSP_HI_reg_value(); + return psp_hi; +} +static inline void write_SH_PSP_LO_reg(e2k_psp_lo_t psp_lo) +{ + write_SH_PSP_LO_reg_value(psp_lo.PSP_lo_half); +} +static inline void write_SH_PSP_HI_reg(e2k_psp_hi_t psp_hi) +{ + write_SH_PSP_HI_reg_value(psp_hi.PSP_hi_half); +} + +static inline e2k_pcsp_lo_t read_SH_PCSP_LO_reg(void) +{ + e2k_pcsp_lo_t pcsp_lo; + + pcsp_lo.PCSP_lo_half = read_SH_PCSP_LO_reg_value(); + return pcsp_lo; +} +static inline e2k_pcsp_hi_t read_SH_PCSP_HI_reg(void) +{ + e2k_pcsp_hi_t pcsp_hi; + + pcsp_hi.PCSP_hi_half = read_SH_PCSP_HI_reg_value(); + return pcsp_hi; +} +static inline void 
write_SH_PCSP_LO_reg(e2k_pcsp_lo_t pcsp_lo) +{ + write_SH_PCSP_LO_reg_value(pcsp_lo.PCSP_lo_half); +} +static inline void write_SH_PCSP_HI_reg(e2k_pcsp_hi_t pcsp_hi) +{ + write_SH_PCSP_HI_reg_value(pcsp_hi.PCSP_hi_half); +} + +static inline e2k_psp_lo_t read_BU_PSP_LO_reg(void) +{ + e2k_psp_lo_t psp_lo; + + psp_lo.PSP_lo_half = read_BU_PSP_LO_reg_value(); + return psp_lo; +} +static inline e2k_psp_hi_t read_BU_PSP_HI_reg(void) +{ + e2k_psp_hi_t psp_hi; + + psp_hi.PSP_hi_half = read_BU_PSP_HI_reg_value(); + return psp_hi; +} +static inline void write_BU_PSP_LO_reg(e2k_psp_lo_t psp_lo) +{ + write_BU_PSP_LO_reg_value(psp_lo.PSP_lo_half); +} +static inline void write_BU_PSP_HI_reg(e2k_psp_hi_t psp_hi) +{ + write_BU_PSP_HI_reg_value(psp_hi.PSP_hi_half); +} + +static inline e2k_pcsp_lo_t read_BU_PCSP_LO_reg(void) +{ + e2k_pcsp_lo_t pcsp_lo; + + pcsp_lo.PCSP_lo_half = read_BU_PCSP_LO_reg_value(); + return pcsp_lo; +} +static inline e2k_pcsp_hi_t read_BU_PCSP_HI_reg(void) +{ + e2k_pcsp_hi_t pcsp_hi; + + pcsp_hi.PCSP_hi_half = read_BU_PCSP_HI_reg_value(); + return pcsp_hi; +} +static inline void write_BU_PCSP_LO_reg(e2k_pcsp_lo_t pcsp_lo) +{ + write_BU_PCSP_LO_reg_value(pcsp_lo.PCSP_lo_half); +} +static inline void write_BU_PCSP_HI_reg(e2k_pcsp_hi_t pcsp_hi) +{ + write_BU_PCSP_HI_reg_value(pcsp_hi.PCSP_hi_half); +} + +static inline e2k_oscud_lo_t read_SH_OSCUD_LO_reg(void) +{ + e2k_oscud_lo_t oscud_lo; + + oscud_lo.OSCUD_lo_half = read_SH_OSCUD_LO_reg_value(); + return oscud_lo; +} +static inline e2k_oscud_hi_t read_SH_OSCUD_HI_reg(void) +{ + e2k_oscud_hi_t oscud_hi; + + oscud_hi.OSCUD_hi_half = read_SH_OSCUD_HI_reg_value(); + return oscud_hi; +} +static inline void write_SH_OSCUD_LO_reg(e2k_oscud_lo_t oscud_lo) +{ + write_SH_OSCUD_LO_reg_value(oscud_lo.OSCUD_lo_half); +} +static inline void write_SH_OSCUD_HI_reg(e2k_oscud_hi_t oscud_hi) +{ + write_SH_OSCUD_HI_reg_value(oscud_hi.OSCUD_hi_half); +} + +static inline e2k_osgd_lo_t read_SH_OSGD_LO_reg(void) +{ + e2k_osgd_lo_t osgd_lo; + + osgd_lo.OSGD_lo_half = read_SH_OSGD_LO_reg_value(); + return osgd_lo; +} +static inline e2k_osgd_hi_t read_SH_OSGD_HI_reg(void) +{ + e2k_osgd_hi_t osgd_hi; + + osgd_hi.OSGD_hi_half = read_SH_OSGD_HI_reg_value(); + return osgd_hi; +} +static inline void write_SH_OSGD_LO_reg(e2k_osgd_lo_t osgd_lo) +{ + write_SH_OSGD_LO_reg_value(osgd_lo.OSGD_lo_half); +} +static inline void write_SH_OSGD_HI_reg(e2k_osgd_hi_t osgd_hi) +{ + write_SH_OSGD_HI_reg_value(osgd_hi.OSGD_hi_half); +} + +static inline e2k_cutd_t read_SH_OSCUTD_reg(void) +{ + e2k_cutd_t cutd; + + cutd.CUTD_reg = read_SH_OSCUTD_reg_value(); + return cutd; +} +static inline void write_SH_OSCUTD_reg(e2k_cutd_t cutd) +{ + write_SH_OSCUTD_reg_value(cutd.CUTD_reg); +} + +static inline e2k_cuir_t read_SH_OSCUIR_reg(void) +{ + e2k_cuir_t cuir; + + cuir.CUIR_reg = read_SH_OSCUIR_reg_value(); + return cuir; +} +static inline void write_SH_OSCUIR_reg(e2k_cuir_t cuir) +{ + write_SH_OSCUIR_reg_value(cuir.CUIR_reg); +} + +static inline e2k_core_mode_t read_SH_CORE_MODE_reg(void) +{ + e2k_core_mode_t core_mode; + + core_mode.CORE_MODE_reg = read_SH_CORE_MODE_reg_value(); + return core_mode; +} +static inline void write_SH_CORE_MODE_reg(e2k_core_mode_t core_mode) +{ + write_SH_CORE_MODE_reg_value(core_mode.CORE_MODE_reg); +} + +#define READ_G_PREEMPT_TMR_REG() \ + ((e2k_g_preempt_tmr_t) NATIVE_GET_SREG_CLOSED(g_preempt_tmr)) +#define WRITE_G_PREEMPT_TMR_REG(x) \ + NATIVE_SET_SREG_CLOSED_NOEXC(g_preempt_tmr, AW(x), 5) + +#define READ_INTC_PTR_CU() 
NATIVE_GET_DSREG_CLOSED(intc_ptr_cu)
+#define READ_INTC_INFO_CU()	NATIVE_GET_DSREG_CLOSED(intc_info_cu)
+#define WRITE_INTC_INFO_CU(x)	\
+		NATIVE_SET_DSREG_CLOSED_NOEXC(intc_info_cu, x, 5)
+
+static inline void save_intc_info_cu(intc_info_cu_t *info, int *num)
+{
+	u64 info_ptr, i = 0;
+
+	/*
+	 * The read of INTC_PTR will clear the hardware pointer,
+	 * but the subsequent reads of INTC_INFO will increase
+	 * it again until it reaches the same value it had before.
+	 */
+	info_ptr = READ_INTC_PTR_CU();
+	if (!info_ptr) {
+		*num = -1;
+		AW(info->header.lo) = 0;
+		AW(info->header.hi) = 0;
+		return;
+	}
+
+	/*
+	 * CU header should be cleared --- fg@mcst.ru
+	 */
+	AW(info->header.lo) = READ_INTC_INFO_CU();
+	AW(info->header.hi) = READ_INTC_INFO_CU();
+	READ_INTC_PTR_CU();
+	WRITE_INTC_INFO_CU(0ULL);
+	WRITE_INTC_INFO_CU(0ULL);
+	info_ptr -= 2;
+
+	/*
+	 * Read intercepted events list
+	 */
+	for (; info_ptr > 0; info_ptr -= 2) {
+		AW(info->entry[i].lo) = READ_INTC_INFO_CU();
+		info->entry[i].hi = READ_INTC_INFO_CU();
+		info->entry[i].no_restore = false;
+		++i;
+	}
+
+	*num = i;
+}
+
+static inline void restore_intc_info_cu(const intc_info_cu_t *info, int num)
+{
+	int i;
+
+	/*
+	 * 1) Clear the hardware pointer
+	 */
+	READ_INTC_PTR_CU();
+	if (num == -1)
+		return;
+
+	/*
+	 * 2) Write the registers
+	 *
+	 * CU header should be cleared --- fg@mcst.ru
+	 */
+	WRITE_INTC_INFO_CU(0ULL);
+	WRITE_INTC_INFO_CU(0ULL);
+	for (i = 0; i < num; i++) {
+		if (!info->entry[i].no_restore) {
+			WRITE_INTC_INFO_CU(AW(info->entry[i].lo));
+			WRITE_INTC_INFO_CU(info->entry[i].hi);
+		}
+	}
+}
+
+static inline void
+kvm_reset_intc_info_cu_is_updated(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.intc_ctxt.cu_updated = false;
+}
+static inline void
+kvm_set_intc_info_cu_is_updated(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.intc_ctxt.cu_updated = true;
+}
+static inline bool
+kvm_get_intc_info_cu_is_updated(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.intc_ctxt.cu_updated;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_KVM_CPU_HV_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/kvm/cpu_hv_regs_types.h b/arch/e2k/include/asm/kvm/cpu_hv_regs_types.h
new file mode 100644
index 0000000..4fbc21e
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/cpu_hv_regs_types.h
@@ -0,0 +1,418 @@
+#ifndef _E2K_KVM_CPU_HV_REGS_TYPES_H_
+#define _E2K_KVM_CPU_HV_REGS_TYPES_H_
+
+#ifdef __KERNEL__
+
+#include
+
+#ifndef __ASSEMBLY__
+
+typedef union virt_ctrl_cu {
+	struct {
+		u64 evn_c	: 16;	/* [15: 0] */
+		u64 exc_c	: 8;	/* [23:16] */
+		u64 glnch	: 2;	/* [25:24] */
+		u64 __pad1	: 38;	/* [63:26] */
+	};
+	struct {
+		/* evn_c: */
+		u64 rr_idr		: 1;	/* [ 0] */
+		u64 rr_clkr		: 1;	/* [ 1] */
+		u64 rr_sclkr		: 1;	/* [ 2] */
+		u64 rr_dbg		: 1;	/* [ 3] */
+		u64 rw_core_mode	: 1;	/* [ 4] */
+		u64 rw_clkr		: 1;	/* [ 5] */
+		u64 rw_sclkr		: 1;	/* [ 6] */
+		u64 rw_sclkm3		: 1;	/* [ 7] */
+		u64 rw_dbg		: 1;	/* [ 8] */
+		u64 hcem		: 1;	/* [ 9] */
+		u64 virt		: 1;	/* [10] */
+		u64 stop		: 1;	/* [11] */
+		u64 evn_c_res		: 4;	/* [15:12] */
+		/* exc_c: */
+		u64 exc_instr_debug	: 1;	/* [16] */
+		u64 exc_data_debug	: 1;	/* [17] */
+		u64 exc_instr_page	: 1;	/* [18] */
+		u64 exc_data_page	: 1;	/* [19] */
+		u64 exc_mova		: 1;	/* [20] */
+		u64 exc_interrupt	: 1;	/* [21] */
+		u64 exc_nm_interrupt	: 1;	/* [22] */
+		u64 exc_c_res		: 1;	/* [23] */
+		/* glnch: */
+		u64 g_th		: 1;	/* [24] */
+		u64 tir_fz		: 1;	/* [25] */
+		u64 tir_rst		: 1;	/* [26] */
+		u64 __resb		: 37;	/* [63:27] */
+	};
+	u64 word;	/* as entire register */
+} virt_ctrl_cu_t;
+#define VIRT_CTRL_CU_evn_c
evn_c /* events mask to intercept */ +#define VIRT_CTRL_CU_rr_idr rr_idr +#define VIRT_CTRL_CU_rr_clkr rr_clkr +#define VIRT_CTRL_CU_rr_sclkr rr_sclkr +#define VIRT_CTRL_CU_rr_dbg rr_dbg +#define VIRT_CTRL_CU_rw_core_mode rw_core_mode +#define VIRT_CTRL_CU_rw_clkr rw_clkr +#define VIRT_CTRL_CU_rw_sclkr rw_sclkr +#define VIRT_CTRL_CU_rw_sclkm3 rw_sclkm3 +#define VIRT_CTRL_CU_rw_dbg rw_dbg +#define VIRT_CTRL_CU_hcem hcem +#define VIRT_CTRL_CU_virt virt +#define VIRT_CTRL_CU_stop stop +#define VIRT_CTRL_CU_exc_c exc_c /* exceptions mask */ + /* to intercept */ +#define VIRT_CTRL_CU_exc_instr_debug exc_instr_debug +#define VIRT_CTRL_CU_exc_data_debug exc_data_debug +#define VIRT_CTRL_CU_exc_instr_page exc_instr_page +#define VIRT_CTRL_CU_exc_data_page exc_data_page +#define VIRT_CTRL_CU_exc_mova exc_mova +#define VIRT_CTRL_CU_exc_interrupt exc_interrupt +#define VIRT_CTRL_CU_exc_nm_interrupt exc_nm_interrupt +#define VIRT_CTRL_CU_glnch glnch /* modes of guest launch */ + /* instruction execution */ +#define VIRT_CTRL_CU_glnch_g_th g_th +#define VIRT_CTRL_CU_glnch_tir_fz tir_fz +#define VIRT_CTRL_CU_tir_rst tir_rst /* mode of TIR registers */ + /* restore */ +#define VIRT_CTRL_CU_reg word /* [63: 0] - entire register */ + +/* Bits mask of VIRT_CTRL_CU fields and flags */ +#define VIRT_CTRL_CU_ENV_C_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_evn_c = -1, }.word) +#define VIRT_CTRL_CU_RR_IDR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_idr = 1, }.word) +#define VIRT_CTRL_CU_RR_CLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_clkr = 1, }.word) +#define VIRT_CTRL_CU_RR_SCLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_sclkr = 1, }.word) +#define VIRT_CTRL_CU_RR_DBG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rr_dbg = 1, }.word) +#define VIRT_CTRL_CU_RW_CORE_MODE_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_core_mode = 1, }.word) +#define VIRT_CTRL_CU_RW_CLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_clkr = 1, }.word) +#define VIRT_CTRL_CU_RW_SCLKR_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_sclkr = 1, }.word) +#define VIRT_CTRL_CU_RW_SCLKM3_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_sclkm3 = 1, }.word) +#define VIRT_CTRL_CU_RW_DBG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_rw_dbg = 1, }.word) +#define VIRT_CTRL_CU_HCEM_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_hcem = 1, }.word) +#define VIRT_CTRL_CU_VIRT_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_virt = 1, }.word) +#define VIRT_CTRL_CU_STOP_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_stop = 1, }.word) +#define VIRT_CTRL_CU_EXC_C_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_c = -1, }.word) +#define VIRT_CTRL_CU_EXC_INSTR_DEBUG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_instr_debug = 1, }.word) +#define VIRT_CTRL_CU_EXC_DATA_DEBUG_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_data_debug = 1, }.word) +#define VIRT_CTRL_CU_EXC_INSTR_PAGE_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_instr_page = 1, }.word) +#define VIRT_CTRL_CU_EXC_DATA_PAGE_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_data_page = 1, }.word) +#define VIRT_CTRL_CU_EXC_MOVA_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_mova = 1, }.word) +#define VIRT_CTRL_CU_EXC_INTERRUPT_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_interrupt = 1, }.word) +#define VIRT_CTRL_CU_EXC_NM_INTERRUPT_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_exc_nm_interrupt = 1, }.word) +#define VIRT_CTRL_CU_GLNCH_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch = -1, }.word) +#define VIRT_CTRL_CU_GLNCH_G_TH_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch_g_th = 1, }.word) +#define 
VIRT_CTRL_CU_GLNCH_TIR_FZ_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_glnch_tir_fz = 1, }.word) +#define VIRT_CTRL_CU_TIR_RST_MASK \ + ((virt_ctrl_cu_t) { .VIRT_CTRL_CU_tir_rst = 1, }.word) + +#endif /* ! __ASSEMBLY__ */ + +#define INTC_CU_COND_EVENT_NO 0 +#define INTC_CU_COND_EVENT_MAX 16 +#define INTC_CU_COND_EXC_NO (INTC_CU_COND_EVENT_NO + \ + INTC_CU_COND_EVENT_MAX) +#define INTC_CU_COND_EXC_MAX 8 +#define INTC_CU_UNCOND_EVENT_NO (INTC_CU_COND_EXC_NO + \ + INTC_CU_COND_EXC_MAX) +#define INTC_CU_UNCOND_EVENT_MAX 8 +#define INTC_CU_EVENTS_NUM_MAX (INTC_CU_COND_EVENT_MAX + \ + INTC_CU_COND_EXC_MAX + \ + INTC_CU_UNCOND_EVENT_MAX) +typedef union { + struct { + u64 evn_c : INTC_CU_COND_EVENT_MAX; + u64 exc_c : INTC_CU_COND_EXC_MAX; + u64 evn_u : INTC_CU_UNCOND_EVENT_MAX; + u64 hi_half : 32; + }; + struct { + /* evn_c fields */ + u64 rr_idr : 1; + u64 rr_clkr : 1; + u64 rr_sclkr : 1; + u64 rr_dbg : 1; + u64 rw_core_mode : 1; + u64 rw_clkr : 1; + u64 rw_sclkr : 1; + u64 rw_sclkm3 : 1; + u64 rw_dbg : 1; + u64 hcem : 1; + u64 virt : 1; + u64 stop : 1; + u64 hret_last_wish : 1; + u64 __reserved_evn_c : 3; + /* exc_c fields */ + u64 exc_instr_debug : 1; + u64 exc_data_debug : 1; + u64 exc_instr_page : 1; + u64 exc_data_page : 1; + u64 exc_mova : 1; + u64 exc_interrupt : 1; + u64 exc_nm_interrupt : 1; + u64 __reserved_exc_c : 1; + /* evn_u fields */ + u64 hv_int : 1; + u64 hv_nm_int : 1; + u64 g_tmr : 1; + u64 rr : 1; + u64 rw : 1; + u64 exc_mem_error : 1; + u64 wait_trap : 1; + u64 dbg : 1; + /* high half of hdr_lo */ + u64 tir_fz : 1; + u64 __reserved : 31; + }; + u64 word; +} intc_info_cu_hdr_lo_t; + +/* evn_c fields bit # */ +#define INTC_CU_RR_IDR_NO 0 +#define INTC_CU_RR_CLKR_NO 1 +#define INTC_CU_RR_SCLKR_NO 2 +#define INTC_CU_RR_DBG_NO 3 +#define INTC_CU_RW_CORE_MODE_NO 4 +#define INTC_CU_RW_CLKR_NO 5 +#define INTC_CU_RW_SCLKR_NO 6 +#define INTC_CU_RW_SCLKM3_NO 7 +#define INTC_CU_RW_DBG_NO 8 +#define INTC_CU_HCEM_NO 9 +#define INTC_CU_VIRT_NO 10 +#define INTC_CU_STOP_NO 11 +#define INTC_CU_HRET_LAST_WISH_NO 12 + +/* INTC_INFO_CU.evn_c fields mask */ +#define intc_cu_evn_c_rr_idr_mask (1UL << INTC_CU_RR_IDR_NO) +#define intc_cu_evn_c_rr_clkr_mask (1UL << INTC_CU_RR_CLKR_NO) +#define intc_cu_evn_c_rr_sclkr_mask (1UL << INTC_CU_RR_SCLKR_NO) +#define intc_cu_evn_c_rr_dbg_mask (1UL << INTC_CU_RR_DBG_NO) +#define intc_cu_evn_c_rw_core_mode_mask (1UL << INTC_CU_RW_CORE_MODE_NO) +#define intc_cu_evn_c_rw_clkr_mask (1UL << INTC_CU_RW_CLKR_NO) +#define intc_cu_evn_c_rw_sclkr_mask (1UL << INTC_CU_RW_SCLKR_NO) +#define intc_cu_evn_c_rw_sclkm3_mask (1UL << INTC_CU_RW_SCLKM3_NO) +#define intc_cu_evn_c_rw_dbg_mask (1UL << INTC_CU_RW_DBG_NO) +#define intc_cu_evn_c_hcem_mask (1UL << INTC_CU_HCEM_NO) +#define intc_cu_evn_c_virt_mask (1UL << INTC_CU_VIRT_NO) +#define intc_cu_evn_c_stop_mask (1UL << INTC_CU_STOP_NO) +#define intc_cu_evn_c_hret_last_wish_mask (1UL << INTC_CU_HRET_LAST_WISH_NO) + +/* common mask of all 'read registers' interceptions */ +#define intc_cu_evn_c_rr_mask (intc_cu_evn_c_rr_idr_mask | \ + intc_cu_evn_c_rr_clkr_mask | \ + intc_cu_evn_c_rr_sclkr_mask | \ + intc_cu_evn_c_rr_dbg_mask) +/* common mask of all 'write registers' interceptions */ +#define intc_cu_evn_c_rw_mask (intc_cu_evn_c_rw_core_mode_mask | \ + intc_cu_evn_c_rw_clkr_mask | \ + intc_cu_evn_c_rw_sclkr_mask | \ + intc_cu_evn_c_rw_sclkm3_mask | \ + intc_cu_evn_c_rw_dbg_mask) +/* INTC_INFO_CU.hdr.evn_c fields mask */ +#define intc_cu_hdr_lo_rr_idr_mask \ + (intc_cu_evn_c_rr_idr_mask << INTC_CU_COND_EVENT_NO) +#define 
intc_cu_hdr_lo_rr_clkr_mask \ + (intc_cu_evn_c_rr_clkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rr_sclkr_mask \ + (intc_cu_evn_c_rr_sclkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rr_dbg_mask \ + (intc_cu_evn_c_rr_dbg_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_core_mode_mask \ + (intc_cu_evn_c_rw_core_mode_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_clkr_mask \ + (intc_cu_evn_c_rw_clkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_sclkr_mask \ + (intc_cu_evn_c_rw_sclkr_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_sclkm3_mask \ + (intc_cu_evn_c_rw_sclkm3_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_rw_dbg_mask \ + (intc_cu_evn_c_rw_dbg_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_hcem_mask \ + (intc_cu_evn_c_hcem_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_virt_mask \ + (intc_cu_evn_c_virt_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_stop_mask \ + (intc_cu_evn_c_stop_mask << INTC_CU_COND_EVENT_NO) +#define intc_cu_hdr_lo_hret_last_wish_mask \ + (intc_cu_evn_c_hret_last_wish_mask << INTC_CU_COND_EVENT_NO) + +/* common mask of all 'read registers' interceptions */ +#define intc_cu_hrd_lo_rr_mask (intc_cu_hdr_lo_rr_idr_mask | \ + intc_cu_hdr_lo_rr_clkr_mask | \ + intc_cu_hdr_lo_rr_sclkr_mask | \ + intc_cu_hdr_lo_rr_dbg_mask) +/* common mask of all 'write registers' interceptions */ +#define intc_cu_hrd_lo_rw_mask (intc_cu_hdr_lo_rw_core_mode_mask | \ + intc_cu_hdr_lo_rw_clkr_mask | \ + intc_cu_hdr_lo_rw_sclkr_mask | \ + intc_cu_hdr_lo_rw_sclkm3_mask | \ + intc_cu_hdr_lo_rw_dbg_mask) + +/* exc_c fields bit # */ +#define INTC_CU_EXC_INSTR_DEBUG_NO 0 +#define INTC_CU_EXC_DATA_DEBUG_NO 1 +#define INTC_CU_EXC_INSTR_PAGE_NO 2 +#define INTC_CU_EXC_DATA_PAGE_NO 3 +#define INTC_CU_EXC_MOVA_NO 4 +#define INTC_CU_EXC_INTERRUPT_NO 5 +#define INTC_CU_EXC_NM_INTERRUPT_NO 6 +/* exc_c fields mask */ +#define intc_cu_exc_c_exc_instr_debug_mask \ + (1UL << INTC_CU_EXC_INSTR_DEBUG_NO) +#define intc_cu_exc_c_exc_data_debug_mask \ + (1UL << INTC_CU_EXC_DATA_DEBUG_NO) +#define intc_cu_exc_c_exc_instr_page_mask \ + (1UL << INTC_CU_EXC_INSTR_PAGE_NO) +#define intc_cu_exc_c_exc_data_page_mask \ + (1UL << INTC_CU_EXC_DATA_PAGE_NO) +#define intc_cu_exc_c_exc_mova_mask \ + (1UL << INTC_CU_EXC_MOVA_NO) +#define intc_cu_exc_c_exc_interrupt_mask \ + (1UL << INTC_CU_EXC_INTERRUPT_NO) +#define intc_cu_exc_c_exc_nm_interrupt_mask \ + (1UL << INTC_CU_EXC_NM_INTERRUPT_NO) +/* INTC_INFO_CU.exc_c fields mask */ +#define intc_cu_hdr_lo_exc_instr_debug_mask \ + (intc_cu_exc_c_exc_instr_debug_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_data_debug_mask \ + (intc_cu_exc_c_exc_data_debug_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_instr_page_mask \ + (intc_cu_exc_c_exc_instr_page_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_data_page_mask \ + (intc_cu_exc_c_exc_data_page_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_mova_mask \ + (intc_cu_exc_c_exc_mova_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_interrupt_mask \ + (intc_cu_exc_c_exc_interrupt_mask << INTC_CU_COND_EXC_NO) +#define intc_cu_hdr_lo_exc_nm_interrupt_mask \ + (intc_cu_exc_c_exc_nm_interrupt_mask << INTC_CU_COND_EXC_NO) + +/* evn_u fields bit # */ +#define INTC_CU_HV_INT_NO 0 +#define INTC_CU_HV_NM_INT_NO 1 +#define INTC_CU_G_TMR_NO 2 +#define INTC_CU_RR_NO 3 +#define INTC_CU_RW_NO 4 +#define INTC_CU_EXC_MEM_ERROR_NO 5 +#define INTC_CU_WAIT_TRAP_NO 6 +#define INTC_CU_DBG_NO 7 +/* evn_u fields 
mask */
+#define intc_cu_evn_u_hv_int_mask	(1UL << INTC_CU_HV_INT_NO)
+#define intc_cu_evn_u_hv_nm_int_mask	(1UL << INTC_CU_HV_NM_INT_NO)
+#define intc_cu_evn_u_g_tmr_mask	(1UL << INTC_CU_G_TMR_NO)
+#define intc_cu_evn_u_rr_mask		(1UL << INTC_CU_RR_NO)
+#define intc_cu_evn_u_rw_mask		(1UL << INTC_CU_RW_NO)
+#define intc_cu_evn_u_exc_mem_error_mask \
+		(1UL << INTC_CU_EXC_MEM_ERROR_NO)
+#define intc_cu_evn_u_wait_trap_mask	(1UL << INTC_CU_WAIT_TRAP_NO)
+#define intc_cu_evn_u_dbg_mask		(1UL << INTC_CU_DBG_NO)
+/* INTC_INFO_CU.evn_u fields mask */
+#define intc_cu_hdr_lo_hv_int_mask \
+		(intc_cu_evn_u_hv_int_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_hv_nm_int_mask \
+		(intc_cu_evn_u_hv_nm_int_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_g_tmr_mask \
+		(intc_cu_evn_u_g_tmr_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_rr_mask \
+		(intc_cu_evn_u_rr_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_rw_mask \
+		(intc_cu_evn_u_rw_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_exc_mem_error_mask \
+		(intc_cu_evn_u_exc_mem_error_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_wait_trap_mask \
+		(intc_cu_evn_u_wait_trap_mask << INTC_CU_UNCOND_EVENT_NO)
+#define intc_cu_hdr_lo_dbg_mask \
+		(intc_cu_evn_u_dbg_mask << INTC_CU_UNCOND_EVENT_NO)
+
+#if (1UL << INTC_CU_EVENTS_NUM_MAX) < intc_cu_hdr_lo_dbg_mask
+#error "INTC_CU_EVENTS_NUM_MAX value is out of real events number"
+#endif
+
+#define INTC_CU_TIR_FZ_NO	32
+#define intc_cu_hdr_lo_tir_fz_mask	(1UL << INTC_CU_TIR_FZ_NO)
+
+typedef union {
+	u64 word;
+} intc_info_cu_hdr_hi_t;
+
+typedef struct e2k_intc_info_cu_hdr {
+	intc_info_cu_hdr_lo_t	lo;
+	intc_info_cu_hdr_hi_t	hi;
+} intc_info_cu_hdr_t;
+
+typedef union {
+	struct {
+		u64 event_code	: 8;
+		u64 ch_code	: 4;
+		u64 reg_num	: 8;
+		u64 dst		: 8;
+		u64 vm_dst	: 3;
+		u64 __reserved	: 33;
+	};
+	u64 word;
+} intc_info_cu_entry_lo_t;
+
+#define intc_cu_info_lo_get_event_code(x)	((x) & 0xff)
+
+/* Possible values for `INTC_INFO_CU[2 * j].event_code' */
+typedef enum info_cu_event_code {
+	ICE_FORCED = 0,
+	ICE_READ_CU = 1,
+	ICE_WRITE_CU = 2,
+	ICE_MASKED_HCALL = 3,
+	ICE_GLAUNCH = 4,
+	ICE_HRET = 5,
+} info_cu_event_code_t;
+
+typedef u64 intc_info_cu_entry_hi_t;
+
+typedef struct e2k_intc_info_cu_entry {
+	intc_info_cu_entry_lo_t	lo;
+	intc_info_cu_entry_hi_t	hi;
+	bool no_restore;
+} intc_info_cu_entry_t;
+
+#define INTC_INFO_CU_MAX	6
+#define INTC_INFO_CU_HDR_MAX	2
+#define INTC_INFO_CU_ENTRY_MAX	(INTC_INFO_CU_MAX - INTC_INFO_CU_HDR_MAX)
+#define INTC_INFO_CU_PAIRS_MAX	(INTC_INFO_CU_ENTRY_MAX / 2)
+
+typedef struct {
+	intc_info_cu_hdr_t	header;
+	intc_info_cu_entry_t	entry[INTC_INFO_CU_PAIRS_MAX];
+} intc_info_cu_t;
+
+typedef union {
+	struct {
+		u64 tmr		: 32;
+		u64 v		: 1;
+		u64 __reserved	: 31;
+	};
+	u64 word;
+} g_preempt_tmr_t;
+
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_KVM_CPU_HV_REGS_TYPES_H_ */
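To show how the types and masks above compose with save_intc_info_cu() from cpu_hv_regs_access.h, here is a hedged sketch; example_count_cu_reads() is illustrative only, while the types, masks and event codes are exactly the ones defined in this header.

/* illustrative sketch, not from the source tree */
static inline int example_count_cu_reads(const intc_info_cu_t *info, int num)
{
	int i, reads = 0;

	if (num < 0)
		return 0;	/* INTC_PTR_CU was empty: nothing intercepted */

	/* the header can be tested the same way, e.g.
	 * info->header.lo.word & intc_cu_hdr_lo_exc_interrupt_mask */
	for (i = 0; i < num; i++) {
		if (intc_cu_info_lo_get_event_code(info->entry[i].lo.word) ==
				ICE_READ_CU)
			reads++;	/* guest read of a CU register */
	}
	return reads;
}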
diff --git a/arch/e2k/include/asm/kvm/cpu_regs_access.h b/arch/e2k/include/asm/kvm/cpu_regs_access.h
new file mode 100644
index 0000000..1062baf
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/cpu_regs_access.h
@@ -0,0 +1,1771 @@
+#ifndef _E2K_KVM_CPU_REGS_ACCESS_H_
+#define _E2K_KVM_CPU_REGS_ACCESS_H_
+
+#ifdef __KERNEL__
+
+#include
+#include
+
+#include
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include
+
+/*
+ * Basic macros to access the virtual CPU registers status on guest.
+ */
+#define GUEST_CPU_REGS_STATUS	(offsetof(kvm_vcpu_state_t, cpu) + \
+				offsetof(kvm_cpu_state_t, regs_status))
+#define KVM_GET_CPU_REGS_STATUS() \
+		E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_REGS_STATUS)
+#define KVM_PUT_CPU_REGS_STATUS(status) \
+		E2K_STORE_GUEST_VCPU_STATE_D(GUEST_CPU_REGS_STATUS, status)
+#define KVM_RESET_CPU_REGS_STATUS() \
+		KVM_PUT_CPU_REGS_STATUS(0)
+#define KVM_PUT_UPDATED_CPU_REGS_FLAGS(flags) \
+({ \
+	unsigned long regs_status = KVM_GET_CPU_REGS_STATUS(); \
+	regs_status = KVM_SET_UPDATED_CPU_REGS_FLAGS(regs_status, flags); \
+	KVM_PUT_CPU_REGS_STATUS(regs_status); \
+})
+#define KVM_RESET_UPDATED_CPU_REGS_FLAGS(flags) \
+({ \
+	unsigned long regs_status = KVM_GET_CPU_REGS_STATUS(); \
+	regs_status = KVM_INIT_UPDATED_CPU_REGS_FLAGS(regs_status); \
+	KVM_PUT_CPU_REGS_STATUS(regs_status); \
+})
+
+/*
+ * Basic functions accessing virtual CPU registers on guest.
+ */
+#define GUEST_CPU_SREGS_BASE	(offsetof(kvm_vcpu_state_t, cpu) + \
+				offsetof(kvm_cpu_state_t, regs))
+#define GUEST_CPU_SREG(reg_name)	(GUEST_CPU_SREGS_BASE + \
+				(offsetof(kvm_cpu_regs_t, CPU_##reg_name)))
+#define GUEST_CPU_TIR_lo(TIR_no)	(GUEST_CPU_SREGS_BASE + \
+				(offsetof(kvm_cpu_regs_t, CPU_TIRs)) + \
+				(sizeof(e2k_tir_t) * TIR_no) + \
+				(offsetof(e2k_tir_t, TIR_lo)))
+#define GUEST_CPU_TIR_hi(TIR_no)	(GUEST_CPU_SREGS_BASE + \
+				(offsetof(kvm_cpu_regs_t, CPU_TIRs)) + \
+				(sizeof(e2k_tir_t) * TIR_no) + \
+				(offsetof(e2k_tir_t, TIR_hi)))
+#define GUEST_GET_CPU_SREG(reg_name) \
+		E2K_LOAD_GUEST_VCPU_STATE_W(GUEST_CPU_SREG(reg_name))
+#define GUEST_GET_CPU_DSREG(reg_name) \
+		E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_SREG(reg_name))
+#define GUEST_SET_CPU_SREG(reg_name, value) \
+		E2K_STORE_GUEST_VCPU_STATE_W(GUEST_CPU_SREG(reg_name), value)
+#define GUEST_SET_CPU_DSREG(reg_name, value) \
+		E2K_STORE_GUEST_VCPU_STATE_D(GUEST_CPU_SREG(reg_name), value)
+#define GUEST_GET_CPU_TIR_lo(TIR_no) \
+		E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_TIR_lo(TIR_no))
+#define GUEST_GET_CPU_TIR_hi(TIR_no) \
+		E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_CPU_TIR_hi(TIR_no))
+#define GUEST_IRQS_UNDER_UPSR() \
+		offsetof(kvm_vcpu_state_t, irqs_under_upsr)
+
+/*
+ * Read the virtual VCPU register with the VCPU #
+ */
+#define KVM_READ_VCPU_ID()	((u32)GUEST_GET_CPU_SREG(VCPU_ID))
+
+/*
+ * Read/write word Procedure Stack Hardware Top Pointer (PSHTP)
+ */
+#define KVM_READ_PSHTP_REG_VALUE()	GUEST_GET_CPU_DSREG(PSHTP)
+#define KVM_COPY_WRITE_PSHTP_REG_VALUE(PSHTP_value) \
+		GUEST_SET_CPU_DSREG(PSHTP, PSHTP_value)
+#define KVM_WRITE_PSHTP_REG_VALUE(PSHTP_value) \
+({ \
+	KVM_COPY_WRITE_PSHTP_REG_VALUE(PSHTP_value); \
+	if (IS_HV_GM()) \
+		NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value); \
+})
+
+#define KVM_NV_READ_PSHTP_REG_VALUE	KVM_READ_PSHTP_REG_VALUE
+#define KVM_READ_PSHTP_REG() \
+({ \
+	e2k_pshtp_t PSHTP; \
+	PSHTP.PSHTP_reg = KVM_READ_PSHTP_REG_VALUE(); \
+	PSHTP; \
+})
+
+/*
+ * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP)
+ */
+#define KVM_READ_PCSHTP_REG_SVALUE() \
+		(((e2k_pcshtp_t)GUEST_GET_CPU_SREG(PCSHTP) << \
+				(32 - E2K_PCSHTP_SIZE)) >> \
+					(32 - E2K_PCSHTP_SIZE))
+#define KVM_COPY_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \
+		GUEST_SET_CPU_DSREG(PCSHTP, PCSHTP_svalue)
+#define KVM_WRITE_PCSHTP_REG_SVALUE(PCSHTP_value) \
+({ \
+	KVM_COPY_WRITE_PCSHTP_REG_SVALUE(PCSHTP_value); \
+	if (IS_HV_GM()) \
+		NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_value); \
+})
+
+#define KVM_READ_PCSHTP_REG() \
+({ \
+	e2k_pcshtp_t PCSHTP; \
+	PCSHTP = KVM_READ_PCSHTP_REG_SVALUE(); \
+	PCSHTP; \
+})
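The accessors above work by address arithmetic: GUEST_CPU_SREG() folds a register name into a byte offset within kvm_vcpu_state_t, which the E2K_LOAD/E2K_STORE_GUEST_VCPU_STATE primitives then dereference relative to the guest's VCPU state base. A hedged sketch of how they compose (example_guest_probe() is hypothetical; the macros are the ones defined above):

/* illustrative sketch, not from the source tree */
static inline void example_guest_probe(void)
{
	u32 vcpu_id = KVM_READ_VCPU_ID();	/* GUEST_CPU_SREG(VCPU_ID) */
	e2k_pshtp_t pshtp = KVM_READ_PSHTP_REG();
	/* GUEST_CPU_TIR_lo(1) is the byte offset of TIRs[1].TIR_lo inside
	 * kvm_cpu_regs_t, relative to GUEST_CPU_SREGS_BASE */
	u64 tir1_lo = GUEST_GET_CPU_TIR_lo(1);

	(void) vcpu_id; (void) pshtp; (void) tir1_lo;
}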
+
+/*
+ * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD)
+ */
+
+#define KVM_READ_OSCUD_LO_REG_VALUE()	GUEST_GET_CPU_DSREG(OSCUD_lo)
+#define KVM_READ_OSCUD_HI_REG_VALUE()	GUEST_GET_CPU_DSREG(OSCUD_hi)
+#define BOOT_KVM_READ_OSCUD_LO_REG_VALUE()	GUEST_GET_CPU_DSREG(OSCUD_lo)
+#define BOOT_KVM_READ_OSCUD_HI_REG_VALUE()	GUEST_GET_CPU_DSREG(OSCUD_hi)
+
+#define KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+		GUEST_SET_CPU_DSREG(OSCUD_lo, OSCUD_lo_value)
+#define KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+		GUEST_SET_CPU_DSREG(OSCUD_hi, OSCUD_hi_value)
+#define KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+({ \
+	KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \
+	if (IS_HV_GM()) \
+		NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \
+})
+#define KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+({ \
+	KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \
+	if (IS_HV_GM()) \
+		NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \
+})
+
+#define BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+		GUEST_SET_CPU_DSREG(OSCUD_lo, OSCUD_lo_value)
+#define BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+		GUEST_SET_CPU_DSREG(OSCUD_hi, OSCUD_hi_value)
+#define BOOT_KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+({ \
+	BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \
+	if (BOOT_IS_HV_GM()) \
+		NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \
+})
+#define BOOT_KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+({ \
+	BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \
+	if (BOOT_IS_HV_GM()) \
+		NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \
+})
+
+#define KVM_COPY_WRITE_OSCUD_LO_REG(OSCUD_lo) \
+({ \
+	KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \
+})
+#define KVM_COPY_WRITE_OSCUD_HI_REG(OSCUD_hi) \
+({ \
+	KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \
+})
+#define KVM_WRITE_OSCUD_LO_REG(OSCUD_lo) \
+({ \
+	KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \
+})
+#define KVM_WRITE_OSCUD_HI_REG(OSCUD_hi) \
+({ \
+	KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \
+})
+#define KVM_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \
+({ \
+	KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \
+	KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \
+})
+#define KVM_WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \
+({ \
+	KVM_WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \
+					OSCUD_lo.OSCUD_lo_half); \
+})
+
+#define BOOT_KVM_COPY_WRITE_OSCUD_LO_REG(OSCUD_lo) \
+({ \
+	BOOT_KVM_COPY_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \
+})
+#define BOOT_KVM_COPY_WRITE_OSCUD_HI_REG(OSCUD_hi) \
+({ \
+	BOOT_KVM_COPY_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \
+})
+#define BOOT_KVM_WRITE_OSCUD_LO_REG(OSCUD_lo) \
+({ \
+	BOOT_KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \
+})
+#define BOOT_KVM_WRITE_OSCUD_HI_REG(OSCUD_hi) \
+({ \
+	BOOT_KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \
+})
+
+/*
+ * Read/write low/high double-word OS Globals Register (OSGD)
+ */
+
+#define KVM_READ_OSGD_LO_REG_VALUE()	GUEST_GET_CPU_DSREG(OSGD_lo)
+#define KVM_READ_OSGD_HI_REG_VALUE()	GUEST_GET_CPU_DSREG(OSGD_hi)
+#define BOOT_KVM_READ_OSGD_LO_REG_VALUE()	GUEST_GET_CPU_DSREG(OSGD_lo)
+#define BOOT_KVM_READ_OSGD_HI_REG_VALUE()	GUEST_GET_CPU_DSREG(OSGD_hi)
+
+#define KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
+		GUEST_SET_CPU_DSREG(OSGD_lo, OSGD_lo_value)
+#define KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
+		GUEST_SET_CPU_DSREG(OSGD_hi, OSGD_hi_value)
+#define BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
+		GUEST_SET_CPU_DSREG(OSGD_lo,
OSGD_lo_value) +#define BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + GUEST_SET_CPU_DSREG(OSGD_hi, OSGD_hi_value) + +#define KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ +({ \ + KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ +({ \ + KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ +}) +#define BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \ +}) +#define BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \ +}) + +#define KVM_COPY_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define KVM_COPY_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define KVM_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define KVM_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) + +#define BOOT_KVM_COPY_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_KVM_COPY_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + BOOT_KVM_COPY_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) +#define BOOT_KVM_WRITE_OSGD_LO_REG(OSGD_lo) \ +({ \ + BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \ +}) +#define BOOT_KVM_WRITE_OSGD_HI_REG(OSGD_hi) \ +({ \ + BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \ +}) + +/* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define KVM_READ_CUD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_lo) +#define KVM_READ_CUD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_hi) +#define BOOT_KVM_READ_CUD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_lo) +#define BOOT_KVM_READ_CUD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CUD_hi) + +#define KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_lo, CUD_lo_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_hi, CUD_hi_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ +}) + +#define BOOT_KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_lo, CUD_lo_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define BOOT_KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ +({ \ + GUEST_SET_CPU_DSREG(CUD_hi, CUD_hi_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ +}) + +#define KVM_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define KVM_WRITE_CUD_HI_REG(CUD_hi) \ +({ \ + KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \ +}) +#define KVM_WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \ +({ \ + KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \ + KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \ +}) +#define KVM_WRITE_CUD_REG(CUD_hi, CUD_lo) \ +({ \ + KVM_WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \ +}) + +#define BOOT_KVM_WRITE_CUD_LO_REG(CUD_lo) \ +({ \ + BOOT_KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \ +}) +#define 
BOOT_KVM_WRITE_CUD_HI_REG(CUD_hi) \
+({ \
+	BOOT_KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \
+})
+
+/*
+ * Read/write low/high double-word Globals Register (GD)
+ */
+
+#define KVM_READ_GD_LO_REG_VALUE()	GUEST_GET_CPU_DSREG(GD_lo)
+#define KVM_READ_GD_HI_REG_VALUE()	GUEST_GET_CPU_DSREG(GD_hi)
+#define BOOT_KVM_READ_GD_LO_REG_VALUE()	GUEST_GET_CPU_DSREG(GD_lo)
+#define BOOT_KVM_READ_GD_HI_REG_VALUE()	GUEST_GET_CPU_DSREG(GD_hi)
+
+#define KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value) \
+({ \
+	GUEST_SET_CPU_DSREG(GD_lo, GD_lo_value); \
+	if (IS_HV_GM()) \
+		NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value); \
+})
+#define KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value) \
+({ \
+	GUEST_SET_CPU_DSREG(GD_hi, GD_hi_value); \
+	if (IS_HV_GM()) \
+		NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value); \
+})
+
+#define BOOT_KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value) \
+({ \
+	GUEST_SET_CPU_DSREG(GD_lo, GD_lo_value); \
+	if (BOOT_IS_HV_GM()) \
+		NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value); \
+})
+#define BOOT_KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value) \
+({ \
+	GUEST_SET_CPU_DSREG(GD_hi, GD_hi_value); \
+	if (BOOT_IS_HV_GM()) \
+		NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value); \
+})
+#define KVM_WRITE_GD_LO_REG(GD_lo) \
+({ \
+	KVM_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half); \
+})
+#define KVM_WRITE_GD_HI_REG(GD_hi) \
+({ \
+	KVM_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half); \
+})
+#define BOOT_KVM_WRITE_GD_LO_REG(GD_lo) \
+({ \
+	BOOT_KVM_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half); \
+})
+#define BOOT_KVM_WRITE_GD_HI_REG(GD_hi) \
+({ \
+	BOOT_KVM_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half); \
+})
+
+/*
+ * Read/write double-word Compilation Unit Table Register (CUTD/OSCUTD)
+ */
+#define KVM_READ_CUTD_REG_VALUE()	GUEST_GET_CPU_DSREG(CUTD)
+#define KVM_READ_OSCUTD_REG_VALUE()	GUEST_GET_CPU_DSREG(OSCUTD)
+#define BOOT_KVM_READ_CUTD_REG_VALUE()	GUEST_GET_CPU_DSREG(CUTD)
+#define BOOT_KVM_READ_OSCUTD_REG_VALUE()	GUEST_GET_CPU_DSREG(OSCUTD)
+
+#define KVM_WRITE_CUTD_REG_VALUE(CUTD_value) \
+({ \
+	GUEST_SET_CPU_DSREG(CUTD, CUTD_value); \
+	if (IS_HV_GM()) \
+		NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value); \
+})
+#define KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value) \
+		GUEST_SET_CPU_DSREG(OSCUTD, CUTD_value)
+#define KVM_WRITE_OSCUTD_REG_VALUE(CUTD_value) \
+({ \
+	KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value); \
+	if (IS_HV_GM()) \
+		native_write_OSCUTD_reg_value(CUTD_value); \
+})
+#define BOOT_KVM_WRITE_CUTD_REG_VALUE(CUTD_value) \
+({ \
+	GUEST_SET_CPU_DSREG(CUTD, CUTD_value); \
+	if (BOOT_IS_HV_GM()) \
+		NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value); \
+})
+#define BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value) \
+		GUEST_SET_CPU_DSREG(OSCUTD, CUTD_value)
+#define BOOT_KVM_WRITE_OSCUTD_REG_VALUE(CUTD_value) \
+({ \
+	BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD_value); \
+	if (BOOT_IS_HV_GM()) \
+		boot_native_write_OSCUTD_reg_value(CUTD_value); \
+})
+
+#define KVM_WRITE_CUTD_REG(CUTD) \
+		KVM_WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg)
+#define KVM_COPY_WRITE_OSCUTD_REG(CUTD) \
+		KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg)
+#define KVM_WRITE_OSCUTD_REG(CUTD) \
+		KVM_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg)
+#define BOOT_KVM_WRITE_CUTD_REG(CUTD) \
+		BOOT_KVM_WRITE_CUTD_REG_VALUE(CUTD.CUTD_reg)
+#define BOOT_KVM_COPY_WRITE_OSCUTD_REG(CUTD) \
+		BOOT_KVM_COPY_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg)
+#define BOOT_KVM_WRITE_OSCUTD_REG(CUTD) \
+		BOOT_KVM_WRITE_OSCUTD_REG_VALUE(CUTD.CUTD_reg)
+
+/*
+ * Read/write word Compilation Unit Index Register (CUIR/OSCUIR)
+ */
+#define KVM_READ_CUIR_REG_VALUE()	GUEST_GET_CPU_SREG(CUIR)
+#define KVM_READ_OSCUIR_REG_VALUE()
GUEST_GET_CPU_SREG(OSCUIR) +#define BOOT_KVM_READ_CUIR_REG_VALUE() GUEST_GET_CPU_SREG(CUIR) +#define BOOT_KVM_READ_OSCUIR_REG_VALUE() GUEST_GET_CPU_SREG(OSCUIR) + +#define KVM_COPY_WRITE_OSCUIR_REG_VALUE(v) GUEST_SET_CPU_SREG(OSCUIR, (v)) +#define KVM_WRITE_OSCUIR_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_OSCUIR_REG_VALUE(v); \ + if (IS_HV_GM()) \ + native_write_OSCUIR_reg_value((v)); \ +}) +#define BOOT_KVM_COPY_WRITE_OSCUIR_REG_VALUE(v) GUEST_SET_CPU_SREG(OSCUIR, (v)) +#define BOOT_KVM_WRITE_OSCUIR_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_OSCUIR_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + boot_native_write_OSCUIR_reg_value((v)); \ +}) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define KVM_READ_PSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_lo) +#define KVM_READ_PSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_hi) +#define KVM_COPY_WRITE_PSP_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_lo, v) +#define KVM_COPY_WRITE_PSP_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_hi, v) +#define KVM_WRITE_PSP_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PSP_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE((v)); \ +}) +#define KVM_WRITE_PSP_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PSP_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_READ_PSP_LO_REG_VALUE KVM_READ_PSP_LO_REG_VALUE +#define KVM_NV_READ_PSP_HI_REG_VALUE KVM_READ_PSP_HI_REG_VALUE +#define KVM_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = KVM_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define KVM_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = KVM_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) + +#define KVM_NV_WRITE_PSP_REG_VALUE KVM_WRITE_PSP_REG_VALUE +#define KVM_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define KVM_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + KVM_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +#define BOOT_KVM_READ_PSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_lo) +#define BOOT_KVM_READ_PSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PSP_hi) +#define BOOT_KVM_COPY_WRITE_PSP_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_lo, v) +#define BOOT_KVM_COPY_WRITE_PSP_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(PSP_hi, v) +#define BOOT_KVM_WRITE_PSP_LO_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PSP_LO_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_PSP_LO_REG_VALUE((v)); \ +}) +#define BOOT_KVM_WRITE_PSP_HI_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PSP_HI_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + BOOT_KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + BOOT_KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define BOOT_KVM_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + BOOT_KVM_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define KVM_READ_PCSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_lo) +#define KVM_READ_PCSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_hi) +#define KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(PCSP_lo, v) +#define KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(PCSP_hi, v) +#define KVM_WRITE_PCSP_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(v); 
\ +}) +#define KVM_WRITE_PCSP_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_READ_PCSP_LO_REG_VALUE KVM_READ_PCSP_LO_REG_VALUE +#define KVM_NV_READ_PCSP_HI_REG_VALUE KVM_READ_PCSP_HI_REG_VALUE +#define KVM_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = KVM_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define KVM_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = KVM_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) + +#define KVM_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + KVM_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + KVM_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define KVM_NV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ + KVM_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) +#define KVM_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ +({ \ + KVM_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, PCSP_lo.PCSP_lo_half); \ +}) + +#define BOOT_KVM_READ_PCSP_LO_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_lo) +#define BOOT_KVM_READ_PCSP_HI_REG_VALUE() GUEST_GET_CPU_DSREG(PCSP_hi) +#define BOOT_KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v) \ + GUEST_SET_CPU_DSREG(PCSP_lo, v) +#define BOOT_KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v) \ + GUEST_SET_CPU_DSREG(PCSP_hi, v) +#define BOOT_KVM_WRITE_PCSP_LO_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PCSP_LO_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_PCSP_HI_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_PCSP_HI_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_PCSP_REG_VALUE(hi_value, lo_value) \ +({ \ + BOOT_KVM_WRITE_PCSP_HI_REG_VALUE(hi_value); \ + BOOT_KVM_WRITE_PCSP_LO_REG_VALUE(lo_value); \ +}) +#define BOOT_KVM_WRITE_PCSP_REG(hi, lo) \ +({ \ + BOOT_KVM_WRITE_PCSP_REG_VALUE(hi.PCSP_hi_half, lo.PCSP_lo_half); \ +}) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define KVM_COPY_READ_CR0_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CR0_lo) +#define KVM_COPY_READ_CR0_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CR0_hi) +#define KVM_COPY_READ_CR1_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CR1_lo) +#define KVM_COPY_READ_CR1_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CR1_hi) +#define KVM_READ_CR0_LO_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR0_LO_REG_VALUE() \ + : \ + KVM_COPY_READ_CR0_LO_REG_VALUE()); \ +}) +#define KVM_READ_CR0_HI_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR0_HI_REG_VALUE() \ + : \ + KVM_COPY_READ_CR0_HI_REG_VALUE()); \ +}) +#define KVM_READ_CR1_LO_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? NATIVE_NV_READ_CR1_LO_REG_VALUE() \ + : \ + KVM_COPY_READ_CR1_LO_REG_VALUE()); \ +}) +#define KVM_READ_CR1_HI_REG_VALUE() \ +({ \ + ((IS_HV_GM()) ? 
NATIVE_NV_READ_CR1_HI_REG_VALUE() \ + : \ + KVM_COPY_READ_CR1_HI_REG_VALUE()); \ +}) + +#define KVM_COPY_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + GUEST_SET_CPU_DSREG(CR0_lo, CR0_lo_value) +#define KVM_COPY_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + GUEST_SET_CPU_DSREG(CR0_hi, CR0_hi_value) +#define KVM_COPY_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + GUEST_SET_CPU_DSREG(CR1_lo, CR1_lo_value) +#define KVM_COPY_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + GUEST_SET_CPU_DSREG(CR1_hi, CR1_hi_value) +#define KVM_WRITE_CR0_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR0_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_CR0_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR0_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(v); \ +}) +#define KVM_WRITE_CR1_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR1_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_CR1_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_CR1_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_READ_CR0_LO_REG_VALUE() KVM_READ_CR0_LO_REG_VALUE() +#define KVM_NV_READ_CR0_HI_REG_VALUE() KVM_READ_CR0_HI_REG_VALUE() +#define KVM_NV_READ_CR1_LO_REG_VALUE() KVM_READ_CR1_LO_REG_VALUE() +#define KVM_NV_READ_CR1_HI_REG_VALUE() KVM_READ_CR1_HI_REG_VALUE() + +#define KVM_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + KVM_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define KVM_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + KVM_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define KVM_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + KVM_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define KVM_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + KVM_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define KVM_READ_CTPR_REG_VALUE(reg_no) GUEST_GET_CPU_DSREG(CTPR##reg_no) +#define KVM_READ_CTPR1_REG_VALUE() KVM_READ_CTPR_REG_VALUE(1) +#define KVM_READ_CTPR2_REG_VALUE() KVM_READ_CTPR_REG_VALUE(2) +#define KVM_READ_CTPR3_REG_VALUE() KVM_READ_CTPR_REG_VALUE(3) + +#define KVM_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + GUEST_SET_CPU_DSREG(CTPR##reg_no, CTPR_value) +#define KVM_WRITE_CTPR1_REG_VALUE(CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(1, CTPR_value) +#define KVM_WRITE_CTPR2_REG_VALUE(CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(2, CTPR_value) +#define KVM_WRITE_CTPR3_REG_VALUE(CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(3, CTPR_value) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define KVM_READ_TIRs_num() GUEST_GET_CPU_SREG(TIRs_num) +#define KVM_WRITE_TIRs_num(TIRs_num_value) \ + GUEST_SET_CPU_SREG(TIRs_num, TIRs_num_value) +#define KVM_READ_TIR_HI_REG_VALUE() \ +({ \ + unsigned long TIR_hi_value; \ + int TIRs_num; \ + TIRs_num = KVM_READ_TIRs_num(); \ + if (TIRs_num >= 0) { \ + TIR_hi_value = GUEST_GET_CPU_TIR_hi(TIRs_num); \ + } else { \ + TIR_hi_value = 0; \ + } \ + TIR_hi_value; \ +}) +#define KVM_READ_TIR_LO_REG_VALUE() \ +({ \ + unsigned long TIR_lo_value; \ + int TIRs_num; \ + TIRs_num = KVM_READ_TIRs_num(); \ + if (TIRs_num >= 0) { \ + TIR_lo_value = GUEST_GET_CPU_TIR_lo(TIRs_num); \ + TIRs_num--; \ + KVM_WRITE_TIRs_num(TIRs_num); \ + } else { \ + TIR_lo_value = 0; \ + } \ + TIR_lo_value; \ +}) + +#define KVM_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + KVM_WRITE_TIRs_num(-1) +#define KVM_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + KVM_WRITE_TIRs_num(-1) + +/* + * Read/write 
virtual deferred traps register - DTR + */ +#define KVM_READ_DTR_REG_VALUE() GUEST_GET_CPU_DSREG(DTR) + +#define KVM_WRITE_DTR_REG_VALUE(DTR_value) \ + GUEST_SET_CPU_DSREG(DTR, DTR_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define KVM_READ_USD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(USD_lo) +#define KVM_READ_USD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(USD_hi) +#define KVM_COPY_WRITE_USD_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_lo, v) +#define KVM_COPY_WRITE_USD_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_hi, v) +#define KVM_WRITE_USD_LO_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_USD_LO_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(v); \ +}) +#define KVM_WRITE_USD_HI_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_USD_HI_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(v); \ +}) + +#define KVM_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define KVM_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define KVM_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define KVM_NV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ + KVM_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) +#define KVM_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + KVM_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +#define KVM_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) \ +({ \ + KVM_WRITE_USBR_REG_VALUE(usbr); \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define KVM_NV_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) \ + KVM_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) + +#define BOOT_KVM_READ_USD_LO_REG_VALUE() GUEST_GET_CPU_DSREG(USD_lo) +#define BOOT_KVM_READ_USD_HI_REG_VALUE() GUEST_GET_CPU_DSREG(USD_hi) +#define BOOT_KVM_COPY_WRITE_USD_LO_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_lo, v) +#define BOOT_KVM_COPY_WRITE_USD_HI_REG_VALUE(v) GUEST_SET_CPU_DSREG(USD_hi, v) +#define BOOT_KVM_WRITE_USD_LO_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_USD_LO_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_USD_HI_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_USD_HI_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(v); \ +}) +#define BOOT_KVM_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + BOOT_KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + BOOT_KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define BOOT_KVM_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + BOOT_KVM_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) +#define BOOT_KVM_WRITE_USBR_USD_REG_VALUE(usbr, USD_hi_value, USD_lo_value) \ +({ \ + BOOT_KVM_WRITE_USBR_REG_VALUE(usbr); \ + BOOT_KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + BOOT_KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define KVM_READ_PUSD_LO_REG_VALUE() KVM_READ_USD_LO_REG_VALUE() +#define KVM_READ_PUSD_HI_REG_VALUE() KVM_READ_USD_HI_REG_VALUE() + +#define KVM_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(PUSD_lo_value) +#define KVM_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define KVM_READ_SBR_REG_VALUE() 
GUEST_GET_CPU_DSREG(SBR) +#define KVM_COPY_WRITE_SBR_REG_VALUE(v) GUEST_SET_CPU_DSREG(SBR, v) +#define KVM_WRITE_SBR_REG_VALUE(v) \ +({ \ + KVM_COPY_WRITE_SBR_REG_VALUE(v); \ + if (IS_HV_GM()) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(v); \ +}) + +#define KVM_READ_USBR_REG_VALUE() KVM_READ_SBR_REG_VALUE() +#define KVM_NV_READ_SBR_REG_VALUE() KVM_READ_SBR_REG_VALUE() + +#define KVM_WRITE_USBR_REG_VALUE(USBR_value) KVM_WRITE_SBR_REG_VALUE(USBR_value) +#define KVM_NV_WRITE_SBR_REG_VALUE(SBR_value) KVM_WRITE_SBR_REG_VALUE(SBR_value) +#define KVM_WRITE_USBR_REG(USBR) \ + KVM_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +#define BOOT_KVM_READ_SBR_REG_VALUE() GUEST_GET_CPU_DSREG(SBR) +#define BOOT_KVM_COPY_WRITE_SBR_REG_VALUE(v) GUEST_SET_CPU_DSREG(SBR, v) +#define BOOT_KVM_WRITE_SBR_REG_VALUE(v) \ +({ \ + BOOT_KVM_COPY_WRITE_SBR_REG_VALUE(v); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_NV_WRITE_SBR_REG_VALUE(v); \ +}) + +#define BOOT_KVM_READ_USBR_REG_VALUE() BOOT_KVM_READ_SBR_REG_VALUE() + +#define BOOT_KVM_WRITE_USBR_REG_VALUE(v) \ + BOOT_KVM_WRITE_SBR_REG_VALUE(v) +#define BOOT_KVM_WRITE_USBR_REG(USBR) \ + BOOT_KVM_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define KVM_READ_WD_REG_VALUE() GUEST_GET_CPU_DSREG(WD) +#define KVM_READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = KVM_READ_WD_REG_VALUE(); \ + WD; \ +}) + +#define KVM_WRITE_WD_REG_VALUE(WD_value) \ + GUEST_SET_CPU_DSREG(WD, WD_value) +#define KVM_WRITE_WD_REG(WD) \ + KVM_WRITE_WD_REG_VALUE(WD.WD_reg) + +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define KVM_READ_LSR_REG_VALUE() GUEST_GET_CPU_DSREG(LSR) + +#define KVM_WRITE_LSR_REG_VALUE(LSR_value) \ + GUEST_SET_CPU_DSREG(LSR, LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) + */ +#define KVM_READ_ILCR_REG_VALUE() GUEST_GET_CPU_DSREG(ILCR) + +#define KVM_WRITE_ILCR_REG_VALUE(ILCR_value) \ + GUEST_SET_CPU_DSREG(ILCR, ILCR_value) + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define KVM_GET_OSR0_REG_VALUE() GUEST_GET_CPU_DSREG(OSR0) + +#define KVM_SET_OSR0_REG_VALUE(osr0_value) \ + GUEST_SET_CPU_DSREG(OSR0, osr0_value) + +#define KVM_READ_CURRENT_REG_VALUE() \ +({ \ + unsigned long osr0_value; \ + if (IS_HV_GM()) { \ + osr0_value = NATIVE_GET_OSR0_REG_VALUE(); \ + } else { \ + osr0_value = KVM_GET_OSR0_REG_VALUE(); \ + } \ + osr0_value; \ +}) + +#define BOOT_KVM_READ_CURRENT_REG_VALUE() KVM_GET_OSR0_REG_VALUE() +#define KVM_WRITE_CURRENT_REG_VALUE(osr0_value) \ +({ \ + KVM_SET_OSR0_REG_VALUE(osr0_value); \ + if (IS_HV_CPU_HV_MMU_KVM()) { \ + NATIVE_SET_OSR0_REG_VALUE(osr0_value); \ + } \ +}) +#define BOOT_KVM_WRITE_CURRENT_REG_VALUE(osr0_value) \ +({ \ + KVM_SET_OSR0_REG_VALUE(osr0_value); \ + if (IS_HV_CPU_HV_MMU_KVM()) { \ + NATIVE_SET_OSR0_REG_VALUE(osr0_value); \ + } \ +}) + +#define KVM_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)KVM_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define BOOT_KVM_READ_CURRENT_REG() \ +({ \ + struct thread_info *TI; \ + TI = (struct thread_info *)BOOT_KVM_READ_CURRENT_REG_VALUE(); \ + TI; \ +}) +#define KVM_WRITE_CURRENT_REG(TI) \ + KVM_WRITE_CURRENT_REG_VALUE((unsigned long)TI) +#define BOOT_KVM_WRITE_CURRENT_REG(TI) \ + BOOT_KVM_WRITE_CURRENT_REG_VALUE((unsigned long)TI) + +/* + * Read/write OS Entries Mask (OSEM) + */ +#define KVM_READ_OSEM_REG_VALUE() GUEST_GET_CPU_SREG(OSEM) + +#define KVM_WRITE_OSEM_REG_VALUE(OSEM_value) \ + GUEST_SET_CPU_SREG(OSEM, 
+
+/*
+ * Read/write word Base Global Register (BGR)
+ */
+#define KVM_READ_BGR_REG_VALUE()	GUEST_GET_CPU_SREG(BGR)
+
+#define KVM_WRITE_BGR_REG_VALUE(BGR_value) \
+		GUEST_SET_CPU_SREG(BGR, BGR_value)
+
+#define BOOT_KVM_WRITE_BGR_REG_VALUE(BGR_value) \
+({ \
+	KVM_WRITE_BGR_REG_VALUE(BGR_value); \
+	if (BOOT_IS_HV_GM()) \
+		NATIVE_WRITE_BGR_REG_VALUE(BGR_value); \
+})
+
+/*
+ * Read CPU current clock register (CLKR)
+ */
+#define KVM_READ_CLKR_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(clkr)
+
+/*
+ * Read/Write system clock registers (SCLKM)
+ */
+#define KVM_READ_SCLKR_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sclkr)
+#define KVM_READ_SCLKM1_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sclkm1)
+#define KVM_READ_SCLKM2_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sclkm2)
+#define KVM_READ_SCLKM3_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(sclkm3)
+
+#define KVM_WRITE_SCLKR_REG_VALUE(reg_value) \
+		GUEST_SET_CPU_DSREG(SCLKR, reg_value)
+#define KVM_WRITE_SCLKM1_REG_VALUE(reg_value) \
+		GUEST_SET_CPU_DSREG(SCLKM1, reg_value)
+#define KVM_WRITE_SCLKM2_REG_VALUE(reg_value) \
+		GUEST_SET_CPU_DSREG(SCLKM2, reg_value)
+#define KVM_WRITE_SCLKM3_REG_VALUE(reg_value) \
+		GUEST_SET_CPU_DSREG(SCLKM3, reg_value)
+
+/*
+ * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1)
+ */
+#define KVM_READ_CU_HW0_REG_VALUE()	NATIVE_READ_CU_HW0_REG_VALUE()
+#define KVM_READ_CU_HW1_REG_VALUE() \
+({ \
+	u64 reg_value = -1; \
+	if (machine.get_cu_hw1 != NULL) \
+		reg_value = machine.get_cu_hw1(); \
+	reg_value; \
+})
+
+#define KVM_WRITE_CU_HW0_REG_VALUE(reg)	GUEST_SET_CPU_DSREG(CU_HW0, reg)
+#define KVM_WRITE_CU_HW1_REG_VALUE(reg)	GUEST_SET_CPU_DSREG(CU_HW1, reg)
+
+/*
+ * Read/write low/high double-word Recovery point register (RPR)
+ */
+#define KVM_READ_RPR_LO_REG_VALUE()	NATIVE_GET_DSREG_OPEN(rpr.lo)
+#define KVM_READ_RPR_HI_REG_VALUE()	NATIVE_GET_DSREG_OPEN(rpr.hi)
+#define KVM_READ_SBBP_REG_VALUE()	NATIVE_GET_DSREG_OPEN(sbbp)
+
+#define KVM_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \
+		NATIVE_SET_DSREG_OPEN(rpr.lo, RPR_lo_value)
+#define KVM_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \
+		NATIVE_SET_DSREG_OPEN(rpr.hi, RPR_hi_value)
+
+/*
+ * Read double-word CPU current Instruction Pointer register (IP)
+ */
+#define KVM_READ_IP_REG_VALUE()		NATIVE_GET_DSREG_CLOSED(ip)
+
+/*
+ * Read debug and monitors registers
+ */
+#define KVM_READ_DIBCR_REG_VALUE()	NATIVE_GET_SREG_CLOSED(dibcr)
+#define KVM_READ_DIBSR_REG_VALUE()	NATIVE_GET_SREG_CLOSED(dibsr)
+#define KVM_READ_DIMCR_REG_VALUE()	NATIVE_GET_DSREG_CLOSED(dimcr)
+#define KVM_READ_DIBAR0_REG_VALUE()	NATIVE_GET_DSREG_OPEN(dibar0)
+#define KVM_READ_DIBAR1_REG_VALUE()	NATIVE_GET_DSREG_OPEN(dibar1)
+#define KVM_READ_DIBAR2_REG_VALUE()	NATIVE_GET_DSREG_OPEN(dibar2)
+#define KVM_READ_DIBAR3_REG_VALUE()	NATIVE_GET_DSREG_OPEN(dibar3)
+#define KVM_READ_DIMAR0_REG_VALUE()	NATIVE_GET_DSREG_OPEN(dimar0)
+#define KVM_READ_DIMAR1_REG_VALUE()	NATIVE_GET_DSREG_OPEN(dimar1)
+
+#define KVM_WRITE_DIBCR_REG_VALUE(DIBCR_value) \
+		NATIVE_SET_SREG_CLOSED_NOEXC(dibcr, DIBCR_value, 4)
+#define KVM_WRITE_DIBSR_REG_VALUE(DIBSR_value) \
+		NATIVE_SET_SREG_CLOSED_NOEXC(dibsr, DIBSR_value, 4)
+#define KVM_WRITE_DIMCR_REG_VALUE(DIMCR_value) \
+		NATIVE_SET_DSREG_CLOSED_NOEXC(dimcr, DIMCR_value, 4)
+#define KVM_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \
+		NATIVE_SET_DSREG_CLOSED_NOEXC(dibar0, DIBAR0_value, 4)
+#define KVM_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \
+		NATIVE_SET_DSREG_CLOSED_NOEXC(dibar1, DIBAR1_value, 4)
+#define KVM_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \
+		NATIVE_SET_DSREG_CLOSED_NOEXC(dibar2, DIBAR2_value, 4)
+#define KVM_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dibar3, DIBAR3_value, 4) +#define KVM_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dimar0, DIMAR0_value, 4) +#define KVM_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(dimar1, DIMAR1_value, 4) + +/* + * Read/write double-word Compilation Unit Types Descriptor (TSD) + */ +#define KVM_READ_TSD_REG_VALUE() GUEST_GET_CPU_DSREG(TSD) + +#define KVM_WRITE_TSD_REG_VALUE(TSD_value) \ + GUEST_SET_CPU_DSREG(TSD, TSD_value) + +/* + * Read/write word Processor State Register (PSR) + */ +#define KVM_READ_PSR_REG_VALUE() \ +({ \ +extern void dump_stack(void); \ + unsigned long PSR_value = GUEST_GET_CPU_SREG(PSR); \ + unsigned long vcpu_base; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_base); \ + if (((kvm_vcpu_state_t *)(vcpu_base))->irqs_under_upsr && \ + ((PSR_value & ~PSR_PM) == 0)) \ + dump_stack(); \ + if (IS_HV_GM()) \ + PSR_value = NATIVE_NV_READ_PSR_REG_VALUE(); \ + PSR_value; \ +}) +#define BOOT_KVM_READ_PSR_REG_VALUE() \ +({ \ + unsigned long PSR_value; \ + \ + if (BOOT_IS_HV_GM()) \ + PSR_value = NATIVE_NV_READ_PSR_REG_VALUE(); \ + else \ + PSR_value = GUEST_GET_CPU_SREG(PSR); \ + PSR_value; \ +}) + +#define KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr) \ + KVM_DO_ATOMIC_WRITE_PSR_REG_VALUE(GUEST_VCPU_STATE_GREG, \ + GUEST_CPU_SREG(PSR), PSR_value, \ + GUEST_IRQS_UNDER_UPSR(), under_upsr) + +#define KVM_WRITE_SW_PSR_REG_VALUE(PSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == \ + (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) \ + under_upsr = true; \ + if (((PSR_value) & (PSR_IE | PSR_NMIE | PSR_UIE | PSR_UNMIE)) == 0) \ + under_upsr = false; \ + KVM_ATOMIC_WRITE_PSR_REG_VALUE(PSR_value, under_upsr); \ +}) +#define KVM_WRITE_PSR_REG_VALUE(PSR_value) \ +({ \ + KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \ + if (IS_HV_GM()) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value); \ +}) +#define BOOT_KVM_WRITE_PSR_REG_VALUE(PSR_value) \ +({ \ + KVM_WRITE_SW_PSR_REG_VALUE(PSR_value); \ + if (BOOT_IS_HV_GM()) \ + NATIVE_WRITE_PSR_REG_VALUE(PSR_value); \ +}) + +#define KVM_WRITE_PSR_REG(PSR) \ + KVM_WRITE_PSR_REG_VALUE((PSR).PSR_reg) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define KVM_READ_UPSR_REG_VALUE() \ +({ \ + unsigned long UPSR_value; \ + \ + if (IS_HV_GM()) \ + UPSR_value = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + else \ + UPSR_value = GUEST_GET_CPU_SREG(UPSR); \ + UPSR_value; \ +}) +#define BOOT_KVM_READ_UPSR_REG_VALUE() \ +({ \ + unsigned long UPSR_value; \ + \ + if (BOOT_IS_HV_GM()) \ + UPSR_value = NATIVE_NV_READ_UPSR_REG_VALUE(); \ + else \ + UPSR_value = GUEST_GET_CPU_SREG(UPSR); \ + UPSR_value; \ +}) +#if defined(CONFIG_DIRECT_VIRQ_INJECTION) +#define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + GUEST_SET_CPU_SREG(UPSR, UPSR_value); \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \ + if (under_upsr && vcpu_state->lapic.virqs_num.counter) { \ + if ((UPSR_value) & UPSR_IE) \ + HYPERVISOR_inject_interrupt(); \ + } \ +}) +#define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + kvm_vcpu_state_t *vcpu_state; \ + bool under_upsr; \ + \ + KVM_GET_VCPU_STATE_BASE(vcpu_state); \ + under_upsr = vcpu_state->irqs_under_upsr; \ + 
GUEST_SET_CPU_SREG(UPSR, UPSR_value); \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \ + if (under_upsr && vcpu_state->lapic.virqs_num.counter) { \ + if ((UPSR_value) & UPSR_IE) \ + HYPERVISOR_inject_interrupt(); \ + } \ +}) +#elif defined(CONFIG_VIRQ_VCPU_INJECTION) +#define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + GUEST_SET_CPU_SREG(UPSR, UPSR_value); \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \ +}) +#define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) \ +({ \ + GUEST_SET_CPU_SREG(UPSR, UPSR_value); \ + NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value); \ +}) +#else /* ! CONFIG_DIRECT_VIRQ_INJECTION && ! CONFIG_VIRQ_VCPU_INJECTION */ +#define KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + +#define KVM_WRITE_UPSR_REG(UPSR) \ + KVM_WRITE_UPSR_REG_VALUE((UPSR).UPSR_reg) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define KVM_READ_PFPFR_REG_VALUE() NATIVE_GET_SREG_OPEN(pfpfr) +#define KVM_READ_FPCR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpcr) +#define KVM_READ_FPSR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpsr) + +#define KVM_WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + NATIVE_SET_SREG_OPEN(pfpfr, PFPFR_value) +#define KVM_WRITE_FPCR_REG_VALUE(FPCR_value) \ + NATIVE_SET_SREG_OPEN(fpcr, FPCR_value) +#define KVM_WRITE_FPSR_REG_VALUE(FPSR_value) \ + NATIVE_SET_SREG_OPEN(fpsr, FPSR_value) + +/* + * Read/write low/high double-word Intel segments registers (xS) + */ + +#define KVM_READ_CS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(CS_lo) +#define KVM_READ_CS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(CS_hi) +#define KVM_READ_DS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(DS_lo) +#define KVM_READ_DS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(DS_hi) +#define KVM_READ_ES_LO_REG_VALUE() GUEST_GET_CPU_DSREG(ES_lo) +#define KVM_READ_ES_HI_REG_VALUE() GUEST_GET_CPU_DSREG(ES_hi) +#define KVM_READ_FS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(FS_lo) +#define KVM_READ_FS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(FS_hi) +#define KVM_READ_GS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(GS_lo) +#define KVM_READ_GS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(GS_hi) +#define KVM_READ_SS_LO_REG_VALUE() GUEST_GET_CPU_DSREG(SS_lo) +#define KVM_READ_SS_HI_REG_VALUE() GUEST_GET_CPU_DSREG(SS_hi) + +#define KVM_WRITE_CS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(CS_lo, sd) +#define KVM_WRITE_CS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(CS_hi, sd) +#define KVM_WRITE_DS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(DS_lo, sd) +#define KVM_WRITE_DS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(DS_hi, sd) +#define KVM_WRITE_ES_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(ES_lo, sd) +#define KVM_WRITE_ES_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(ES_hi, sd) +#define KVM_WRITE_FS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(FS_lo, sd) +#define KVM_WRITE_FS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(FS_hi, sd) +#define KVM_WRITE_GS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(GS_lo, sd) +#define KVM_WRITE_GS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(GS_hi, sd) +#define KVM_WRITE_SS_LO_REG_VALUE(sd) GUEST_SET_CPU_DSREG(SS_lo, sd) +#define KVM_WRITE_SS_HI_REG_VALUE(sd) GUEST_SET_CPU_DSREG(SS_hi, sd) + +/* + * Read doubleword User Processor Identification Register (IDR) + */ +#define KVM_READ_IDR_REG_VALUE() GUEST_GET_CPU_DSREG(IDR) + +/* + * Read/Write Processor Core Mode Register (CORE_MODE) + */ +#define KVM_READ_CORE_MODE_REG_VALUE() \ + GUEST_GET_CPU_DSREG(CORE_MODE) +#define KVM_WRITE_CORE_MODE_REG_VALUE(modes) \ + GUEST_SET_CPU_DSREG(CORE_MODE, modes) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is native (pure) guest kernel */ + +/* + * Set flags of updated VCPU 
registers
+ */
+#define PUT_UPDATED_CPU_REGS_FLAGS(flags) \
+		KVM_PUT_UPDATED_CPU_REGS_FLAGS(flags)
+
+#define READ_VCPU_ID()	KVM_READ_VCPU_ID()
+
+/*
+ * Read/write word Procedure Stack Hardware Top Pointer (PSHTP)
+ */
+#define READ_PSHTP_REG_VALUE()	KVM_READ_PSHTP_REG_VALUE()
+#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \
+		KVM_WRITE_PSHTP_REG_VALUE(PSHTP_value)
+
+/*
+ * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP)
+ */
+#define READ_PCSHTP_REG_SVALUE()	KVM_READ_PCSHTP_REG_SVALUE()
+#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \
+		KVM_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue)
+
+/*
+ * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD)
+ */
+
+#define READ_OSCUD_LO_REG_VALUE()	KVM_READ_OSCUD_LO_REG_VALUE()
+#define READ_OSCUD_HI_REG_VALUE()	KVM_READ_OSCUD_HI_REG_VALUE()
+#define BOOT_READ_OSCUD_LO_REG_VALUE()	BOOT_KVM_READ_OSCUD_LO_REG_VALUE()
+#define BOOT_READ_OSCUD_HI_REG_VALUE()	BOOT_KVM_READ_OSCUD_HI_REG_VALUE()
+
+#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+		KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value)
+#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+		KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value)
+#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+		BOOT_KVM_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value)
+#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+		BOOT_KVM_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value)
+
+/*
+ * Read/write low/high double-word OS Globals Register (OSGD)
+ */
+
+#define READ_OSGD_LO_REG_VALUE()	KVM_READ_OSGD_LO_REG_VALUE()
+#define READ_OSGD_HI_REG_VALUE()	KVM_READ_OSGD_HI_REG_VALUE()
+#define BOOT_READ_OSGD_LO_REG_VALUE()	KVM_READ_OSGD_LO_REG_VALUE()
+#define BOOT_READ_OSGD_HI_REG_VALUE()	KVM_READ_OSGD_HI_REG_VALUE()
+
+#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
+		KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
+#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
+		KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
+#define BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
+		BOOT_KVM_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
+#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
+		BOOT_KVM_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
+
+/*
+ * Read/write low/high double-word Compilation Unit Register (CUD)
+ */
+
+#define READ_CUD_LO_REG_VALUE()		KVM_READ_CUD_LO_REG_VALUE()
+#define READ_CUD_HI_REG_VALUE()		KVM_READ_CUD_HI_REG_VALUE()
+#define BOOT_READ_CUD_LO_REG_VALUE()	KVM_READ_CUD_LO_REG_VALUE()
+#define BOOT_READ_CUD_HI_REG_VALUE()	KVM_READ_CUD_HI_REG_VALUE()
+
+#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \
+		KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value)
+#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \
+		KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value)
+#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \
+		BOOT_KVM_WRITE_CUD_LO_REG_VALUE(CUD_lo_value)
+#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \
+		BOOT_KVM_WRITE_CUD_HI_REG_VALUE(CUD_hi_value)
+
+/*
+ * Read/write low/high double-word Globals Register (GD)
+ */
+
+#define READ_GD_LO_REG_VALUE()		KVM_READ_GD_LO_REG_VALUE()
+#define READ_GD_HI_REG_VALUE()		KVM_READ_GD_HI_REG_VALUE()
+#define BOOT_READ_GD_LO_REG_VALUE()	KVM_READ_GD_LO_REG_VALUE()
+#define BOOT_READ_GD_HI_REG_VALUE()	KVM_READ_GD_HI_REG_VALUE()
+
+#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \
+		KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value)
+#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \
+		KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value)
+#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \
+		BOOT_KVM_WRITE_GD_LO_REG_VALUE(GD_lo_value)
+#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \
+		BOOT_KVM_WRITE_GD_HI_REG_VALUE(GD_hi_value)
+
+/*
+ * 
Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define READ_PSP_LO_REG_VALUE() KVM_READ_PSP_LO_REG_VALUE() +#define READ_PSP_HI_REG_VALUE() KVM_READ_PSP_HI_REG_VALUE() +#define BOOT_READ_PSP_LO_REG_VALUE() KVM_READ_PSP_LO_REG_VALUE() +#define BOOT_READ_PSP_HI_REG_VALUE() KVM_READ_PSP_HI_REG_VALUE() + +#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + KVM_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + KVM_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define READ_PCSP_LO_REG_VALUE() KVM_READ_PCSP_LO_REG_VALUE() +#define READ_PCSP_HI_REG_VALUE() KVM_READ_PCSP_HI_REG_VALUE() +#define BOOT_READ_PCSP_LO_REG_VALUE() KVM_READ_PCSP_LO_REG_VALUE() +#define BOOT_READ_PCSP_HI_REG_VALUE() KVM_READ_PCSP_HI_REG_VALUE() + +#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + KVM_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + KVM_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + KVM_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + KVM_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define READ_CR0_LO_REG_VALUE() KVM_READ_CR0_LO_REG_VALUE() +#define READ_CR0_HI_REG_VALUE() KVM_READ_CR0_HI_REG_VALUE() +#define READ_CR1_LO_REG_VALUE() KVM_READ_CR1_LO_REG_VALUE() +#define READ_CR1_HI_REG_VALUE() KVM_READ_CR1_HI_REG_VALUE() + +#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + KVM_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + KVM_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + KVM_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + KVM_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define READ_CTPR_REG_VALUE(reg_no) KVM_READ_CTPR_REG_VALUE(reg_no) + +#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + KVM_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define READ_TIR_LO_REG_VALUE() KVM_READ_TIR_LO_REG_VALUE() +#define READ_TIR_HI_REG_VALUE() KVM_READ_TIR_HI_REG_VALUE() + +#define WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + KVM_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) +#define WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + KVM_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define READ_USD_LO_REG_VALUE() KVM_READ_USD_LO_REG_VALUE() +#define READ_USD_HI_REG_VALUE() KVM_READ_USD_HI_REG_VALUE() +#define BOOT_READ_USD_LO_REG_VALUE() KVM_READ_USD_LO_REG_VALUE() +#define BOOT_READ_USD_HI_REG_VALUE() KVM_READ_USD_HI_REG_VALUE() + +#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + KVM_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + KVM_WRITE_USD_HI_REG_VALUE(USD_hi_value) + +/* + * Read/write low/high 
double-word Protected User Stack Descriptor
+ * Register (PUSD)
+ */
+#define READ_PUSD_LO_REG_VALUE()	KVM_READ_PUSD_LO_REG_VALUE()
+#define READ_PUSD_HI_REG_VALUE()	KVM_READ_PUSD_HI_REG_VALUE()
+
+#define WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \
+		KVM_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value)
+#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \
+		KVM_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value)
+
+/*
+ * Read/write double-word User Stacks Base Register (USBR)
+ */
+#define READ_SBR_REG_VALUE()		KVM_READ_SBR_REG_VALUE()
+#define READ_USBR_REG_VALUE()		KVM_READ_USBR_REG_VALUE()
+#define BOOT_READ_USBR_REG_VALUE()	KVM_READ_USBR_REG_VALUE()
+#define BOOT_READ_SBR_REG_VALUE()	KVM_READ_SBR_REG_VALUE()
+
+#define WRITE_SBR_REG_VALUE(SBR_value) \
+		KVM_WRITE_SBR_REG_VALUE(SBR_value)
+#define WRITE_USBR_REG_VALUE(USBR_value) \
+		KVM_WRITE_USBR_REG_VALUE(USBR_value)
+#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \
+		KVM_WRITE_USBR_REG_VALUE(USBR_value)
+#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \
+		KVM_WRITE_SBR_REG_VALUE(SBR_value)
+
+/*
+ * Read/write double-word Window Descriptor Register (WD)
+ */
+#define READ_WD_REG_VALUE()		KVM_READ_WD_REG_VALUE()
+#define WRITE_WD_REG_VALUE(WD_value)	KVM_WRITE_WD_REG_VALUE(WD_value)
+
+/*
+ * Read/write double-word Loop Status Register (LSR)
+ */
+#define READ_LSR_REG_VALUE() \
+		KVM_READ_LSR_REG_VALUE()
+#define WRITE_LSR_REG_VALUE(LSR_value) \
+		KVM_WRITE_LSR_REG_VALUE(LSR_value)
+
+/*
+ * Read/write double-word Initial Loop Counters Register (ILCR)
+ */
+#define READ_ILCR_REG_VALUE() \
+		KVM_READ_ILCR_REG_VALUE()
+#define WRITE_ILCR_REG_VALUE(ILCR_value) \
+		KVM_WRITE_ILCR_REG_VALUE(ILCR_value)
+
+/*
+ * Read/write OS register which points to the current process thread info
+ * structure (OSR0)
+ */
+#define READ_CURRENT_REG_VALUE()	KVM_READ_CURRENT_REG_VALUE()
+#define BOOT_READ_CURRENT_REG_VALUE()	BOOT_KVM_READ_CURRENT_REG_VALUE()
+
+#define WRITE_CURRENT_REG_VALUE(osr0_value) \
+		KVM_WRITE_CURRENT_REG_VALUE(osr0_value)
+#define BOOT_WRITE_CURRENT_REG_VALUE(osr0_value) \
+		BOOT_KVM_WRITE_CURRENT_REG_VALUE(osr0_value)
+
+/*
+ * Read/write OS Entries Mask (OSEM)
+ */
+#define READ_OSEM_REG_VALUE() \
+		KVM_READ_OSEM_REG_VALUE()
+#define WRITE_OSEM_REG_VALUE(OSEM_value) \
+		KVM_WRITE_OSEM_REG_VALUE(OSEM_value)
+
+/*
+ * Read/write word Base Global Register (BGR)
+ */
+#define READ_BGR_REG_VALUE()		KVM_READ_BGR_REG_VALUE()
+#define BOOT_READ_BGR_REG_VALUE()	KVM_READ_BGR_REG_VALUE()
+
+#define WRITE_BGR_REG_VALUE(BGR_value) \
+		KVM_WRITE_BGR_REG_VALUE(BGR_value)
+#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \
+		BOOT_KVM_WRITE_BGR_REG_VALUE(BGR_value)
+
+/*
+ * Read CPU current clock register (CLKR)
+ */
+#define READ_CLKR_REG_VALUE()		KVM_READ_CLKR_REG_VALUE()
+
+/*
+ * Read/Write system clock registers (SCLKM)
+ */
+#define READ_SCLKR_REG_VALUE()		KVM_READ_SCLKR_REG_VALUE()
+#define READ_SCLKM1_REG_VALUE()		KVM_READ_SCLKM1_REG_VALUE()
+#define READ_SCLKM2_REG_VALUE()		KVM_READ_SCLKM2_REG_VALUE()
+#define READ_SCLKM3_REG_VALUE()		KVM_READ_SCLKM3_REG_VALUE()
+
+#define WRITE_SCLKR_REG_VALUE(reg_value) \
+		KVM_WRITE_SCLKR_REG_VALUE(reg_value)
+#define WRITE_SCLKM1_REG_VALUE(reg_value) \
+		KVM_WRITE_SCLKM1_REG_VALUE(reg_value)
+#define WRITE_SCLKM2_REG_VALUE(reg_value) \
+		KVM_WRITE_SCLKM2_REG_VALUE(reg_value)
+#define WRITE_SCLKM3_REG_VALUE(reg_value) \
+		KVM_WRITE_SCLKM3_REG_VALUE(reg_value)
+
+/*
+ * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1)
+ */
+#define READ_CU_HW0_REG_VALUE()	KVM_READ_CU_HW0_REG_VALUE()
+#define READ_CU_HW1_REG_VALUE()	KVM_READ_CU_HW1_REG_VALUE()
+
+#define WRITE_CU_HW0_REG_VALUE(reg)	KVM_WRITE_CU_HW0_REG_VALUE(reg)
+#define WRITE_CU_HW1_REG_VALUE(reg)	KVM_WRITE_CU_HW1_REG_VALUE(reg)
+
+/*
+ * Read/write low/high double-word Recovery point register (RPR)
+ */
+#define READ_RPR_LO_REG_VALUE()		KVM_READ_RPR_LO_REG_VALUE()
+#define READ_RPR_HI_REG_VALUE()		KVM_READ_RPR_HI_REG_VALUE()
+#define READ_SBBP_REG_VALUE()		KVM_READ_SBBP_REG_VALUE()
+
+#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \
+		KVM_WRITE_RPR_LO_REG_VALUE(RPR_lo_value)
+#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \
+		KVM_WRITE_RPR_HI_REG_VALUE(RPR_hi_value)
+
+/*
+ * Read double-word CPU current Instruction Pointer register (IP)
+ */
+#define READ_IP_REG_VALUE()		KVM_READ_IP_REG_VALUE()
+
+/*
+ * Read debug and monitors registers
+ */
+#define READ_DIBCR_REG_VALUE()		KVM_READ_DIBCR_REG_VALUE()
+#define READ_DIBSR_REG_VALUE()		KVM_READ_DIBSR_REG_VALUE()
+#define READ_DIMCR_REG_VALUE()		KVM_READ_DIMCR_REG_VALUE()
+#define READ_DIBAR0_REG_VALUE()		KVM_READ_DIBAR0_REG_VALUE()
+#define READ_DIBAR1_REG_VALUE()		KVM_READ_DIBAR1_REG_VALUE()
+#define READ_DIBAR2_REG_VALUE()		KVM_READ_DIBAR2_REG_VALUE()
+#define READ_DIBAR3_REG_VALUE()		KVM_READ_DIBAR3_REG_VALUE()
+#define READ_DIMAR0_REG_VALUE()		KVM_READ_DIMAR0_REG_VALUE()
+#define READ_DIMAR1_REG_VALUE()		KVM_READ_DIMAR1_REG_VALUE()
+
+#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \
+		KVM_WRITE_DIBCR_REG_VALUE(DIBCR_value)
+#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \
+		KVM_WRITE_DIBSR_REG_VALUE(DIBSR_value)
+#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \
+		KVM_WRITE_DIMCR_REG_VALUE(DIMCR_value)
+#define WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \
+		KVM_WRITE_DIBAR0_REG_VALUE(DIBAR0_value)
+#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \
+		KVM_WRITE_DIBAR1_REG_VALUE(DIBAR1_value)
+#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \
+		KVM_WRITE_DIBAR2_REG_VALUE(DIBAR2_value)
+#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \
+		KVM_WRITE_DIBAR3_REG_VALUE(DIBAR3_value)
+#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \
+		KVM_WRITE_DIMAR0_REG_VALUE(DIMAR0_value)
+#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \
+		KVM_WRITE_DIMAR1_REG_VALUE(DIMAR1_value)
+
+/*
+ * Read/write double-word Compilation Unit Table Register (CUTD)
+ */
+#define READ_CUTD_REG_VALUE() \
+		KVM_READ_CUTD_REG_VALUE()
+#define READ_OSCUTD_REG_VALUE()	KVM_READ_OSCUTD_REG_VALUE()
+#define WRITE_CUTD_REG_VALUE(CUTD_value) \
+		KVM_WRITE_CUTD_REG_VALUE(CUTD_value)
+#define WRITE_OSCUTD_REG_VALUE(OSCUTD_value) \
+		KVM_WRITE_OSCUTD_REG_VALUE(OSCUTD_value)
+#define BOOT_READ_CUTD_REG_VALUE()	BOOT_KVM_READ_CUTD_REG_VALUE()
+#define BOOT_READ_OSCUTD_REG_VALUE()	BOOT_KVM_READ_OSCUTD_REG_VALUE()
+#define BOOT_WRITE_CUTD_REG_VALUE(CUTD_value) \
+		BOOT_KVM_WRITE_CUTD_REG_VALUE(CUTD_value)
+#define BOOT_WRITE_OSCUTD_REG_VALUE(CUTD_value) \
+		BOOT_KVM_WRITE_OSCUTD_REG_VALUE(CUTD_value)
+
+/*
+ * Read word Compilation Unit Index Register (CUIR)
+ */
+#define READ_CUIR_REG_VALUE()		KVM_READ_CUIR_REG_VALUE()
+#define WRITE_CUIR_REG_VALUE(v)		KVM_WRITE_CUIR_REG_VALUE(v)
+#define READ_OSCUIR_REG_VALUE()		KVM_READ_OSCUIR_REG_VALUE()
+#define WRITE_OSCUIR_REG_VALUE(v)	KVM_WRITE_OSCUIR_REG_VALUE(v)
+#define BOOT_READ_CUIR_REG_VALUE()	BOOT_KVM_READ_CUIR_REG_VALUE()
+#define BOOT_WRITE_CUIR_REG_VALUE(v)	BOOT_KVM_WRITE_CUIR_REG_VALUE(v)
+#define BOOT_READ_OSCUIR_REG_VALUE()	BOOT_KVM_READ_OSCUIR_REG_VALUE()
+#define BOOT_WRITE_OSCUIR_REG_VALUE(v)	BOOT_KVM_WRITE_OSCUIR_REG_VALUE(v)
+
+/*
+ * Read/write double-word Compilation Unit Types Descriptor (TSD)
+ */
+#define READ_TSD_REG_VALUE() \
+	
KVM_READ_TSD_REG_VALUE() +#define WRITE_TSD_REG_VALUE(TSD_value) \ + KVM_WRITE_TSD_REG_VALUE(TSD_value) + +/* + * Read/write double-word Type Descriptor (TD) and current Type Register (TR) + */ +#define READ_TD_REG_VALUE() KVM_READ_TD_REG_VALUE() +#define READ_TR_REG_VALUE() KVM_READ_TR_REG_VALUE() + +#define WRITE_TD_REG_VALUE(TD_value) KVM_WRITE_TD_REG_VALUE(TD_value) +#define WRITE_TR_REG_VALUE(TR_value) KVM_WRITE_TR_REG_VALUE(TR_value) + +/* + * Read/write word Processor State Register (PSR) + */ +#define READ_PSR_REG_VALUE() KVM_READ_PSR_REG_VALUE() +#define BOOT_READ_PSR_REG_VALUE() BOOT_KVM_READ_PSR_REG_VALUE() + +#define WRITE_PSR_REG_VALUE(PSR_value) \ + KVM_WRITE_PSR_REG_VALUE(PSR_value) +#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \ + BOOT_KVM_WRITE_PSR_REG_VALUE(PSR_value) +#define KVM_WRITE_PSR_IRQ_BARRIER(PSR_value) \ + KVM_WRITE_PSR_REG_VALUE(PSR_value) +#define WRITE_PSR_IRQ_BARRIER(PSR_value) \ + KVM_WRITE_PSR_IRQ_BARRIER(PSR_value) + +/* + * Read/write word User Processor State Register (UPSR) + */ +#define READ_UPSR_REG_VALUE() KVM_READ_UPSR_REG_VALUE() +#define BOOT_READ_UPSR_REG_VALUE() BOOT_KVM_READ_UPSR_REG_VALUE() + +#define WRITE_UPSR_REG_VALUE(UPSR_value) \ + KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \ + BOOT_KVM_WRITE_UPSR_REG_VALUE(UPSR_value) +#define WRITE_UPSR_IRQ_BARRIER(UPSR_value) \ + KVM_WRITE_UPSR_REG_VALUE(UPSR_value) + +/* + * Read/write word floating point control registers (PFPFR/FPCR/FPSR) + */ +#define READ_PFPFR_REG_VALUE() KVM_READ_PFPFR_REG_VALUE() +#define READ_FPCR_REG_VALUE() KVM_READ_FPCR_REG_VALUE() +#define READ_FPSR_REG_VALUE() KVM_READ_FPSR_REG_VALUE() + +#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \ + KVM_WRITE_PFPFR_REG_VALUE(PFPFR_value) +#define WRITE_FPCR_REG_VALUE(FPCR_value) \ + KVM_WRITE_FPCR_REG_VALUE(FPCR_value) +#define WRITE_FPSR_REG_VALUE(FPSR_value) \ + KVM_WRITE_FPSR_REG_VALUE(FPSR_value) + +/* + * Read/write low/high double-word Intel segments registers (xS) + */ + +#define READ_CS_LO_REG_VALUE() KVM_READ_CS_LO_REG_VALUE() +#define READ_CS_HI_REG_VALUE() KVM_READ_CS_HI_REG_VALUE() +#define READ_DS_LO_REG_VALUE() KVM_READ_DS_LO_REG_VALUE() +#define READ_DS_HI_REG_VALUE() KVM_READ_DS_HI_REG_VALUE() +#define READ_ES_LO_REG_VALUE() KVM_READ_ES_LO_REG_VALUE() +#define READ_ES_HI_REG_VALUE() KVM_READ_ES_HI_REG_VALUE() +#define READ_FS_LO_REG_VALUE() KVM_READ_FS_LO_REG_VALUE() +#define READ_FS_HI_REG_VALUE() KVM_READ_FS_HI_REG_VALUE() +#define READ_GS_LO_REG_VALUE() KVM_READ_GS_LO_REG_VALUE() +#define READ_GS_HI_REG_VALUE() KVM_READ_GS_HI_REG_VALUE() +#define READ_SS_LO_REG_VALUE() KVM_READ_SS_LO_REG_VALUE() +#define READ_SS_HI_REG_VALUE() KVM_READ_SS_HI_REG_VALUE() + +#define WRITE_CS_LO_REG_VALUE(sd) KVM_WRITE_CS_LO_REG_VALUE(sd) +#define WRITE_CS_HI_REG_VALUE(sd) KVM_WRITE_CS_HI_REG_VALUE(sd) +#define WRITE_DS_LO_REG_VALUE(sd) KVM_WRITE_DS_LO_REG_VALUE(sd) +#define WRITE_DS_HI_REG_VALUE(sd) KVM_WRITE_DS_HI_REG_VALUE(sd) +#define WRITE_ES_LO_REG_VALUE(sd) KVM_WRITE_ES_LO_REG_VALUE(sd) +#define WRITE_ES_HI_REG_VALUE(sd) KVM_WRITE_ES_HI_REG_VALUE(sd) +#define WRITE_FS_LO_REG_VALUE(sd) KVM_WRITE_FS_LO_REG_VALUE(sd) +#define WRITE_FS_HI_REG_VALUE(sd) KVM_WRITE_FS_HI_REG_VALUE(sd) +#define WRITE_GS_LO_REG_VALUE(sd) KVM_WRITE_GS_LO_REG_VALUE(sd) +#define WRITE_GS_HI_REG_VALUE(sd) KVM_WRITE_GS_HI_REG_VALUE(sd) +#define WRITE_SS_LO_REG_VALUE(sd) KVM_WRITE_SS_LO_REG_VALUE(sd) +#define WRITE_SS_HI_REG_VALUE(sd) KVM_WRITE_SS_HI_REG_VALUE(sd) + +/* + * Read doubleword User Processor Identification 
Register (IDR)
+ */
+#define READ_IDR_REG_VALUE()		KVM_READ_IDR_REG_VALUE()
+#define BOOT_READ_IDR_REG_VALUE()	KVM_READ_IDR_REG_VALUE()
+
+/*
+ * Read/Write Processor Core Mode Register (CORE_MODE)
+ */
+#define READ_CORE_MODE_REG_VALUE() \
+		KVM_READ_CORE_MODE_REG_VALUE()
+#define BOOT_READ_CORE_MODE_REG_VALUE() \
+		KVM_READ_CORE_MODE_REG_VALUE()
+#define WRITE_CORE_MODE_REG_VALUE(modes) \
+		KVM_WRITE_CORE_MODE_REG_VALUE(modes)
+#define BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \
+		KVM_WRITE_CORE_MODE_REG_VALUE(modes)
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_KVM_CPU_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/kvm/csd_lock.h b/arch/e2k/include/asm/kvm/csd_lock.h
new file mode 100644
index 0000000..36b9998
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/csd_lock.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_E2K_KVM_CSD_LOCK_H
+#define _ASM_E2K_KVM_CSD_LOCK_H
+/*
+ * This file implements, on the host side, the arch-dependent parts of the
+ * kvm guest csd_lock/csd_unlock functions that serialize access to per-cpu
+ * csd resources
+ *
+ * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_SMP
+
+#include
+#include
+
+typedef struct csd_lock_waiter {
+	struct list_head wait_list;
+	struct kvm_vcpu *vcpu;
+	struct task_struct *task;
+	void *lock;
+} csd_lock_waiter_t;
+
+/* max number of csd lock waiter structures: */
+/* on each VCPU 2 structures - current and next */
+#define KVM_MAX_CSD_LOCK_FREE_NUM	(KVM_MAX_VCPUS * 2)
+
+extern int kvm_guest_csd_lock_ctl(struct kvm_vcpu *vcpu,
+			csd_ctl_t csd_ctl_no, void *lock);
+
+extern int kvm_guest_csd_lock_init(struct kvm *kvm);
+extern void kvm_guest_csd_lock_destroy(struct kvm *kvm);
+
+#else /* ! CONFIG_SMP */
+#define kvm_guest_csd_lock_ctl(vcpu, csd_ctl_no, lock)	(-ENOSYS)
+#define kvm_guest_csd_lock_init(kvm)			(0)
+#define kvm_guest_csd_lock_destroy(kvm)
+#endif /* CONFIG_SMP */
+#endif /* _ASM_E2K_KVM_CSD_LOCK_H */
\ No newline at end of file
diff --git a/arch/e2k/include/asm/kvm/debug.h b/arch/e2k/include/asm/kvm/debug.h
new file mode 100644
index 0000000..a337d98
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/debug.h
@@ -0,0 +1,119 @@
+/*
+ * KVM guest kernel processes support
+ * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_DEBUG_H
+#define _E2K_KVM_DEBUG_H
+
+/* do not include this header directly, only through asm/e2k_debug.h */
+
+#include
+
+/*
+ * Some definitions to print/dump/show stacks
+ */
+
+extern e2k_addr_t kvm_get_guest_phys_addr(struct task_struct *task,
+						e2k_addr_t virt);
+extern void kvm_print_all_vm_stacks(void);
+extern void kvm_print_vcpu_stack(struct kvm_vcpu *vcpu);
+extern void kvm_dump_guest_stack(struct task_struct *task,
+		stack_regs_t *const regs, bool show_reg_window);
+
+#define IS_GUEST_USER_ADDR(task, addr) \
+		(((e2k_addr_t)(addr)) < GUEST_TASK_SIZE)
+#define GUEST_GET_PHYS_ADDR(task, addr) \
+({ \
+	e2k_addr_t phys; \
+	if (IS_GUEST_USER_ADDR(task, addr)) \
+		phys = (unsigned long)user_address_to_pva(task, addr); \
+	else \
+		phys = (unsigned long)kernel_address_to_pva(addr); \
+	phys; \
+})
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is native guest kernel */
+#include
+#else	/* CONFIG_VIRTUALIZATION && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native host kernel with virtualization support */ +/* or it is paravirtualized host and guest kernel */ +#define GET_PHYS_ADDR(task, addr) \ +({ \ + struct thread_info *ti = task_thread_info(task); \ + e2k_addr_t phys; \ + \ + if (paravirt_enabled() && !IS_HV_GM()) { \ + /* it is guest kernel or user address */ \ + phys = GUEST_GET_PHYS_ADDR(task, addr); \ + } else if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) { \ + /* it is native kernel or user process of host */ \ + phys = NATIVE_GET_PHYS_ADDR(task, addr); \ + } else { \ + /* it is virtual CPU process of host and it can run */ \ + /* host kernel (hypercall or trap), guest kernel */ \ + /* or guest user */ \ + phys = kvm_get_guest_phys_addr(task, addr); \ + } \ + phys; \ +}) +#define debug_guest_regs(task) \ + (paravirt_enabled() && !IS_HV_GM() || \ + test_ti_thread_flag(task_thread_info(task), \ + TIF_VIRTUALIZED_GUEST)) +#define get_cpu_type_name() \ + ((paravirt_enabled()) ? "VCPU" : "CPU") + +static inline void print_all_guest_stacks(void) +{ + kvm_print_all_vm_stacks(); +} +static inline void print_guest_vcpu_stack(struct kvm_vcpu *vcpu) +{ + kvm_print_vcpu_stack(vcpu); +} +static inline void +print_guest_stack(struct task_struct *task, + stack_regs_t *const regs, bool show_reg_window) +{ + kvm_dump_guest_stack(task, regs, show_reg_window); +} +#include +static inline void +host_ftrace_stop(void) +{ + if (paravirt_enabled()) + HYPERVISOR_ftrace_stop(); +} +static inline void +host_ftrace_dump(void) +{ + if (paravirt_enabled()) + HYPERVISOR_ftrace_dump(); +} + +#include + +/* Read instruction word (two syllables) from IP address */ +static inline unsigned long +read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip) +{ + if (!paravirt_enabled() || IS_HV_GM()) + return native_read_instr_on_IP(ip, phys_ip); + else + return kvm_read_instr_on_IP(ip, phys_ip); +} +/* Write modified instruction word at IP address */ +static inline void +modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip, + unsigned long instr_word) +{ + if (!paravirt_enabled() || IS_HV_GM()) + native_modify_instr_on_IP(ip, phys_ip, instr_word); + else + kvm_modify_instr_on_IP(ip, phys_ip, instr_word); +} +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_DEBUG_H */ diff --git a/arch/e2k/include/asm/kvm/gmmu_context.h b/arch/e2k/include/asm/kvm/gmmu_context.h new file mode 100644 index 0000000..d0e28f1 --- /dev/null +++ b/arch/e2k/include/asm/kvm/gmmu_context.h @@ -0,0 +1,339 @@ +/* + * KVM guest kernel virtual space context support + * Copyright 2016 Salavat S. Gilyazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GMMU_CONTEXT_H +#define _E2K_KVM_GMMU_CONTEXT_H + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_SWITCH_MODE +#undef DebugKVMSW +#define DEBUG_KVM_SWITCH_MODE 0 /* switch mm debugging */ +#define DebugKVMSW(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SWITCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define GUEST_USER_PTRS_PER_PGD (GUEST_PAGE_OFFSET / PGDIR_SIZE) +#define GUEST_KERNEL_PGD_PTRS_START GUEST_USER_PTRS_PER_PGD +#define GUEST_KERNEL_PGD_PTRS_END (GUEST_KERNEL_MEM_END / PGDIR_SIZE) +#define GUEST_KERNEL_PTRS_PER_PGD (GUEST_KERNEL_PGD_PTRS_END - \ + GUEST_KERNEL_PGD_PTRS_START) +#define HOST_USER_PTRS_PER_PGD (HOST_PAGE_OFFSET / PGDIR_SIZE) + +#ifdef CONFIG_VIRTUALIZATION + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline void +copy_guest_user_pgd_to_kernel_root_pt(pgd_t *user_pgd) +{ + KVM_BUG_ON(MMU_IS_SEPARATE_PT()); + copy_user_pgd_to_kernel_pgd_range(cpu_kernel_root_pt, user_pgd, + 0, GUEST_USER_PTRS_PER_PGD); +} +static inline void +copy_guest_kernel_pgd_to_kernel_root_pt(pgd_t *user_pgd) +{ + KVM_BUG_ON(MMU_IS_SEPARATE_PT()); + copy_user_pgd_to_kernel_pgd_range(cpu_kernel_root_pt, user_pgd, + GUEST_KERNEL_PGD_PTRS_START, + GUEST_KERNEL_PGD_PTRS_END); +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +static inline int +kvm_init_new_context(struct kvm *kvm, gmm_struct_t *gmm) +{ + /* current cui of guest user will be inited later while */ + /* switch to new guest user process */ + __init_new_context(NULL, NULL, &gmm->context); + return 0; +} + +#ifdef CONFIG_KVM_HV_MMU +static inline pgd_t * +kvm_mmu_get_init_gmm_root(struct kvm *kvm) +{ + GTI_BUG_ON(pv_mmu_get_init_gmm(kvm) == NULL); + if (!VALID_PAGE(pv_mmu_get_init_gmm(kvm)->root_hpa)) + return NULL; + return (pgd_t *)__va(pv_mmu_get_init_gmm(kvm)->root_hpa); +} +static inline void +kvm_mmu_set_init_gmm_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + gmm_struct_t *gmm = pv_mmu_get_init_gmm(vcpu->kvm); + gpa_t root_gpa; + + if (gmm == NULL) + return; + KVM_BUG_ON(VALID_PAGE(gmm->root_hpa)); + if (VALID_PAGE(root)) { + gmm->root_hpa = root; + } + if (is_sep_virt_spaces(vcpu)) { + root_gpa = kvm_get_space_type_guest_os_root(vcpu); + } else { + root_gpa = kvm_get_space_type_guest_u_root(vcpu); + } + gmm->u_pptb = vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + gmm->os_pptb = vcpu->arch.mmu.get_vcpu_os_pptb(vcpu); +} +static inline pgd_t * +kvm_mmu_get_gmm_root(struct gmm_struct *gmm) +{ + GTI_BUG_ON(gmm == NULL); + if (!VALID_PAGE(gmm->root_hpa)) + return NULL; + return (pgd_t *)__va(gmm->root_hpa); +} +static inline pgd_t * +kvm_mmu_load_the_gmm_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + pgd_t *root; + bool u_space = gmm != pv_vcpu_get_init_gmm(vcpu); + + GTI_BUG_ON(vcpu == NULL); + root = kvm_mmu_get_gmm_root(gmm); + GTI_BUG_ON(root == NULL); + + if (unlikely(!u_space)) { + if (unlikely(is_sep_virt_spaces(vcpu))) { + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->os_pptb); + kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root)); + } else { + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, gmm->u_pptb); + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->u_vptb); + kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root)); + kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root)); + } + } else { + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, gmm->u_pptb); + kvm_set_space_type_spt_u_root(vcpu, (hpa_t)__pa(root)); + if (likely(!is_sep_virt_spaces(vcpu))) { + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, gmm->u_pptb); + kvm_set_space_type_spt_os_root(vcpu, (hpa_t)__pa(root)); + } + } + return root; +} + +static inline pgd_t * +kvm_mmu_load_gmm_root(thread_info_t *next_ti, gthread_info_t *next_gti) +{ + struct kvm_vcpu *vcpu; + gmm_struct_t *next_gmm = next_gti->gmm; + pgd_t *root; + + vcpu = next_ti->vcpu; + root = kvm_mmu_load_the_gmm_root(vcpu, 
next_gmm); + return root; +} + +static inline pgd_t * +kvm_mmu_load_init_root(struct kvm_vcpu *vcpu) +{ + gmm_struct_t *init_gmm; + pgd_t *root; + + init_gmm = pv_vcpu_get_init_gmm(vcpu); + root = kvm_mmu_load_the_gmm_root(vcpu, init_gmm); + return root; +} +#else /* !CONFIG_KVM_HV_MMU */ +static inline pgd_t * +kvm_mmu_get_init_gmm_root(struct kvm *kvm) +{ + return NULL; +} +static inline pgd_t * +kvm_mmu_get_gmm_root(struct gmm_struct *gmm) +{ + return NULL; +} +static inline pgd_t * +kvm_mmu_load_the_gmm_root(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + return NULL; +} +static inline pgd_t * +kvm_mmu_load_gmm_root(thread_info_t *next_ti, gthread_info_t *next_gti) +{ + return kvm_mmu_get_gmm_root(next_gti->gmm); +} + +static inline pgd_t * +kvm_mmu_load_init_root(struct kvm_vcpu *vcpu) +{ + return kvm_mmu_get_init_gmm_root(vcpu->kvm); +} +#endif /* CONFIG_KVM_HV_MMU */ + +static inline void +switch_guest_pgd(pgd_t *next_pgd) +{ + thread_info_t *thread_info = native_current_thread_info(); + pgd_t *pgd_to_set; + + DebugKVMSW("CPU #%d %s(%d) kernel image pgd %px = 0x%lx\n", + raw_smp_processor_id(), current->comm, current->pid, + thread_info->kernel_image_pgd_p, + (thread_info->kernel_image_pgd_p) ? + pgd_val(*thread_info->kernel_image_pgd_p) + : + 0); + KVM_BUG_ON(next_pgd == NULL); + + if (unlikely(test_ti_thread_flag(thread_info, TIF_PARAVIRT_GUEST))) { +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) + pgd_to_set = NULL; + else +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + pgd_to_set = thread_info->vcpu_pgd; + if (pgd_to_set) { + /* copy user part of PT including guest kernel part */ + copy_pgd_range(pgd_to_set, next_pgd, + 0, USER_PTRS_PER_PGD); + } + } else { + pgd_to_set = next_pgd; + } + + reload_root_pgd(pgd_to_set); + /* FIXME: support of guest secondary space is not yet implemented + reload_secondary_page_dir(mm); + */ + + /* any function call can fill old state of hardware stacks */ + /* so after all calls do flush stacks again */ + NATIVE_FLUSHCPU; + E2K_WAIT(_all_e); +} + +#define DO_NOT_USE_ACTIVE_GMM /* turn OFF optimization */ + +static inline void +switch_guest_mm(gthread_info_t *next_gti, struct gmm_struct *next_gmm) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu); + gmm_struct_t *active_gmm; + pgd_t *next_pgd; + + DebugKVMSW("started to switch guest mm from GPID #%d to GPID #%d\n", + cur_gti->gpid->nid.nr, next_gti->gpid->nid.nr); + active_gmm = pv_vcpu_get_active_gmm(vcpu); + if (next_gmm == NULL || next_gti->gmm == NULL) { +#ifdef DO_NOT_USE_ACTIVE_GMM + /* switch to guest kernel thread, but optimization */ + /* has been turned OFF, so switch to init gmm & PTs */ + next_gmm = pv_vcpu_get_init_gmm(vcpu); +#else /* !DO_NOT_USE_ACTIVE_GMM */ + /* switch to guest kernel thread: do not switch mm */ + if (active_gmm == NULL) { + /* now active is guest kernel init mm */ + DebugKVMSW("task to switch is guest kernel thread, " + "active mm is init mm\n"); + } else { + DebugKVMSW("task to switch is guest kernel thread, " + "active mm is now %px #%d\n", + active_gmm, active_gmm->nid.nr); + } + goto out; +#endif /* DO_NOT_USE_ACTIVE_GMM */ + } else if (active_gmm == next_gmm) { + /* new mm is already active: so do not switch to mm again */ + DebugKVMSW("task to switch is guest user thread, but its mm is " + "already active, so do not switch to active mm %px #%d " + "again\n", + active_gmm, active_gmm->nid.nr); + goto out; + } + if (likely(!next_gmm->in_release && 
!next_gti->gmm_in_release &&
+			!pv_vcpu_is_init_gmm(vcpu, next_gmm))) {
+		next_pgd = kvm_mmu_load_gmm_root(current_thread_info(),
+							next_gti);
+	} else {
+		next_pgd = kvm_mmu_load_init_root(vcpu);
+	}
+	switch_guest_pgd(next_pgd);
+	pv_vcpu_set_active_gmm(vcpu, next_gmm);
+	DebugKVMSW("task to switch is guest user thread, and its mm is not "
+		"already active, so switch and make active mm %px #%d\n",
+		next_gmm, next_gmm->nid.nr);
+	return;
+out:
+	if (DEBUG_KVM_SWITCH_MODE) {
+		/* any function call can fill old state of hardware stacks */
+		/* so after all calls do flush stacks again */
+		NATIVE_FLUSHCPU;
+		E2K_WAIT(_all_e);
+	}
+}
+
+static inline void
+kvm_switch_to_init_guest_mm(struct kvm_vcpu *vcpu)
+{
+	gthread_info_t *cur_gti = pv_vcpu_get_gti(vcpu);
+	gmm_struct_t *init_gmm;
+	gmm_struct_t *active_gmm;
+	pgd_t *root;
+
+	init_gmm = pv_vcpu_get_init_gmm(vcpu);
+	active_gmm = pv_vcpu_get_active_gmm(vcpu);
+	if (unlikely(init_gmm == active_gmm)) {
+		/* already on init mm */
+		return;
+	}
+	KVM_BUG_ON(cur_gti->gmm != active_gmm);
+	root = kvm_mmu_load_the_gmm_root(vcpu, init_gmm);
+	switch_guest_pgd(root);
+	cur_gti->gmm_in_release = true;
+	pv_vcpu_set_active_gmm(vcpu, init_gmm);
+	pv_vcpu_clear_gmm(vcpu);
+}
+
+static inline void
+kvm_guest_kernel_pgd_populate(struct mm_struct *mm, pgd_t *pgd)
+{
+	/* should be populated on page fault */
+	/* while accessed by guest kernel or user */
+}
+static inline void
+kvm_guest_user_pgd_populate(gmm_struct_t *gmm, pgd_t *pgd)
+{
+	/* should be populated on page fault */
+	/* while accessed by guest user */
+}
+
+static inline void
+virt_kernel_pgd_populate(struct mm_struct *mm, pgd_t *pgd)
+{
+	kvm_guest_kernel_pgd_populate(mm, pgd);
+}
+
+extern e2k_addr_t kvm_guest_user_address_to_pva(struct task_struct *task,
+					e2k_addr_t address);
+static inline e2k_addr_t
+guest_user_address_to_pva(struct task_struct *task, e2k_addr_t address)
+{
+	return kvm_guest_user_address_to_pva(task, address);
+}
+#else	/* ! CONFIG_VIRTUALIZATION */
+static inline void
+virt_kernel_pgd_populate(struct mm_struct *mm, pgd_t *pgd)
+{
+	/* nothing to do: there are no guests */
+}
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#endif	/* ! _E2K_KVM_GMMU_CONTEXT_H */
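The guest_user_address_to_pva() wrapper above is the hook a host-side stack dumper uses to turn a guest user virtual address into a host-printable address. A hedged sketch of a caller — task and addr are hypothetical, and error handling depends on the contract of kvm_guest_user_address_to_pva(), which this header does not spell out:

	/* illustrative: resolve a guest user address before dumping it */
	e2k_addr_t pva = guest_user_address_to_pva(task, addr);
	pr_info("guest addr 0x%lx -> pva 0x%lx\n", addr, pva);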
diff --git a/arch/e2k/include/asm/kvm/gpid.h b/arch/e2k/include/asm/kvm/gpid.h
new file mode 100644
index 0000000..c1d9b9b
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/gpid.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_E2K_KVM_GPID_H
+#define _ASM_E2K_KVM_GPID_H
+
+/*
+ * Guest process identifier (gpid) allocator
+ * Based on simplified include/linux/pid.h
+ */
+
+#include
+#include
+
+#include
+
+#define GPID_MAX_LIMIT		(PID_MAX_LIMIT / 2)
+#define RESERVED_GPIDS		300
+
+#define GPIDMAP_ENTRIES		((GPID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
+
+#define GPID_HASH_BITS		4
+#define GPID_HASH_SIZE		NID_HASH_SIZE(GPID_HASH_BITS)
+
+struct kvm;
+
+typedef struct gpid {
+	kvm_nid_t nid;
+	struct gthread_info *gthread_info;
+} gpid_t;
+
+typedef struct kvm_nid_table kvm_gpid_table_t;
+
+#define gpid_hashfn(nr)	nid_hashfn(nr, GPID_HASH_BITS)
+
+extern gpid_t *kvm_alloc_gpid(kvm_gpid_table_t *gpid_table);
+extern void kvm_do_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table);
+extern void kvm_free_gpid(gpid_t *gpid, kvm_gpid_table_t *gpid_table);
+extern int kvm_gpidmap_init(struct kvm *kvm, kvm_gpid_table_t *gpid_table,
+			kvm_nidmap_t *gpid_nidmap, int gpidmap_entries,
+			struct hlist_head *gpid_hash, int gpid_hash_bits);
+extern void kvm_gpidmap_destroy(kvm_gpid_table_t *gpid_table);
+
+#define for_each_guest_thread_info(gpid, entry, next, gpid_table) \
+		for_each_guest_nid_node(gpid, entry, next, gpid_table, \
+					nid.nid_chain)
+#define gpid_entry(ptr)	container_of(ptr, gpid_t, nid)
+#define gpid_table_lock(gpid_table) \
+		nid_table_lock(gpid_table)
+#define gpid_table_unlock(gpid_table) \
+		nid_table_unlock(gpid_table)
+#define gpid_table_lock_irq(gpid_table) \
+		nid_table_lock_irq(gpid_table)
+#define gpid_table_unlock_irq(gpid_table) \
+		nid_table_unlock_irq(gpid_table)
+#define gpid_table_lock_irqsave(gpid_table, flags) \
+		nid_table_lock_irqsave(gpid_table, flags)
+#define gpid_table_unlock_irqrestore(gpid_table, flags) \
+		nid_table_unlock_irqrestore(gpid_table, flags)
+
+static inline gpid_t *
+kvm_find_gpid(kvm_gpid_table_t *gpid_table, int gpid_nr)
+{
+	kvm_nid_t *nid;
+
+	nid = kvm_find_nid(gpid_table, gpid_nr, gpid_hashfn(gpid_nr));
+	if (nid == NULL)
+		return NULL;
+	return gpid_entry(nid);
+}
+
+#endif /* _ASM_E2K_KVM_GPID_H */
diff --git a/arch/e2k/include/asm/kvm/gregs.h b/arch/e2k/include/asm/kvm/gregs.h
new file mode 100644
index 0000000..80da247
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/gregs.h
@@ -0,0 +1,156 @@
+#ifndef _E2K_ASM_KVM_GREGS_H
+#define _E2K_ASM_KVM_GREGS_H
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_VIRTUALIZATION
+/* It is native host kernel with virtualization support */
+/* or paravirtualized host and guest kernel */
+/* or pure guest kernel */
+
+#define HOST_ONLY_COPY_TO_VCPU_STATE_GREG(__k_gregs, __vs) \
+({ \
+	(__k_gregs)->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base = (__vs); \
+})
+
+#define HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(__k_gregs, __vs) \
+({ \
+	(__vs) = (__k_gregs)->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base; \
+})
+
+#define HOST_GET_SAVED_VCPU_STATE_GREG(__ti) \
+({ \
+	unsigned long greg_vs; \
+ \
+	HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(&(__ti)->k_gregs, greg_vs); \
+	greg_vs; \
+})
+
+#define HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__) \
+({ \
+	(vs__) = NATIVE_GET_UNTEGGED_DGREG(GUEST_VCPU_STATE_GREG); \
+})
+#define HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__) \
+({ \
+	NATIVE_SET_DGREG(GUEST_VCPU_STATE_GREG, vs__); \
+})
+
+#define HOST_INIT_VCPU_STATE_GREG(__ti) \
+({ \
+	kernel_gregs_t *k_gregs = 
&(__ti)->k_gregs; \ + unsigned long vs; \ + \ + vs = k_gregs->g[GUEST_VCPU_STATE_GREGS_PAIRS_INDEX].base; \ + HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs); \ +}) + +#ifdef CONFIG_KVM_HOST_MODE +#define HOST_SAVE_HOST_GREGS_TO(__k_gregs, only_kernel) \ +({ \ + kernel_gregs_t *k_gregs = (__k_gregs); \ + unsigned long task__; \ + unsigned long cpu_id__; \ + unsigned long cpu_off__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_SAVE_VCPU_STATE_GREG(vs__); \ + HOST_ONLY_COPY_TO_VCPU_STATE_GREG(k_gregs, vs__); \ + } \ + ONLY_SAVE_KERNEL_GREGS(task__, cpu_id__, cpu_off__); \ + k_gregs->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base = task__; \ + k_gregs->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base = cpu_id__; \ + k_gregs->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base = cpu_off__; \ +}) + +#define HOST_SAVE_KERNEL_GREGS_AS_LIGHT(__ti) \ + HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs_light, true) + +#define HOST_SAVE_KERNEL_GREGS(__ti) \ + HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs, true) + +#define HOST_SAVE_HOST_GREGS(__ti) \ + HOST_SAVE_HOST_GREGS_TO(&(__ti)->k_gregs, false) + +#define HOST_RESTORE_HOST_GREGS_FROM(__k_gregs, only_kernel) \ +({ \ + kernel_gregs_t *k_gregs = (__k_gregs); \ + unsigned long task__; \ + unsigned long cpu_id__; \ + unsigned long cpu_off__; \ + \ + if (likely(!(only_kernel))) { \ + unsigned long vs__; \ + \ + HOST_ONLY_COPY_FROM_VCPU_STATE_GREG(k_gregs, vs__); \ + HOST_ONLY_RESTORE_VCPU_STATE_GREG(vs__); \ + } \ + task__ = k_gregs->g[CURRENT_TASK_GREGS_PAIRS_INDEX].base; \ + cpu_id__ = k_gregs->g[SMP_CPU_ID_GREGS_PAIRS_INDEX].base; \ + cpu_off__ = k_gregs->g[MY_CPU_OFFSET_GREGS_PAIRS_INDEX].base; \ + ONLY_RESTORE_KERNEL_GREGS(task__, cpu_id__, cpu_off__); \ +}) + +#define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs_light, true) + +#define HOST_RESTORE_KERNEL_GREGS(_ti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs, true) + +#define HOST_RESTORE_HOST_GREGS(_ti) \ + HOST_RESTORE_HOST_GREGS_FROM(&(_ti)->k_gregs, false) +#else /* !CONFIG_KVM_HOST_MODE */ +#define HOST_SAVE_HOST_GREGS(__ti) +#define HOST_RESTORE_HOST_GREGS(_ti) +#define HOST_SAVE_KERNEL_GREGS_AS_LIGHT(__ti) +#define HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(_ti) +#endif /* CONFIG_KVM_HOST_MODE */ + +#else /* ! 
CONFIG_VIRTUALIZATION */ +/* It is native host kernel without any virtualization */ +/* not used */ +#endif /* CONFIG_VIRTUALIZATION */ + +static inline void +copy_h_gregs_to_gregs(global_regs_t *dst, const host_gregs_t *src) +{ + tagged_memcpy_8(&dst->g[HOST_GREGS_PAIRS_START], src->g, + sizeof(src->g)); +} + +static inline void +copy_h_gregs_to_h_gregs(host_gregs_t *dst, const host_gregs_t *src) +{ + tagged_memcpy_8(dst->g, src->g, sizeof(src->g)); +} + +static inline void +get_h_gregs_from_gregs(host_gregs_t *dst, const global_regs_t *src) +{ + tagged_memcpy_8(dst->g, &src->g[HOST_GREGS_PAIRS_START], + sizeof(dst->g)); +} + +static inline void +copy_h_gregs_to_l_gregs(local_gregs_t *dst, const host_gregs_t *src) +{ + BUG_ON(HOST_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(&dst->g[HOST_GREGS_PAIRS_START - LOCAL_GREGS_START], + src->g, sizeof(src->g)); +} + +static inline void +get_h_gregs_from_l_regs(host_gregs_t *dst, const local_gregs_t *src) +{ + BUG_ON(HOST_GREGS_PAIRS_START < LOCAL_GREGS_START); + tagged_memcpy_8(dst->g, + &src->g[HOST_GREGS_PAIRS_START - LOCAL_GREGS_START], + sizeof(dst->g)); +} + +#endif /* _E2K_ASM_KVM_GREGS_H */ diff --git a/arch/e2k/include/asm/kvm/guest.h b/arch/e2k/include/asm/kvm/guest.h new file mode 100644 index 0000000..40de524 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest.h @@ -0,0 +1,353 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines architecture specific interfaces, e2k version + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef _ASM_E2K_KVM_GUEST_H +#define _ASM_E2K_KVM_GUEST_H + +#include + +#include +#include +#include +#include +#include +#include + +#include + +typedef struct kvm_cpu_regs { +#if defined(CONFIG_KVM_GUEST_KERNEL) && \ + defined(CONFIG_KVM_GUEST_HW_PV) && defined(CONFIG_KVM_HV_MMU) + u64 __pad1[24]; +#else /* ! CONFIG_KVM_GUEST_KERNEL || | + ! CONFIG_KVM_GUEST_HW_PV || ! 
CONFIG_KVM_HV_MMU */
+	e2k_cud_lo_t CPU_CUD_lo;	/* Compilation Unit Descriptor */
+	e2k_cud_hi_t CPU_CUD_hi;
+	e2k_gd_lo_t CPU_GD_lo;		/* CU Globals Descriptor */
+	e2k_gd_hi_t CPU_GD_hi;
+	e2k_oscud_lo_t CPU_OSCUD_lo;	/* OS Compilation Unit Descriptor */
+	e2k_oscud_hi_t CPU_OSCUD_hi;
+	e2k_osgd_lo_t CPU_OSGD_lo;	/* OS CU Globals Descriptor */
+	e2k_osgd_hi_t CPU_OSGD_hi;
+	e2k_cutd_t CPU_CUTD;		/* Compilation Unit Table Register */
+	e2k_cuir_t CPU_CUIR;		/* Compilation Unit Index Register */
+	e2k_tsd_t CPU_TSD;		/* Compilation Unit Types Descriptor */
+
+	e2k_usd_lo_t CPU_USD_lo;	/* User Stack Descriptor Register */
+	e2k_usd_hi_t CPU_USD_hi;
+	e2k_sbr_t CPU_SBR;		/* Stack Base Register */
+	e2k_psp_lo_t CPU_PSP_lo;	/* Procedure Stack Pointer */
+	e2k_psp_hi_t CPU_PSP_hi;
+	e2k_pshtp_t CPU_PSHTP;		/* Procedure Stack Hardware */
+					/* Top Pointer */
+	e2k_pcsp_lo_t CPU_PCSP_lo;	/* Procedure Chain Stack Pointer */
+	e2k_pcsp_hi_t CPU_PCSP_hi;
+	e2k_cr0_lo_t CPU_CR0_lo;	/* Current Chain Register */
+	e2k_cr0_hi_t CPU_CR0_hi;
+	e2k_cr1_lo_t CPU_CR1_lo;
+	e2k_cr1_hi_t CPU_CR1_hi;
+	e2k_pcshtp_t CPU_PCSHTP;	/* Procedure Chain Stack Hardware */
+					/* Top Pointer */
+#endif /* CONFIG_KVM_GUEST_KERNEL && \
+	  CONFIG_KVM_GUEST_HW_PV && CONFIG_KVM_HV_MMU */
+	e2k_ctpr_t CPU_CTPR1;		/* Control Transfer Preparation */
+	e2k_ctpr_t CPU_CTPR2;		/* Registers */
+	e2k_ctpr_t CPU_CTPR3;
+	e2k_tir_t CPU_TIRs[MAX_TIRs_NUM];	/* Trap Info Registers */
+	int CPU_TIRs_num;		/* number of occupied TIRs */
+	e2k_wd_t CPU_WD;		/* Window Descriptor Register */
+	e2k_bgr_t CPU_BGR;		/* Base Global Register */
+	e2k_lsr_t CPU_LSR;		/* Loop Status Register */
+	e2k_ilcr_t CPU_ILCR;		/* Initial Loop Counters Register */
+	e2k_rpr_lo_t CPU_RPR_lo;	/* Recovery point register */
+	e2k_rpr_hi_t CPU_RPR_hi;
+	e2k_cutd_t CPU_OSCUTD;		/* CUTD Register of OS */
+	e2k_cuir_t CPU_OSCUIR;		/* CUI register of OS */
+	u64 CPU_OSR0;			/* OS register #0 */
+	u32 CPU_OSEM;			/* OS Entries Mask */
+	e2k_psr_t CPU_PSR;		/* Processor State Register */
+	e2k_upsr_t CPU_UPSR;		/* User Processor State Register */
+	e2k_pfpfr_t CPU_PFPFR;		/* floating point control registers */
+	e2k_fpcr_t CPU_FPCR;
+	e2k_fpsr_t CPU_FPSR;
+	u64 CPU_CLKR;			/* CPU current clock register */
+	u64 CPU_SCLKR;			/* CPU system clock register */
+	u64 CPU_SCLKM1;			/* CPU system clock register 1 */
+	u64 CPU_SCLKM2;			/* CPU system clock register 2 */
+	u64 CPU_SCLKM3;			/* CPU system clock register 3 */
+	u64 CPU_CU_HW0;			/* Control Unit HardWare registers 0 */
+	u64 CPU_CU_HW1;			/* Control Unit HardWare registers 1 */
+	u64 CPU_IP;			/* Instruction Pointer register */
+	e2k_idr_t CPU_IDR;		/* Processor Identification Register */
+	e2k_core_mode_t CPU_CORE_MODE;	/* Processor Core Modes Register */
+	u32 CPU_DIBCR;			/* diagnostic and monitors registers */
+	u32 CPU_DIBSR;
+	u64 CPU_DIMCR;
+	u64 CPU_DIBAR0;
+	u64 CPU_DIBAR1;
+	u64 CPU_DIBAR2;
+	u64 CPU_DIBAR3;
+	u64 CPU_DIMAR0;
+	u64 CPU_DIMAR1;
+	u64 CPU_CS_lo;			/* Intel Segments registers */
+	u64 CPU_CS_hi;
+	u64 CPU_DS_lo;
+	u64 CPU_DS_hi;
+	u64 CPU_ES_lo;
+	u64 CPU_ES_hi;
+	u64 CPU_FS_lo;
+	u64 CPU_FS_hi;
+	u64 CPU_GS_lo;
+	u64 CPU_GS_hi;
+	u64 CPU_SS_lo;
+	u64 CPU_SS_hi;
+	/* virtual register only to support paravirtualization mode */
+	u32 CPU_VCPU_ID;		/* VCPU # is set by host and cannot */
+					/* be modified */
+} kvm_cpu_regs_t;
+
+/*
+ * CPU registers status flags
+ */
+/* the following flags mark VCPU registers updated by the guest; the host */
+/* should recover the physical CPU registers from the memory copy */
+#define WD_UPDATED_CPU_REGS		0x00000001UL	/* register WD */
+#define USD_UPDATED_CPU_REGS		0x00000002UL	/* USD/SBR */
+#define CRS_UPDATED_CPU_REGS		0x00000004UL	/* CR0/CR1 */
+#define HS_REGS_UPDATED_CPU_REGS	0x00000008UL	/* PSP/PCSP/PSHTP */
+							/* PCSHTP */
+#define MASK_UPDATED_CPU_REGS		0x0000ffffUL	/* mask of all */
+							/* updating flags */
+
+#define KVM_SET_CPU_REGS_FLAGS(regs_status, flags_to_add) \
+		((regs_status) | (flags_to_add))
+#define KVM_CLEAR_CPU_REGS_FLAGS(regs_status, flags_to_clear) \
+		(((regs_status) | (flags_to_clear)) ^ (flags_to_clear))
+#define KVM_TEST_CPU_REGS_FLAGS(regs_status, flags_to_test) \
+		(((regs_status) & (flags_to_test)) != 0)
+#define KVM_TEST_CPU_REGS_STATUS(regs_status) \
+		((regs_status) != 0)
+#define TO_UPDATED_CPU_REGS_FLAGS(flags) \
+		((flags) & MASK_UPDATED_CPU_REGS)
+#define KVM_SET_UPDATED_CPU_REGS_FLAGS(regs_status, flags_to_add) \
+		KVM_SET_CPU_REGS_FLAGS(regs_status, \
+			TO_UPDATED_CPU_REGS_FLAGS(flags_to_add))
+#define KVM_CLEAR_UPDATED_CPU_REGS_FLAGS(regs_status, flags_to_clear) \
+		KVM_CLEAR_CPU_REGS_FLAGS(regs_status, \
+			TO_UPDATED_CPU_REGS_FLAGS(flags_to_clear))
+#define KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, flag) \
+		KVM_TEST_CPU_REGS_FLAGS(regs_status, \
+			TO_UPDATED_CPU_REGS_FLAGS(flag))
+#define KVM_TEST_UPDATED_CPU_REGS_FLAGS(regs_status) \
+		KVM_TEST_CPU_REGS_FLAGS(regs_status, MASK_UPDATED_CPU_REGS)
+#define KVM_INIT_UPDATED_CPU_REGS_FLAGS(regs_status) \
+		KVM_CLEAR_CPU_REGS_FLAGS(regs_status, MASK_UPDATED_CPU_REGS)
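The regs_status helpers above are pure expression macros: they return the updated mask rather than modifying it in place, so callers must assign the result back. A minimal usage sketch built only from the flags and macros defined above (the surrounding flow is hypothetical):

	unsigned long regs_status = 0;

	/* guest marks WD and CR0/CR1 as updated in the memory copy */
	regs_status = KVM_SET_UPDATED_CPU_REGS_FLAGS(regs_status,
				WD_UPDATED_CPU_REGS | CRS_UPDATED_CPU_REGS);
	if (KVM_TEST_UPDATED_CPU_REGS_FLAG(regs_status, WD_UPDATED_CPU_REGS)) {
		/* host would restore WD from the memory copy here */
	}
	/* drop all "updated" flags once the registers are synced back */
	regs_status = KVM_INIT_UPDATED_CPU_REGS_FLAGS(regs_status);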
+
+typedef struct kvm_cpu_state {
+	unsigned long regs_status;	/* CPU register status flags */
+	kvm_cpu_regs_t regs;		/* CPU registers state */
+	e2k_aau_t aau;			/* AAU registers state */
+	u64 aaldi[AALDIS_REGS_NUM];	/* AALDI registers state */
+	e2k_aalda_t aalda[AALDAS_REGS_NUM];	/* AALDA registers state */
+} kvm_cpu_state_t;
+
+typedef struct kvm_mmu_state {
+	mmu_reg_t regs[MMU_REGS_NUM];	/* MMU registers state */
+	mmu_reg_t debug_regs[MMU_DEBUG_REGS_NUM];	/* MMU DEBUG */
+					/* registers state */
+	trap_cellar_t tcellar[MAX_TC_SIZE];	/* trap cellar */
+	int tc_count;			/* number of entries in */
+					/* trap cellar */
+} kvm_mmu_state_t;
+
+typedef struct kvm_apic_state {
+	u8 regs[APIC_REGS_SIZE];
+	atomic_t virqs_num;		/* Local APIC unhandled and in */
+					/* progress VIRQs number */
+} kvm_apic_state_t;
+
+typedef struct kvm_epic_state {
+	u8 regs[EPIC_REGS_SIZE];
+	atomic_t virqs_num;		/* CEPIC unhandled and in */
+					/* progress VIRQs number */
+} kvm_epic_state_t;
+
+/*
+ * Guest Local APIC is virtual and emulated on the host,
+ * so any virtual CPU can access any local APIC.
+ * The local APIC base address is extended with the following flag and
+ * CPU # to enable such access.
+ */
+#define KVM_LAPIC_BASE_CPU_FLAG		0x4000000000000000UL	/* [62] */
+#define KVM_LAPIC_BASE_CPU_NUM_MASK	0x0fff000000000000UL	/* [59:48] */
+#define KVM_LAPIC_BASE_CPU_NUM_SHIFT	48
+#define KVM_CPU_TO_LAPIC_BASE(cpu_id) \
+		((cpu_id) << KVM_LAPIC_BASE_CPU_NUM_SHIFT)
+#define KVM_CPU_FROM_LAPIC_BASE(base) \
+		((((e2k_addr_t)(base)) & KVM_LAPIC_BASE_CPU_NUM_MASK) >> \
+			KVM_LAPIC_BASE_CPU_NUM_SHIFT)
+#define KVM_GET_LAPIC_REG_ADDR(addr) \
+		((addr) & MAX_PA_MASK)
+#define KVM_SET_LAPIC_BASE_CPU(base, cpu_id) \
+		((((e2k_addr_t)(base)) & ~(KVM_LAPIC_BASE_CPU_FLAG | \
+					KVM_LAPIC_BASE_CPU_NUM_MASK)) | \
+			KVM_LAPIC_BASE_CPU_FLAG | \
+			KVM_CPU_TO_LAPIC_BASE(cpu_id))
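Because the local APIC is emulated centrally, the base address doubles as an addressing cookie: bit [62] flags the scheme and bits [59:48] carry the CPU number. A worked instance of the macros above — the 0xfee00000 base is the conventional LAPIC address, chosen here purely for illustration:

	unsigned long base = 0xfee00000UL;
	unsigned long enc = KVM_SET_LAPIC_BASE_CPU(base, 2UL);
	/* enc == 0x40020000fee00000UL: flag bit [62] set, CPU #2 in [59:48] */
	unsigned long cpu = KVM_CPU_FROM_LAPIC_BASE(enc);	/* == 2 */
	/* KVM_GET_LAPIC_REG_ADDR(enc) strips the cookie again, assuming
	   MAX_PA_MASK covers only bits below the CPU-number field */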
+ */
+#define KVM_LAPIC_BASE_CPU_FLAG 0x4000000000000000UL /* [62] */
+#define KVM_LAPIC_BASE_CPU_NUM_MASK 0x0fff000000000000UL /* [59:48] */
+#define KVM_LAPIC_BASE_CPU_NUM_SHIFT 48
+#define KVM_CPU_TO_LAPIC_BASE(cpu_id) \
+ ((cpu_id) << KVM_LAPIC_BASE_CPU_NUM_SHIFT)
+#define KVM_CPU_FROM_LAPIC_BASE(base) \
+ ((((e2k_addr_t)(base)) & KVM_LAPIC_BASE_CPU_NUM_MASK) >> \
+ KVM_LAPIC_BASE_CPU_NUM_SHIFT)
+#define KVM_GET_LAPIC_REG_ADDR(addr) \
+ ((addr) & MAX_PA_MASK)
+#define KVM_SET_LAPIC_BASE_CPU(base, cpu_id) \
+ ((((e2k_addr_t)(base)) & ~(KVM_LAPIC_BASE_CPU_FLAG | \
+ KVM_LAPIC_BASE_CPU_NUM_MASK)) | \
+ KVM_LAPIC_BASE_CPU_FLAG | \
+ KVM_CPU_TO_LAPIC_BASE(cpu_id))
+
+typedef struct kvm_virqs_state {
+ atomic_t timer_virqs_num; /* timer VIRQs unhandled and */
+ /* in-progress number */
+ atomic_t hvc_virqs_num; /* Hypervisor Virtual Console */
+ /* VIRQs unhandled and */
+ /* in-progress number */
+} kvm_virqs_state_t;
+
+typedef struct kvm_timespec {
+ long tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+} kvm_timespec_t;
+
+typedef struct kvm_time {
+ kvm_timespec_t wall_time; /* boot time */
+ kvm_timespec_t sys_time; /* current system time */
+} kvm_time_t;
+
+/*
+ * Host machine info
+ */
+typedef struct kvm_host_info {
+ int mach_id; /* host machine ID */
+ int cpu_rev; /* host CPU revision */
+ int cpu_iset; /* host CPU instruction set version */
+ bool mmu_support_pt_v6; /* host MMU supports new MMU Page */
+ /* Tables structures V6 */
+ bool is_hv; /* host is hardware virtualized */
+ bool support_hw_hc; /* host supports hardware hypercalls */
+ unsigned long features; /* KVM and hypervisor features */
+ /* see details */
+ kvm_time_t time; /* current host time state */
+ int clock_rate; /* clock tick frequency */
+} kvm_host_info_t;
+
+/*
+ * Information about the state and running time of a VCPU.
+ * Based on the Xen interface include/xen/interface/vcpu.h
+ */
+
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define RUNSTATE_offline 0
+
+/* VCPU is currently running on a physical CPU. */
+#define RUNSTATE_running 1
+
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define RUNSTATE_runnable 2
+
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define RUNSTATE_blocked 3
+
+/* VCPU executes a hypercall. */
+#define RUNSTATE_in_hcall 4
+
+/* VCPU has exited to handle an exit request from QEMU. */
+#define RUNSTATE_in_QEMU 5
+
+/* VCPU is interrupted and executes a trap handler. */
+#define RUNSTATE_in_trap 6
+
+/* VCPU is intercepted and is handling the interception. */
+#define RUNSTATE_in_intercept 7
+#define RUNSTATES_num (RUNSTATE_in_intercept + 1)
+
+typedef struct kvm_runstate_info {
+ /* VCPU's current state (RUNSTATE_*). */
+ /* volatile */ int state;
+ /* When was current state entered (system time, ns)? */
+ /* volatile */ uint64_t state_entry_time;
+ /*
+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
+ * guaranteed not to drift from system time.
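+ *
+ * So the time accumulated in the current state can be sampled as
+ * (an illustrative sketch, assuming a consistent snapshot of the
+ * fields; now_ns stands for the current system time in ns):
+ *	total = time[state] + (now_ns - state_entry_time);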
+ */
+ /* volatile */ uint64_t time[RUNSTATES_num];
+} kvm_runstate_info_t;
+
+typedef struct kvm_vcpu_state {
+ kvm_cpu_state_t cpu; /* virtual CPU state */
+ kvm_mmu_state_t mmu; /* virtual MMU state */
+ kvm_apic_state_t lapic; /* virtual Local APIC state */
+ kvm_epic_state_t cepic; /* virtual CEPIC state */
+ kvm_virqs_state_t virqs; /* virtual IRQs state */
+ /* (excluding VIRQs from LAPIC) */
+ kvm_host_info_t *host; /* host machine and kernel info */
+ /* VCPU's current running state */
+ kvm_runstate_info_t runstate;
+ bool debug_mode_on;
+ bool irqs_under_upsr;
+ bool do_dump_state; /* dump all stacks */
+ bool do_dump_stack; /* dump only the active stack */
+} kvm_vcpu_state_t;
+
+#define DEBUG_MODE_ON (vcpu->arch.kmap_vcpu_state->debug_mode_on)
+#define DO_DUMP_VCPU_STATE(vcpu) \
+ ((vcpu)->arch.kmap_vcpu_state->do_dump_state)
+#define DO_DUMP_VCPU_STACK(vcpu) \
+ ((vcpu)->arch.kmap_vcpu_state->do_dump_stack)
+#define DO_DUMP_VCPU(vcpu) \
+ (DO_DUMP_VCPU_STATE(vcpu) || DO_DUMP_VCPU_STACK(vcpu))
+#define VCPU_IRQS_UNDER_UPSR(vcpu) \
+ (vcpu->arch.kmap_vcpu_state->irqs_under_upsr)
+
+/*
+ * Basic macros to access VCPU state from the guest
+ */
+#ifdef CONFIG_USE_GD_TO_VCPU_ACCESS
+#define KVM_GET_VCPU_STATE_BASE(res) \
+({ \
+ e2k_osgd_lo_t osgd_lo = native_read_OSGD_lo_reg(); \
+ res = osgd_lo.GD_lo_base; \
+ res; \
+})
+
+#define E2K_LOAD_GUEST_VCPU_STATE_W(offset) E2K_LOAD_GLOBAL_W(offset)
+#define E2K_LOAD_GUEST_VCPU_STATE_D(offset) E2K_LOAD_GLOBAL_D(offset)
+#define E2K_STORE_GUEST_VCPU_STATE_W(offset, value) \
+ E2K_STORE_GLOBAL_W(offset, value)
+#define E2K_STORE_GUEST_VCPU_STATE_D(offset, value) \
+ E2K_STORE_GLOBAL_D(offset, value)
+#else /* ! CONFIG_USE_GD_TO_VCPU_ACCESS */
+
+#define DO_GET_VCPU_STATE_BASE(vcpu_state_greg_no, res) \
+ E2K_MOVE_DGREG_TO_DREG(vcpu_state_greg_no, res)
+#define KVM_GET_VCPU_STATE_BASE(res) \
+ DO_GET_VCPU_STATE_BASE(GUEST_VCPU_STATE_GREG, res)
+#define KVM_SAVE_VCPU_STATE_BASE(res) KVM_GET_VCPU_STATE_BASE(res)
+
+#define DO_SET_VCPU_STATE_BASE(vcpu_state_greg_no, res) \
+ E2K_MOVE_DREG_TO_DGREG(vcpu_state_greg_no, res)
+#define KVM_SET_VCPU_STATE_BASE(res) \
+ DO_SET_VCPU_STATE_BASE(GUEST_VCPU_STATE_GREG, res)
+#define KVM_RESTORE_VCPU_STATE_BASE(res) KVM_SET_VCPU_STATE_BASE(res)
+
+#define E2K_LOAD_GUEST_VCPU_STATE_W(offset) \
+ E2K_LOAD_GREG_BASED_W(GUEST_VCPU_STATE_GREG, offset)
+#define E2K_LOAD_GUEST_VCPU_STATE_D(offset) \
+ E2K_LOAD_GREG_BASED_D(GUEST_VCPU_STATE_GREG, offset)
+#define E2K_STORE_GUEST_VCPU_STATE_W(offset, value) \
+ E2K_STORE_GREG_BASED_W(GUEST_VCPU_STATE_GREG, offset, value)
+#define E2K_STORE_GUEST_VCPU_STATE_D(offset, value) \
+ E2K_STORE_GREG_BASED_D(GUEST_VCPU_STATE_GREG, offset, value)
+#endif /* CONFIG_USE_GD_TO_VCPU_ACCESS */
+
+#endif /* _ASM_E2K_KVM_GUEST_H */
diff --git a/arch/e2k/include/asm/kvm/guest/Kbuild b/arch/e2k/include/asm/kvm/guest/Kbuild
new file mode 100644
index 0000000..97dad26
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/Kbuild
@@ -0,0 +1,4 @@
+
+### e2k virtualization guest
+
+unifdef-y += hvc_l.h
diff --git a/arch/e2k/include/asm/kvm/guest/aau_context.h b/arch/e2k/include/asm/kvm/guest/aau_context.h
new file mode 100644
index 0000000..5dffb86
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/aau_context.h
@@ -0,0 +1,312 @@
+/*
+ * KVM AAU registers model access
+ *
+ * array access descriptors (AAD0, ... , AAD31);
+ * initial indices (AAIND0, ... , AAIND15);
+ * indices increment values (AAINCR0, ... , AAINCR7);
+ * current values of "prefetch" indices (AALDI0, ...
, AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ +#define _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ + +#include +#include + +#define KVM_SAVE_AAU_MASK_REGS(aau_context, aasr) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr); \ + } else { \ + PREFIX_SAVE_AAU_MASK_REGS(KVM, kvm, aau_context, aasr); \ + } \ +}) + +#define KVM_RESTORE_AAU_MASK_REGS(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_RESTORE_AAU_MASK_REGS(aau_context); \ + } else { \ + PREFIX_RESTORE_AAU_MASK_REGS(KVM, kvm, aau_context); \ + } \ +}) + +#define KVM_SAVE_AADS(aau_regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AADS(aau_regs); \ + } else { \ + PREFIX_SAVE_AADS(KVM, kvm, aau_regs); \ + } \ +}) + +#define KVM_RESTORE_AADS(aau_regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_RESTORE_AADS(aau_regs); \ + } else { \ + PREFIX_RESTORE_AADS(KVM, kvm, aau_regs); \ + } \ +}) + +#define KVM_SAVE_AALDIS(regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDIS(regs); \ + } else { \ + PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \ + } \ +}) +#define KVM_SAVE_AALDIS_V2(regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDIS_V2(regs); \ + } else { \ + PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \ + } \ +}) +#define KVM_SAVE_AALDIS_V5(regs) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDIS_V5(regs); \ + } else { \ + PREFIX_SAVE_AALDIS_V5(KVM, kvm, regs); \ + } \ +}) + +#define KVM_SAVE_AALDAS(aaldas_p) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AALDAS(aaldas_p); \ + } else { \ + PREFIX_SAVE_AALDAS(KVM, kvm, aaldas_p); \ + } \ +}) + +#define KVM_SAVE_AAFSTR(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AAFSTR(aau_context); \ + } else { \ + PREFIX_SAVE_AAFSTR(KVM, kvm, aau_context); \ + } \ +}) + +#define KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_AAU_REGS_FOR_PTRACE(regs, ti); \ + } else { \ + PREFIX_SAVE_AAU_REGS_FOR_PTRACE(KVM, kvm, regs, ti); \ + } \ +}) + +#define KVM_GET_ARRAY_DESCRIPTORS(aau_context) \ + PREFIX_GET_ARRAY_DESCRIPTORS_V5(KVM, kvm, aau_context) +#define KVM_GET_ARRAY_DESCRIPTORS_V2(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context); \ + } else { \ + KVM_GET_ARRAY_DESCRIPTORS(aau_context); \ + } \ +}) +#define KVM_GET_ARRAY_DESCRIPTORS_V5(aau_context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context); \ + } else { \ + KVM_GET_ARRAY_DESCRIPTORS(aau_context); \ + } \ +}) +#define KVM_SET_ARRAY_DESCRIPTORS(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_SET_ARRAY_DESCRIPTORS(context); \ + } else { \ + PREFIX_SET_ARRAY_DESCRIPTORS(KVM, kvm, context); \ + } \ +}) + +#define KVM_GET_SYNCHRONOUS_PART(context) \ + PREFIX_GET_SYNCHRONOUS_PART_V5(KVM, kvm, context) +#define KVM_GET_SYNCHRONOUS_PART_V2(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V2(context); \ + } else { \ + KVM_GET_SYNCHRONOUS_PART(context); \ + } \ +}) +#define KVM_GET_SYNCHRONOUS_PART_V5(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V5(context); \ + } else { \ + KVM_GET_SYNCHRONOUS_PART(context); \ + } \ +}) + +#define KVM_GET_AAU_CONTEXT(context) \ +({ \ + if (IS_HV_GM()) { \ + NATIVE_GET_AAU_CONTEXT(context); \ + } else { \ + 
PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \
+ } \
+})
+#define KVM_GET_AAU_CONTEXT_V2(context) \
+({ \
+ if (IS_HV_GM()) { \
+ NATIVE_GET_AAU_CONTEXT_V2(context); \
+ } else { \
+ PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \
+ } \
+})
+#define KVM_GET_AAU_CONTEXT_V5(context) \
+({ \
+ if (IS_HV_GM()) { \
+ NATIVE_GET_AAU_CONTEXT_V5(context); \
+ } else { \
+ PREFIX_GET_AAU_CONTEXT(KVM, kvm, V5, v5, context); \
+ } \
+})
+
+static inline void
+kvm_save_aaldi(u64 *aaldis)
+{
+ KVM_SAVE_AALDIS(aaldis);
+}
+static inline void
+kvm_save_aaldi_v2(u64 *aaldis)
+{
+ KVM_SAVE_AALDIS_V2(aaldis);
+}
+static inline void
+kvm_save_aaldi_v5(u64 *aaldis)
+{
+ KVM_SAVE_AALDIS_V5(aaldis);
+}
+
+static inline void
+kvm_get_array_descriptors(e2k_aau_t *context)
+{
+ KVM_GET_ARRAY_DESCRIPTORS(context);
+}
+static inline void
+kvm_get_array_descriptors_v2(e2k_aau_t *context)
+{
+ KVM_GET_ARRAY_DESCRIPTORS_V2(context);
+}
+static inline void
+kvm_get_array_descriptors_v5(e2k_aau_t *context)
+{
+ KVM_GET_ARRAY_DESCRIPTORS_V5(context);
+}
+
+static inline void
+kvm_set_array_descriptors(const e2k_aau_t *context)
+{
+ KVM_SET_ARRAY_DESCRIPTORS(context);
+}
+
+static inline void
+kvm_get_synchronous_part(e2k_aau_t *context)
+{
+ KVM_GET_SYNCHRONOUS_PART(context);
+}
+static inline void
+kvm_get_synchronous_part_v2(e2k_aau_t *context)
+{
+ KVM_GET_SYNCHRONOUS_PART_V2(context);
+}
+static inline void
+kvm_get_synchronous_part_v5(e2k_aau_t *context)
+{
+ KVM_GET_SYNCHRONOUS_PART_V5(context);
+}
+
+/*
+ * It is assumed that AASR was read earlier (by the get_aau_context()
+ * caller) and that the comparison with aasr.iab has already been done.
+ */
+static inline void
+kvm_get_aau_context(e2k_aau_t *context)
+{
+ KVM_GET_AAU_CONTEXT(context);
+}
+static inline void
+kvm_get_aau_context_v2(e2k_aau_t *context)
+{
+ KVM_GET_AAU_CONTEXT_V2(context);
+}
+static inline void
+kvm_get_aau_context_v5(e2k_aau_t *context)
+{
+ KVM_GET_AAU_CONTEXT_V5(context);
+}
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* It is pure kvm kernel without paravirtualization */
+
+#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
+ KVM_SAVE_AAU_MASK_REGS(aau_context, aasr)
+
+#define RESTORE_AAU_MASK_REGS(aau_context) \
+ KVM_RESTORE_AAU_MASK_REGS(aau_context)
+
+#define SAVE_AADS(aau_regs) \
+ KVM_SAVE_AADS(aau_regs)
+
+#define RESTORE_AADS(aau_regs) \
+ KVM_RESTORE_AADS(aau_regs)
+
+#define SAVE_AALDIS_V2(regs) KVM_SAVE_AALDIS_V2(regs)
+#define SAVE_AALDIS_V5(regs) KVM_SAVE_AALDIS_V5(regs)
+
+#define SAVE_AALDA(aaldas) KVM_SAVE_AALDAS(aaldas)
+
+#define SAVE_AAFSTR(regs) KVM_SAVE_AAFSTR_REG(regs)
+
+#define SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \
+ KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti)
+
+#define GET_ARRAY_DESCRIPTORS_V2(context) \
+ KVM_GET_ARRAY_DESCRIPTORS_V2(context)
+#define GET_ARRAY_DESCRIPTORS_V5(context) \
+ KVM_GET_ARRAY_DESCRIPTORS_V5(context)
+
+#define GET_SYNCHRONOUS_PART_V2(context) \
+ KVM_GET_SYNCHRONOUS_PART_V2(context)
+#define GET_SYNCHRONOUS_PART_V5(context) \
+ KVM_GET_SYNCHRONOUS_PART_V5(context)
+
+#define GET_AAU_CONTEXT_V2(context) KVM_GET_AAU_CONTEXT_V2(context)
+#define GET_AAU_CONTEXT_V5(context) KVM_GET_AAU_CONTEXT_V5(context)
+
+static inline void
+save_aaldi(u64 *aaldis)
+{
+ kvm_save_aaldi(aaldis);
+}
+static inline void
+set_array_descriptors(e2k_aau_t *context)
+{
+ kvm_set_array_descriptors(context);
+}
+static inline void
+get_aau_context(e2k_aau_t *context)
+{
+ kvm_get_aau_context(context);
+}
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* _ASM_E2K_KVM_GUEST_AAU_CONTEXT_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/area_alloc.h b/arch/e2k/include/asm/kvm/guest/area_alloc.h
new file mode 100644
index 0000000..8a6d260
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/area_alloc.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_KVM_GUEST_AREA_ALLOC_H
+#define __ASM_KVM_GUEST_AREA_ALLOC_H
+
+#ifdef __KERNEL__
+
+#include
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_KVM_GUEST_AREA_ALLOC_H */
diff --git a/arch/e2k/include/asm/kvm/guest/atomic_api.h b/arch/e2k/include/asm/kvm/guest/atomic_api.h
new file mode 100644
index 0000000..aa49334
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/atomic_api.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_E2K_KVM_GUEST_ATOMIC_API_H_
+#define _ASM_E2K_KVM_GUEST_ATOMIC_API_H_
+
+#include
+#include
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+/* FIXME: hardware bug workarounds are not implemented here, because */
+/* such workarounds contain privileged actions and can be done only */
+/* on the host using appropriate hypercalls */
+
+#define KVM_HWBUG_AFTER_LD_ACQ() NATIVE_HWBUG_AFTER_LD_ACQ()
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not virtualized based on pv_ops) */
+
+/* The guest virtual machine should take host machine bugs into */
+/* account too, but this is not implemented yet */
+#define virt_cpu_has(hwbug) false
+
+#define VIRT_HWBUG_AFTER_LD_ACQ() KVM_HWBUG_AFTER_LD_ACQ()
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_E2K_KVM_GUEST_ATOMIC_API_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/boot.h b/arch/e2k/include/asm/kvm/guest/boot.h
new file mode 100644
index 0000000..8bc08e4
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/boot.h
@@ -0,0 +1,152 @@
+/*
+ * E2K boot-time initialization virtualization for KVM guest
+ *
+ * Copyright 2011 Salavat S.
Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_GUEST_BOOT_H_
+#define _E2K_KVM_GUEST_BOOT_H_
+
+#ifndef __ASSEMBLY__
+#include
+
+#include
+#include
+#include
+
+extern void boot_kvm_setup_machine_id(bootblock_struct_t *bootblock);
+extern int __init boot_kvm_probe_memory(node_phys_mem_t *nodes_phys_mem,
+ boot_info_t *bootblock);
+extern e2k_size_t __init boot_kvm_get_bootblock_size(boot_info_t *bblock);
+
+extern void boot_kvm_panic(const char *fmt_v, ...);
+extern void __init_recv boot_kvm_cpu_relax(void);
+
+#ifdef CONFIG_SMP
+/*
+ * redefine for guest: number of loops in each iteration of waiting for
+ * synchronization completion
+ */
+#define BOOT_WAITING_FOR_SYNC_LOOPS (NR_CPUS * 160)
+
+extern int __init_recv boot_kvm_smp_cpu_config(boot_info_t *bootblock);
+extern void __init_recv boot_kvm_smp_node_config(boot_info_t *bootblock);
+#endif /* CONFIG_SMP */
+
+extern void __init boot_kvm_reserve_all_bootmem(bool bsp,
+ boot_info_t *boot_info);
+extern void __init boot_kvm_map_all_bootmem(bool bsp, boot_info_t *boot_info);
+extern void __init_recv boot_kvm_map_needful_to_equal_virt_area(
+ e2k_addr_t stack_top_addr);
+extern void __init_recv boot_kvm_switch_to_virt(bool bsp, int cpuid,
+ void (*boot_init_sequel_func)(bool bsp,
+ int cpuid, int cpus));
+extern void __init init_kvm_terminate_boot_init(bool bsp, int cpuid);
+extern void __init boot_kvm_parse_param(bootblock_struct_t *bootblock);
+extern void __init boot_kvm_clear_bss(void);
+extern void __init boot_kvm_check_bootblock(bool bsp,
+ bootblock_struct_t *bootblock);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+static inline void
+boot_setup_machine_id(bootblock_struct_t *bootblock)
+{
+ boot_kvm_setup_machine_id(bootblock);
+}
+static inline int __init
+boot_loader_probe_memory(node_phys_mem_t *nodes_phys_mem,
+ boot_info_t *bootblock)
+{
+ return boot_kvm_probe_memory(nodes_phys_mem, bootblock);
+}
+
+static inline e2k_size_t __init
+boot_get_bootblock_size(boot_info_t *bootblock)
+{
+ return boot_kvm_get_bootblock_size(bootblock);
+}
+
+#define boot_panic(fmt, args...)
boot_kvm_panic(fmt, ##args)
+
+static inline void
+boot_cpu_relax(void)
+{
+ boot_kvm_cpu_relax();
+}
+
+#ifdef CONFIG_SMP
+static inline e2k_size_t __init
+boot_smp_cpu_config(boot_info_t *bootblock)
+{
+ return boot_kvm_smp_cpu_config(bootblock);
+}
+
+static inline void __init
+boot_smp_node_config(boot_info_t *bootblock)
+{
+ boot_kvm_smp_node_config(bootblock);
+}
+#endif /* CONFIG_SMP */
+
+static inline void __init
+boot_reserve_all_bootmem(bool bsp, boot_info_t *boot_info)
+{
+ boot_kvm_reserve_all_bootmem(bsp, boot_info);
+}
+
+static inline void __init
+boot_map_all_bootmem(bool bsp, boot_info_t *boot_info)
+{
+ boot_kvm_map_all_bootmem(bsp, boot_info);
+}
+
+static inline void __init_recv
+boot_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr)
+{
+ boot_kvm_map_needful_to_equal_virt_area(stack_top_addr);
+}
+
+static inline void __init_recv
+boot_kernel_switch_to_virt(bool bsp, int cpuid,
+ void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync))
+{
+ boot_kvm_switch_to_virt(bsp, cpuid, boot_init_sequel_func);
+}
+
+static inline void __init
+init_terminate_boot_init(bool bsp, int cpuid)
+{
+ init_kvm_terminate_boot_init(bsp, cpuid);
+}
+
+static inline void __init
+boot_parse_param(bootblock_struct_t *bootblock)
+{
+ boot_kvm_parse_param(bootblock);
+}
+
+static inline void __init
+boot_clear_bss(void)
+{
+ boot_kvm_clear_bss();
+}
+static inline void __init
+boot_check_bootblock(bool bsp, bootblock_struct_t *bootblock)
+{
+ boot_kvm_check_bootblock(bsp, bootblock);
+}
+
+/* pv_ops is not used in native host/guest mode */
+static inline void native_pv_ops_to_boot_ops(void)
+{
+}
+static inline void native_boot_pv_ops_to_ops(void)
+{
+}
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _E2K_KVM_GUEST_BOOT_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/boot_flags.h b/arch/e2k/include/asm/kvm/guest/boot_flags.h
new file mode 100644
index 0000000..78c048d
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/boot_flags.h
@@ -0,0 +1,43 @@
+/*
+ * E2K boot info flags support on KVM guest
+ *
+ * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_GUEST_BOOT_FLAGS_H_
+#define _E2K_KVM_GUEST_BOOT_FLAGS_H_
+
+#ifndef __ASSEMBLY__
+#include
+
+#include
+#include
+
+/*
+ * bootblock manipulations (read/write/set/reset) in virtual kernel mode
+ * on the physical level:
+ * write-through and uncacheable access on the virtual "physical" address;
+ * the bootblock virtual address can only be read
+ */
+
+#define GUEST_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \
+ DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
+ MAS_BYPASS_ALL_CACHES)
+
+#define GUEST_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \
+ DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
+ field_value, \
+ MAS_BYPASS_ALL_CACHES)
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is native guest kernel */
+#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \
+ GUEST_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field)
+#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \
+ GUEST_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
+ field_value)
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _E2K_KVM_GUEST_BOOT_FLAGS_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/boot_mmu_context.h b/arch/e2k/include/asm/kvm/guest/boot_mmu_context.h
new file mode 100644
index 0000000..5a77e83
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/boot_mmu_context.h
@@ -0,0 +1,36 @@
+/*
+ * guest boot-time mmu_context.h support
+ */
+
+#ifndef _E2K_KVM_GUEST_BOOT_MMU_CONTEXT_H_
+#define _E2K_KVM_GUEST_BOOT_MMU_CONTEXT_H_
+
+#include
+
+#include
+#include
+#include
+#include
+
+/*
+ * Set guest kernel MMU state
+ */
+
+extern void boot_kvm_set_kernel_MMU_state_before(void);
+extern void boot_kvm_set_kernel_MMU_state_after(void);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+
+static inline void boot_set_kernel_MMU_state_before(void)
+{
+ boot_kvm_set_kernel_MMU_state_before();
+}
+
+static inline void boot_set_kernel_MMU_state_after(void)
+{
+ boot_kvm_set_kernel_MMU_state_after();
+}
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* _E2K_KVM_GUEST_BOOT_MMU_CONTEXT_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/boot_spinlock.h b/arch/e2k/include/asm/kvm/guest/boot_spinlock.h
new file mode 100644
index 0000000..2a81725
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/boot_spinlock.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_KVM_GUEST_BOOT_SPINLOCK_H
+#define __ASM_KVM_GUEST_BOOT_SPINLOCK_H
+/*
+ * This file implements the arch-dependent parts of kvm guest
+ * boot-time spin_lock()/spin_unlock(): the fast and slow paths
+ *
+ * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#include
+
+extern void kvm_arch_boot_spin_lock_slow(void *lock);
+extern void kvm_arch_boot_spin_locked_slow(void *lock);
+extern void kvm_arch_boot_spin_unlock_slow(void *lock);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* native guest kernel */
+
+#define arch_spin_relax(lock) kvm_cpu_relax()
+#define arch_read_relax(lock) kvm_cpu_relax()
+#define arch_write_relax(lock) kvm_cpu_relax()
+
+static inline void boot_arch_spin_lock_slow(boot_spinlock_t *lock)
+{
+ kvm_arch_boot_spin_lock_slow(lock);
+}
+static inline void boot_arch_spin_locked_slow(boot_spinlock_t *lock)
+{
+ kvm_arch_boot_spin_locked_slow(lock);
+}
+static inline void boot_arch_spin_unlock_slow(boot_spinlock_t *lock)
+{
+ kvm_arch_boot_spin_unlock_slow(lock);
+}
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* __ASM_KVM_GUEST_BOOT_SPINLOCK_H */
diff --git a/arch/e2k/include/asm/kvm/guest/bootinfo.h b/arch/e2k/include/asm/kvm/guest/bootinfo.h
new file mode 100644
index 0000000..0d46491
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/bootinfo.h
@@ -0,0 +1,44 @@
+/*
+ * E2K boot-time initialization virtualization for KVM guest
+ *
+ * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_GUEST_BOOTINFO_H_
+#define _E2K_KVM_GUEST_BOOTINFO_H_
+
+#ifndef __ASSEMBLY__
+
+#include
+
+#include
+
+/*
+ * bootblock manipulations (read/write/set/reset) in virtual kernel mode
+ * on the physical level:
+ * write-through and uncacheable access on the virtual "physical" address;
+ * the bootblock virtual address can only be read
+ */
+
+#define KVM_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \
+ DO_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
+ MAS_BYPASS_ALL_CACHES)
+
+#define KVM_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \
+ DO_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
+ field_value, \
+ MAS_BYPASS_ALL_CACHES)
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+
+#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \
+ KVM_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field)
+#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \
+ KVM_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \
+ field_value)
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _E2K_KVM_GUEST_BOOTINFO_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/cacheflush.h b/arch/e2k/include/asm/kvm/guest/cacheflush.h
new file mode 100644
index 0000000..47e7381
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/cacheflush.h
@@ -0,0 +1,100 @@
+#ifndef __ASM_KVM_GUEST_CACHEFLUSH_H
+#define __ASM_KVM_GUEST_CACHEFLUSH_H
+
+#include
+
+struct icache_range_array;
+struct vm_area_struct;
+struct page;
+
+#ifdef CONFIG_SMP
+/*
+ * The guest kernel maintains pseudo page tables;
+ * real page tables are now managed by the host kernel,
+ * so guest flushes can be empty
+ */
+static inline void
+kvm_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
+{
+}
+static inline void
+kvm_smp_flush_icache_range_array(struct icache_range_array *icache_range_arr)
+{
+}
+static inline void
+kvm_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+}
+static inline void
+kvm_smp_flush_icache_all(void)
+{
+}
+static inline void
+kvm_smp_flush_icache_kernel_line(e2k_addr_t addr)
+{
+}
+#endif /* CONFIG_SMP */
+
+extern void kvm_flush_dcache_line(e2k_addr_t virt_addr);
+extern void kvm_clear_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set);
+extern void kvm_flush_dcache_range(void *addr, size_t len);
+extern void kvm_clear_dcache_l1_range(void *virt_addr, size_t len);
+extern void kvm_write_dcache_l2_reg(unsigned long reg_val,
+ int reg_num, int bank_num);
+extern unsigned long kvm_read_dcache_l2_reg(int reg_num, int bank_num);
+extern int kvm_flush_icache_range(e2k_addr_t start, e2k_addr_t end);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#ifdef CONFIG_SMP
+static inline void
+smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end)
+{
+ kvm_smp_flush_icache_range(start, end);
+}
+static inline void
+smp_flush_icache_range_array(struct icache_range_array *icache_range_arr)
+{
+ kvm_smp_flush_icache_range_array(icache_range_arr);
+}
+static inline void
+smp_flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ kvm_smp_flush_icache_page(vma, page);
+}
+static inline void
+smp_flush_icache_all(void)
+{
+ kvm_smp_flush_icache_all();
+}
+static inline void
+smp_flush_icache_kernel_line(e2k_addr_t addr)
+{
+ kvm_smp_flush_icache_kernel_line(addr);
+}
+#endif /* CONFIG_SMP */
+
+static inline void
+flush_DCACHE_range(void *addr, size_t len)
+{
+ kvm_flush_dcache_range(addr, len);
+}
+static inline void
+clear_DCACHE_L1_range(void *virt_addr, size_t len)
+{
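+	/* thin wrapper: the kvm_ helper (typically a hypercall to the
+	 * host) does the actual work */
+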
kvm_clear_dcache_l1_range(virt_addr, len); +} +static inline void +__flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + int ret; + + ret = kvm_flush_icache_range(start, end); + if (ret) { + panic("%s(): could not flush ICACHE, error %d\n", + __func__, ret); + } +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_KVM_GUEST_CACHEFLUSH_H */ diff --git a/arch/e2k/include/asm/kvm/guest/clkr.h b/arch/e2k/include/asm/kvm/guest/clkr.h new file mode 100644 index 0000000..87e2ae4 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/clkr.h @@ -0,0 +1,17 @@ +#ifndef _ASM_E2K_KVM_GUEST_CLKR_H +#define _ASM_E2K_KVM_GUEST_CLKR_H + +#include +#include + +extern unsigned long long kvm_sched_clock(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline unsigned long long do_sched_clock(void) +{ + return kvm_sched_clock(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _ASM_E2K_KVM_GUEST_CLKR_H */ diff --git a/arch/e2k/include/asm/kvm/guest/console.h b/arch/e2k/include/asm/kvm/guest/console.h new file mode 100644 index 0000000..4da88dd --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/console.h @@ -0,0 +1,33 @@ + +#ifndef _ASM_E2K_KVM_GUEST_CONSOLE_H_ +#define _ASM_E2K_KVM_GUEST_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include + +static inline void +kvm_virt_console_dump_putc(char c) +{ +#if defined(CONFIG_HVC_L) && defined(CONFIG_EARLY_VIRTIO_CONSOLE) + if (early_virtio_cons_enabled) + hvc_l_raw_putc(c); +#endif /* CONFIG_HVC_L && CONFIG_EARLY_VIRTIO_CONSOLE */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +virt_console_dump_putc(char c) +{ + kvm_virt_console_dump_putc(c); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_E2K_KVM_GUEST_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/cpu.h b/arch/e2k/include/asm/kvm/guest/cpu.h new file mode 100644 index 0000000..9af3d55 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/cpu.h @@ -0,0 +1,31 @@ +#ifndef __ASM_KVM_GUEST_CPU_H +#define __ASM_KVM_GUEST_CPU_H + +#ifdef __KERNEL__ + +#include +#include +#include + +extern unsigned long kvm_get_cpu_running_cycles(void); + +static inline bool kvm_vcpu_host_support_hw_hc(void) +{ + kvm_host_info_t *host_info; + + host_info = kvm_get_host_info(); + return host_info->support_hw_hc; +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline unsigned long +get_cpu_running_cycles(void) +{ + return kvm_get_cpu_running_cycles(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_CPU_H */ diff --git a/arch/e2k/include/asm/kvm/guest/debug.h b/arch/e2k/include/asm/kvm/guest/debug.h new file mode 100644 index 0000000..aebb016 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/debug.h @@ -0,0 +1,76 @@ +/* + * KVM guest kernel processes debugging support + * Copyright 2011 Salavat S. 
Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_GUEST_DEBUG_H
+#define _E2K_KVM_GUEST_DEBUG_H
+
+#include
+
+/* Read instruction word (two syllables) from IP address */
+static inline unsigned long
+kvm_read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip)
+{
+ /* guest image should be read on the virtual physical IP */
+ return *((u64 *)pa_to_vpa(phys_ip));
+}
+/* Write modified instruction word at IP address */
+static inline void
+kvm_modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip,
+ unsigned long instr_word)
+{
+ /* guest image should be written on the virtual physical IP */
+ *((u64 *)pa_to_vpa(phys_ip)) = instr_word;
+}
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+
+#define GET_PHYS_ADDR(task, addr) GUEST_GET_PHYS_ADDR(task, addr)
+
+#define debug_guest_regs(task) false /* there are no guests */
+#define get_cpu_type_name() "VCPU" /* virtual CPU */
+
+static inline void print_all_guest_stacks(void)
+{
+ /* nothing to do: the guest has no other guest processes */
+}
+static inline void print_guest_vcpu_stack(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do: the guest has no other guest processes */
+}
+static inline void
+print_guest_stack(struct task_struct *task,
+ stack_regs_t *const regs, bool show_reg_window)
+{
+ /* nothing to do: the guest has no other guest processes */
+}
+static inline void
+host_ftrace_stop(void)
+{
+ HYPERVISOR_ftrace_stop();
+}
+static inline void
+host_ftrace_dump(void)
+{
+ HYPERVISOR_ftrace_dump();
+}
+
+/* Read instruction word (two syllables) from IP address */
+static inline unsigned long
+read_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip)
+{
+ return kvm_read_instr_on_IP(ip, phys_ip);
+}
+/* Write modified instruction word at IP address */
+static inline void
+modify_instr_on_IP(e2k_addr_t ip, e2k_addr_t phys_ip,
+ unsigned long instr_word)
+{
+ kvm_modify_instr_on_IP(ip, phys_ip, instr_word);
+}
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! _E2K_KVM_GUEST_DEBUG_H */
diff --git a/arch/e2k/include/asm/kvm/guest/e2k.h b/arch/e2k/include/asm/kvm/guest/e2k.h
new file mode 100644
index 0000000..8785b28
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/e2k.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_KVM_GUEST_E2K_H_
+#define _ASM_KVM_GUEST_E2K_H_
+
+/* Do not include the header directly, only through asm/e2k.h */
+
+
+#include
+
+#include
+
+#ifdef CONFIG_VIRTUALIZATION
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+extern unsigned int guest_machine_id;
+#define boot_guest_machine_id boot_get_vo_value(guest_machine_id)
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+extern void kvm_set_mach_type_id(void);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is native guest kernel */
+#ifdef CONFIG_E2K_MACHINE
+ #if defined(CONFIG_E2K_VIRT)
+ #define guest_machine_id MACHINE_ID_E2K_VIRT
+ #define boot_guest_machine_id guest_machine_id
+ #else
+ #error "E2K VIRTUAL MACHINE type is not defined"
+ #endif
+#else /* ! CONFIG_E2K_MACHINE */
+extern unsigned int guest_machine_id;
+#define boot_guest_machine_id boot_get_vo_value(guest_machine_id)
+#endif /* CONFIG_E2K_MACHINE */
+
+#define machine_id guest_machine_id
+#define boot_machine_id boot_guest_machine_id
+
+#define get_machine_id() machine_id
+#define boot_get_machine_id() boot_machine_id
+#define set_machine_id(mach_id) (machine_id = (mach_id))
+#define boot_set_machine_id(mach_id) (boot_machine_id = (mach_id))
+
+static inline void set_mach_type_id(void)
+{
+ kvm_set_mach_type_id();
+}
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* CONFIG_VIRTUALIZATION */
+
+#endif /* _ASM_KVM_GUEST_E2K_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/e2k_virt.h b/arch/e2k/include/asm/kvm/guest/e2k_virt.h
new file mode 100644
index 0000000..6eba659
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/e2k_virt.h
@@ -0,0 +1,60 @@
+#ifndef _ASM_KVM_GUEST_E2K_VIRT_H_
+#define _ASM_KVM_GUEST_E2K_VIRT_H_
+
+#include
+#include
+
+#define E2K_VIRT_CPU_VENDOR "Elbrus-MCST"
+#define E2K_VIRT_CPU_FAMILY 0xff
+#define E2K_VIRT_CPU_MODEL IDR_E2K_VIRT_MDL
+#define E2K_VIRT_CPU_REVISION 16 /* 2016 year */
+#define E2K_VIRT_CPU_ISET 07 /* month */
+
+/*
+ * Machine (based on the e2k virtual processor) topology:
+ * It is a classical SMP system with common memory, so it can have only
+ * one node, and this node includes all CPUs
+ */
+
+#define E2K_VIRT_NR_NODE_CPUS KVM_MAX_VCPUS /* all VCPUs */
+#define E2K_VIRT_MAX_NR_NODE_CPUS E2K_VIRT_NR_NODE_CPUS
+#define e2k_virt_cpu_to_node(cpu) (0)
+#define e2k_virt_node_to_cpumask(node, main_cpu_mask) \
+ (main_cpu_mask)
+#define e2k_virt_node_to_first_cpu(node, main_cpu_mask) \
+ (0) /* CPU #0 should always be present */
+
+#define boot_e2k_virt_cpu_to_node(cpu) e2k_virt_cpu_to_node(cpu)
+#define boot_e2k_virt_node_to_cpumask(node, boot_main_cpu_mask) \
+ (boot_main_cpu_mask)
+#define boot_e2k_virt_node_to_first_cpu(node, boot_main_cpu_mask) \
+ e2k_virt_node_to_first_cpu(node, boot_main_cpu_mask)
+
+/*
+ * Local APIC cluster mode is not used for e2k-virt,
+ * so the APIC quad is just all CPUs combined into a single quad #0
+ */
+#define E2K_VIRT_NR_APIC_QUAD_CPUS E2K_VIRT_NR_NODE_CPUS
+#define E2K_VIRT_MAX_APIC_QUADS 1
+#define e2k_virt_apic_quad_to_cpumask(quad, main_cpu_mask) \
+({ \
+ main_cpu_mask; \
+})
+#define e2k_virt_cpu_to_apic_quad(cpu) (0)
+#define e2K_virt_cpu_to_apic_cpu(cpu) (cpu)
+
+/*
+ * IO links and IO controllers topology
+ * E2K virtual machines use the virtio interface to access IO devices.
+ * All other machines use IO links and their own chipset, and the main
+ * IO bus controller is IOHUB.
+ * Without losing generality, the IO controller of e2k-virt can be
+ * considered as connected through a simple IO link too, but one should
+ * not forget that the IO controller is VIRTIO whenever the details
+ * are essential
+ */
+#define E2K_VIRT_MAX_NUMIOLINKS 1 /* e2k-virt has only one IO */
+ /* controller connected through */
+ /* North bridge emulated by QEMU */
+#define E2K_VIRT_NODE_IOLINKS E2K_VIRT_MAX_NUMIOLINKS
+
+#endif /* _ASM_KVM_GUEST_E2K_VIRT_H_ */
diff --git a/arch/e2k/include/asm/kvm/guest/fast_syscalls.h b/arch/e2k/include/asm/kvm/guest/fast_syscalls.h
new file mode 100644
index 0000000..51638f7
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/fast_syscalls.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H
+#define _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H
+
+#include
+#include
+#include
+
+int kvm_do_fast_clock_gettime(const clockid_t which_clock,
+ struct timespec *tp);
+int kvm_fast_sys_clock_gettime(const clockid_t which_clock,
+ struct timespec __user *tp);
+int kvm_do_fast_gettimeofday(struct timeval *tv);
+int kvm_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is native guest kernel (not paravirtualized based on pv_ops) */
+static inline int
+do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp)
+{
+ return kvm_do_fast_clock_gettime(which_clock, tp);
+}
+
+static inline int
+fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+{
+ return kvm_fast_sys_clock_gettime(which_clock, tp);
+}
+
+static inline int
+do_fast_gettimeofday(struct timeval *tv)
+{
+ return kvm_do_fast_gettimeofday(tv);
+}
+static inline int
+fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize)
+{
+ return kvm_fast_sys_siggetmask(oset, sigsetsize);
+}
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* _ASM_E2K_KVM_GUEST_FAST_SYSCALLS_H */
 diff --git a/arch/e2k/include/asm/kvm/guest/gregs.h b/arch/e2k/include/asm/kvm/guest/gregs.h
new file mode 100644
index 0000000..2f34d24
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/gregs.h
@@ -0,0 +1,90 @@
+#ifndef __KVM_GUEST_E2K_GREGS_H
+#define __KVM_GUEST_E2K_GREGS_H
+
+/* Do not include this header directly, include */
+
+#include
+
+#ifndef CONFIG_E2K_ISET_VER
+#define KVM_SAVE_HOST_GREGS(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ machine.guest.save_host_gregs(&__ti->h_gregs); \
+ } \
+})
+#define KVM_RESTORE_HOST_GREGS(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ machine.guest.restore_host_gregs(&__ti->h_gregs); \
+ } \
+})
+#elif CONFIG_E2K_ISET_VER < 5
+#define KVM_SAVE_HOST_GREGS(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ DO_SAVE_VCPU_STATE_GREGS_V2(__ti->h_gregs.g); \
+ } \
+})
+#define KVM_RESTORE_HOST_GREGS(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ DO_RESTORE_VCPU_STATE_GREGS_V2(__ti->h_gregs.g); \
+ } \
+})
+#else /* CONFIG_E2K_ISET_VER >= 5 */
+#define KVM_SAVE_HOST_GREGS(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ DO_SAVE_VCPU_STATE_GREGS_V5(__ti->h_gregs.g); \
+ } \
+})
+#define KVM_RESTORE_HOST_GREGS(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ DO_RESTORE_VCPU_STATE_GREGS_V5(__ti->h_gregs.g); \
+ } \
+})
+#endif /* CONFIG_E2K_ISET_VER */
+
+/* save/restore of globals is executed by the host kernel, so the guest
+ * does nothing */
+#define KVM_SAVE_KERNEL_GREGS_AND_SET(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ NATIVE_SAVE_KERNEL_GREGS_AND_SET(__ti); \
+ } \
+})
+#define KVM_RESTORE_KERNEL_GREGS_AND_FREE(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ NATIVE_RESTORE_KERNEL_GREGS(&(__ti)->k_gregs); \
+ } \
+})
+#define KVM_RESTORE_KERNEL_GREGS_IN_SYSCALL(__ti) \
+({ \
+ if (IS_HV_GM()) { \
+ E2K_CMD_SEPARATOR; /* so as not to have a privileged action */ \
+ NATIVE_RESTORE_KERNEL_GREGS_IN_SYSCALL(__ti); \
+ } else { \
+ /* the macro should be used only to return to the guest */ \
+ /* kernel from the host, so restore the kernel gregs state */ \
+ ONLY_SET_KERNEL_GREGS(__ti); \
+ } \
+})
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is native KVM guest kernel (not paravirtualized) */
+
+#define HOST_SAVE_VCPU_STATE_GREGS(__ti)
+#define HOST_RESTORE_VCPU_STATE_GREGS(__ti)
+
+/* save/restore of globals is executed by the host kernel, so the guest
+ * does nothing */
+#define SAVE_KERNEL_GREGS_AND_SET(thread_info) \
+ KVM_SAVE_KERNEL_GREGS_AND_SET(thread_info)
+#define RESTORE_KERNEL_GREGS_AND_FREE(thread_info) \
+ KVM_RESTORE_KERNEL_GREGS_AND_FREE(thread_info)
+#define RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \
+ KVM_RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info)
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* __KVM_GUEST_E2K_GREGS_H */
diff --git a/arch/e2k/include/asm/kvm/guest/host_printk.h b/arch/e2k/include/asm/kvm/guest/host_printk.h
new file mode 100644
index 0000000..c3cf946
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/host_printk.h
@@ -0,0 +1,20 @@
+/*
+ * KVM guest printk() on host support
+ *
+ * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_GUEST_HOST_PRINTK_H
+#define _E2K_KVM_GUEST_HOST_PRINTK_H
+
+#include
+#include
+
+extern int kvm_host_printk(const char *fmt, ...);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is native guest */
+#define host_printk(fmt, args...) kvm_host_printk(fmt, ##args)
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! _E2K_KVM_GUEST_HOST_PRINTK_H */
diff --git a/arch/e2k/include/asm/kvm/guest/hvc_l.h b/arch/e2k/include/asm/kvm/guest/hvc_l.h
new file mode 100644
index 0000000..5823e1f
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/hvc_l.h
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * hvc_l.h (based on the XEN console interface)
+ *
+ * HyperVisor Console I/O interface for Elbrus guest OSes.
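+ *
+ * The in/out buffers below are rings indexed by free-running counters
+ * that are reduced modulo the buffer size, so a console input consumer
+ * looks roughly like this (an illustrative sketch in the spirit of the
+ * Xen console protocol this is based on; intf and cons are placeholder
+ * names):
+ *
+ *	while (cons != intf->in_prod) {
+ *		char c = intf->in[MASK_LCONS_IDX(cons++, intf->in)];
+ *		handle(c);
+ *	}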
+ * + * Copyright (c) 2005, Keir Fraser + * (c) 2013 Salavat Gilyazov + */ + +#ifndef __L_PUBLIC_IO_CONSOLE_H__ +#define __L_PUBLIC_IO_CONSOLE_H__ + +typedef uint32_t LCONS_RING_IDX; + +#define MASK_LCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) + +typedef struct lcons_interface { + char in[1024]; /* input buffer */ + char out[2048]; /* output buffer */ + LCONS_RING_IDX in_cons, in_prod; /* input buffer indexes */ + LCONS_RING_IDX out_cons, out_prod; /* output buffer indexes */ +} lcons_interface_t; + +#define LCONS_OUTPUT_NOTIFIER (('l'<<24) | ('c'<<16) | ('o'<<8) | 't') +#define LCONS_INPUT_NOTIFIER (('l'<<24) | ('c'<<16) | ('i'<<8) | 'n') + +#endif /* __L_PUBLIC_IO_CONSOLE_H__ */ diff --git a/arch/e2k/include/asm/kvm/guest/hw_stacks.h b/arch/e2k/include/asm/kvm/guest/hw_stacks.h new file mode 100644 index 0000000..50c4ed9 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/hw_stacks.h @@ -0,0 +1,88 @@ +/* + * KVM guest hardware stacks access support + * + * Copyright (C) 2016 MCST + */ + +#ifndef _E2K_KVM_GUEST_HW_STACKS_H_ +#define _E2K_KVM_GUEST_HW_STACKS_H_ + +#ifndef __ASSEMBLY__ + +#include + +/* procedure chain stack items access */ +extern unsigned long kvm_get_active_cr0_lo_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern unsigned long kvm_get_active_cr0_hi_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern unsigned long kvm_get_active_cr1_lo_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern unsigned long kvm_get_active_cr1_hi_value(e2k_addr_t base, + e2k_addr_t cr_ind); +extern void kvm_put_active_cr0_lo_value(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); +extern void kvm_put_active_cr0_hi_value(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); +extern void kvm_put_active_cr1_lo_value(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); +extern void kvm_put_active_cr1_hi_value(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +/* + * Procedure chain stack items access + */ +static inline unsigned long +get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr0_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr0_hi_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr1_lo_value(base, cr_ind); +} +static inline unsigned long +get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind) +{ + return kvm_get_active_cr1_hi_value(base, cr_ind); +} +static inline void +put_active_cr0_lo_value(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr0_lo_value(cr0_lo_value, base, cr_ind); +} +static inline void +put_active_cr0_hi_value(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr0_hi_value(cr0_hi_value, base, cr_ind); +} +static inline void +put_active_cr1_lo_value(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr1_lo_value(cr1_lo_value, base, cr_ind); +} +static inline void +put_active_cr1_hi_value(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind) +{ + kvm_put_active_cr1_hi_value(cr1_hi_value, base, cr_ind); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */
+#endif /* _E2K_KVM_GUEST_HW_STACKS_H_ */
+
+
diff --git a/arch/e2k/include/asm/kvm/guest/io.h b/arch/e2k/include/asm/kvm/guest/io.h
new file mode 100644
index 0000000..74007de
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/io.h
@@ -0,0 +1,426 @@
+
+#ifndef _E2K_KVM_GUEST_IO_H_
+#define _E2K_KVM_GUEST_IO_H_
+
+#include
+
+#include
+
+#define GUEST_IO_PORTS_ADDRESS(port) (GUEST_IO_PORTS_VIRT_BASE + (port))
+static inline void
+KVM_DEBUG_OUTB(u8 byte, u16 port)
+{
+ u8 __iomem *io_port = (u8 __iomem *)GUEST_IO_PORTS_ADDRESS(port);
+
+ *io_port = byte;
+ wmb(); /* wait for the write to complete */
+}
+
+static inline u8
+KVM_DEBUG_INB(u16 port)
+{
+ u8 __iomem *io_port = (u8 __iomem *)GUEST_IO_PORTS_ADDRESS(port);
+ u8 data;
+
+ data = *io_port;
+ rmb(); /* wait for the read to complete */
+ return data;
+}
+
+static inline u32
+KVM_DEBUG_INL(u16 port)
+{
+ u32 __iomem *io_port = (u32 __iomem *)GUEST_IO_PORTS_ADDRESS(port);
+ u32 data;
+
+ data = *io_port;
+ rmb(); /* wait for the read to complete */
+ return data;
+}
+
+extern void kvm_writeb(u8 b, volatile void __iomem *addr);
+extern void kvm_writew(u16 w, volatile void __iomem *addr);
+extern void kvm_writel(u32 l, volatile void __iomem *addr);
+extern void kvm_writell(u64 q, volatile void __iomem *addr);
+
+extern u8 kvm_readb(const volatile void __iomem *addr);
+extern u16 kvm_readw(const volatile void __iomem *addr);
+extern u32 kvm_readl(const volatile void __iomem *addr);
+extern u64 kvm_readll(const volatile void __iomem *addr);
+
+extern void boot_kvm_writeb(u8 b, void __iomem *addr);
+extern void boot_kvm_writew(u16 w, void __iomem *addr);
+extern void boot_kvm_writel(u32 l, void __iomem *addr);
+extern void boot_kvm_writell(u64 q, void __iomem *addr);
+
+extern u8 boot_kvm_readb(void __iomem *addr);
+extern u16 boot_kvm_readw(void __iomem *addr);
+extern u32 boot_kvm_readl(void __iomem *addr);
+extern u64 boot_kvm_readll(void __iomem *addr);
+
+extern u8 kvm_inb(unsigned short port);
+extern u16 kvm_inw(unsigned short port);
+extern u32 kvm_inl(unsigned short port);
+
+extern void kvm_outb(unsigned char byte, unsigned short port);
+extern void kvm_outw(unsigned short halfword, unsigned short port);
+extern void kvm_outl(unsigned int word, unsigned short port);
+
+extern void kvm_outsb(unsigned short port, const void *src, unsigned long count);
+extern void kvm_outsw(unsigned short port, const void *src, unsigned long count);
+extern void kvm_outsl(unsigned short port, const void *src, unsigned long count);
+extern void kvm_insb(unsigned short port, void *src, unsigned long count);
+extern void kvm_insw(unsigned short port, void *src, unsigned long count);
+extern void kvm_insl(unsigned short port, void *src, unsigned long count);
+extern void kvm_conf_inb(unsigned int domain, unsigned int bus,
+ unsigned long port, u8 *byte);
+extern void kvm_conf_inw(unsigned int domain, unsigned int bus,
+ unsigned long port, u16 *hword);
+extern void kvm_conf_inl(unsigned int domain, unsigned int bus,
+ unsigned long port, u32 *word);
+extern void kvm_conf_outb(unsigned int domain, unsigned int bus,
+ unsigned long port, u8 byte);
+extern void kvm_conf_outw(unsigned int domain, unsigned int bus,
+ unsigned long port, u16 hword);
+extern void kvm_conf_outl(unsigned int domain, unsigned int bus,
+ unsigned long port, u32 word);
+
+extern unsigned long kvm_notify_io(unsigned int notifier_io);
+
+extern int __init kvm_arch_pci_init(void);
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+
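+/*
+ * The wrappers below all follow one pattern: a hardware-virtualized
+ * guest machine (IS_HV_GM()) performs the native MMIO/port operation
+ * directly, otherwise the hypercall-backed kvm_*() variant is called.
+ * An illustrative caller (mmio_base and the register offsets are
+ * made-up names):
+ *
+ *	u8 st = kvm_hv_readb(mmio_base + 0x10);
+ *	kvm_hv_writeb(st | 1, mmio_base + 0x14);
+ */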
+static inline void kvm_hv_writeb(u8 b, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeb(b, addr); + + kvm_writeb(b, addr); +} +static inline void kvm_hv_writew(u16 w, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writew(w, addr); + + kvm_writew(w, addr); +} +static inline void kvm_hv_writel(u32 l, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writel(l, addr); + + kvm_writel(l, addr); +} +static inline void kvm_hv_writeq(u64 q, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeq(q, addr); + + kvm_writell(q, addr); +} + +static inline void boot_writeb(u8 b, void __iomem *addr) +{ + boot_kvm_writeb(b, addr); +} +static inline void boot_writew(u16 w, void __iomem *addr) +{ + boot_kvm_writew(w, addr); +} +static inline void boot_writel(u32 l, void __iomem *addr) +{ + boot_kvm_writel(l, addr); +} +static inline void boot_writeq(u64 l, void __iomem *addr) +{ + boot_kvm_writell(l, addr); +} + +static inline u8 kvm_hv_readb(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readb(addr); + + return kvm_readb(addr); +} +static inline u16 kvm_hv_readw(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readw(addr); + + return kvm_readw(addr); +} +static inline u32 kvm_hv_readl(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readl(addr); + + return kvm_readl(addr); +} +static inline u64 kvm_hv_readq(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readq(addr); + + return kvm_readll(addr); +} + +static inline u8 boot_readb(void __iomem *addr) +{ + return boot_kvm_readb(addr); +} +static inline u16 boot_readw(void __iomem *addr) +{ + return boot_kvm_readw(addr); +} +static inline u32 boot_readl(void __iomem *addr) +{ + return boot_kvm_readl(addr); +} +static inline u64 boot_readq(void __iomem *addr) +{ + return boot_kvm_readll(addr); +} + +/* + * _relaxed() accessors. 
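+ * As with the generic Linux accessors, the _relaxed() forms give no
+ * ordering guarantees relative to other memory accesses; callers that
+ * need ordering use the plain accessors above.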
+ */ +static inline u8 kvm_readb_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readb_relaxed(addr); + + return kvm_readb(addr); +} + +static inline u16 kvm_readw_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readw_relaxed(addr); + + return kvm_readw(addr); +} + +static inline u32 kvm_readl_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readl_relaxed(addr); + + return kvm_readl(addr); +} + +static inline u64 kvm_readq_relaxed(const volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_readq_relaxed(addr); + + return kvm_readll(addr); +} + +static inline void kvm_writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeb_relaxed(value, addr); + + kvm_writeb(value, addr); +} + +static inline void kvm_writew_relaxed(u16 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writew_relaxed(value, addr); + + kvm_writew(value, addr); +} + +static inline void kvm_writel_relaxed(u32 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writel_relaxed(value, addr); + + kvm_writel(value, addr); +} + +static inline void kvm_writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + if (IS_HV_GM()) + return native_writeq_relaxed(value, addr); + + kvm_writell(value, addr); +} + + +static inline u8 kvm_hv_inb(unsigned long port) +{ + if (IS_HV_GM()) + return native_inb(port); + + return kvm_inb(port); +} +static inline u16 kvm_hv_inw(unsigned long port) +{ + if (IS_HV_GM()) + return native_inw(port); + + return kvm_inw(port); +} +static inline u32 kvm_hv_inl(unsigned long port) +{ + if (IS_HV_GM()) + return native_inl(port); + + return kvm_inl(port); +} +static inline void kvm_hv_outb(unsigned char byte, unsigned long port) +{ + if (IS_HV_GM()) + return native_outb(byte, port); + + kvm_outb(byte, port); +} +static inline void kvm_hv_outw(unsigned short halfword, unsigned long port) +{ + if (IS_HV_GM()) + return native_outw(halfword, port); + + kvm_outw(halfword, port); +} +static inline void kvm_hv_outl(unsigned int word, unsigned long port) +{ + if (IS_HV_GM()) + return native_outl(word, port); + + kvm_outl(word, port); +} + +static inline void kvm_hv_outsb(unsigned short port, const void *src, unsigned long count) +{ + if (IS_HV_GM()) + return native_outsb(port, src, count); + + kvm_outsb(port, src, count); +} +static inline void kvm_hv_outsw(unsigned short port, const void *src, unsigned long count) +{ + if (IS_HV_GM()) + return native_outsw(port, src, count); + + kvm_outsw(port, src, count); +} +static inline void kvm_hv_outsl(unsigned short port, const void *src, unsigned long count) +{ + if (IS_HV_GM()) + return native_outsl(port, src, count); + + kvm_outsl(port, src, count); +} + +static inline void kvm_hv_insb(unsigned short port, void *dst, unsigned long count) +{ + if (IS_HV_GM()) + return native_insb(port, dst, count); + + kvm_insb(port, dst, count); +} +static inline void kvm_hv_insw(unsigned short port, void *dst, unsigned long count) +{ + if (IS_HV_GM()) + return native_insw(port, dst, count); + + kvm_insw(port, dst, count); +} +static inline void kvm_hv_insl(unsigned short port, void *dst, unsigned long count) +{ + if (IS_HV_GM()) + return native_insl(port, dst, count); + + kvm_insl(port, dst, count); +} + +static inline void +conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + if (IS_HV_GM()) + return native_conf_inb(domain, bus, port, byte); + + kvm_conf_inb(domain, bus, 
port, byte); +} +static inline void +conf_inw(unsigned int domain, unsigned int bus, unsigned long port, u16 *hword) +{ + if (IS_HV_GM()) + return native_conf_inw(domain, bus, port, hword); + + kvm_conf_inw(domain, bus, port, hword); +} +static inline void +conf_inl(unsigned int domain, unsigned int bus, unsigned long port, u32 *word) +{ + if (IS_HV_GM()) + return native_conf_inl(domain, bus, port, word); + + kvm_conf_inl(domain, bus, port, word); +} +static inline void +conf_outb(unsigned int domain, unsigned int bus, unsigned long port, u8 byte) +{ + if (IS_HV_GM()) + return native_conf_outb(domain, bus, port, byte); + + kvm_conf_outb(domain, bus, port, byte); +} +static inline void +conf_outw(unsigned int domain, unsigned int bus, unsigned long port, u16 hword) +{ + if (IS_HV_GM()) + return native_conf_outw(domain, bus, port, hword); + + kvm_conf_outw(domain, bus, port, hword); +} +static inline void +conf_outl(unsigned int domain, unsigned int bus, unsigned long port, u32 word) +{ + if (IS_HV_GM()) + return native_conf_outl(domain, bus, port, word); + + kvm_conf_outl(domain, bus, port, word); +} + +static inline void boot_debug_cons_outb(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static inline u8 boot_debug_cons_inb(u16 port) +{ + return KVM_DEBUG_INB(port); +} +static inline u32 boot_debug_cons_inl(u16 port) +{ + return KVM_DEBUG_INL(port); +} +static inline void debug_cons_outb(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static inline void debug_cons_outb_p(u8 byte, u16 port) +{ + KVM_DEBUG_OUTB(byte, port); +} +static inline u8 debug_cons_inb(u16 port) +{ + return KVM_DEBUG_INB(port); +} +static inline u32 debug_cons_inl(u16 port) +{ + return KVM_DEBUG_INL(port); +} + +static inline int __init arch_pci_init(void) +{ + return kvm_arch_pci_init(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_GUEST_IO_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/irq.h b/arch/e2k/include/asm/kvm/guest/irq.h new file mode 100644 index 0000000..34251df --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/irq.h @@ -0,0 +1,85 @@ + +#ifndef __E2K_ASM_KVM_GUEST_IRQ_H_ +#define __E2K_ASM_KVM_GUEST_IRQ_H_ + +#include +#include +#include + +/* Interrupt types. 
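 * A VIRQ is bound with kvm_request_virq() or, for direct injection,
 * kvm_request_direct_virq() declared below; an illustrative call
 * (my_handler and my_dev are placeholder names):
 *	kvm_request_direct_virq(virq, my_handler, cpu, "my-virq", my_dev);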
*/ +typedef enum kvm_irq_type { + IRQT_UNBOUND = 0, + IRQT_VIRQ, + IRQT_IPI, +} kvm_irq_type_t; + +#define KVM_NR_IRQS NR_IRQS /* now limited by common NR IRQS */ + /* as on host */ +#define KVM_NR_VIRQS_PER_CPU 4 +#define KVM_NR_VCPUS (KVM_NR_VIRQS_PER_CPU * NR_CPUS) +/* + * Modes to handle Virtual IRQs (see field 'flags' below) + */ +#define BY_DIRECT_INJ_VIRQ_MODE 3 /* handle VIRQ by direct */ + /* injection on VCPU */ +#define BY_DIRECT_INJ_VIRQ_FLAG (1UL << BY_DIRECT_INJ_VIRQ_MODE) + +static inline unsigned long +kvm_get_default_virq_flags(int virq_id) +{ + unsigned long def_flags = 0; + +#ifdef CONFIG_DIRECT_VIRQ_INJECTION + def_flags |= BY_DIRECT_INJ_VIRQ_FLAG; +#endif /* CONFIG_DIRECT_VIRQ_INJECTION */ + + return def_flags; +} + +typedef struct kvm_virq_info { + unsigned long mode; /* handling mode of virtual IRQ */ + /* (see above) */ + unsigned long flags; /* flags of virtual IRQ (see above) */ + int virq_nr; /* # of VIRQ */ + int gpid_nr; /* guest kernel thread ID on host */ + void *dev_id; /* VIRQ device ID */ + irq_handler_t handler; /* VIRQ handler */ + atomic_t *count; /* pointer to current atomic counter */ + /* of received VIRQs */ + /* counter is common for host & guest */ + struct task_struct *task; /* kernel thread task to handle VIRQ */ +} kvm_virq_info_t; + +/* + * Packed IRQ information: + * type - enum kvm_irq_type + * cpu - cpu this event channel is bound to + * index - type-specific information: + * PIRQ - vector, with MSB being "needs EIO" + * VIRQ - virq number + * IPI - IPI vector + * EVTCHN - + */ +typedef struct kvm_irq_info { + kvm_irq_type_t type; /* type */ + int cpu; /* cpu bound (-1 if not bound) */ + bool active; /* IRQ is active */ + + union { /* type-specific info */ + kvm_virq_info_t virq; + } u; +} kvm_irq_info_t; + +extern int kvm_request_virq(int virq, irq_handler_t handler, int cpu, + unsigned long irqflags, const char *name, void *dev); +static inline int +kvm_request_direct_virq(int virq, irq_handler_t handler, int cpu, + const char *name, void *dev) +{ + return kvm_request_virq(virq, handler, cpu, + BY_DIRECT_INJ_VIRQ_FLAG, name, dev); +} + +extern int kvm_free_virq(int virq, int cpu, void *dev); + +#endif /* __E2K_ASM_KVM_GUEST_IRQ_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/machdep.h b/arch/e2k/include/asm/kvm/guest/machdep.h new file mode 100644 index 0000000..3e5fdf8 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/machdep.h @@ -0,0 +1,30 @@ +#ifndef _E2K_KVM_GUEST_MACHDEP_H_ +#define _E2K_KVM_GUEST_MACHDEP_H_ + +#include +#include + +#ifdef __KERNEL__ + +#ifdef CONFIG_VIRTUALIZATION + +typedef struct guest_machdep { + /* only for guest kernel and machines */ + int id; /* guest machine Id */ + int rev; /* guest VCPU revision */ + unsigned char iset_ver; /* Instruction set version */ + + /* guest interface functions */ +} guest_machdep_t; + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel (not paravirtualized based on pv_ops) */ +typedef struct host_machdep { + /* nothing to support and do */ +} host_machdep_t; +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ +#endif /* __KERNEL__ */ + +#endif /* _E2K_KVM_GUEST_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/mmu.h b/arch/e2k/include/asm/kvm/guest/mmu.h new file mode 100644 index 0000000..6473af2 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/mmu.h @@ -0,0 +1,156 @@ +#ifndef __ASM_KVM_GUEST_MMU_H +#define __ASM_KVM_GUEST_MMU_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +extern long 
kvm_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store); +extern long kvm_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, + u8 *data_tag, u64 ld_rec_opc, int chan); +extern long kvm_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load); +extern long kvm_recovery_faulted_load_to_greg(e2k_addr_t address, + u32 greg_num_d, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi); +extern long kvm_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to); +extern long kvm_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to); +extern long kvm_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to); + +static inline void +kvm_handle_mpdma_fault(e2k_addr_t hva) +{ + /* the SIC NBSR model is not used on the guest */ +} + +static inline bool +kvm_is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + if (HOST_KERNEL_GREGS_PAIR_MASK == 0 || + !(HOST_KERNEL_GREGS_PAIR_MASK & (1UL << greg_num_d))) + /* the register is not used by host or guest */ + /* to support virtualization */ + return false; + + *greg_copy = ti->h_gregs.g[greg_num_d - HOST_GREGS_PAIRS_START].xreg; + return true; +} + +#ifdef CONFIG_KVM_GUEST_KERNEL + +static inline int +guest_addr_to_host(void **addr, pt_regs_t *regs) +{ + return native_guest_addr_to_host(addr); +} + +static inline void * +guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs) +{ + /* there are no nested guests, so no conversion is needed */ + return native_guest_ptr_to_host(ptr, size); +} + +static inline bool +is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + return kvm_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); +} +static inline long +recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + if (likely(IS_HV_GM())) + return native_recovery_faulted_tagged_store(address, wr_data, + data_tag, st_rec_opc, data_ext, data_ext_tag, + opc_ext, chan, qp_store, atomic_store); + else + return kvm_recovery_faulted_tagged_store(address, wr_data, + data_tag, st_rec_opc, data_ext, data_ext_tag, + opc_ext, chan, qp_store, atomic_store); +} +static inline long +recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan) +{ + if (likely(IS_HV_GM())) + return native_recovery_faulted_load(address, ld_val, + data_tag, ld_rec_opc, chan); + else + return kvm_recovery_faulted_load(address, ld_val, + data_tag, ld_rec_opc, chan); +} +static inline long +recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load) +{ + if (likely(IS_HV_GM())) + return native_recovery_faulted_move(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc, chan, + qp_load, atomic_load); + else + return kvm_recovery_faulted_move(addr_from, addr_to, + addr_to_hi, vr, ld_rec_opc, chan, + qp_load, atomic_load); +} +static inline long +recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, int vr, + u64 ld_rec_opc, int chan, int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi) +{ + if (likely(IS_HV_GM())) + return native_recovery_faulted_load_to_greg(address, 
greg_num_d, + vr, ld_rec_opc, chan, qp_load, atomic_load, + saved_greg_lo, saved_greg_hi); + else + return kvm_recovery_faulted_load_to_greg(address, greg_num_d, + vr, ld_rec_opc, chan, qp_load, atomic_load, + saved_greg_lo, saved_greg_hi); +} +static inline long +move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + if (likely(IS_HV_GM())) + return native_move_tagged_word(addr_from, addr_to); + else + return kvm_move_tagged_word(addr_from, addr_to); +} +static inline long +move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + if (likely(IS_HV_GM())) + return native_move_tagged_dword(addr_from, addr_to); + else + return kvm_move_tagged_dword(addr_from, addr_to); +} +static inline long +move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + if (likely(IS_HV_GM())) + return native_move_tagged_qword(addr_from, addr_to); + else + return kvm_move_tagged_qword(addr_from, addr_to); +} + +static inline void +handle_mpdma_fault(e2k_addr_t hva) +{ + kvm_handle_mpdma_fault(hva); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_MMU_H */ diff --git a/arch/e2k/include/asm/kvm/guest/mmu_context.h b/arch/e2k/include/asm/kvm/guest/mmu_context.h new file mode 100644 index 0000000..d44dd54 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/mmu_context.h @@ -0,0 +1,27 @@ +#ifndef __ASM_KVM_GUEST_MMU_CONTEXT_H +#define __ASM_KVM_GUEST_MMU_CONTEXT_H + +#ifdef __KERNEL__ + +#include + +extern void kvm_activate_mm(struct mm_struct *active_mm, + struct mm_struct *mm); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + kvm_activate_mm(active_mm, mm); +} +static inline void +deactivate_mm(struct task_struct *dead_task, struct mm_struct *mm) +{ + native_deactivate_mm(dead_task, mm); + HYPERVISOR_switch_to_guest_init_mm(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_KVM_GUEST_MMU_CONTEXT_H */ diff --git a/arch/e2k/include/asm/kvm/guest/pgatomic.h b/arch/e2k/include/asm/kvm/guest/pgatomic.h new file mode 100644 index 0000000..9e925d7 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/pgatomic.h @@ -0,0 +1,165 @@ +/* + * E2K page table atomic update operations. + * + * Copyright 2018 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_PGATOMIC_H +#define _E2K_KVM_GUEST_PGATOMIC_H + +#include + +#include +#include +#include + +#ifdef CONFIG_KVM_SHADOW_PT +extern pgprot_t kvm_pt_atomic_update(struct mm_struct *mm, + unsigned long addr, pgprot_t *ptp, + pt_atomic_op_t atomic_op, pgprotval_t prot_mask); +extern pgprot_t kvm_pt_atomic_clear_relaxed(pgprotval_t ptot_mask, + pgprot_t *pgprot); + +extern pte_t kvm_get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address); + +static inline pgprotval_t +kvm_pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_set_wrprotect_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_SET_WRPROTECT, _PAGE_INIT_WRITEABLE)); + } +} + +static inline pgprotval_t +kvm_pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_get_and_clear_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_GET_AND_CLEAR, _PAGE_INIT_VALID)); + } +} + +static inline pgprotval_t +kvm_pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_get_and_xchg_atomic(newval, &pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_GET_AND_XCHG, newval)); + } +} + +static inline pgprotval_t +kvm_pt_clear_relaxed_atomic(pgprotval_t prot_mask, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_clear_relaxed_atomic(prot_mask, + &pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_clear_relaxed(prot_mask, + pgprot)); + } +} + +static inline pgprotval_t +kvm_pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_clear_young_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_TEST_AND_CLEAR_YOUNG, + _PAGE_INIT_ACCESSED)); + } +} + +static inline pgprotval_t +kvm_pt_modify_prot_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (IS_HV_MMU_TDP()) { + return native_pt_modify_prot_atomic(&pgprot->pgprot); + } else { + return pgprot_val(kvm_pt_atomic_update(mm, addr, pgprot, + ATOMIC_MODIFY_START, + _PAGE_INIT_VALID)); + } +} +static inline pte_t kvm_ptep_get_and_clear_to_move(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return __pte(kvm_pt_get_and_clear_atomic(mm, addr, (pgprot_t *)ptep)); +} +#elif defined(CONFIG_KVM_GUEST_KERNEL) + #error "CONFIG_KVM_SHADOW_PT should be set for guest paravirtualized kernel" +#endif /* CONFIG_KVM_SHADOW_PT */ + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization on pv_ops) */ + +static inline pgprotval_t +pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_set_wrprotect_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_get_and_clear_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + return kvm_pt_get_and_xchg_atomic(mm, addr, newval, pgprot); +} + +static inline pgprotval_t +pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t 
*pgprot) +{ + return kvm_pt_clear_relaxed_atomic(mask, pgprot); +} + +static inline pgprotval_t +pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_clear_young_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_modify_prot_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return kvm_pt_modify_prot_atomic(mm, addr, pgprot); +} + +static inline pte_t ptep_get_and_clear_to_move(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return kvm_ptep_get_and_clear_to_move(mm, addr, ptep); +} + +static inline pte_t get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return kvm_get_pte_for_address(vma, address); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_GUEST_PGATOMIC_H */ diff --git a/arch/e2k/include/asm/kvm/guest/process.h b/arch/e2k/include/asm/kvm/guest/process.h new file mode 100644 index 0000000..05c864d --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/process.h @@ -0,0 +1,869 @@ +/* + * KVM guest kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_PROCESS_H +#define _E2K_KVM_GUEST_PROCESS_H + +#include +#include +#include +#include + +#undef DEBUG_USER_STACKS_MODE +#undef DebugKVMUS +#define DEBUG_USER_STACKS_MODE 0 +#define DebugKVMUS(fmt, args...) \ +({ \ + if (DEBUG_USER_STACKS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +extern bool debug_ustacks; +#undef DEBUG_USER_STACKS_MODE +#undef DebugUST +#define DEBUG_USER_STACKS_MODE 0 /* guest user stacks debug mode */ +#define DebugUST(fmt, args...) \ +({ \ + if (debug_ustacks) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* real flush of hardware stacks should be done by host hypercall */ +/* so here nothing to do */ +#ifdef CONFIG_KVM_GUEST_HW_PV +# define KVM_FLUSHCPU \ +do \ + if (IS_HV_GM()) { \ + NATIVE_FLUSHCPU; \ + } \ +while (false) +# define KVM_FLUSHR \ +do \ + if (IS_HV_GM()) { \ + NATIVE_FLUSHR; \ + } \ +while (false) +# define KVM_FLUSHC \ +do \ + if (IS_HV_GM()) { \ + NATIVE_FLUSHC; \ + } \ +while (false) +# define BOOT_KVM_FLUSHCPU \ +do \ + if (BOOT_IS_HV_GM()) { \ + NATIVE_FLUSHCPU; \ + } \ +while (false) +# define BOOT_KVM_FLUSHR \ +do \ + if (BOOT_IS_HV_GM()) { \ + NATIVE_FLUSHR; \ + } \ +while (false) +# define BOOT_KVM_FLUSHC \ +do \ + if (BOOT_IS_HV_GM()) { \ + NATIVE_FLUSHC; \ + } \ +while (false) +#else /* ! 
CONFIG_KVM_GUEST_HW_PV */ +# define KVM_FLUSHCPU +# define KVM_FLUSHR +# define KVM_FLUSHC +# define BOOT_KVM_FLUSHCPU +# define BOOT_KVM_FLUSHR +# define BOOT_KVM_FLUSHC +#endif /* CONFIG_KVM_GUEST_HW_PV */ + +#define kvm_kernel_mode(regs) kvm_from_kernel_IP((regs)->crs.cr0_hi) + +#define kvm_from_trap_on_kernel(regs) \ + is_trap_from_kernel(regs, GUEST_TASK_SIZE) + +/* + * to define kernel_mode in trap we must use trap_ip from trap celler + * (in user code may be sys_call and psr may be after sys_call) + */ +#define kvm_trap_kernel_mode(regs) \ + (kvm_kernel_mode(regs) && kvm_from_trap_on_kernel(regs)) +#define kvm_trap_user_mode(regs) \ + (guest_user_mode(regs) && !LIGHT_HYPERCALL_MODE(regs)) + +static inline void KVM_COPY_STACKS_TO_MEMORY(void) +{ + if (IS_HV_GM()) + NATIVE_COPY_STACKS_TO_MEMORY(); + else + HYPERVISOR_copy_stacks_to_memory(); +} + +static inline void +kvm_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size) +{ + fast_tagged_memory_copy(dst, src, size, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, true); +} + +static __always_inline void +kvm_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size) +{ + e2k_psp_hi_t k_psp_hi; + u64 ps_ind, ps_size; + u64 size; + + DebugUST("current host procedure stack index 0x%x, PSHTP 0x%x\n", + NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind, + NATIVE_NV_READ_PSHTP_REG().PSHTP_ind); + + KVM_COPY_STACKS_TO_MEMORY(); + ATOMIC_GET_HW_PS_SIZES(ps_ind, ps_size); + + size = ps_ind - spilled_size; + BUG_ON(!IS_ALIGNED(size, ALIGN_PSTACK_TOP_SIZE) || (s64) size < 0); + + kvm_kernel_hw_stack_frames_copy(dst, src, size); + + k_psp_hi = NATIVE_NV_READ_PSP_HI_REG(); + k_psp_hi.PSP_hi_ind = size; + HYPERVISOR_update_psp_hi(k_psp_hi.PSP_hi_half); + + DebugUST("move spilled procedure part from host top %px to " + "bottom %px, size 0x%llx\n", + src, dst, size); + DebugUST("host kernel procedure stack index is now 0x%x, " + "guest user PSHTP 0x%llx\n", + k_psp_hi.PSP_hi_ind, spilled_size); +} + +static __always_inline void +kvm_collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size) +{ + e2k_pcsp_hi_t k_pcsp_hi; + u64 pcs_ind, pcs_size; + u64 size; + + DebugUST("current host chain stack index 0x%x, PCSHTP 0x%llx\n", + NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_ind, + NATIVE_READ_PCSHTP_REG_SVALUE()); + + KVM_COPY_STACKS_TO_MEMORY(); + ATOMIC_GET_HW_PCS_SIZES(pcs_ind, pcs_size); + + size = pcs_ind - spilled_size; + BUG_ON(!IS_ALIGNED(size, ALIGN_PCSTACK_TOP_SIZE) || (s64) size < 0); + + kvm_kernel_hw_stack_frames_copy(dst, src, size); + + k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG(); + k_pcsp_hi.PCSP_hi_ind = size; + HYPERVISOR_update_pcsp_hi(k_pcsp_hi.PCSP_hi_half); + + DebugUST("move spilled chain part from host top %px to " + "bottom %px, size 0x%llx\n", + src, dst, size); + DebugUST("host kernel chain stack index is now 0x%x, " + "guest user PCSHTP 0x%llx\n", + k_pcsp_hi.PCSP_hi_ind, spilled_size); +} + +static __always_inline int +copy_stack_page_from_kernel(void __user *dst, void *src, e2k_size_t to_copy, + bool is_chain) +{ + int ret; + + ret = HYPERVISOR_copy_hw_stacks_frames(dst, src, to_copy, is_chain); + return ret; +} + +static __always_inline int +copy_stack_page_to_user(void __user *dst, void *src, e2k_size_t to_copy, + bool is_chain) +{ + struct page *page = NULL; + unsigned long addr = (unsigned long)dst; + void *k_dst; + e2k_size_t offset; + mm_segment_t seg; + unsigned long ts_flag; + int npages; + int ret; + + 
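/* the destination user page is pinned below: first by the lockless + * __get_user_pages_fast(), then by the get_user_pages_unlocked() fallback; + * the frames are then written through the page's kernel mapping obtained + * with page_address(), because dst is a guest user address */ +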
if (to_copy == 0) + return 0; + + DebugUST("started to copy %s stack from kernel stack %px to user %px, " + "size 0x%lx\n", + (is_chain) ? "chain" : "procedure", + src, dst, to_copy); + seg = get_fs(); + set_fs(K_USER_DS); + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + do { + npages = __get_user_pages_fast(addr, 1, 1, &page); + if (npages == 1) + break; + npages = get_user_pages_unlocked(addr, 1, &page, FOLL_WRITE); + if (npages == 1) + break; + clear_ts_flag(ts_flag); + set_fs(seg); + ret = -EFAULT; + goto failed; + } while (npages != 1); + clear_ts_flag(ts_flag); + set_fs(seg); + + offset = addr & ~PAGE_MASK; + k_dst = page_address(page) + offset; + DebugUST("copy stack frames from kernel %px to user %px, size 0x%lx\n", + src, k_dst, to_copy); + ret = copy_stack_page_from_kernel(k_dst, src, to_copy, is_chain); + if (ret != 0) { + pr_err("%s(): copy %s stack to user %px from kernel %px, " + "size 0x%lx failed, error %d\n", + __func__, (is_chain) ? "chain" : "procedure", + k_dst, src, to_copy, ret); + goto failed_copy; + } + +failed_copy: + put_page(page); +failed: + return ret; +} + +static __always_inline int +kvm_copy_user_stack_from_kernel(void __user *dst, void *src, + e2k_size_t to_copy, bool is_chain) +{ + e2k_size_t offset, len; + int ret; + + if (to_copy == 0) + return 0; + + DebugUST("started to copy %s stack from kernel stack %px to user %px, " + "size 0x%lx\n", + (is_chain) ? "chain" : "procedure", + src, dst, to_copy); + do { + offset = (unsigned long)dst & ~PAGE_MASK; + len = min(to_copy, PAGE_SIZE - offset); + ret = copy_stack_page_to_user(dst, src, len, is_chain); + if (ret != 0) + goto failed; + dst += len; + src += len; + to_copy -= len; + } while (to_copy > 0); + + return 0; + +failed: + return ret; +} + +static __always_inline int +kvm_user_hw_stacks_copy(pt_regs_t *regs, int add_frames_num) +{ + e2k_psp_lo_t psp_lo; + e2k_psp_hi_t psp_hi; + e2k_pshtp_t pshtp; + e2k_pcsp_lo_t pcsp_lo; + e2k_pcsp_hi_t pcsp_hi; + e2k_pcshtp_t pcshtp; + e2k_stacks_t *stacks; + void __user *dst; + void *src; + long to_copy, from, there_are, add_frames_size; + int ret; + + BUG_ON(irqs_disabled()); + + ret = HYPERVISOR_copy_stacks_to_memory(); + if (ret != 0) { + pr_err("%s(): flush of kernel stacks failed, error %d\n", + __func__, ret); + goto failed; + } + + /* copy user part of procedure stack from kernel back to user */ + stacks = &regs->stacks; + ATOMIC_READ_HW_STACKS_REGS(psp_lo.PSP_lo_half, psp_hi.PSP_hi_half, + pshtp.PSHTP_reg, + pcsp_lo.PCSP_lo_half, pcsp_hi.PCSP_hi_half, + pcshtp); + src = (void *)psp_lo.PSP_lo_base; + DebugUST("procedure stack at kernel from %px, size 0x%x, ind 0x%x, " + "pshtp 0x%llx\n", + src, psp_hi.PSP_hi_size, psp_hi.PSP_hi_ind, pshtp.PSHTP_reg); + BUG_ON(psp_hi.PSP_hi_ind > psp_hi.PSP_hi_size); + + if (stacks->psp_hi.PSP_hi_ind >= stacks->psp_hi.PSP_hi_size) { + /* procedure stack overflow, need to expand */ + ret = handle_proc_stack_bounds(stacks, regs->trap); + if (unlikely(ret)) { + pr_err("%s(): could not handle process %s (%d) " + "procedure stack overflow, error %d\n", + __func__, current->comm, current->pid, ret); + goto failed; + } + } + to_copy = GET_PSHTP_MEM_INDEX(stacks->pshtp); + BUG_ON(to_copy < 0); + from = stacks->psp_hi.PSP_hi_ind - to_copy; + BUG_ON(from < 0); + dst = (void __user *)stacks->psp_lo.PSP_lo_base + from; + DebugUST("procedure stack at user from %px, ind 0x%x, " + "pshtp size to copy 0x%lx\n", + dst, stacks->psp_hi.PSP_hi_ind, to_copy); + there_are = stacks->psp_hi.PSP_hi_size - from; + if (there_are < to_copy) { + pr_err("%s(): user 
procedure stack overflow, there are 0x%lx free " + "but need to copy 0x%lx, not yet implemented\n", + __func__, there_are, to_copy); + BUG_ON(true); + } + if (to_copy > 0) { + ret = kvm_copy_user_stack_from_kernel(dst, src, to_copy, false); + if (ret != 0) { + pr_err("%s(): procedure stack copying from kernel %px " + "to user %px, size 0x%lx failed, error %d\n", + __func__, src, dst, to_copy, ret); + goto failed; + } + } + + /* copy user part of chain stack from kernel back to user */ + add_frames_size = add_frames_num * SZ_OF_CR; + src = (void *)pcsp_lo.PCSP_lo_base; + DebugUST("chain stack at kernel from %px, size 0x%x + 0x%lx, ind 0x%x, " + "pcshtp 0x%x\n", + src, pcsp_hi.PCSP_hi_size, add_frames_size, pcsp_hi.PCSP_hi_ind, + pcshtp); + BUG_ON(pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp) + + add_frames_size > pcsp_hi.PCSP_hi_size); + if (stacks->pcsp_hi.PCSP_hi_ind >= stacks->pcsp_hi.PCSP_hi_size) { + /* chain stack overflow, need to expand */ + ret = handle_chain_stack_bounds(stacks, regs->trap); + if (unlikely(ret)) { + pr_err("%s(): could not handle process %s (%d) " + "chain stack overflow, error %d\n", + __func__, current->comm, current->pid, ret); + goto failed; + } + } + to_copy = PCSHTP_SIGN_EXTEND(stacks->pcshtp); + BUG_ON(to_copy < 0); + from = stacks->pcsp_hi.PCSP_hi_ind - to_copy; + BUG_ON(from < 0); + dst = (void *)stacks->pcsp_lo.PCSP_lo_base + from; + to_copy += add_frames_size; + BUG_ON(to_copy > pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp)); + DebugUST("chain stack at user from %px, ind 0x%x, " + "pcshtp size to copy 0x%lx\n", + dst, stacks->pcsp_hi.PCSP_hi_ind, to_copy); + there_are = stacks->pcsp_hi.PCSP_hi_size - from; + if (there_are < to_copy) { + pr_err("%s(): user chain stack overflow, there are 0x%lx free " + "but need to copy 0x%lx, not yet implemented\n", + __func__, there_are, to_copy); + BUG_ON(true); + } + if (to_copy > 0) { + ret = kvm_copy_user_stack_from_kernel(dst, src, to_copy, true); + if (ret != 0) { + pr_err("%s(): chain stack copying from kernel %px " + "to user %px, size 0x%lx failed, error %d\n", + __func__, src, dst, to_copy, ret); + goto failed; + } + } + if (add_frames_size > 0) { + /* increment chain stack pointer */ + stacks->pcsp_hi.PCSP_hi_ind += add_frames_size; + } + +failed: + if (DEBUG_USER_STACKS_MODE) + debug_ustacks = false; + return ret; +} + +extern void kvm_get_mm_notifier(thread_info_t *ti, struct mm_struct *mm); + +/** + * user_hw_stacks_prepare - prepare user hardware stacks that have been + * SPILLed to kernel back to user space + * @stacks - saved user stack registers + * @cur_window_q - size of current window in procedure stack, + * needed only if @copy_full is not set + * @syscall - true if called upon direct system call exit (no signal handlers) + * + * This does two things: + * + * 1) It is possible that upon kernel entry pcshtp == 0 in some cases: + * - user signal handler had pcshtp==0x20 before return to sigreturn() + * - user context had pcshtp==0x20 before return to makecontext_trampoline() + * - chain stack underflow happened + * So it is possible in sigreturn() and traps, but not in system calls. + * If we are using the trick with return to FILL user hardware stacks then + * we must have a frame in the chain stack to return to. So in this case the + * kernel's chain stack is moved up by one frame (0x20 bytes). 
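+ * A frame here is one chain stack record of SZ_OF_CR bytes (0x20); the + * add_frames_num argument of kvm_user_hw_stacks_copy() above is counted + * in the same units.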
+ * We also fill the new frame with actual user data and update stacks->pcshtp, + * this is needed to keep the coherent state where saved stacks->pcshtp values + * show how much data from user space has been spilled to kernel space. + * + * 2) It is not possible to always FILL all of user data that have been + * SPILLed to kernel stacks. So we manually copy the leftovers that can + * not be FILLed to user space. + * This copy does not update stacks->pshtp and stacks->pcshtp. Main reason + * is signals: if a signal arrives after copying then it must see a coherent + * state where saved stacks->pshtp and stacks->pcshtp values show how much + * data from user space has been spilled to kernel space. + */ +static __always_inline int kvm_user_hw_stacks_prepare( + struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + e2k_pcshtp_t u_pcshtp = stacks->pcshtp; + int ret; + + BUG_ON(!kvm_trap_user_mode(regs)); + + BUG_ON(from & FROM_PV_VCPU_MODE); + + /* + * 1) Make sure there is free space in kernel chain stack to return to + */ + if (!syscall && u_pcshtp == 0) { + DebugUST("%s(): PCSHTP is empty\n", __func__); + } + + /* + * 2) User data copying will be done a bit later at + * kvm_prepare_user_hv_stacks() + */ + ret = kvm_user_hw_stacks_copy(regs, 0); + if (ret != 0) { + pr_err("%s(): copying of hardware stacks failed, error %d\n", + __func__, ret); + do_exit(SIGKILL); + } + return ret; +} + +static inline int +kvm_ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return kvm_user_hw_stacks_copy(regs, 0); +} + +static inline void kvm_release_task_struct(struct task_struct *task) +{ + thread_info_t *ti; + int ret; + + ti = task_thread_info(task); + BUG_ON(ti == NULL); + + ret = HYPERVISOR_release_task_struct(ti->gpid_nr); + if (ret != 0) { + pr_err("%s(): could not release task struct of %s (%d) " + "GPID #%d on host, error %d\n", + __func__, task->comm, task->pid, ti->gpid_nr, ret); + } +} + +/* + * These functions are for the guest kernel, see the comment for + * virtualization at arch/e2k/include/asm/ptrace.h + * In this case the guest is the main kernel and knows here that it is a + * guest; the extra kernel is the host + * + * Get/set kernel stack limits of area reserved at the top of hardware stacks + * Kernel areas include two parts: + * guest kernel stack reserved area at top of stack + * host kernel stack reserved area at top of stack + */ + +static __always_inline e2k_size_t +kvm_get_hw_ps_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_ps_user_size(hw_stacks); +} +static __always_inline e2k_size_t +kvm_get_hw_pcs_user_size(hw_stack_t *hw_stacks) +{ + return get_hw_pcs_user_size(hw_stacks); +} +static __always_inline void +kvm_set_hw_ps_user_size(hw_stack_t *hw_stacks, e2k_size_t u_ps_size) +{ + set_hw_ps_user_size(hw_stacks, u_ps_size); +} +static __always_inline void +kvm_set_hw_pcs_user_size(hw_stack_t *hw_stacks, e2k_size_t u_pcs_size) +{ + set_hw_pcs_user_size(hw_stacks, u_pcs_size); +} + +/* + * Table of pointers to VCPUs state. 
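+ * The table is indexed by VCPU id, see kvm_get_the_vcpu_state() below.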
+ * The own VCPU state pointer is loaded into a global register for direct + * access; other VCPUs' state pointers are accessible through this table + */ +extern kvm_vcpu_state_t *vcpus_state[NR_CPUS]; + +static inline kvm_vcpu_state_t *kvm_get_the_vcpu_state(long vcpu_id) +{ + return vcpus_state[vcpu_id]; +} + +/* own VCPU state: directly accessible through global registers */ +static inline kvm_vcpu_state_t *kvm_get_vcpu_state(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return (kvm_vcpu_state_t *)(vcpu_base); +} + +#define KVM_ONLY_SET_GUEST_GREGS(ti) \ + KVM_SET_VCPU_STATE_BASE(kvm_get_the_vcpu_state( \ + smp_processor_id())) + +/* guest kernel does not support own guests and cannot be run as host */ +/* so it does not have this problem - nothing to do */ +/* see arch/e2k/include/asm/process.h for more details why and how */ +#define KVM_GUEST_UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) +#define KVM_GUEST_CHECK_VCPU_THREAD_CONTEXT(__ti) + +extern void kvm_vcpu_boot_thread_init(struct task_struct *boot_task); +extern int kvm_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg); +extern void kvm_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks); +extern void boot_kvm_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks); +extern void kvm_define_user_hw_stacks_sizes(hw_stack_t *hw_stacks); + +extern void kvm_release_hw_stacks(thread_info_t *dead_ti); +extern void kvm_release_kernel_stacks(thread_info_t *dead_ti); +extern int kvm_kmem_area_host_chunk(e2k_addr_t stack_base, + e2k_size_t stack_size, int hw_flag); +extern void kvm_kmem_area_unhost_chunk(e2k_addr_t stack_base, + e2k_size_t stack_size); +extern int kvm_switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel); + +extern int kvm_clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags); +extern int kvm_copy_spilled_user_stacks(e2k_stacks_t *child_stacks, + e2k_mem_crs_t *child_crs, sw_regs_t *new_sw_regs, + thread_info_t *new_ti); + +extern int kvm_copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, pt_regs_t *regs); + +extern void kvm_fix_process_pt_regs(thread_info_t *ti, e2k_stacks_t *stacks, + pt_regs_t *regs, pt_regs_t *old_regs); + +extern void __init kvm_setup_arch(void); + +#ifdef COMMON_KERNEL_USER_HW_STACKS +/* + * Free guest kernel hardware stacks after completion of sys_execve() + * and switch to new user process. The new process executes on its own + * stacks, and the old kernel hardware stacks on which do_execve() ran can + * be released only after the switch to the new user stacks. 
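+ * Such stale stacks are marked with the TS_MAPPED_HW_STACKS_INVALID flag + * and are freed by kvm_free_old_kernel_hardware_stacks() below.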
+ * WARNING: the stacks should probably be released earlier (not at process + * exit and mm deactivation time), perhaps as pending work + */ +static inline void +kvm_free_old_kernel_hardware_stacks(void) +{ + thread_info_t *ti = current_thread_info(); + + if (!test_ts_flag(TS_MAPPED_HW_STACKS_INVALID)) + /* this is not a process right after sys_execve() */ + return; + + if (test_ts_flag(TS_MAPPED_HW_STACKS)) { + release_old_hw_stack_mappings(ti); + } else { + release_old_kernel_hardware_stacks(ti); + } + + clear_ts_flag(TS_MAPPED_HW_STACKS_INVALID); +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ +#define E2K_FLUSHCPU KVM_FLUSHCPU +#define E2K_FLUSHR KVM_FLUSHR +#define E2K_FLUSHC KVM_FLUSHC +#define BOOT_FLUSHCPU KVM_FLUSHCPU +#define BOOT_FLUSHR KVM_FLUSHR +#define BOOT_FLUSHC KVM_FLUSHC + +#define ONLY_SET_GUEST_GREGS(ti) KVM_ONLY_SET_GUEST_GREGS(ti) + +#define UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) \ + KVM_GUEST_UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu) +#define CHECK_VCPU_THREAD_CONTEXT(__ti) \ + KVM_GUEST_CHECK_VCPU_THREAD_CONTEXT(__ti) + +#define GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) +#define COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) +#define GOTO_DONE_TO_PARAVIRT_GUEST() +#define COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) + +#define do_map_user_hard_stack_to_kernel(node, kstart, ubase, size) \ + do_map_native_user_hard_stack_to_kernel(node, kstart, \ + ubase, size) +#define resume_vm_thread() /* no virtual machines or threads to resume */ + +static inline bool host_is_at_HV_GM_mode(void) +{ + /* the guest has no guests of its own, so it cannot act as a host */ + return false; +} + +static inline void COPY_STACKS_TO_MEMORY(void) +{ + KVM_COPY_STACKS_TO_MEMORY(); +} + +static __always_inline void +kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size) +{ + kvm_kernel_hw_stack_frames_copy(dst, src, size); +} + +static __always_inline void +collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size) +{ + kvm_collapse_kernel_ps(dst, src, spilled_size); +} + +static __always_inline void +collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size) +{ + kvm_collapse_kernel_pcs(dst, src, spilled_size); +} + +static __always_inline void host_user_hw_stacks_prepare( + struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + if (regs->sys_num == __NR_e2k_longjmp2) { + /* hardware stacks are already prepared */ + return; + } + kvm_user_hw_stacks_prepare(stacks, regs, cur_window_q, + from, syscall); +} + +static inline int +ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return kvm_ret_from_fork_prepare_hv_stacks(regs); +} + +static inline void +virt_cpu_thread_init(struct task_struct *boot_task) +{ + unsigned long vcpu_state_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_state_base); + task_thread_info(boot_task)->vcpu_state_base = vcpu_state_base; + if (!IS_HV_GM()) + kvm_vcpu_boot_thread_init(boot_task); +} + +static inline int +copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + if (IS_HV_GM()) + return native_copy_kernel_stacks(new_task, fn, arg); + else + return kvm_copy_kernel_stacks(new_task, fn, arg); +} +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline int +copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, pt_regs_t *regs) +{ + if (IS_HV_GM()) + return native_copy_user_stacks(clone_flags, new_stk_base, + 
new_stk_sz, new_task, regs); + else + return kvm_copy_user_stacks(clone_flags, new_stk_base, + new_stk_sz, new_task, regs); +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +static inline void +define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + if (IS_HV_GM()) + native_do_define_kernel_hw_stacks_sizes(hw_stacks); + else + kvm_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +boot_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + if (BOOT_IS_HV_GM()) + native_do_define_kernel_hw_stacks_sizes(hw_stacks); + else + boot_kvm_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +define_user_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + if (IS_HV_GM()) + native_define_user_hw_stacks_sizes(hw_stacks); + else + kvm_define_user_hw_stacks_sizes(hw_stacks); +} + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline void +release_hw_stacks(thread_info_t *dead_ti) +{ + if (IS_HV_GM()) { + native_release_hw_stacks(dead_ti); + } else { + kvm_release_hw_stacks(dead_ti); + } +} +static inline void +release_kernel_stacks(thread_info_t *dead_ti) +{ + if (IS_HV_GM()) { + native_release_kernel_stacks(dead_ti); + } else { + kvm_release_kernel_stacks(dead_ti); + } +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs) /* nothing to do */ + +static inline int +switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + if (likely(IS_HV_GM())) { + return native_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); + } else { + return kvm_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); + } +} + +static inline int +clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks, + const e2k_mem_crs_t *child_crs, const struct pt_regs *regs, + struct sw_regs *new_sw_regs, struct thread_info *new_ti, + unsigned long clone_flags) +{ + if (likely(IS_HV_GM())) { + return native_clone_prepare_spilled_user_stacks(child_stacks, + child_crs, regs, new_sw_regs, new_ti, + clone_flags); + } else { + return kvm_clone_prepare_spilled_user_stacks(child_stacks, + child_crs, regs, new_sw_regs, new_ti, + clone_flags); + } +} + +static inline int +copy_spilled_user_stacks(e2k_stacks_t *child_stacks, e2k_mem_crs_t *child_crs, + sw_regs_t *new_sw_regs, thread_info_t *new_ti) +{ + if (likely(IS_HV_GM())) { + native_copy_spilled_user_stacks(child_stacks, child_crs, + new_sw_regs, new_ti); + return 0; + } else { + return kvm_copy_spilled_user_stacks(child_stacks, child_crs, + new_sw_regs, new_ti); + } +} + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline void +free_old_kernel_hardware_stacks(void) +{ + if (likely(IS_HV_GM())) { + native_free_old_kernel_hardware_stacks(); + } else { + kvm_free_old_kernel_hardware_stacks(); + } +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +/* the function is not used in guest mode so only to compile without errors */ +static __always_inline __interrupt void +complete_switch_to_user_func(void) +{ + /* none own guests, so nothing to do in virtualization mode */ + /* but the function should switch interrupt control from UPSR to */ + /* PSR and set initial state of user UPSR */ + KVM_SET_USER_INITIAL_UPSR(E2K_USER_INITIAL_UPSR); +} +/* the function is not used in guest mode so only to compile without errors */ +static __always_inline __interrupt void +complete_go2user(thread_info_t *ti, long fn) +{ + /* none own guests, so 
nothing to do in virtualization mode */ + /* but the function should restore user UPSR state */ + KVM_WRITE_UPSR_REG(ti->upsr); +} + +#define clear_vm_thread_flags() /* own virtual machines is not */ + /* supported on guest */ + /* so nothing to clear */ + +static inline void +clear_virt_thread_struct(thread_info_t *ti) +{ +} + +static inline void virt_setup_arch(void) +{ + kvm_setup_arch(); +} + +static inline void free_virt_task_struct(struct task_struct *task) +{ + kvm_release_task_struct(task); +} + +#define usd_cannot_be_expanded(regs) user_stack_cannot_be_expanded() + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !(_E2K_KVM_GUEST_PROCESS_H) */ diff --git a/arch/e2k/include/asm/kvm/guest/processor.h b/arch/e2k/include/asm/kvm/guest/processor.h new file mode 100644 index 0000000..f38fe26 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/processor.h @@ -0,0 +1,64 @@ +/* + * KVM guest processor and processes support + * + * Copyright (C) 2014 MCST + */ + +#ifndef _E2K_KVM_GUEST_PROCESSOR_H_ +#define _E2K_KVM_GUEST_PROCESSOR_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +extern int kvm_prepare_start_thread_frames(unsigned long entry, + unsigned long sp); + +extern void kvm_default_idle(void); +extern void kvm_cpu_relax(void); +extern void kvm_cpu_relax_no_resched(void); + +/* defined at kernel/sched.c */ +extern void wake_up_idle_vcpu(int cpu); + +extern void kvm_print_machine_type_info(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +#define paravirt_enabled() true +#define boot_paravirt_enabled() paravirt_enabled() + +static inline int +prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + if (likely(IS_HV_GM())) { + return native_do_prepare_start_thread_frames(entry, sp); + } else { + return kvm_prepare_start_thread_frames(entry, sp); + } +} + +#define default_idle() kvm_default_idle() +#define cpu_relax() kvm_cpu_relax() +#define cpu_relax_no_resched() kvm_cpu_relax_no_resched() + +static inline void +print_machine_type_info(void) +{ + kvm_print_machine_type_info(); +} +static inline void +paravirt_banner(void) +{ + printk(KERN_INFO "Booting pure guest kernel (not paravirtualized " + "based on pv_ops)\n"); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_PROCESSOR_H_ */ + + diff --git a/arch/e2k/include/asm/kvm/guest/ptrace.h b/arch/e2k/include/asm/kvm/guest/ptrace.h new file mode 100644 index 0000000..6c2979f --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/ptrace.h @@ -0,0 +1,151 @@ +#ifndef __E2K_KVM_GUEST_PTRACE_H +#define __E2K_KVM_GUEST_PTRACE_H + +/* Do not include this header directly, include */ + +#include +#include +#include + +struct task_struct; + +static inline void +kvm_save_DAM(unsigned long long dam[DAM_ENTRIES_NUM]) +{ + int ret; + + ret = HYPERVISOR_get_DAM(dam, DAM_ENTRIES_NUM); + if (ret != 0) { + pr_err("%s(): could not receive DAM state, error %d\n", + __func__, ret); + } +} + +static inline void kvm_atomic_load_osgd_to_gd(void) +{ + /* FIXME: it is not yet understood what should be done here */ +} + +static inline e2k_addr_t +kvm_check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + if (likely(address < GUEST_TASK_SIZE)) + return 0; + if (address < NATIVE_TASK_SIZE) { + pr_err("Address 0x%016lx is guest kernel address\n", + address); + return -1; + } + pr_err("Address 0x%016lx is host kernel address\n", + address); + return -1; +} +#define KVM_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + false /* pure guest kernel has no own guests */ +#define KVM_IS_GUEST_ADDRESS_TO_HOST(address) \ + IS_HOST_KERNEL_ADDRESS(address) + +/* guest page table is pseudo PT and only host PT is used */ +/* to translate any guest addresses */ +#define kvm_print_host_user_address_ptes(mm, address) \ +({ \ + HYPERVISOR_print_guest_user_address_ptes((mm)->gmmid_nr, address); \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL + +#define user_mode(regs) is_user_mode(regs, GUEST_TASK_SIZE) +#define kernel_mode(regs) is_kernel_mode(regs, GUEST_TASK_SIZE) + +/* guest kernel can be: */ +/* user of host kernel, so USER MODE (pm = 0) */ +/* hardware virtualized guest kernel, so KERNEL MODE (pm = 1) */ +#define from_guest_kernel_mode(cr1_lo) \ + ((IS_HV_GM()) ? from_kernel_mode(cr1_lo) : \ + from_user_mode(cr1_lo)) +#define from_guest_kernel(cr0_hi, cr1_lo) \ + (from_guest_kernel_mode(cr1_lo) && \ + from_guest_kernel_IP(cr0_hi)) + +#define is_call_from_host_user(cr0_hi, cr1_lo) \ + (from_host_user_IP(cr0_hi) && from_host_user_mode(cr1_lo)) +#define is_call_from_host_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_user(cr0_hi, cr1_lo) : \ + from_host_user_mode(cr1_lo)) +#define is_call_from_guest_user(cr0_hi, cr1_lo) \ + (from_guest_user_IP(cr0_hi) && from_guest_user_mode(cr1_lo)) +#define is_call_from_guest_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_guest_user(cr0_hi, cr1_lo) : \ + from_guest_user_mode(cr1_lo)) +#define is_call_from_host_kernel(cr0_hi, cr1_lo) \ + (from_host_kernel_IP(cr0_hi) && from_host_kernel_mode(cr1_lo)) +#define is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + from_host_kernel_mode(cr1_lo)) +#define is_call_from_guest_kernel(cr0_hi, cr1_lo) \ + from_guest_kernel(cr0_hi, cr1_lo) +#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? 
is_call_from_guest_kernel(cr0_hi, cr1_lo) : \ + from_guest_kernel_mode(cr1_lo)) + +/* macros to detect guest traps on host */ +/* Guest has no nested VMs of its own, so no guests exist */ +/* and these macros should always return 'false' */ +#define trap_on_guest(regs) \ + false /* own guest is not supported */ +#define trap_on_pv_hv_guest(vcpu, regs) \ + false /* own guest is not supported */ +/* trap occurred on guest user or kernel */ +#define guest_trap_on_host(regs) \ + false /* own guest is not supported */ +#define guest_trap_on_pv_hv_host(vcpu, regs) \ + false /* own guest is not supported */ +/* trap occurred on guest kernel or user, but in host mode */ +/* and the trap can be due to guest or not */ +#define host_trap_on_guest(regs) \ + false /* own guest is not supported */ +/* trap occurred on guest user or kernel or on host but due to guest */ +#define due_to_guest_trap_on_host(regs) \ + false /* own guest is not supported */ +#define due_to_guest_trap_on_pv_hv_host(vcpu, regs) \ + false /* own guest is not supported */ + +#define ON_HOST_KERNEL() false /* it is guest, not host */ +#define call_from_user_mode(cr0_hi, cr1_lo) \ + is_call_from_user(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_kernel_mode(cr0_hi, cr1_lo) \ + is_call_from_kernel(cr0_hi, cr1_lo, ON_HOST_KERNEL()) +#define call_from_user(regs) \ + call_from_user_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) +#define call_from_kernel(regs) \ + call_from_kernel_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo) + +#define SAVE_DAM(dam) kvm_save_DAM(dam) + +static inline void atomic_load_osgd_to_gd(void) +{ + kvm_atomic_load_osgd_to_gd(); +} + +/* it is pure KVM guest kernel (not paravirtualized based on pv_ops) */ +#define LIGHT_HYPERCALL_MODE(regs) 0 /* hypercalls not supported */ +#define TI_GENERIC_HYPERCALL_MODE(thread_info) 0 /* hypercalls not supported */ +#define GENERIC_HYPERCALL_MODE() 0 /* hypercalls not supported */ +#define IN_LIGHT_HYPERCALL() 0 /* hypercalls not supported */ +#define IN_GENERIC_HYPERCALL() 0 /* hypercalls not supported */ +#define IN_HYPERCALL() 0 /* hypercalls not supported */ + +static inline e2k_addr_t +check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + return kvm_check_is_user_address(task, address); +} +#define IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + KVM_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) +#define IS_GUEST_ADDRESS_TO_HOST(address) \ + KVM_IS_GUEST_ADDRESS_TO_HOST(address) +#define print_host_user_address_ptes(mm, address) \ + kvm_print_host_user_address_ptes(mm, address) +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __E2K_KVM_GUEST_PTRACE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/pv_info.h b/arch/e2k/include/asm/kvm/guest/pv_info.h new file mode 100644 index 0000000..16c3e1d --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/pv_info.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016 MCST, Salavat Gilyazov atic@mcst.ru + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_KVM_GUEST_PV_INFO_H +#define __ASM_E2K_KVM_GUEST_PV_INFO_H + +#include + +/* + * e2k kernel general info + */ +#define KVM_PAGE_OFFSET GUEST_PAGE_OFFSET +#define KVM_TASK_SIZE PAGE_OFFSET +#define KVM_VMALLOC_START GUEST_VMALLOC_START +#define KVM_VMALLOC_END GUEST_VMALLOC_END +#define KVM_VMEMMAP_START GUEST_VMEMMAP_START +#define KVM_VMEMMAP_END GUEST_VMEMMAP_END + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#define paravirt_enabled() true +#define boot_paravirt_enabled() paravirt_enabled() + +#define PAGE_OFFSET KVM_PAGE_OFFSET +#define TASK_SIZE PAGE_OFFSET +#define VMALLOC_START KVM_VMALLOC_START +#define VMALLOC_END KVM_VMALLOC_END +#define VMEMMAP_START KVM_VMEMMAP_START +#define VMEMMAP_END KVM_VMEMMAP_END + +#define BOOT_PAGE_OFFSET PAGE_OFFSET +#define BOOT_TASK_SIZE TASK_SIZE +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_E2K_KVM_GUEST_PV_INFO_H */ diff --git a/arch/e2k/include/asm/kvm/guest/regs_state.h b/arch/e2k/include/asm/kvm/guest/regs_state.h new file mode 100644 index 0000000..635dcc6 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/regs_state.h @@ -0,0 +1,463 @@ +#ifndef _E2K_KVM_GUEST_REGS_STATE_H +#define _E2K_KVM_GUEST_REGS_STATE_H + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* __ASSEMBLY__ */ + +extern void kvm_save_glob_regs(global_regs_t *gregs); +extern void kvm_save_glob_regs_dirty_bgr(global_regs_t *gregs); +extern void kvm_save_local_glob_regs(local_gregs_t *l_gregs); +extern void kvm_restore_glob_regs(const global_regs_t *gregs); +extern void kvm_restore_local_glob_regs(const local_gregs_t *l_gregs); +extern void kvm_get_all_user_glob_regs(global_regs_t *gregs); + +static inline void +guest_save_glob_regs_v2(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v2(gregs); +} + +static inline void +guest_save_glob_regs_v5(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v5(gregs); +} + +static inline void +guest_save_glob_regs_dirty_bgr_v2(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v2(gregs); +} + +static inline void +guest_save_glob_regs_dirty_bgr_v5(global_regs_t *gregs) +{ + kvm_guest_save_gregs_v5(gregs); +} + +static inline void +guest_save_local_glob_regs_v2(local_gregs_t *l_gregs) +{ + kvm_guest_save_local_gregs_v2(l_gregs); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + copy_k_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->k_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + copy_h_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->h_gregs); +} + +static inline void +guest_save_local_glob_regs_v5(local_gregs_t *l_gregs) +{ + kvm_guest_save_local_gregs_v5(l_gregs); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + copy_k_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->k_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + copy_h_gregs_to_l_gregs(l_gregs, + ¤t_thread_info()->h_gregs); +} + +static inline void +guest_restore_glob_regs_v2(const global_regs_t *gregs) +{ + kvm_guest_restore_gregs_v2(gregs); +} + +static inline void +guest_restore_glob_regs_v5(const global_regs_t *gregs) +{ + kvm_guest_restore_gregs_v5(gregs); +} + +static inline void +guest_restore_local_glob_regs_v2(const 
local_gregs_t *l_gregs) +{ + kvm_guest_restore_local_gregs_v2(l_gregs); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + get_k_gregs_from_l_regs(¤t_thread_info()->k_gregs, + l_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + get_h_gregs_from_l_regs(¤t_thread_info()->h_gregs, + l_gregs); +} + +static inline void +guest_restore_local_glob_regs_v5(const local_gregs_t *l_gregs) +{ + kvm_guest_restore_local_gregs_v5(l_gregs); + if (KERNEL_GREGS_MAX_MASK & LOCAL_GREGS_USER_MASK) + get_k_gregs_from_l_regs(¤t_thread_info()->k_gregs, + l_gregs); + if (HOST_KERNEL_GREGS_MASK & LOCAL_GREGS_USER_MASK) + get_h_gregs_from_l_regs(¤t_thread_info()->h_gregs, + l_gregs); +} + +static inline void +guest_get_all_user_glob_regs(global_regs_t *gregs) +{ + machine.save_gregs(gregs); + copy_k_gregs_to_gregs(gregs, ¤t_thread_info()->k_gregs); + copy_h_gregs_to_gregs(gregs, ¤t_thread_info()->h_gregs); +} + +#ifdef CONFIG_GREGS_CONTEXT +#define KVM_INIT_G_REGS() \ +({ \ + unsigned long vcpu_base; \ + /* VCPU state base can be on global register, so save & restore */ \ + KVM_SAVE_VCPU_STATE_BASE(vcpu_base); \ + NATIVE_INIT_G_REGS(); \ + KVM_RESTORE_VCPU_STATE_BASE(vcpu_base); \ + clear_memory_8(¤t_thread_info()->h_gregs, \ + sizeof(current_thread_info()->h_gregs), ETAGEWD); \ +}) +#define BOOT_KVM_INIT_G_REGS() \ +({ \ + unsigned long vcpu_base; \ + /* VCPU state base can be on global register, so save & restore */ \ + KVM_SAVE_VCPU_STATE_BASE(vcpu_base); \ + NATIVE_BOOT_INIT_G_REGS(); \ + KVM_RESTORE_VCPU_STATE_BASE(vcpu_base); \ +}) + +#else /* ! CONFIG_GREGS_CONTEXT */ +#define KVM_INIT_G_REGS() +#define BOOT_KVM_INIT_G_REGS() NATIVE_BOOT_INIT_G_REGS() +#endif /* CONFIG_GREGS_CONTEXT */ + +#define KVM_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ +({ \ + u64 *greg_vals = (u64 *) g_user; \ + u8 *greg_tags = (u8 *) gtag_user; \ + u64 *glob_regs = (u64 *) gbase; \ + int greg_no; \ + \ + for (greg_no = 0; greg_no < E2K_GLOBAL_REGS_NUM; greg_no++) { \ + load_value_and_tagd((void *) glob_regs, \ + greg_vals, greg_tags); \ + glob_regs += 4; \ + greg_vals++; \ + greg_tags++; \ + } \ +}) + +#define KVM_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ +({ \ + u64 *greg_vals = (u64 *) g_user; \ + u8 *greg_tags = (u8 *) gtag_user; \ + u64 *glob_regs = (u64 *) gbase; \ + int greg_no; \ + u32 greg_tag; \ + \ + for (greg_no = 0; greg_no < E2K_GLOBAL_REGS_NUM; greg_no++) { \ + greg_tag = (u32) greg_tags[greg_no]; \ + store_tagged_dword((void *) glob_regs, \ + greg_vals[greg_no], greg_tag); \ + glob_regs += 4; \ + } \ +}) + +/* ptrace related guys: we do not use them on switching. 
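(they are only needed when ptrace reads or writes the tracee's tagged global registers)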
*/ +# define GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ +({ \ + if (likely(IS_HV_GM())) \ + NATIVE_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase); \ + else \ + KVM_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase); \ +}) + +# define SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ +({ \ + if (likely(IS_HV_GM())) \ + NATIVE_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user); \ + else \ + KVM_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user); \ +}) + +/* Save stack registers on guest kernel mode */ +#define KVM_SAVE_STACK_REGS(regs, ti, from_ti, trap) \ +do { \ + if (IS_HV_GM()) { \ + NATIVE_SAVE_STACK_REGS(regs, ti, from_ti, trap); \ + } else { \ + PREFIX_SAVE_STACK_REGS(KVM, regs, ti, from_ti, trap); \ + } \ +} while (false) + +/* Save hardware stack registers on guest kernel mode */ +#define KVM_SAVE_HW_STACKS_AT_TI(ti) \ +do { \ + struct hw_stacks *stacks = &(ti)->tmp_user_stacks; \ + \ + stacks->psp_lo = KVM_READ_PSP_LO_REG(); \ + stacks->psp_hi = KVM_READ_PSP_HI_REG(); \ + stacks->pshtp = KVM_READ_PSHTP_REG(); \ + stacks->pcsp_lo = KVM_READ_PCSP_LO_REG(); \ + stacks->pcsp_hi = KVM_READ_PCSP_HI_REG(); \ + stacks->pcshtp = KVM_READ_PCSHTP_REG_SVALUE(); \ +} while (0) + +#define KVM_DO_RESTORE_HS_REGS(regs, updated) \ +({ \ + PREFIX_RESTORE_HS_REGS(KVM, regs); \ + UPDATE_CPU_REGS_FLAGS(updated, HS_REGS_UPDATED_CPU_REGS); \ + PUT_UPDATED_CPU_REGS_FLAGS(updated); \ +}) +#define KVM_DO_RESTORE_USER_STACK_REGS(regs, in_syscall, updated) \ +({ \ + PREFIX_RESTORE_USER_STACK_REGS(KVM, regs, in_syscall); \ + UPDATE_CPU_REGS_FLAGS(updated, USD_UPDATED_CPU_REGS); \ + UPDATE_CPU_REGS_FLAGS(updated, CRS_UPDATED_CPU_REGS); \ + PUT_UPDATED_CPU_REGS_FLAGS(updated); \ +}) + +/* it is paravirtualized guest or native guest kernel */ +#define UPDATE_CPU_REGS_FLAGS(__updated, flags) \ + ((__updated) |= (flags)) +#define KVM_RESTORE_HS_REGS(regs) \ +({ \ + u64 updated = 0; \ + \ + KVM_DO_RESTORE_HS_REGS(regs, updated); \ +}) +#define KVM_RESTORE_USER_STACK_REGS(regs, in_syscall) \ +({ \ + u64 updated = 0; \ + \ + KVM_DO_RESTORE_USER_STACK_REGS(regs, in_syscall, updated); \ +}) +#define KVM_RESTORE_USER_TRAP_STACK_REGS(regs) \ + KVM_RESTORE_USER_STACK_REGS(regs, false) +#define KVM_RESTORE_USER_SYSCALL_STACK_REGS(regs) \ + KVM_RESTORE_USER_STACK_REGS(regs, true) +#define KVM_RESTORE_USER_CUT_REGS(ti, regs) /* CUTD is set by host */ + +#define KVM_SAVE_TRAP_CELLAR(regs, trap) \ +({ \ + kernel_trap_cellar_t *kernel_tcellar = \ + (kernel_trap_cellar_t *)KERNEL_TRAP_CELLAR; \ + kernel_trap_cellar_ext_t *kernel_tcellar_ext = \ + (kernel_trap_cellar_ext_t *) \ + ((void *) KERNEL_TRAP_CELLAR + TC_EXT_OFFSET); \ + trap_cellar_t *tcellar = (trap)->tcellar; \ + int cnt, cs_req_num = 0, cs_a4 = 0, max_cnt; \ + u64 kstack_pf_addr = 0; \ + bool end_flag = false, is_qp; \ + \ + max_cnt = KVM_READ_MMU_TRAP_COUNT(); \ + if (max_cnt < 3) { \ + max_cnt = 3 * HW_TC_SIZE; \ + end_flag = true; \ + } \ + (trap)->curr_cnt = -1; \ + (trap)->ignore_user_tc = 0; \ + (trap)->tc_called = 0; \ + (trap)->is_intc = false; \ + (trap)->from_sigreturn = 0; \ + CLEAR_CLW_REQUEST_COUNT(regs); \ + BUG_ON(max_cnt > 3 * HW_TC_SIZE); \ + for (cnt = 0; 3 * cnt < max_cnt; cnt++) { \ + tc_opcode_t opcode; \ + tc_cond_t condition; \ + \ + if (end_flag) \ + if (AW(kernel_tcellar[cnt].condition) == -1) \ + break; \ + \ + tcellar[cnt].address = kernel_tcellar[cnt].address; \ + condition = kernel_tcellar[cnt].condition; \ + tcellar[cnt].condition = condition; \ + AW(opcode) = AS(condition).opcode; \ + is_qp = (AS(opcode).fmt == LDST_QP_FMT || \ + 
cpu_has(CPU_FEAT_QPREG) && AS(condition).fmtc && \ + AS(opcode).fmt == LDST_QWORD_FMT); \ + if (AS(condition).clw) { \ + if (GET_CLW_REQUEST_COUNT(regs) == 0) { \ + SET_CLW_FIRST_REQUEST(regs, cnt); \ + } \ + INC_CLW_REQUEST_COUNT(regs); \ + } \ + if (is_qp) \ + tcellar[cnt].mask = kernel_tcellar_ext[cnt].mask; \ + if (AS(condition).store) { \ + e2k_addr_t kt = \ + (e2k_addr_t)&(kernel_tcellar[cnt].data); \ + e2k_addr_t t = \ + (e2k_addr_t)&(tcellar[cnt].data); \ + e2k_addr_t ktx = \ + (e2k_addr_t)&(kernel_tcellar_ext[cnt].data); \ + e2k_addr_t tx = \ + (e2k_addr_t)&(kernel_tcellar_ext[cnt].data); \ + kvm_move_tagged_dword(kt, t); \ + if (is_qp) { \ + kvm_move_tagged_dword(ktx, tx); \ + } \ + } else if (AS(condition).s_f && AS(condition).sru) { \ + if (cs_req_num == 0) \ + cs_a4 = tcellar[cnt].address & (1 << 4); \ + cs_req_num++; \ + } \ + tcellar[cnt].flags = 0; \ + } \ + (trap)->tc_count = cnt * 3; \ + kstack_pf_addr; \ +}) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure guest kernel */ + +#define INIT_G_REGS() KVM_INIT_G_REGS() +#define BOOT_INIT_G_REGS() BOOT_KVM_INIT_G_REGS() + +#define SAVE_HW_STACKS_AT_TI(ti) KVM_SAVE_HW_STACKS_AT_TI(ti) +#define SAVE_STACK_REGS(regs, ti, from_ti, trap) \ + KVM_SAVE_STACK_REGS(regs, ti, from_ti, trap) +#define RESTORE_HS_REGS(regs) \ + KVM_RESTORE_HS_REGS(regs) +#define RESTORE_USER_STACK_REGS(regs, in_syscall) \ + KVM_RESTORE_USER_STACK_REGS(regs, in_syscall) +#define RESTORE_USER_TRAP_STACK_REGS(regs) \ + RESTORE_USER_STACK_REGS(regs, false) +#define RESTORE_USER_SYSCALL_STACK_REGS(regs) \ + RESTORE_USER_STACK_REGS(regs, true) +#define RESTORE_USER_CUT_REGS(ti, regs, in_sys_call) \ + KVM_RESTORE_USER_CUT_REGS(ti, regs) + +static inline void +save_glob_regs_v2(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_save_glob_regs_v2(gregs); + } else { + kvm_save_glob_regs(gregs); + } +} + +static inline void +save_glob_regs_v5(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_save_glob_regs_v5(gregs); + } else { + kvm_save_glob_regs(gregs); + } +} + +static inline void +save_glob_regs_dirty_bgr_v2(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_save_glob_regs_dirty_bgr_v2(gregs); + } else { + kvm_save_glob_regs_dirty_bgr(gregs); + } +} + +static inline void +save_glob_regs_dirty_bgr_v5(global_regs_t *gregs) +{ + if (IS_HV_GM()) { + kvm_guest_save_gregs_dirty_bgr_v5(gregs); + } else { + kvm_save_glob_regs_dirty_bgr(gregs); + } +} + +static inline void +save_local_glob_regs_v2(local_gregs_t *l_gregs) +{ + if (IS_HV_GM()) { + guest_save_local_glob_regs_v2(l_gregs); + } else { + kvm_save_local_glob_regs(l_gregs); + } +} + +static inline void +save_local_glob_regs_v5(local_gregs_t *l_gregs) +{ + if (IS_HV_GM()) { + guest_save_local_glob_regs_v5(l_gregs); + } else { + kvm_save_local_glob_regs(l_gregs); + } +} + +static inline void +restore_glob_regs_v2(const global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_restore_glob_regs_v2(gregs); + } else { + kvm_restore_glob_regs(gregs); + } +} + +static inline void +restore_glob_regs_v5(const global_regs_t *gregs) +{ + if (IS_HV_GM()) { + guest_restore_glob_regs_v5(gregs); + } else { + kvm_restore_glob_regs(gregs); + } +} + +static inline void +restore_local_glob_regs_v2(const local_gregs_t *l_gregs) +{ + if (IS_HV_GM()) + guest_restore_local_glob_regs_v2(l_gregs); + else + kvm_restore_local_glob_regs(l_gregs); +} + +static inline void +restore_local_glob_regs_v5(const local_gregs_t *l_gregs) +{ + if (IS_HV_GM()) + guest_restore_local_glob_regs_v5(l_gregs); + else + kvm_restore_local_glob_regs(l_gregs); 
+} + +static inline void +save_local_glob_regs(local_gregs_t *l_gregs) +{ + machine.save_local_gregs(l_gregs); +} +static inline void +restore_local_glob_regs(const local_gregs_t *l_gregs) +{ + machine.restore_local_gregs(l_gregs); +} + +static inline void +get_all_user_glob_regs(global_regs_t *gregs) +{ + if (IS_HV_GM()) + guest_get_all_user_glob_regs(gregs); + else + kvm_get_all_user_glob_regs(gregs); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ +#endif /* _E2K_KVM_GUEST_REGS_STATE_H */ + diff --git a/arch/e2k/include/asm/kvm/guest/secondary_space.h b/arch/e2k/include/asm/kvm/guest/secondary_space.h new file mode 100644 index 0000000..e85b497 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/secondary_space.h @@ -0,0 +1,21 @@ +/* + * Secondary space support for E2K binary compiler + * Guest kernel support + */ +#ifndef _ASM_KVM_GUEST_SECONDARY_SPACE_H +#define _ASM_KVM_GUEST_SECONDARY_SPACE_H + +/* do not include the header directly, use asm/secondary_space.h include */ + +#define KVM_IS_NEXT_ELBRUS_2S true +#define KVM_SS_ADDR_START 0x180000000000L +#define KVM_SS_SIZE 0x040000000000UL + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#define IS_NEXT_ELBRUS_2S KVM_IS_NEXT_ELBRUS_2S +#define SS_SIZE KVM_SS_SIZE +#define SS_ADDR_START KVM_SS_ADDR_START +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _ASM_KVM_GUEST_SECONDARY_SPACE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/setup.h b/arch/e2k/include/asm/kvm/guest/setup.h new file mode 100644 index 0000000..9ede913 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/setup.h @@ -0,0 +1,39 @@ +#ifndef _ASM_KVM_GUEST_MACHDEP_H_ +#define _ASM_KVM_GUEST_MACHDEP_H_ + +#include +#include + +#ifdef CONFIG_VIRTUALIZATION + +extern void __init boot_e2k_virt_setup_arch(void); +extern void __init e2k_virt_setup_machine(void); +extern void kvm_bsp_switch_to_init_stack(void); +extern void kvm_setup_bsp_idle_task(int cpu); +extern void setup_guest_interface(void); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void arch_setup_machine(void) +{ + native_setup_machine(); + if (IS_HV_GM()) { + setup_guest_interface(); + return; + } + e2k_virt_setup_machine(); +} + +static inline void bsp_switch_to_init_stack(void) +{ + kvm_bsp_switch_to_init_stack(); +} + +static inline void setup_bsp_idle_task(int cpu) +{ + kvm_setup_bsp_idle_task(cpu); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* CONFIG_VIRTUALIZATION */ +#endif /* _ASM_KVM_GUEST_MACHDEP_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/sge.h b/arch/e2k/include/asm/kvm/guest/sge.h new file mode 100644 index 0000000..4d124aa --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/sge.h @@ -0,0 +1,75 @@ +#ifndef _E2K_ASM_KVM_GUEST_SGE_H +#define _E2K_ASM_KVM_GUEST_SGE_H + +#ifdef __KERNEL__ + +#include + +#include +#include +#include + +#undef DEBUG_GUEST_SGE_MODE +#undef DebugGSGE +#define DEBUG_GUEST_SGE_MODE 0 /* stack guard debugging */ +#define DebugGSGE(fmt, args...) 
\ +({ \ + if (DEBUG_GUEST_SGE_MODE) \ + pr_info(fmt, ##args); \ +}) + +#define printk printk_fixed_args +#define panic panic_fixed_args +static inline void +kvm_do_switch_to_expanded_proc_stack(long delta_size, long delta_offset, + bool decr_k_ps) +{ + int ret; + + ret = HYPERVISOR_switch_to_expanded_guest_proc_stack(delta_size, + delta_offset, decr_k_ps); + if (ret) { + panic("kvm_do_switch_to_expanded_proc_stack() host could not " + "switch to updated stack, error %d\n", + ret); + } +} +static inline void +kvm_do_switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + int ret; + + ret = HYPERVISOR_switch_to_expanded_guest_chain_stack(delta_size, + delta_offset, decr_k_pcs); + if (ret) { + panic("kvm_do_switch_to_expanded_chain_stack() host could not " + "switch to updated stack, error %d\n", + ret); + } +} + +#undef printk +#undef panic + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is native guest kernel (without paravirtualization) */ + +static inline void +switch_to_expanded_proc_stack(long delta_size, long delta_offset, + bool decr_k_ps) +{ + kvm_do_switch_to_expanded_proc_stack(delta_size, delta_offset, + decr_k_ps); +} +static inline void +switch_to_expanded_chain_stack(long delta_size, long delta_offset, + bool decr_k_pcs) +{ + kvm_do_switch_to_expanded_chain_stack(delta_size, delta_offset, + decr_k_pcs); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_ASM_KVM_GUEST_SGE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/signal.h b/arch/e2k/include/asm/kvm/guest/signal.h new file mode 100644 index 0000000..adcdba5 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/signal.h @@ -0,0 +1,34 @@ +#ifndef _E2K_KVM_GUEST_SIGNAL_H_ +#define _E2K_KVM_GUEST_SIGNAL_H_ + +#include + +#ifndef __ASSEMBLY__ + +extern int kvm_signal_setup(struct pt_regs *regs); +extern int kvm_complete_long_jump(struct pt_regs *regs); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native paravirtualized guest kernel */ +/* or native kernel without virtualization support */ + +static inline int signal_setup(struct pt_regs *regs) +{ + return kvm_signal_setup(regs); +} + +static inline int complete_long_jump(struct pt_regs *regs) +{ + if (likely(IS_HV_GM())) { + return native_complete_long_jump(regs); + } else { + return kvm_complete_long_jump(regs); + } +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + + +#endif /* !__ASSEMBLY__ */ + +#endif /* !_E2K_KVM_GUEST_SIGNAL_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/smp.h b/arch/e2k/include/asm/kvm/guest/smp.h new file mode 100644 index 0000000..9e6b9fd --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/smp.h @@ -0,0 +1,84 @@ +#ifndef __ASM_KVM_GUEST_SMP_H +#define __ASM_KVM_GUEST_SMP_H + +#include + +extern void kvm_ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, + int cpu); +extern void kvm_setup_secondary_task(int cpu); + +extern void kvm_wait_for_cpu_booting(void); +extern void kvm_wait_for_cpu_wake_up(void); +extern int kvm_activate_cpu(int cpu_id); +extern int kvm_activate_all_cpus(void); + +extern void kvm_csd_lock_wait(call_single_data_t *data); +extern void kvm_csd_lock(call_single_data_t *data); +extern void kvm_arch_csd_lock_async(call_single_data_t *data); +extern void kvm_csd_unlock(call_single_data_t *data); + +extern void kvm_setup_pic_virq(unsigned int cpuid); +extern void kvm_startup_pic_virq(unsigned int cpuid); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +static inline void +ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + 
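+	/*
+	 * The sge.h helpers above follow the usual pattern for operations
+	 * the guest cannot do to itself: only the host can move the guest's
+	 * hardware stacks, and a non-zero return from the hypercall leaves
+	 * the guest on a half-updated stack, so the only safe reaction is
+	 * panic(). A minimal sketch of that shape (hypercall_expand() is an
+	 * illustrative stand-in for the HYPERVISOR_* call, not a real API):
+	 *
+	 *	static void expand_stack_or_die(long delta_size, long delta_offset)
+	 *	{
+	 *		int ret = hypercall_expand(delta_size, delta_offset);
+	 *
+	 *		if (ret)	// cannot run on a half-moved stack
+	 *			panic("stack expansion failed: %d\n", ret);
+	 *	}
+	 */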
kvm_ap_switch_to_init_stack(stack_base, cpuid, cpu); +} +static inline void setup_secondary_task(int cpu) +{ + kvm_setup_secondary_task(cpu); +} +static inline void +wait_for_cpu_booting(void) +{ + kvm_wait_for_cpu_booting(); +} +static inline void +wait_for_cpu_wake_up(void) +{ + kvm_wait_for_cpu_wake_up(); +} +static inline int +activate_cpu(int cpu_id) +{ + return kvm_activate_cpu(cpu_id); +} +static inline int +activate_all_cpus(void) +{ + return kvm_activate_all_cpus(); +} + +static inline void csd_lock_wait(call_single_data_t *data) +{ + kvm_csd_lock_wait(data); +} +static inline void csd_lock(call_single_data_t *data) +{ + kvm_csd_lock(data); +} +static inline void arch_csd_lock_async(call_single_data_t *data) +{ + kvm_arch_csd_lock_async(data); +} +static inline void csd_unlock(call_single_data_t *data) +{ + kvm_csd_unlock(data); +} + +static inline void +setup_local_pic_virq(unsigned int cpuid) +{ + kvm_setup_pic_virq(cpuid); +} +static inline void +startup_local_pic_virq(unsigned int cpuid) +{ + kvm_startup_pic_virq(cpuid); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_KVM_GUEST_SMP_H */ diff --git a/arch/e2k/include/asm/kvm/guest/spinlock.h b/arch/e2k/include/asm/kvm/guest/spinlock.h new file mode 100644 index 0000000..58a4601 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/spinlock.h @@ -0,0 +1,75 @@ +#ifndef __ASM_KVM_GUEST_SPINLOCK_H +#define __ASM_KVM_GUEST_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * spin_lock()/spin_unlock() fast and slow part + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#include + +extern void kvm_arch_spin_lock_slow(void *lock); +extern void kvm_wait_read_lock_slow(arch_rwlock_t *rw); +extern void kvm_wait_write_lock_slow(arch_rwlock_t *rw); +extern void kvm_arch_spin_locked_slow(void *lock); +extern void kvm_arch_read_locked_slow(arch_rwlock_t *rw); +extern void kvm_arch_write_locked_slow(arch_rwlock_t *rw); +extern void kvm_arch_spin_unlock_slow(void *lock); +extern void kvm_arch_read_unlock_slow(arch_rwlock_t *lock); +extern void kvm_arch_write_unlock_slow(arch_rwlock_t *lock); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* native guest kernel */ + +#define arch_spin_relax(lock) kvm_cpu_relax() +#define arch_read_relax(lock) kvm_cpu_relax() +#define arch_write_relax(lock) kvm_cpu_relax() + +static inline void +ord_wait_read_lock_slow(arch_rwlock_t *rw) +{ + kvm_wait_read_lock_slow(rw); +} +static inline void +ord_wait_write_lock_slow(arch_rwlock_t *rw) +{ + kvm_wait_write_lock_slow(rw); +} +static inline void +ord_arch_read_locked_slow(arch_rwlock_t *rw) +{ + kvm_arch_read_locked_slow(rw); +} +static inline void +ord_arch_write_locked_slow(arch_rwlock_t *rw) +{ + kvm_arch_write_locked_slow(rw); +} +static inline void +ord_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + kvm_arch_read_unlock_slow(rw); +} +static inline void +ord_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + kvm_arch_write_unlock_slow(rw); +} + +static inline void arch_spin_lock_slow(arch_spinlock_t *lock) +{ + kvm_arch_spin_lock_slow(lock); +} +static inline void arch_spin_locked_slow(arch_spinlock_t *lock) +{ + kvm_arch_spin_locked_slow(lock); +} +static inline void arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + kvm_arch_spin_unlock_slow(lock); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_KVM_GUEST_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/kvm/guest/stacks.h b/arch/e2k/include/asm/kvm/guest/stacks.h new file mode 100644 index 0000000..e4a516f --- /dev/null +++ 
b/arch/e2k/include/asm/kvm/guest/stacks.h @@ -0,0 +1,33 @@ +/* + * KVM guest stacks support + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_GUEST_STACKS_H +#define _E2K_KVM_GUEST_STACKS_H + +#include +#include + +/* + * Guest kernel thread stacks descriptions + */ +#define KVM_GUEST_KERNEL_C_STACK_SIZE KERNEL_C_STACK_SIZE /* as on host */ +#define KVM_GUEST_KERNEL_PS_SIZE (16 * PAGE_SIZE) /* 64 KBytes */ +#define KVM_GUEST_KERNEL_PS_INIT_SIZE (1 * PAGE_SIZE) /* 4 KBytes */ +#define KVM_GUEST_KERNEL_PCS_SIZE (2 * PAGE_SIZE) /* 8 KBytes */ +#define KVM_GUEST_KERNEL_PCS_INIT_SIZE (1 * PAGE_SIZE) /* 4 KBytes */ + +/* + * Guest user task stacks descriptions + */ +#define KVM_GUEST_USER_DATA_STACK_SIZE \ + DEFAULT_USER_DATA_STACK_SIZE /* as on host */ +#define KVM_GUEST_USER_PS_MAX_SIZE USER_P_STACK_SIZE /* as on host */ +#define KVM_GUEST_USER_PS_INIT_SIZE USER_P_STACK_INIT_SIZE /* as on host */ +#define KVM_GUEST_USER_PS_PRESENT_SIZE USER_P_STACK_PRESENT_SIZE /* --''-- */ +#define KVM_GUEST_USER_PCS_MAX_SIZE USER_PC_STACK_SIZE /* as on host */ +#define KVM_GUEST_USER_PCS_INIT_SIZE USER_PC_STACK_INIT_SIZE /* as on host */ +#define KVM_GUEST_USER_PCS_PRESENT_SIZE USER_PC_STACK_PRESENT_SIZE /* --''-- */ + +#endif /* ! _E2K_KVM_GUEST_STACKS_H */ diff --git a/arch/e2k/include/asm/kvm/guest/string.h b/arch/e2k/include/asm/kvm/guest/string.h new file mode 100644 index 0000000..fb36999 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/string.h @@ -0,0 +1,215 @@ +#ifndef _E2K_KVM_GUEST_STRING_H_ +#define _E2K_KVM_GUEST_STRING_H_ + +#include + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ +static inline unsigned long +kvm_do_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + long ret; + + if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)src) || + IS_HOST_KERNEL_ADDRESS((e2k_addr_t)dst)) { + ret = HYPERVISOR_fast_tagged_guest_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } else { + ret = HYPERVISOR_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } + return ret; +} +static inline unsigned long +kvm_do_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + long ret; + + if (IS_HOST_KERNEL_ADDRESS((e2k_addr_t)addr)) { + ret = HYPERVISOR_fast_tagged_guest_memory_set(addr, val, tag, + len, strd_opcode); + } else { + ret = HYPERVISOR_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); + } + return ret; +} + +/* + * Extract tags from 32 bytes of data + * FIXME: need improve function to extract tags from any size of data + */ +static inline unsigned long +kvm_do_extract_tags_32(u16 *dst, const void *src) +{ + return HYPERVISOR_extract_tags_32(dst, src); +} + +#define DEBUG_GUEST_STRINGS + +#ifndef DEBUG_GUEST_STRINGS +static inline unsigned long +kvm_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + if (likely(IS_HV_GM())) + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + else + return kvm_do_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +kvm_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + if (likely(IS_HV_GM())) + return native_fast_tagged_memory_set(addr, val, tag, len, + 
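+/*
+ * The KVM_GUEST_KERNEL_* stack constants above are fixed page multiples:
+ * a 64 KByte procedure stack (16 pages), an 8 KByte chain stack (2 pages)
+ * and one-page initial slices. The arithmetic, written as compile-time
+ * checks assuming the usual 4 KByte PAGE_SIZE (illustrative only):
+ *
+ *	_Static_assert(16 * 4096 == 64 * 1024, "PS size: 64 KBytes");
+ *	_Static_assert(2 * 4096 == 8 * 1024, "PCS size: 8 KBytes");
+ *	_Static_assert(1 * 4096 == 4 * 1024, "init slices: one page");
+ */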
strd_opcode); + else + return kvm_do_fast_tagged_memory_set(addr, val, tag, len, + strd_opcode); +} + +static inline unsigned long +kvm_extract_tags_32(u16 *dst, const void *src) +{ + if (likely(IS_HV_GM())) + return native_extract_tags_32(dst, src); + else + return kvm_do_extract_tags_32(dst, src); +} +#else /* DEBUG_GUEST_STRINGS */ +extern unsigned long kvm_fast_tagged_memory_copy(void *dst, const void *src, + size_t len, + unsigned long strd_opcode, + unsigned long ldrd_opcode, + int prefetch); +extern unsigned long kvm_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode); +extern unsigned long boot_kvm_fast_tagged_memory_copy(void *dst, + const void *src, size_t len, + unsigned long strd_opcode, + unsigned long ldrd_opcode, + int prefetch); +extern unsigned long boot_kvm_fast_tagged_memory_set(void *addr, u64 val, + u64 tag, size_t len, u64 strd_opcode); + +extern unsigned long kvm_extract_tags_32(u16 *dst, const void *src); +#endif /* ! DEBUG_GUEST_STRINGS */ + +static inline int +kvm_fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* guest kernel does not support any nested guests */ + return kvm_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +kvm_fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* guest kernel does not support any nested guests */ + return kvm_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline void kvm_tagged_memcpy_8(void *dst, const void *src, size_t n) +{ + E2K_PREFETCH_L2(src); + + __tagged_memcpy_8(dst, src, n); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +/** + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ + +#define tagged_memcpy_8(dst, src, n) \ +({ \ + if (likely(IS_HV_GM())) \ + native_tagged_memcpy_8(dst, src, n, \ + __alignof(*(dst)), __alignof(*(src))); \ + else \ + kvm_tagged_memcpy_8(dst, src, n); \ +}) + +static inline unsigned long +fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_fast_tagged_memory_copy(dst, src, len, strd_opcode, + ldrd_opcode, prefetch); +} +static inline unsigned long +boot_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return boot_kvm_fast_tagged_memory_copy(dst, src, len, strd_opcode, + ldrd_opcode, prefetch); +} +static inline unsigned long +fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return kvm_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} +static inline void +boot_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + boot_kvm_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} + +static inline unsigned long +extract_tags_32(u16 *dst, const void *src) +{ + return kvm_extract_tags_32(dst, src); +} + +static inline int +fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return 
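+/*
+ * The tagged copy/set helpers above route by address range: a buffer that
+ * lives in the host kernel's address space needs the *_guest_memory_*
+ * hypercall flavor, everything else takes the plain tagged-memory
+ * hypercall; with hardware virtualization (IS_HV_GM()) the native code
+ * runs instead. A sketch of the routing (is_host_address() stands in for
+ * IS_HOST_KERNEL_ADDRESS(), and the hypercall names are illustrative):
+ *
+ *	static long copy_tagged(void *dst, const void *src, size_t len)
+ *	{
+ *		if (is_host_address(dst) || is_host_address(src))
+ *			return hcall_copy_host(dst, src, len);
+ *		return hcall_copy_guest(dst, src, len);	// guest-only buffers
+ *	}
+ */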
kvm_fast_tagged_memory_copy_to_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return kvm_fast_tagged_memory_copy_from_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_STRING_H_ */ diff --git a/arch/e2k/include/asm/kvm/guest/switch.h b/arch/e2k/include/asm/kvm/guest/switch.h new file mode 100644 index 0000000..3907b21 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/switch.h @@ -0,0 +1,162 @@ +#ifndef _E2K_KVM_GUEST_SWITCH_H +#define _E2K_KVM_GUEST_SWITCH_H + +#include + +static inline void kvm_guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + /* guest cannot have own nested guests */ +} + +static inline void kvm_guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + /* guest cannot have own nested guests */ +} +static inline void +kvm_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, + unsigned flags) +{ + /* guest cannot have own nested guests */ + if (likely(!IS_HV_GM())) + /* there is not hardware virtualization support */ + /* so native trap handler cannot be used */ + return; + if (!(flags & EXIT_FROM_TRAP_SWITCH)) + return; + /* restore global registers used as native kernel too */ + native_trap_guest_enter(ti, regs, flags); +} +static inline void +kvm_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + /* guest cannot have own nested guests */ + if (likely(!IS_HV_GM())) + /* there is not hardware virtualization support */ + /* so native trap handler cannot be used */ + return; + native_trap_guest_exit(ti, regs, trap, flags); +} +static inline bool +kvm_guest_trap_pending(struct thread_info *ti) +{ + /* nothing guest can be */ + return false; +} + +static inline bool +kvm_trap_from_guest_user(struct thread_info *ti) +{ + return native_trap_from_guest_user(ti); +} + +static inline bool +kvm_syscall_from_guest_user(struct thread_info *ti) +{ + return native_syscall_from_guest_user(ti); +} + +static inline struct e2k_stacks * +kvm_trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return native_trap_guest_get_restore_stacks(ti, regs); +} + +static inline struct e2k_stacks * +kvm_syscall_guest_get_restore_stacks(struct thread_info *ti, + struct pt_regs *regs) +{ + return native_syscall_guest_get_restore_stacks(ti, regs); +} + +/* + * The function should return bool is the system call from guest + */ +static inline bool +kvm_guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs) +{ + /* guest cannot have own nested guests */ + + return false; /* it is not nested guest system call */ +} +static inline void +kvm_guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs, + unsigned flags) +{ + /* nothing guests can be */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravrtualized) */ + +static inline void __guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + kvm_guest_enter(ti, vcpu, flags); +} + +static inline void __guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, unsigned flags) +{ + kvm_guest_exit(ti, vcpu, flags); +} +static inline void +trap_guest_enter(struct thread_info *ti, struct 
pt_regs *regs, unsigned flags) +{ + kvm_trap_guest_enter(ti, regs, flags); +} +static inline void +trap_guest_exit(struct thread_info *ti, struct pt_regs *regs, + trap_pt_regs_t *trap, unsigned flags) +{ + kvm_trap_guest_exit(ti, regs, trap, flags); +} +static inline bool +guest_trap_pending(struct thread_info *ti) +{ + return kvm_guest_trap_pending(ti); +} + +static inline bool +guest_trap_from_user(struct thread_info *ti) +{ + return kvm_trap_from_guest_user(ti); +} + +static inline bool +guest_syscall_from_user(struct thread_info *ti) +{ + return kvm_syscall_from_guest_user(ti); +} + +static inline struct e2k_stacks * +trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_trap_guest_get_restore_stacks(ti, regs); +} + +static inline struct e2k_stacks * +syscall_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_syscall_guest_get_restore_stacks(ti, regs); +} + +/* + * The function should return bool is the system call from guest + */ +static inline bool +guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_guest_syscall_enter(ti, regs); +} +static inline void +guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs, + unsigned flags) +{ + kvm_guest_syscall_exit_to(ti, regs, flags); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_KVM_GUEST_SWITCH_H */ diff --git a/arch/e2k/include/asm/kvm/guest/switch_to.h b/arch/e2k/include/asm/kvm/guest/switch_to.h new file mode 100644 index 0000000..22cfc83 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/switch_to.h @@ -0,0 +1,217 @@ +#ifndef _ASM_KVM_GUEST_SWITCH_TO_H +#define _ASM_KVM_GUEST_SWITCH_TO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#undef DEBUG_KVM_SWITCH_MODE +#undef DebugKVMSW +#define DEBUG_KVM_SWITCH_MODE 0 /* KVM switching debugging */ +#define DebugKVMSW(fmt, args...) 
\ +({ \ + if (DEBUG_KVM_SWITCH_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define KVM_UPSR_NMI_SAVE_AND_CLI(flags) \ +({ \ + flags = KVM_READ_UPSR_REG_VALUE(); \ + KVM_WRITE_UPSR_REG_VALUE(flags & ~UPSR_NMIE); \ +}) +#define KVM_RESTORE_USER_UPSR(user_upsr) \ +({ \ + KVM_WRITE_UPSR_REG_VALUE(user_upsr); \ +}) + +static inline struct task_struct * +kvm_ret_from_fork_get_prev_task(struct task_struct *prev) +{ + prev = current->thread.sw_regs.prev_task; + BUG_ON(prev == NULL); + return prev; +} + +static inline void +KVM_SAVE_TASK_STACKS_REGS_TO_SWITCH(struct task_struct *task, int save_ip) +{ + struct sw_regs *sw_regs = &task->thread.sw_regs; + unsigned long usd_lo; + unsigned long usd_hi; + + ATOMIC_DO_SAVE_ALL_STACKS_REGS(sw_regs, + &sw_regs->crs.cr1_hi, usd_lo, usd_hi); + + sw_regs->usd_lo.USD_lo_half = usd_lo; + sw_regs->usd_hi.USD_hi_half = usd_hi; + sw_regs->top = NATIVE_NV_READ_SBR_REG_VALUE(); + sw_regs->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + if (likely(save_ip)) { + sw_regs->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_regs->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + } +} +static inline void +KVM_SAVE_TASK_REGS_TO_SWITCH(struct task_struct *task, int save_ip) +{ + + /* Save interrupt mask state and disable NMIs */ + KVM_UPSR_NMI_SAVE_AND_CLI(AW(task->thread.sw_regs.upsr)); + + KVM_SAVE_TASK_STACKS_REGS_TO_SWITCH(task, save_ip); + + /* global registers and user registers */ + /* will be saved by host kernel while real switch */ + +} + +/* + * now lcc has problem with structure on registers + * (It move these structures is stack memory) + */ +static inline void +KVM_RESTORE_TASK_STACKS_REGS_TO_SWITCH(struct task_struct *task, int restore_ip) +{ + u64 sbr = task->thread.sw_regs.top; + u64 usd_lo = AS_WORD(task->thread.sw_regs.usd_lo); + u64 usd_hi = AS_WORD(task->thread.sw_regs.usd_hi); + u64 psp_lo = AS_WORD(task->thread.sw_regs.psp_lo); + u64 psp_hi = AS_WORD(task->thread.sw_regs.psp_hi); + u64 pcsp_lo = AS_WORD(task->thread.sw_regs.pcsp_lo); + u64 pcsp_hi = AS_WORD(task->thread.sw_regs.pcsp_hi); + u64 cr_wd = AS_WORD(task->thread.sw_regs.crs.cr1_lo); + u64 cr_ussz = AS_WORD(task->thread.sw_regs.crs.cr1_hi); + + KVM_FLUSHCPU; + + KVM_WRITE_USBR_USD_REG_VALUE(sbr, usd_hi, usd_lo); + KVM_WRITE_PSP_REG_VALUE(psp_hi, psp_lo); + KVM_WRITE_PCSP_REG_VALUE(pcsp_hi, pcsp_lo); + + KVM_WRITE_CR1_LO_REG_VALUE(cr_wd); + KVM_WRITE_CR1_HI_REG_VALUE(cr_ussz); + if (unlikely(restore_ip)) { + KVM_WRITE_CR0_LO_REG_VALUE(AW(task->thread.sw_regs.crs.cr0_lo)); + KVM_WRITE_CR0_HI_REG_VALUE(AW(task->thread.sw_regs.crs.cr0_hi)); + } +} +static inline void +KVM_RESTORE_TASK_REGS_TO_SWITCH(struct task_struct *task, int restore_ip) +{ + KVM_RESTORE_TASK_STACKS_REGS_TO_SWITCH(task, restore_ip); + + /* global registers and user registers */ + /* will be restored by host kernel while real switch */ + + /* Enable interrupt */ + KVM_RESTORE_USER_UPSR(task->thread.sw_regs.upsr.UPSR_reg); +} + +static __always_inline struct task_struct * +kvm_do_switch_to(struct task_struct *prev, struct task_struct *next) +{ + thread_info_t *next_ti = task_thread_info(next); + struct sw_regs *sw_regs; + e2k_size_t ps_size; + e2k_size_t ps_ind; + e2k_size_t pcs_size; + e2k_size_t pcs_ind; + + DebugKVMSW("started on VCPU #%d to switch %s(%d/%d) parent %s (%d) " + "-> %s(%d/%d) parent %s (%d)\n", + smp_processor_id(), prev->comm, prev->pid, + task_thread_info(prev)->gpid_nr, + prev->real_parent->comm, prev->real_parent->pid, + next->comm, next->pid, task_thread_info(next)->gpid_nr, + next->real_parent->comm, 
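+	/*
+	 * This function strings the save/restore halves above together:
+	 * stash the outgoing task's stack registers in its sw_regs, load
+	 * the incoming task's, then hypercall so the host performs the
+	 * real stack switch. Condensed control flow (pseudotypes and
+	 * helper names are illustrative, not the real signatures):
+	 *
+	 *	struct task *do_switch(struct task *prev, struct task *next)
+	 *	{
+	 *		nmi_save_and_disable(&prev->sw_regs.upsr);
+	 *		save_stack_regs(&prev->sw_regs);   // PSP/PCSP/USD/CRs
+	 *		restore_stack_regs(&next->sw_regs);
+	 *		next->sw_regs.prev_task = prev;    // survives the jump
+	 *		hypercall_switch_stacks(next);     // host does the switch
+	 *		// running on next's stacks now; locals are stale
+	 *		prev = current()->sw_regs.prev_task;
+	 *		irq_restore(current()->sw_regs.upsr);
+	 *		return prev;
+	 *	}
+	 */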
next->real_parent->pid); + + /* Save interrupt mask state and disable NMIs */ + UPSR_ALL_SAVE_AND_CLI(AW(prev->thread.sw_regs.upsr)); + + ATOMIC_GET_HW_STACK_SIZES(ps_ind, ps_size, pcs_ind, pcs_size); + DebugKVMSW("prev task PS ind 0x%lx size 0x%lx\n", + ps_ind, ps_size); + DebugKVMSW("prev task PCS ind 0x%lx size 0x%lx\n", + pcs_ind, pcs_size); + if (ps_ind + MAX_SRF_SIZE >= ps_size || + pcs_ind + SZ_OF_CR >= pcs_size) { + /* + * Hardware stack(s) overflow and need expand stack(s) + * before switching to new process to avoid trap in + * hypercall while switch will be in progress. + * Provoke to trap now to handle stack bounds exception + */ + KVM_COPY_STACKS_TO_MEMORY(); + DebugKVMSW("copy stacks to memory to trap on bounds\n"); + } + KVM_SAVE_TASK_REGS_TO_SWITCH(prev, 1); + sw_regs = &prev->thread.sw_regs; + DebugKVMSW("prev task regs saved: PS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->psp_lo.PSP_lo_base, + sw_regs->psp_hi.PSP_hi_ind, sw_regs->psp_hi.PSP_hi_size); + DebugKVMSW("prev task regs saved: PCS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->pcsp_lo.PCSP_lo_base, + sw_regs->pcsp_hi.PCSP_hi_ind, sw_regs->pcsp_hi.PCSP_hi_size); + sw_regs = &next->thread.sw_regs; + DebugKVMSW("next task regs saved: PS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->psp_lo.PSP_lo_base, + sw_regs->psp_hi.PSP_hi_ind, sw_regs->psp_hi.PSP_hi_size); + DebugKVMSW("next task regs saved: PCS base 0x%llx ind 0x%x size 0x%x\n", + sw_regs->pcsp_lo.PCSP_lo_base, + sw_regs->pcsp_hi.PCSP_hi_ind, sw_regs->pcsp_hi.PCSP_hi_size); + + set_current_thread_info(next_ti, next); + + KVM_RESTORE_TASK_REGS_TO_SWITCH(next, 0); + + /* remember previous task to restore after real switch */ + sw_regs->prev_task = prev; + + /* real switch guest kernel stacks can be done only by hypervisor */ + DebugKVMSW("will start hypercall to switch real guest thread stacks\n"); + HYPERVISOR_switch_guest_thread_stacks( + next_ti->gpid_nr, next_ti->gmmid_nr); + + /* reload locals after hardware and local data stack switch */ + /* now its state contain previous switch from the current */ + next = current; + prev = current->thread.sw_regs.prev_task; + + /* Restore interrupt mask and enable NMIs */ + UPSR_RESTORE(AW(current->thread.sw_regs.upsr)); + + /* return actualized structure of previous task */ + return prev; +} + +#define kvm_switch_to(prev, next, last) \ +do { \ + if (IS_HV_GM()) { \ + native_switch_to(prev, next, last); \ + } else { \ + last = kvm_do_switch_to(prev, next); \ + e2k_finish_switch(last); \ + } \ +} while (0) + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +static inline struct task_struct * +ret_from_fork_get_prev_task(struct task_struct *prev) +{ + return kvm_ret_from_fork_get_prev_task(prev); +} + +/* switch_to() should be only macros to update pointer 'prev' at */ +/* __schedule() function */ +#define switch_to(prev, next, last) kvm_switch_to(prev, next, last) + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_KVM_GUEST_SWITCH_TO_H */ diff --git a/arch/e2k/include/asm/kvm/guest/system.h b/arch/e2k/include/asm/kvm/guest/system.h new file mode 100644 index 0000000..c6c0691 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/system.h @@ -0,0 +1,227 @@ +/* + * KVM guest processor and processes support + * + * Copyright (C) 2014 MCST + */ + +#ifndef _E2K_KVM_GUEST_SYSTEM_H_ +#define _E2K_KVM_GUEST_SYSTEM_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +/* + * Guest kernel case assumption is that the host emulates hardware updates + * 
of CPU registers state on trap or system call, in particular PSR state. + * Hardware disables interrupt masks and switch interrupts control to PSR, + * so host sets VCPU registers (copy into memory) in same state. + * Trap handler should switch interrupts control from PSR to UPSR + * previously it should set UPSR to initial state for kernel with disabled + * interrupts (so UPSR disable interrupts) + * If trap occurs on guest kernel, then interrupts should be enabled + * and control should be under UPSR. So do not restore control under PSR and + * restore only UPSR state. + * Guest kernel cannot use 'done' instruction and restore PSR state + * saved into CR1.lo register, it should be done by host. + */ + +#define KVM_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ +({ \ + e2k_upsr_t __upsr_val; \ + \ + __upsr_val = nmirq_dis ? E2K_KERNEL_INITIAL_UPSR_WITH_DISABLED_NMI \ + : \ + E2K_KERNEL_INITIAL_UPSR; \ + KVM_WRITE_UPSR_REG_VALUE(__upsr_val.UPSR_reg); \ +}) +#define BOOT_KVM_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ +({ \ + e2k_upsr_t __upsr_val; \ + \ + __upsr_val = nmirq_dis ? E2K_KERNEL_INITIAL_UPSR_WITH_DISABLED_NMI \ + : \ + E2K_KERNEL_INITIAL_UPSR; \ + BOOT_KVM_WRITE_UPSR_REG_VALUE(__upsr_val.UPSR_reg); \ +}) +#define KVM_INIT_USER_UPSR_REG() \ + KVM_WRITE_UPSR_REG_VALUE(E2K_USER_INITIAL_UPSR.UPSR_reg) +#define KVM_INIT_USER_PSR() \ + KVM_ATOMIC_WRITE_PSR_REG_VALUE(E2K_USER_INITIAL_PSR.PSR_reg, \ + false) /* IRQs under UPSR */ + +#define KVM_DO_SAVE_PSR_REG(psr_reg) \ + (psr_reg.PSR_reg = KVM_READ_PSR_REG_VALUE()) +#define KVM_DO_SAVE_UPSR_REG(upsr_reg) \ + (upsr_reg.UPSR_reg = KVM_READ_UPSR_REG_VALUE()) +#define KVM_DO_SAVE_PSR_UPSR_REGS(psr_reg, upsr_reg, under_upsr) \ +({ \ + KVM_DO_SAVE_PSR_REG((psr_reg)); \ + KVM_DO_SAVE_UPSR_REG((upsr_reg)); \ + under_upsr = kvm_get_vcpu_state()->irqs_under_upsr; \ +}) +#define KVM_DO_RESTORE_PSR_REG(psr_reg) \ + (KVM_WRITE_PSR_REG_VALUE(psr_reg.PSR_reg)) +#define KVM_DO_RESTORE_UPSR_REG(upsr_reg) \ + (KVM_WRITE_UPSR_REG_VALUE(upsr_reg.UPSR_reg)) + +#define KVM_SWITCH_IRQ_TO_UPSR(disable_sge) \ + KVM_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define BOOT_KVM_SWITCH_IRQ_TO_UPSR() \ + BOOT_KVM_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)) + +#define KVM_DO_RETURN_IRQ_TO_PSR(under_upsr, disable_sge) \ + KVM_ATOMIC_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_DISABLED), under_upsr) + +#define KVM_RETURN_IRQ_TO_PSR(under_upsr) \ + KVM_DO_RETURN_IRQ_TO_PSR(under_upsr, false) + +#define KVM_SET_USER_INITIAL_UPSR(upsr) \ +({ \ + KVM_RETURN_IRQ_TO_PSR(false); \ + KVM_WRITE_UPSR_REG(upsr); \ +}) + +#define KVM_CHECK_IRQ_UNDER_PSR(psr_reg, under_upsr) \ +({ \ + if (psr_reg.PSR_ie || psr_reg.PSR_uie || !psr_reg.PSR_pm) { \ + pr_err("#U1 PSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, under_upsr); \ + psr_reg.PSR_ie = 0; \ + psr_reg.PSR_uie = 0; \ + psr_reg.PSR_pm = 1; \ + WARN_ON(true); \ + } \ + if (under_upsr) { \ + pr_err("#U2 PSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, under_upsr); \ + kvm_get_vcpu_state()->irqs_under_upsr = false; \ + WARN_ON(true); \ + } \ +}) +#define KVM_CHECK_IRQ_UNDER_UPSR(psr_reg, upsr_reg, under_upsr, has_irqs) \ +({ \ + if (psr_reg.PSR_ie || !psr_reg.PSR_pm || \ + !psr_reg.PSR_uie && under_upsr) { \ + pr_err("#K1 PSR 0x%x UPSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, upsr_reg.UPSR_reg, \ + under_upsr); \ + psr_reg.PSR_ie = 0; \ + psr_reg.PSR_pm = 1; \ + if (under_upsr) \ + psr_reg.PSR_uie = 1; \ + KVM_WRITE_PSR_REG_VALUE(psr_reg.PSR_reg); \ + WARN_ON(true); \ + } \ + if (psr_reg.PSR_uie && !under_upsr) { \ + 
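+	/* \
+	 * The KVM_CHECK_IRQ_* macros police the convention stated at the \
+	 * top of this file: while the guest kernel runs, PSR keeps \
+	 * interrupts masked and gating is done through UPSR, so an enabled \
+	 * PSR.ie/PSR.uie here means corrupted state, which is repaired and \
+	 * reported. The intended lifetime, roughly (illustrative helpers): \
+	 * \
+	 *	save_psr_upsr(&psr, &upsr);	// trap entry \
+	 *	switch_irq_control_to_upsr();	// PSR stays masked \
+	 *	// ... kernel work, IRQs gated by UPSR.ie ... \
+	 *	return_irq_control_to_psr();	// before return to user \
+	 *	restore_upsr(upsr); \
+	 */ \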
E2K_LMS_HALT_OK; \ + pr_err("#K2 PSR 0x%x UPSR 0x%x under upsr %d\n", \ + psr_reg.PSR_reg, upsr_reg.UPSR_reg, \ + under_upsr); \ + kvm_get_vcpu_state()->irqs_under_upsr = true; \ + WARN_ON(true); \ + } \ + if (!upsr_reg.UPSR_ie && under_upsr && has_irqs) { \ + pr_err("#K3 PSR 0x%x UPSR 0x%x under upsr %d " \ + "has IRQs %d\n", \ + psr_reg.PSR_reg, upsr_reg.UPSR_reg, \ + under_upsr, has_irqs); \ + upsr_reg.UPSR_ie = 1; \ + KVM_WRITE_UPSR_REG_VALUE(upsr_reg.UPSR_reg); \ + WARN_ON(true); \ + } \ +}) +#define KVM_CHECK_IRQ_STATE(psr_reg, upsr_reg, under_upsr, \ + has_irqs, user_mode) \ +do { \ + if (user_mode) { \ + KVM_CHECK_IRQ_UNDER_PSR(psr_reg, under_upsr); \ + } else { \ + KVM_CHECK_IRQ_UNDER_UPSR(psr_reg, upsr_reg, \ + under_upsr, has_irqs); \ + } \ +} while (false) + +#define KVM_RETURN_TO_KERNEL_UPSR(upsr_reg) \ +({ \ + if (IS_HV_GM()) { \ + e2k_cr1_lo_t cr1_lo; \ + unsigned psr; \ +\ + cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); \ + psr = cr1_lo.CR1_lo_psr; \ + KVM_WRITE_SW_PSR_REG_VALUE(psr); \ + NATIVE_RETURN_IRQ_TO_PSR(); \ + } else { \ + KVM_RETURN_IRQ_TO_PSR(true); \ + } \ + KVM_DO_RESTORE_UPSR_REG(upsr_reg); \ +}) +#define KVM_RETURN_TO_INIT_USER_UPSR() \ +({ \ + KVM_INIT_USER_PSR(); \ + KVM_INIT_USER_UPSR_REG(); \ +}) +#define KVM_SWITCH_TO_KERNEL_UPSR(psr_reg, upsr_reg, under_upsr, \ + irq_en, nmirq_dis) \ +({ \ + KVM_DO_SAVE_PSR_UPSR_REGS(psr_reg, upsr_reg, under_upsr); \ + KVM_DO_SWITCH_TO_KERNEL_UPSR(irq_en, nmirq_dis); \ + kvm_get_vcpu_state()->irqs_under_upsr = true; \ +}) + +#define KVM_DO_SWITCH_TO_KERNEL_UPSR(irq_en, nmirq_dis) \ + PREFIX_DO_SWITCH_TO_KERNEL_UPSR(KVM, kvm, \ + irq_en, nmirq_dis) +#define KVM_RETURN_TO_USER_UPSR(upsr_reg, under_upsr) \ +({ \ + KVM_RETURN_IRQ_TO_PSR(under_upsr); \ + KVM_DO_RESTORE_UPSR_REG(upsr_reg); \ +}) +#define KVM_SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(KVM) +#define KVM_SET_KERNEL_UPSR(disable_sge) \ + PREFIX_SET_KERNEL_UPSR(KVM, disable_sge) +#define BOOT_KVM_SET_KERNEL_UPSR() \ + BOOT_PREFIX_SET_KERNEL_UPSR(KVM) + +#define kvm_psr_and_upsr_irqs_disabled() \ +({ \ + e2k_psr_t psr; \ + e2k_upsr_t upsr; \ + bool under_upsr; \ + \ + KVM_DO_SAVE_PSR_UPSR_REGS(psr, upsr, under_upsr); \ + psr_and_upsr_irqs_disabled_flags(psr.PSR_reg, upsr.UPSR_reg); \ +}) + +extern void *kvm_nested_kernel_return_address(int n); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized) */ + +#define INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + KVM_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) +#define SET_KERNEL_UPSR(disable_sge) \ + KVM_SET_KERNEL_UPSR(disable_sge) +#define BOOT_SET_KERNEL_UPSR() \ + BOOT_KVM_SET_KERNEL_UPSR() +#define SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + KVM_SET_KERNEL_UPSR_WITH_DISABLED_NMI() +#define RETURN_TO_KERNEL_UPSR(upsr_reg) \ + KVM_RETURN_TO_KERNEL_UPSR(upsr_reg) + +static inline void * +nested_kernel_return_address(int n) +{ + return kvm_nested_kernel_return_address(n); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* _E2K_KVM_GUEST_SYSTEM_H_ */ + + diff --git a/arch/e2k/include/asm/kvm/guest/time.h b/arch/e2k/include/asm/kvm/guest/time.h new file mode 100644 index 0000000..50c6b00 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/time.h @@ -0,0 +1,29 @@ +#ifndef __ASM_KVM_GUEST_TIME_H +#define __ASM_KVM_GUEST_TIME_H + +#ifdef __KERNEL__ +#include + +extern unsigned long kvm_get_wallclock(void); +extern int kvm_set_wallclock(unsigned long now); +extern void kvm_clock_init(void); + +#ifdef CONFIG_PARAVIRT +/* FIXME: this method has support on arch-independent code */ +/* so it should be main method to account steal time */ + +extern unsigned long kvm_steal_clock(int cpu); +#endif /* CONFIG_PARAVIRT */ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +arch_clock_init(void) +{ + kvm_clock_init(); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_TIME_H */ diff --git a/arch/e2k/include/asm/kvm/guest/timex.h b/arch/e2k/include/asm/kvm/guest/timex.h new file mode 100644 index 0000000..e80287e --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/timex.h @@ -0,0 +1,26 @@ +#ifndef __ASM_KVM_GUEST_TIMEX_H +#define __ASM_KVM_GUEST_TIMEX_H + +#ifdef __KERNEL__ +#include + +extern void kvm_time_init(void); +extern int kvm_read_current_timer(unsigned long *timer_val); + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +static inline void +time_init(void) +{ + kvm_time_init(); +} +static inline int +read_current_timer(unsigned long *timer_val) +{ + return kvm_read_current_timer(timer_val); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_TIMEX_H */ diff --git a/arch/e2k/include/asm/kvm/guest/tlb_regs_types.h b/arch/e2k/include/asm/kvm/guest/tlb_regs_types.h new file mode 100644 index 0000000..3009123 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/tlb_regs_types.h @@ -0,0 +1,17 @@ +#ifndef __ASM_KVM_GUEST_TLB_REGS_TYPES_H +#define __ASM_KVM_GUEST_TLB_REGS_TYPES_H + +#ifdef __KERNEL__ + +#include + +#include +#include + +extern probe_entry_t kvm_mmu_entry_probe(e2k_addr_t virt_addr); +extern probe_entry_t kvm_mmu_address_probe(e2k_addr_t virt_addr); +extern mmu_reg_t kvm_read_dtlb_reg(e2k_addr_t virt_addr); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_KVM_GUEST_TLB_REGS_TYPES_H */ diff --git a/arch/e2k/include/asm/kvm/guest/tlbflush.h b/arch/e2k/include/asm/kvm/guest/tlbflush.h new file mode 100644 index 0000000..fc191e1 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/tlbflush.h @@ -0,0 +1,183 @@ +/* + * Guest MMU caches flushing support on guest kernel + * + * Guest VM is fake virtual memory support. + * Guest kernel manage own VM as any linux kernel, but all it is fake, + * real used page tables, real physical pages, real flushing is made by host. + * + * Copyright 2016 Salavat S. 
Gilyazov (atic@mcst.ru) + */ +#ifndef _E2K_KVM_GUEST_TLBFLUSH_H +#define _E2K_KVM_GUEST_TLBFLUSH_H + +#include +#include + +#ifdef CONFIG_KVM_GUEST_KERNEL +extern void kvm_pv_flush_tlb_all(void); +extern void kvm_pv_flush_tlb_mm(struct mm_struct *mm); +extern void kvm_pv_flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr); +extern void kvm_pv_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void kvm_pv_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end); +extern void kvm_pv_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void kvm_pv_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, + e2k_addr_t end); +#ifndef CONFIG_SMP +static inline void +kvm_flush_tlb_all(void) +{ + if (IS_HV_GM()) + __flush_tlb_all(); + else + kvm_pv_flush_tlb_all(); +} +static inline void +kvm_flush_tlb_mm(struct mm_struct *mm) +{ + if (IS_HV_GM()) + __flush_tlb_mm(mm); + else + kvm_pv_flush_tlb_mm(mm); +} +static inline void +kvm_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + if (IS_HV_GM()) + __flush_tlb_page(vma->vm_mm, addr); + else + kvm_pv_flush_tlb_page(vma->vm_mm, addr); +} +static inline void +kvm_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_tlb_range(mm, start, end); + else + kvm_pv_flush_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_tlb_all(); + else + kvm_pv_flush_tlb_kernel_range(start, end); +} +static inline void +kvm_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_pmd_tlb_range(mm, start, end); + else + kvm_pv_flush_pmd_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + __flush_tlb_range_and_pgtables(mm, start, end); + else + kvm_pv_flush_tlb_range_and_pgtables(mm, start, end); +} +#else /* CONFIG_SMP */ +extern void native_smp_flush_tlb_all(void); +extern void native_smp_flush_tlb_mm(struct mm_struct *mm); +extern void native_smp_flush_tlb_page(struct vm_area_struct *vma, + e2k_addr_t addr); +extern void native_smp_flush_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void native_smp_flush_pmd_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void native_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +static inline void +kvm_flush_tlb_all(void) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_all(); + else + kvm_pv_flush_tlb_all(); +} +static inline void +kvm_flush_tlb_mm(struct mm_struct *mm) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_mm(mm); + else + kvm_pv_flush_tlb_mm(mm); +} +static inline void +kvm_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_page(vma, addr); + else + kvm_pv_flush_tlb_page(vma->vm_mm, addr); +} +static inline void +kvm_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_range(mm, start, end); + else + kvm_pv_flush_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_kernel_range(e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_all(); + else + kvm_pv_flush_tlb_kernel_range(start, end); +} +static inline void +kvm_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, + 
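+/*
+ * Every flush wrapper in this header makes the same decision: with
+ * hardware virtualization support (IS_HV_GM()) the guest owns a real TLB
+ * and uses the native flush (the SMP variants IPI the other cpus), while
+ * a purely paravirtualized guest manages only a fake VM and must ask the
+ * host through kvm_pv_flush_*(). The shared shape, sketched generically:
+ *
+ *	static void flush_dispatch(struct mm_struct *mm,
+ *			void (*native)(struct mm_struct *),
+ *			void (*paravirt)(struct mm_struct *))
+ *	{
+ *		if (IS_HV_GM())
+ *			native(mm);	// flush the real TLB ourselves
+ *		else
+ *			paravirt(mm);	// hypercall: host flushes for us
+ *	}
+ */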
e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_pmd_tlb_range(mm, start, end); + else + kvm_pv_flush_pmd_tlb_range(mm, start, end); +} +static inline void +kvm_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + if (IS_HV_GM()) + native_smp_flush_tlb_range_and_pgtables(mm, start, end); + else + kvm_pv_flush_tlb_range_and_pgtables(mm, start, end); +} +#endif /* CONFIG_SMP */ + +/* it is native KVM guest kernel (not paravirtualized) */ +/* guest kernel does not support other virtual machines and guests */ +static __always_inline bool +__flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + return false; /* none any guests and guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt(void) +{ + return false; /* none any guests and guest addresses */ +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_KVM_GUEST_TLBFLUSH_H */ diff --git a/arch/e2k/include/asm/kvm/guest/trap_table.S.h b/arch/e2k/include/asm/kvm/guest/trap_table.S.h new file mode 100644 index 0000000..5b2281e --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/trap_table.S.h @@ -0,0 +1,158 @@ +/* + * + * Copyright (C) 2018 MCST + * + * Defenition of guest kernel traps handling routines. + */ + +#ifndef _E2K_KVM_GUEST_TRAP_TABLE_ASM_H +#define _E2K_KVM_GUEST_TRAP_TABLE_ASM_H + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is guest virtualized kernel */ + +/* + * goto guest kernel system call table entry, if system call is from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1: temporary predicates + */ +.macro KVM_GOTO_PV_VCPU_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_TTABLE */ + +#ifdef CONFIG_KVM_GUEST_HW_PV +/* guest virtualization based on hardware virtualized hypervisor */ + +.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \ + drtmp0, drtmp1, predtmp, \ + predCUR, predVCPU, predEXTk + /* + * Native trap handler and these macroses is used only + * if IS_HW_GM() == true + * so need not any additional conditions to calculate. + * drti - pointer to thread_info + * predV5 - ISET is >= V5 + * predCUR - is now set to true (trap from user) and cannot be updated: + * trap on guest user and (GCURTI & GCURTASK & CPU_ID & CPU_OFF) + * should be saved same as native or host mode + * %predVCPU - save VCPU state pointer regs: + * set same as %predCUR + * predEXTk - need save kernel (predCUR) & need save extention (!predV5) + */ + { + pass \predV5, @p0; + pass \predCUR, @p1; + landp @p1, @p1, @p5; + landp ~@p0, @p1, @p4; + pass @p5, \predVCPU; + pass @p4, \predEXTk; + } +.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */ + +/* guest VCPU state registers are saved at thread_info->h_gregs */ +/* same as by host for paravirtualized guest */ + +.macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp, rtmp0, rtmp1 + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? 
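+/*
+ * The SET_VCPU_STATE_GREGS macros below reload the global register that
+ * caches the per-VCPU state pointer: read the cpu number out of
+ * thread_info, scale it to a pointer index (shld by 3, i.e. * 8) and
+ * load from the global vcpus_state table. The three instructions are,
+ * in C terms (sketch):
+ *
+ *	extern void *vcpus_state[];
+ *
+ *	static void *vcpu_state_for(int cpu)
+ *	{
+ *		return vcpus_state[cpu];	// ldw + shld 3 + ldd
+ *	}
+ */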
\predSAVE; + SAVE_GREGS_PAIR_COND_V2 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE, \ + \rtmp0, \rtmp1 +.endm /* DO_SAVE_HOST_GREGS_V2 */ + +.macro DO_SAVE_HOST_GREGS_V5 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? \predSAVE; + SAVE_GREGS_PAIR_COND_V5 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE +.endm /* DO_SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + DO_SAVE_HOST_GREGS_V2 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, \rtmp0, \rtmp1 +.endm /* SAVE_HOST_GREGS_V2 */ + +.macro SAVE_HOST_GREGS_V5 drti, predSAVE, drtmp + DO_SAVE_HOST_GREGS_V5 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, +.endm /* SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_UNEXT gvcpu, hvcpu, drti, drtmp + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp; + SAVE_GREG_UNEXT \gvcpu, \hvcpu, \drtmp +.endm /* SAVE_HOST_GREGS_UNEXT */ + +.global vcpus_state; + +.macro SET_VCPU_STATE_GREGS drti, predSAVE, drtmp + ldw [ \drti + TSK_TI_CPU_DELTA ], \drtmp ? \predSAVE /* VCPU # */ + shld \drtmp, 3, \drtmp ? \predSAVE + ldd [ \drtmp + vcpus_state ], GVCPUSTATE ? \predSAVE +.endm /* SET_VCPU_STATE_GREGS */ + +.macro SET_VCPU_STATE_GREGS_UNCOND drti, drtmp + ldw [ \drti + TSK_TI_CPU_DELTA ], \drtmp /* VCPU # */ + shld \drtmp, 3, \drtmp + ldd [ \drtmp + vcpus_state ], GVCPUSTATE +.endm /* SET_VCPU_STATE_GREGS */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + SAVE_HOST_GREGS_V2 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1 + SET_VCPU_STATE_GREGS \drti, \predSAVE, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_V2 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + SAVE_HOST_GREGS_V5 \drti, \predSAVE, \drtmp + SET_VCPU_STATE_GREGS \drti, \predSAVE, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_V5 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + SAVE_HOST_GREGS_UNEXT GVCPUSTATE, \ + VCPU_STATE_GREGS_PAIRS_INDEX, \ + \drti, \drtmp + SET_VCPU_STATE_GREGS_UNCOND \drti, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#else /* ! 
CONFIG_KVM_GUEST_HW_PV */
+/* It is a virtualized guest based on a paravirtualized hypervisor */
+/* without hardware virtualization support */
+
+.macro	NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \
+		drtmp0, drtmp1, predtmp, \
+		predCUR, predVCPU, predEXTk
+	/* not used */
+.endm	/* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */
+
+.macro	SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1
+	/* not used */
+.endm	/* SAVE_HOST_GREGS_TO_VIRT_V2 */
+
+.macro	SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp
+	/* not used */
+.endm	/* SAVE_HOST_GREGS_TO_VIRT_V5 */
+
+.macro	SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp
+	/* not used */
+.endm	/* SAVE_HOST_GREGS_TO_VIRT_UNEXT */
+
+#endif	/* CONFIG_KVM_GUEST_HW_PV */
+#endif	/* CONFIG_KVM_GUEST_KERNEL */
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* _E2K_KVM_GUEST_TRAP_TABLE_ASM_H */
diff --git a/arch/e2k/include/asm/kvm/guest/trap_table.h b/arch/e2k/include/asm/kvm/guest/trap_table.h
new file mode 100644
index 0000000..69ebc97
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/trap_table.h
@@ -0,0 +1,304 @@
+#ifndef __KVM_GUEST_E2K_TRAP_TABLE_H
+#define __KVM_GUEST_E2K_TRAP_TABLE_H
+
+/* Do not include this header directly, include */
+
+#include
+#include
+
+extern int kvm_guest_ttable_entry0(void);
+extern long kvm_guest_ttable_entry1(int sys_num,
+		u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 arg5, u32 arg6);
+extern long kvm_guest_ttable_entry3(int sys_num,
+		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6);
+extern long kvm_guest_ttable_entry4(int sys_num,
+		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6);
+extern long kvm_guest_ttable_entry5(int sys_num,
+		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6);
+extern long kvm_guest_ttable_entry6(int sys_num,
+		u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6);
+
+static __always_inline void
+kvm_init_traps_handling(struct pt_regs *regs, bool user_mode_trap)
+{
+	regs->deferred_traps = 0;
+}
+static __always_inline void
+kvm_init_syscalls_handling(struct pt_regs *regs)
+{
+	kvm_init_traps_handling(regs, true);	/* now as traps init */
+}
+static inline bool
+kvm_have_deferred_traps(struct pt_regs *regs)
+{
+	return regs->deferred_traps != 0;
+}
+
+#define	KVM_FILL_HARDWARE_STACKS()	/* host itself will fill */
+
+extern void kvm_correct_trap_psp_pcsp(struct pt_regs *regs,
+			thread_info_t *thread_info);
+extern void kvm_correct_scall_psp_pcsp(struct pt_regs *regs,
+			thread_info_t *thread_info);
+extern void kvm_correct_trap_return_ip(struct pt_regs *regs,
+			unsigned long return_ip);
+
+#ifdef	COMMON_KERNEL_USER_HW_STACKS
+static inline void
+kvm_do_correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info)
+{
+	NATIVE_CORRECT_TRAP_PSP_PCSP(regs, thread_info);
+}
+static inline void
+kvm_do_correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info)
+{
+	/* hardware stacks are not incremented for a system call */
+	/* on the guest, so there is nothing to do */
+}
+#endif	/* COMMON_KERNEL_USER_HW_STACKS */
+
+/*
+ * The guest trap handler for hardware stack bounds can be called only on
+ * exception flags in the TIRs, which are passed to the guest by the host
+ * handler. So no additional condition is needed to run the handler.
+ */ +static inline bool +kvm_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return false; +} +static inline bool +kvm_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return false; +} + +static inline void kvm_set_sge(void) +{ + KVM_WRITE_PSR_REG_VALUE((KVM_READ_PSR_REG_VALUE() | PSR_SGE)); +} +static inline void kvm_reset_sge(void) +{ + KVM_WRITE_PSR_REG_VALUE((KVM_READ_PSR_REG_VALUE() & ~PSR_SGE)); +} +static inline void boot_kvm_set_sge(void) +{ + BOOT_KVM_WRITE_PSR_REG_VALUE((BOOT_KVM_READ_PSR_REG_VALUE() | + PSR_SGE)); +} +static inline void boot_kvm_reset_sge(void) +{ + BOOT_KVM_WRITE_PSR_REG_VALUE((BOOT_KVM_READ_PSR_REG_VALUE() & + ~PSR_SGE)); +} +static inline void +kvm_stack_bounds_trap_enable(void) +{ + /* TODO now sge is always enabled (even in kernel), + * so this probably isn't needed anymore */ + kvm_set_sge(); +} + +extern void kvm_handle_deferred_traps(struct pt_regs *regs); +extern void kvm_handle_deferred_traps_in_syscall(struct pt_regs *regs); + +static inline void +kvm_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + native_do_aau_page_fault(regs, address, condition, mask, aa_no); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native KVM guest kernel (not paravirtualized) */ + +#define ttable_entry1 kvm_guest_ttable_entry1 +#define ttable_entry3 kvm_guest_ttable_entry3 +#define ttable_entry4 kvm_guest_ttable_entry4 +#define ttable_entry5 kvm_guest_ttable_entry5 +#define ttable_entry6 kvm_guest_ttable_entry6 + +#define get_ttable_entry1 \ +({ \ + ttable_entry_args_t ttable_entry; \ + ttable_entry = \ + ((IS_HV_GM()) ? (ttable_entry_args_t)native_ttable_entry1 : \ + (ttable_entry_args_t)kvm_guest_ttable_entry1); \ + ttable_entry; \ +}) +#define get_ttable_entry3 \ +({ \ + ttable_entry_args_t ttable_entry; \ + ttable_entry = \ + ((IS_HV_GM()) ? (ttable_entry_args_t)native_ttable_entry3 : \ + (ttable_entry_args_t)kvm_guest_ttable_entry3); \ + ttable_entry; \ +}) + +#define get_ttable_entry4 \ +({ \ + ttable_entry_args_t ttable_entry; \ + ttable_entry = \ + ((IS_HV_GM()) ? 
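+	/* \
+	 * get_ttable_entry1/3/4 bind the system-call entry point at the \
+	 * point of use: a hardware-assisted guest jumps straight into the \
+	 * native trap table, a paravirtualized one into the kvm_guest_* \
+	 * entries that trap to the host. Usage, sketched (the call through \
+	 * the ttable_entry_args_t pointer is illustrative): \
+	 * \
+	 *	ttable_entry_args_t entry = get_ttable_entry3; \
+	 *	ret = entry(sys_num, a1, a2, a3, a4, a5, a6); \
+	 */ \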
(ttable_entry_args_t)native_ttable_entry4 : \ + (ttable_entry_args_t)kvm_guest_ttable_entry4); \ + ttable_entry; \ +}) + +#define FILL_HARDWARE_STACKS() \ +do { \ + if (IS_HV_GM()) { \ + NATIVE_FILL_HARDWARE_STACKS(); \ + } else { \ + KVM_FILL_HARDWARE_STACKS(); \ + } \ +} while (false) + +static inline void +exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + kvm_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +#define handle_guest_traps(regs) /* none any guests */ + +static __always_inline void +init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ + kvm_init_traps_handling(regs, user_mode_trap); +} +static __always_inline void +init_guest_syscalls_handling(struct pt_regs *regs) +{ + kvm_init_syscalls_handling(regs); +} +static inline bool +is_guest_TIRs_frozen(struct pt_regs *regs) +{ + return false; /* none any guest */ +} +static inline bool +have_deferred_traps(struct pt_regs *regs) +{ + return kvm_have_deferred_traps(regs); +} +static inline void +handle_deferred_traps_in_syscall(struct pt_regs *regs) +{ + kvm_handle_deferred_traps_in_syscall(regs); +} +static inline bool +is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_is_proc_stack_bounds(ti, regs); +} +static inline bool +is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return kvm_is_chain_stack_bounds(ti, regs); +} +static inline void +stack_bounds_trap_enable(void) +{ + kvm_stack_bounds_trap_enable(); +} + +#ifdef COMMON_KERNEL_USER_HW_STACKS +static inline void +correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + kvm_do_correct_trap_psp_pcsp(regs, thread_info); +} +static inline void +correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info) +{ + kvm_do_correct_scall_psp_pcsp(regs, thread_info); +} +#endif /* COMMON_KERNEL_USER_HW_STACKS */ + +static inline void +correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + kvm_correct_trap_return_ip(regs, return_ip); +} + +static inline bool +handle_guest_last_wish(struct pt_regs *regs) +{ + return false; /* none any guest and any wishes from */ +} + +static inline void +do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + kvm_do_aau_page_fault(regs, address, condition, mask, aa_no); +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably should be passed to guest kernel to handle. 
+ * Guest has not any own guests + */ +static inline unsigned long +pass_aau_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_the_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo, + int trap_no) +{ + return 0; +} +static inline unsigned long +pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + return 0; +} +static inline unsigned long +pass_coredump_trap_to_guest(struct pt_regs *regs) +{ + return 0; +} +static inline unsigned long +pass_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_nm_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_clw_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline unsigned long +pass_page_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline void +complete_page_fault_to_guest(unsigned long what_complete) +{ +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __KVM_GUEST_E2K_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/kvm/guest/traps.h b/arch/e2k/include/asm/kvm/guest/traps.h new file mode 100644 index 0000000..403a147 --- /dev/null +++ b/arch/e2k/include/asm/kvm/guest/traps.h @@ -0,0 +1,129 @@ +/* + * + * Copyright (C) 2012 MCST + * + * Defenition of kvm guest kernel traps handling routines. + */ + +#ifndef _E2K_ASM_KVM_GUEST_TRAPS_H +#define _E2K_ASM_KVM_GUEST_TRAPS_H + +#include +#include +#include + +extern int kvm_do_hw_stack_bounds(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds); +extern irqreturn_t guest_do_interrupt(struct pt_regs *regs); +extern unsigned long kvm_do_mmio_page_fault(struct pt_regs *regs, + trap_cellar_t *tcellar); +extern void kvm_sysrq_showstate_interrupt(struct pt_regs *regs); +extern void kvm_init_system_handlers_table(void); + +static inline void +kvm_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + native_instr_page_fault(regs, ftype, async_instr); +} + +static inline u64 +kvm_TIR0_clear_false_exceptions(u64 TIR_hi, int nr_TIRs) +{ + /* all false exceptions were cleared while traps passing to guest */ + return TIR_hi; +} + +extern int kvm_host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta); +extern int kvm_host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta); + +static inline unsigned long +kvm_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + e2k_addr_t address; + + if (!kernel_mode(regs)) { + /* trap on user and cannot be to IO */ + return 0; /* not handled */ + } + address = tcellar->address; + if (likely(((address < GUEST_VMALLOC_START || + address >= GUEST_VMALLOC_END)) && + !KVM_IS_VGA_VRAM_VIRT_ADDR(address))) { + /* address is out of address space area to IO remap or */ + /* VGA VRAM */ + return 0; /* not handled */ + } + return kvm_do_mmio_page_fault(regs, tcellar); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (paravirtualized pv_ops not used) */ + +static inline u64 
+#ifdef	CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (paravirtualized pv_ops not used) */
+
+static inline u64
+TIR0_clear_false_exceptions(u64 TIR_hi, int nr_TIRs)
+{
+	return kvm_TIR0_clear_false_exceptions(TIR_hi, nr_TIRs);
+}
+
+static inline void
+instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype,
+			const int async_instr)
+{
+	kvm_instr_page_fault(regs, ftype, async_instr);
+}
+
+/* The following function should be deleted */
+static inline int
+do_hw_stack_bounds(struct pt_regs *regs, bool proc_bounds, bool chain_bounds)
+{
+	return kvm_do_hw_stack_bounds(regs, proc_bounds, chain_bounds);
+}
+
+static inline int host_apply_psp_delta_to_signal_stack(unsigned long base,
+			unsigned long size, unsigned long start,
+			unsigned long end, unsigned long delta)
+{
+	if (IS_HV_GM()) {
+		return native_host_apply_psp_delta_to_signal_stack(base, size,
+							start, end, delta);
+	}
+	return kvm_host_apply_psp_delta_to_signal_stack(base, size,
+							start, end, delta);
+}
+
+static inline int host_apply_pcsp_delta_to_signal_stack(unsigned long base,
+			unsigned long size, unsigned long start,
+			unsigned long end, unsigned long delta)
+{
+	if (IS_HV_GM()) {
+		return native_host_apply_pcsp_delta_to_signal_stack(base, size,
+							start, end, delta);
+	}
+	return kvm_host_apply_pcsp_delta_to_signal_stack(base, size,
+							start, end, delta);
+}
+
+static inline void
+handle_interrupt(struct pt_regs *regs)
+{
+	guest_do_interrupt(regs);
+}
+static inline unsigned long
+mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar)
+{
+	return kvm_mmio_page_fault(regs, tcellar);
+}
+static inline void
+init_guest_system_handlers_table(void)
+{
+	kvm_init_system_handlers_table();
+}
+
+#endif	/* CONFIG_KVM_GUEST_KERNEL */
+
+#endif	/* _E2K_ASM_KVM_GUEST_TRAPS_H */
diff --git a/arch/e2k/include/asm/kvm/guest/v2p.h b/arch/e2k/include/asm/kvm/guest/v2p.h
new file mode 100644
index 0000000..9ad4903
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/v2p.h
@@ -0,0 +1,133 @@
+/*
+ *
+ * Header of boot-time initialization.
+ *
+ * Copyright (C) 2001 Salavat Guiliazov
+ */
+
+#ifndef _E2K_KVM_GUEST_V2P_H
+#define _E2K_KVM_GUEST_V2P_H
+
+#include
+
+#include
+#include
+
+#ifndef __ASSEMBLY__
+
+#define	kvm_pa(gvpa)	__pa(gvpa)
+#define	kvm_va(gpa)	__va(gpa)
+
+static inline void *
+boot_kvm_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base)
+{
+	unsigned long os_base;
+
+	if ((e2k_addr_t)virt_pnt < KERNEL_BASE ||
+			(e2k_addr_t)virt_pnt >= KERNEL_END)
+		return virt_pnt;
+	os_base = BOOT_KVM_READ_OSCUD_LO_REG_VALUE() & OSCUD_lo_base_mask;
+	if (os_base >= KERNEL_BASE)
+		return virt_pnt;
+	if (kernel_base == -1)
+		kernel_base = os_base;
+	return (void *)(kernel_base + ((e2k_addr_t)virt_pnt - KERNEL_BASE));
+}
+
+/*
+ * The guest kernel runs in virtual space, so functions cannot be converted
+ * to "virtual physical" addresses (execute protection is not set for them)
+ * and must keep their source virtual addresses.
+ */
+static inline void *
+boot_kvm_func_to_pa(void *virt_pnt)
+{
+	return boot_kvm_kernel_va_to_pa(virt_pnt, -1);
+}
+
+static inline void *
+boot_kvm_va_to_pa(void *virt_pnt)
+{
+	return boot_kvm_kernel_va_to_pa(virt_pnt, -1);
+}
+
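+/*
+ * Example (illustrative sketch only): during early boot, before paging
+ * is fully set up, pointers are converted through the helpers above.
+ * The function and variable names are assumptions for the example.
+ *
+ *	void *pa;
+ *
+ *	pa = boot_kvm_func_to_pa(&example_boot_function);
+ *	// for functions the source virtual address is returned unchanged,
+ *	// as explained in the comment above boot_kvm_func_to_pa()
+ */
+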
+/*
+ * The KVM guest kernel boots on physical memory that is mapped into
+ * virtual space at GUEST_PAGE_OFFSET, so a "virtual physical" address
+ * (VPA) needs to be converted to a real physical one.
+ */
+static inline e2k_addr_t
+boot_kvm_vpa_to_pa(e2k_addr_t vpa)
+{
+	return (vpa >= GUEST_PAGE_OFFSET) ? kvm_pa((void *)vpa) : vpa;
+}
+static inline e2k_addr_t
+boot_kvm_pa_to_vpa(e2k_addr_t pa)
+{
+	unsigned long os_base;
+
+	os_base = BOOT_KVM_READ_OSCUD_LO_REG_VALUE() & OSCUD_lo_base_mask;
+	if (os_base >= GUEST_PAGE_OFFSET)
+		/* VPA is supported */
+		return (e2k_addr_t)kvm_va((void *)pa);
+	else
+		/* nonpaging mode: all addresses can only be physical */
+		return pa;
+}
+static inline e2k_addr_t
+kvm_vpa_to_pa(e2k_addr_t vpa)
+{
+	return (vpa >= GUEST_PAGE_OFFSET) ? kvm_pa((void *)vpa) : vpa;
+}
+static inline e2k_addr_t
+kvm_pa_to_vpa(e2k_addr_t pa)
+{
+	return (e2k_addr_t)kvm_va((void *)pa);
+}
+
+#ifdef	CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+static inline void *
+boot_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base)
+{
+	return boot_kvm_kernel_va_to_pa(virt_pnt, kernel_base);
+}
+static inline void *
+boot_func_to_pa(void *virt_pnt)
+{
+	return boot_kvm_func_to_pa(virt_pnt);
+}
+
+static inline void *
+boot_va_to_pa(void *virt_pnt)
+{
+	return boot_kvm_va_to_pa(virt_pnt);
+}
+
+static inline e2k_addr_t
+boot_vpa_to_pa(e2k_addr_t vpa)
+{
+	return boot_kvm_vpa_to_pa(vpa);
+}
+static inline e2k_addr_t
+boot_pa_to_vpa(e2k_addr_t pa)
+{
+	return boot_kvm_pa_to_vpa(pa);
+}
+
+static inline e2k_addr_t
+vpa_to_pa(e2k_addr_t vpa)
+{
+	return kvm_vpa_to_pa(vpa);
+}
+static inline e2k_addr_t
+pa_to_vpa(e2k_addr_t pa)
+{
+	return kvm_pa_to_vpa(pa);
+}
+#endif	/* CONFIG_KVM_GUEST_KERNEL */
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* !(_E2K_KVM_GUEST_V2P_H) */
diff --git a/arch/e2k/include/asm/kvm/guest/vga.h b/arch/e2k/include/asm/kvm/guest/vga.h
new file mode 100644
index 0000000..061f38f
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/guest/vga.h
@@ -0,0 +1,52 @@
+#ifndef _E2K_KVM_GUEST_VGA_H_
+#define _E2K_KVM_GUEST_VGA_H_
+
+#include
+
+/*
+ * VGA screen support
+ * VGA video memory is emulated as part of the common guest VCPU
+ * virtual memory
+ */
+#define	KVM_VGA_VRAM_PHYS_BASE	GUEST_VCPU_VRAM_PHYS_BASE
+#define	KVM_VGA_VRAM_OFFSET	VGA_VRAM_PHYS_BASE	/* a0000 - c0000 */
+#define	KVM_VGA_VRAM_START	KVM_VGA_VRAM_OFFSET
+#define	KVM_VGA_VRAM_SIZE	VGA_VRAM_SIZE
+#define	KVM_VGA_VRAM_END	(KVM_VGA_VRAM_START + KVM_VGA_VRAM_SIZE)
+#define	KVM_VGA_VRAM_VIRT_TO_PHYS(addr)	virt_to_phys(addr)
+#define	KVM_VGA_VRAM_PHYS_TO_VIRT(addr)	phys_to_virt(addr)
+
+#define	KVM_IS_PHYS_MEM_MAP_ADDR(addr)	\
+		((addr) >= GUEST_PAGE_OFFSET && \
+			(addr) < (GUEST_PAGE_OFFSET + MAX_PM_SIZE))
+#define	KVM_IS_VGA_VRAM_PHYS_ADDR(addr)	\
+		((addr) >= KVM_VGA_VRAM_START && (addr) < KVM_VGA_VRAM_END)
+#define	KVM_IS_VGA_VRAM_VIRT_ADDR(addr)	\
+		KVM_IS_VGA_VRAM_PHYS_ADDR(KVM_VGA_VRAM_VIRT_TO_PHYS(addr))
+
+extern void kvm_scr_writew(u16 val, volatile u16 *addr);
+extern u16 kvm_scr_readw(volatile const u16 *addr);
+extern void kvm_vga_writeb(u8 val, volatile u8 *addr);
+extern u8 kvm_vga_readb(volatile const u8 *addr);
+
+#ifdef	CONFIG_KVM_GUEST_KERNEL
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+	kvm_scr_writew(val, addr);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+	return kvm_scr_readw(addr);
+}
+static inline void vga_writeb(u8 val, volatile u8 *addr)
+{
+	kvm_vga_writeb(val, addr);
+}
+
+static inline u8 vga_readb(volatile const u8 *addr)
+{
+	return kvm_vga_readb(addr);
+}
+#endif	/* CONFIG_KVM_GUEST_KERNEL */
+
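+/*
+ * Example (illustrative sketch only): the wrappers above let generic
+ * VGA text console code run unchanged in the guest.  The 0xb8000 text
+ * buffer address and the attribute byte are conventional VGA values
+ * and an assumption of this example.
+ *
+ *	volatile u16 *text = (volatile u16 *)
+ *			KVM_VGA_VRAM_PHYS_TO_VIRT(0xb8000);
+ *
+ *	scr_writew((0x07 << 8) | 'A', text);	// 'A', grey on black
+ */
+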
+#endif	/* _E2K_KVM_GUEST_VGA_H_ */
diff --git a/arch/e2k/include/asm/kvm/head.h b/arch/e2k/include/asm/kvm/head.h
new file mode 100644
index 0000000..61ef5ba
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/head.h
@@ -0,0 +1,111 @@
+
+#ifndef	_ASM_E2K_KVM_HEAD_H
+#define	_ASM_E2K_KVM_HEAD_H
+
+#include
+
+/*
+ * Kernel virtual memory layout
+ */
+#ifdef	CONFIG_VIRTUALIZATION
+/*
+ * it can be:
+ *	paravirtualized host and guest kernel
+ *	native host kernel with virtualization support
+ *	pure guest kernel (not paravirtualized based on pv_ops)
+ */
+
+/* 0x0000 e200 0000 0000 - 0x0000 e200 3fff ffff host image area + modules */
+#define	HOST_KERNEL_IMAGE_AREA_BASE	NATIVE_KERNEL_IMAGE_AREA_BASE
+/* 0x0000 2e00 0000 0000 - 0x0000 2e00 3fff ffff shadow host image area + */
+/* modules at guest space */
+#define	SHADOW_KERNEL_IMAGE_AREA_BASE	0x00002e0000000000
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#if	!defined(CONFIG_VIRTUALIZATION)
+/* it is native kernel without any virtualization */
+#include
+
+#define	E2K_KERNEL_IMAGE_AREA_BASE	NATIVE_KERNEL_IMAGE_AREA_BASE
+#elif	!defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native host kernel with virtualization support */
+
+#define	E2K_KERNEL_IMAGE_AREA_BASE	HOST_KERNEL_IMAGE_AREA_BASE
+#define	GUEST_KERNEL_IMAGE_AREA_BASE	SHADOW_KERNEL_IMAGE_AREA_BASE
+#elif	defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+
+#define	E2K_KERNEL_IMAGE_AREA_BASE	GUEST_KERNEL_IMAGE_AREA_BASE
+#define	GUEST_KERNEL_IMAGE_AREA_BASE	SHADOW_KERNEL_IMAGE_AREA_BASE
+#elif	defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+
+#define	E2K_KERNEL_IMAGE_AREA_BASE	HOST_KERNEL_IMAGE_AREA_BASE
+#define	GUEST_KERNEL_IMAGE_AREA_BASE	NATIVE_KERNEL_IMAGE_AREA_BASE
+#else
+ #error	"Unknown virtualization type"
+#endif	/* ! CONFIG_VIRTUALIZATION */
+
+#ifdef	CONFIG_VIRTUALIZATION
+
+#define	HOST_KERNEL_PHYS_MEM_VIRT_BASE	HOST_PAGE_OFFSET  /* 0x0000c000 ... */
+#define	GUEST_KERNEL_PHYS_MEM_VIRT_BASE	GUEST_PAGE_OFFSET /* 0x00002000 ... */
+#define	GUEST_IO_PORTS_VIRT_BASE	0x00003f7e7e000000UL
+
+#define	GUEST_NBSR_BASE		THE_NODE_NBSR_PHYS_BASE(0)
+
+/*
+ * Guest physical memory (RAM) is emulated as one or more contiguous
+ * host virtual areas (gfn + GUEST_PAGE_OFFSET).
+ *
+ * It probably should differ between architecture releases:
+ *	0x0000 0000 0000 0000 - 0x0000 00ef ffff ffff	now limited like this,
+ *	0x0000 2000 0000 0000 - 0x0000 20ff ffff ffff	but the limit can be
+ *							lowered
+ */
+
+#define	GUEST_RAM_PHYS_BASE	0x0000000000000000UL
+#define	GUEST_MAX_RAM_SIZE	0x000000f000000000UL
+#define	GUEST_RAM_VIRT_BASE	\
+		(GUEST_RAM_PHYS_BASE + GUEST_PAGE_OFFSET)
+
+/*
+ * Virtual memory (VRAM) is used to emulate the registers, tables and
+ * structures of the VCPUs (CPU, MMU, SIC, VIRQ VCPU, other hardware).
+ * For now it must be large enough to hold the VCPU state and the CUT
+ * of the guest kernel.
+ *
+ * WARNING: the VRAM physical base must not intersect with the real
+ * physical memory address space layout.  It probably should differ
+ * between architecture releases:
+ *	0x0000 00ff 0000 0000 - 0x0000 00ff 00ff ffff	should not be valid for
+ *	0x0000 20ff 0000 0000 - 0x0000 20ff 00ff ffff	the real memory layout
+ */
+
+/* The macros below define VRAM for a single VCPU or VIRQ VCPU, */
+/* but VRAM should be created for all VCPUs and VIRQ VCPUs */
+#define	GUEST_ONE_VCPU_VRAM_SIZE	(4 * 4096)	/* 4 pages */
+#define	GUEST_VCPU_VRAM_PHYS_BASE	0x000000ff00000000UL
+#define	GUEST_MAX_VCPU_VRAM_SIZE	0x0000000001000000UL
+#define	GUEST_VCPU_VRAM_VIRT_BASE	\
+		(GUEST_VCPU_VRAM_PHYS_BASE + GUEST_PAGE_OFFSET)
+
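+/*
+ * Example (illustrative sketch only): assuming the per-VCPU VRAM areas
+ * are laid out consecutively from GUEST_VCPU_VRAM_VIRT_BASE (the header
+ * implies but does not state this), the area of a given VCPU could be
+ * located as:
+ *
+ *	e2k_addr_t vram = GUEST_VCPU_VRAM_VIRT_BASE +
+ *		(e2k_addr_t)vcpu_id * GUEST_ONE_VCPU_VRAM_SIZE;
+ */
+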
+/*
+ * Virtual IO memory (IO-VRAM) is used to emulate ISA, VGA low memory, etc.
+ * For now this memory is used only to emulate VGA low memory.
+ *
+ * WARNING: the IO-VRAM physical base must not intersect with the real
+ * physical memory address space layout or with VRAM.
+ * It probably should differ between architecture releases:
+ *	0x0000 00ff 0100 0000 - 0x0000 00ff 010f ffff	should not be valid for
+ *	0x0000 20ff 0100 0000 - 0x0000 20ff 010f ffff	the real memory layout
+ *							and VRAM
+ */
+
+#define	GUEST_IO_VRAM_PHYS_BASE	0x000000ff01000000UL
+#define	GUEST_IO_VRAM_SIZE	0x0000000000100000UL
+#define	GUEST_IO_VRAM_VIRT_BASE	\
+		(GUEST_IO_VRAM_PHYS_BASE + GUEST_PAGE_OFFSET)
+
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#endif	/* ! _ASM_E2K_KVM_HEAD_H */
diff --git a/arch/e2k/include/asm/kvm/host_printk.h b/arch/e2k/include/asm/kvm/host_printk.h
new file mode 100644
index 0000000..92650a8
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/host_printk.h
@@ -0,0 +1,28 @@
+/*
+ * Guest VM printk() on host support
+ *
+ * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_HOST_PRINTK_H
+#define _E2K_KVM_HOST_PRINTK_H
+
+#include
+
+#define	HOST_PRINTK_BUFFER_MAX	128	/* max size of buffer to print */
+
+#if	!defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native host without any virtualization or */
+/* native kernel with virtualization support */
+#define	host_printk(fmt, args...)	printk(fmt, ##args)
+#elif	defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest */
+#include
+#elif	defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest (not paravirtualized based on pv_ops) */
+#include
+#else
+ #error	"Undefined type of virtualization"
+#endif	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+
+#endif	/* ! _E2K_KVM_HOST_PRINTK_H */
diff --git a/arch/e2k/include/asm/kvm/hvc-console.h b/arch/e2k/include/asm/kvm/hvc-console.h
new file mode 100644
index 0000000..299410b
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/hvc-console.h
@@ -0,0 +1,27 @@
+#ifndef	L_HVC_CONSOLE_H
+#define	L_HVC_CONSOLE_H
+
+#include
+
+#ifdef	CONFIG_EARLY_VIRTIO_CONSOLE
+extern int boot_hvc_l_cons_init(unsigned long console_base);
+extern void boot_hvc_l_raw_putc(unsigned char c);
+extern bool early_virtio_cons_enabled;
+#define	boot_early_virtio_cons_enabled	\
+		boot_get_vo_value(early_virtio_cons_enabled)
+#else	/* !CONFIG_EARLY_VIRTIO_CONSOLE */
+#define	early_virtio_cons_enabled	false
+#define	boot_early_virtio_cons_enabled	false
+#endif	/* CONFIG_EARLY_VIRTIO_CONSOLE */
+
+#ifdef	CONFIG_HVC_L
+extern __init struct console *hvc_l_early_cons_init(int idx);
+extern void hvc_l_raw_putc(unsigned char c);
+#else	/* !CONFIG_HVC_L */
+static inline struct console *hvc_l_early_cons_init(int idx)
+{
+	return NULL;
+}
+#endif	/* CONFIG_HVC_L */
+
+#endif	/* L_HVC_CONSOLE_H */
diff --git a/arch/e2k/include/asm/kvm/hypercall.h b/arch/e2k/include/asm/kvm/hypercall.h
new file mode 100644
index 0000000..c6e167c
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/hypercall.h
@@ -0,0 +1,1473 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * KVM host <-> guest Linux-specific hypervisor handling.
+ * + * Copyright (c) 2011-2012, Salavat Gilyazov + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _ASM_E2K_HYPERCALL_H +#define _ASM_E2K_HYPERCALL_H + +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +extern unsigned long light_hw_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6); +extern unsigned long generic_hw_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6, + unsigned long arg7); +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + +static inline unsigned long light_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + unsigned long ret; + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +# ifdef CONFIG_KVM_GUEST_KERNEL + if (kvm_vcpu_host_support_hw_hc()) +# endif /* CONFIG_KVM_GUEST_KERNEL */ + return light_hw_hypercall(nr, arg1, arg2, arg3, + arg4, arg5, arg6); +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + + ret = E2K_SYSCALL(LIGHT_HYPERCALL_TRAPNUM, nr, 6, + arg1, arg2, arg3, arg4, arg5, arg6); + + return ret; +} +static inline unsigned long light_hypercall0(unsigned long nr) +{ + return light_hypercall(nr, 0, 0, 0, 0, 0, 0); +} + +static inline unsigned long light_hypercall1(unsigned long nr, + unsigned long arg1) +{ + return light_hypercall(nr, arg1, 0, 0, 0, 0, 0); +} + +static inline unsigned long light_hypercall2(unsigned long nr, + unsigned long arg1, unsigned long arg2) +{ + return light_hypercall(nr, arg1, arg2, 0, 0, 0, 0); +} + +static inline unsigned long light_hypercall3(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + return light_hypercall(nr, arg1, arg2, arg3, 0, 0, 0); +} + +static inline unsigned long light_hypercall4(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + return light_hypercall(nr, arg1, arg2, arg3, arg4, 0, 0); +} + +static inline unsigned long light_hypercall5(unsigned long nr, + 
unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5) +{ + return light_hypercall(nr, arg1, arg2, arg3, arg4, arg5, 0); +} + +static inline unsigned long light_hypercall6(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + return light_hypercall(nr, arg1, arg2, arg3, arg4, arg5, arg6); +} + +static inline unsigned long generic_hypercall(unsigned long nr, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, unsigned long arg6, + unsigned long arg7) +{ + unsigned long ret; + +#ifdef CONFIG_KVM_GUEST_HW_HCALL +# ifdef CONFIG_KVM_GUEST_KERNEL + if (kvm_vcpu_host_support_hw_hc()) +# endif /* CONFIG_KVM_GUEST_KERNEL */ + return generic_hw_hypercall(nr, + arg1, arg2, arg3, arg4, arg5, arg6, arg7); +#endif /* CONFIG_KVM_GUEST_HW_HCALL */ + + ret = E2K_SYSCALL(GENERIC_HYPERCALL_TRAPNUM, nr, 7, + arg1, arg2, arg3, arg4, arg5, arg6, arg7); + return ret; +} +static inline unsigned long generic_hypercall0(unsigned long nr) +{ + return generic_hypercall(nr, 0, 0, 0, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall1(unsigned long nr, + unsigned long arg1) +{ + return generic_hypercall(nr, arg1, 0, 0, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall2(unsigned long nr, + unsigned long arg1, unsigned long arg2) +{ + return generic_hypercall(nr, arg1, arg2, 0, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall3(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + return generic_hypercall(nr, arg1, arg2, arg3, 0, 0, 0, 0); +} + +static inline unsigned long generic_hypercall4(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + return generic_hypercall(nr, arg1, arg2, arg3, arg4, 0, 0, 0); +} + +static inline unsigned long generic_hypercall5(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5) +{ + return generic_hypercall(nr, arg1, arg2, arg3, arg4, arg5, 0, 0); +} + +static inline unsigned long generic_hypercall6(unsigned long nr, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5, unsigned long arg6) +{ + return generic_hypercall(nr, arg1, arg2, arg3, arg4, arg5, arg6, 0); +} + +/* + * KVM hypervisor (host) <-> guest lite hypercalls list + */ + +#define KVM_HCALL_COPY_STACKS_TO_MEMORY 1 /* copy (flush) hardware */ + /* stacks to memory */ +#define KVM_HCALL_SWITCH_GUEST_THREAD_STACKS 2 /* switch guest kernel thread */ + /* stacks from current to new */ + /* specified by GPID number */ +#define KVM_HCALL_GET_ACTIVE_CR_MEM_ITEM 3 /* get current active */ + /* procedure chain stack item */ +#define KVM_HCALL_PUT_ACTIVE_CR_MEM_ITEM 4 /* put current active */ + /* procedure chain stack item */ +#define KVM_HCALL_GET_HOST_RUNSTATE_KTIME 5 /* get host kernel current */ + /* time at terms of runstate */ + /* time (cycles) */ +#define KVM_HCALL_GET_GUEST_RUNNING_TIME 6 /* get running time of guest */ + /* VCPU at cycles */ +#define KVM_HCALL_GET_VCPU_START_THREAD 8 /* register on host the guest */ + /* kernel VCPU booting thread */ +#define KVM_HCALL_UPDATE_PSP_HI 10 /* write updated value to */ + /* PSP_hi register */ +#define KVM_HCALL_UPDATE_PCSP_HI 11 /* write updated value to */ + /* PCSP_hi register */ +#define KVM_HCALL_SETUP_IDLE_TASK 12 /* setup current task 
of the guest as idle task */
+#define	KVM_HCALL_MOVE_TAGGED_DATA	15	/* move tagged quad value */
+						/* from one address to */
+						/* another */
+#define	KVM_HCALL_UNFREEZE_TRAPS	16	/* unfreeze TIRs & trap */
+						/* cellar */
+#define	KVM_HCALL_SWITCH_TO_INIT_MM	17	/* deactivate current guest */
+						/* mm, switch to init mm */
+#define	KVM_HCALL_EXTRACT_TAGS_32	19	/* extract tags from 32 bytes */
+						/* of data */
+#define	KVM_HCALL_INJECT_INTERRUPT	20	/* inject interrupt to handle */
+						/* pending VIRQs by guest */
+#define	KVM_HCALL_VIRQ_HANDLED		21	/* inform host that all VIRQs */
+						/* were handled by guest */
+#define	KVM_HCALL_TEST_PENDING_VIRQ	22	/* test pending VIRQs on VCPU */
+#define	KVM_HCALL_READ_DTLB_REG		23	/* read DTLB entry for */
+						/* virtual address */
+#define	KVM_HCALL_GET_DAM		24	/* get current DAM state */
+#define	KVM_HCALL_FLUSH_DCACHE_LINE	25	/* flush DCACHE line */
+#define	KVM_HCALL_CLEAR_DCACHE_L1_SET	26	/* clear DCACHE L1 set */
+#define	KVM_HCALL_FLUSH_DCACHE_RANGE	27	/* flush DCACHE range of */
+						/* virtual addresses */
+#define	KVM_HCALL_CLEAR_DCACHE_L1_RANGE	28	/* flush DCACHE L1 range of */
+						/* virtual addresses */
+#define	KVM_HCALL_MMU_PROBE		29	/* probe MMU entry or */
+						/* address */
+#define	KVM_HCALL_FLUSH_ICACHE_RANGE	30	/* flush ICACHE range */
+/* notify host kernel about switch to updated procedure stack on guest */
+#define	KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK		31
+/* notify host kernel about switch to updated procedure chain stack on guest */
+#define	KVM_HCALL_SWITCH_TO_EXPANDED_CHAIN_STACK	32
+
+typedef struct kvm_hw_stacks_flush {
+	unsigned long	psp_lo;
+	unsigned long	psp_hi;
+	unsigned long	pcsp_lo;
+	unsigned long	pcsp_hi;
+} kvm_hw_stacks_flush_t;
+
+static inline unsigned long
+HYPERVISOR_copy_stacks_to_memory(void)
+{
+	return light_hypercall1(KVM_HCALL_COPY_STACKS_TO_MEMORY,
+				(unsigned long)NULL);
+}
+
+static inline unsigned long
+HYPERVISOR_flush_hw_stacks_to_memory(kvm_hw_stacks_flush_t __user *hw_stacks)
+{
+	return light_hypercall1(KVM_HCALL_COPY_STACKS_TO_MEMORY,
+				(unsigned long)hw_stacks);
+}
+
+static inline unsigned long
+HYPERVISOR_switch_guest_thread_stacks(unsigned long gpid_nr,
+		unsigned long gmmid_nr)
+{
+	return light_hypercall2(KVM_HCALL_SWITCH_GUEST_THREAD_STACKS,
+				gpid_nr, gmmid_nr);
+}
+
+static inline unsigned long
+HYPERVISOR_get_active_cr_mem_item(unsigned long __user *cr_value,
+		e2k_addr_t base, e2k_addr_t cr_ind,
+		e2k_addr_t cr_item)
+{
+	return light_hypercall4(KVM_HCALL_GET_ACTIVE_CR_MEM_ITEM,
+				(unsigned long)cr_value,
+				base, cr_ind, cr_item);
+}
+static inline unsigned long
+HYPERVISOR_put_active_cr_mem_item(unsigned long cr_value,
+		e2k_addr_t base, e2k_addr_t cr_ind,
+		e2k_addr_t cr_item)
+{
+	return light_hypercall4(KVM_HCALL_PUT_ACTIVE_CR_MEM_ITEM,
+				cr_value, base, cr_ind, cr_item);
+}
+
+static inline unsigned long
+HYPERVISOR_get_host_runstate_ktime(void)
+{
+	return light_hypercall0(KVM_HCALL_GET_HOST_RUNSTATE_KTIME);
+}
+static inline unsigned long
+HYPERVISOR_get_guest_running_time(void)
+{
+	return light_hypercall0(KVM_HCALL_GET_GUEST_RUNNING_TIME);
+}
+
+static inline unsigned long
+HYPERVISOR_get_vcpu_start_thread(void)
+{
+	return light_hypercall0(KVM_HCALL_GET_VCPU_START_THREAD);
+}
+
+static inline unsigned long
+HYPERVISOR_update_psp_hi(unsigned long psp_hi_value)
+{
+	return light_hypercall1(KVM_HCALL_UPDATE_PSP_HI, psp_hi_value);
+}
+
+static inline unsigned long
+HYPERVISOR_update_pcsp_hi(unsigned long pcsp_hi_value)
+{
+	return light_hypercall1(KVM_HCALL_UPDATE_PCSP_HI, pcsp_hi_value);
+}
+
+static inline unsigned long
+HYPERVISOR_setup_idle_task(int cpu) +{ + return light_hypercall1(KVM_HCALL_SETUP_IDLE_TASK, (unsigned long)cpu); +} + +static inline unsigned long +HYPERVISOR_unfreeze_guest_traps(void) +{ + return light_hypercall0(KVM_HCALL_UNFREEZE_TRAPS); +} + +static inline unsigned long +HYPERVISOR_switch_to_guest_init_mm(void) +{ + return light_hypercall0(KVM_HCALL_SWITCH_TO_INIT_MM); +} + +union recovery_faulted_arg { + struct { + char vr; + char chan; + char qp; + char atomic; + u16 tag; + u16 tag_ext; + }; + u64 entire; +}; +static inline unsigned long +HYPERVISOR_move_tagged_data(int word_size, + e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + return light_hypercall3(KVM_HCALL_MOVE_TAGGED_DATA, + word_size, addr_from, addr_to); +} +static inline unsigned long +HYPERVISOR_extract_tags_32(u16 *dst, const void *src) +{ + return light_hypercall2(KVM_HCALL_EXTRACT_TAGS_32, + (unsigned long)dst, (unsigned long)src); +} + +static inline unsigned long +HYPERVISOR_inject_interrupt(void) +{ + return light_hypercall0(KVM_HCALL_INJECT_INTERRUPT); +} +static inline unsigned long +HYPERVISOR_virqs_handled(void) +{ + return light_hypercall0(KVM_HCALL_VIRQ_HANDLED); +} + +static inline unsigned long +HYPERVISOR_test_pending_virqs(void) +{ + return light_hypercall0(KVM_HCALL_TEST_PENDING_VIRQ); +} +static inline unsigned long +HYPERVISOR_read_dtlb_reg(e2k_addr_t virt_addr) +{ + return light_hypercall1(KVM_HCALL_READ_DTLB_REG, virt_addr); +} +static inline unsigned long +HYPERVISOR_get_DAM(unsigned long long *dam, int dam_entries) +{ + return light_hypercall2(KVM_HCALL_GET_DAM, (unsigned long)dam, + dam_entries); +} +static inline unsigned long +HYPERVISOR_flush_dcache_line(e2k_addr_t virt_addr) +{ + return light_hypercall1(KVM_HCALL_FLUSH_DCACHE_LINE, virt_addr); +} +static inline unsigned long +HYPERVISOR_clear_dcache_l1_set(e2k_addr_t virt_addr, unsigned long set) +{ + return light_hypercall2(KVM_HCALL_CLEAR_DCACHE_L1_SET, virt_addr, set); +} +static inline unsigned long +HYPERVISOR_flush_dcache_range(void *addr, size_t len) +{ + return light_hypercall2(KVM_HCALL_FLUSH_DCACHE_RANGE, + (unsigned long)addr, len); +} +static inline unsigned long +HYPERVISOR_clear_dcache_l1_range(void *addr, size_t len) +{ + return light_hypercall2(KVM_HCALL_CLEAR_DCACHE_L1_RANGE, + (unsigned long)addr, len); +} +static inline unsigned long +HYPERVISOR_flush_icache_range(e2k_addr_t start, e2k_addr_t end, u64 dummy) +{ + return light_hypercall3(KVM_HCALL_FLUSH_ICACHE_RANGE, + start, end, dummy); +} + +typedef enum kvm_mmu_probe { + KVM_MMU_PROBE_ENTRY, + KVM_MMU_PROBE_ADDRESS, +} kvm_mmu_probe_t; + +static inline unsigned long +HYPERVISOR_mmu_probe(e2k_addr_t virt_address, kvm_mmu_probe_t what) +{ + return light_hypercall2(KVM_HCALL_MMU_PROBE, virt_address, what); +} + +static inline long +HYPERVISOR_switch_to_expanded_guest_proc_stack(long delta_size, + long delta_offset, bool decr_gk_ps) +{ + return light_hypercall3(KVM_HCALL_SWITCH_TO_EXPANDED_PROC_STACK, + delta_size, delta_offset, (unsigned long)decr_gk_ps); +} +static inline long +HYPERVISOR_switch_to_expanded_guest_chain_stack(long delta_size, + long delta_offset, bool decr_gk_pcs) +{ + return light_hypercall3(KVM_HCALL_SWITCH_TO_EXPANDED_CHAIN_STACK, + delta_size, delta_offset, (unsigned long)decr_gk_pcs); +} + +/* + * KVM hypervisor (host) <-> guest generic hypercalls list + */ + +#define KVM_HCALL_PV_WAIT 1 /* suspend current vcpu until */ + /* it will be woken up by */ + /* call KVM_HCALL_PV_KICK */ +#define KVM_HCALL_PV_KICK 2 /* wake up vcpu suspended by */ + /* call 
KVM_HCALL_PV_WAIT */ +#define KVM_HCALL_RELEASE_TASK_STRUCT 8 /* release task struct on */ + /* host after task completion */ +#define KVM_HCALL_SET_CLOCKEVENT 10 /* set clock event */ +#define KVM_HCALL_COMPLETE_LONG_JUMP 12 /* long jump completion */ +#define KVM_HCALL_LAUNCH_SIG_HANDLER 14 /* launch guest user signal */ + /* handler */ +#define KVM_HCALL_SWITCH_TO_VIRT_MODE 16 /* switch from physical to */ + /* virtual addresses mode */ + /* (enable paging, TLB, TLU) */ +#define KVM_HCALL_APPLY_PSP_BOUNDS 17 /* update pending procedure */ + /* stack pointers after stack */ + /* bounds handling */ +#define KVM_HCALL_APPLY_PCSP_BOUNDS 18 /* update pending chain */ + /* stack pointers after stack */ + /* bounds handling */ +#define KVM_HCALL_CORRECT_TRAP_RETURN_IP 19 /* correct IP to return from */ + /* guest trap */ +#define KVM_HCALL_SWITCH_GUEST_KERNEL_STACKS 21 /* switch guest kernel thread */ + /* to new stacks */ +#define KVM_HCALL_COPY_GUEST_KERNEL_STACKS 22 /* copy guest kernel stacks */ + /* for kernel thread creation */ +#define KVM_HCALL_SWITCH_TO_GUEST_NEW_USER 28 /* end of execve() for guest */ + /* switch to new user stacks */ + /* start from user entry */ + /* point */ +#define KVM_HCALL_CLONE_GUEST_USER_STACKS 29 /* end of clone_user_stacks() */ + /* create new process thread */ + /* info agent on host and */ + /* register guest kernel */ + /* user local data stack */ +#define KVM_HCALL_COPY_GUEST_USER_STACKS 30 /* end of clone_user_stacks() */ + /* create new process thread */ + /* info agent on host and */ + /* register guest kernel */ + /* user local data stack */ +#define KVM_HCALL_UPDATE_HW_STACKS_FRAMES 36 /* update hardware stacks */ + /* frames */ +#define KVM_HCALL_PATCH_GUEST_DATA_AND_CHAIN_STACKS 38 + /* patch guest kernel data */ + /* chain stacks */ +#define KVM_HCALL_COPY_HW_STACKS_FRAMES 39 /* copy guest hardware stacks */ + /* (user<->kernel) on guest */ + /* addresses */ +#define KVM_HCALL_GET_GUEST_GLOB_REGS 40 /* get current state of */ + /* global registers, except */ + /* someones */ +#define KVM_HCALL_SET_GUEST_GLOB_REGS 41 /* set current state of */ + /* global registers, except */ + /* someones */ +#define KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS 42 /* get current state of */ + /* local global registers */ +#define KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS 43 /* set current state of */ + /* local global registers */ +#define KVM_HCALL_GET_ALL_GUEST_GLOB_REGS 44 /* get current state of all */ + /* guest user global regs */ +#define KVM_HCALL_BOOT_SPIN_LOCK_SLOW 47 /* slowpath of guest boot */ + /* time spinlock locking */ +#define KVM_HCALL_BOOT_SPIN_LOCKED_SLOW 48 /* slowpath of guest boot */ + /* time spinlock locking */ + /* is taken */ +#define KVM_HCALL_BOOT_SPIN_UNLOCK_SLOW 49 /* slowpath of guest boot */ + /* time spinlock unlocking */ +#define KVM_HCALL_GUEST_SPIN_LOCK_SLOW 50 /* slowpath of guest spinlock */ + /* locking */ +#define KVM_HCALL_GUEST_SPIN_LOCKED_SLOW 51 /* slowpath of guest spinlock */ + /* locking is taken */ +#define KVM_HCALL_GUEST_SPIN_UNLOCK_SLOW 52 /* slowpath of guest spinlock */ + /* unlocking */ +#define KVM_HCALL_GUEST_CSD_LOCK_CTL 53 /* serialize access to */ + /* per-cpu csd resources */ + /* wait and wake up on host */ +#define KVM_HCALL_GUEST_MM_DROP 63 /* drop host kernel agent */ + /* for the guest process mm */ +#define KVM_HCALL_ACTIVATE_GUEST_MM 64 /* activate host agent */ + /* for the new guest mm */ +#define KVM_HCALL_PT_ATOMIC_UPDATE 68 /* atomicaly update PT items */ +#define KVM_HCALL_SWITCH_GUEST_MM 78 /* switch 
host agent to */ + /* the next guest mm */ +#define KVM_HCALL_VCPU_MMU_STATE 79 /* common hcall to control */ + /* guest MMU state */ +#define KVM_HCALL_GUEST_IOPORT_REQ 80 /* guest kernel IO ports */ + /* in/out request */ +#define KVM_HCALL_GUEST_IOPORT_STRING_REQ 81 /* guest kernel IO ports */ + /* in/out string request */ +#define KVM_HCALL_GUEST_MMIO_REQ 82 /* guest kernel MMIO */ + /* read/write request */ +#define KVM_HCALL_CONSOLE_IO 83 /* raw write/read to/from */ + /* guest task console */ +#define KVM_HCALL_NOTIFY_IO 84 /* notify host on IO request */ + /* completion or starting */ +#define KVM_HCALL_GUEST_INTR_HANDLER 85 /* create guest interrupt */ + /* handler */ +#define KVM_HCALL_GUEST_INTR_THREAD 86 /* create guest interrupt */ + /* handler thread */ +#define KVM_HCALL_GUEST_FREE_INTR_HANDLER 87 /* stop guest interrupt */ + /* hansler thread */ +#define KVM_HCALL_WAIT_FOR_VIRQ 88 /* wait for the VIRQ */ +#define KVM_HCALL_GET_GUEST_DIRECT_VIRQ 90 /* register direct VIRQ */ +#define KVM_HCALL_FREE_GUEST_DIRECT_VIRQ 91 /* free direct VIRQ */ +#define KVM_HCALL_GUEST_VCPU_COMMON_IDLE 95 /* guest CPU on idle */ + /* wait for some events on */ + /* guest kernel */ +#define KVM_HCALL_GUEST_VCPU_RELAX 96 /* guest VCPU is waiting for */ + /* some event and relax real */ + /* CPU to schedule other */ + /* guest VCPU */ +#define KVM_HCALL_ACTIVATE_GUEST_VCPU 97 /* activate the VCPU, which */ + /* is waiting on idle mode */ +#define KVM_HCALL_ACTIVATE_GUEST_ALL_VCPUS 98 /* activate all VCPUs, which */ + /* is waiting on idle mode */ +#define KVM_HCALL_RECOVERY_FAULTED_TAGGED_GUEST_STORE 110 + /* recovery faulted store */ + /* tagged value operations */ +#define KVM_HCALL_RECOVERY_FAULTED_GUEST_LOAD 111 + /* recovery faulted load */ + /* value and tag */ +#define KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE 112 + /* recovery faulted move */ + /* value and tag to register */ + /* into procedure stack */ +#define KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GUEST_GREG 113 + /* recovery faulted load */ + /* value and tag to global */ + /* register */ +#define KVM_HCALL_MOVE_TAGGED_GUEST_DATA 114 /* move data value from to */ +#define KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY 115 + /* fast tagged memory copy */ +#define KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET 116 + /* fast tagged memory set */ +#define KVM_HCALL_FAST_TAGGED_MEMORY_COPY 117 /* fast tagged memory copy */ +#define KVM_HCALL_FAST_TAGGED_MEMORY_SET 118 /* fast tagged memory set */ +#define KVM_HCALL_SHUTDOWN 120 /* shutdown of guest */ +#define KVM_HCALL_DUMP_GUEST_STACK 121 /* dump guest current stack */ +#define KVM_HCALL_FTRACE_STOP 122 /* stop host's ftrace */ +#define KVM_HCALL_FTRACE_DUMP 123 /* dump host's ftrace buffer */ +#define KVM_HCALL_DUMP_COMPLETION 125 /* show state or dump all */ + /* stacks is completed */ + +#define KVM_HCALL_HOST_PRINTK 130 /* guest printk() on host */ +#define KVM_HCALL_PRINT_GUEST_KERNEL_PTES 131 /* dump guest kernel address */ + /* page table entries */ +#define KVM_HCALL_PRINT_GUEST_USER_ADDRESS_PTES 132 /* dump guest user */ + /* address ptes on host */ +#ifdef CONFIG_KVM_ASYNC_PF +#define KVM_HCALL_PV_ENABLE_ASYNC_PF 133 /* enable async pf */ + /* on current vcpu */ +#endif /* CONFIG_KVM_ASYNC_PF */ +#define KVM_HCALL_FLUSH_TLB_RANGE 134 /* flush given address */ + /* range in tlb */ +#define KVM_HCALL_RECOVERY_FAULTED_TAGGED_STORE 141 + /* recovery faulted store */ + /* tagged value operations */ +#define KVM_HCALL_RECOVERY_FAULTED_LOAD 142 /* recovery faulted load */ + /* value and tag */ +#define 
KVM_HCALL_RECOVERY_FAULTED_MOVE	143
+						/* recovery faulted move */
+						/* value and tag to register */
+						/* into procedure stack */
+#define	KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GREG	144
+						/* recovery faulted load */
+						/* value and tag to global */
+						/* register */
+
+
+/*
+ * KVM hypervisor (host) <-> guest hypercall structures
+ */
+
+/* process and threads management */
+typedef struct kvm_task_info {
+	unsigned long	flags;		/* various task flags, see header */
+					/* asm/processor.h */
+	unsigned long	us_base;	/* local data (user) stack base */
+	unsigned long	us_size;	/* local data (user) stack size */
+	unsigned long	sp_offset;	/* offset of local data stack pointer */
+					/* SP from stack base */
+	unsigned long	ps_base;	/* Procedure stack: base, */
+	unsigned long	ps_size;	/* Procedure stack current: size, */
+	unsigned long	ps_ind;		/* index */
+
+/* should be unused { */
+	unsigned long	ps_offset;	/* current base offset, */
+	unsigned long	ps_top;		/* current top, */
+	unsigned long	us_ps_size;	/* user part of stack total size */
+	unsigned long	init_ps_size;	/* and initial size. */
+	unsigned long	g_ps_size;	/* guest kernel part of stack size, */
+	unsigned long	ps_reserved;	/* current reserved size of guest */
+					/* kernel */
+/* } should be unused */
+
+	unsigned long	pcs_base;	/* Procedure chain stack: base, */
+	unsigned long	pcs_size;	/* Chain stack current: size, */
+	unsigned long	pcs_ind;	/* index */
+
+/* should be unused { */
+	unsigned long	pcs_offset;	/* current base offset, */
+	unsigned long	pcs_top;	/* current top, */
+	unsigned long	us_pcs_size;	/* user part of stack total size */
+	unsigned long	init_pcs_size;	/* and initial size. */
+	unsigned long	g_pcs_size;	/* guest kernel part of stack size, */
+	unsigned long	pcs_reserved;	/* current reserved size of guest */
+					/* kernel part */
+/* } should be unused */
+
+	/* new: guest user stacks */
+	unsigned long	u_us_base;	/* local data (user) stack base */
+	unsigned long	u_us_size;	/* local data (user) stack size */
+	unsigned long	u_sp_offset;	/* offset of local data stack pointer */
+					/* SP from stack base */
+	unsigned long	u_ps_base;	/* Procedure stack: base, */
+	unsigned long	u_ps_size;	/* Procedure stack current: size, */
+	unsigned long	u_ps_ind;	/* index */
+	unsigned long	u_pcs_base;	/* Procedure chain stack: base, */
+	unsigned long	u_pcs_size;	/* Chain stack current: size, */
+	unsigned long	u_pcs_ind;	/* index */
+
+	unsigned long	cr0_lo;		/* Chain register #0 lo */
+	unsigned long	cr0_hi;		/* Chain register #0 hi */
+	unsigned long	cr1_wd;		/* Chain register which contains wbs */
+	unsigned long	cr1_ussz;	/* Chain register which contains */
+					/* user stack size */
+	unsigned long	cud_base;	/* OSCUD: base */
+	unsigned long	cud_size;	/* and size */
+	unsigned long	gd_base;	/* OSGD: base */
+	unsigned long	gd_size;	/* and size */
+	unsigned long	cut_base;	/* CUTD: base */
+	unsigned long	cut_size;	/* and size */
+	unsigned int	cui;		/* compilation unit index of code */
+	unsigned long	entry_point;	/* entry point (address) of task */
+	unsigned long	tls;		/* TLS of new user thread */
+} kvm_task_info_t;
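+
+/*
+ * Example (illustrative sketch only): a caller fills kvm_task_info and
+ * hands it to one of the generic hypercalls declared below, e.g.
+ * HYPERVISOR_switch_to_guest_new_user().  The field values here are
+ * placeholders, not a complete or validated set.
+ *
+ *	kvm_task_info_t info = { 0 };
+ *	unsigned long ret;
+ *
+ *	info.us_base = new_user_stack_base;
+ *	info.us_size = new_user_stack_size;
+ *	info.entry_point = user_entry_ip;
+ *	ret = HYPERVISOR_switch_to_guest_new_user(&info);
+ */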
+/* hardware stack extension, update and change */
+typedef struct kvm_hw_stack {
+	unsigned long	flags;		/* various task flags, see header */
+					/* asm/processor.h */
+	unsigned long	base;		/* Procedure stack: base, */
+	unsigned long	offset;		/* current base offset, */
+	unsigned long	top;		/* current top, */
+	unsigned long	us_size;	/* user part of stack total size */
+	unsigned long	size;		/* Procedure stack current: size, */
+	unsigned long	delta_ind;	/* delta of index */
+	unsigned long	reserved;	/* current reserved size of guest */
+					/* kernel */
+} kvm_hw_stack_t;
+
+/* signal and signal handler management */
+typedef struct kvm_sig_info {
+	int		sig;		/* signal # */
+	unsigned int	protected;	/* task is protected */
+	void		*siginfo;	/* siginfo structure pointer */
+	void		*ucontext;	/* ucontext structure pointer */
+	int		si_size;	/* size of siginfo structure */
+	int		uc_size;	/* size of ucontext structure */
+	unsigned long	handler;	/* user handler function entry point */
+	unsigned long	sbr;		/* user data stack */
+	unsigned long	usd_lo;		/* to handle */
+	unsigned long	usd_hi;		/* the signal */
+} kvm_sig_info_t;
+
+typedef struct kvm_stacks_info {
+	unsigned long	top;		/* top address (same as SBR pointer) */
+	unsigned long	usd_lo;		/* current state of stack pointer */
+	unsigned long	usd_hi;		/* register: base & size */
+	unsigned long	psp_lo;		/* Procedure stack pointer: */
+	unsigned long	psp_hi;		/* base & index & size */
+	unsigned long	pshtp;
+	unsigned long	pcsp_lo;	/* Procedure chain stack */
+	unsigned long	pcsp_hi;	/* pointer: base & index & size */
+	unsigned	pcshtp;
+	unsigned long	cr0_lo;
+	unsigned long	cr0_hi;
+	unsigned long	cr1_lo;
+	unsigned long	cr1_hi;
+} kvm_stacks_info_t;
+typedef kvm_stacks_info_t	kvm_long_jump_info_t;
+
+/* guest kernel local data stack pointers update */
+typedef struct kvm_data_stack_info {
+	bool		protected;	/* protected stack flag */
+	unsigned long	top;		/* top of the stack (register SBR) */
+	unsigned long	usd_base;	/* USD pointer: base */
+	unsigned long	usd_size;	/* size */
+	unsigned long	usd_ind;	/* index (only for protected stack) */
+} kvm_data_stack_info_t;
+
+/* patch of fields of chain stack registers to update */
+typedef struct kvm_pcs_patch_info {
+	unsigned int	ind;		/* index of frame in bytes and */
+					/* relative PCS base: */
+					/* PCS_base + PCS_offset + PCSP.ind */
+	unsigned int	update_flags;	/* flags of updated fields */
+					/* see below */
+	unsigned long	IP;		/* new IP [63:0] */
+	unsigned int	usd_size;	/* data stack size in bytes */
+					/* to calculate ussz fields */
+	unsigned short	wbs;		/* quad registers number */
+	unsigned short	wpsz;		/* quad registers number */
+} kvm_pcs_patch_info_t;
+
+/* chain stack registers updating flags (see structure above) */
+#define	KVM_PCS_IP_UPDATE_FLAG		0x00000001
+#define	KVM_PCS_USSZ_UPDATE_FLAG	0x00000100
+#define	KVM_PCS_WBS_UPDATE_FLAG		0x00001000
+#define	KVM_PCS_WPSZ_UPDATE_FLAG	0x00002000
+
+#define	KVM_MAX_PCS_FRAME_NUM_TO_PATCH	4	/* max number of chain stack */
+						/* frames that can be updated */
+						/* at once */
+
+/* hardware stacks updating interface */
+#define	KVM_MAX_PS_FRAME_NUM_TO_UPDATE	2	/* max number of procedure */
+						/* stack frames that can be */
+						/* updated; one frame is 2 */
+						/* double-word registers */
+						/* with extensions */
+#define	KVM_MAX_PS_FRAME_SIZE_TO_UPDATE	\
+		(KVM_MAX_PS_FRAME_NUM_TO_UPDATE * EXT_4_NR_SZ)
+
+/*
+ * Common hypercall to get/set/control guest MMU state
+ */
+
+/* flags of operations on guest MMU */
+#define	INIT_STATE_GMMU_OPC	0x00000001UL
+#define	SET_OS_VAB_GMMU_OPC	0x00000010UL
+#define	CREATE_NEW_GMM_GMMU_OPC	0x00000100UL
+
+typedef struct vcpu_gmmu_info {
+	unsigned long	opcode;		/* operations on guest MMU */
+					/* (see above) */
+	bool		sep_virt_space;	/* guest uses separate PTs for */
+					/* OS and user virtual spaces */
+	bool		pt_v6;		/* guest PTs are of v6 format */
+	unsigned long	mmu_cr;		/* MMU control register */
+	unsigned long	pid;		/* MMU PID (context) register */
+	unsigned long
trap_cellar; /* MMU trap cellar base */ + unsigned long u_pptb; /* physical base of user (for */ + /* separate PT mode) or united PT */ + unsigned long u_vptb; /* virtual base of user (for */ + /* separate PT mode) or united PT */ + unsigned long os_pptb; /* physical base of kernel PT */ + /* (only for separate PT mode) */ + unsigned long os_vptb; /* virtual base of kernel PT */ + /* (only for separate PT mode) */ + unsigned long os_vab; /* offset of kernel virtual space */ + /* into common virtual addresses */ + /* range */ + unsigned long gmmid_nr; /* return to guest: ID (#) of host */ + /* gmm struct created for new guest */ + /* mm struct */ +} vcpu_gmmu_info_t; + +static inline void HYPERVISOR_pv_wait(void) +{ + generic_hypercall0(KVM_HCALL_PV_WAIT); +} + +static inline void HYPERVISOR_pv_kick(int cpu) +{ + generic_hypercall1(KVM_HCALL_PV_KICK, cpu); +} + +static inline unsigned long +HYPERVISOR_release_task_struct(int gpid_nr) +{ + return generic_hypercall1(KVM_HCALL_RELEASE_TASK_STRUCT, (long)gpid_nr); +} + +static inline unsigned long +HYPERVISOR_set_clockevent(unsigned long delta) +{ + return generic_hypercall1(KVM_HCALL_SET_CLOCKEVENT, delta); +} + +static inline unsigned long +HYPERVISOR_complete_long_jump(kvm_long_jump_info_t *regs_state) +{ + return generic_hypercall1(KVM_HCALL_COMPLETE_LONG_JUMP, + (unsigned long)regs_state); +} + +static inline unsigned long +HYPERVISOR_launch_sig_handler(kvm_stacks_info_t *regs_state, long sys_rval) +{ + return generic_hypercall2(KVM_HCALL_LAUNCH_SIG_HANDLER, + (unsigned long)regs_state, sys_rval); +} + +static inline unsigned long +HYPERVISOR_apply_psp_bounds(unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + return generic_hypercall5(KVM_HCALL_APPLY_PSP_BOUNDS, + base, size, start, end, delta); +} + +static inline unsigned long +HYPERVISOR_apply_pcsp_bounds(unsigned long base, unsigned long size, + unsigned long start, unsigned long end, unsigned long delta) +{ + return generic_hypercall5(KVM_HCALL_APPLY_PCSP_BOUNDS, + base, size, start, end, delta); +} +static inline unsigned long +HYPERVISOR_correct_trap_return_ip(unsigned long return_ip) +{ + return generic_hypercall1(KVM_HCALL_CORRECT_TRAP_RETURN_IP, return_ip); +} + +static inline unsigned long +HYPERVISOR_guest_intr_handler(int irq, int virq_id, + int (*irq_handler)(int, void *), void *arg) +{ + return generic_hypercall4(KVM_HCALL_GUEST_INTR_HANDLER, irq, virq_id, + (unsigned long)irq_handler, + (unsigned long)arg); +} + +static inline unsigned long +HYPERVISOR_guest_intr_thread(int vcpu_id, int irq, int virq_id, int gpid_nr, + int (*irq_fn)(int, void *), void *arg) +{ + return generic_hypercall6(KVM_HCALL_GUEST_INTR_THREAD, + vcpu_id, irq, virq_id, + gpid_nr, (unsigned long)irq_fn, + (unsigned long)arg); +} + +static inline unsigned long +HYPERVISOR_guest_free_intr_handler(int irq, void *arg) +{ + return generic_hypercall2(KVM_HCALL_GUEST_FREE_INTR_HANDLER, irq, + (unsigned long)arg); +} + +static inline unsigned long +HYPERVISOR_get_guest_direct_virq(int irq, int virq_id) +{ + return generic_hypercall2(KVM_HCALL_GET_GUEST_DIRECT_VIRQ, + irq, virq_id); +} + +static inline unsigned long +HYPERVISOR_free_guest_direct_virq(int irq) +{ + return generic_hypercall1(KVM_HCALL_FREE_GUEST_DIRECT_VIRQ, irq); +} + +static inline unsigned long +HYPERVISOR_switch_to_virt_mode(kvm_task_info_t *task_info, + void (*func)(void *data, void *arg1, void *arg2), + void *data, void *arg1, void *arg2) +{ + return 
generic_hypercall5(KVM_HCALL_SWITCH_TO_VIRT_MODE, + (unsigned long)task_info, + (unsigned long)func, + (unsigned long)data, + (unsigned long)arg1, + (unsigned long)arg2); +} + +static inline unsigned long +HYPERVISOR_switch_guest_kernel_stacks(kvm_task_info_t *task_info, + char *entry_point, unsigned long *args, int args_num) +{ + return generic_hypercall4(KVM_HCALL_SWITCH_GUEST_KERNEL_STACKS, + (unsigned long)task_info, (unsigned long)entry_point, + (unsigned long)args, (unsigned long)args_num); +} + +static inline unsigned long +HYPERVISOR_update_hw_stacks_frames(e2k_mem_crs_t *pcs_frame, int pcs_frame_ind, + kernel_mem_ps_t *ps_frame, int ps_frame_ind, int ps_frame_size) +{ + return generic_hypercall5(KVM_HCALL_UPDATE_HW_STACKS_FRAMES, + (unsigned long)pcs_frame, + pcs_frame_ind, + (unsigned long)ps_frame, + ps_frame_ind, ps_frame_size); +} + +static inline unsigned long +HYPERVISOR_copy_hw_stacks_frames(void __user *dst, void __user *src, + long size, bool is_chain) +{ + return generic_hypercall4(KVM_HCALL_COPY_HW_STACKS_FRAMES, + (unsigned long)dst, (unsigned long)src, size, is_chain); +} +static inline unsigned long +HYPERVISOR_copy_guest_kernel_stacks(kvm_task_info_t *task_info) +{ + return generic_hypercall1(KVM_HCALL_COPY_GUEST_KERNEL_STACKS, + (unsigned long)task_info); +} + +static inline unsigned long +HYPERVISOR_switch_to_guest_new_user(kvm_task_info_t *task_info) +{ + return generic_hypercall1(KVM_HCALL_SWITCH_TO_GUEST_NEW_USER, + (unsigned long)task_info); +} + +static inline unsigned long +HYPERVISOR_clone_guest_user_stacks(kvm_task_info_t *task_info) +{ + return generic_hypercall1(KVM_HCALL_CLONE_GUEST_USER_STACKS, + (unsigned long)task_info); +} + +static inline unsigned long +HYPERVISOR_copy_guest_user_stacks(kvm_task_info_t *task_info, + vcpu_gmmu_info_t *gmmu_info) +{ + return generic_hypercall2(KVM_HCALL_COPY_GUEST_USER_STACKS, + (unsigned long)task_info, (unsigned long)gmmu_info); +} + +static inline unsigned long +HYPERVISOR_patch_guest_data_and_chain_stacks(kvm_data_stack_info_t *ds_patch, + kvm_pcs_patch_info_t pcs_patch[], int pcs_frames) +{ + return generic_hypercall3(KVM_HCALL_PATCH_GUEST_DATA_AND_CHAIN_STACKS, + (unsigned long)ds_patch, + (unsigned long)pcs_patch, pcs_frames); +} + +static inline unsigned long +HYPERVISOR_get_guest_glob_regs(unsigned long *gregs[2], + unsigned long not_get_gregs_mask, + bool dirty_bgr, unsigned int *bgr) +{ + return generic_hypercall4(KVM_HCALL_GET_GUEST_GLOB_REGS, + (unsigned long)gregs, not_get_gregs_mask, + (unsigned long)dirty_bgr, (unsigned long)bgr); +} +static inline unsigned long +HYPERVISOR_set_guest_glob_regs(unsigned long *gregs[2], + unsigned long not_set_gregs_mask, + bool dirty_bgr, unsigned int *bgr) +{ + return generic_hypercall4(KVM_HCALL_SET_GUEST_GLOB_REGS, + (unsigned long)gregs, not_set_gregs_mask, + (unsigned long)dirty_bgr, (unsigned long)bgr); +} +static inline unsigned long +HYPERVISOR_set_guest_glob_regs_dirty_bgr(unsigned long *gregs[2], + unsigned long not_set_gregs_mask) +{ + return generic_hypercall4(KVM_HCALL_SET_GUEST_GLOB_REGS, + (unsigned long)gregs, not_set_gregs_mask, + (unsigned long)false, (unsigned long)NULL); +} +static inline unsigned long +HYPERVISOR_get_guest_local_glob_regs(unsigned long *l_gregs[2]) +{ + return generic_hypercall1(KVM_HCALL_GET_GUEST_LOCAL_GLOB_REGS, + (unsigned long)l_gregs); +} +static inline unsigned long +HYPERVISOR_set_guest_local_glob_regs(unsigned long *l_gregs[2]) +{ + return generic_hypercall1(KVM_HCALL_SET_GUEST_LOCAL_GLOB_REGS, + (unsigned long)l_gregs); 
+} + +static inline unsigned long +HYPERVISOR_get_all_guest_glob_regs(unsigned long *gregs[2]) +{ + return generic_hypercall1(KVM_HCALL_GET_ALL_GUEST_GLOB_REGS, + (unsigned long)gregs); +} + +static inline unsigned long +HYPERVISOR_recovery_faulted_tagged_guest_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + union recovery_faulted_arg arg = { + .chan = chan, + .qp = !!qp_store, + .atomic = !!atomic_store, + .tag = data_tag, + .tag_ext = data_ext_tag + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_TAGGED_GUEST_STORE, + address, wr_data, st_rec_opc, data_ext, opc_ext, + arg.entire); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_guest_load(e2k_addr_t address, + u64 *ld_val, u8 *data_tag, u64 ld_rec_opc, int chan) +{ + return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_GUEST_LOAD, + address, (unsigned long)ld_val, + (unsigned long)data_tag, ld_rec_opc, chan); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_guest_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_GUEST_MOVE, + addr_from, addr_to, addr_to_hi, + ld_rec_opc, arg.entire); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_load_to_guest_greg(e2k_addr_t address, + u32 greg_num_d, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, + void *saved_greg_lo, void *saved_greg_hi) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GUEST_GREG, + address, greg_num_d, ld_rec_opc, arg.entire, + (unsigned long) saved_greg_lo, + (unsigned long) saved_greg_hi); +} + +static inline unsigned long +HYPERVISOR_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, + u64 opc_ext, int chan, int qp_store, int atomic_store) +{ + union recovery_faulted_arg arg = { + .chan = chan, + .qp = !!qp_store, + .atomic = !!atomic_store, + .tag = data_tag, + .tag_ext = data_ext_tag + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_TAGGED_STORE, + address, wr_data, st_rec_opc, data_ext, opc_ext, + arg.entire); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, + u8 *data_tag, u64 ld_rec_opc, int chan) +{ + return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_LOAD, + address, (unsigned long)ld_val, + (unsigned long)data_tag, ld_rec_opc, chan); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + .atomic = !!atomic_load + }; + return generic_hypercall5(KVM_HCALL_RECOVERY_FAULTED_MOVE, + addr_from, addr_to, addr_to_hi, + ld_rec_opc, arg.entire); +} +static inline unsigned long +HYPERVISOR_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan, int qp_load, + int atomic_load, void *saved_greg_lo, void *saved_greg_hi) +{ + union recovery_faulted_arg arg = { + .vr = vr, + .chan = chan, + .qp = !!qp_load, + 
.atomic = !!atomic_load + }; + return generic_hypercall6(KVM_HCALL_RECOVERY_FAULTED_LOAD_TO_GREG, + address, greg_num_d, ld_rec_opc, arg.entire, + (unsigned long)saved_greg_lo, + (unsigned long)saved_greg_hi); +} +static inline unsigned long +HYPERVISOR_move_tagged_guest_data(int word_size, + e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + return generic_hypercall3(KVM_HCALL_MOVE_TAGGED_GUEST_DATA, + word_size, addr_from, addr_to); +} +static inline unsigned long +HYPERVISOR_fast_tagged_guest_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return generic_hypercall6(KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_COPY, + (unsigned long)dst, (unsigned long)src, + len, strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +HYPERVISOR_fast_tagged_guest_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return generic_hypercall5(KVM_HCALL_FAST_TAGGED_GUEST_MEMORY_SET, + (unsigned long)addr, val, tag, len, strd_opcode); +} + +static inline unsigned long +HYPERVISOR_boot_spin_lock_slow(void *lock, bool check_unlock) +{ + return generic_hypercall2(KVM_HCALL_BOOT_SPIN_LOCK_SLOW, + (unsigned long)lock, + (unsigned long)check_unlock); +} +static inline unsigned long +HYPERVISOR_boot_spin_locked_slow(void *lock) +{ + return generic_hypercall1(KVM_HCALL_BOOT_SPIN_LOCKED_SLOW, + (unsigned long)lock); +} +static inline unsigned long +HYPERVISOR_boot_spin_unlock_slow(void *lock, bool add_to_unlock) +{ + return generic_hypercall2(KVM_HCALL_BOOT_SPIN_UNLOCK_SLOW, + (unsigned long)lock, + (unsigned long)add_to_unlock); +} + +static inline unsigned long +HYPERVISOR_guest_spin_lock_slow(void *lock, bool check_unlock) +{ + return generic_hypercall2(KVM_HCALL_GUEST_SPIN_LOCK_SLOW, + (unsigned long)lock, + (unsigned long)check_unlock); +} +static inline unsigned long +HYPERVISOR_guest_spin_locked_slow(void *lock) +{ + return generic_hypercall1(KVM_HCALL_GUEST_SPIN_LOCKED_SLOW, + (unsigned long)lock); +} +static inline unsigned long +HYPERVISOR_guest_spin_unlock_slow(void *lock, bool add_to_unlock) +{ + return generic_hypercall2(KVM_HCALL_GUEST_SPIN_UNLOCK_SLOW, + (unsigned long)lock, + (unsigned long)add_to_unlock); +} + +typedef enum csd_ctl { + CSD_LOCK_CTL = 1, /* register csd lock wait */ + CSD_UNLOCK_CTL, /* unlock csd lock wait and wake up */ + /* waiting guest VCPU */ + CSD_LOCK_WAIT_CTL, /* wait for csd lock will be unlocked */ + CSD_LOCK_TRY_WAIT_CTL, /* try wait for asynchronous csd lock */ +} csd_ctl_t; + +static inline unsigned long +HYPERVISOR_guest_csd_lock_ctl(csd_ctl_t csd_ctl, void *lock) +{ + return generic_hypercall2(KVM_HCALL_GUEST_CSD_LOCK_CTL, + (unsigned long)csd_ctl, + (unsigned long)lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_lock(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_LOCK_CTL, lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_unlock(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_UNLOCK_CTL, lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_lock_wait(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_LOCK_WAIT_CTL, lock); +} +static inline unsigned long +HYPERVISOR_guest_csd_lock_try_wait(void *lock) +{ + return HYPERVISOR_guest_csd_lock_ctl(CSD_LOCK_TRY_WAIT_CTL, lock); +} + +static inline unsigned long +HYPERVISOR_pt_atomic_update(unsigned long gpa, void __user *old_gpte, + unsigned atomic_op, unsigned long prot_mask) +{ + return generic_hypercall4(KVM_HCALL_PT_ATOMIC_UPDATE, + gpa, (unsigned 
long)old_gpte, atomic_op, prot_mask);
+}
+
+static inline unsigned long
+HYPERVISOR_kvm_guest_mm_drop(unsigned long gmmid_nr)
+{
+	return generic_hypercall1(KVM_HCALL_GUEST_MM_DROP, gmmid_nr);
+}
+
+static inline unsigned long
+HYPERVISOR_kvm_activate_guest_mm(unsigned long active_gmmid_nr,
+		unsigned long gmmid_nr, e2k_addr_t u_phys_ptb)
+{
+	return generic_hypercall3(KVM_HCALL_ACTIVATE_GUEST_MM,
+			active_gmmid_nr, gmmid_nr, u_phys_ptb);
+}
+
+static inline unsigned long
+HYPERVISOR_kvm_switch_guest_mm(unsigned long gpid_nr, unsigned long gmmid_nr,
+		e2k_addr_t u_phys_ptb)
+{
+	return generic_hypercall3(KVM_HCALL_SWITCH_GUEST_MM,
+			gpid_nr, gmmid_nr, u_phys_ptb);
+}
+
+static inline unsigned long
+HYPERVISOR_vcpu_guest_mmu_state(vcpu_gmmu_info_t *mmu_info)
+{
+	return generic_hypercall1(KVM_HCALL_VCPU_MMU_STATE,
+			(unsigned long)mmu_info);
+}
+
+/* guest kernel memory attributes to set/update */
+typedef enum kvm_sma_mode {
+	KVM_SMA_RO,
+	KVM_SMA_RW,
+	KVM_SMA_NX,
+	KVM_SMA_X,
+	KVM_SMA_P,
+	KVM_SMA_NP,
+} kvm_sma_mode_t;
+
+/*
+ * IO control hypercalls
+ */
+
+/*
+ * Commands to HYPERVISOR_console_io().
+ */
+#define	CONSOLEIO_write		0
+#define	CONSOLEIO_read		1
+
+/*
+ * Return values of some hypercalls
+ */
+#define	RETURN_TO_HOST_APP_HCRET	\
+		(((u64)'r' << 56) | ((u64)'e' << 48) |	\
+		((u64)'t' << 40) | ((u64)'2' << 32) |	\
+		((u64)'h' << 24) | ((u64)'o' << 16) |	\
+		((u64)'s' << 8) | ((u64)'t' << 0))
+
+static inline unsigned long
+HYPERVISOR_guest_ioport_request(unsigned short port,
+		unsigned int __user *data, unsigned char size,
+		unsigned char is_out)
+{
+	return generic_hypercall4(KVM_HCALL_GUEST_IOPORT_REQ, port,
+			(unsigned long)data,
+			size, is_out);
+}
+static inline unsigned long
+HYPERVISOR_guest_ioport_string_request(unsigned short port,
+		const void __user *data, unsigned char size, unsigned int count,
+		unsigned char is_out)
+{
+	return generic_hypercall5(KVM_HCALL_GUEST_IOPORT_STRING_REQ, port,
+			(unsigned long)data,
+			size, count, is_out);
+}
+static inline unsigned long
+HYPERVISOR_guest_mmio_request(unsigned long mmio_addr,
+		unsigned long __user *data, unsigned char size,
+		unsigned char is_write)
+{
+	return generic_hypercall4(KVM_HCALL_GUEST_MMIO_REQ, mmio_addr,
+			(unsigned long)data,
+			size, is_write);
+}
+static inline unsigned long
+HYPERVISOR_console_io(int io_cmd, int size, char __user *str)
+{
+	return generic_hypercall3(KVM_HCALL_CONSOLE_IO, io_cmd, size,
+			(unsigned long)str);
+}
+static inline unsigned long
+HYPERVISOR_notify_io(unsigned int notifier_io)
+{
+	return generic_hypercall1(KVM_HCALL_NOTIFY_IO, notifier_io);
+}
+
+/*
+ * Kernel VM shutdown and panic reasons
+ */
+#define	KVM_SHUTDOWN_POWEROFF	0x01
+#define	KVM_SHUTDOWN_RESTART	0x02
+#define	KVM_SHUTDOWN_PANIC	0x03
+
+extern void smp_send_refresh(void);
+static inline unsigned long
+HYPERVISOR_kvm_shutdown(void *msg, unsigned long reason)
+{
+	smp_send_refresh();
+	return generic_hypercall2(KVM_HCALL_SHUTDOWN, (unsigned long)msg,
+					reason);
+}
+static inline unsigned long
+HYPERVISOR_kvm_guest_vcpu_common_idle(long timeout, bool interruptable)
+{
+	return generic_hypercall2(KVM_HCALL_GUEST_VCPU_COMMON_IDLE,
+					timeout, interruptable);
+}
+static inline unsigned long
+HYPERVISOR_kvm_guest_vcpu_relax(void)
+{
+	return generic_hypercall0(KVM_HCALL_GUEST_VCPU_RELAX);
+}
+#ifdef	CONFIG_SMP
+static inline unsigned long
+HYPERVISOR_kvm_activate_guest_vcpu(int vcpu_id)
+{
+	return generic_hypercall1(KVM_HCALL_ACTIVATE_GUEST_VCPU, vcpu_id);
+}
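+/*
+ * Example (illustrative sketch only): a typical idle/wake pairing of
+ * the hypercalls around this comment.  The timeout value and the id of
+ * the sleeping VCPU are placeholders.
+ *
+ *	// on the idle VCPU: sleep interruptibly until some event arrives
+ *	HYPERVISOR_kvm_guest_vcpu_common_idle(timeout, true);
+ *
+ *	// on another VCPU: wake the sleeping one up by its id
+ *	HYPERVISOR_kvm_activate_guest_vcpu(idle_vcpu_id);
+ */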
+HYPERVISOR_kvm_activate_guest_all_vcpus(void)
+{
+	return generic_hypercall0(KVM_HCALL_ACTIVATE_GUEST_ALL_VCPUS);
+}
+#endif	/* CONFIG_SMP */
+static inline unsigned long
+HYPERVISOR_host_printk(char *msg, int size)
+{
+	return generic_hypercall2(KVM_HCALL_HOST_PRINTK, (unsigned long)msg,
+				(unsigned long)size);
+}
+static inline unsigned long
+HYPERVISOR_print_guest_kernel_ptes(e2k_addr_t address)
+{
+	return generic_hypercall1(KVM_HCALL_PRINT_GUEST_KERNEL_PTES, address);
+}
+static inline unsigned long
+HYPERVISOR_print_guest_user_address_ptes(int gmmid_nr, e2k_addr_t address)
+{
+	return generic_hypercall2(KVM_HCALL_PRINT_GUEST_USER_ADDRESS_PTES,
+				gmmid_nr, address);
+}
+static inline void
+HYPERVISOR_dump_guest_stack(void)
+{
+	generic_hypercall0(KVM_HCALL_DUMP_GUEST_STACK);
+}
+static inline void
+HYPERVISOR_ftrace_stop(void)
+{
+	generic_hypercall0(KVM_HCALL_FTRACE_STOP);
+}
+static inline void
+HYPERVISOR_ftrace_dump(void)
+{
+	generic_hypercall0(KVM_HCALL_FTRACE_DUMP);
+}
+static inline void
+HYPERVISOR_vcpu_show_state_completion(void)
+{
+	generic_hypercall0(KVM_HCALL_DUMP_COMPLETION);
+}
+static inline unsigned long
+HYPERVISOR_fast_tagged_memory_copy(void *dst, const void *src, size_t len,
+		unsigned long strd_opcode, unsigned long ldrd_opcode,
+		int prefetch)
+{
+	return generic_hypercall6(KVM_HCALL_FAST_TAGGED_MEMORY_COPY,
+			(unsigned long)dst, (unsigned long)src,
+			len, strd_opcode, ldrd_opcode, prefetch);
+}
+static inline unsigned long
+HYPERVISOR_fast_tagged_memory_set(void *addr, u64 val, u64 tag,
+		size_t len, u64 strd_opcode)
+{
+	return generic_hypercall5(KVM_HCALL_FAST_TAGGED_MEMORY_SET,
+			(unsigned long)addr, val, tag, len, strd_opcode);
+}
+#ifdef CONFIG_KVM_ASYNC_PF
+static inline int HYPERVISOR_pv_enable_async_pf(u64 apf_reason_gpa,
+		u64 apf_id_gpa, u32 apf_ready_vector, u32 irq_controller)
+{
+	return generic_hypercall4(KVM_HCALL_PV_ENABLE_ASYNC_PF,
+			apf_reason_gpa, apf_id_gpa,
+			apf_ready_vector, irq_controller);
+}
+#endif /* CONFIG_KVM_ASYNC_PF */
+static inline unsigned long
+HYPERVISOR_flush_tlb_range(e2k_addr_t start_gva, e2k_addr_t end_gva)
+{
+	return generic_hypercall2(KVM_HCALL_FLUSH_TLB_RANGE,
+			start_gva, end_gva);
+}
+
+/*
+ * Arguments:
+ * VIRQ number
+ * flag: VIRQ handling is already in progress, so the main VCPU thread,
+ * which may be idle, needs to be woken up
+ * Results of waiting for a VIRQ:
+ * > 0 : number of VIRQs waiting to be handled
+ * = 0 : VIRQ handler should be stopped
+ * < 0 : error detected
+ */
+#define KVM_VIRQ_RECEIVED(ret)	((ret) > 0)
+#define KVM_VIRQ_STOPPED(ret)	((ret) == 0)
+#define KVM_VIRQ_FAILED(ret)	((ret) < 0)
+
+static inline unsigned long
+HYPERVISOR_wait_for_virq(int virq, bool in_progress)
+{
+	return generic_hypercall2(KVM_HCALL_WAIT_FOR_VIRQ, virq, in_progress);
+}
+
+#endif /* _ASM_E2K_HYPERCALL_H */
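/*
 * Editor's sketch (hypothetical handler thread, not part of the original
 * patch): the return-value protocol documented above suggests a wait loop
 * of roughly this shape; the handling body is an assumption:
 */
static inline void example_virq_wait_loop(int virq)
{
	long ret;

	do {
		ret = (long)HYPERVISOR_wait_for_virq(virq, false);
		if (KVM_VIRQ_FAILED(ret))
			break;		/* < 0 : error detected */
		/* KVM_VIRQ_RECEIVED(ret): handle 'ret' pending VIRQs here */
	} while (!KVM_VIRQ_STOPPED(ret));	/* == 0 : stop the handler */
}

diff --git a/arch/e2k/include/asm/kvm/hypervisor.h b/arch/e2k/include/asm/kvm/hypervisor.h
new file mode 100644
index 0000000..58e0ba2
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/hypervisor.h
@@ -0,0 +1,106 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * This header defines architecture specific interface hypervisor -> guest
+ * to know more about the KVM & hypervisor features
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.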
+ *
+ */
+
+#ifndef _ASM_E2K_KVM_HYPERVISOR_H
+#define _ASM_E2K_KVM_HYPERVISOR_H
+
+#include
+
+#include
+#include
+
+/* KVM and hypervisor features */
+/* (see field 'features' of kvm_host_info_t structure at asm/kvm/guest.h) */
+#define KVM_FEAT_HV_CPU_BIT	0	/* Hardware virtualized CPU is ON */
+#define KVM_FEAT_PV_CPU_BIT	1	/* ParaVirtualized CPU is ON */
+#define KVM_FEAT_HW_HCALL_BIT	4	/* HardWare supported HyperCALL is on */
+#define KVM_FEAT_PV_HCALL_BIT	5	/* ParaVirtualized HyperCALLs are on */
+#define KVM_FEAT_HV_MMU_BIT	8	/* Hardware Virtualized MMU is ON */
+#define KVM_FEAT_PV_MMU_BIT	9	/* MMU support is ParaVirtualized */
+#define KVM_FEAT_MMU_SPT_BIT	10	/* MMU support is based on shadow */
+					/* paging */
+#define KVM_FEAT_MMU_TDP_BIT	11	/* MMU support is based on TDP */
+#define KVM_FEAT_HV_EPIC_BIT	16	/* Hardware Virtualized EPIC is ON */
+#define KVM_FEAT_PV_APIC_BIT	18	/* ParaVirtualized APIC is ON */
+					/* (simulated by the hypervisor) */
+#define KVM_FEAT_PV_EPIC_BIT	19	/* ParaVirtualized EPIC is ON */
+					/* (simulated by the hypervisor) */
+
+/* bit mask of features for direct testing */
+#define KVM_FEAT_HV_CPU_MASK	(1UL << KVM_FEAT_HV_CPU_BIT)
+#define KVM_FEAT_PV_CPU_MASK	(1UL << KVM_FEAT_PV_CPU_BIT)
+#define KVM_FEAT_HW_HCALL_MASK	(1UL << KVM_FEAT_HW_HCALL_BIT)
+#define KVM_FEAT_PV_HCALL_MASK	(1UL << KVM_FEAT_PV_HCALL_BIT)
+#define KVM_FEAT_HV_MMU_MASK	(1UL << KVM_FEAT_HV_MMU_BIT)
+#define KVM_FEAT_PV_MMU_MASK	(1UL << KVM_FEAT_PV_MMU_BIT)
+#define KVM_FEAT_MMU_SPT_MASK	(1UL << KVM_FEAT_MMU_SPT_BIT)
+#define KVM_FEAT_MMU_TDP_MASK	(1UL << KVM_FEAT_MMU_TDP_BIT)
+#define KVM_FEAT_HV_EPIC_MASK	(1UL << KVM_FEAT_HV_EPIC_BIT)
+#define KVM_FEAT_PV_APIC_MASK	(1UL << KVM_FEAT_PV_APIC_BIT)
+#define KVM_FEAT_PV_EPIC_MASK	(1UL << KVM_FEAT_PV_EPIC_BIT)
+
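/*
 * Editor's illustration (hypothetical macro, not in the original header):
 * the masks are meant to be OR-ed together for combined tests, e.g. a
 * "fully hardware virtualized" check could be built as:
 */
#define EXAMPLE_KVM_FEAT_FULL_HV_MASK \
		(KVM_FEAT_HV_CPU_MASK | KVM_FEAT_HV_MMU_MASK)

+/*
+ * Basic functions to access host info on the guest.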
+ */ +#define GUEST_HOST_INFO_BASE (offsetof(kvm_vcpu_state_t, host)) + +static inline kvm_host_info_t *kvm_get_host_info(void) +{ + unsigned long vcpu_base; + + KVM_GET_VCPU_STATE_BASE(vcpu_base); + return *((kvm_host_info_t **)(vcpu_base + GUEST_HOST_INFO_BASE)); +} + +static inline unsigned long kvm_hypervisor_features(void) +{ + return kvm_get_host_info()->features; +} +static inline bool kvm_test_hprv_feats_mask(unsigned long feature_mask) +{ + return (kvm_hypervisor_features() & feature_mask) != 0; +} +static inline bool kvm_test_hprv_full_feats_mask(unsigned long feature_mask) +{ + return (kvm_hypervisor_features() & feature_mask) == feature_mask; +} +static inline bool kvm_test_hprv_feats_bit(int feature_bit) +{ + return kvm_test_hprv_feats_mask(1UL << feature_bit); +} + +#define IS_HV_CPU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HV_CPU_MASK) +#define IS_PV_CPU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_CPU_MASK) +#define IS_HV_MMU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HV_MMU_MASK) +#define IS_PV_MMU_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_MMU_MASK) +#define IS_HV_CPU_PV_MMU_KVM() \ + kvm_test_hprv_full_feats_mask(KVM_FEAT_HV_CPU_MASK | \ + KVM_FEAT_PV_MMU_MASK) +#define IS_HV_CPU_HV_MMU_KVM() \ + kvm_test_hprv_full_feats_mask(KVM_FEAT_HV_CPU_MASK | \ + KVM_FEAT_HV_MMU_MASK) +#define IS_PV_CPU_PV_MMU_KVM() \ + kvm_test_hprv_full_feats_mask(KVM_FEAT_PV_CPU_MASK | \ + KVM_FEAT_PV_MMU_MASK) +#define IS_MMU_SPT() kvm_test_hprv_feats_mask(KVM_FEAT_MMU_SPT_MASK) +#define IS_MMU_TDP() kvm_test_hprv_feats_mask(KVM_FEAT_MMU_TDP_MASK) +#define IS_HV_MMU_SPT() (IS_HV_MMU_KVM() && IS_MMU_SPT()) +#define IS_HV_MMU_TDP() (IS_HV_MMU_KVM() && IS_MMU_TDP()) +#define IS_HV_KVM() IS_HV_CPU_HV_MMU_KVM() +#define IS_PV_KVM() IS_PV_CPU_PV_MMU_KVM() +#define IS_HW_HCALL_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HW_HCALL_MASK) +#define IS_PV_HCALL_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_HCALL_MASK) +#define IS_HV_EPIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_HV_EPIC_MASK) +#define IS_PV_APIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_APIC_MASK) +#define IS_PV_EPIC_KVM() kvm_test_hprv_feats_mask(KVM_FEAT_PV_EPIC_MASK) + + +#endif /* _ASM_E2K_KVM_HYPERVISOR_H */ diff --git a/arch/e2k/include/asm/kvm/irq.h b/arch/e2k/include/asm/kvm/irq.h new file mode 100644 index 0000000..faf73d0 --- /dev/null +++ b/arch/e2k/include/asm/kvm/irq.h @@ -0,0 +1,44 @@ + +#ifndef __E2K_ASM_KVM_IRQ_H_ +#define __E2K_ASM_KVM_IRQ_H_ + +#include +#include + +/* + * VIRTUAL INTERRUPTS + * + * Virtual interrupts that a guest OS may receive from KVM. 
+ */
+#define KVM_VIRQ_TIMER		0	/* timer interrupt */
+#define KVM_VIRQ_HVC		1	/* HyperVisor Console interrupt */
+#define KVM_VIRQ_LAPIC		2	/* virtual local APIC interrupt */
+#define KVM_VIRQ_CEPIC		3	/* virtual CEPIC interrupt */
+#define KVM_NR_VIRQS		(KVM_VIRQ_CEPIC + 1)
+
+#define KVM_MAX_NR_VIRQS	(KVM_MAX_VIRQ_VCPUS * KVM_NR_VIRQS)
+
+#if KVM_NR_VIRQS > KVM_MAX_NR_VIRQS
+ #error	"limit of max number of VIRQs exceeded"
+#endif
+
+static inline const char *kvm_get_virq_name(int virq_id)
+{
+	switch (virq_id) {
+	case KVM_VIRQ_TIMER:
+		return "early_timer";
+	case KVM_VIRQ_HVC:
+		return "hvc_virq";
+	case KVM_VIRQ_LAPIC:
+		return "lapic";
+	case KVM_VIRQ_CEPIC:
+		return "cepic";
+	default:
+		return "???";
+	}
+}
+
+typedef int (*irq_thread_t)(void *);
+extern int debug_guest_virqs;
+
+#endif /* __E2K_ASM_KVM_IRQ_H_ */
diff --git a/arch/e2k/include/asm/kvm/machdep.h b/arch/e2k/include/asm/kvm/machdep.h
new file mode 100644
index 0000000..f589aa8
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/machdep.h
@@ -0,0 +1,65 @@
+#ifndef _E2K_KVM_MACHDEP_H_
+#define _E2K_KVM_MACHDEP_H_
+
+#include
+
+#ifdef __KERNEL__
+
+typedef struct global_regs	global_regs_t;
+typedef struct kernel_gregs	kernel_gregs_t;
+typedef struct host_gregs	host_gregs_t;
+
+#ifndef CONFIG_VIRTUALIZATION
+/* it is native kernel without any virtualization support */
+typedef struct host_machdep {
+	/* nothing to support and do */
+} host_machdep_t;
+typedef struct guest_machdep {
+	/* no guest at all */
+} guest_machdep_t;
+#else	/* CONFIG_VIRTUALIZATION */
+extern void kvm_save_host_gregs_v2(struct host_gregs *gregs);
+extern void kvm_save_host_gregs_v5(struct host_gregs *gregs);
+extern void kvm_restore_host_gregs_v5(const struct host_gregs *gregs);
+
+extern void kvm_guest_save_local_gregs_v2(struct local_gregs *gregs);
+extern void kvm_guest_save_local_gregs_v5(struct local_gregs *gregs);
+extern void kvm_guest_save_kernel_gregs_v2(kernel_gregs_t *gregs);
+extern void kvm_guest_save_kernel_gregs_v5(kernel_gregs_t *gregs);
+extern void kvm_guest_save_gregs_v2(struct global_regs *gregs);
+extern void kvm_guest_save_gregs_v5(struct global_regs *gregs);
+extern void kvm_guest_save_gregs_dirty_bgr_v2(struct global_regs *gregs);
+extern void kvm_guest_save_gregs_dirty_bgr_v5(struct global_regs *gregs);
+extern void kvm_guest_restore_gregs_v2(const global_regs_t *gregs);
+extern void kvm_guest_restore_gregs_v5(const global_regs_t *gregs);
+extern void kvm_guest_restore_kernel_gregs_v2(global_regs_t *gregs);
+extern void kvm_guest_restore_kernel_gregs_v5(global_regs_t *gregs);
+extern void kvm_guest_restore_local_gregs_v2(const struct local_gregs *gregs);
+extern void kvm_guest_restore_local_gregs_v5(const struct local_gregs *gregs);
+
+#if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is paravirtualized host and guest kernel */
+/* or pure guest kernel */
+#include
+#endif	/* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */
+
+#ifndef CONFIG_KVM_GUEST_KERNEL
+/* it is native host kernel with virtualization support */
+/* or paravirtualized host and guest kernel */
+
+typedef struct host_machdep {
+} host_machdep_t;
+#endif	/* ! CONFIG_KVM_GUEST_KERNEL */
+
+#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native host kernel with virtualization support */
+typedef struct guest_machdep {
+	/* cannot run as guest */
+} guest_machdep_t;
+#endif	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
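/*
 * Editor's sketch (hypothetical helper, not part of the original patch):
 * the _v2/_v5 suffixes above are presumably tied to the CPU ISET version,
 * with the real dispatch done elsewhere (e.g. through machdep vectors);
 * a caller that knows it runs on a v5-or-later machine would simply use
 * the v5 flavour:
 */
static inline void example_save_kernel_gregs(kernel_gregs_t *gregs)
{
	kvm_guest_save_kernel_gregs_v5(gregs);	/* ISET >= v5 assumed */
}

+
+#endif	/* ! CONFIG_VIRTUALIZATION */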
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _E2K_KVM_MACHDEP_H_ */
diff --git a/arch/e2k/include/asm/kvm/mm.h b/arch/e2k/include/asm/kvm/mm.h
new file mode 100644
index 0000000..a739c31
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/mm.h
@@ -0,0 +1,94 @@
+#ifndef __E2K_KVM_HOST_MM_H
+#define __E2K_KVM_HOST_MM_H
+
+#include
+#include
+#include
+
+#include
+
+#define GMMID_MAX_LIMIT		(GPID_MAX_LIMIT)
+#define RESERVED_GMMIDS		1	/* 0 is reserved for init_mm */
+
+#define GMMIDMAP_ENTRIES	((GMMID_MAX_LIMIT + 8*PAGE_SIZE - 1)/ \
+					PAGE_SIZE/8)
+
+#define GMMID_HASH_BITS		GPID_HASH_BITS
+#define GMMID_HASH_SIZE		NID_HASH_SIZE(GMMID_HASH_BITS)
+
+/*
+ * Guest mm structure agent on host
+ * The structure on host is only an agent of the real mm structure on guest,
+ * so synchronization should be done by guest using the real mm semaphores
+ * and spinlocks; here on host we do not use locking/unlocking
+ */
+typedef struct gmm_struct {
+	kvm_nid_t nid;			/* numeric ID of the host agent */
+					/* of guest mm structure */
+	atomic_t mm_count;		/* how many references to guest mm */
+					/* (shared mm) */
+#ifdef	CONFIG_KVM_HV_MMU
+	hpa_t root_hpa;			/* physical base of root shadow PT */
+					/* for guest mm on host */
+	gfn_t root_gpa;			/* 'physical' base of guest root PT */
+	gpa_t os_pptb;			/* guest kernel root PT physical base */
+	gpa_t u_pptb;			/* guest user root PT physical base */
+	gva_t os_vptb;			/* guest kernel root PT virtual base */
+	gva_t u_vptb;			/* guest user root PT virtual base */
+	bool pt_synced;			/* root guest PT was synced with */
+					/* host shadow PT */
+#endif	/* CONFIG_KVM_HV_MMU */
+	spinlock_t page_table_lock;	/* protects page tables of the mm */
+	/* MMU context (PID) support */
+	mm_context_t context;		/* MMU context (PID) support for */
+					/* the guest mm */
+	cpumask_t cpu_vm_mask;		/* mask of CPUs where the mm is */
+					/* in use now or was in use earlier */
+	bool in_release;		/* guest mm is being released and */
+					/* cannot be used as active */
+} gmm_struct_t;
+
+/* same as the accessor for struct mm_struct's cpu_vm_mask but for guest mm */
+#define gmm_cpumask(gmm)	(&(gmm)->cpu_vm_mask)
+
+typedef struct kvm_nid_table gmmid_table_t;
+
+#define gmmid_hashfn(nr)	nid_hashfn(nr, GMMID_HASH_BITS)
+
+struct kvm;
+
+extern int kvm_guest_mm_drop(struct kvm_vcpu *vcpu, int gmmid_nr);
+extern int kvm_activate_guest_mm(struct kvm_vcpu *vcpu,
+		int active_gmmid_nr, int gmmid_nr, gpa_t u_phys_ptb);
+extern int kvm_guest_pv_mm_init(struct kvm *kvm);
+extern void kvm_guest_pv_mm_destroy(struct kvm *kvm);
+
+#define for_each_guest_mm(gmm, entry, next, gmmid_table)	\
+		for_each_guest_nid_node(gmm, entry, next, gmmid_table,	\
+					nid.nid_chain)
+#define gmmid_entry(ptr)	container_of(ptr, gmm_struct_t, nid)
+#define gmmid_table_lock(gmmid_table)	\
+		nid_table_lock(gmmid_table)
+#define gmmid_table_unlock(gmmid_table)	\
+		nid_table_unlock(gmmid_table)
+#define gmmid_table_lock_irq(gmmid_table)	\
+		nid_table_lock_irq(gmmid_table)
+#define gmmid_table_unlock_irq(gmmid_table)	\
+		nid_table_unlock_irq(gmmid_table)
+#define gmmid_table_lock_irqsave(gmmid_table, flags)	\
+		nid_table_lock_irqsave(gmmid_table, flags)
+#define gmmid_table_unlock_irqrestore(gmmid_table, flags)	\
+		nid_table_unlock_irqrestore(gmmid_table, flags)
+
+static inline gmm_struct_t *
+kvm_find_gmmid(gmmid_table_t *gmmid_table, int gmmid_nr)
+{
+	kvm_nid_t *nid;
+
+	nid = kvm_find_nid(gmmid_table, gmmid_nr, gmmid_hashfn(gmmid_nr));
+	if (nid == NULL)
+		return NULL;
+	return gmmid_entry(nid);
+}
+
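/*
 * Editor's sketch (hypothetical caller, not part of the original patch):
 * looking up a guest mm agent by its numeric ID while holding the nid
 * table lock; the locking discipline shown here is an assumption:
 */
static inline gmm_struct_t *
example_lookup_gmm(gmmid_table_t *gmmid_table, int gmmid_nr)
{
	unsigned long flags;
	gmm_struct_t *gmm;

	gmmid_table_lock_irqsave(gmmid_table, flags);
	gmm = kvm_find_gmmid(gmmid_table, gmmid_nr);
	gmmid_table_unlock_irqrestore(gmmid_table, flags);
	return gmm;
}

+#endif	/* __E2K_KVM_HOST_MM_H */
diff --git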
a/arch/e2k/include/asm/kvm/mmu.h b/arch/e2k/include/asm/kvm/mmu.h new file mode 100644 index 0000000..866c176 --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu.h @@ -0,0 +1,419 @@ +#ifndef __E2K_KVM_HOST_MMU_H +#define __E2K_KVM_HOST_MMU_H + +#include +#include +#include +#include +#include +#include +#include + +static inline bool is_ss(struct kvm_vcpu *vcpu) +{ + return false; +} +static inline bool is_sep_virt_spaces(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.sep_virt_space; +} +static inline void set_sep_virt_spaces(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.sep_virt_space = true; +} +static inline void reset_sep_virt_spaces(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.sep_virt_space = false; +} +static inline bool is_shadow_paging(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.shadow_pt_on; +} +static inline void set_shadow_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.shadow_pt_on = true; + set_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); +} +static inline void reset_shadow_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.shadow_pt_on = false; +} +static inline bool is_phys_paging(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.phys_pt_on; +} +static inline void set_phys_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.phys_pt_on = true; +} +static inline void reset_phys_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.phys_pt_on = false; +} +static inline bool is_tdp_paging(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.tdp_on; +} +static inline void set_tdp_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.tdp_on = true; + set_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); +} +static inline void reset_tdp_paging(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.tdp_on = false; +} + +static inline bool is_paging_flag(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.paging_on; +} +static inline void set_paging_flag(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.paging_on = true; +} +static inline void reset_paging_flag(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.paging_on = false; +} + +static inline bool is_pv_paging(struct kvm_vcpu *vcpu) +{ + return is_paging_flag(vcpu); +} +static inline bool is_spt_paging(struct kvm_vcpu *vcpu) +{ + return is_paging_flag(vcpu); +} +static inline bool is_hv_paging(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.mmu.is_paging == NULL) + return is_paging_flag(vcpu); + + return vcpu->arch.mmu.is_paging(vcpu); +} + +static inline bool is_paging(struct kvm_vcpu *vcpu) +{ + if (is_tdp_paging(vcpu)) + return is_hv_paging(vcpu); + if (unlikely(vcpu->arch.is_pv)) + return is_pv_paging(vcpu); + if (unlikely(is_shadow_paging(vcpu))) + return is_spt_paging(vcpu); + + return is_paging_flag(vcpu); +} + +static inline bool is_spt_gpa_fault(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.spt_gpa_fault; +} +static inline void set_spt_gpa_fault(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.spt_gpa_fault = true; +} +static inline void reset_spt_gpa_fault(struct kvm_vcpu *vcpu) +{ + vcpu->arch.mmu.spt_gpa_fault = false; +} + +static inline hpa_t +kvm_get_gp_phys_root(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmu.get_vcpu_gp_pptb(vcpu); +} +static inline void +kvm_set_gp_phys_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + vcpu->arch.mmu.set_vcpu_gp_pptb(vcpu, root); +} + +static inline hpa_t +kvm_get_space_type_spt_root(struct kvm_vcpu *vcpu, bool u_root) 
+{ + return (u_root) ? vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu) : + vcpu->arch.mmu.get_vcpu_sh_os_pptb(vcpu); +} +static inline hpa_t +kvm_get_space_type_spt_os_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_spt_root(vcpu, false); +} +static inline hpa_t +kvm_get_space_type_spt_u_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_spt_root(vcpu, true); +} +static inline void +kvm_set_space_type_spt_root(struct kvm_vcpu *vcpu, hpa_t root, bool u_root) +{ + if (u_root) { + vcpu->arch.mmu.set_vcpu_sh_u_pptb(vcpu, root); + } else { + vcpu->arch.mmu.set_vcpu_sh_os_pptb(vcpu, root); + } +} +static inline void +kvm_set_space_type_spt_os_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + kvm_set_space_type_spt_root(vcpu, root, false); +} +static inline void +kvm_set_space_type_spt_u_root(struct kvm_vcpu *vcpu, hpa_t root) +{ + kvm_set_space_type_spt_root(vcpu, root, true); +} +static inline hpa_t +kvm_get_space_addr_spt_root(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu); + } else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) { + return vcpu->arch.mmu.get_vcpu_sh_os_pptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_sh_u_pptb(vcpu); + } +} +static inline hpa_t +kvm_get_space_addr_root(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (likely(is_tdp_paging(vcpu) || + ((!is_paging(vcpu) || is_spt_gpa_fault(vcpu)) && + is_phys_paging(vcpu)))) { + return kvm_get_gp_phys_root(vcpu); + } else if (is_shadow_paging(vcpu)) { + return kvm_get_space_addr_spt_root(vcpu, gva); + } else { + KVM_BUG_ON(true); + return (hpa_t)-EINVAL; + } +} +static inline gpa_t +kvm_get_space_type_guest_root(struct kvm_vcpu *vcpu, bool u_root) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + KVM_BUG_ON(!u_root); + return (gpa_t)vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + } + return (u_root) ? 
(gpa_t)vcpu->arch.mmu.get_vcpu_u_pptb(vcpu) : + (gpa_t)vcpu->arch.mmu.get_vcpu_os_pptb(vcpu); +} +static inline gpa_t +kvm_get_space_type_guest_os_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_guest_root(vcpu, false); +} +static inline gpa_t +kvm_get_space_type_guest_u_root(struct kvm_vcpu *vcpu) +{ + return kvm_get_space_type_guest_root(vcpu, true); +} + +static inline void +kvm_set_space_type_guest_root(struct kvm_vcpu *vcpu, gpa_t root, + bool u_root) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + KVM_BUG_ON(!u_root); + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, (pgprotval_t)root); + } else if (likely(u_root)) { + vcpu->arch.mmu.set_vcpu_u_pptb(vcpu, (pgprotval_t)root); + } else { + vcpu->arch.mmu.set_vcpu_os_pptb(vcpu, (pgprotval_t)root); + } +} +static inline void +kvm_set_space_type_guest_os_root(struct kvm_vcpu *vcpu, gpa_t root) +{ + kvm_set_space_type_guest_root(vcpu, root, false); +} +static inline void +kvm_set_space_type_guest_u_root(struct kvm_vcpu *vcpu, gpa_t root) +{ + kvm_set_space_type_guest_root(vcpu, root, true); +} +static inline gpa_t +kvm_get_space_addr_guest_root(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + return vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + } else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) { + return vcpu->arch.mmu.get_vcpu_os_pptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_u_pptb(vcpu); + } +} +static inline hpa_t +kvm_get_space_type_spt_vptb(struct kvm_vcpu *vcpu, bool u_root) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + /* common standard in linux: user and OS share virtual */ + /* space of user */ + KVM_BUG_ON(!u_root); + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } else if (u_root) { + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_sh_os_vptb(vcpu); + } +} +static inline hpa_t +kvm_get_space_addr_spt_vptb(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (!vcpu->arch.mmu.sep_virt_space) { + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } else if (unlikely(gva >= vcpu->arch.mmu.get_vcpu_os_vab(vcpu))) { + return vcpu->arch.mmu.get_vcpu_sh_os_vptb(vcpu); + } else { + return vcpu->arch.mmu.get_vcpu_sh_u_vptb(vcpu); + } +} + +#define INVALID_GPA ((gpa_t)E2K_INVALID_PAGE) +#define IS_INVALID_GPA(gpa) ((gpa) == INVALID_GPA) + +static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) +{ + struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); + + return (struct kvm_mmu_page *)page_private(page); +} + +static inline gpa_t kvm_hva_to_gpa(struct kvm *kvm, unsigned long hva) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int i; + + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + slots = __kvm_memslots(kvm, i); + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gfn; + gpa_t gpa; + + hva_start = memslot->userspace_addr; + hva_end = hva_start + (memslot->npages << PAGE_SHIFT); + if (hva < hva_start || hva >= hva_end) + continue; + gfn = hva_to_gfn_memslot(hva, memslot); + gpa = (gfn << PAGE_SHIFT) + (hva & ~PAGE_MASK); + return gpa; + } + } + + return INVALID_GPA; +} + +static inline gpa_t +kvm_vcpu_hva_to_gpa(struct kvm_vcpu *vcpu, unsigned long hva) +{ + return kvm_hva_to_gpa(vcpu->kvm, hva); +} + +static inline void kvm_setup_host_mmu_info(struct kvm_vcpu *vcpu) +{ + if (is_tdp_paging(vcpu)) { + set_bit(KVM_FEAT_MMU_TDP_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + clear_bit(KVM_FEAT_MMU_SPT_BIT, + &vcpu->kvm->arch.kmap_host_info->features); + } else if 
(is_shadow_paging(vcpu)) {
+		set_bit(KVM_FEAT_MMU_SPT_BIT,
+			&vcpu->kvm->arch.kmap_host_info->features);
+		clear_bit(KVM_FEAT_MMU_TDP_BIT,
+			&vcpu->kvm->arch.kmap_host_info->features);
+	} else {
+		KVM_BUG_ON(true);
+	}
+}
+
+#ifdef CONFIG_KVM_SHADOW_PT_ENABLE
+extern int kvm_pv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs,
+			trap_cellar_t *tcellar, bool user_mode);
+extern int kvm_pv_mmu_instr_page_fault(struct kvm_vcpu *vcpu,
+			struct pt_regs *regs, tc_fault_type_t ftype,
+			const int async_instr);
+extern int kvm_pv_mmu_aau_page_fault(struct kvm_vcpu *vcpu,
+			struct pt_regs *regs, e2k_addr_t address,
+			tc_cond_t cond, unsigned int aa_no);
+extern long kvm_hv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs,
+			intc_info_mu_t *intc_info_mu);
+extern int kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address,
+			bool async_instr, u32 error_code);
+#else	/* ! CONFIG_KVM_SHADOW_PT_ENABLE */
+static inline int
+kvm_pv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs,
+			trap_cellar_t *tcellar, bool user_mode)
+{
+	/* page fault should be handled by host */
+	return -1;
+}
+static inline long
+kvm_hv_mmu_page_fault(struct kvm_vcpu *vcpu, struct pt_regs *regs,
+			intc_info_mu_t *intc_info_mu)
+{
+	/* page fault should be handled by host */
+	return -1;
+}
+static inline int
+kvm_pv_mmu_instr_page_fault(struct kvm_vcpu *vcpu,
+			struct pt_regs *regs, tc_fault_type_t ftype,
+			const int async_instr)
+{
+	/* page fault should be handled by host */
+	return -1;
+}
+static inline int
+kvm_pv_mmu_aau_page_fault(struct kvm_vcpu *vcpu,
+			struct pt_regs *regs, e2k_addr_t address,
+			tc_cond_t cond, unsigned int aa_no)
+{
+	/* page fault should be handled by host */
+	return -1;
+}
+
+static inline int
+kvm_mmu_instr_page_fault(struct kvm_vcpu *vcpu, gva_t address,
+			bool async_instr, u32 error_code)
+{
+	/* page fault should be handled by host */
+	return -1;
+}
+#endif	/* CONFIG_KVM_SHADOW_PT_ENABLE */
+
+extern int kvm_guest_addr_to_host(void **addr);
+extern void *kvm_guest_ptr_to_host_ptr(void *guest_ptr, int size);
+
+#ifdef	CONFIG_KVM_HOST_MODE
+/* it is native host kernel with virtualization support */
+static inline int
+guest_addr_to_host(void **addr, pt_regs_t *regs)
+{
+	if (likely(!host_test_intc_emul_mode((const struct pt_regs *)regs))) {
+		/* the faulted address is not a paravirtualized guest one */
+		return native_guest_addr_to_host(addr);
+	}
+
+	return kvm_guest_addr_to_host(addr);
+}
+static inline void *
+guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs)
+{
+	if (likely(!host_test_intc_emul_mode((const struct pt_regs *)regs))) {
+		/* the faulted address is not a paravirtualized guest one */
+		return native_guest_ptr_to_host(ptr, size);
+	}
+
+	return kvm_guest_ptr_to_host_ptr(ptr, size);
+}
+#endif	/* CONFIG_KVM_HOST_MODE */
+
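/*
 * Editor's sketch (hypothetical caller, not part of the original patch;
 * assumes CONFIG_KVM_HOST_MODE and that a NULL return signals a failed
 * translation): on the host side, a guest pointer taken from an
 * intercepted operation could be translated before being dereferenced:
 */
static inline int
example_read_guest_u32(void *guest_ptr, u32 *val, pt_regs_t *regs)
{
	void *host_ptr = guest_ptr_to_host(guest_ptr, sizeof(u32), regs);

	if (host_ptr == NULL)
		return -EFAULT;		/* error convention is an assumption */
	*val = *(u32 *)host_ptr;
	return 0;
}

+#endif	/* __E2K_KVM_HOST_MMU_H */
diff --git a/arch/e2k/include/asm/kvm/mmu_context.h b/arch/e2k/include/asm/kvm/mmu_context.h
new file mode 100644
index 0000000..be4e94f
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/mmu_context.h
@@ -0,0 +1,37 @@
+/*
+ * KVM guest kernel virtual space context support
+ * Copyright 2011 Salavat S.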
Gilyazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_CONTEXT_H +#define _E2K_KVM_MMU_CONTEXT_H + +#include +#include +#include + +/* + * Virtualization support + */ + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* it is native host kernel with virtualization support */ +/* mm_alloc()/mmdrop() defined at include/linux/sched.h */ + +#define activate_mm(__active_mm, __mm) \ + native_activate_mm(__active_mm, __mm) +static inline void +deactivate_mm(struct task_struct *dead_task, struct mm_struct *mm) +{ + native_deactivate_mm(dead_task, mm); +} +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* !(_E2K_KVM_MMU_CONTEXT_H) */ diff --git a/arch/e2k/include/asm/kvm/mmu_hv_regs_access.h b/arch/e2k/include/asm/kvm/mmu_hv_regs_access.h new file mode 100644 index 0000000..9c28508 --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu_hv_regs_access.h @@ -0,0 +1,318 @@ +/* + * E2K MMU virtualization extensions registers access + * + * Copyright 2018 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_HV_REGS_ACCESS_H_ +#define _E2K_KVM_MMU_HV_REGS_ACCESS_H_ + +#include +#include +#include + +#include +#include +#include + +/* + * MMU registers operations + */ + +#ifndef __ASSEMBLY__ + +#define READ_VIRT_CTRL_MU_REG_VALUE() NATIVE_GET_MMUREG(virt_ctrl_mu) +#define WRITE_VIRT_CTRL_MU_REG_VALUE(val) NATIVE_SET_MMUREG(virt_ctrl_mu, (val)) + +#define READ_G_W_IMASK_MMU_CR_REG_VALUE() \ + NATIVE_GET_MMUREG(g_w_imask_mmu_cr) +#define WRITE_G_W_IMASK_MMU_CR_REG_VALUE(val) \ + NATIVE_SET_MMUREG(g_w_imask_mmu_cr, (val)) + +#define READ_GID_REG_VALUE() NATIVE_GET_MMUREG(gid) +#define WRITE_GID_REG_VALUE(val) NATIVE_SET_MMUREG(gid, (val)) + +#define READ_GP_VPTB_REG_VALUE() NATIVE_GET_MMUREG(gp_vptb) +#define WRITE_GP_VPTB_REG_VALUE(val) NATIVE_SET_MMUREG(gp_vptb, (val)) + +#define READ_GP_PPTB_REG_VALUE() NATIVE_GET_MMUREG(gp_pptb) +#define WRITE_GP_PPTB_REG_VALUE(val) NATIVE_SET_MMUREG(gp_pptb, (val)) + +#define READ_INTC_INFO_MU() NATIVE_GET_MMUREG(intc_info_mu) +#define WRITE_INTC_INFO_MU(x) NATIVE_SET_MMUREG(intc_info_mu, (x)) + +#define READ_INTC_PTR_MU() NATIVE_GET_MMUREG(intc_ptr_mu) + +static inline void __save_intc_info_mu(intc_info_mu_t *info, int *num) +{ + long info_ptr, i = 0; + + /* + * The read of INTC_PTR will clear the hardware pointer, + * but the subsequent reads for INTC_INFO will increase + * it again until it reaches the same value it had before. 
+ */ + info_ptr = READ_INTC_PTR_MU(); + if (!info_ptr) { + *num = -1; + return; + } + if (info_ptr % INTC_INFO_MU_ITEM_SIZE != 0) { + KVM_WARN_ON(true); + info_ptr = ((info_ptr + (INTC_INFO_MU_ITEM_SIZE - 1)) / + INTC_INFO_MU_ITEM_SIZE) * + INTC_INFO_MU_ITEM_SIZE; + } + + do { + info[i].no_restore = false; + info[i].modify_data = false; + info[i].hdr = (intc_info_mu_hdr_t) READ_INTC_INFO_MU(); + info[i].gpa = (u64) READ_INTC_INFO_MU(); + info[i].gva = (u64) READ_INTC_INFO_MU(); + NATIVE_STORE_TAGGED_MMUREG(&info[i].data, intc_info_mu); + info[i].condition = (tc_cond_t) READ_INTC_INFO_MU(); + NATIVE_STORE_TAGGED_MMUREG(&info[i].data_ext, intc_info_mu); + info[i].mask = (tc_mask_t) READ_INTC_INFO_MU(); + ++i; + info_ptr -= INTC_INFO_MU_ITEM_SIZE; + } while (info_ptr > 0); + + *num = i; +} + +#define fixup_intc_info_mu(info, num) \ +do { \ + int entries = *num; \ + if (entries > 0 && cpu_has(CPU_HWBUG_GUEST_ASYNC_PM)) { \ + e2k_mem_crs_t *frame; \ + e2k_pcsp_lo_t bu_pcsp_lo; \ + e2k_pcsp_hi_t bu_pcsp_hi; \ + \ + AW(bu_pcsp_lo) = READ_BU_PCSP_LO_REG_VALUE(); \ + AW(bu_pcsp_hi) = READ_BU_PCSP_HI_REG_VALUE(); \ + frame = (e2k_mem_crs_t *) (AS(bu_pcsp_lo).base + \ + AS(bu_pcsp_hi).ind); \ + --frame; \ + if (!AS(frame->cr1_lo).pm) { \ + int entry; \ + for (entry = 0; entry < entries; entry++) { \ + /* Protected mode accesses are always \ + * privileged, so keep "pm" for them. */ \ + if (AS(info[entry].condition).npsp) \ + AS(info[entry].condition).pm = 0; \ + } \ + } \ + } \ +} while (0) + +/* Use macro magic to escape header hell */ +#define save_intc_info_mu(info, num) \ +do { \ + __save_intc_info_mu(info, num); \ + fixup_intc_info_mu(info, num); \ +} while (0) + + +static inline void +restore_intc_info_mu(const intc_info_mu_t *info, int num) +{ + int i; + + /* + * 1) Clear the hardware pointer + */ + READ_INTC_PTR_MU(); + if (num == -1) + return; + + /* + * 2) Write the registers + */ + for (i = 0; i < num; i++) { + if (!info[i].no_restore) { + WRITE_INTC_INFO_MU(AW(info[i].hdr)); + WRITE_INTC_INFO_MU(info[i].gpa); + WRITE_INTC_INFO_MU(info[i].gva); + NATIVE_TAGGED_LOAD_TO_MMUREG(intc_info_mu, + &info[i].data); + WRITE_INTC_INFO_MU(AW(info[i].condition)); + NATIVE_TAGGED_LOAD_TO_MMUREG(intc_info_mu, + &info[i].data_ext); + WRITE_INTC_INFO_MU(AW(info[i].mask)); + } + } +} + +static inline void +modify_intc_info_mu_data(intc_info_mu_t *info, int num) +{ + int i; + + for (i = 0; i < num; i++) { + if (unlikely(info[i].modify_data)) { + info[i].data = info[i].mod_data; + info[i].data_ext = info[i].mod_data_ext; + } + } +} + +static inline void +kvm_set_intc_info_mu_modified_data(intc_info_mu_t *info, unsigned long data, + unsigned long data_ext) +{ + info->mod_data = data; + info->mod_data_ext = data_ext; + info->modify_data = true; +} +static inline void +kvm_reset_intc_info_mu_is_updated(struct kvm_vcpu *vcpu) +{ + vcpu->arch.intc_ctxt.mu_updated = false; +} +static inline void +kvm_set_intc_info_mu_is_updated(struct kvm_vcpu *vcpu) +{ + vcpu->arch.intc_ctxt.mu_updated = true; +} +static inline bool +kvm_get_intc_info_mu_is_updated(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.intc_ctxt.mu_updated; +} + +#define READ_SH_OS_PPTB_REG_VALUE() NATIVE_GET_MMUREG(sh_os_pptb) +#define WRITE_SH_OS_PPTB_REG_VALUE(val) NATIVE_SET_MMUREG(sh_os_pptb, (val)) + +#define READ_SH_OS_VPTB_REG_VALUE() NATIVE_GET_MMUREG(sh_os_vptb) +#define WRITE_SH_OS_VPTB_REG_VALUE(val) NATIVE_SET_MMUREG(sh_os_vptb, (val)) + +#define READ_SH_OS_VAB_REG_VALUE() NATIVE_GET_MMUREG(sh_os_vab) +#define WRITE_SH_OS_VAB_REG_VALUE(val) 
NATIVE_SET_MMUREG(sh_os_vab, (val)) + +#define READ_SH_PID_REG_VALUE() NATIVE_GET_MMUREG(sh_pid) +#define WRITE_SH_PID_REG_VALUE(val) NATIVE_SET_MMUREG(sh_pid, (val)) + +#define READ_CR_G_W_IMASK_REG_VALUE() NATIVE_GET_MMUREG(g_w_imask_mmu_cr) +#define WRITE_CR_G_W_IMASK_REG_VALUE(val) \ + NATIVE_SET_MMUREG(g_w_imask_mmu_cr, (val)) + +#define READ_SH_MMU_CR_REG_VALUE() NATIVE_GET_MMUREG(sh_mmu_cr) +#define WRITE_SH_MMU_CR_REG_VALUE(val) NATIVE_SET_MMUREG(sh_mmu_cr, (val)) + +extern unsigned long read_VIRT_CTRL_MU_reg_value(void); +extern void write_VIRT_CTRL_MU_reg_value(unsigned long value); +extern unsigned long read_GID_reg_value(void); +extern void write_GID_reg_value(unsigned long value); +extern unsigned long read_GP_VPTB_reg_value(void); +extern void write_GP_VPTB_reg_value(unsigned long value); +extern unsigned long read_GP_PPTB_reg_value(void); +extern void write_GP_PPTB_reg_value(unsigned long value); +extern unsigned long read_SH_OS_PPTB_reg_value(void); +extern void write_SH_OS_PPTB_reg_value(unsigned long value); +extern unsigned long read_SH_OS_VPTB_reg_value(void); +extern void write_SH_OS_VPTB_reg_value(unsigned long value); +extern unsigned long read_SH_OS_VAB_reg_value(void); +extern void write_SH_OS_VAB_reg_value(unsigned long value); +extern unsigned long read_SH_PID_reg_value(void); +extern void write_SH_PID_reg_value(unsigned long value); +extern unsigned long read_SH_MMU_CR_reg_value(void); +extern void write_SH_MMU_CR_reg_value(unsigned long value); +extern unsigned long read_G_W_IMASK_MMU_CR_reg_value(void); +extern void write_G_W_IMASK_MMU_CR_reg_value(unsigned long value); + +static inline virt_ctrl_mu_t read_VIRT_CTRL_MU_reg(void) +{ + virt_ctrl_mu_t virt_ctrl; + + virt_ctrl.VIRT_CTRL_MU_reg = read_VIRT_CTRL_MU_reg_value(); + return virt_ctrl; +} +static inline void write_VIRT_CTRL_MU_reg(virt_ctrl_mu_t virt_ctrl) +{ + write_VIRT_CTRL_MU_reg_value(virt_ctrl.VIRT_CTRL_MU_reg); +} + +static inline unsigned int read_GID_reg(void) +{ + return read_GID_reg_value(); +} +static inline void write_GID_reg(unsigned int mmu_gid) +{ + write_GID_reg_value(MMU_GID(mmu_gid)); +} + +static inline mmu_reg_t read_SH_MMU_CR_reg(void) +{ + return __mmu_reg(read_SH_MMU_CR_reg_value()); +} +static inline void write_SH_MMU_CR_reg(mmu_reg_t mmu_cr) +{ + write_SH_MMU_CR_reg_value(mmu_reg_val(mmu_cr)); +} + +static inline mmu_reg_t read_G_W_IMASK_MMU_CR_reg(void) +{ + return __mmu_reg(read_G_W_IMASK_MMU_CR_reg_value()); +} +static inline void write_G_W_IMASK_MMU_CR_reg(mmu_reg_t mmu_cr_mask) +{ + write_G_W_IMASK_MMU_CR_reg_value(mmu_reg_val(mmu_cr_mask)); +} + +static inline unsigned int read_SH_PID_reg(void) +{ + return read_SH_PID_reg_value(); +} +static inline void write_SH_PID_reg(unsigned int mmu_pid) +{ + write_SH_PID_reg_value(MMU_PID(mmu_pid)); +} + +static inline e2k_addr_t read_SH_OS_PPTB_reg(void) +{ + return read_SH_OS_PPTB_reg_value(); +} +static inline void write_SH_OS_PPTB_reg(e2k_addr_t phys_addr) +{ + write_SH_OS_PPTB_reg_value(MMU_ADDR_TO_PPTB(phys_addr)); +} + +static inline e2k_addr_t read_SH_OS_VPTB_reg(void) +{ + return read_SH_OS_VPTB_reg_value(); +} +static inline void write_SH_OS_VPTB_reg(e2k_addr_t virt_addr) +{ + write_SH_OS_VPTB_reg_value(MMU_ADDR_TO_VPTB(virt_addr)); +} + +static inline e2k_addr_t read_GP_PPTB_reg(void) +{ + return read_GP_PPTB_reg_value(); +} +static inline void write_GP_PPTB_reg(e2k_addr_t phys_addr) +{ + write_GP_PPTB_reg_value(MMU_ADDR_TO_PPTB(phys_addr)); +} + +static inline e2k_addr_t read_GP_VPTB_reg(void) +{ + return 
read_GP_VPTB_reg_value();
+}
+static inline void write_GP_VPTB_reg(e2k_addr_t virt_addr)
+{
+	write_GP_VPTB_reg_value(MMU_ADDR_TO_VPTB(virt_addr));
+}
+
+static inline e2k_addr_t read_SH_OS_VAB_reg(void)
+{
+	return read_SH_OS_VAB_reg_value();
+}
+static inline void write_SH_OS_VAB_reg(e2k_addr_t virt_addr)
+{
+	write_SH_OS_VAB_reg_value(MMU_ADDR_TO_VAB(virt_addr));
+}
+#endif	/* ! __ASSEMBLY__ */
+
+#endif	/* _E2K_KVM_MMU_HV_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/kvm/mmu_hv_regs_types.h b/arch/e2k/include/asm/kvm/mmu_hv_regs_types.h
new file mode 100644
index 0000000..36e0d78
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/mmu_hv_regs_types.h
@@ -0,0 +1,253 @@
+/*
+ * asm/kvm/mmu_hv_regs_types.h: E2K MMU structures & registers.
+ *
+ * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_MMU_HV_REGS_TYPES_H_
+#define _E2K_KVM_MMU_HV_REGS_TYPES_H_
+
+#include
+#include
+
+/*
+ * Structures of MMU registers for hardware virtualized extensions
+ */
+
+/* MMU addresses to access MMU internal registers */
+
+#define _MMU_VIRT_CTRL_NO	0x40	/* MMU virtualization control */
+#define _MMU_GID_NO		0x41	/* guest machine ID */
+#define _MMU_GP_PPTB_NO		0x43	/* physical base of guest PTs */
+#define _MMU_INTC_INFO_NO	0x44	/* MMU intercept info */
+#define _MMU_INTC_PTR_NO	0x45	/* MMU intercept info pointer */
+#define _MMU_SH_OS_VPTB_NO	0x46	/* virtual base of guest shadow PTs */
+#define _MMU_SH_OS_PPTB_NO	0x47	/* physical base of guest shadow PTs */
+#define _MMU_CR_G_W_IMASK_NO	0x48	/* mask of MMU_CR bits access to */
+					/* control intercepts */
+#define _MMU_SH_PID_NO		0x49	/* shadow register of process ID */
+#define _MMU_SH_MMU_CR_NO	0x4a	/* shadow register of control reg. */
+
+#define MMU_ADDR_VIRT_CTRL	MMU_REG_NO_TO_MMU_ADDR(_MMU_VIRT_CTRL_NO)
+#define MMU_ADDR_GID		MMU_REG_NO_TO_MMU_ADDR(_MMU_GID_NO)
+#define MMU_ADDR_GP_PPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_GP_PPTB_NO)
+#define MMU_ADDR_INTC_INFO	MMU_REG_NO_TO_MMU_ADDR(_MMU_INTC_INFO_NO)
+#define MMU_ADDR_INTC_PTR	MMU_REG_NO_TO_MMU_ADDR(_MMU_INTC_PTR_NO)
+#define MMU_ADDR_SH_OS_VPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_VPTB_NO)
+#define MMU_ADDR_SH_OS_PPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_PPTB_NO)
+#define MMU_ADDR_CR_G_W_IMASK	MMU_REG_NO_TO_MMU_ADDR(_MMU_CR_G_W_IMASK_NO)
+#define MMU_ADDR_SH_PID		MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_PID_NO)
+#define MMU_ADDR_SH_MMU_CR	MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_MMU_CR_NO)
+
+/* MMU internal register contents */
+
+/*
+ * MMU Guest Process (machine #) ID MMU_GID
+ */
+
+#define MMU_GID_SIZE	MMU_PID_SIZE
+
+/*
+ * Kernel virtual memory context
+ */
+#define	E2K_KERNEL_GID	0x000	/* defined by hardware */
+
+#define	MMU_GID(gid)		MMU_PID(gid)
+#define	MMU_KERNEL_GID		MMU_GID(E2K_KERNEL_GID)
+
+/*
+ * MMU Virtual Control register
+ */
+
+typedef union virt_ctrl_mu {
+	struct {
+		u64 evn_c : 36;		/* [35: 0] */
+		u64 __resf : 28;	/* [63:36] */
+	};
+	struct {
+		/* evn_c: */
+		u64 rr_mmu_cr		: 1;	/* [ 0] */
+		u64 rr_pptb		: 1;	/* [ 1] */
+		u64 rr_vptb		: 1;	/* [ 2] */
+		u64 rr_apic_base	: 1;	/* [ 3] */
+		u64 rr_mtrr_pat		: 1;	/* [ 4] */
+		u64 rr_ph_pci_b		: 1;	/* [ 5] */
+		u64 rr_dbg		: 1;	/* [ 6] */
+		u64 rr_dbg1		: 1;	/* [ 7] */
+		u64 rw_mmu_cr		: 1;	/* [ 8] */
+		u64 rw_pptb		: 1;	/* [ 9] */
+		u64 rw_vptb		: 1;	/* [10] */
+		u64 rw_apic_base	: 1;	/* [11] */
+		u64 rw_mtrr_pat		: 1;	/* [12] */
+		u64 rw_ph_pci_b		: 1;	/* [13] */
+		u64 rw_dbg		: 1;	/* [14] */
+		u64 rw_dbg1		: 1;	/* [15] */
+		u64 pma			: 1;	/* [16] */
+		u64 fl_dc		: 1;	/* [17] */
+		u64 fl_dcl		: 1;	/* [18] */
+		u64 fl_ic		: 1;	/* [19] */
+		u64 fl_icl_u		: 1;	/* 
[20] */ + u64 fl_icl_p : 1; /* [21] */ + u64 fl_tlb : 1; /* [22] */ + u64 fl_tlbpg : 1; /* [23] */ + u64 fl_tlb2pg : 1; /* [24] */ + u64 prb_entry : 1; /* [25] */ + u64 evn_c_res : 10; /* [35:26] */ + /* other fields */ + u64 gp_pt_en : 1; /* [36] */ + u64 sh_pt_en : 1; /* [37] */ + u64 __resb : 26; /* [63:38] */ + }; + u64 word; /* as entire register */ +} virt_ctrl_mu_t; +#define VIRT_CTRL_MU_evn_c evn_c /* events mask to intercept */ +#define VIRT_CTRL_MU_rr_mmu_cr rr_mmu_cr +#define VIRT_CTRL_MU_rr_u_pptb rr_pptb +#define VIRT_CTRL_MU_rr_u_vptb rr_vptb +#define VIRT_CTRL_MU_rr_apic_base rr_apic_base +#define VIRT_CTRL_MU_rr_mtrr_pat rr_mtrr_pat +#define VIRT_CTRL_MU_rr_ph_pci_b rr_ph_pci_b +#define VIRT_CTRL_MU_rr_dbg rr_dbg +#define VIRT_CTRL_MU_rr_dbg1 rr_dbg1 +#define VIRT_CTRL_MU_rw_mmu_cr rw_mmu_cr +#define VIRT_CTRL_MU_rw_u_pptb rw_pptb +#define VIRT_CTRL_MU_rw_u_vptb rw_vptb +#define VIRT_CTRL_MU_rw_apic_base rw_apic_base +#define VIRT_CTRL_MU_rw_mtrr_pat rw_mtrr_pat +#define VIRT_CTRL_MU_rw_ph_pci_b rw_ph_pci_b +#define VIRT_CTRL_MU_rw_dbg rw_dbg +#define VIRT_CTRL_MU_rw_dbg1 rw_dbg1 +#define VIRT_CTRL_MU_pma pma +#define VIRT_CTRL_MU_fl_dc fl_dc +#define VIRT_CTRL_MU_fl_dcl fl_dcl +#define VIRT_CTRL_MU_fl_ic fl_ic +#define VIRT_CTRL_MU_fl_icl_u fl_icl_u +#define VIRT_CTRL_MU_fl_icl_p fl_icl_p +#define VIRT_CTRL_MU_fl_tlb fl_tlb +#define VIRT_CTRL_MU_fl_tlbpg fl_tlbpg +#define VIRT_CTRL_MU_fl_tlb2pg fl_tlb2pg +#define VIRT_CTRL_MU_prb_entry prb_entry + /* GPA -> PA translation enable */ +#define VIRT_CTRL_MU_gp_pt_en gp_pt_en + /* shadow Page Tables enable */ +#define VIRT_CTRL_MU_sh_pt_en sh_pt_en +#define VIRT_CTRL_MU_reg word /* [63: 0] - entire register */ + +/* Bits mask of VIRT_CTRL_MU fields and flags */ +#define VIRT_CTRL_MU_ENV_C_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_evn_c = -1, }.word) +#define VIRT_CTRL_MU_RR_MMU_CR_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_mmu_cr = 1, }.word) +#define VIRT_CTRL_MU_RR_U_PPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_pptb = 1, }.word) +#define VIRT_CTRL_MU_RR_U_VPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_vptb = 1, }.word) +#define VIRT_CTRL_MU_RR_APIC_BASE_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_apic_base = 1, }.word) +#define VIRT_CTRL_MU_RR_MTRR_PAT_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_mtrr_pat = 1, }.word) +#define VIRT_CTRL_MU_RR_PH_PCI_B_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_ph_pci_b = 1, }.word) +#define VIRT_CTRL_MU_RR_DBG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_dbg = 1, }.word) +#define VIRT_CTRL_MU_RR_DBG1_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rr_dbg1 = 1, }.word) +#define VIRT_CTRL_MU_RW_MMU_CR_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_mmu_cr = 1, }.word) +#define VIRT_CTRL_MU_RW_U_PPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_pptb = 1, }.word) +#define VIRT_CTRL_MU_RW_U_VPTB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_vptb = 1, }.word) +#define VIRT_CTRL_MU_RW_APIC_BASE_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_apic_base = 1, }.word) +#define VIRT_CTRL_MU_RW_MTRR_PAT_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_mtrr_pat = 1, }.word) +#define VIRT_CTRL_MU_RW_PH_PCI_B_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_ph_pci_b = 1, }.word) +#define VIRT_CTRL_MU_RW_DBG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_dbg = 1, }.word) +#define VIRT_CTRL_MU_RW_DBG1_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_rw_dbg1 = 1, }.word) +#define VIRT_CTRL_MU_PMA_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_pma = 1, }.word) +#define VIRT_CTRL_MU_FL_DC_MASK \ + ((virt_ctrl_mu_t) { 
.VIRT_CTRL_MU_fl_dc = 1, }.word) +#define VIRT_CTRL_MU_FL_DCL_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_dcl = 1, }.word) +#define VIRT_CTRL_MU_FL_IC_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_ic = 1, }.word) +#define VIRT_CTRL_MU_FL_ICL_U_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_icl_u = 1, }.word) +#define VIRT_CTRL_MU_FL_ICL_P_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_icl_p = 1, }.word) +#define VIRT_CTRL_MU_FL_TLB_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlb = 1, }.word) +#define VIRT_CTRL_MU_FL_TLBPG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlbpg = 1, }.word) +#define VIRT_CTRL_MU_FL_TLB2PG_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_fl_tlb2pg = 1, }.word) +#define VIRT_CTRL_MU_PRB_ENTRY_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_prb_entry = 1, }.word) +#define VIRT_CTRL_MU_GP_PT_EN_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_gp_pt_en = 1, }.word) +#define VIRT_CTRL_MU_SH_PT_EN_MASK \ + ((virt_ctrl_mu_t) { .VIRT_CTRL_MU_sh_pt_en = 1, }.word) + +typedef union { + struct { + u64 event_code : 8; + u64 guest_pt_lev_fin : 1; + u64 guest_pt_lev : 3; + u64 ignore_wr_rights : 1; + u64 __reserved : 51; + }; + u64 word; +} intc_info_mu_hdr_t; + +#define intc_mu_info_lo_get_event_code(x) ((x) & 0xff) + +/* Possible values for `INTC_INFO_MU[2 * j].event_code' */ +typedef enum e2k_int_info_mu_event_code { + IME_FORCED = 0, + IME_FORCED_GVA = 1, + IME_SHADOW_DATA = 2, + IME_GPA_DATA = 3, + IME_GPA_INSTR = 4, + IME_GPA_AINSTR = 5, + IME_RESERVED_6 = 6, + IME_RESERVED_7 = 7, + IME_MAS_IOADDR = 8, + IME_READ_MU = 9, + IME_WRITE_MU = 10, + IME_CACHE_FLUSH = 11, + IME_CACHE_LINE_FLUSH = 12, + IME_ICACHE_FLUSH = 13, + IME_ICACHE_LINE_FLUSH_USER = 14, + IME_ICACHE_LINE_FLUSH_SYSTEM = 15, + IME_TLB_FLUSH = 16, + IME_TLB_PAGE_FLUSH_LAST = 17, + IME_TLB_PAGE_FLUSH_UPPER = 18, + IME_TLB_ENTRY_PROBE = 19, + MU_INTC_EVENTS_MAX +} intc_info_mu_event_code_t; + +typedef struct { + intc_info_mu_hdr_t hdr; + unsigned long gpa; + unsigned long gva; + unsigned long data; + tc_cond_t condition; + unsigned long data_ext; + tc_mask_t mask; + bool no_restore; + bool modify_data; + unsigned long mod_data; + unsigned long mod_data_ext; +} intc_info_mu_t; + +#define INTC_INFO_MU_MAX 77 +#define INTC_PTR_MU_SIZE 7 +#define INTC_INFO_MU_ITEM_SIZE 7 +#define INTC_INFO_MU_ITEM_MAX (INTC_INFO_MU_MAX / INTC_INFO_MU_ITEM_SIZE) + +#endif /* _E2K_KVM_MMU_HV_REGS_TYPES_H_ */ diff --git a/arch/e2k/include/asm/kvm/mmu_regs_access.h b/arch/e2k/include/asm/kvm/mmu_regs_access.h new file mode 100644 index 0000000..a160fae --- /dev/null +++ b/arch/e2k/include/asm/kvm/mmu_regs_access.h @@ -0,0 +1,832 @@ +/* + * E2K MMU registers access virtualization for KVM guest + * + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_KVM_MMU_REGS_ACCESS_H_ +#define _E2K_KVM_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_MODE +#undef DebugKVM +#define DEBUG_KVM_MODE 0 /* kernel virtual machine debugging */ +#define DebugKVM(fmt, args...) \ +({ \ + if (DEBUG_KVM_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +/* + * Basic functions accessing MMU on guest. 
+ */ +#define GUEST_MMU_REGS_BASE (offsetof(kvm_vcpu_state_t, mmu) + \ + offsetof(kvm_mmu_state_t, regs)) +#define GUEST_MMU_REG(reg_no) (GUEST_MMU_REGS_BASE + \ + ((reg_no) * sizeof(mmu_reg_t))) +#define GUEST_GET_MMU_REG(reg_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_MMU_REG(reg_no)) +#define GUEST_SET_MMU_REG(reg_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_MMU_REG(reg_no), value) +#define GUEST_TRAP_CELLAR_BASE (offsetof(kvm_vcpu_state_t, mmu) + \ + offsetof(kvm_mmu_state_t, tcellar)) +#define GUEST_TC_ENTRY(tc_no) (GUEST_TRAP_CELLAR_BASE + \ + ((tc_no) * sizeof(trap_cellar_t))) +#define GUEST_GET_TC_ADDRESS(tc_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_TC_ENTRY(tc_no) + \ + offsetof(trap_cellar_t, address)) +#define GUEST_GET_TC_CONDITION(tc_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_TC_ENTRY(tc_no) + \ + offsetof(trap_cellar_t, condition)) +#define GUEST_GET_TC_DATA(tc_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_TC_ENTRY(tc_no) + \ + offsetof(trap_cellar_t, data)) +#define GUEST_MMU_DEBUG_REGS_BASE \ + (offsetof(kvm_vcpu_state_t, mmu) + \ + offsetof(kvm_mmu_state_t, debug_regs)) +#define GUEST_MMU_DEBUG_REG(reg_no) \ + (GUEST_MMU_DEBUG_REGS_BASE + \ + ((reg_no) * sizeof(mmu_reg_t))) +#define GUEST_GET_MMU_DEBUG_REG(reg_no) \ + E2K_LOAD_GUEST_VCPU_STATE_D(GUEST_MMU_DEBUG_REG(reg_no)) +#define GUEST_SET_MMU_DEBUG_REG(reg_no, value) \ + E2K_STORE_GUEST_VCPU_STATE_D(GUEST_MMU_DEBUG_REG(reg_no), value) + +/* + * Write/read MMU register + */ +static inline void KVM_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + GUEST_SET_MMU_REG(mmu_reg_no, mmu_reg); + if (IS_HV_GM()) + NATIVE_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t KVM_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + if (likely(IS_HV_GM())) { + return (mmu_reg_t)NATIVE_READ_MMU_REG(mmu_addr); + } else { + return (mmu_reg_t)GUEST_GET_MMU_REG(mmu_reg_no); + } +} +static inline void BOOT_KVM_WRITE_MMU_REG(mmu_addr_t mmu_addr, + mmu_reg_t mmu_reg) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + GUEST_SET_MMU_REG(mmu_reg_no, mmu_reg); + if (BOOT_IS_HV_GM()) + NATIVE_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t BOOT_KVM_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + unsigned long mmu_reg_no = MMU_REG_NO_FROM_MMU_ADDR(mmu_addr); + + if (likely(BOOT_IS_HV_GM())) { + return (mmu_reg_t)NATIVE_READ_MMU_REG(mmu_addr); + } else { + return (mmu_reg_t)GUEST_GET_MMU_REG(mmu_reg_no); + } +} + +static inline void KVM_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long KVM_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void KVM_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long KVM_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void KVM_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline unsigned long KVM_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} +static inline void KVM_WRITE_MMU_PID_REG(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_REG(MMU_ADDR_PID, reg_val); + if (IS_HV_GM()) { + /* FIXME: guest should fully control own PTs including */ + /* all hardware MMU registers, but it is not so now, */ + /* for example PT 
roots and context registers are controlled */
+		/* by hypervisor as for paravirtualized kernels */
+		native_flush_TLB_all();
+	}
+}
+static inline unsigned long KVM_READ_MMU_PID_REG(void)
+{
+	return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_PID));
+}
+
+static inline void BOOT_KVM_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val)
+{
+	BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val);
+}
+static inline unsigned long BOOT_KVM_READ_MMU_OS_PPTB_REG(void)
+{
+	return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_OS_PPTB));
+}
+static inline void BOOT_KVM_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val)
+{
+	BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val);
+}
+static inline unsigned long BOOT_KVM_READ_MMU_OS_VPTB_REG(void)
+{
+	return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_OS_VPTB));
+}
+static inline void BOOT_KVM_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val)
+{
+	BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val);
+}
+static inline unsigned long BOOT_KVM_READ_MMU_OS_VAB_REG(void)
+{
+	return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_OS_VAB));
+}
+static inline void BOOT_KVM_WRITE_MMU_PID_REG(mmu_reg_t reg_val)
+{
+	BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_PID, reg_val);
+	if (BOOT_IS_HV_GM()) {
+		/* FIXME: guest should fully control own PTs including */
+		/* all hardware MMU registers, but it is not so now, */
+		/* for example PT roots and context registers are controlled */
+		/* by hypervisor as for paravirtualized kernels */
+		NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all);
+	}
+}
+static inline unsigned long BOOT_KVM_READ_MMU_PID_REG(void)
+{
+	return mmu_reg_val(BOOT_KVM_READ_MMU_REG(MMU_ADDR_PID));
+}
+
+static inline unsigned int KVM_READ_MMU_TRAP_COUNT(void)
+{
+	return mmu_reg_val(KVM_READ_MMU_REG(MMU_ADDR_TRAP_COUNT));
+}
+
+static inline void KVM_RESET_MMU_TRAP_COUNT(void)
+{
+	KVM_WRITE_MMU_REG(MMU_ADDR_TRAP_COUNT, 0);
+}
+
+static inline void BOOT_KVM_RESET_MMU_TRAP_COUNT(void)
+{
+	BOOT_KVM_WRITE_MMU_REG(MMU_ADDR_TRAP_COUNT, 0);
+}
+
+static inline unsigned long KVM_READ_TC_ADDRESS(int tc_no)
+{
+	return GUEST_GET_TC_ADDRESS(tc_no);
+}
+
+static inline tc_cond_t KVM_READ_TC_CONDITION(int tc_no)
+{
+	return (tc_cond_t)(u64)GUEST_GET_TC_CONDITION(tc_no);
+}
+
+static inline unsigned long KVM_READ_TC_DATA(int tc_no)
+{
+	/* FIXME: the data has a tag, so a tagged load should be used here */
+	return GUEST_GET_TC_DATA(tc_no);
+}
+
+/*
+ * Write/read Data TLB register
+ */
+
+static inline void KVM_WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg)
+{
+	if (IS_HV_GM()) {
+		NATIVE_WRITE_DTLB_REG(tlb_addr, mmu_reg);
+	} else {
+		panic("KVM_WRITE_DTLB_REG() is not yet implemented\n");
+	}
+}
+
+static inline mmu_reg_t KVM_READ_DTLB_REG(tlb_addr_t tlb_addr)
+{
+	if (IS_HV_GM()) {
+		return NATIVE_READ_DTLB_REG(tlb_addr);
+	} else {
+		return kvm_read_dtlb_reg(tlb_addr);
+	}
+}
+
+/*
+ * Flush TLB page/entry
+ */
+
+static inline void
+KVM_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr)
+{
+	if (IS_HV_GM()) {
+		/* FIXME: guest should fully control own PTs including */
+		/* all hardware MMU registers, but it is not so now, */
+		/* for example PT roots and context registers are controlled */
+		/* by hypervisor as for paravirtualized kernels */
+		native_flush_TLB_all();
+	} else if (IS_ENABLED(CONFIG_KVM_PARAVIRT_TLB_FLUSH)) {
+		HYPERVISOR_flush_tlb_range(flush_addr_get_va(flush_addr),
+					flush_addr_get_va(flush_addr));
+	}
+}
+
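/*
 * Editor's sketch (hypothetical caller, not part of the original patch):
 * on a guest context switch the new address space PID would go through
 * the wrapper above, which also performs the paravirtualized TLB flush
 * noted in the FIXME; the MMU_PID() formatting is an assumption:
 */
static inline void example_guest_switch_pid(unsigned long new_pid)
{
	KVM_WRITE_MMU_PID_REG(MMU_PID(new_pid));
}

+/*
+ * Flush DCACHE line
+ */
+
+static inline void
+KVM_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr)
+{
+	if (IS_HV_GM()) {
+		/*
+		 * Prevent the privileged strd instruction of
+		 *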
NATIVE_FLUSH_DCACHE_LINE() from being hoisted out from under
+		 * the IS_HV_GM() predicate: even with a false predicate
+		 * value it could cause a privileged action exception in
+		 * the guest kernel.
+		 */
+		E2K_CMD_SEPARATOR;
+		NATIVE_FLUSH_DCACHE_LINE(virt_addr);
+	} else {
+		kvm_flush_dcache_line(virt_addr);
+	}
+}
+
+/*
+ * Clear DCACHE L1 set
+ */
+static inline void
+KVM_CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set)
+{
+	if (IS_HV_GM()) {
+		NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set);
+	} else {
+		kvm_clear_dcache_l1_set(virt_addr, set);
+	}
+}
+
+/*
+ * Write/read DCACHE L2 registers
+ */
+static inline void
+KVM_WRITE_DCACHE_L2_REG(unsigned long reg_val, int reg_num, int bank_num)
+{
+	if (IS_HV_GM()) {
+		NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num);
+	} else {
+		kvm_write_dcache_l2_reg(reg_val, reg_num, bank_num);
+	}
+}
+static inline unsigned long
+KVM_READ_DCACHE_L2_REG(int reg_num, int bank_num)
+{
+	if (IS_HV_GM()) {
+		return NATIVE_READ_L2_REG(reg_num, bank_num);
+	} else {
+		return kvm_read_dcache_l2_reg(reg_num, bank_num);
+	}
+}
+
+/*
+ * Flush ICACHE line
+ */
+
+static inline void
+KVM_FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr)
+{
+	if (IS_HV_GM()) {
+		NATIVE_FLUSH_ICACHE_LINE(flush_op, flush_addr);
+	} else {
+		/* any switch to the guest kernel now flushes all TLBs and */
+		/* caches, so precise flushing need not be implemented */
+		pr_debug("KVM_FLUSH_ICACHE_LINE() is not yet implemented\n");
+	}
+}
+
+/*
+ * Flush and invalidate or write back CACHE(s) (invalidate all caches
+ * of the processor)
+ */
+
+static inline void
+KVM_FLUSH_CACHE_L12(flush_op_t flush_op)
+{
+	if (IS_HV_GM()) {
+		native_write_back_CACHE_L12();
+	} else {
+		panic("KVM_FLUSH_CACHE_L12() is not yet implemented\n");
+	}
+}
+
+/*
+ * Flush TLB (invalidate all TLBs of the processor)
+ */
+
+static inline void
+KVM_FLUSH_TLB_ALL(flush_op_t flush_op)
+{
+	if (IS_HV_GM()) {
+		native_flush_TLB_all();
+	} else if (IS_ENABLED(CONFIG_KVM_PARAVIRT_TLB_FLUSH)) {
+		HYPERVISOR_flush_tlb_range(0, E2K_VA_SIZE);
+	}
+}
+
+/*
+ * Flush ICACHE (invalidate instruction caches of the processor)
+ */
+
+static inline void
+KVM_FLUSH_ICACHE_ALL(flush_op_t flush_op)
+{
+	if (IS_HV_GM()) {
+		native_flush_ICACHE_all();
+	} else {
+		/* panic("KVM_FLUSH_ICACHE_ALL() is not yet implemented\n"); */
+	}
+}
+
+/*
+ * Get Entry probe for virtual address
+ */
+
+static inline probe_entry_t
+KVM_ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr)
+{
+	if (IS_HV_GM()) {
+		return NATIVE_ENTRY_PROBE_MMU_OP(virt_addr);
+	} else {
+		return kvm_mmu_entry_probe(virt_addr);
+	}
+}
+
+/*
+ * Get physical address for virtual address
+ */
+
+static inline probe_entry_t
+KVM_ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr)
+{
+	if (IS_HV_GM()) {
+		return NATIVE_ADDRESS_PROBE_MMU_OP(virt_addr);
+	} else {
+		return kvm_mmu_address_probe(virt_addr);
+	}
+}
+
+/*
+ * Read CLW register
+ */
+
+static inline clw_reg_t
+KVM_READ_CLW_REG(clw_addr_t clw_addr)
+{
+	panic("KVM_READ_CLW_REG() is not yet implemented\n");
+	return -1;
+}
+
+/*
+ * KVM MMU DEBUG registers access
+ */
+static inline mmu_reg_t
+KVM_READ_MMU_DEBUG_REG_VALUE(int reg_no)
+{
+	return GUEST_GET_MMU_DEBUG_REG(reg_no);
+}
+static inline void
+KVM_WRITE_MMU_DEBUG_REG_VALUE(int reg_no, mmu_reg_t value)
+{
+	GUEST_SET_MMU_DEBUG_REG(reg_no, value);
+}
+static inline mmu_reg_t
+KVM_READ_DDBAR0_REG_VALUE(void)
+{
+	return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO);
+}
+static inline mmu_reg_t
+KVM_READ_DDBAR1_REG_VALUE(void)
+{
+	return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO);
+}
+static inline mmu_reg_t
+KVM_READ_DDBAR2_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBAR3_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBCR_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDBSR_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDMAR0_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDMAR1_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO); +} +static inline mmu_reg_t +KVM_READ_DDMCR_REG_VALUE(void) +{ + return KVM_READ_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO); +} +static inline void +KVM_WRITE_DDBAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO, value); +} +static inline void +KVM_WRITE_DDBAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO, value); +} +static inline void +KVM_WRITE_DDBAR2_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO, value); +} +static inline void +KVM_WRITE_DDBAR3_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO, value); +} +static inline void +KVM_WRITE_DDBCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO, value); +} +static inline void +KVM_WRITE_DDBSR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO, value); +} +static inline void +KVM_WRITE_DDMAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO, value); +} +static inline void +KVM_WRITE_DDMAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO, value); +} +static inline void +KVM_WRITE_DDMCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO, value); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* It is pure kvm kernel without paravirtualization based on pv_ops */ + +static inline void WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + KVM_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return (mmu_reg_t)KVM_READ_MMU_REG(mmu_addr); +} + +#define BOOT_WRITE_MMU_REG(addr_val, reg_val) \ + BOOT_KVM_WRITE_MMU_REG(addr_val, reg_val) +#define BOOT_READ_MMU_REG(addr_val) \ + BOOT_KVM_READ_MMU_REG(addr_val) + +static inline void WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_PPTB(void) +{ + return KVM_READ_MMU_OS_PPTB_REG(); +} +static inline void WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VPTB(void) +{ + return KVM_READ_MMU_OS_VPTB_REG(); +} +static inline void WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VAB(void) +{ + return KVM_READ_MMU_OS_VAB_REG(); +} +static inline void WRITE_MMU_PID(mmu_reg_t reg_val) +{ + KVM_WRITE_MMU_PID_REG(reg_val); +} +static inline unsigned long READ_MMU_PID(void) +{ + return KVM_READ_MMU_PID_REG(); +} + +static inline void BOOT_WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_PPTB(void) +{ + return BOOT_KVM_READ_MMU_OS_PPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + 
BOOT_KVM_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VPTB(void) +{ + return BOOT_KVM_READ_MMU_OS_VPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VAB(void) +{ + return BOOT_KVM_READ_MMU_OS_VAB_REG(); +} +static inline void BOOT_WRITE_MMU_PID(mmu_reg_t reg_val) +{ + BOOT_KVM_WRITE_MMU_PID_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_PID(void) +{ + return BOOT_KVM_READ_MMU_PID_REG(); +} + +/* + * Write/read Data TLB register + */ + +static inline void WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + KVM_WRITE_DTLB_REG(tlb_addr, mmu_reg); +} + +static inline mmu_reg_t READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return KVM_READ_DTLB_REG(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static inline void +FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + KVM_FLUSH_TLB_ENTRY(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static inline void FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + KVM_FLUSH_DCACHE_LINE(virt_addr); +} +static inline void FLUSH_DCACHE_LINE_OFFSET(e2k_addr_t virt_addr, size_t offset) +{ + KVM_FLUSH_DCACHE_LINE(virt_addr + offset); +} + + +/* + * Clear DCACHE L1 set + */ +static inline void +CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + KVM_CLEAR_DCACHE_L1_SET(virt_addr, set); +} + +/* + * Write/read DCACHE L2 registers + */ +static inline void +WRITE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + KVM_WRITE_DCACHE_L2_REG(reg_val, reg_num, bank_num); +} +static inline unsigned long +READ_L2_REG(int reg_num, int bank_num) +{ + return KVM_READ_DCACHE_L2_REG(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ + +static inline void +FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + KVM_FLUSH_ICACHE_LINE(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static inline void +FLUSH_CACHE_L12(flush_op_t flush_op) +{ + KVM_FLUSH_CACHE_L12(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static inline void +FLUSH_TLB_ALL(flush_op_t flush_op) +{ + KVM_FLUSH_TLB_ALL(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +static inline void +FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + KVM_FLUSH_ICACHE_ALL(flush_op); +} + +/* + * Get Entry probe for virtual address + */ + +static inline probe_entry_t +ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return KVM_ENTRY_PROBE_MMU_OP(virt_addr); +} + +/* + * Get physical address for virtual address + */ + +static inline probe_entry_t +ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return KVM_ADDRESS_PROBE_MMU_OP(virt_addr); +} + +/* + * Read CLW register + */ + +static inline clw_reg_t +READ_CLW_REG(clw_addr_t clw_addr) +{ + return KVM_READ_CLW_REG(clw_addr); +} + +/* + * KVM MMU DEBUG registers access + */ +static inline mmu_reg_t +READ_DDBAR0_REG_VALUE(void) +{ + return KVM_READ_DDBAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR1_REG_VALUE(void) +{ + return KVM_READ_DDBAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR2_REG_VALUE(void) +{ + return KVM_READ_DDBAR2_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR3_REG_VALUE(void) +{ + return KVM_READ_DDBAR3_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBCR_REG_VALUE(void) +{ + return KVM_READ_DDBCR_REG_VALUE(); +} +static inline mmu_reg_t 
+READ_DDBSR_REG_VALUE(void) +{ + return KVM_READ_DDBSR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR0_REG_VALUE(void) +{ + return KVM_READ_DDMAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR1_REG_VALUE(void) +{ + return KVM_READ_DDMAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMCR_REG_VALUE(void) +{ + return KVM_READ_DDMCR_REG_VALUE(); +} +static inline void +WRITE_DDBAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR0_REG_VALUE(value); +} +static inline void +WRITE_DDBAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR1_REG_VALUE(value); +} +static inline void +WRITE_DDBAR2_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR2_REG_VALUE(value); +} +static inline void +WRITE_DDBAR3_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBAR3_REG_VALUE(value); +} +static inline void +WRITE_DDBCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBCR_REG_VALUE(value); +} +static inline void +WRITE_DDBSR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDBSR_REG_VALUE(value); +} +static inline void +WRITE_DDMAR0_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDMAR0_REG_VALUE(value); +} +static inline void +WRITE_DDMAR1_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDMAR1_REG_VALUE(value); +} +static inline void +WRITE_DDMCR_REG_VALUE(mmu_reg_t value) +{ + KVM_WRITE_DDMCR_REG_VALUE(value); +} + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_KVM_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm/nid.h b/arch/e2k/include/asm/kvm/nid.h new file mode 100644 index 0000000..9b9a880 --- /dev/null +++ b/arch/e2k/include/asm/kvm/nid.h @@ -0,0 +1,89 @@ +#ifndef _ASM_E2K_KVM_NID_H +#define _ASM_E2K_KVM_NID_H + +/* + * Numeric identifier (nid) allocator + */ + +#include +#include +#include +#include + +typedef struct kvm_nidmap { + atomic_t nr_free; /* number of free elements */ + void *page; +} kvm_nidmap_t; + +struct kvm_nid_table { + raw_spinlock_t nidmap_lock; + struct kmem_cache *nid_cachep; + char nid_cache_name[32]; + int nidmap_entries; + int nid_hash_bits; + int nid_hash_size; + kvm_nidmap_t *nidmap; + struct hlist_head *nid_hash; + int nid_max_limit; + int reserved_nids; + int last_nid; + unsigned int nidhash_shift; +}; + +#define NID_HASH_SIZE(NID_HASH_BITS) (1 << (NID_HASH_BITS)) + +typedef struct kvm_nid { + int nr; + struct hlist_node nid_chain; +} kvm_nid_t; + +#define nid_hashfn(nr, NID_HASH_BITS) \ + hash_long((unsigned long)(nr), NID_HASH_BITS) + +extern int kvm_alloc_nid(struct kvm_nid_table *nid_table, kvm_nid_t *nid); +extern void kvm_do_free_nid(kvm_nid_t *nid, struct kvm_nid_table *nid_table); +extern void kvm_free_nid(kvm_nid_t *nid, struct kvm_nid_table *nid_table); +extern int kvm_nidmap_init(struct kvm_nid_table *nid_table, + int nid_max_limit, int reserved_nids, int last_nid); +extern void kvm_nidmap_destroy(struct kvm_nid_table *nid_table); + +static inline kvm_nid_t * +kvm_find_nid(struct kvm_nid_table *nid_table, int nid_nr, int hash_index) +{ + kvm_nid_t *nid; + unsigned long flags; + + raw_spin_lock_irqsave(&nid_table->nidmap_lock, flags); + hlist_for_each_entry(nid, + &(nid_table->nid_hash[hash_index]), + nid_chain) { + if (nid->nr == nid_nr) { + raw_spin_unlock_irqrestore(&nid_table->nidmap_lock, + flags); + return nid; + } + } + raw_spin_unlock_irqrestore(&nid_table->nidmap_lock, flags); + return NULL; +} + +#define for_each_guest_nid_node(node, entry, next, nid_table, \ + nid_hlist_member) \ + for ((entry) = 0; (entry) < (nid_table)->nid_hash_size; (entry)++) \ + hlist_for_each_entry_safe(node, next, \ + &((nid_table)->nid_hash[entry]), 
\
+				nid_hlist_member)
+#define	nid_table_lock(nid_table) \
+		raw_spin_lock(&(nid_table)->nidmap_lock)
+#define	nid_table_unlock(nid_table) \
+		raw_spin_unlock(&(nid_table)->nidmap_lock)
+#define	nid_table_lock_irq(nid_table) \
+		raw_spin_lock_irq(&(nid_table)->nidmap_lock)
+#define	nid_table_unlock_irq(nid_table) \
+		raw_spin_unlock_irq(&(nid_table)->nidmap_lock)
+#define	nid_table_lock_irqsave(nid_table, flags) \
+		raw_spin_lock_irqsave(&(nid_table)->nidmap_lock, flags)
+#define	nid_table_unlock_irqrestore(nid_table, flags) \
+		raw_spin_unlock_irqrestore(&(nid_table)->nidmap_lock, flags)
+
+#endif	/* _ASM_E2K_KVM_NID_H */
diff --git a/arch/e2k/include/asm/kvm/page.h b/arch/e2k/include/asm/kvm/page.h
new file mode 100644
index 0000000..b76a1e5
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/page.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2016 MCST, Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _ASM_E2K_KVM_PAGE_H
+#define _ASM_E2K_KVM_PAGE_H
+
+#ifdef __KERNEL__
+
+#include
+
+#if defined(CONFIG_VIRTUALIZATION)
+/*
+ * it can be:
+ *	paravirtualized host/guest kernel
+ *	native host kernel with virtualization support
+ *	native guest kernel
+ * Shift up kernel virtual space and reserve area
+ * from	0x0000200000000000
+ * to	0x0000400000000000
+ * for guest kernel virtual space and it will be top of user virtual space
+ */
+/* #define NATIVE_PAGE_OFFSET	0x0000d00000000000 */
+#define	HOST_PAGE_OFFSET	NATIVE_PAGE_OFFSET	/* 0x0000d00000000000 */
+#define	GUEST_PAGE_OFFSET	0x0000200000000000	/* start and */
+#define	GUEST_KERNEL_MEM_END	0x0000400000000000	/* end of guest */
+							/* kernel virtual */
+							/* space */
+#ifndef	CONFIG_KVM_GUEST_KERNEL
+/* it is host kernel with virtualization support or */
+/* paravirtualized host/guest kernel */
+#define	__guest_pa(x)	((e2k_addr_t)(x) - GUEST_PAGE_OFFSET)
+#define	__guest_va(x)	((void *)((e2k_addr_t) (x) + GUEST_PAGE_OFFSET))
+#endif	/* ! CONFIG_KVM_GUEST_KERNEL */
+
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#if !defined(CONFIG_VIRTUALIZATION)
+/* it is native kernel without any virtualization */
+
+#define	guest_user_address_to_pva(task, addr)	(-1)	/* none guests */
+
+#elif !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native host kernel with virtualization support */
+#define	PAGE_OFFSET		HOST_PAGE_OFFSET
+#define	BOOT_PAGE_OFFSET	PAGE_OFFSET
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#else
+ #error	"Unknown virtualization type"
+#endif	/* ! CONFIG_VIRTUALIZATION */
+
+#endif	/* __KERNEL__ */
+
+#endif /* ! _ASM_E2K_KVM_PAGE_H */
diff --git a/arch/e2k/include/asm/kvm/page_track.h b/arch/e2k/include/asm/kvm/page_track.h
new file mode 100644
index 0000000..614dd9d
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/page_track.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_E2K_KVM_PAGE_TRACK_H
+#define _ASM_E2K_KVM_PAGE_TRACK_H
+
+#ifdef CONFIG_KVM_HV_MMU
+enum kvm_page_track_mode {
+	KVM_PAGE_TRACK_WRITE,
+	KVM_PAGE_TRACK_MAX,
+};
+
+/*
+ * The notifier represented by @kvm_page_track_notifier_node is linked into
+ * the head, which will be notified when the guest triggers the tracked event.
+ *
+ * Write access on the head is protected by kvm->mmu_lock, read access
+ * is protected by track_srcu.
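+ *
+ * A typical consumer embeds a kvm_page_track_notifier_node in its own
+ * state and registers it with the head; a minimal sketch (my_tracker and
+ * my_track_write are illustrative names, not part of this API):
+ *
+ *	static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ *				   const u8 *new, int bytes);
+ *	static struct kvm_page_track_notifier_node my_tracker = {
+ *		.track_write = my_track_write,
+ *	};
+ *	kvm_page_track_register_notifier(kvm, &my_tracker);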
+ */
+struct kvm_page_track_notifier_head {
+	struct srcu_struct track_srcu;
+	struct hlist_head track_notifier_list;
+};
+
+struct kvm_page_track_notifier_node {
+	struct hlist_node node;
+
+	/*
+	 * It is called when guest is writing the write-tracked page
+	 * and write emulation is finished at that time.
+	 *
+	 * @vcpu: the vcpu where the write access happened.
+	 * @gpa: the physical address written by guest.
+	 * @new: the data was written to the address.
+	 * @bytes: the written length.
+	 */
+	void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			    int bytes);
+	/*
+	 * It is called when a memory slot is being moved or removed;
+	 * users can drop write-protection for the pages in that memory slot.
+	 *
+	 * @kvm: the kvm where the memory slot is being moved or removed
+	 * @slot: the memory slot being moved or removed
+	 * @node: this node
+	 */
+	void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    struct kvm_page_track_notifier_node *node);
+};
+
+void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
+
+void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
+				 struct kvm_memory_slot *dont);
+int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+				  unsigned long npages);
+
+void kvm_slot_page_track_add_page(struct kvm *kvm,
+				  struct kvm_memory_slot *slot, gfn_t gfn,
+				  enum kvm_page_track_mode mode);
+void kvm_slot_page_track_remove_page(struct kvm *kvm,
+				     struct kvm_memory_slot *slot, gfn_t gfn,
+				     enum kvm_page_track_mode mode);
+bool kvm_page_track_is_active(struct kvm *kvm, struct kvm_memory_slot *slot,
+			      gfn_t gfn, enum kvm_page_track_mode mode);
+
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+				 struct kvm_page_track_notifier_node *n);
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+				   struct kvm_page_track_notifier_node *n);
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			  int bytes);
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
+#else	/* ! CONFIG_KVM_HV_MMU */
+static inline void kvm_page_track_init(struct kvm *kvm)
+{
+	return;	/* not used */
+}
+static inline void kvm_page_track_cleanup(struct kvm *kvm)
+{
+	return;	/* not used */
+}
+#endif	/* CONFIG_KVM_HV_MMU */
+
+#endif	/* _ASM_E2K_KVM_PAGE_TRACK_H */
diff --git a/arch/e2k/include/asm/kvm/pgtable-tdp.h b/arch/e2k/include/asm/kvm/pgtable-tdp.h
new file mode 100644
index 0000000..6bab855
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/pgtable-tdp.h
@@ -0,0 +1,157 @@
+/*
+ * TDP - Two Dimensional Paging support
+ * GPA -> PA page table structure and common definitions.
+ *
+ * The second dimension page table, which translates guest physical
+ * addresses (GPA) to host physical pages, has the same structure as
+ * the native PT V6.
+ *
+ * Copyright 2018 MCST
+ */
+
+#ifndef _ASM_E2K_KVM_PGTABLE_TDP_H
+#define _ASM_E2K_KVM_PGTABLE_TDP_H
+
+/*
+ * NOTE: E2K TDP is based on four levels of page tables.
+ */
+
+#include
+#include
+#include
+
+/* max.
number of physical address bits (architected) */ +#define E2K_MAX_PHYS_BITS_TDP E2K_MAX_PHYS_BITS_V6 + +#ifndef __ASSEMBLY__ + +/* + * TDP-PTE format + */ + +/* numbers of PTE's bits */ +#define _PAGE_P_BIT_TDP _PAGE_P_BIT_V6 /* Present */ +#define _PAGE_W_BIT_TDP _PAGE_W_BIT_V6 /* Writable */ +#define _PAGE_A_HW_BIT_TDP _PAGE_A_HW_BIT_V6 /* page Accessed */ +#define _PAGE_D_BIT_TDP _PAGE_D_BIT_V6 /* page Dirty */ +#define _PAGE_HUGE_BIT_TDP _PAGE_HUGE_BIT_V6 /* huge Page Size */ +#define _PAGE_MTCR_SHIFT_TDP 8 /* Memory Type */ +#define _PAGE_MTCR_BITS_NUM_TDP 2 /* Combination Rule */ +#define _PAGE_SW1_BIT_TDP _PAGE_SW1_BIT_V6 /* SoftWare bit #1 */ +#define _PAGE_SW2_BIT_TDP _PAGE_SW2_BIT_V6 /* SoftWare bit #2 */ +#define _PAGE_PFN_SHIFT_TDP _PAGE_PFN_SHIFT_V6 /* shift of Physical */ + /* Frame Number */ +#define _PAGE_MT_SHIFT_TDP _PAGE_MT_SHIFT_V6 /* shift of Memory */ + /* Type field */ +#define _PAGE_MT_BITS_NUM_TDP _PAGE_MT_BITS_NUM_V6 /* occupies 3 bits */ +#define _PAGE_NON_EX_BIT_TDP _PAGE_NON_EX_BIT_V6 /* NON EXecutable */ + +#define _PAGE_P_TDP (1ULL << _PAGE_P_BIT_TDP) +#define _PAGE_W_TDP (1ULL << _PAGE_W_BIT_TDP) +#define _PAGE_A_HW_TDP (1ULL << _PAGE_A_HW_BIT_TDP) +#define _PAGE_D_TDP (1ULL << _PAGE_D_BIT_TDP) +#define _PAGE_HUGE_TDP (1ULL << _PAGE_HUGE_BIT_TDP) +#define _PAGE_MTCR_TDP \ + (((1ULL << _PAGE_MTCR_BITS_NUM_TDP) - 1) << \ + _PAGE_MTCR_SHIFT_TDP) +#define _PAGE_SW1_TDP (1ULL << _PAGE_SW1_BIT_TDP) +#define _PAGE_SW2_TDP (1ULL << _PAGE_SW2_BIT_TDP) +#define _PAGE_PFN_TDP \ + ((((1ULL << E2K_MAX_PHYS_BITS_TDP) - 1) >> \ + PAGE_SHIFT) << \ + _PAGE_PFN_SHIFT_TDP) +#define _PAGE_MT_TDP \ + (((1ULL << _PAGE_MT_BITS_NUM_TDP) - 1) << _PAGE_MT_SHIFT_TDP) +#define _PAGE_NON_EX_TDP (1ULL << _PAGE_NON_EX_BIT_TDP) + +#define _PAGE_MT_GET_VAL_TDP(x) _PAGE_MT_GET_VAL(x) +#define _PAGE_MT_SET_VAL_TDP(x, mt) _PAGE_MT_SET_VAL(x, mt) + +/* convert physical address to page frame number for PTE */ +#define _PAGE_PADDR_TO_PFN_TDP(phys_addr) \ + _PAGE_PADDR_TO_PFN_V6(phys_addr) + +/* convert the page frame number from PTE to physical address */ +#define _PAGE_PFN_TO_PADDR_TDP(pte_val) _PAGE_PFN_TO_PADDR_V6(pte_val) + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_tdp(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_TDP); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_TDP); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_TDP); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_TDP); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_TDP); + if (uni_flags & UNI_PAGE_NON_EX) + pte_flags |= (_PAGE_NON_EX_TDP); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_TDP); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_MT_TDP); + if (uni_flags & UNI_PAGE_MEM_TYPE_RULE) + pte_flags |= (_PAGE_MTCR_TDP); + + BUG_ON(pte_flags == 0); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_tdp_flags(const uni_pteval_t uni_flags) +{ + return covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} +static inline pteval_t +get_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} +static inline bool +test_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_tdp_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | 
covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} +static inline pteval_t +clear_pte_val_tdp_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_tdp(uni_flags); +} + +static inline unsigned int +get_pte_val_tdp_memory_type(pteval_t pte_val) +{ + return _PAGE_MT_GET_VAL_TDP(pte_val); +} +static inline pteval_t +set_pte_val_tdp_memory_type(pteval_t pte_val, unsigned int memory_type) +{ + BUG_ON(memory_type != GEN_CACHE_MT && + memory_type != GEN_NON_CACHE_MT && + memory_type != GEN_NON_CACHE_ORDERED_MT && + memory_type != EXT_PREFETCH_MT && + memory_type != EXT_NON_PREFETCH_MT && + memory_type != EXT_CONFIG_MT && + memory_type != EXT_CACHE_MT); + + return _PAGE_MT_SET_VAL_TDP(pte_val, memory_type); +} + +static inline int get_tdp_root_level(void) +{ + return E2K_PT_LEVELS_NUM; +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_KVM_PGTABLE_TDP_H */ diff --git a/arch/e2k/include/asm/kvm/pgtable-x86.h b/arch/e2k/include/asm/kvm/pgtable-x86.h new file mode 100644 index 0000000..359ba15 --- /dev/null +++ b/arch/e2k/include/asm/kvm/pgtable-x86.h @@ -0,0 +1,104 @@ +/* + * E2K ISET X86 emulation page table structure and common definitions. + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_KVM_PGTABLE_X86_H +#define _ASM_E2K_KVM_PGTABLE_X86_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V1-V5 page tables. + * NOTE: E2K has four levels of page tables. + */ + +#include +#include + + +/* max. number of physical address bits (architected) */ +#define MAX_PHYS_BITS_X86_32 32 +#define MAX_PHYS_BITS_X86_64 48 + +/* virtual & physical page definitions */ +#define PAGE_SHIFT_X86 12 +#define PAGE_SIZE_X86 (1ULL << PAGE_SHIFT_X86) + +#ifndef __ASSEMBLY__ + +/* + * PTE format + */ + +#define _PAGE_BIT_PRESENT_X86 0 /* is present */ +#define _PAGE_W_BIT_X86 1 /* bit # of Writable */ +#define _PAGE_BIT_USER_X86 2 /* userspace addressable */ +#define _PAGE_BIT_PWT_X86 3 /* page write through */ +#define _PAGE_BIT_PCD_X86 4 /* page cache disabled */ +#define _PAGE_BIT_ACCESSED_X86 5 /* was accessed (raised by CPU) */ +#define _PAGE_BIT_DIRTY_X86 6 /* was written to (raised by CPU) */ +#define _PAGE_BIT_PSE_X86 7 /* 4 MB (or 2MB) page */ +#define _PAGE_BIT_PAT_X86 7 /* on 4KB pages */ +#define _PAGE_BIT_GLOBAL_X86 8 /* Global TLB entry */ +#define _PAGE_BIT_PAT_LARGE_X86 12 /* On 2MB or 1GB pages */ +#define _PAGE_PFN_SHIFT_X86 12 /* shift of PFN field */ +#define _PAGE_BIT_NX_X86 63 /* No execute: only valid after */ + /* cpuid check */ + +#define _PAGE_P_X86 (1ULL << _PAGE_BIT_PRESENT_X86) +#define _PAGE_W_X86 (1ULL << _PAGE_W_BIT_X86) +#define _PAGE_USER_X86 (1ULL << _PAGE_BIT_USER_X86) +#define _PAGE_PWT_X86 (1ULL << _PAGE_BIT_PWT_X86) +#define _PAGE_PCD_X86 (1ULL << _PAGE_BIT_PCD_X86) +#define _PAGE_A_X86 (1ULL << _PAGE_BIT_ACCESSED_X86) +#define _PAGE_D_X86 (1ULL << _PAGE_BIT_DIRTY_X86) +#define _PAGE_PSE_X86 (1ULL << _PAGE_BIT_PSE_X86) +#define _PAGE_PAT_X86 (1ULL << _PAGE_BIT_PAT_X86) +#define _PAGE_G_X86 (1ULL << _PAGE_BIT_GLOBAL_X86) +#define _PAGE_PAT_LARGE_X86 (1ULL << _PAGE_BIT_PAT_LARGE_X86) +#define _PAGE_NX_X86_32 (0ULL) /* has not such protection */ +#define _PAGE_NX_X86_PAE (1ULL << _PAGE_BIT_NX_X86) +#define _PAGE_NX_X86_64 (1ULL << _PAGE_BIT_NX_X86) + +#define _PAGE_PFN_X86_32 /* 0x0000_0000_ffff_f000 */ \ + ((((1ULL << MAX_PHYS_BITS_X86_32) - 1) >> \ + PAGE_SHIFT_X86) << \ + _PAGE_PFN_SHIFT_X86) +#define _PAGE_PFN_X86_64 /* 0x0000_ffff_ffff_f000 
*/ \
+		((((1ULL << MAX_PHYS_BITS_X86_64) - 1) >>	\
+					PAGE_SHIFT_X86) <<	\
+						_PAGE_PFN_SHIFT_X86)
+
+/* Page table entries format */
+typedef u32	pt_element_x86_32_t;
+typedef u64	pt_element_x86_64_t;
+
+#define	X86_PTE_LEVEL_NUM	1	/* level number of native pte */
+#define	X86_32_PGD_LEVEL_NUM	2	/* level number of pgd for 32 bits */
+					/* physical & virtual addresses mode */
+#define	X86_PAE_PGD_LEVEL_NUM	3	/* level number of pgd for 48 bits */
+					/* physical & 32 bits virtual */
+					/* addresses mode */
+#define	X86_64_PGD_LEVEL_NUM	4	/* level number of pgd for 48 bits */
+					/* physical & 48 bits virtual */
+					/* addresses mode */
+#define	X86_PDPE_LEVEL_NUM	3	/* pgd for PAE mode */
+#define	X86_DIRECTORY_LEVEL_NUM	2	/* from this level starts directory */
+					/* levels of PT */
+
+#define	MAX_HUGE_PAGES_LEVEL_X86_32	X86_32_PGD_LEVEL_NUM
+#define	MAX_HUGE_PAGES_LEVEL_X86_PAE	(X86_PAE_PGD_LEVEL_NUM - 1)
+#define	MAX_HUGE_PAGES_LEVEL_X86_64	MAX_HUGE_PAGES_LEVEL
+
+/* one page table occupies one 4K page */
+#define	PT_ENT_SHIFT_X86_32	2	/* 4 bytes, 2 bits */
+#define	PT_ENT_SHIFT_X86_64	3	/* 8 bytes, 3 bits */
+#define	PT_ENT_BITS_X86_32	(PAGE_SHIFT_X86 - PT_ENT_SHIFT_X86_32)
+#define	PT_ENT_BITS_X86_64	(PAGE_SHIFT_X86 - PT_ENT_SHIFT_X86_64)
+#define	PT_ENT_PER_PAGE_X86_32	(1 << PT_ENT_BITS_X86_32)
+#define	PT_ENT_PER_PAGE_X86_64	(1 << PT_ENT_BITS_X86_64)
+
+#endif	/* ! __ASSEMBLY__ */
+
+#endif	/* ! _ASM_E2K_KVM_PGTABLE_X86_H */
diff --git a/arch/e2k/include/asm/kvm/pgtable.h b/arch/e2k/include/asm/kvm/pgtable.h
new file mode 100644
index 0000000..effd60c
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/pgtable.h
@@ -0,0 +1,77 @@
+/*
+ * E2K page table operations.
+ * KVM virtualization support
+ * Copyright 2016 MCST, Salavat S. Gilyazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_PGTABLE_H
+#define _E2K_KVM_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and
+ * use the E2K page tables.
+ * NOTE: E2K has four levels of page tables, while Linux assumes that
+ * there are three levels of page tables.
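+ *
+ * As a worked example of the GUEST_VMEMMAP window defined below
+ * (assuming E2K_MAX_PHYS_BITS == 40, PAGE_SHIFT == 12 and
+ * sizeof(struct page) == 64; the exact values depend on the real
+ * constants): the window takes (1UL << (40 - 12)) * 64 = 16 GB, so
+ * GUEST_VMEMMAP_END lands at 0x0000 2ea4 0000 0000, within the
+ * commented <0x0000 2f00 0000 0000 bound.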
+ */
+
+#include
+
+#include
+#include
+
+#ifdef	CONFIG_VIRTUALIZATION
+#define	HOST_VMALLOC_START	NATIVE_VMALLOC_START
+					/* 0x0000 e400 0000 0000 */
+#define	HOST_VMALLOC_END	NATIVE_VMALLOC_END
+					/* 0x0000 e500 0000 0000 */
+#define	HOST_VMEMMAP_START	NATIVE_VMEMMAP_START
+					/* 0x0000 e600 0000 0000 */
+#define	HOST_VMEMMAP_END	NATIVE_VMEMMAP_END
+					/*<0x0000 e700 0000 0000 */
+#define	GUEST_VMALLOC_START	(SHADOW_KERNEL_IMAGE_AREA_BASE + \
+						0x008000000000UL)
+					/* 0x0000 2e80 0000 0000 */
+#define	GUEST_VMALLOC_END	(GUEST_VMALLOC_START + \
+						0x001000000000UL)
+					/* 0x0000 2e90 0000 0000 */
+#define	GUEST_VMEMMAP_START	(GUEST_VMALLOC_END + \
+						0x001000000000UL)
+					/* 0x0000 2ea0 0000 0000 */
+#define	GUEST_VMEMMAP_END	(GUEST_VMEMMAP_START + \
+					(1UL << (E2K_MAX_PHYS_BITS - \
+							PAGE_SHIFT)) * \
+						sizeof(struct page))
+					/*<0x0000 2f00 0000 0000 */
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native kernel without any virtualization */
+/* or native host with virtualization support */
+#ifndef	CONFIG_VIRTUALIZATION
+/* it is native kernel without any virtualization */
+#include
+#else	/* CONFIG_VIRTUALIZATION */
+/* it is native host with virtualization support */
+#define	VMALLOC_START		HOST_VMALLOC_START
+					/* 0x0000 e400 0000 0000 */
+#define	VMALLOC_END		HOST_VMALLOC_END
+					/* 0x0000 e500 0000 0000 */
+#define	VMEMMAP_START		HOST_VMEMMAP_START
+					/* 0x0000 e600 0000 0000 */
+#define	VMEMMAP_END		HOST_VMEMMAP_END
+					/*<0x0000 e700 0000 0000 */
+#endif	/* ! CONFIG_VIRTUALIZATION */
+/* it is native kernel without any virtualization */
+/* or native host with virtualization support */
+
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#else
+ #error	"Unknown virtualization type"
+#endif	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! _E2K_KVM_PGTABLE_H */
diff --git a/arch/e2k/include/asm/kvm/process.h b/arch/e2k/include/asm/kvm/process.h
new file mode 100644
index 0000000..a26e06f
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/process.h
@@ -0,0 +1,810 @@
+/*
+ * KVM guest kernel processes support
+ * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_PROCESS_H
+#define _E2K_KVM_PROCESS_H
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#undef	DEBUG_KVM_GUEST_STACKS_MODE
+#undef	DebugGUST
+#define	DEBUG_KVM_GUEST_STACKS_MODE	0	/* guest user stacks */
+						/* copy debug */
+#define	DebugGUST(fmt, args...) \
+({ \
+	if (DEBUG_KVM_GUEST_STACKS_MODE) \
+		pr_info("%s(): " fmt, __func__, ##args); \
+})
+
+extern void kvm_clear_host_thread_info(thread_info_t *ti);
+extern gthread_info_t *create_guest_start_thread_info(struct kvm_vcpu *vcpu);
+extern int kvm_resume_vm_thread(void);
+
+extern int kvm_correct_guest_trap_return_ip(unsigned long return_ip);
+
+extern long return_pv_vcpu_syscall_fork(void);
+
+/*
+ * Is the CPU at guest Hardware Virtualized mode
+ * CORE_MODE.gmi is true only at guest HV mode
+ */
+static inline bool is_CPU_at_guest_hv_vm_mode(void)
+{
+	e2k_core_mode_t CORE_MODE;
+
+	CORE_MODE.CORE_MODE_reg = native_read_CORE_MODE_reg_value();
+	if (CORE_MODE.CORE_MODE_gmi) {
+		return true;
+	}
+	return false;
+}
+#ifdef	CONFIG_KVM_HOST_MODE
+/* it is native host kernel with virtualization support */
+/* or it is paravirtualized host and guest kernel */
+static inline bool host_is_at_HV_GM_mode(void)
+{
+	if (unlikely(!IS_HV_GM() && is_CPU_at_guest_hv_vm_mode()))
+		return true;
+	return false;
+}
+#endif	/* CONFIG_KVM_HOST_MODE */
+
+static __always_inline bool
+is_guest_user_hardware_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (stack_base < GUEST_TASK_SIZE) {
+		return true;
+	}
+	return false;
+}
+
+/* host kernel supports virtualization and should update VCPU thread context */
+/* see arch/e2k/include/asm/process.h for more details why and how */
+static __always_inline void
+kvm_host_update_vcpu_thread_context(struct task_struct **task,
+	struct thread_info **ti, struct pt_regs **regs,
+	struct gthread_info **gti, struct kvm_vcpu **vcpu)
+{
+	if (!test_thread_flag(TIF_VIRTUALIZED_GUEST))
+		/* it is not a VCPU thread */
+		return;
+	if ((ti != NULL) && (*ti == current_thread_info()))
+		/* thread is not changed, so no updates are needed */
+		return;
+	if (ti != NULL)
+		*ti = current_thread_info();
+	if (task != NULL)
+		*task = current;
+	if (regs != NULL)
+		*regs = current_thread_info()->pt_regs;
+	if (gti != NULL)
+		*gti = current_thread_info()->gthread_info;
+	if (vcpu != NULL)
+		*vcpu = current_thread_info()->vcpu;
+}
+#define	KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \
+						__gti, __vcpu) \
+		kvm_host_update_vcpu_thread_context(__task, __ti, __regs, \
+						__gti, __vcpu)
+#define	KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(__ti) \
+do { \
+	GTI_BUG_ON((__ti) != current_thread_info()); \
+} while (false)
+
+/*
+ * In some cases the local data stack cannot be expanded;
+ * all such cases for the kvm guest should be handled here.
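+ * (In kvm_usd_cannot_be_expanded() below, a USD base that falls into
+ * [GUEST_TASK_SIZE, HOST_TASK_SIZE) identifies a guest kernel thread
+ * stack, which has a fixed size and therefore must not be expanded.)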
+ * The guest kernel is a user process of the host, and guest kernel threads
+ * are maintained by the host, including all traps on guest kernel threads
+ */
+#define	kvm_usd_cannot_be_expanded(regs) \
+({ \
+	bool is; \
+ \
+	if (!test_thread_flag(TIF_VIRTUALIZED_GUEST) || \
+			paravirt_enabled()) \
+		/* Stack is not guest data stack */ \
+		/* or it is guest and it cannot run own guest */ \
+		is = false; \
+	else if (user_stack_cannot_be_expanded() || \
+			(regs->stacks.usd_lo.USD_lo_base >= GUEST_TASK_SIZE && \
+			regs->stacks.usd_lo.USD_lo_base < HOST_TASK_SIZE)) \
+		/* it is stack of guest kernel thread, kernel stacks */ \
+		/* should not be expanded */ \
+		is = true; \
+	else { \
+		/* it is not guest process or it is guest user */ \
+		/* cannot be here */ \
+		BUG_ON(true); \
+		is = false; \
+	} \
+	is; \
+})
+
+static inline void
+kvm_clear_virt_thread_struct(thread_info_t *thread_info)
+{
+	thread_info->gpid_nr = -1;	/* cannot inherit, only set by */
+					/* guest/host kernel */
+#ifdef	CONFIG_KVM_HOST_MODE
+	/* clear KVM support fields and flags */
+	if (test_ti_thread_flag(thread_info, TIF_VIRTUALIZED_HOST) ||
+		test_ti_thread_flag(thread_info, TIF_VIRTUALIZED_GUEST))
+		/* It is clone() on host to create guest */
+		/* VCPU or VIRQ VCPU threads */
+		kvm_clear_host_thread_info(thread_info);
+	if (thread_info->gthread_info) {
+		/* It is guest thread: clear from old process */
+		thread_info->gthread_info = NULL;
+		/* kvm_pv_clear_guest_thread_info(thread_info->gthread_info); */
+	}
+	/* VCPU host/guest thread flags and VCPU structure cannot inherit */
+	/* only to pass */
+	clear_ti_thread_flag(thread_info, TIF_VIRTUALIZED_HOST);
+	thread_info->vcpu = NULL;
+#endif	/* CONFIG_KVM_HOST_MODE */
+}
+
+#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* host kernel with virtualization support */
+
+#define	UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \
+	KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \
+						__gti, __vcpu)
+#define	CHECK_VCPU_THREAD_CONTEXT(__ti) \
+	KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(__ti)
+
+#endif	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+
+#define	KVM_GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) \
+		E2K_GOTO_ARG1(return_to_paravirt_guest, ret_value)
+#define	KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) \
+		DEF_COND_GOTO_ARG1(return_to_paravirt_guest, cond, ret_value)
+#define	KVM_GOTO_DONE_TO_PARAVIRT_GUEST() \
+		E2K_GOTO(done_to_paravirt_guest)
+#define	KVM_COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) \
+		DEF_COND_GOTO(done_to_paravirt_guest, cond)
+
+/*
+ * The macro completes, on the host, the return (done) to the guest process
+ * after trap handling
+ */
+#define	KVM_GET_PARAVIRT_GUEST_MODE(pv_guest, regs) \
+({ \
+	bool pv_mode; \
+ \
+	pv_mode = test_thread_flag(TIF_PARAVIRT_GUEST); \
+	/* trap can occur on light hypercall and handled as trap on user */ \
+	/* but return will be on host kernel into light hypercall */ \
+	/* In this case do not switch to guest shadow image */ \
+	pv_mode &= from_host_user_mode((regs)->crs.cr1_lo); \
+ \
+	(pv_guest) = pv_mode; \
+})
+
+/*
+ * Set global registers used by host to support virtualization
+ * Now only one (pair) register is used as pointer to VCPU state structure
+ */
+#ifndef	CONFIG_USE_GD_TO_VCPU_ACCESS
+#define	SET_HOST_GREG(greg_no, value)	NATIVE_SET_DGREG(greg_no, value)
+#define	GET_HOST_GREG(greg_no)		NATIVE_GET_UNTEGGED_DGREG(greg_no)
+#else	/* CONFIG_USE_GD_TO_VCPU_ACCESS */
+ #error	"Global pointer to VCPU state can not be loaded to GD register"
+#endif	/* ! CONFIG_USE_GD_TO_VCPU_ACCESS */
+
+extern noinline notrace __interrupt
+void go2guest(long fn, bool priv_guest);
+
+#ifdef	CONFIG_KVM_HOST_MODE
+/* it is native host kernel with virtualization support */
+/* or it is paravirtualized host and guest kernel */
+
+#define	GET_GUEST_VCPU_STATE_POINTER(__vcpu) \
+({ \
+	e2k_addr_t vs = (e2k_addr_t)((__vcpu)->arch.vcpu_state); \
+ \
+	vs = kvm_vcpu_hva_to_gpa(__vcpu, vs); \
+	if (is_paging(__vcpu)) \
+		vs = (e2k_addr_t)__guest_va(vs); \
+	vs; \
+})
+
+#define	INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu) \
+({ \
+	e2k_addr_t vs = GET_GUEST_VCPU_STATE_POINTER(__vcpu); \
+ \
+	HOST_ONLY_COPY_TO_VCPU_STATE_GREG(&(__ti)->k_gregs, vs); \
+})
+
+#define	INIT_HOST_GREGS_COPY(__ti, __vcpu) \
+({ \
+	/* Zeroing global registers used by kernel */ \
+	CLEAR_KERNEL_GREGS_COPY(__ti); \
+	/* Set pointer to VCPU state to enable interface with guest */ \
+	INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu); \
+})
+
+static inline void
+prepare_pv_vcpu_inject_stacks(struct kvm_vcpu *vcpu, pt_regs_t *regs)
+{
+	e2k_stacks_t *stacks, *g_stacks;
+	gthread_info_t *gti = pv_vcpu_get_gti(vcpu);
+
+	if (regs->g_stacks_valid) {
+		/* already prepared */
+		return;
+	}
+
+	/* all stacks are at empty state, because recursion of guest user */
+	/* traps/system calls cannot occur */
+	g_stacks = &regs->g_stacks;
+	g_stacks->usd_lo = gti->g_usd_lo;
+	g_stacks->usd_hi = gti->g_usd_hi;
+	g_stacks->top = gti->g_sbr.SBR_base;
+	g_stacks->psp_lo = gti->g_psp_lo;
+	g_stacks->psp_hi = gti->g_psp_hi;
+	g_stacks->pcsp_lo = gti->g_pcsp_lo;
+	g_stacks->pcsp_hi = gti->g_pcsp_hi;
+
+	/* pshtp & pcshtp from guest user stack real state upon trap/syscall */
+	stacks = &regs->stacks;
+	g_stacks->pshtp = stacks->pshtp;
+	g_stacks->pcshtp = stacks->pcshtp;
+
+	regs->g_stacks_valid = true;
+	regs->g_stacks_active = false;
+	regs->need_inject = false;
+}
+
+#undef	EMULATE_EMPTY_CHAIN_STACK	/* only to debug */
+
+#ifdef	EMULATE_EMPTY_CHAIN_STACK
+static __always_inline void
+pv_vcpu_emulate_empty_chain_staks(struct kvm_vcpu *vcpu, pt_regs_t *regs,
+				  e2k_stacks_t *stacks, bool guest_user)
+{
+	e2k_pcshtp_t pcshtp;
+	unsigned long flags;
+	e2k_pcsp_lo_t g_pcsp_lo, k_pcsp_lo;
+	e2k_pcsp_hi_t g_pcsp_hi, k_pcsp_hi;
+	e2k_mem_crs_t __user *g_cframe;
+	e2k_mem_crs_t *k_crs;
+	int ret;
+
+	pcshtp = stacks->pcshtp;
+	if (!(guest_user && pcshtp <= 0x40))
+		return;
+
+	g_pcsp_lo = regs->stacks.pcsp_lo;
+	g_pcsp_hi = regs->stacks.pcsp_hi;
+
+	raw_all_irq_save(flags);
+	NATIVE_FLUSHC;
+	k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+	k_pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG();
+	BUG_ON(AS(k_pcsp_hi).ind != pcshtp);
+
+	k_crs = (e2k_mem_crs_t *) AS(k_pcsp_lo).base;
+	g_cframe = (e2k_mem_crs_t __user *) (AS(g_pcsp_lo).base +
+			AS(g_pcsp_hi).ind - pcshtp);
+	ret = user_hw_stack_frames_copy(g_cframe, k_crs, pcshtp, regs,
+			k_pcsp_hi.PCSP_hi_ind, true);
+	if (ret) {
+		pr_err("%s(): copy to user stack failed\n", __func__);
+		BUG_ON(true);
+	}
+	k_pcsp_hi.PCSP_hi_ind -= pcshtp;
+	pcshtp = 0;
+	regs->stacks.pcshtp = pcshtp;
+	stacks->pcshtp = pcshtp;
+	NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi);
+	raw_all_irq_restore(flags);
+}
+#else	/* !EMULATE_EMPTY_CHAIN_STACK */
+static __always_inline void
+pv_vcpu_emulate_empty_chain_staks(struct kvm_vcpu *vcpu, pt_regs_t *regs,
+				  e2k_stacks_t *stacks, bool guest_user)
+{
+}
+#endif	/* EMULATE_EMPTY_CHAIN_STACK */
+
+/**
+ * pv_vcpu_user_hw_stacks_copy - check the size of the user hardware stacks
+ *	that have been SPILLed to kernel and copy them back to guest space
+ * @regs - saved guest user stack registers
+ * @cur_window_q - size of current window in procedure stack
+ *
+ * All parts of the guest user's stacks were already copied to the guest
+ * kernel stacks, so it only needs to check that the full size was copied
+ * and there is nothing to copy here
+ */
+static __always_inline int
+pv_vcpu_user_hw_stacks_copy(pt_regs_t *regs, e2k_stacks_t *stacks,
+				u64 cur_window_q, bool guest_user)
+{
+	e2k_psp_lo_t g_psp_lo = stacks->psp_lo,
+			k_psp_lo = current_thread_info()->k_psp_lo;
+	e2k_psp_hi_t g_psp_hi = stacks->psp_hi;
+	e2k_pcsp_lo_t g_pcsp_lo = stacks->pcsp_lo,
+			k_pcsp_lo = current_thread_info()->k_pcsp_lo;
+	e2k_pcsp_hi_t g_pcsp_hi = stacks->pcsp_hi;
+	s64 g_pshtp_size, g_pcshtp_size, ps_copy_size, pcs_copy_size;
+	int ret;
+
+	DebugUST("guest kernel chain state: base 0x%llx ind 0x%x size 0x%x\n",
+		g_pcsp_lo.PCSP_lo_base, g_pcsp_hi.PCSP_hi_ind,
+		g_pcsp_hi.PCSP_hi_size);
+	DebugUST("guest kernel proc state: base 0x%llx ind 0x%x size 0x%x\n",
+		g_psp_lo.PSP_lo_base, g_psp_hi.PSP_hi_ind,
+		g_psp_hi.PSP_hi_size);
+	g_pshtp_size = GET_PSHTP_MEM_INDEX(stacks->pshtp);
+	g_pcshtp_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp);
+	DebugUST("guest kernel chain stack PCSHTP 0x%llx, "
+		"proc stack PSHTP 0x%llx cur window 0x%llx\n",
+		g_pcshtp_size, g_pshtp_size, cur_window_q);
+
+	/*
+	 * FIXME: the current implementation of the guest user signal handler
+	 * injection uses direct copying to guest hardware stacks.
+	 * It is a bad decision and needs to be corrected
+	KVM_BUG_ON(is_paging(current_thread_info()->vcpu) &&
+			(g_psp_lo.PSP_lo_base < GUEST_TASK_SIZE ||
+				g_pcsp_lo.PCSP_lo_base < GUEST_TASK_SIZE));
+	 */
+
+	/*
+	 * Calculate size of user's part to copy from kernel stacks
+	 * into guest kernel stacks
+	 */
+	pcs_copy_size = get_pcs_copy_size(g_pcshtp_size);
+	ps_copy_size = get_ps_copy_size(cur_window_q, g_pshtp_size);
+	/* Make sure there is enough space in CF for the FILL */
+	BUG_ON((E2K_MAXCR_q - 4) * 16 < E2K_CF_MAX_FILL);
+	DebugUST("to copy chain stack 0x%llx, proc stack 0x%llx\n",
+		pcs_copy_size, ps_copy_size);
+
+	if (likely(pcs_copy_size <= 0 && ps_copy_size <= 0))
+		return 0;
+
+	if (unlikely(pcs_copy_size > 0)) {
+		e2k_pcsp_hi_t k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+		void __user *dst;
+		void *src;
+
+		/* Since SPILL'ed guest user data will be copied to guest */
+		/* kernel stacks there cannot be any overflow of user's */
+		/* hardware stack. */
+		if (unlikely(AS(g_pcsp_hi).ind > AS(g_pcsp_hi).size)) {
+			pr_err("%s(): guest kernel chain stack overflow "
+				"(out of memory?): ind 0x%x size 0x%x\n",
+				__func__, g_pcsp_hi.PCSP_hi_ind,
+				g_pcsp_hi.PCSP_hi_size);
+			KVM_BUG_ON(true);
+		}
+		dst = (void __user *)(g_pcsp_lo.PCSP_lo_base +
+					g_pcsp_hi.PCSP_hi_ind);
+		if (!guest_user) {
+			/* stack index has been incremented on PCSHTP */
+			dst -= g_pcshtp_size;
+		}
+		src = (void *)k_pcsp_lo.PCSP_lo_base;
+
+		ret = user_hw_stack_frames_copy(dst, src, pcs_copy_size, regs,
+						k_pcsp_hi.PCSP_hi_ind, true);
+		if (ret)
+			return ret;
+		if (guest_user) {
+			g_pcsp_hi.PCSP_hi_ind += pcs_copy_size;
+			stacks->pcsp_hi = g_pcsp_hi;
+			DebugGUST("guest user chain stack frames copied from "
+				"host %px to guest kernel from %px size 0x%llx "
+				"PCSP.ind 0x%x\n",
+				src, dst, pcs_copy_size, g_pcsp_hi.PCSP_hi_ind);
+		}
+	}
+
+	if (unlikely(ps_copy_size > 0)) {
+		e2k_psp_hi_t k_psp_hi = NATIVE_NV_READ_PSP_HI_REG();
+		void __user *dst;
+		void *src;
+
+		/* Since SPILL'ed guest user data will be copied to guest */
+		/* kernel stacks there cannot be any overflow of user's */
+		/* hardware stack. */
+		if (unlikely(AS(g_psp_hi).ind > AS(g_psp_hi).size)) {
+			pr_err("%s(): guest kernel proc stack overflow "
+				"(out of memory?): ind 0x%x size 0x%x\n",
+				__func__, g_psp_hi.PSP_hi_ind,
+				g_psp_hi.PSP_hi_size);
+			KVM_BUG_ON(true);
+		}
+		dst = (void __user *)(g_psp_lo.PSP_lo_base +
+					g_psp_hi.PSP_hi_ind);
+		if (!guest_user) {
+			/* stack index has been incremented on PSHTP */
+			dst -= g_pshtp_size;
+		}
+		src = (void *)k_psp_lo.PSP_lo_base;
+
+		ret = user_hw_stack_frames_copy(dst, src, ps_copy_size, regs,
+						k_psp_hi.PSP_hi_ind, false);
+		if (ret)
+			return ret;
+		if (guest_user) {
+			g_psp_hi.PSP_hi_ind += ps_copy_size;
+			stacks->psp_hi = g_psp_hi;
+			DebugGUST("guest user proc stack frames copied from "
+				"host %px to guest kernel from %px size 0x%llx "
+				"PSP.ind 0x%x\n",
+				src, dst, ps_copy_size, g_psp_hi.PSP_hi_ind);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * pv_vcpu_user_hw_stacks_prepare - prepare guest user hardware stacks
+			that have been SPILLed to kernel back
+			to guest user space
+ * @regs - saved guest user stack registers
+ * @cur_window_q - size of current window in procedure stack
+ * @syscall - true if called upon direct system call exit (no signal handlers)
+ *
+ * This does two things:
+ *
+ * 1) It is possible that upon kernel entry pcshtp == 0 in some cases:
+ *	- user signal handler had pcshtp==0x20 before return to sigreturn()
+ *	- user context had pcshtp==0x20 before return to makecontext_trampoline()
+ *	- chain stack underflow happened
+ * So it is possible in sigreturn() and traps, but not in system calls.
+ * If we are using the trick with return to FILL user hardware stacks then
+ * we must have a frame in chain stack to return to. So in this case kernel's
+ * chain stack is moved up by one frame (0x20 bytes).
+ * We also fill the new frame with actual user data and update stacks->pcshtp,
+ * this is needed to keep the coherent state where saved stacks->pcshtp values
+ * show how much data from user space has been spilled to kernel space.
+ *
+ * 2) It is not possible to always FILL all of user data that have been
+ * SPILLed to kernel stacks. So we manually copy the leftovers that can
+ * not be FILLed to user space.
+ * This copy does not update stacks->pshtp and stacks->pcshtp. Main reason
+ * is signals: if a signal arrives after copying then it must see a coherent
+ * state where saved stacks->pshtp and stacks->pcshtp values show how much
+ * data from user space has been spilled to kernel space.
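+ *
+ * A rough worked example for case 2 (numbers illustrative only):
+ * if stacks->pcshtp reports 0x80 bytes of guest chain data SPILLed to
+ * the host kernel stack and get_pcs_copy_size() says 0x40 of them can
+ * not be FILLed back by hardware, those 0x40 bytes are copied to guest
+ * space by pv_vcpu_user_hw_stacks_copy() above, while stacks->pcshtp
+ * keeps reporting 0x80 for the signal-coherency reason described here.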
+ */
+static __always_inline void
+pv_vcpu_user_hw_stacks_prepare(struct kvm_vcpu *vcpu, pt_regs_t *regs,
+		u64 cur_window_q, enum restore_caller from, int syscall)
+{
+	e2k_stacks_t *stacks;
+	e2k_pcshtp_t pcshtp;
+	bool guest_user;
+	bool paging = is_paging(vcpu);
+	int ret;
+
+	if (likely(paging)) {
+		guest_user = !!(syscall || !pv_vcpu_trap_on_guest_kernel(regs));
+	} else {
+		guest_user = false;
+	}
+	if (guest_user) {
+		if (from & FROM_PV_VCPU_MODE) {
+			/* all preparation has been made */
+			/* by host & guest handler */
+			return;
+		}
+
+		/* trap on/syscall from guest user, so regs keeps the user */
+		/* registers state and the guest kernel stacks must be used */
+		/* in empty state to handle this trap/syscall */
+		if (!regs->g_stacks_valid) {
+			prepare_pv_vcpu_inject_stacks(vcpu, regs);
+		}
+		stacks = &regs->g_stacks;
+	} else {
+		/* trap on guest kernel, so regs already points to guest */
+		/* kernel stacks and trap will be handled by host */
+		/* same as other user's processes traps */
+		stacks = &regs->stacks;
+	}
+
+	/* only to debug on simulator: pcshtp == 0 */
+	pv_vcpu_emulate_empty_chain_staks(vcpu, regs, stacks, guest_user);
+
+	pcshtp = stacks->pcshtp;
+	DebugUST("guest kernel chain stack state: base 0x%llx ind 0x%x "
+		"size 0x%x\n",
+		stacks->pcsp_lo.PCSP_lo_base,
+		stacks->pcsp_hi.PCSP_hi_ind,
+		stacks->pcsp_hi.PCSP_hi_size);
+	DebugUST("host kernel chain stack state: base 0x%llx ind 0x%x "
+		"size 0x%x\n",
+		NATIVE_NV_READ_PCSP_LO_REG().PCSP_lo_base,
+		NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_ind,
+		NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_size);
+	DebugUST("guest kernel chain stack size to fill PCSHTP 0x%x\n",
+		pcshtp);
+	/*
+	 * 1) Make sure there is free space in kernel chain stack to return to
+	 */
+	if (!syscall && pcshtp == 0 && !guest_user) {
+		unsigned long flags;
+		e2k_pcsp_lo_t g_pcsp_lo = stacks->pcsp_lo,
+				k_pcsp_lo = current_thread_info()->k_pcsp_lo;
+		e2k_pcsp_hi_t g_pcsp_hi = stacks->pcsp_hi, k_pcsp_hi;
+		e2k_mem_crs_t __user *g_cframe;
+		e2k_mem_crs_t *k_crs;
+		int ret = -EINVAL;
+
+		raw_all_irq_save(flags);
+		NATIVE_FLUSHC;
+		k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+		BUG_ON(AS(k_pcsp_hi).ind);
+		AS(k_pcsp_hi).ind += SZ_OF_CR;
+		NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi);
+
+		k_crs = (e2k_mem_crs_t *) AS(k_pcsp_lo).base;
+		g_cframe = (e2k_mem_crs_t __user *) (AS(g_pcsp_lo).base +
+				AS(g_pcsp_hi).ind);
+		if ((u64) g_cframe > (u64) AS(g_pcsp_lo).base) {
+			ret = __copy_user_to_current_hw_stack(k_crs,
+				g_cframe - 1, sizeof(*k_crs), regs, true);
+		}
+		raw_all_irq_restore(flags);
+
+		/* Can happen if application returns until it runs out of
+		 * chain stack or there is no free memory for stacks.
+		 * There is no user stack to return to - die. */
+		if (ret) {
+			E2K_LMS_HALT_OK;
+			pr_err("%s(): SIGKILL. %s\n",
+				__func__,
+				(ret == -EINVAL) ?
+				"tried to return to kernel"
+				:
+				"ran into Out-of-Memory on user stacks");
+			force_sig(SIGKILL);
+			return;
+		}
+		DebugUST("copy guest user chain frame from %px to kernel "
+			"bottom from %px\n",
+			g_cframe - 1, k_crs);
+
+		if (AS(g_pcsp_hi).ind < SZ_OF_CR) {
+			pr_err("%s(): guest kernel chain stack underflow\n",
+				__func__);
+			KVM_BUG_ON(true);
+		}
+
+		pcshtp = SZ_OF_CR;
+		stacks->pcshtp = pcshtp;
+		DebugUST("guest kernel chain stack to FILL PCSHTP "
+			"set to 0x%x\n",
+			stacks->pcshtp);
+	} else if (!syscall && pcshtp == 0 && guest_user) {
+		e2k_pcsp_hi_t k_pcsp_hi;
+		unsigned long flags;
+
+		/* set flag for unconditional injection to do not copy */
+		/* from guest user space */
+		regs->need_inject = true;
+
+		/* reserve one bottom frame for the trampoline; */
+		/* the guest handler replaces the guest user trapped frame */
+		raw_all_irq_save(flags);
+		NATIVE_FLUSHC;
+		k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+		BUG_ON(k_pcsp_hi.PCSP_hi_ind);
+		k_pcsp_hi.PCSP_hi_ind += 1 * SZ_OF_CR;
+		NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi);
+		raw_all_irq_restore(flags);
+	}
+
+	/*
+	 * 2) Copy user data that cannot be FILLed
+	 */
+	ret = pv_vcpu_user_hw_stacks_copy(regs, stacks, cur_window_q,
+						guest_user);
+	if (unlikely(ret))
+		do_exit(SIGKILL);
+}
+
+static __always_inline void
+host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs,
+		u64 cur_window_q, enum restore_caller from, int syscall)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (likely(!kvm_test_intc_emul_flag(regs))) {
+		/* trap on/syscall from host user processes */
+		return native_user_hw_stacks_prepare(stacks, regs,
+						cur_window_q, from, syscall);
+	}
+
+	vcpu = current_thread_info()->vcpu;
+	KVM_BUG_ON(vcpu == NULL);
+	pv_vcpu_user_hw_stacks_prepare(vcpu, regs, cur_window_q, from, syscall);
+}
+
+#define	SAVE_HOST_KERNEL_GREGS_COPY_TO(__k_gregs, __g_gregs) \
+({ \
+	kernel_gregs_t *kg = (__k_gregs); \
+	kernel_gregs_t *gg = (__g_gregs); \
+	unsigned long task__; \
+	unsigned long cpu_id__; \
+	unsigned long cpu_off__; \
+ \
+	ONLY_COPY_FROM_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \
+	ONLY_COPY_TO_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \
+})
+
+#define	SAVE_HOST_KERNEL_GREGS_COPY(__ti, __gti) \
+({ \
+	kernel_gregs_t *k_gregs = &(__ti)->k_gregs_light; \
+	kernel_gregs_t *g_gregs = &(__gti)->g_gregs; \
+ \
+	SAVE_HOST_KERNEL_GREGS_COPY_TO(k_gregs, g_gregs); \
+})
+
+#define	RESTORE_HOST_KERNEL_GREGS_COPY_FROM(__k_gregs, __g_gregs) \
+({ \
+	kernel_gregs_t *kg = (__k_gregs); \
+	kernel_gregs_t *gg = (__g_gregs); \
+	unsigned long task__; \
+	unsigned long cpu_id__; \
+	unsigned long cpu_off__; \
+ \
+	ONLY_COPY_FROM_KERNEL_GREGS(gg, task__, cpu_id__, cpu_off__); \
+	ONLY_COPY_TO_KERNEL_GREGS(kg, task__, cpu_id__, cpu_off__); \
+})
+
+#define	RESTORE_HOST_KERNEL_GREGS_COPY(__ti, __gti, __vcpu) \
+({ \
+	kernel_gregs_t *k_gregs = &(__ti)->k_gregs; \
+	kernel_gregs_t *g_gregs = &(__gti)->g_gregs; \
+ \
+	RESTORE_HOST_KERNEL_GREGS_COPY_FROM(k_gregs, g_gregs); \
+	INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu); \
+})
+
+#define	printk		printk_fixed_args
+#define	__trace_bprintk	__trace_bprintk_fixed_args
+#define	panic		panic_fixed_args
+
+/*
+ * The function completes on host the switch to a new user process
+ * (sys_execve()) of guest kernel.
+ */
+static __always_inline __interrupt void
+kvm_complete_switch_to_user_func(void)
+{
+	thread_info_t	*ti;
+	gthread_info_t	*gti;
+	bool		from_virt_guest;
+	bool		from_pv_guest;
+
+	/* current thread info/task pointer global registers were cleared */
+	/* while all global registers were set to empty state */
+	ti = NATIVE_READ_CURRENT_REG();
+	gti = ti->gthread_info;
+	from_virt_guest = test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST);
+	from_pv_guest = test_ti_thread_flag(ti, TIF_PARAVIRT_GUEST);
+
+	/* the function should switch interrupt control from UPSR to */
+	/* PSR and set initial state of user UPSR */
+	if (!from_virt_guest) {
+		NATIVE_SET_USER_INITIAL_UPSR(E2K_USER_INITIAL_UPSR);
+	} else {
+		KVM_SET_GUEST_USER_INITIAL_UPSR(ti);
+	}
+
+	if (unlikely(from_virt_guest)) {
+		/* the gregs structure in the guest thread info will */
+		/* contain user global registers from now on */
+		gti->gregs_active = 1;
+		gti->gregs_valid = 0;
+		gti->gregs_for_currents_valid = 0;
+		KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(from_pv_guest, 0);
+	}
+}
+
+/*
+ * The function completes return to guest user signal handler
+ */
+static __always_inline __interrupt void
+kvm_complete_go2user(thread_info_t *ti, long fn)
+{
+	bool is_pv_guest;	/* entry point fn is paravirtualized guest */
+				/* kernel function */
+
+	if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) {
+		/* it is not a return to guest process, */
+		/* but the function should restore user UPSR state */
+		NATIVE_WRITE_UPSR_REG(ti->upsr);
+		return;
+	}
+	if ((e2k_addr_t)fn < GUEST_TASK_SIZE &&
+			(ti->vcpu == NULL || is_paging(ti->vcpu))) {
+		/* it is a return to guest user process */
+		/* clear host global registers used for virtualization
+		CLEAR_HOST_GREGS();
+		*/
+		/* the function should restore guest user UPSR state */
+		KVM_RESTORE_GUEST_USER_UPSR(ti);
+	} else {
+		/* it is a return to guest kernel process
+		ONLY_SET_HOST_GREGS(ti->vcpu, ti->vcpu->arch.vcpu_state);
+		*/
+		/* the function should restore guest kernel UPSR state */
+		KVM_RESTORE_GUEST_KERNEL_UPSR(ti);
+	}
+
+	is_pv_guest = ((e2k_addr_t)fn >= HOST_TASK_SIZE);
+
+	KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(is_pv_guest, 0);
+}
+
+#undef	printk
+#undef	__trace_bprintk
+#undef	panic
+#else	/* ! CONFIG_KVM_HOST_MODE */
+/* it is native kernel without any virtualization or */
+/* pure guest kernel */
+
+#define	INIT_HOST_VCPU_STATE_GREG_COPY(__ti, __vcpu)
+#define	INIT_HOST_GREGS_COPY(__ti, __vcpu)
+
+#endif	/* CONFIG_KVM_HOST_MODE */
+
+#ifdef	CONFIG_KVM_GUEST_KERNEL
+/* it is native guest kernel */
+#include
+#else	/* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is native host kernel with virtualization support */
+/* or it is paravirtualized host and guest kernel */
+#define	usd_cannot_be_expanded(regs)	kvm_usd_cannot_be_expanded(regs)
+#define	clear_vm_thread_flags() \
+({ \
+	/* \
+	 * If it is a new process with a new virtual space forked by QEMU \
+	 * after VM creation, and the created process execve()s other code, \
+	 * then it cannot inherit VM features \
+	 */ \
+	clear_thread_flag(TIF_VM_CREATED); \
+})
+
+#define	GET_PARAVIRT_GUEST_MODE(pv_guest, regs) \
+		KVM_GET_PARAVIRT_GUEST_MODE(pv_guest, regs)
+
+static inline void
+clear_virt_thread_struct(thread_info_t *thread_info)
+{
+	kvm_clear_virt_thread_struct(thread_info);
+}
+
+static __always_inline __interrupt void
+complete_switch_to_user_func(void)
+{
+	kvm_complete_switch_to_user_func();
+}
+static __always_inline __interrupt void
+complete_go2user(thread_info_t *ti, long fn)
+{
+	kvm_complete_go2user(ti, fn);
+}
+static inline void free_virt_task_struct(struct task_struct *task)
+{
+	/* nothing to free */
+}
+#endif	/* ! CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* ! _E2K_KVM_PROCESS_H */
diff --git a/arch/e2k/include/asm/kvm/ptrace.h b/arch/e2k/include/asm/kvm/ptrace.h
new file mode 100644
index 0000000..c05b345
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/ptrace.h
@@ -0,0 +1,654 @@
+#ifndef _E2K_KVM_PTRACE_H
+#define _E2K_KVM_PTRACE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#endif /* __ASSEMBLY__ */
+
+#include
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_USE_AAU
+#include
+#endif /* CONFIG_USE_AAU */
+#include
+#include
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+typedef enum inject_caller {
+	FROM_HOST_INJECT = 1 << 0,
+	FROM_PV_VCPU_TRAP_INJECT = 1 << 1,
+	FROM_PV_VCPU_SYSCALL_INJECT = 1 << 2,
+} inject_caller_t;
+
+#ifdef	CONFIG_VIRTUALIZATION
+
+#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native host kernel with virtualization support */
+#define	BOOT_TASK_SIZE	(BOOT_HOST_TASK_SIZE)
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel */
+#include
+/* #define	TASK_SIZE	(GUEST_TASK_SIZE) */
+/* #define	BOOT_TASK_SIZE	(BOOT_GUEST_TASK_SIZE) */
+#else	/* CONFIG_PARAVIRT_GUEST */
+/* it is paravirtualized host and guest kernel */
+#include
+/* #define	TASK_SIZE	(PARAVIRT_TASK_SIZE) */
+/* #define	BOOT_TASK_SIZE	(BOOT_PARAVIRT_TASK_SIZE) */
+#endif	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#ifdef	__KERNEL__
+
+/* some global registers are used to support virtualization mode */
+/* (see usage and real numbers at asm/glob_regs.h) */
+typedef struct host_gregs {
+	struct e2k_greg g[HOST_KERNEL_GREGS_PAIRS_SIZE];
+} host_gregs_t;
+
+/*
+ * We could check CR.pm and TIR.ip here, but that is not needed
+ * because whenever CR.pm = 1 or TIR.ip < TASK_SIZE, SBR points
+ * to user space. So checking SBR alone is enough.
+ *
+ * Testing SBR is necessary because of HW bug #59886 - the 'ct' command
+ * (return to user) may be interrupted with closed interrupts.
+ * The result - kernel's ip, psr.pm=1, but SBR points to user space.
+ * This case should be detected as user mode.
+ *
+ * Checking via SBR is also useful for detecting fast system calls as
+ * user mode.
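+ *
+ * Hence the is_user_mode()/is_kernel_mode() checks below reduce to a
+ * single comparison of the saved stack top (regs->stacks.top) against
+ * the user/kernel space boundary passed to them.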
+ */ +#define is_user_mode(regs, __USER_SPACE_TOP__) \ + ((regs)->stacks.top < (__USER_SPACE_TOP__)) +#define is_kernel_mode(regs, __KERNEL_SPACE_BOTTOM__) \ + ((regs)->stacks.top >= (__KERNEL_SPACE_BOTTOM__)) + +#define from_kernel_mode(cr1_lo) ((cr1_lo).CR1_lo_pm) +#define from_user_mode(cr1_lo) (!((cr1_lo).CR1_lo_pm)) + +#define is_from_user_IP(cr0_hi, __USER_SPACE_TOP__) \ +({ \ + unsigned long IP; \ + bool ret; \ + IP = (cr0_hi).CR0_hi_IP; \ + ret = (IP < (__USER_SPACE_TOP__)); \ + ret; \ +}) +#define is_from_kernel_IP(cr0_hi, __KERNEL_SPACE_BOTTOM__) \ +({ \ + unsigned long IP; \ + bool ret; \ + IP = (cr0_hi).CR0_hi_IP; \ + ret = (IP >= (__KERNEL_SPACE_BOTTOM__)); \ + ret; \ +}) + +#define from_user_IP(cr0_hi) is_from_user_IP(cr0_hi, TASK_SIZE) +#define from_kernel_IP(cr0_hi) is_from_kernel_IP(cr0_hi, TASK_SIZE) + +#define is_trap_from_user(regs, __USER_SPACE_TOP__) \ +({ \ + e2k_tir_lo_t tir_lo; \ + tir_lo.TIR_lo_reg = (regs)->TIR_lo; \ + tir_lo.TIR_lo_ip < (__USER_SPACE_TOP__); \ +}) +#define is_trap_from_kernel(regs, __KERNEL_SPACE_BOTTOM__) \ +({ \ + e2k_tir_lo_t tir_lo; \ + tir_lo.TIR_lo_reg = (regs)->TIR_lo; \ + tir_lo.TIR_lo_ip >= (__KERNEL_SPACE_BOTTOM__); \ +}) + +#if !defined(CONFIG_KVM_GUEST_KERNEL) && !defined(CONFIG_PARAVIRT_GUEST) +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +static inline void atomic_load_osgd_to_gd(void) +{ + native_atomic_load_osgd_to_gd(); +} + +#define SAVE_DAM(__dam) NATIVE_SAVE_DAM(__dam) +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ + +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ + +#include +#else + #error "Undefined type of virtualization" +#endif /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ + +#if defined(CONFIG_VIRTUALIZATION) +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* or pure guest kernel (not paravirtualized based on pv_ops) */ +#define guest_task_mode(task) \ + test_ti_thread_flag(task_thread_info(task), \ + TIF_VIRTUALIZED_GUEST) +#define native_user_mode(regs) is_user_mode(regs, NATIVE_TASK_SIZE) +#define guest_user_mode(regs) is_user_mode(regs, GUEST_TASK_SIZE) +#define native_kernel_mode(regs) is_kernel_mode(regs, NATIVE_TASK_SIZE) +#define guest_kernel_mode(regs) \ + (is_kernel_mode(regs, GUEST_TASK_SIZE) && \ + !native_kernel_mode(regs)) + +#define from_host_user_IP(cr0_hi) \ + is_from_user_IP(cr0_hi, NATIVE_TASK_SIZE) +#define from_host_kernel_IP(cr0_hi) \ + is_from_kernel_IP(cr0_hi, NATIVE_TASK_SIZE) +#define from_guest_user_IP(cr0_hi) \ + is_from_user_IP(cr0_hi, GUEST_TASK_SIZE) +#define from_guest_kernel_IP(cr0_hi) \ + (is_from_kernel_IP(cr0_hi, GUEST_TASK_SIZE) && \ + !from_host_kernel_IP(cr0_hi)) + +#define from_host_user_mode(cr1_lo) from_user_mode(cr1_lo) +#define from_host_kernel_mode(cr1_lo) from_kernel_mode(cr1_lo) +/* guest user is user of guest kernel, so USER MODE (pm = 0) */ +#define from_guest_user_mode(cr1_lo) from_user_mode(cr1_lo) + +#define is_call_from_user(cr0_hi, cr1_lo, __HOST__) \ + ((__HOST__) ? \ + is_call_from_host_user(cr0_hi, cr1_lo) : \ + is_call_from_guest_user(cr0_hi, cr1_lo)) +#define is_call_from_kernel(cr0_hi, cr1_lo, __HOST__) \ + ((__HOST__) ? 
\ + is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + is_call_from_guest_kernel(cr0_hi, cr1_lo)) + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +#define user_mode(regs) \ + ((regs) ? is_user_mode(regs, TASK_SIZE) : false) +#define kernel_mode(regs) \ + ((regs) ? is_kernel_mode(regs, TASK_SIZE) : true) + +#ifdef CONFIG_KVM_HW_VIRTUALIZATION +/* guest kernel can be: */ +/* user of host kernel, so USER MODE (pm = 0) */ +/* hardware virtualized guest kernel, so KERNEL MODE (pm = 1) */ +#define from_guest_kernel_mode(cr1_lo) \ + (from_kernel_mode(cr1_lo) || from_user_mode(cr1_lo)) +#define from_guest_kernel(cr0_hi, cr1_lo) \ + (from_guest_kernel_mode(cr1_lo) && \ + from_guest_kernel_IP(cr0_hi)) +#else /* ! CONFIG_KVM_HW_VIRTUALIZATION */ +/* guest kernel is user of host kernel, so USER MODE (pm = 0) */ +#define from_guest_kernel_mode(cr1_lo) \ + from_user_mode(cr1_lo) +#define from_guest_kernel(cr0_hi, cr1_lo) \ + (from_guest_kernel_mode(cr1_lo) && \ + from_guest_kernel_IP(cr0_hi)) +#endif /* CONFIG_KVM_HW_VIRTUALIZATION */ + +#define is_trap_from_host_kernel(regs) \ + is_trap_from_kernel(regs, NATIVE_TASK_SIZE) + +#define is_call_from_host_user(cr0_hi, cr1_lo) \ + (from_host_user_IP(cr0_hi) && from_host_user_mode(cr1_lo)) +#define is_call_from_host_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_user(cr0_hi, cr1_lo) : \ + from_host_user_mode(cr1_lo)) +#define is_call_from_guest_user(cr0_hi, cr1_lo) \ + (from_guest_user_IP(cr0_hi) && from_guest_user_mode(cr1_lo)) +#define is_call_from_guest_user_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_guest_user(cr0_hi, cr1_lo) : \ + from_guest_user_mode(cr1_lo)) +#define is_call_from_host_kernel(cr0_hi, cr1_lo) \ + (from_host_kernel_IP(cr0_hi) && from_host_kernel_mode(cr1_lo)) +#define is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_host_kernel(cr0_hi, cr1_lo) : \ + from_host_kernel_mode(cr1_lo)) +#define is_call_from_guest_kernel(cr0_hi, cr1_lo) \ + from_guest_kernel(cr0_hi, cr1_lo) +#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \ + ((!(ignore_IP)) ? is_call_from_guest_kernel(cr0_hi, cr1_lo) : \ + from_guest_kernel_mode(cr1_lo)) + +#define is_trap_on_user(regs, __HOST__) \ + ((__HOST__) ? \ + trap_from_host_user(regs) : \ + trap_from_guest_user(regs)) +#define is_trap_on_kernel(regs, __HOST__) \ + ((__HOST__) ? 
\
+ trap_from_host_kernel(regs) : \
+ (trap_from_guest_kernel(regs) || \
+ is_trap_from_host_kernel(regs)))
+
+#define ON_HOST_KERNEL() (NATIVE_NV_READ_PSR_REG_VALUE() & PSR_PM)
+
+#define __trap_from_user(regs) \
+ is_trap_on_user(regs, ON_HOST_KERNEL())
+#define __trap_from_kernel(regs) \
+ is_trap_on_kernel(regs, ON_HOST_KERNEL())
+#define trap_on_user(regs) __trap_from_user(regs)
+#define trap_on_kernel(regs) __trap_from_kernel(regs)
+
+#define call_from_user_mode(cr0_hi, cr1_lo) \
+ is_call_from_user(cr0_hi, cr1_lo, ON_HOST_KERNEL())
+#define call_from_kernel_mode(cr0_hi, cr1_lo) \
+ is_call_from_kernel(cr0_hi, cr1_lo, ON_HOST_KERNEL())
+#define call_from_user(regs) \
+ call_from_user_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo)
+#define call_from_kernel(regs) \
+ call_from_kernel_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo)
+
+#define __trap_from_host_user(regs) native_user_mode(regs)
+#define __trap_from_host_kernel(regs) native_kernel_mode(regs)
+#define __trap_from_guest_user(regs) guest_user_mode(regs)
+#define __trap_from_guest_kernel(regs) guest_kernel_mode(regs)
+
+#define __call_from_kernel(regs) call_from_kernel(regs)
+#define __call_from_user(regs) call_from_user(regs)
+
+#define trap_on_guest_kernel_mode(regs) \
+ from_guest_kernel_mode((regs)->crs.cr1_lo)
+#define trap_on_guest_kernel_IP(regs) \
+ (from_guest_kernel_IP((regs)->crs.cr0_hi) && \
+ !from_host_kernel_IP((regs)->crs.cr0_hi))
+#define host_trap_guest_user_mode(regs) \
+ (from_guest_user_mode((regs)->crs.cr1_lo) && \
+ __trap_from_guest_user(regs))
+#define host_trap_guest_kernel_mode(regs) \
+ (from_guest_kernel((regs)->crs.cr0_hi, \
+ (regs)->crs.cr1_lo) && \
+ __trap_from_guest_kernel(regs))
+#define guest_trap_user_mode(regs) \
+ (from_guest_kernel((regs)->crs.cr0_hi, \
+ (regs)->crs.cr1_lo) && \
+ __trap_from_guest_user(regs))
+#define guest_trap_kernel_mode(regs) \
+ (from_guest_kernel((regs)->crs.cr0_hi, \
+ (regs)->crs.cr1_lo) && \
+ __trap_from_guest_kernel(regs))
+
+#define trap_from_host_kernel_mode(regs) \
+ from_host_kernel_mode((regs)->crs.cr1_lo)
+#define trap_from_host_kernel_IP(regs) \
+ from_host_kernel_IP((regs)->crs.cr0_hi)
+#define trap_from_host_kernel(regs) \
+ (trap_from_host_kernel_mode(regs) && \
+ __trap_from_host_kernel(regs))
+#define trap_from_host_user(regs) \
+ (from_host_user_mode((regs)->crs.cr1_lo) && \
+ __trap_from_host_user(regs))
+/* macros to detect guest kernel traps on guest and on host */
+/* trap only on guest kernel */
+#define trap_from_guest_kernel(regs) \
+ (from_guest_kernel_mode((regs)->crs.cr1_lo) && \
+ __trap_from_guest_kernel(regs))
+/* macros to detect guest traps on the host; the guest has no guests of */
+/* its own, so these macros always return 'false' on a guest */
+/* trap occurred on guest user only */
+#define trap_from_guest_user(regs) \
+({ \
+ bool is; \
+ \
+ if (paravirt_enabled() || \
+ !test_thread_flag(TIF_VIRTUALIZED_GUEST)) \
+ /* it is a guest (which cannot run its own guests) */ \
+ /* or the trap did not occur on a guest process */ \
+ is = false; \
+ else if (host_trap_guest_user_mode(regs)) \
+ is = true; \
+ else \
+ is = false; \
+ is; \
+})
+/* macros to detect guest traps on the host; the guest has no guests of */
+/* its own, so these macros always return 'false' on a guest */
+/* trap occurred on a guest process (guest user or guest kernel), or on */
+/* the host while running a guest process (guest VCPU thread) */
+#define trap_on_guest(regs) \
+ (!paravirt_enabled() && \
+ test_thread_flag(TIF_VIRTUALIZED_GUEST))
+#define trap_on_pv_hv_guest(vcpu, regs) \
+ ((vcpu) != NULL && \
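+ /* a paravirtualized guest: the VCPU exists but is not */ \
+ /* hardware-virtualized (arch.is_hv is false) */ \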
!((vcpu)->arch.is_hv) && trap_on_guest(regs)) +/* guest trap occurred on guest user or kernel */ +#define guest_trap_on_host(regs) \ + (trap_on_guest(regs) && user_mode(regs)) +#define guest_trap_on_pv_hv_host(vcpu, regs) \ + (trap_on_pv_hv_guest(vcpu, regs) && user_mode(regs)) +/* trap occurred on guest kernel or user, but in host mode */ +/* and the trap can be due to guest or not */ +#define host_trap_on_guest(regs) \ + (guest_trap_on_host(regs) && \ + trap_from_host_kernel_mode(regs) && \ + trap_from_host_kernel_IP(regs)) +/* guest trap occurred on guest user or kernel or on host but due to guest */ +/* for example guest kernel address in hypercalls */ +#define due_to_guest_trap_on_host(regs) \ + (trap_on_guest(regs) && \ + (user_mode(regs) || \ + LIGHT_HYPERCALL_MODE(regs) || \ + GENERIC_HYPERCALL_MODE())) +#define due_to_guest_trap_on_pv_hv_host(vcpu, regs) \ + (trap_on_pv_hv_guest(vcpu, regs) && \ + (user_mode(regs) || \ + LIGHT_HYPERCALL_MODE(regs) || \ + GENERIC_HYPERCALL_MODE())) +/* page fault is from intercept */ +#define due_to_intc_page_fault(vcpu, regs) \ + ((vcpu) != NULL && \ + (vcpu)->arch.is_hv && \ + (regs)->trap->is_intc) +/* trap occurred on guest user only */ +#define guest_user_trap_on_host(regs) \ + (trap_on_guest(regs) && guest_trap_user_mode(regs)) +/* trap occurred on guest kernel only */ +#define guest_kernel_trap_on_host(regs) \ + (trap_on_guest(regs) && guest_trap_kernel_mode(regs)) + +/* macros to detect guest traps on guest and on host */ +/* trap on guest user, kernel or on host kernel due to guest */ +#define __guest_trap(regs) \ + (paravirt_enabled() || \ + test_thread_flag(TIF_VIRTUALIZED_GUEST)) + +#define addr_from_guest_user(addr) ((addr) < GUEST_TASK_SIZE) +#define addr_from_guest_kernel(addr) \ + ((addr) >= GUEST_TASK_SIZE && (addr) < HOST_TASK_SIZE) + +#define guest_user_addr_mode_page_fault(regs, instr_page, addr) \ + ((instr_page) ? 
guest_user_mode(regs) : \
+ guest_user_mode(regs) || \
+ (addr_from_guest_user(addr) && \
+ (!trap_from_host_kernel(regs) || \
+ LIGHT_HYPERCALL_MODE(regs) || \
+ GENERIC_HYPERCALL_MODE())))
+/* macros to detect a guest user address on the host; the guest has no */
+/* guests of its own, so these macros always return 'false' on a guest */
+/* faulted address is from guest user space */
+#define guest_mode_page_fault(regs, instr_page, addr) \
+ (trap_on_guest(regs) && \
+ guest_user_addr_mode_page_fault(regs, \
+ instr_page, addr))
+/* macros to detect an instruction page fault on guest kernel access; */
+/* such traps should be handled by the host because the guest kernel */
+/* is a user of the host */
+#define guest_kernel_instr_page_fault(regs) \
+ (trap_on_guest(regs) && \
+ guest_trap_kernel_mode(regs) && \
+ trap_on_guest_kernel_IP(regs))
+/* macros to detect an instruction page fault on guest user access; */
+/* such traps should be handled by the guest kernel */
+#define guest_user_instr_page_fault(regs) \
+ (trap_on_guest(regs) && guest_user_mode(regs))
+
+static inline e2k_addr_t
+check_is_user_address(struct task_struct *task, e2k_addr_t address)
+{
+ if (likely(address < TASK_SIZE))
+ return 0;
+ if (!paravirt_enabled()) {
+ pr_err("Address 0x%016lx is host kernel address\n",
+ address);
+ return -1;
+ } else if (address < NATIVE_TASK_SIZE) {
+ pr_err("Address 0x%016lx is guest kernel address\n",
+ address);
+ return -1;
+ } else {
+ pr_err("Address 0x%016lx is host kernel address\n",
+ address);
+ return -1;
+ }
+}
+#define IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \
+ (test_ti_thread_flag(task_thread_info(task), \
+ TIF_VIRTUALIZED_GUEST) && \
+ IS_GUEST_USER_ADDRESS(address))
+#define IS_GUEST_ADDRESS_TO_HOST(address) \
+ (paravirt_enabled() && !IS_HV_GM() && \
+ IS_HOST_KERNEL_ADDRESS(address))
+
+#ifdef CONFIG_KVM_GUEST_HW_PV
+/* FIXME Instead of ifdef, this should check for is_pv */
+#define print_host_user_address_ptes(mm, address) \
+ native_print_host_user_address_ptes(mm, address)
+#else
+/* guest page table is a pseudo PT and only the host PT is used */
+/* to translate any guest addresses */
+#define print_host_user_address_ptes(mm, address) \
+({ \
+ /* the function is relevant only for the guest kernel */ \
+ if (paravirt_enabled()) \
+ HYPERVISOR_print_guest_user_address_ptes((mm)->gmmid_nr, \
+ address); \
+})
+#endif /* CONFIG_KVM_GUEST_HW_PV */
+#else /* CONFIG_KVM_GUEST_KERNEL */
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include 
+#endif /* ! CONFIG_KVM_GUEST_KERNEL */
+
+#else /* ! CONFIG_VIRTUALIZATION */
+/* it is native kernel without any virtualization */
+
+#define guest_task_mode(task) false /* only native tasks */
+
+#define user_mode(regs) is_user_mode(regs, TASK_SIZE)
+#define kernel_mode(regs) is_kernel_mode(regs, TASK_SIZE)
+
+#define is_call_from_host_user(cr0_hi, cr1_lo) \
+ (from_user_IP(cr0_hi) && from_user_mode(cr1_lo))
+#define is_call_from_host_user_IP(cr0_hi, cr1_lo, ignore_IP) \
+ ((!(ignore_IP)) ? is_call_from_host_user(cr0_hi, cr1_lo) : \
+ from_user_mode(cr1_lo))
+#define is_call_from_guest_user(cr0_hi, cr1_lo) false
+#define is_call_from_guest_user_IP(cr0_hi, cr1_lo, ignore_IP) false
+#define is_call_from_host_kernel(cr0_hi, cr1_lo) \
+ (from_kernel_IP(cr0_hi) && from_kernel_mode(cr1_lo))
+#define is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP) \
+ ((!(ignore_IP)) ? \
+ is_call_from_host_kernel(cr0_hi, cr1_lo) : \
+ from_kernel_mode(cr1_lo))
+#define is_call_from_guest_kernel(cr0_hi, cr1_lo) false
+#define is_call_from_guest_kernel_IP(cr0_hi, cr1_lo, ignore_IP) false
+
+#define is_call_from_user(cr0_hi, cr1_lo, __HOST__) \
+ is_call_from_host_user(cr0_hi, cr1_lo)
+#define is_call_from_kernel(cr0_hi, cr1_lo, __HOST__) \
+ is_call_from_host_kernel(cr0_hi, cr1_lo)
+
+#define __trap_from_user(regs) is_trap_from_user(regs, TASK_SIZE)
+#define __trap_from_kernel(regs) is_trap_from_kernel(regs, TASK_SIZE)
+#define trap_on_user(regs) user_mode(regs)
+#define trap_on_kernel(regs) kernel_mode(regs)
+
+/* macros to detect guest traps on the host */
+/* Virtualization is off, so no guests exist and */
+/* these macros always return 'false' */
+#define trap_on_guest(regs) false
+/* trap occurred on guest user or kernel */
+#define guest_trap_on_host(regs) \
+ false /* guest is not supported */
+/* trap occurred on guest kernel or user, but in host mode */
+/* and the trap can be due to guest or not */
+#define host_trap_on_guest(regs) \
+ false /* guest is not supported */
+/* trap occurred on guest user or kernel or on host but due to guest */
+#define due_to_guest_trap_on_host(regs) \
+ false /* guest is not supported */
+/* page fault is from intercept */
+#define due_to_intc_page_fault(vcpu, regs) \
+ false /* guest is not supported */
+/* trap occurred on guest user only */
+#define guest_user_trap_on_host(regs) \
+ false /* guest is not supported */
+/* trap occurred on guest kernel only */
+#define guest_kernel_trap_on_host(regs) \
+ false /* guest is not supported */
+
+/* macros to detect guest traps on guest and on host */
+/* trap on guest user, kernel or on host kernel due to guest */
+#define __guest_trap(regs) \
+ false /* guest is not supported */
+/* macros to detect guest kernel traps on guest and on host */
+/* trap only on guest kernel */
+#define trap_from_guest_kernel(regs) \
+ false /* guest is not supported */
+
+#define __call_from_kernel(regs) from_kernel_mode((regs)->crs.cr1_lo)
+#define __call_from_user(regs) from_user_mode((regs)->crs.cr1_lo)
+
+#define ON_HOST_KERNEL() true
+#define call_from_user_mode(cr0_hi, cr1_lo) \
+ is_call_from_user(cr0_hi, cr1_lo, ON_HOST_KERNEL())
+#define call_from_kernel_mode(cr0_hi, cr1_lo) \
+ is_call_from_kernel(cr0_hi, cr1_lo, ON_HOST_KERNEL())
+#define call_from_user(regs) \
+ call_from_user_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo)
+#define call_from_kernel(regs) \
+ call_from_kernel_mode((regs)->crs.cr0_hi, (regs)->crs.cr1_lo)
+
+static inline e2k_addr_t
+check_is_user_address(struct task_struct *task, e2k_addr_t address)
+{
+ return native_check_is_user_address(task, address);
+}
+#define IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \
+ NATIVE_IS_GUEST_USER_ADDRESS_TO_PVA(task, address)
+#define IS_GUEST_ADDRESS_TO_HOST(address) \
+ NATIVE_IS_GUEST_ADDRESS_TO_HOST(address)
+#define print_host_user_address_ptes(mm, address) \
+ native_print_host_user_address_ptes(mm, address)
+
+#define guest_mode_page_fault(regs, instr_page, addr) false
+
+#endif /* CONFIG_VIRTUALIZATION */
+
+#ifndef CONFIG_VIRTUALIZATION
+/* it is native kernel without virtualization support */
+#define LIGHT_HYPERCALL_MODE(regs) 0 /* hypercalls not supported */
+#define TI_GENERIC_HYPERCALL_MODE(thread_info) 0 /* hypercalls not supported */
+#define GENERIC_HYPERCALL_MODE() 0 /* hypercalls not supported */
+#define IN_LIGHT_HYPERCALL() 0 /* hypercalls not supported */
+#define IN_GENERIC_HYPERCALL() 0 /* hypercalls not supported */
+#define IN_HYPERCALL() 0 /* hypercalls not supported */
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized) */
+#include 
+#elif defined(CONFIG_VIRTUALIZATION) || defined(CONFIG_PARAVIRT_GUEST)
+/* It is native host kernel with virtualization support on */
+/* or it is paravirtualized host and guest kernel */
+
+#define LIGHT_HYPERCALL_MODE(pt_regs) \
+({ \
+ pt_regs_t *__regs = (pt_regs); \
+ bool is_light_hypercall; \
+ \
+ is_light_hypercall = \
+ (__regs->flags & LIGHT_HYPERCALL_FLAG_PT_REGS) != 0; \
+ is_light_hypercall; \
+})
+#define TI_LIGHT_HYPERCALL_MODE(thread_info) \
+({ \
+ thread_info_t *__ti = (thread_info); \
+ test_ti_thread_flag(__ti, TIF_LIGHT_HYPERCALL); \
+})
+#define IN_LIGHT_HYPERCALL() TI_LIGHT_HYPERCALL_MODE(current_thread_info())
+#define TI_GENERIC_HYPERCALL_MODE(thread_info) \
+({ \
+ thread_info_t *__ti = (thread_info); \
+ test_ti_thread_flag(__ti, TIF_GENERIC_HYPERCALL); \
+})
+#define GENERIC_HYPERCALL_MODE() \
+ TI_GENERIC_HYPERCALL_MODE(current_thread_info())
+#define IN_GENERIC_HYPERCALL() GENERIC_HYPERCALL_MODE()
+#define IN_HYPERCALL() \
+ (IN_LIGHT_HYPERCALL() || IN_GENERIC_HYPERCALL())
+#else /* ! CONFIG_VIRTUALIZATION && ! CONFIG_PARAVIRT_GUEST */
+ #error "Unknown virtualization type"
+#endif /* ! CONFIG_VIRTUALIZATION */
+
+#ifdef CONFIG_KVM_HOST_MODE
+/* It is native host kernel with virtualization support on */
+
+/*
+ * Additional context for a paravirtualized guest, saved/restored in the
+ * 'signal_stack_context' structure to handle traps/syscalls by the guest
+ */
+
+typedef struct pv_vcpu_ctxt {
+ inject_caller_t inject_from; /* reason of injection */
+ int trap_no; /* number of recursive trap */
+ u64 sys_rval; /* return value of guest system call */
+ e2k_psr_t guest_psr; /* guest PSR state before trap */
+ bool irq_under_upsr; /* is IRQ control under UPSR? */
+ bool in_sig_handler; /* signal handler in progress */
+} pv_vcpu_ctxt_t;
+
+#else /* !CONFIG_KVM_HOST_MODE */
+/* it is native kernel without any virtualization */
+/* or pure guest kernel (not paravirtualized) */
+
+typedef struct pv_vcpu_ctxt {
+ /* empty structure */
+} pv_vcpu_ctxt_t;
+
+#endif /* CONFIG_KVM_HOST_MODE */
+
+#ifdef CONFIG_VIRTUALIZATION
+
+static inline struct pt_regs *find_guest_user_regs(struct pt_regs *regs)
+{
+ struct pt_regs *guser_regs = regs;
+ do {
+ if (guest_user_mode(guser_regs))
+ break;
+ if (guser_regs->next != NULL &&
+ guser_regs->next <= guser_regs) {
+ /* pt_regs are allocated only on the stack, and the */
+ /* stack grows down, so the next structure can only */
+ /* be above the current one */
+ pr_err("%s(): invalid list of pt_regs structures: "
+ "next regs %px below current %px\n",
+ __func__, guser_regs->next, guser_regs);
+ WARN_ON(true);
+ guser_regs = NULL;
+ break;
+ }
+ guser_regs = guser_regs->next;
+ } while (guser_regs);
+
+ return guser_regs;
+}
+#else /* ! 
CONFIG_VIRTUALIZATION */ +static inline struct pt_regs *find_guest_user_regs(struct pt_regs *regs) +{ + return NULL; +} +#endif /* CONFIG_VIRTUALIZATION */ + + +#if defined(CONFIG_SMP) +extern unsigned long profile_pc(struct pt_regs *regs); +#else +#define profile_pc(regs) instruction_pointer(regs) +#endif +extern void show_regs(struct pt_regs *); +extern int syscall_trace_entry(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); + +#endif /* __KERNEL__ */ +#endif /* _E2K_KVM_PTRACE_H */ + diff --git a/arch/e2k/include/asm/kvm/pv-emul.h b/arch/e2k/include/asm/kvm/pv-emul.h new file mode 100644 index 0000000..fd8b854 --- /dev/null +++ b/arch/e2k/include/asm/kvm/pv-emul.h @@ -0,0 +1,283 @@ +#ifndef __KVM_E2K_PV_EMUL_H +#define __KVM_E2K_PV_EMUL_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_VIRTUALIZATION + +static __always_inline bool +kvm_host_at_pv_vcpu_mode(thread_info_t *ti) +{ + return ti->vcpu && test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE); +} + +static __always_inline void +kvm_set_intc_emul_flag(pt_regs_t *regs) +{ + regs->flags |= TRAP_AS_INTC_EMUL_PT_REGS; +} + +static __always_inline bool +kvm_test_intc_emul_flag(pt_regs_t *regs) +{ + return !!(regs->flags & TRAP_AS_INTC_EMUL_PT_REGS); +} + +static __always_inline void +kvm_clear_intc_emul_flag(pt_regs_t *regs) +{ + regs->flags &= ~TRAP_AS_INTC_EMUL_PT_REGS; +} + +static __always_inline bool +kvm_test_and_clear_intc_emul_flag(pt_regs_t *regs) +{ + bool is_emul = kvm_test_intc_emul_flag(regs); + kvm_clear_intc_emul_flag(regs); + return is_emul; +} +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ +static inline bool +host_test_intc_emul_mode(const struct pt_regs *regs) +{ + if (likely(native_current_thread_info()->vcpu == NULL)) { + return false; + } else if (regs == NULL) { + return false; + } else if (!kvm_test_intc_emul_flag((pt_regs_t *)regs)) { + /* host is not in interception emulation mode */ + return false; + } + + return true; +} +#else /* !CONFIG_KVM_HOST_MODE */ +/* it is not host kernel */ +static inline bool +host_test_intc_emul_mode(const pt_regs_t *regs) +{ + return false; +} +#endif /* CONFIG_KVM_HOST_MODE */ + +static inline int kvm_get_vcpu_intc_TIRs_num(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.intc_ctxt.nr_TIRs; +} + +static inline bool kvm_check_is_vcpu_intc_TIRs_empty(struct kvm_vcpu *vcpu) +{ + if (kvm_get_vcpu_intc_TIRs_num(vcpu) < 0) + return true; + /* TIRs have traps */ + return false; +} + +static inline bool +kvm_check_is_vcpu_guest_stacks_empty(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (!regs->g_stacks_valid) { + /* guest kernel stacks is not even inited */ + return true; + } + return !!(regs->g_stacks.psp_hi.PSP_hi_ind == 0 && + regs->g_stacks.pcsp_hi.PCSP_hi_ind == 0 && + !regs->need_inject); +} + +static inline bool +kvm_is_vcpu_guest_stacks_pending(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (!kvm_check_is_vcpu_guest_stacks_empty(vcpu, regs)) { + if (!regs->g_stacks_active) { + return true; + } + } + return false; +} + +static inline void +kvm_clear_vcpu_guest_stacks_pending(struct kvm_vcpu *vcpu, pt_regs_t *regs) +{ + if (regs->g_stacks_valid) { + regs->g_stacks_active = true; + } +} + +extern noinline void insert_pv_vcpu_traps(thread_info_t *ti, pt_regs_t *regs); + +extern void kvm_emulate_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs, + trap_pt_regs_t *trap); +extern void return_from_pv_vcpu_intc(struct thread_info *ti, pt_regs_t *regs); +extern bool 
pv_vcpu_syscall_intc(thread_info_t *ti, pt_regs_t *regs); + +static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.sw_ctxt.in_hypercall; +} + +static inline gthread_info_t *pv_vcpu_get_gti(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + return vcpu->arch.gti; + } + return NULL; +} + +static inline void pv_vcpu_set_gti(struct kvm_vcpu *vcpu, gthread_info_t *gti) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + vcpu->arch.gti = gti; + } else { + KVM_BUG_ON(true); + } +} + +static inline int pv_vcpu_get_gpid_id(struct kvm_vcpu *vcpu) +{ + gthread_info_t *gti; + + gti = pv_vcpu_get_gti(vcpu); + if (likely(gti != NULL)) { + return gti->gpid->nid.nr; + } else { + return -EINVAL; + } +} + +static inline gmm_struct_t *pv_mmu_get_init_gmm(struct kvm *kvm) +{ + return kvm->arch.init_gmm; +} + +static inline gmm_struct_t *pv_vcpu_get_init_gmm(struct kvm_vcpu *vcpu) +{ + return pv_mmu_get_init_gmm(vcpu->kvm); +} + +static inline bool pv_vcpu_is_init_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + return gmm == pv_vcpu_get_init_gmm(vcpu); + } else { + KVM_BUG_ON(true); + } + return false; +} + +static inline void pv_vcpu_clear_gmm(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + vcpu->arch.mmu.gmm = NULL; + } else { + KVM_BUG_ON(true); + } +} + +static inline gmm_struct_t *pv_vcpu_get_gmm(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + if (vcpu->arch.mmu.gmm != NULL) { + return vcpu->arch.mmu.gmm; + } else { + return pv_vcpu_get_init_gmm(vcpu); + } + } else { + KVM_BUG_ON(true); + } + return NULL; +} + +static inline void pv_vcpu_set_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + if (gmm != NULL && !pv_vcpu_is_init_gmm(vcpu, gmm)) { + vcpu->arch.mmu.gmm = gmm; + } else { + pv_vcpu_clear_gmm(vcpu); + } + } else { + KVM_BUG_ON(true); + } +} + +static inline gmm_struct_t *pv_vcpu_get_active_gmm(struct kvm_vcpu *vcpu) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + return vcpu->arch.mmu.active_gmm; + } else { + KVM_BUG_ON(true); + } + return NULL; +} + +static inline void +pv_vcpu_set_active_gmm(struct kvm_vcpu *vcpu, gmm_struct_t *gmm) +{ + if (likely(!vcpu->arch.is_hv && vcpu->arch.is_pv)) { + vcpu->arch.mmu.active_gmm = gmm; + } else { + KVM_BUG_ON(true); + } +} + +static inline mm_context_t *pv_vcpu_get_gmm_context(struct kvm_vcpu *vcpu) +{ + return &pv_vcpu_get_gmm(vcpu)->context; +} + +#else /* !CONFIG_VIRTUALIZATION */ +static __always_inline bool +kvm_host_at_pv_vcpu_mode(thread_info_t *ti) +{ + return false; +} + +static __always_inline void +kvm_set_intc_emul_flag(pt_regs_t *regs) +{ +} + +static __always_inline bool +kvm_test_intc_emul_flag(pt_regs_t *regs) +{ + return false; +} + +static __always_inline void +kvm_clear_intc_emul_flag(pt_regs_t *regs) +{ +} + +static __always_inline bool +kvm_test_and_clear_intc_emul_flag(pt_regs_t *regs) +{ + return false; +} +static inline bool +host_test_intc_emul_mode(const pt_regs_t *regs) +{ + return false; +} +static inline void insert_pv_vcpu_traps(thread_info_t *ti, pt_regs_t *regs) +{ +} + +static inline bool kvm_vcpu_in_hypercall(struct kvm_vcpu *vcpu) +{ + return false; +} + +#endif /* CONFIG_VIRTUALIZATION */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __KVM_E2K_PV_EMUL_H */ diff --git a/arch/e2k/include/asm/kvm/regs_state.h b/arch/e2k/include/asm/kvm/regs_state.h new file mode 100644 index 0000000..85df1aa --- /dev/null +++ b/arch/e2k/include/asm/kvm/regs_state.h @@ -0,0 +1,463 @@ +#ifndef _E2K_KVM_REGS_STATE_H +#define _E2K_KVM_REGS_STATE_H + +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#include +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_VIRTUALIZATION + +#undef DEBUG_GREGS_MODE +#undef DebugGREGS +#define DEBUG_GREGS_MODE 0 /* global registers save/restore */ +#define DebugGREGS(fmt, args...) \ +({ \ + if (DEBUG_GREGS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_SAVE_GREGS_EXCEPT_HOST_V2(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_SAVE_GREGS_EXCEPT_HOST_V5(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V2, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) +#define DO_SAVE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_SAVE_GREGS_ON_MASK(gregs, E2K_ISET_V5, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) + +#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_RESTORE_GREGS_EXCEPT_HOST_V2(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, GUEST_GREGS_MASK) +#define DO_RESTORE_GREGS_EXCEPT_HOST_V5(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, GUEST_GREGS_MASK) + +#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V2(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V2, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) +#define DO_RESTORE_GUEST_LOCAL_GREGS_EXCEPT_KERNEL_V5(gregs) \ + DO_RESTORE_GREGS_ON_MASK(gregs, E2K_ISET_V5, \ + GLOBAL_GREGS_USER_MASK | GUEST_GREGS_MASK) + +#ifndef CONFIG_E2K_ISET_VER +#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + (machine.host.save_guest_gregs(gregs)) +#define RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + (machine.host.restore_guest_gregs(gregs)) +#elif CONFIG_E2K_ISET_VER < 5 +#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V2((gregs)->g) +#define RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V2((gregs)->g) +#else /* CONFIG_E2K_ISET_VER >= 5 */ +#define SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_SAVE_GUEST_GREGS_EXCEPT_KERNEL_V5((gregs)->g) +#define RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs) \ + DO_RESTORE_GUEST_GREGS_EXCEPT_KERNEL_V5((gregs)->g) +#endif /* CONFIG_E2K_ISET_VER */ + +#define SET_GUEST_USER_GREG(gregs, gr_no, gr_main, gr_ext) \ +({ \ + global_regs_t *greg_to = (gregs); \ + \ + greg_to->g[(gr_no)].base = (gr_main); \ + greg_to->g[(gr_no)].ext = (gr_ext); \ +}) +#define COPY_GUEST_USER_GREG_ON_MASK(gr_from, gr_to, gr_mask) \ +({ \ + global_regs_t *greg_from = (gr_from); \ + global_regs_t *greg_to = (gr_to); \ + unsigned long cur_mask = (gr_mask); \ + int gr_no = 0; \ + \ + while (cur_mask) { \ + if (cur_mask & 0x3UL) { \ + SAVE_GUEST_USER_GREG_PAIR( \ + 
greg_from->g, gr_no, greg_to->g, gr_no); \ + } \ + gr_no += 2; \ + cur_mask >>= 2; \ + } \ +}) +#define SAVE_GUEST_USER_GREG_PAIR(__ti_gregs, ti_gr_no, \ + __gti_gregs, gti_gr_no) \ +({ \ + NATIVE_MOVE_TAGGED_QWORD( \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 0].base), \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 1].base), \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 0].base), \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 1].base)); \ + (__gti_gregs)[(gti_gr_no) + 0].ext = \ + (__ti_gregs)[(ti_gr_no) + 0].ext; \ + (__gti_gregs)[(gti_gr_no) + 1].ext = \ + (__ti_gregs)[(ti_gr_no) + 1].ext; \ +}) +#define RESTORE_GUEST_USER_GREG_PAIR(__ti_gregs, ti_gr_no, \ + __gti_gregs, gti_gr_no) \ +({ \ + NATIVE_MOVE_TAGGED_QWORD( \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 0].base), \ + (e2k_addr_t)&((__gti_gregs)[gti_gr_no + 1].base), \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 0].base), \ + (e2k_addr_t)&((__ti_gregs)[ti_gr_no + 1].base)); \ + (__ti_gregs)[(ti_gr_no) + 0].ext = \ + (__gti_gregs)[(gti_gr_no) + 0].ext; \ + (__ti_gregs)[(ti_gr_no) + 1].ext = \ + (__gti_gregs)[(gti_gr_no) + 1].ext; \ +}) +#define COPY_GUEST_KERNEL_GREGS_FROM_TI(__ti_gregs, __gti_regs) \ +({ \ + SAVE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CURRENT_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CURRENT_GREGS_PAIRS_INDEX_LO); \ + SAVE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CPU_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CPU_GREGS_PAIRS_INDEX_LO); \ +}) +#define COPY_GUEST_KERNEL_GREGS_TO_TI(__ti_gregs, __gti_regs) \ +({ \ + RESTORE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CURRENT_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CURRENT_GREGS_PAIRS_INDEX_LO); \ + RESTORE_GUEST_USER_GREG_PAIR( \ + __ti_gregs, CPU_GREGS_PAIRS_INDEX_LO, \ + __gti_regs, CPU_GREGS_PAIRS_INDEX_LO); \ +}) + +#define SAVE_GUEST_KERNEL_GREGS_AT_GTI(__ti, __gti, __gregs) \ +({ \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + __gti->gregs_active, __gti->gregs_valid, \ + __gti->gregs_for_currents_valid); \ + WARN_ON(__gti->gregs_active && __gti->gregs_for_currents_valid);\ + SAVE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CURRENT_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CURRENT_GREGS_PAIR_LO); \ + SAVE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CPU_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CPU_GREGS_PAIR_LO); \ + __gti->gregs_for_currents_valid = 1; \ + DebugGREGS("set gregs_for_currents_valid %d\n", \ + __gti->gregs_for_currents_valid); \ +}) +#define RESTORE_GUEST_KERNEL_GREGS_AT_TI(__ti, __gti, __gregs) \ +({ \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + __gti->gregs_active, __gti->gregs_valid, \ + __gti->gregs_for_currents_valid); \ + WARN_ON(__gti->gregs_active && !__gti->gregs_for_currents_valid); \ + RESTORE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CURRENT_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CURRENT_GREGS_PAIR_LO); \ + RESTORE_GUEST_USER_GREG_PAIR( \ + (__ti)->k_gregs.g, CPU_GREGS_PAIRS_INDEX_LO, \ + (__gregs)->g, CPU_GREGS_PAIR_LO); \ + __gti->gregs_for_currents_valid = 0; \ + DebugGREGS("clear gregs_for_currents_valid %d\n", \ + __gti->gregs_for_currents_valid); \ +}) + +#define DO_INIT_GUEST_USER_UPSR(__gti, __upsr) \ +({ \ + (__gti)->u_upsr = __upsr; \ + (__gti)->u_upsr_valid = true; \ +}) +#define DO_INIT_GUEST_KERNEL_UPSR(__gti, __upsr) \ +({ \ + (__gti)->k_upsr = __upsr; \ + (__gti)->k_upsr_valid = true; \ +}) +#define DO_SAVE_GUEST_USER_UPSR(__gti, __upsr) \ +({ \ + GTI_BUG_ON((__gti)->u_upsr_valid); \ + DO_INIT_GUEST_USER_UPSR(__gti, __upsr); \ +}) +#define 
DO_SAVE_GUEST_KERNEL_UPSR(__gti, __upsr) \ +({ \ + GTI_BUG_ON((__gti)->k_upsr_valid); \ + DO_INIT_GUEST_KERNEL_UPSR(__gti, __upsr); \ +}) +#define SAVE_GUEST_USER_UPSR_AT_GTI(__ti, __gti) \ + DO_SAVE_GUEST_USER_UPSR(__gti, (__ti)->upsr) +#define SAVE_GUEST_KERNEL_UPSR_AT_GTI(__ti, __gti) \ + DO_SAVE_GUEST_KERNEL_UPSR(__gti, (__ti)->upsr) +#define DO_RESTORE_GUEST_USER_UPSR(__gti, upsr_value) \ +({ \ + GTI_BUG_ON(!(__gti)->u_upsr_valid); \ + (upsr_value) = (__gti)->u_upsr; \ + (__gti)->u_upsr_valid = false; \ +}) +#define DO_RESTORE_GUEST_KERNEL_UPSR(__gti, upsr_value) \ +({ \ + GTI_BUG_ON(!(__gti)->k_upsr_valid); \ + (upsr_value) = (__gti)->k_upsr; \ + (__gti)->k_upsr_valid = false; \ +}) +#define RESTORE_GUEST_USER_UPSR_AT_TI(__ti, __gti) \ + DO_RESTORE_GUEST_USER_UPSR(__gti, (__ti)->upsr) +#define RESTORE_GUEST_KERNEL_UPSR_AT_TI(__ti, __gti) \ + DO_RESTORE_GUEST_KERNEL_UPSR(__gti, (__ti)->upsr) + +/* It is native host/guest kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* Save/restore global registers used by host to support guest */ +#define SAVE_GUEST_HOST_GREGS_AT_TI(__greg_pair, __gl_regs) \ +({ \ + SAVE_GUEST_USER_GREG_PAIR( \ + __greg_pair, HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO, \ + __gl_regs, VCPU_STATE_GREGS_PAIR_LO); \ +}) +#define RESTORE_GUEST_HOST_GREGS_AT_TI(__greg_pair, __gl_regs) \ +({ \ + RESTORE_GUEST_USER_GREG_PAIR( \ + __greg_pair, HOST_VCPU_STATE_GREGS_PAIRS_INDEX_LO, \ + __gl_regs, VCPU_STATE_GREGS_PAIR_LO); \ +}) + +#define SAVE_GUEST_USER_REGS_AT_GTI(thread_info, gthread_info, save_upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = (gthread_info); \ + global_regs_t *__gregs = &__gti->gregs; \ + \ + if (test_ti_thread_flag(__ti, TIF_VIRTUALIZED_GUEST)) { \ + SAVE_GUEST_KERNEL_GREGS_AT_GTI(__ti, __gti, __gregs); \ + SAVE_GUEST_HOST_GREGS_AT_TI(__ti->h_gregs.g, \ + __gregs->g); \ + if (save_upsr) { \ + SAVE_GUEST_USER_UPSR_AT_GTI(__ti, __gti); \ + } \ + } \ +}) +#define RESTORE_GUEST_USER_REGS_AT_TI(thread_info, gthread_info, restore_upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = (gthread_info); \ + host_gregs_t *__greg_pair = &__ti->h_gregs; \ + global_regs_t *__gregs = &__gti->gregs; \ + \ + if (test_ti_thread_flag(__ti, TIF_VIRTUALIZED_GUEST)) { \ + RESTORE_GUEST_KERNEL_GREGS_AT_TI(__ti, __gti, __gregs); \ + RESTORE_GUEST_HOST_GREGS_AT_TI(__greg_pair->g, \ + __gregs->g); \ + if (restore_upsr) { \ + RESTORE_GUEST_USER_UPSR_AT_TI(__ti, __gti); \ + } \ + } \ +}) +#define KVM_INIT_GUEST_USER_UPSR(thread_info, __upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + \ + DO_INIT_GUEST_USER_UPSR(__gti, __upsr); \ +}) +#define KVM_SAVE_GUEST_KERNEL_UPSR(thread_info, __upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + \ + DO_SAVE_GUEST_KERNEL_UPSR(__gti, __upsr); \ +}) +#define KVM_SAVE_GUEST_USER_UPSR(thread_info, __upsr) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + \ + DO_SAVE_GUEST_USER_UPSR(__gti, __upsr); \ +}) +#define KVM_RESTORE_GUEST_KERNEL_UPSR(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = __ti->gthread_info; \ + e2k_upsr_t __upsr; \ + \ + DO_RESTORE_GUEST_KERNEL_UPSR(__gti, __upsr); \ + NATIVE_WRITE_UPSR_REG(__upsr); \ +}) +#define KVM_RESTORE_GUEST_USER_UPSR(thread_info) \ +({ \ + thread_info_t *__ti = (thread_info); \ + gthread_info_t *__gti = 
__ti->gthread_info; \ + e2k_upsr_t __upsr; \ + \ + DO_RESTORE_GUEST_USER_UPSR(__gti, __upsr); \ + NATIVE_WRITE_UPSR_REG(__upsr); \ +}) +#define KVM_SET_GUEST_USER_INITIAL_UPSR(thread_info) \ +({ \ + NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); \ + KVM_RESTORE_GUEST_USER_UPSR(thread_info); \ +}) + +#define SAVE_GUEST_USER_GLOBAL_REGISTERS(gthread_info) \ +({ \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->gregs; \ + \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + gti->gregs_active, gti->gregs_valid, \ + gti->gregs_for_currents_valid); \ + /* user state of gregs which now are under current pointers */ \ + /* should be saved into current guest thread info structure */ \ + WARN_ON(gti->gregs_active && !gti->gregs_for_currents_valid); \ + \ + /* save current state of global registers excluding gregs */ \ + /* used by kernel */ \ + gregs->bgr = NATIVE_READ_BGR_REG(); \ + init_BGR_reg(); /* enable whole GRF */ \ + SAVE_GUEST_GREGS_EXCEPT_KERNEL(gregs); \ + NATIVE_WRITE_BGR_REG(gregs->bgr); \ + gti->gregs_valid = 1; \ + DebugGREGS("set gregs_valid %d\n", \ + gti->gregs_valid); \ +}) +#define SAVE_GUEST_USER_ALL_GLOBAL_REGISTERS(gthread_info, gti_base) \ +({ \ + gthread_info_t *__gti = (gthread_info); \ + gthread_info_t *src_gti = (gti_base); \ + global_regs_t *__gregs = &__gti->gregs; \ + global_regs_t *src_gregs = &src_gti->gregs; \ + \ + SAVE_GUEST_USER_GLOBAL_REGISTERS(__gti); \ + WARN_ON(__gti->gregs_for_currents_valid); \ + WARN_ON(!src_gti->gregs_for_currents_valid); \ + /* global register which now used by kernel */ \ + /* saved from parent global registers state */ \ + COPY_GUEST_USER_GREG_ON_MASK(src_gregs, __gregs, \ + GUEST_GREGS_MASK); \ + __gti->gregs_for_currents_valid = 1; \ + DebugGREGS("set gregs_for_currents_valid %d\n", \ + __gti->gregs_for_currents_valid); \ +}) + +#define RESTORE_GUEST_USER_GLOBAL_REGISTERS(gthread_info) \ +({ \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->gregs; \ + \ + DebugGREGS("now: gregs_active %d gregs_valid %d " \ + "gregs_for_currents_valid %d\n", \ + gti->gregs_active, gti->gregs_valid, \ + gti->gregs_for_currents_valid); \ + WARN_ON(gti->gregs_active && !gti->gregs_valid); \ + WARN_ON(gti->gregs_active && !gti->gregs_for_currents_valid); \ + \ + /* restore current state of global registers excluding gregs */ \ + /* used by kernel */ \ + init_BGR_reg(); /* enable whole GRF */ \ + RESTORE_GUEST_GREGS_EXCEPT_KERNEL(gregs); \ + NATIVE_WRITE_BGR_REG(gregs->bgr); \ + gti->gregs_valid = 0; \ + DebugGREGS("clear gregs_valid %d\n", \ + gti->gregs_valid); \ +}) +#define SAVE_PV_VCPU_GLOBAL_REGISTERS(gthread_info) \ +do { \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->sw_regs.gregs; \ + \ + machine.save_gregs_dirty_bgr(gregs); \ +} while (false) + +#define RESTORE_PV_VCPU_GLOBAL_REGISTERS(gthread_info) \ +do { \ + gthread_info_t *gti = (gthread_info); \ + global_regs_t *gregs = >i->sw_regs.gregs; \ + \ + machine.restore_gregs(gregs); \ +} while (false) + +#endif /* CONFIG_VIRTUALIZATION */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#define NATIVE_RESTORE_USER_CUT_REGS(ti, regs) /* CUTD is already set */ +#define RESTORE_USER_CUT_REGS(ti, regs, in_syscall) \ + NATIVE_RESTORE_USER_CUT_REGS(ti, regs) +#elif defined(CONFIG_KVM_HOST_MODE) +/* it is native host kernel with virtualization support */ +#define HOST_RESTORE_USER_CUT_REGS(ti, regs, in_syscall) \ +do { \ + e2k_cutd_t cutd; \ 
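+ /* choose the Compilation Unit Table descriptor (CUTD) to make */ \
+ /* live on return: keep the host's, switch to the guest kernel's */ \
+ /* sh_oscutd, or to the guest user's saved cutd (see below) */ \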
+ struct kvm_vcpu *vcpu; \
+\
+ if (likely(!test_ti_thread_flag((ti), TIF_HOST_AT_VCPU_MODE))) { \
+ /* host at native or hypervisor mode */ \
+ /* so CUT context is already set */ \
+ break; \
+ } \
+ vcpu = (ti)->vcpu; \
+ if (pv_vcpu_trap_on_guest_kernel(regs)) { \
+ /* guest kernel returns to kernel, no need to switch context */ \
+ break; \
+ } else if ((in_syscall) ? \
+ host_return_to_injected_guest_syscall((ti), (regs)) \
+ : \
+ host_return_to_injected_guest_trap(ti, (regs))) { \
+ /* need to switch to the guest kernel context */ \
+ cutd = vcpu->arch.hw_ctxt.sh_oscutd; \
+ } else { \
+ /* need to switch to the guest user context */ \
+ cutd = pv_vcpu_get_gti(vcpu)->stack_regs.cutd; \
+ } \
+ NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(cutd.CUTD_reg); \
+} while (false)
+#define RESTORE_USER_CUT_REGS(ti, regs, in_syscall) \
+ HOST_RESTORE_USER_CUT_REGS(ti, regs, in_syscall)
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized) */
+#include 
+#else
+ #error "Undefined virtualization mode"
+#endif /* !CONFIG_VIRTUALIZATION */
+
+#define RESTORE_USER_TRAP_CUT_REGS(ti, regs) \
+ RESTORE_USER_CUT_REGS(ti, regs, false)
+#define RESTORE_USER_SYSCALL_CUT_REGS(ti, regs) \
+ RESTORE_USER_CUT_REGS(ti, regs, true)
+
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST_KERNEL)
+#define SAVE_GUEST_VCPU_STATE_GREGS(regs) \
+({ \
+ DO_SAVE_DGREG(GUEST_VCPU_STATE_GREG, \
+ regs->guest_vcpu_state_greg); \
+})
+#define RESTORE_GUEST_VCPU_STATE_GREGS(regs) \
+({ \
+ DO_RESTORE_DGREG(GUEST_VCPU_STATE_GREG, \
+ regs->guest_vcpu_state_greg); \
+})
+#else /* ! CONFIG_KVM && ! CONFIG_KVM_GUEST_KERNEL */
+#define SAVE_GUEST_VCPU_STATE_GREGS(regs)
+#define RESTORE_GUEST_VCPU_STATE_GREGS(regs)
+#endif /* CONFIG_KVM || CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* _E2K_KVM_REGS_STATE_H */
+
diff --git a/arch/e2k/include/asm/kvm/runstate.h b/arch/e2k/include/asm/kvm/runstate.h
new file mode 100644
index 0000000..e54dda6
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/runstate.h
@@ -0,0 +1,479 @@
+#ifndef __KVM_E2K_RUNSTATE_H
+#define __KVM_E2K_RUNSTATE_H
+
+#if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is host kernel with virtualization support */
+/* or paravirtualized host and guest kernel */
+
+#include 
+#include 
+
+/*
+ * VCPU state structure contains virtual CPU running state info.
+ * The structure is common for host and guest and can (and should)
+ * be accessed by both.
+ * Guest access is done through a global pointer that should be loaded
+ * into a global register (GUEST_VCPU_STATE_GREG) or into the special
+ * CPU register GD.
+ * But GD can be used only if the guest kernel runs as a protected task.
+ */
+
+/*
+ * Basic accessing functions to/from virtual CPU running state info structure
+ * (see asm/kvm/guest.h) on host.
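+ *
+ * A sketch of the two access paths (the host-side accessors are the
+ * inline functions right below; the guest-side ones live in
+ * asm/kvm/guest.h):
+ *
+ *	host:	vcpu->arch.kmap_vcpu_state->runstate
+ *	guest:	through the vcpu_state pointer kept in the
+ *		GUEST_VCPU_STATE_GREG global register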
+ */ +static inline kvm_runstate_info_t * +kvm_get_vcpu_runstate(struct kvm_vcpu *vcpu) +{ + kvm_runstate_info_t *runstate; + + runstate = &(vcpu->arch.kmap_vcpu_state->runstate); + return runstate; +} +static inline int +kvm_get_guest_vcpu_runstate(struct kvm_vcpu *vcpu) +{ + return kvm_get_vcpu_runstate(vcpu)->state; +} +static inline void +kvm_set_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int state) +{ + kvm_get_vcpu_runstate(vcpu)->state = state; +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_entry_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_vcpu_runstate(vcpu)->state_entry_time; +} +static inline void +kvm_set_guest_vcpu_runstate_entry_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_get_vcpu_runstate(vcpu)->state_entry_time = time; +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_time(struct kvm_vcpu *vcpu, int runstate_type) +{ + return kvm_get_vcpu_runstate(vcpu)->time[runstate_type]; +} +static inline void +kvm_set_guest_vcpu_runstate_time(struct kvm_vcpu *vcpu, + int runstate_type, uint64_t time) +{ + kvm_get_vcpu_runstate(vcpu)->time[runstate_type] = time; +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_running_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_running); +} +static inline void +kvm_set_guest_vcpu_runstate_running_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_running, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_runnable_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_runnable); +} +static inline void +kvm_set_guest_vcpu_runstate_runnable_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_runnable, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_blocked_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_blocked); +} +static inline void +kvm_set_guest_vcpu_runstate_blocked_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_blocked, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_offline_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_offline); +} +static inline void +kvm_set_guest_vcpu_runstate_offline_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_offline, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_hcall_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_hcall); +} +static inline void +kvm_set_guest_vcpu_runstate_in_hcall_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_hcall, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_QEMU_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_QEMU); +} +static inline void +kvm_set_guest_vcpu_runstate_in_QEMU_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_QEMU, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_trap_time(struct kvm_vcpu *vcpu) +{ + return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_trap); +} +static inline void +kvm_set_guest_vcpu_runstate_in_trap_time(struct kvm_vcpu *vcpu, uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_trap, time); +} +static inline uint64_t +kvm_get_guest_vcpu_runstate_in_intercept_time(struct kvm_vcpu *vcpu) +{ 
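+ /* time accumulated while the host handles intercepts for this VCPU */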
+ return kvm_get_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_intercept); +} +static inline void +kvm_set_guest_vcpu_runstate_in_intercept_time(struct kvm_vcpu *vcpu, + uint64_t time) +{ + kvm_set_guest_vcpu_runstate_time(vcpu, RUNSTATE_in_intercept, time); +} + +/* + * Interrupts should be disabled by caller + */ +static inline void +kvm_do_update_guest_vcpu_runstate(struct kvm_vcpu *vcpu, + int new_state, uint64_t entry_time) +{ + int old_state = kvm_get_guest_vcpu_runstate(vcpu); + uint64_t old_entry_time = kvm_get_guest_vcpu_runstate_entry_time(vcpu); + uint64_t old_time; + uint64_t old_time_add; + + if (entry_time > old_entry_time) { + old_time = kvm_get_guest_vcpu_runstate_time(vcpu, old_state); + old_time_add = entry_time - old_entry_time; + old_time += old_time_add; + kvm_set_guest_vcpu_runstate_time(vcpu, old_state, old_time); + } + + kvm_set_guest_vcpu_runstate(vcpu, new_state); + kvm_set_guest_vcpu_runstate_entry_time(vcpu, entry_time); +} +static inline void +kvm_update_guest_vcpu_runstate(struct kvm_vcpu *vcpu, + int new_state, uint64_t entry_time) +{ + unsigned long flags; + + raw_local_irq_save(flags); + kvm_do_update_guest_vcpu_runstate(vcpu, new_state, entry_time); + raw_local_irq_restore(flags); +} +/* Interrupts should be disabled by caller */ +static inline void +kvm_do_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state) +{ + uint64_t cur_time; + + cur_time = nsecs_2cycles(ktime_to_ns(ktime_get())); + kvm_do_update_guest_vcpu_runstate(vcpu, new_state, cur_time); +} +static inline void +kvm_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state) +{ + unsigned long flags; + + raw_local_irq_save(flags); + kvm_do_update_guest_vcpu_current_runstate(vcpu, new_state); + raw_local_irq_restore(flags); +} + +/* Interrupts should be disabled by caller */ +static inline void +kvm_do_init_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int init_state) +{ + uint64_t cur_time; + + cur_time = nsecs_2cycles(ktime_to_ns(ktime_get())); + kvm_set_guest_vcpu_runstate(vcpu, init_state); + kvm_set_guest_vcpu_runstate_entry_time(vcpu, cur_time); + kvm_set_guest_vcpu_runstate_time(vcpu, init_state, 0); +} +static inline void +kvm_init_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int init_state) +{ + unsigned long flags; + + raw_local_irq_save(flags); + kvm_do_init_guest_vcpu_runstate(vcpu, init_state); + raw_local_irq_restore(flags); +} + +static inline long +kvm_do_get_guest_vcpu_stolen_time(struct kvm_vcpu *vcpu) +{ + s64 running, blocked, runnable, offline, stolen, in_hcall, in_intercept; + int runstate; + uint64_t entry_time; + s64 now; + + now = nsecs_2cycles(ktime_to_ns(ktime_get())); + entry_time = kvm_get_guest_vcpu_runstate_entry_time(vcpu); + BUG_ON(now < entry_time); + + runstate = kvm_get_guest_vcpu_runstate(vcpu); + + running = kvm_get_guest_vcpu_runstate_running_time(vcpu); + if (runstate == RUNSTATE_running) + running += (now - entry_time); + in_hcall = kvm_get_guest_vcpu_runstate_in_hcall_time(vcpu); + if (runstate == RUNSTATE_in_hcall) + in_hcall += (now - entry_time); + blocked = kvm_get_guest_vcpu_runstate_blocked_time(vcpu); + if (runstate == RUNSTATE_blocked) + blocked += (now - entry_time); + in_intercept = kvm_get_guest_vcpu_runstate_in_intercept_time(vcpu); + if (runstate == RUNSTATE_in_intercept) + in_intercept += (now - entry_time); + + /* work out how much time the VCPU has not been runn*ing* */ + runnable = kvm_get_guest_vcpu_runstate_runnable_time(vcpu) + + kvm_get_guest_vcpu_runstate_in_QEMU_time(vcpu) + + 
kvm_get_guest_vcpu_runstate_in_trap_time(vcpu); + if (runstate == RUNSTATE_runnable || runstate == RUNSTATE_in_trap || + runstate == RUNSTATE_in_QEMU) + runnable += (now - entry_time); + offline = kvm_get_guest_vcpu_runstate_offline_time(vcpu); + if (runstate == RUNSTATE_offline) + offline += (now - entry_time); + + stolen = runnable + offline; + + BUG_ON(now < stolen + running + in_hcall + blocked + in_intercept); + + return stolen; +} +static inline long +kvm_get_guest_vcpu_stolen_time(struct kvm_vcpu *vcpu) +{ + s64 stolen_time; + unsigned long flags; + + raw_local_irq_save(flags); + stolen_time = kvm_do_get_guest_vcpu_stolen_time(vcpu); + raw_local_irq_restore(flags); + + return stolen_time; +} + +static inline long +kvm_do_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu) +{ + s64 running, in_hcall, blocked, in_intercept; + int runstate; + uint64_t entry_time; + s64 now; + + do { + entry_time = kvm_get_guest_vcpu_runstate_entry_time(vcpu); + runstate = kvm_get_guest_vcpu_runstate(vcpu); + running = kvm_get_guest_vcpu_runstate_running_time(vcpu); + in_hcall = kvm_get_guest_vcpu_runstate_in_hcall_time(vcpu); + blocked = kvm_get_guest_vcpu_runstate_blocked_time(vcpu); + in_intercept = + kvm_get_guest_vcpu_runstate_in_intercept_time(vcpu); + now = nsecs_2cycles(ktime_to_ns(ktime_get())); + } while (entry_time != kvm_get_guest_vcpu_runstate_entry_time(vcpu)); + + BUG_ON(now < entry_time); + if (now > entry_time) { + if (runstate == RUNSTATE_running) + running += (now - entry_time); + if (runstate == RUNSTATE_in_hcall) + in_hcall += (now - entry_time); + if (runstate == RUNSTATE_blocked) + blocked += (now - entry_time); + if (runstate == RUNSTATE_in_intercept) + in_intercept += (now - entry_time); + } + + BUG_ON(now < in_hcall + blocked + running + in_intercept); + + return running + in_hcall + blocked + in_intercept; +} +static inline long +kvm_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu) +{ + s64 running_time; + unsigned long flags; + + raw_local_irq_save(flags); + running_time = kvm_do_get_guest_vcpu_running_time(vcpu); + raw_local_irq_restore(flags); + + return running_time; +} + +/* Runstate time is measured with ktime_get() cycles, it has to be monotonic across all CPUs */ +static inline unsigned long +kvm_get_host_runstate_ktime(void) +{ + return nsecs_2cycles(ktime_to_ns(ktime_get())); +} + +/* + * IRQs should be disabled by caller + * It always is true while caller is light hypercall + */ +static inline unsigned long +kvm_get_guest_running_time(struct kvm_vcpu *vcpu) +{ + cycles_t running; + + running = kvm_do_get_guest_vcpu_running_time(vcpu); + return running; +} + +#define CONFIG_DEBUG_VCPU_RUNSTATE +#ifndef CONFIG_DEBUG_VCPU_RUNSTATE +/* guest VCPU run state should be updated in traps and interrupts */ +static inline void +kvm_set_guest_runstate_in_user_trap(void) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return; + vcpu = ti->vcpu; + BUG_ON(vcpu == NULL); + BUG_ON(!irqs_disabled()); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_running); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap); +} +static inline void +kvm_set_guest_runstate_out_user_trap(void) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return; + vcpu = ti->vcpu; + BUG_ON(vcpu == NULL); + BUG_ON(!irqs_disabled()); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_trap); + 
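+ /* leaving the user trap handler: close the 'in_trap' interval */
+ /* and account the VCPU as 'running' again */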
kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_running); +} +static inline int +kvm_set_guest_runstate_in_kernel_trap(void) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + int cur_runstate; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return -1; + vcpu = ti->vcpu; + if (vcpu == NULL) + return -1; /* It is VIRQ VCPU: run state is unused */ + BUG_ON(!irqs_disabled()); + cur_runstate = kvm_get_guest_vcpu_runstate(vcpu); + BUG_ON(cur_runstate != RUNSTATE_running); + kvm_do_update_guest_vcpu_current_runstate(vcpu, RUNSTATE_in_trap); + return cur_runstate; +} +static inline void +kvm_set_guest_runstate_out_kernel_trap(int saved_runstate) +{ + thread_info_t *ti = current_thread_info(); + struct kvm_vcpu *vcpu; + + if (!test_ti_thread_flag(ti, TIF_VIRTUALIZED_GUEST)) + return; + vcpu = ti->vcpu; + if (vcpu == NULL) + return; /* It is VIRQ VCPU: run state is unused */ + BUG_ON(!irqs_disabled()); + BUG_ON(kvm_get_guest_vcpu_runstate(vcpu) != RUNSTATE_in_trap); + kvm_do_update_guest_vcpu_current_runstate(vcpu, saved_runstate); +} +#else /* CONFIG_DEBUG_VCPU_RUNSTATE */ +extern void kvm_set_guest_runstate_in_user_trap(void); +extern void kvm_set_guest_runstate_out_user_trap(void); +extern int kvm_set_guest_runstate_in_kernel_trap(void); +extern void kvm_set_guest_runstate_out_kernel_trap(int saved_runstate); +#endif /* ! CONFIG_DEBUG_VCPU_RUNSTATE */ + +#define SET_RUNSTATE_IN_USER_TRAP() kvm_set_guest_runstate_in_user_trap() +#define SET_RUNSTATE_OUT_USER_TRAP() kvm_set_guest_runstate_out_user_trap() +#define SET_RUNSTATE_IN_KERNEL_TRAP(cur_runstate) \ + (cur_runstate = kvm_set_guest_runstate_in_kernel_trap()) +#define SET_RUNSTATE_OUT_KERNEL_TRAP(cur_runstate) \ + kvm_set_guest_runstate_out_kernel_trap(cur_runstate) + +#else /* ! 
CONFIG_VIRTUALIZATION || CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel without virtualization support */
+/* or pure guest kernel (not paravirtualized based on pv_ops) */
+#define SET_RUNSTATE_IN_USER_TRAP()
+#define SET_RUNSTATE_OUT_USER_TRAP()
+#define SET_RUNSTATE_IN_KERNEL_TRAP(cur_runstate)
+#define SET_RUNSTATE_OUT_KERNEL_TRAP(cur_runstate)
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+
+/* FIXME: the following functions should not be used in pure guest kernel mode */
+static inline int
+kvm_get_guest_vcpu_runstate(struct kvm_vcpu *vcpu)
+{
+ return -1; /* a guest cannot support guests of its own */
+}
+static inline long
+kvm_do_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu)
+{
+ return -1; /* a guest cannot support guests of its own */
+}
+static inline long
+kvm_get_guest_vcpu_running_time(struct kvm_vcpu *vcpu)
+{
+ return -1; /* a guest cannot support guests of its own */
+}
+static inline unsigned long
+kvm_get_guest_running_time(struct kvm_vcpu *vcpu)
+{
+ return -1; /* a guest cannot support guests of its own */
+}
+static inline uint64_t
+kvm_get_guest_vcpu_runstate_entry_time(struct kvm_vcpu *vcpu)
+{
+ return -1; /* a guest cannot support guests of its own */
+}
+static inline void
+kvm_do_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state)
+{
+ /* a guest cannot support guests of its own */
+}
+static inline void
+kvm_update_guest_vcpu_current_runstate(struct kvm_vcpu *vcpu, int new_state)
+{
+ /* a guest cannot support guests of its own */
+}
+static inline void
+kvm_do_init_guest_vcpu_runstate(struct kvm_vcpu *vcpu, int init_state)
+{
+ /* a guest cannot support guests of its own */
+}
+static inline unsigned long
+kvm_get_host_runstate_ktime(void)
+{
+ return -1; /* a guest cannot support guests of its own */
+}
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+#endif /* CONFIG_VIRTUALIZATION && ! CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* __KVM_E2K_RUNSTATE_H */
diff --git a/arch/e2k/include/asm/kvm/spinlock.h b/arch/e2k/include/asm/kvm/spinlock.h
new file mode 100644
index 0000000..86ada45
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/spinlock.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_E2K_KVM_SPINLOCK_H
+#define __ASM_E2K_KVM_SPINLOCK_H
+/*
+ * This file implements the arch-dependent parts of kvm guest
+ * spin_lock()/spin_unlock() slow part
+ *
+ * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#include 
+#include 
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include 
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include 
+#else
+ #error "Unknown virtualization type"
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ arch_spinlock_t val;
+ u16 ticket, ready;
+
+ wmb(); /* wait for all stores to complete */
+ val.lock = __api_atomic16_add_return32_lock(
+ 1 << ARCH_SPINLOCK_HEAD_SHIFT, &lock->lock);
+ ticket = val.tail;
+ ready = val.head;
+
+ if (unlikely(ticket != ready)) {
+ /* the spinlock has more waiter(s), so wake them up */
+ arch_spin_unlock_slow(lock);
+ }
+}
+
+#endif /* __ASM_E2K_KVM_SPINLOCK_H */
diff --git a/arch/e2k/include/asm/kvm/spinlock_slow.h b/arch/e2k/include/asm/kvm/spinlock_slow.h
new file mode 100644
index 0000000..1a2f2d3
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/spinlock_slow.h
@@ -0,0 +1,44 @@
+#ifndef _ASM_E2K_KVM_SPINLOCK_SLOW_H
+#define _ASM_E2K_KVM_SPINLOCK_SLOW_H
+/*
+ * This file implements, on the host, the arch-dependent parts of kvm guest
+ * spin_lock()/spin_unlock() slow part
+ *
+ * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#include 
+#include 
+#include 
+#include 
+
+typedef struct spinlock_unlocked {
+ struct list_head unlocked_list;
+ struct thread_info *ti;
+ struct gthread_info *gti;
+ struct list_head checked_unlocked; /* list of tasks which */
+ /* have already seen */
+ /* that the spinlock */
+ /* was unlocked */
+
+ void *lock;
+} spinlock_unlocked_t;
+
+#define SPINLOCK_HASH_BITS 6
+#define SPINLOCK_HASH_SHIFT 4 /* [9:4] hash bits */
+#define SPINLOCK_HASH_SIZE (1 << SPINLOCK_HASH_BITS)
+#define spinlock_hashfn(lockp) \
+ hash_long(((unsigned long)(lockp)) >> SPINLOCK_HASH_SHIFT, \
+ SPINLOCK_HASH_BITS)
+#define SPINUNLOCKED_LIST_SIZE 32
+
+extern int kvm_guest_spin_lock_slow(struct kvm *kvm, void *lock,
+ bool check_unlock);
+extern int kvm_guest_spin_locked_slow(struct kvm *kvm, void *lock);
+extern int kvm_guest_spin_unlock_slow(struct kvm *kvm, void *lock,
+ bool add_to_unlock);
+
+extern int kvm_guest_spinlock_init(struct kvm *kvm);
+extern void kvm_guest_spinlock_destroy(struct kvm *kvm);
+
+#endif /* _ASM_E2K_KVM_SPINLOCK_SLOW_H */
\ No newline at end of file
diff --git a/arch/e2k/include/asm/kvm/stacks.h b/arch/e2k/include/asm/kvm/stacks.h
new file mode 100644
index 0000000..6c237d4
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/stacks.h
@@ -0,0 +1,43 @@
+/*
+ * KVM guest stacks support
+ * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru)
diff --git a/arch/e2k/include/asm/kvm/stacks.h b/arch/e2k/include/asm/kvm/stacks.h
new file mode 100644
index 0000000..6c237d4
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/stacks.h
@@ -0,0 +1,43 @@
+/*
+ * KVM guest stacks support
+ * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_KVM_STACKS_H
+#define _E2K_KVM_STACKS_H
+
+#include
+
+#ifdef	CONFIG_VIRTUALIZATION
+#ifdef	CONFIG_KVM_GUEST
+/*
+ * Guest kernel thread stacks descriptions
+ */
+#define	VIRT_KERNEL_C_STACK_SIZE	KVM_GUEST_KERNEL_C_STACK_SIZE
+#define	VIRT_KERNEL_PS_SIZE		KVM_GUEST_KERNEL_PS_SIZE
+#define	VIRT_KERNEL_PS_INIT_SIZE	KVM_GUEST_KERNEL_PS_INIT_SIZE
+#define	VIRT_KERNEL_PCS_SIZE		KVM_GUEST_KERNEL_PCS_SIZE
+#define	VIRT_KERNEL_PCS_INIT_SIZE	KVM_GUEST_KERNEL_PCS_INIT_SIZE
+
+#define	VIRT_KERNEL_P_STACK_PAGES	(VIRT_KERNEL_PS_SIZE / PAGE_SIZE)
+#define	VIRT_KERNEL_PC_STACK_PAGES	(VIRT_KERNEL_PCS_SIZE / PAGE_SIZE)
+
+/*
+ * Guest user task stacks descriptions
+ */
+#define	VIRT_USER_C_STACK_SIZE		KVM_GUEST_USER_C_STACK_SIZE
+#define	VIRT_USER_PS_SIZE		KVM_GUEST_USER_PS_SIZE
+#define	VIRT_USER_PS_INIT_SIZE		KVM_GUEST_USER_PS_INIT_SIZE
+#define	VIRT_USER_PCS_SIZE		KVM_GUEST_USER_PCS_SIZE
+#define	VIRT_USER_PCS_INIT_SIZE		KVM_GUEST_USER_PCS_INIT_SIZE
+
+#else	/* ! CONFIG_KVM_GUEST */
+ #error	"Unknown virtualization type"
+#endif	/* CONFIG_KVM_GUEST */
+
+#else	/* ! CONFIG_VIRTUALIZATION */
+#define	VIRT_KERNEL_P_STACK_PAGES	0
+#define	VIRT_KERNEL_PC_STACK_PAGES	0
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#endif	/* ! _E2K_KVM_STACKS_H */
diff --git a/arch/e2k/include/asm/kvm/string.h b/arch/e2k/include/asm/kvm/string.h
new file mode 100644
index 0000000..9f74649
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/string.h
@@ -0,0 +1,7 @@
+#ifndef _E2K_KVM_STRING_H_
+#define _E2K_KVM_STRING_H_
+
+#include
+#include
+
+#endif /* _E2K_KVM_STRING_H_ */
diff --git a/arch/e2k/include/asm/kvm/switch.h b/arch/e2k/include/asm/kvm/switch.h
new file mode 100644
index 0000000..e38861c
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/switch.h
@@ -0,0 +1,1293 @@
+#ifndef _E2K_KVM_SWITCH_H
+#define _E2K_KVM_SWITCH_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * See below the 'flags' argument of xxx_guest_enter()/xxx_guest_exit()
+ */
+#define	FULL_CONTEXT_SWITCH	0x0001U	/* save/restore full guest/host */
+					/* context */
+#define	FROM_HYPERCALL_SWITCH	0x0002U	/* save/restore full guest/host */
+					/* before/after hypercall */
+#define	USD_CONTEXT_SWITCH	0x0004U	/* save/restore local data stack */
+#define	DEBUG_REGS_SWITCH	0x0008U	/* save/restore debugging registers */
+#define	DONT_CU_REGS_SWITCH	0x0010U	/* do not save/restore CUT and CU */
+					/* registers */
+#define	DONT_MMU_CONTEXT_SWITCH	0x0020U	/* do not switch MMU context */
+#define	DONT_SAVE_GREGS_SWITCH	0x0040U	/* do not save global regs */
+#define	DONT_AAU_CONTEXT_SWITCH	0x0080U	/* do not switch AAU context */
+#define	EXIT_FROM_INTC_SWITCH	0x0100U	/* complete intercept emulation mode */
+#define	EXIT_FROM_TRAP_SWITCH	0x0200U	/* complete trap mode */
+
+static inline void
+native_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs,
+			unsigned flags)
+{
+	/* there can be no guests */
+
+	if (flags & EXIT_FROM_INTC_SWITCH)
+		return;
+	/* IMPORTANT: do NOT access current, current_thread_info() */
+	/* and per-cpu variables after this point */
+	if (flags & EXIT_FROM_TRAP_SWITCH) {
+		NATIVE_RESTORE_KERNEL_GREGS(&ti->k_gregs);
+	}
+}
+static inline void
+native_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
+			trap_pt_regs_t *trap, unsigned flags)
+{
+	/* there can be no guests */
+}
+static inline bool
+native_guest_trap_pending(struct thread_info *ti)
+{
+	/* there is no guest */
+	return false;
+}
+
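/*
 * For orientation, the 'flags' bits defined above are meant to be OR-ed
 * together per call site. A hypothetical (illustrative, not taken from a
 * real caller) exit into a hypercall that also switches the local data
 * stack but leaves the AAU context alone could look like this:
 */
	unsigned flags = FROM_HYPERCALL_SWITCH |	/* full hypercall switch */
			 USD_CONTEXT_SWITCH |		/* switch data stack too */
			 DONT_AAU_CONTEXT_SWITCH;	/* keep AAU context */

	host_guest_exit(ti, &vcpu->arch, flags);	/* defined below */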
+static inline bool
+native_trap_from_guest_user(struct thread_info *ti)
+{
+	/* there is no guest */
+	return false;
+}
+
+static inline bool
+native_syscall_from_guest_user(struct thread_info *ti)
+{
+	/* there is no guest */
+	return false;
+}
+
+static inline struct e2k_stacks *
+native_trap_guest_get_restore_stacks(struct thread_info *ti,
+					struct pt_regs *regs)
+{
+	return &regs->stacks;
+}
+
+static inline struct e2k_stacks *
+native_syscall_guest_get_restore_stacks(struct thread_info *ti,
+					struct pt_regs *regs)
+{
+	return &regs->stacks;
+}
+
+/*
+ * The function should return whether the system call came from a guest
+ */
+static inline bool
+native_guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+{
+	/* there can be no guests */
+
+	return false;	/* it is not a guest system call */
+}
+static inline void
+native_guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
+				unsigned flags)
+{
+	/* there can be no guests */
+}
+
+#ifdef	CONFIG_VIRTUALIZATION
+
+/*
+ * Normally data stack is switched on interceptions as follows:
+ * 1) Upon interception guest's USD_hi.size is saved into backup
+ * stacks (cr1_lo.ussz field).
+ * 2) Then hardware switches PCSP stack (see Phase 5) and does an
+ * equivalent of DONE which modifies guest's USD with 'cr1_lo.ussz'
+ * from the function that called GLAUNCH.
+ * 3) Hypervisor in software saves this modified USD and restores it
+ * before GLAUNCH.
+ * 4) Hardware in GLAUNCH switches PCSP stack (see Phase 4)
+ * 5) Hardware in GLAUNCH does an equivalent of DONE (see Phase 6)
+ * which restores proper guest USD.
+ *
+ * But if hypervisor sets VIRT_CTRL_CU.glnch.g_th then that DONE is
+ * skipped and guest's data stack is incorrect. So we manually do
+ * here what DONE does. For simplicity do it always, although it
+ * actually is needed only in the 'g_th' case.
+ */
+static inline void kvm_correct_guest_data_stack_regs(
+		struct kvm_sw_cpu_context *sw_ctxt, e2k_cr1_hi_t cr1_hi)
+{
+	e2k_usd_lo_t usd_lo;
+	e2k_usd_hi_t usd_hi;
+	e2k_size_t real_size;
+
+	real_size = cr1_hi.CR1_hi_ussz << 4;
+	usd_hi = sw_ctxt->usd_hi;
+	usd_lo = sw_ctxt->usd_lo;
+	usd_lo.USD_lo_base += (real_size - usd_hi.USD_hi_size);
+	usd_hi.USD_hi_size = real_size;
+	sw_ctxt->usd_lo = usd_lo;
+	sw_ctxt->usd_hi = usd_hi;
+}
+
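/*
 * To make the USD fix-up above concrete (made-up numbers): cr1_hi.ussz
 * counts 16-byte units, and the correction keeps the bottom of the data
 * stack window (base - size) fixed while the size is restored:
 */
	unsigned long ussz = 0x100;			/* saved size field */
	unsigned long real_size = ussz << 4;		/* 0x1000 bytes */
	unsigned long base = 0x7fff0800, size = 0x800;	/* current USD window */

	base += real_size - size;	/* base: 0x7fff0800 -> 0x7fff1000 */
	size = real_size;		/* size: 0x800 -> 0x1000 */
	/* invariant: base - size == 0x7fff0000 both before and after */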
+/*
+ * For interceptions just switch actual registers with saved values
+ * in 'sw_ctxt'.
+ *
+ * For hypercalls:
+ * 1) Enter hypercall.
+ * 2) Save previous values from 'sw_ctxt' to 'sw_ctxt->saved'.
+ * 3) Switch actual registers with saved values in @sw_ctxt.
+ * 4) Allocate stack with getsp.
+ * 5) After hypercall completion switch registers back to guest values.
+ * 6) Restore 'sw_ctxt' from 'sw_ctxt->saved'
+ * (because 'getsp' above has changed registers we cannot use their values).
+ */
+static inline void kvm_switch_stack_regs(struct kvm_sw_cpu_context *sw_ctxt,
+					bool ctxt_save, bool ctxt_restore)
+{
+	e2k_usd_lo_t usd_lo;
+	e2k_usd_hi_t usd_hi;
+	e2k_sbr_t sbr;
+
+	KVM_BUG_ON(ctxt_save && ctxt_restore);
+
+	if (!ctxt_restore) {
+		AW(usd_lo) = NATIVE_NV_READ_USD_LO_REG_VALUE();
+		AW(usd_hi) = NATIVE_NV_READ_USD_HI_REG_VALUE();
+		AW(sbr) = NATIVE_NV_READ_SBR_REG_VALUE();
+	}
+
+	NATIVE_NV_WRITE_USBR_USD_REG(sw_ctxt->sbr,
+			sw_ctxt->usd_hi, sw_ctxt->usd_lo);
+
+	if (ctxt_save) {
+		KVM_BUG_ON(sw_ctxt->saved.valid);
+		sw_ctxt->saved.sbr = sw_ctxt->sbr;
+		sw_ctxt->saved.usd_lo = sw_ctxt->usd_lo;
+		sw_ctxt->saved.usd_hi = sw_ctxt->usd_hi;
+		sw_ctxt->saved.valid = true;
+	}
+	if (!ctxt_restore) {
+		sw_ctxt->sbr = sbr;
+		sw_ctxt->usd_lo = usd_lo;
+		sw_ctxt->usd_hi = usd_hi;
+	} else {
+		KVM_BUG_ON(!sw_ctxt->saved.valid);
+		sw_ctxt->sbr = sw_ctxt->saved.sbr;
+		sw_ctxt->usd_lo = sw_ctxt->saved.usd_lo;
+		sw_ctxt->usd_hi = sw_ctxt->saved.usd_hi;
+		sw_ctxt->saved.valid = false;
+	}
+}
+
+#define	Compiler_bug_128308_workaround
+
+#ifndef	Compiler_bug_128308_workaround
+static inline void kvm_switch_fpu_regs(struct kvm_sw_cpu_context *sw_ctxt)
+{
+	e2k_fpcr_t fpcr;
+	e2k_fpsr_t fpsr;
+	e2k_pfpfr_t pfpfr;
+	e2k_upsr_t upsr;
+
+	fpcr = NATIVE_NV_READ_FPCR_REG();
+	fpsr = NATIVE_NV_READ_FPSR_REG();
+	pfpfr = NATIVE_NV_READ_PFPFR_REG();
+	upsr = NATIVE_NV_READ_UPSR_REG();
+
+	NATIVE_NV_WRITE_FPCR_REG(sw_ctxt->fpcr);
+	NATIVE_NV_WRITE_FPSR_REG(sw_ctxt->fpsr);
+	NATIVE_NV_WRITE_PFPFR_REG(sw_ctxt->pfpfr);
+	NATIVE_WRITE_UPSR_REG(sw_ctxt->upsr);
+
+	sw_ctxt->fpcr = fpcr;
+	sw_ctxt->fpsr = fpsr;
+	sw_ctxt->pfpfr = pfpfr;
+	sw_ctxt->upsr = upsr;
+}
+#else	/* Compiler_bug_128308_workaround */
+extern noinline void kvm_switch_fpu_regs(struct kvm_sw_cpu_context *sw_ctxt);
+#endif	/* !Compiler_bug_128308_workaround */
+
+static inline void kvm_switch_cu_regs(struct kvm_sw_cpu_context *sw_ctxt)
+{
+	e2k_cutd_t cutd;
+
+	AW(cutd) = NATIVE_NV_READ_CUTD_REG_VALUE();
+
+	NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(AW(sw_ctxt->cutd));
+	sw_ctxt->cutd = cutd;
+}
+
+static inline void kvm_switch_mmu_pt_regs(struct kvm_sw_cpu_context *sw_ctxt)
+{
+	mmu_reg_t u_pptb;
+	mmu_reg_t u_vptb;
+
+	u_pptb = NATIVE_READ_MMU_U_PPTB_REG();
+	u_vptb = NATIVE_READ_MMU_U_VPTB_REG();
+
+	NATIVE_WRITE_MMU_U_PPTB_REG(sw_ctxt->sh_u_pptb);
+	NATIVE_WRITE_MMU_U_VPTB_REG(sw_ctxt->sh_u_vptb);
+
+	sw_ctxt->sh_u_pptb = u_pptb;
+	sw_ctxt->sh_u_vptb = u_vptb;
+}
+
+static inline void kvm_switch_mmu_tc_regs(struct kvm_sw_cpu_context *sw_ctxt)
+{
+	mmu_reg_t tc_hpa;
+	mmu_reg_t trap_count;
+
+	tc_hpa = NATIVE_READ_MMU_TRAP_POINT();
+	trap_count = NATIVE_READ_MMU_TRAP_COUNT();
+
+	NATIVE_WRITE_MMU_TRAP_POINT(sw_ctxt->tc_hpa);
+	NATIVE_WRITE_MMU_TRAP_COUNT(sw_ctxt->trap_count);
+
+	sw_ctxt->tc_hpa = tc_hpa;
+	sw_ctxt->trap_count = trap_count;
+}
+
+static inline void kvm_switch_mmu_regs(struct kvm_sw_cpu_context *sw_ctxt,
+					bool switch_tc)
+{
+	kvm_switch_mmu_pt_regs(sw_ctxt);
+	if (switch_tc)
+		kvm_switch_mmu_tc_regs(sw_ctxt);
+}
+
+static inline void kvm_switch_to_guest_mmu_pid(struct kvm_vcpu *vcpu)
+{
+	mm_context_t *gmm_context;
+
+	gmm_context = pv_vcpu_get_gmm_context(vcpu);
+	reload_mmu_pid(gmm_context, smp_processor_id());
+}
+
+static inline void kvm_switch_to_host_mmu_pid(struct mm_struct *mm)
+{
+	reload_context(mm, smp_processor_id());
+}
+
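/*
 * Stepping back to kvm_switch_stack_regs() above: the (ctxt_save,
 * ctxt_restore) pair encodes three legal modes, guarded by the
 * 'saved.valid' flag. A compressed sketch of that protocol (plain ints
 * standing in for the stack registers; demo code, not the kernel's):
 */
	struct demo_ctxt { int cur; int saved; bool valid; };

	static void demo_switch(struct demo_ctxt *c, int *hw,
				bool save, bool restore)
	{
		int old_hw = *hw;

		*hw = c->cur;			/* always load ctxt into "hardware" */
		if (save) {			/* hypercall entry (is_hv case) */
			c->saved = c->cur;
			c->valid = true;
		}
		if (!restore) {
			c->cur = old_hw;	/* plain exchange */
		} else {			/* hypercall exit (is_hv case) */
			c->cur = c->saved;	/* getsp clobbered hw: use stash */
			c->valid = false;
		}
	}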
+static inline void kvm_switch_debug_regs(struct kvm_sw_cpu_context *sw_ctxt,
+					int is_active)
+{
+	u64 b_dimar0, b_dimar1, b_ddmar0, b_ddmar1, b_dibar0, b_dibar1,
+	    b_dibar2, b_dibar3, b_ddbar0, b_ddbar1, b_ddbar2, b_ddbar3,
+	    a_dimar0, a_dimar1, a_ddmar0, a_ddmar1, a_dibar0, a_dibar1,
+	    a_dibar2, a_dibar3, a_ddbar0, a_ddbar1, a_ddbar2, a_ddbar3;
+	e2k_dimcr_t b_dimcr, a_dimcr;
+	e2k_ddmcr_t b_ddmcr, a_ddmcr;
+	e2k_dibcr_t b_dibcr, a_dibcr;
+	e2k_dibsr_t b_dibsr, a_dibsr;
+	e2k_ddbcr_t b_ddbcr, a_ddbcr;
+	e2k_ddbsr_t b_ddbsr, a_ddbsr;
+
+	b_dibcr = sw_ctxt->dibcr;
+	b_ddbcr = sw_ctxt->ddbcr;
+	b_dibsr = sw_ctxt->dibsr;
+	b_ddbsr = sw_ctxt->ddbsr;
+	b_dimcr = sw_ctxt->dimcr;
+	b_ddmcr = sw_ctxt->ddmcr;
+	b_dibar0 = sw_ctxt->dibar0;
+	b_dibar1 = sw_ctxt->dibar1;
+	b_dibar2 = sw_ctxt->dibar2;
+	b_dibar3 = sw_ctxt->dibar3;
+	b_ddbar0 = sw_ctxt->ddbar0;
+	b_ddbar1 = sw_ctxt->ddbar1;
+	b_ddbar2 = sw_ctxt->ddbar2;
+	b_ddbar3 = sw_ctxt->ddbar3;
+	b_dimar0 = sw_ctxt->dimar0;
+	b_dimar1 = sw_ctxt->dimar1;
+	b_ddmar0 = sw_ctxt->ddmar0;
+	b_ddmar1 = sw_ctxt->ddmar1;
+
+	a_dibcr = NATIVE_READ_DIBCR_REG();
+	a_ddbcr = NATIVE_READ_DDBCR_REG();
+	a_dibsr = NATIVE_READ_DIBSR_REG();
+	a_ddbsr = NATIVE_READ_DDBSR_REG();
+	a_dimcr = NATIVE_READ_DIMCR_REG();
+	a_ddmcr = NATIVE_READ_DDMCR_REG();
+	a_dibar0 = NATIVE_READ_DIBAR0_REG_VALUE();
+	a_dibar1 = NATIVE_READ_DIBAR1_REG_VALUE();
+	a_dibar2 = NATIVE_READ_DIBAR2_REG_VALUE();
+	a_dibar3 = NATIVE_READ_DIBAR3_REG_VALUE();
+	a_ddbar0 = NATIVE_READ_DDBAR0_REG_VALUE();
+	a_ddbar1 = NATIVE_READ_DDBAR1_REG_VALUE();
+	a_ddbar2 = NATIVE_READ_DDBAR2_REG_VALUE();
+	a_ddbar3 = NATIVE_READ_DDBAR3_REG_VALUE();
+	a_ddmar0 = NATIVE_READ_DDMAR0_REG_VALUE();
+	a_ddmar1 = NATIVE_READ_DDMAR1_REG_VALUE();
+	a_dimar0 = NATIVE_READ_DIMAR0_REG_VALUE();
+	a_dimar1 = NATIVE_READ_DIMAR1_REG_VALUE();
+
+	if (is_active) {
+		/* These two must be written first to disable monitoring */
+		NATIVE_WRITE_DIBCR_REG(b_dibcr);
+		NATIVE_WRITE_DDBCR_REG(b_ddbcr);
+	}
+	NATIVE_WRITE_DIBAR0_REG_VALUE(b_dibar0);
+	NATIVE_WRITE_DIBAR1_REG_VALUE(b_dibar1);
+	NATIVE_WRITE_DIBAR2_REG_VALUE(b_dibar2);
+	NATIVE_WRITE_DIBAR3_REG_VALUE(b_dibar3);
+	NATIVE_WRITE_DDBAR0_REG_VALUE(b_ddbar0);
+	NATIVE_WRITE_DDBAR1_REG_VALUE(b_ddbar1);
+	NATIVE_WRITE_DDBAR2_REG_VALUE(b_ddbar2);
+	NATIVE_WRITE_DDBAR3_REG_VALUE(b_ddbar3);
+	NATIVE_WRITE_DDMAR0_REG_VALUE(b_ddmar0);
+	NATIVE_WRITE_DDMAR1_REG_VALUE(b_ddmar1);
+	NATIVE_WRITE_DIMAR0_REG_VALUE(b_dimar0);
+	NATIVE_WRITE_DIMAR1_REG_VALUE(b_dimar1);
+	NATIVE_WRITE_DIBSR_REG(b_dibsr);
+	NATIVE_WRITE_DDBSR_REG(b_ddbsr);
+	NATIVE_WRITE_DIMCR_REG(b_dimcr);
+	NATIVE_WRITE_DDMCR_REG(b_ddmcr);
+	if (!is_active) {
+		/* These two must be written last to enable monitoring */
+		NATIVE_WRITE_DIBCR_REG(b_dibcr);
+		NATIVE_WRITE_DDBCR_REG(b_ddbcr);
+	}
+
+	sw_ctxt->dibcr = a_dibcr;
+	sw_ctxt->ddbcr = a_ddbcr;
+	sw_ctxt->dibsr = a_dibsr;
+	sw_ctxt->ddbsr = a_ddbsr;
+	sw_ctxt->dimcr = a_dimcr;
+	sw_ctxt->ddmcr = a_ddmcr;
+	sw_ctxt->dibar0 = a_dibar0;
+	sw_ctxt->dibar1 = a_dibar1;
+	sw_ctxt->dibar2 = a_dibar2;
+	sw_ctxt->dibar3 = a_dibar3;
+	sw_ctxt->ddbar0 = a_ddbar0;
+	sw_ctxt->ddbar1 = a_ddbar1;
+	sw_ctxt->ddbar2 = a_ddbar2;
+	sw_ctxt->ddbar3 = a_ddbar3;
+	sw_ctxt->ddmar0 = a_ddmar0;
+	sw_ctxt->ddmar1 = a_ddmar1;
+	sw_ctxt->dimar0 = a_dimar0;
+	sw_ctxt->dimar1 = a_dimar1;
+}
+
+static inline void
+switch_ctxt_trap_enable_mask(struct kvm_sw_cpu_context *sw_ctxt)
+{
+	unsigned osem;
+
+	osem = NATIVE_READ_OSEM_REG_VALUE();
+	NATIVE_WRITE_OSEM_REG_VALUE(sw_ctxt->osem);
+	sw_ctxt->osem = osem;
+}
+
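/*
 * All of the kvm_switch_*_regs() helpers above share one exchange idiom:
 * read the live register, write the context value, store the old live
 * value back into the context. Each call therefore flips host and guest
 * state, and a second call flips it back. A runnable toy version:
 */
	static void demo_exchange(int *reg, int *ctxt)
	{
		int live = *reg;	/* NATIVE_..._READ of the live value */
		*reg = *ctxt;		/* NATIVE_..._WRITE of the saved value */
		*ctxt = live;		/* the context now holds the other world */
	}
	/* calling demo_exchange(&reg, &ctxt) twice is a no-op, which is why
	 * guest enter and guest exit can reuse the very same helpers */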
+static inline void host_guest_enter(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu, unsigned flags)
+{
+	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
+
+	switch_ctxt_trap_enable_mask(sw_ctxt);
+	/* In full virtualization mode the guest sets its own OSEM in thread_init() */
+	if (!vcpu->is_hv)
+		KVM_BUG_ON((NATIVE_READ_OSEM_REG_VALUE() & HYPERCALLS_TRAPS_MASK) !=
+				HYPERCALLS_TRAPS_MASK);
+
+	if (flags & FROM_HYPERCALL_SWITCH) {
+		/*
+		 * Hypercalls - both hardware and software virtualization
+		 */
+		KVM_BUG_ON(!sw_ctxt->in_hypercall);
+		sw_ctxt->in_hypercall = false;
+
+		/* For hypercalls skip the extended part. */
+		HOST_RESTORE_HOST_GREGS(ti);
+
+		/* compilation units context */
+		if (!(flags & DONT_CU_REGS_SWITCH)) {
+			kvm_switch_cu_regs(sw_ctxt);
+		}
+
+		/* restore guest PT context (U_PPTB/U_VPTB) */
+		if (!(flags & DONT_MMU_CONTEXT_SWITCH)) {
+			kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
+		}
+	} else if (flags & FULL_CONTEXT_SWITCH) {
+
+		/*
+		 * Interceptions - hardware support is enabled
+		 */
+#ifdef CONFIG_USE_AAU
+		if (!(flags & DONT_AAU_CONTEXT_SWITCH))
+			machine.calculate_aau_aaldis_aaldas(NULL, ti, &sw_ctxt->aau_context);
+#endif
+
+		/* For interceptions restore the extended part */
+		NATIVE_RESTORE_KERNEL_GREGS(&ti->k_gregs);
+
+		NATIVE_RESTORE_INTEL_REGS(sw_ctxt);
+
+		/* Isolate from QEMU */
+		/* TODO: if we want to call QEMU from hypercalls, then
+		 * we should switch more context in hypercalls - see
+		 * the list in the sw_ctxt definition */
+		kvm_switch_fpu_regs(sw_ctxt);
+		kvm_switch_cu_regs(sw_ctxt);
+		kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
+
+#ifdef CONFIG_USE_AAU
+		if (!(flags & DONT_AAU_CONTEXT_SWITCH)) {
+			/*
+			 * We cannot rely on the %aasr value since the interception
+			 * could have happened in guest user before "bap" or in the
+			 * guest trap handler before restoring %aasr, so we must
+			 * restore all AAU registers.
+			 */
+			native_clear_apb();
+			native_set_aau_context(&sw_ctxt->aau_context);
+
+			/*
+			 * It's important to restore AAD after all return operations.
+			 */
+			NATIVE_RESTORE_AADS(&sw_ctxt->aau_context);
+		}
+#endif
+	} else {
+		/*
+		 * Return from emulation of an interception to a
+		 * paravirtualized vcpu
+		 */
+
+		/* switch to guest MMU context to continue guest execution */
+		kvm_switch_mmu_regs(sw_ctxt, false);
+	}
+
+	if (flags & DEBUG_REGS_SWITCH)
+		kvm_switch_debug_regs(sw_ctxt, true);
+
+	/* Switch data stack after all function calls */
+	if (flags & USD_CONTEXT_SWITCH) {
+		if (!(flags & FROM_HYPERCALL_SWITCH) || !vcpu->is_hv) {
+			kvm_switch_stack_regs(sw_ctxt, false, false);
+		} else {
+			/* restore saved source pointers of the host stack */
+			kvm_switch_stack_regs(sw_ctxt, false, true);
+		}
+	}
+}
+
+static inline void host_guest_enter_light(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu,
+					bool from_sdisp)
+{
+	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
+
+	KVM_BUG_ON(!sw_ctxt->in_hypercall);
+	sw_ctxt->in_hypercall = false;
+
+	HOST_RESTORE_KERNEL_GREGS_AS_LIGHT(ti);
+
+	kvm_switch_cu_regs(sw_ctxt);
+
+	/* Switch data stack after all function calls */
+	if (!from_sdisp) {
+		if (!vcpu->is_hv) {
+			kvm_switch_stack_regs(sw_ctxt, false, false);
+		} else {
+			/* restore saved source pointers of the host stack */
+			kvm_switch_stack_regs(sw_ctxt, false, true);
+		}
+	}
+}
+
+static inline void host_guest_exit(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu, unsigned flags)
+{
+	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
+
+	switch_ctxt_trap_enable_mask(sw_ctxt);
+	KVM_BUG_ON(NATIVE_READ_OSEM_REG_VALUE() & HYPERCALLS_TRAPS_MASK);
+
+	/* Switch data stack before all function calls */
+	if (flags & USD_CONTEXT_SWITCH) {
+		if (!(flags & FROM_HYPERCALL_SWITCH) || !vcpu->is_hv) {
+			kvm_switch_stack_regs(sw_ctxt, false, false);
+		} else {
+			/* save source pointers of the host stack */
+			kvm_switch_stack_regs(sw_ctxt, true, false);
+		}
+	}
+
+	if (flags & FROM_HYPERCALL_SWITCH) {
+		/*
+		 * Hypercalls - both hardware and software virtualization
+		 */
+		KVM_BUG_ON(sw_ctxt->in_hypercall);
+		sw_ctxt->in_hypercall = true;
+
+		/* For hypercalls skip the extended part. */
+		HOST_SAVE_HOST_GREGS(ti);
+		ONLY_SET_KERNEL_GREGS(ti);
+
+		/* compilation units context */
+		if (!(flags & DONT_CU_REGS_SWITCH)) {
+			kvm_switch_cu_regs(sw_ctxt);
+		}
+		/* save guest PT context (U_PPTB/U_VPTB) and restore host */
+		/* user PT context */
+		if (!(flags & DONT_MMU_CONTEXT_SWITCH)) {
+			kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
+		}
+	} else if (flags & FULL_CONTEXT_SWITCH) {
+
+		/*
+		 * Interceptions - hardware support is enabled
+		 */
+#ifdef CONFIG_USE_AAU
+		if (!(flags & DONT_AAU_CONTEXT_SWITCH)) {
+			e2k_aasr_t aasr;
+
+			/*
+			 * We cannot rely on the %aasr value since the interception
+			 * could have happened in guest user before "bap" or in the
+			 * guest trap handler before restoring %aasr, so we must
+			 * save all AAU registers.
+			 * Several macros use %aasr to determine which registers to
+			 * save/restore, so pass the worst-case %aasr to them in
+			 * sw_ctxt->aau_context, and save the actual guest value to
+			 * sw_ctxt->aasr
+			 */
+			aasr = native_read_aasr_reg();
+			SWITCH_GUEST_AAU_AASR(&aasr, &sw_ctxt->aau_context, 1);
+			/*
+			 * This is placed before saving the intc cellar since that is
+			 * done with the 'mmurr' instruction which requires AAU to be
+			 * stopped.
+			 *
+			 * Do this before saving %sbbp as it uses 'alc'
+			 * and thus zeroes %aaldm.
+			 */
+			NATIVE_SAVE_AAU_MASK_REGS(&sw_ctxt->aau_context, aasr);
+
+			/* It's important to save AAD before all call operations.
+			 */
+			NATIVE_SAVE_AADS(&sw_ctxt->aau_context);
+			/*
+			 * Function calls are allowed from this point on,
+			 * mark it with a compiler barrier.
+			 */
+			barrier();
+
+			/* Since iset v6 %aaldi must be saved too */
+			NATIVE_SAVE_AALDIS(sw_ctxt->aau_context.aaldi);
+
+			machine.get_aau_context(&sw_ctxt->aau_context);
+
+			native_clear_apb();
+		}
+#endif
+
+		/* No atomic/DAM operations are allowed before this point.
+		 * Note that we cannot do this before saving AAU. */
+		if (cpu_has(CPU_HWBUG_L1I_STOPS_WORKING))
+			E2K_DISP_CTPRS();
+
+		/* For interceptions save the extended part. */
+		machine.save_kernel_gregs(&ti->k_gregs);
+		ONLY_SET_KERNEL_GREGS(ti);
+
+		NATIVE_SAVE_INTEL_REGS(sw_ctxt);
+#ifdef CONFIG_MLT_STORAGE
+		machine.invalidate_MLT();
+#endif
+		if (machine.flushts)
+			machine.flushts();
+
+		/* Isolate from QEMU */
+		kvm_switch_fpu_regs(sw_ctxt);
+		kvm_switch_cu_regs(sw_ctxt);
+		kvm_switch_mmu_regs(sw_ctxt, vcpu->is_hv);
+	} else {
+		/*
+		 * Starting emulation of an interception of a paravirtualized
+		 * vcpu
+		 */
+
+		/* switch to hypervisor MMU context to emulate a hw intercept */
+		kvm_switch_mmu_regs(sw_ctxt, false);
+	}
+
+	if (flags & DEBUG_REGS_SWITCH)
+		kvm_switch_debug_regs(sw_ctxt, false);
+}
+
+static inline void host_guest_exit_light(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu)
+{
+	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->sw_ctxt;
+
+	KVM_BUG_ON(sw_ctxt->in_hypercall);
+	sw_ctxt->in_hypercall = true;
+
+	HOST_SAVE_KERNEL_GREGS_AS_LIGHT(ti);
+	ONLY_SET_KERNEL_GREGS(ti);
+
+	kvm_switch_cu_regs(sw_ctxt);
+}
+
+/*
+ * Some hypercalls return to the guest through an exit point other than the
+ * usual hypercall return path, so the hypercall tracking state needs to be
+ * cleared there.
+ */
+static inline bool
+host_hypercall_exit(struct kvm_vcpu *vcpu)
+{
+	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt;
+
+	if (sw_ctxt->in_hypercall) {
+		sw_ctxt->in_hypercall = false;
+		return true;
+	}
+	return false;
+}
+
+
+/*
+ * Some hypercalls return from the hypercall to the host, so the host
+ * context must be restored and the hypercall tracking state cleared.
+ */
+static inline void hypercall_exit_to_host(struct kvm_vcpu *vcpu)
+{
+	struct kvm_sw_cpu_context *sw_ctxt = &vcpu->arch.sw_ctxt;
+
+	switch_ctxt_trap_enable_mask(sw_ctxt);
+
+	KVM_BUG_ON(!sw_ctxt->in_hypercall);
+
+	host_hypercall_exit(vcpu);
+}
+
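/*
 * The in_hypercall flag threaded through the helpers above acts as an
 * ownership token: exit paths assert it is clear and set it, enter paths
 * assert it is set and clear it, and host_hypercall_exit() is the "maybe"
 * variant for the irregular exits just described. A toy model (plain
 * assert() standing in for KVM_BUG_ON):
 */
	static bool in_hypercall;

	static void enter_hypercall(void)	/* cf. host_guest_exit() */
	{
		assert(!in_hypercall);
		in_hypercall = true;
	}
	static void leave_hypercall(void)	/* cf. host_guest_enter() */
	{
		assert(in_hypercall);
		in_hypercall = false;
	}
	static bool maybe_leave_hypercall(void)	/* cf. host_hypercall_exit() */
	{
		if (in_hypercall) {
			in_hypercall = false;	/* irregular exit: clear token */
			return true;
		}
		return false;	/* already left through the normal path */
	}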
+/*
+ * Save/restore the VCPU host kernel thread context while switching from
+ * one guest thread (current) to another guest thread (next).
+ * For now only the signal context needs to be saved, because the host
+ * kernel stacks are the same for all guest threads (processes).
+ */
+static inline void
+pv_vcpu_save_host_context(struct kvm_vcpu *vcpu, gthread_info_t *cur_gti)
+{
+	cur_gti->signal.stack.base = current_thread_info()->signal_stack.base;
+	cur_gti->signal.stack.size = current_thread_info()->signal_stack.size;
+	cur_gti->signal.stack.used = current_thread_info()->signal_stack.used;
+	cur_gti->signal.traps_num = vcpu->arch.host_ctxt.signal.traps_num;
+	cur_gti->signal.in_work = vcpu->arch.host_ctxt.signal.in_work;
+	cur_gti->signal.syscall_num = vcpu->arch.host_ctxt.signal.syscall_num;
+	cur_gti->signal.in_syscall = vcpu->arch.host_ctxt.signal.in_syscall;
+}
+
+static inline void
+pv_vcpu_restore_host_context(struct kvm_vcpu *vcpu, gthread_info_t *next_gti)
+{
+	current_thread_info()->signal_stack.base = next_gti->signal.stack.base;
+	current_thread_info()->signal_stack.size = next_gti->signal.stack.size;
+	current_thread_info()->signal_stack.used = next_gti->signal.stack.used;
+	vcpu->arch.host_ctxt.signal.traps_num = next_gti->signal.traps_num;
+	vcpu->arch.host_ctxt.signal.in_work = next_gti->signal.in_work;
+	vcpu->arch.host_ctxt.signal.syscall_num = next_gti->signal.syscall_num;
+	vcpu->arch.host_ctxt.signal.in_syscall = next_gti->signal.in_syscall;
+}
+
+static inline void
+pv_vcpu_switch_guest_host_context(struct kvm_vcpu *vcpu,
+			gthread_info_t *cur_gti, gthread_info_t *next_gti)
+{
+	pv_vcpu_save_host_context(vcpu, cur_gti);
+	pv_vcpu_restore_host_context(vcpu, next_gti);
+}
+
+static inline void pv_vcpu_switch_host_context(struct kvm_vcpu *vcpu)
+{
+	kvm_host_context_t *host_ctxt = &vcpu->arch.host_ctxt;
+	unsigned long *stack;
+	pt_regs_t *regs;
+	e2k_usd_hi_t k_usd_hi;
+	e2k_usd_lo_t k_usd_lo;
+	e2k_sbr_t k_sbr;
+	e2k_psp_lo_t k_psp_lo;
+	e2k_psp_hi_t k_psp_hi;
+	e2k_pcsp_lo_t k_pcsp_lo;
+	e2k_pcsp_hi_t k_pcsp_hi;
+	unsigned long base;
+	unsigned long size;
+	unsigned long used;
+
+	/* keep the current state of the context */
+	stack = current->stack;
+	regs = current_thread_info()->pt_regs;
+	k_usd_lo = current_thread_info()->k_usd_lo;
+	k_usd_hi = current_thread_info()->k_usd_hi;
+	k_sbr.SBR_reg = (unsigned long)stack + KERNEL_C_STACK_SIZE +
+				KERNEL_C_STACK_OFFSET;
+	k_psp_lo = current_thread_info()->k_psp_lo;
+	k_psp_hi = current_thread_info()->k_psp_hi;
+	k_pcsp_lo = current_thread_info()->k_pcsp_lo;
+	k_pcsp_hi = current_thread_info()->k_pcsp_hi;
+
+	/* restore the VCPU thread context */
+	current->stack = host_ctxt->stack;
+	current_thread_info()->pt_regs = host_ctxt->pt_regs;
+	current_thread_info()->k_usd_hi = host_ctxt->k_usd_hi;
+	current_thread_info()->k_usd_lo = host_ctxt->k_usd_lo;
+	current_thread_info()->k_psp_lo = host_ctxt->k_psp_lo;
+	current_thread_info()->k_psp_hi = host_ctxt->k_psp_hi;
+	current_thread_info()->k_pcsp_lo = host_ctxt->k_pcsp_lo;
+	current_thread_info()->k_pcsp_hi = host_ctxt->k_pcsp_hi;
+
+	/* save the VCPU thread context */
+	host_ctxt->stack = stack;
+	host_ctxt->pt_regs = regs;
+	host_ctxt->k_usd_lo = k_usd_lo;
+	host_ctxt->k_usd_hi = k_usd_hi;
+	host_ctxt->k_sbr = k_sbr;
+	host_ctxt->k_psp_lo = k_psp_lo;
+	host_ctxt->k_psp_hi = k_psp_hi;
+	host_ctxt->k_pcsp_lo = k_pcsp_lo;
+	host_ctxt->k_pcsp_hi = k_pcsp_hi;
+
+	/* keep the current signal stack state */
+	base = current_thread_info()->signal_stack.base;
+	size = current_thread_info()->signal_stack.size;
+	used = current_thread_info()->signal_stack.used;
+	/* the atomic trap_num is not used for the host thread, so keep it in place */
+
+	/* restore the VCPU thread signal stack state */
+	current_thread_info()->signal_stack.base = host_ctxt->signal.stack.base;
+	current_thread_info()->signal_stack.size = host_ctxt->signal.stack.size;
+	current_thread_info()->signal_stack.used = host_ctxt->signal.stack.used;
+
+	/* save the VCPU thread signal stack state */
+	host_ctxt->signal.stack.base = base;
+	host_ctxt->signal.stack.size = size;
+	host_ctxt->signal.stack.used = used;
+	/* the atomic trap_num & in_work & syscall_num & in_syscall will not be */
+	/* used for the host thread, so keep them in place for the last guest thread */
+}
+
+static inline void pv_vcpu_exit_to_host(struct kvm_vcpu *vcpu)
+{
+	/* save the VCPU guest thread context */
+	/* restore the VCPU host thread context */
+	pv_vcpu_switch_host_context(vcpu);
+}
+
+static inline void pv_vcpu_enter_to_guest(struct kvm_vcpu *vcpu)
+{
+	/* save the VCPU host thread context */
+	/* restore the VCPU guest thread context */
+	pv_vcpu_switch_host_context(vcpu);
+}
+
+static inline void
+host_switch_trap_enable_mask(struct thread_info *ti, struct pt_regs *regs,
+				bool guest_enter)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_sw_cpu_context *sw_ctxt;
+
+	if (trap_on_guest(regs)) {
+		vcpu = ti->vcpu;
+		sw_ctxt = &vcpu->arch.sw_ctxt;
+		if (guest_enter) {
+			/* return from the trap: restore the hypercall flag */
+			sw_ctxt->in_hypercall = regs->in_hypercall;
+		} else {	/* guest exit */
+			/* entering the trap: save the hypercall flag, because */
+			/* the trap handler can pass traps to the guest and run */
+			/* the guest trap handler with recursive hypercalls */
+			regs->in_hypercall = sw_ctxt->in_hypercall;
+		}
+		if (sw_ctxt->in_hypercall) {
+			/* the mask should already be switched, or */
+			/* will be switched by the hypercall */
+			return;
+		}
+		switch_ctxt_trap_enable_mask(sw_ctxt);
+	}
+}
+
+static __always_inline bool
+pv_vcpu_trap_on_guest_kernel(pt_regs_t *regs)
+{
+	if (regs && is_trap_pt_regs(regs) && guest_kernel_mode(regs))
+		return true;
+
+	return false;
+}
+
+static inline bool
+host_guest_trap_pending(struct thread_info *ti)
+{
+	struct pt_regs *regs = ti->pt_regs;
+	struct kvm_vcpu *vcpu;
+
+	if (likely(!regs || !is_trap_pt_regs(regs) ||
+			!kvm_test_intc_emul_flag(regs))) {
+		/* it is not the paravirtualized guest VCPU intercept */
+		/* emulation mode, so there is nothing more to do */
+		return false;
+	}
+	vcpu = ti->vcpu;
+	if (!kvm_check_is_vcpu_intc_TIRs_empty(vcpu)) {
+		/* there are some injected traps for the guest */
+		kvm_clear_vcpu_guest_stacks_pending(vcpu, regs);
+		return true;
+	}
+	if (kvm_is_vcpu_guest_stacks_pending(vcpu, regs)) {
+		/* the guest user's spilled stacks are not empty, */
+		/* so they need to be recovered */
+		return true;
+	}
+	return false;
+}
+
+static inline bool
+host_trap_from_guest_user(struct thread_info *ti)
+{
+	struct pt_regs *regs = ti->pt_regs;
+
+	if (likely(!host_guest_trap_pending(ti) && regs->traps_to_guest == 0))
+		return false;
+	return !pv_vcpu_trap_on_guest_kernel(ti->pt_regs);
+}
+
+static inline bool
+host_syscall_from_guest_user(struct thread_info *ti)
+{
+	struct pt_regs *regs = ti->pt_regs;
+
+	if (likely(!regs || is_trap_pt_regs(regs) ||
+			!kvm_test_intc_emul_flag(regs))) {
+		/* it is not the paravirtualized guest VCPU intercept */
+		/* emulation mode, so there are no system calls from the guest */
+		return false;
+	}
+	KVM_BUG_ON(ti->vcpu == NULL);
+	KVM_BUG_ON(guest_kernel_mode(regs));
+	return true;
+}
+
+static inline void
+host_trap_guest_exit_intc(struct thread_info *ti, struct pt_regs *regs)
+{
+	if (!kvm_test_and_clear_intc_emul_flag(regs)) {
+		/* it is not the paravirtualized guest VCPU intercept */
+		/* emulation mode, so there is nothing more to do */
+		return;
+	}
+
+	/*
+	 * Return from a trap on a paravirtualized guest VCPU that was
+	 * interpreted as an interception
+	 */
+	return_from_pv_vcpu_intc(ti, regs);
+}
+
+static inline bool
+host_return_to_injected_guest_syscall(struct thread_info *ti, pt_regs_t *regs)
+{
+	struct kvm_vcpu *vcpu;
+	int syscall_num, in_syscall;
+
+	vcpu = ti->vcpu;
+	syscall_num = atomic_read(&vcpu->arch.host_ctxt.signal.syscall_num);
+	in_syscall = atomic_read(&vcpu->arch.host_ctxt.signal.in_syscall);
+
+	if (likely(syscall_num > 0)) {
+		if (in_syscall == syscall_num) {
+			/* all injected system calls are already being handled */
+			return false;
+		}
+		/* we need to return to start the injected system call */
+		return true;
+	}
+	return false;
+}
+
+static inline bool
+host_return_to_injected_guest_trap(struct thread_info *ti, pt_regs_t *regs)
+{
+	struct kvm_vcpu *vcpu;
+	gthread_info_t *gti;
+	int traps_num, in_work;
+
+	vcpu = ti->vcpu;
+	gti = pv_vcpu_get_gti(vcpu);
+	traps_num = atomic_read(&vcpu->arch.host_ctxt.signal.traps_num);
+	in_work = atomic_read(&vcpu->arch.host_ctxt.signal.in_work);
+
+	if (unlikely(traps_num == 0)) {
+		/* there are no injected traps */
+		return false;
+	}
+	if (traps_num == in_work) {
+		/* some trap(s) have been injected to the guest, */
+		/* but all of them are already being handled */
+		return false;
+	}
+
+	/* we need to return to start handling a newly injected trap */
+	if (test_gti_thread_flag(gti, GTIF_KERNEL_THREAD) ||
+			pv_vcpu_trap_on_guest_kernel(regs)) {
+		/* returning to a recursive injected trap in guest kernel mode, */
+		/* so all guest stacks have already been switched to */
+		return false;
+	}
+
+	/* returning from a host trap to an injected trap in user mode, */
+	/* so all the guest user's stacks need to be switched to kernel ones */
+	return true;
+}
+
+static inline struct e2k_stacks *
+host_trap_guest_get_pv_vcpu_restore_stacks(struct thread_info *ti,
+						struct pt_regs *regs)
+{
+
+	if (host_return_to_injected_guest_trap(ti, regs)) {
+		/* we need to switch to the guest kernel context */
+		return &regs->g_stacks;
+	} else {
+		/* we need to switch to the guest user context */
+		return native_trap_guest_get_restore_stacks(ti, regs);
+	}
+}
+
+static inline struct e2k_stacks *
+host_syscall_guest_get_pv_vcpu_restore_stacks(struct thread_info *ti,
+						struct pt_regs *regs)
+{
+
+	if (host_return_to_injected_guest_syscall(ti, regs)) {
+		/* we need to switch to the guest kernel context */
+		return &regs->g_stacks;
+	} else {
+		/* we need to switch to the guest user context */
+		return native_syscall_guest_get_restore_stacks(ti, regs);
+	}
+}
+
+static inline struct e2k_stacks *
+host_trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+{
+	if (test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE)) {
+		/* the host returns to paravirtualized guest (VCPU) mode */
+		return host_trap_guest_get_pv_vcpu_restore_stacks(ti, regs);
+	}
+	return native_trap_guest_get_restore_stacks(ti, regs);
+}
+
+static inline struct e2k_stacks *
+host_syscall_guest_get_restore_stacks(struct thread_info *ti,
+					struct pt_regs *regs)
+{
+	if (test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE)) {
+		/* the host returns to paravirtualized guest (VCPU) mode */
+		return host_syscall_guest_get_pv_vcpu_restore_stacks(ti, regs);
+	}
+	return native_syscall_guest_get_restore_stacks(ti, regs);
+}
+
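/*
 * The counter pairs used by the helpers above and below form a small
 * work-queue protocol: injecting a trap bumps traps_num, starting to
 * handle one bumps in_work, so "traps_num != in_work" means a newly
 * injected trap is still waiting (syscall_num/in_syscall work the same
 * way). A compact model with hypothetical helper names:
 */
	static atomic_t traps_num;	/* injected traps on the signal stack */
	static atomic_t in_work;	/* of those, how many are in handling */

	static void inject_trap(void)
	{
		atomic_inc(&traps_num);
	}
	static bool new_trap_pending(void)	/* cf. host_return_to_injected_guest_trap() */
	{
		int num = atomic_read(&traps_num);

		return num != 0 && num != atomic_read(&in_work);
	}
	static void start_handling(void)	/* cf. host_trap_pv_vcpu_exit_trap() */
	{
		if (new_trap_pending())
			atomic_inc(&in_work);
	}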
+static inline void
+host_trap_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs)
+{
+	struct kvm_vcpu *vcpu = ti->vcpu;
+	int traps_num, in_work;
+
+	traps_num = atomic_read(&vcpu->arch.host_ctxt.signal.traps_num);
+	in_work = atomic_read(&vcpu->arch.host_ctxt.signal.in_work);
+	if (likely(traps_num <= 0)) {
+		/* it is a return from a host trap to guest (VCPU) mode */
+		return;
+	} else if (traps_num == in_work) {
+		/* some trap(s) have been injected to the guest, */
+		/* but all of them are already being handled */
+		return;
+	}
+
+	/* we need to return to start handling a newly injected trap */
+	atomic_inc(&vcpu->arch.host_ctxt.signal.in_work);
+}
+
+static inline void
+host_trap_guest_exit_trap(struct thread_info *ti, struct pt_regs *regs)
+{
+	if (test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE)) {
+		/* the host returns to paravirtualized guest (VCPU) mode */
+		host_trap_pv_vcpu_exit_trap(ti, regs);
+	}
+
+	host_switch_trap_enable_mask(ti, regs, true);
+
+	/* restore the global regs of the native kernel */
+	native_trap_guest_enter(ti, regs, EXIT_FROM_TRAP_SWITCH);
+}
+
+static inline void
+host_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs,
+			unsigned flags)
+{
+	if (flags & EXIT_FROM_INTC_SWITCH) {
+		host_trap_guest_exit_intc(ti, regs);
+	}
+	if (flags & EXIT_FROM_TRAP_SWITCH) {
+		host_trap_guest_exit_trap(ti, regs);
+	}
+}
+
+static inline void
+host_syscall_pv_vcpu_exit_trap(struct thread_info *ti, struct pt_regs *regs)
+{
+	struct kvm_vcpu *vcpu = ti->vcpu;
+	int syscall_num, in_syscall;
+
+	syscall_num = atomic_read(&vcpu->arch.host_ctxt.signal.syscall_num);
+	in_syscall = atomic_read(&vcpu->arch.host_ctxt.signal.in_syscall);
+	if (likely(syscall_num == 0)) {
+		/* it is a return from a host syscall to guest (VCPU) mode */
+		return;
+	} else if (in_syscall == syscall_num) {
+		/* some system call has been injected to the guest, */
+		/* but it is already being handled */
+		return;
+	}
+
+	/* we need to return to start handling a newly injected system call */
+	atomic_inc(&vcpu->arch.host_ctxt.signal.in_syscall);
+}
+
+static inline void
+host_syscall_guest_exit_trap(struct thread_info *ti, struct pt_regs *regs)
+{
+	if (!test_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE))
+		return;
+
+	/* the host returns to paravirtualized guest (VCPU) mode */
+	host_syscall_pv_vcpu_exit_trap(ti, regs);
+
+	host_switch_trap_enable_mask(ti, regs, true);
+}
+
+static inline void
+host_guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
+				unsigned flags)
+{
+	if (flags & EXIT_FROM_INTC_SWITCH) {
+		host_trap_guest_exit_intc(ti, regs);
+	}
+	if (flags & EXIT_FROM_TRAP_SWITCH) {
+		host_syscall_guest_exit_trap(ti, regs);
+	}
+}
+
+extern void kvm_init_pv_vcpu_intc_handling(struct kvm_vcpu *vcpu, pt_regs_t *regs);
+extern int last_light_hcall;
+
+static inline void
+host_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
+			trap_pt_regs_t *trap, unsigned flags)
+{
+	if (!test_and_clear_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE))
+		return;
+
+	/*
+	 * A trap on a paravirtualized guest VCPU is interpreted as an intercept
+	 */
+
+	kvm_emulate_pv_vcpu_intc(ti, regs, trap);
+
+	/* only after switching to the host MMU context in the previous function */
+	host_switch_trap_enable_mask(ti, regs, false);
+}
+
+/*
+ * The function should return whether the system call came from a guest
+ */
+static inline bool
+host_guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+{
+	if (!test_and_clear_ti_thread_flag(ti, TIF_HOST_AT_VCPU_MODE))
+		return false;	/* it is not a guest system call */
+
+	return pv_vcpu_syscall_intc(ti, regs);
+}
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest */
+# include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized) */
+# include
+#else
+/* it is native kernel without any virtualization or */
+/* host kernel with virtualization support */
+#ifndef	CONFIG_VIRTUALIZATION
+/* it is only native kernel without any virtualization */
+static inline void __guest_enter(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu, unsigned flags)
+{
+}
+
+static inline void __guest_enter_light(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu,
+					bool from_sdisp)
+{
+}
+
+static inline void __guest_exit(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu, unsigned flags)
+{
+}
+static inline void __guest_exit_light(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu)
+{
+}
+static inline void
+trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags)
+{
+	native_trap_guest_enter(ti, regs, flags);
+}
+static inline void
+trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
+		trap_pt_regs_t *trap, unsigned flags)
+{
+	native_trap_guest_exit(ti, regs, trap, flags);
+}
+static inline bool
+guest_trap_pending(struct thread_info *ti)
+{
+	return native_guest_trap_pending(ti);
+}
+
+static inline bool
+guest_trap_from_user(struct thread_info *ti)
+{
+	return native_trap_from_guest_user(ti);
+}
+
+static inline bool
+guest_syscall_from_user(struct thread_info *ti)
+{
+	return native_syscall_from_guest_user(ti);
+}
+
+static inline struct e2k_stacks *
+trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+{
+	return native_trap_guest_get_restore_stacks(ti, regs);
+}
+
+static inline struct e2k_stacks *
+syscall_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+{
+	return native_syscall_guest_get_restore_stacks(ti, regs);
+}
+
+/*
+ * The function should return whether the system call came from a guest
+ */
+static inline bool
+guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+{
+	return native_guest_syscall_enter(ti, regs);
+}
+static inline void
+guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
+			unsigned flags)
+{
+	native_guest_syscall_exit_to(ti, regs, flags);
+}
+#else	/* CONFIG_VIRTUALIZATION */
+/* it is only the host kernel with virtualization support */
+static inline void __guest_enter(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu, unsigned flags)
+{
+	host_guest_enter(ti, vcpu, flags);
+}
+
+static inline void __guest_enter_light(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu,
+					bool from_sdisp)
+{
+	host_guest_enter_light(ti, vcpu, from_sdisp);
+}
+
+static inline void __guest_exit(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu, unsigned flags)
+{
+	host_guest_exit(ti, vcpu, flags);
+}
+static inline void __guest_exit_light(struct thread_info *ti,
+					struct kvm_vcpu_arch *vcpu)
+{
+	host_guest_exit_light(ti, vcpu);
+}
+static inline void
+trap_guest_enter(struct thread_info *ti, struct pt_regs *regs, unsigned flags)
+{
+	host_trap_guest_enter(ti, regs, flags);
+}
+static inline void
+trap_guest_exit(struct thread_info *ti, struct pt_regs *regs,
+		trap_pt_regs_t *trap, unsigned flags)
+{
+	host_trap_guest_exit(ti, regs, trap, flags);
+}
+static inline bool
+guest_trap_pending(struct thread_info *ti)
+{
+	return host_guest_trap_pending(ti);
+}
+
+static inline bool
+guest_trap_from_user(struct thread_info *ti)
+{
+	return host_trap_from_guest_user(ti);
+}
+
+static inline bool
+guest_syscall_from_user(struct thread_info *ti)
+{
+	return host_syscall_from_guest_user(ti);
+}
+
+static inline struct e2k_stacks *
+trap_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+{
+	return host_trap_guest_get_restore_stacks(ti, regs);
+}
+
+static inline struct e2k_stacks *
+syscall_guest_get_restore_stacks(struct thread_info *ti, struct pt_regs *regs)
+{
+	return host_syscall_guest_get_restore_stacks(ti, regs);
+}
+
+/*
+ * The function should return whether the system call came from a guest
+ */
+static inline bool
+guest_syscall_enter(struct thread_info *ti, struct pt_regs *regs)
+{
+	return host_guest_syscall_enter(ti, regs);
+}
+static inline void
+guest_syscall_exit_to(struct thread_info *ti, struct pt_regs *regs,
+			unsigned flags)
+{
+	host_guest_syscall_exit_to(ti, regs, flags);
+}
+#endif	/* ! CONFIG_VIRTUALIZATION */
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif	/* ! _E2K_KVM_SWITCH_H */
diff --git a/arch/e2k/include/asm/kvm/thread_info.h b/arch/e2k/include/asm/kvm/thread_info.h
new file mode 100644
index 0000000..3748ab3
--- /dev/null
+++ b/arch/e2k/include/asm/kvm/thread_info.h
@@ -0,0 +1,547 @@
+/*
+ * kvm_thread_info.h: In-kernel KVM guest thread info related definitions
+ * Copyright (c) 2011, MCST.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __E2K_KVM_THREAD_INFO_H
+#define __E2K_KVM_THREAD_INFO_H
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#ifdef	CONFIG_VIRTUALIZATION
+
+struct kvm;
+struct kvm_vcpu;
+struct gmm_struct;
+
+/* TODO: a security hole, undef it - a bad guest can cause the host to panic() */
+#define	GTI_DEBUG_MODE
+
+#ifdef	GTI_DEBUG_MODE
+#define	GTI_BUG_ON(cond)	BUG_ON(cond)
+#else	/* ! GTI_DEBUG_MODE */
+#define	GTI_BUG_ON(cond)	do { } while (0)
+#endif	/* GTI_DEBUG_MODE */
+
+/*
+ * Hardware & local data stacks registers state to save/restore guest stacks
+ * while in hypercalls under paravirtualization without hardware support.
+ * It allows emulating the switch (on HCALL) and restore (on HRET) of the
+ * hardware supported extensions.
+ */
+typedef struct guest_hw_stack {
+	bool valid;			/* the stacks are valid */
+	e2k_stacks_t stacks;		/* pointers to local data & hardware */
+					/* stacks */
+	e2k_mem_crs_t crs;		/* to startup & launch VCPU */
+	e2k_cutd_t cutd;		/* Compilation Unit table pointer */
+} guest_hw_stack_t;
+
+/*
+ * Guest kernel pt_regs structure to keep some info about the guest state
+ * on the host.
+ * It is needed to enable recursive traps, system calls, and signal handlers
+ * (same as on the host kernel)
+ */
+typedef enum gpt_regs_type {
+	undefined_regs_type,	/* unknown or any type */
+	start_regs_type,	/* start activation of guest */
+	trap_regs_type,		/* because of a trap */
+	syscall_regs_type,	/* system call */
+	hypercall_regs_type,	/* hypercall */
+	sig_handler_regs_type,	/* to run a signal handler */
+} gpt_regs_type_t;
+
+typedef struct gpt_regs {
+	gpt_regs_type_t type;		/* the reason this instance of the */
+					/* structure was created */
+
+	/* the following fields save the last guest and host activation state */
+	/* to enable recursive kernel activations */
+	int g_stk_frame_no;		/* number of the saved guest kernel */
+					/* data stack activation (see below) */
+	e2k_addr_t g_usd_size;		/* guest kernel data stack size */
+	int k_stk_frame_no;		/* number of the saved host kernel */
+					/* data stack activation */
+	e2k_addr_t k_usd_size;		/* host kernel data stack size */
+	e2k_size_t pcsp_ind;		/* chain procedure stack index */
+	struct pt_regs *pt_regs;	/* head of the pt_regs structure */
+					/* queue: pointer to current pt_regs */
+
+	/* all structures of this gpt_regs type (like the pt_regs structures) */
+	/* are allocated on the host kernel data stack, because the recursive */
+	/* events (traps, system calls, hypercalls, signal handlers) are */
+	/* handled by the kernel on one stack using stack discipline. */
+	/* So an index into this stack can be used as a reference to the */
+	/* previous or next such structure instead of a real pointer (address), */
+	/* to avoid complex address translation while copying/cloning stacks */
+	/* (fork()) */
+	e2k_addr_t next_index;		/* index of the next such structure */
+					/* on the host kernel data stack, */
+					/* which keeps the state of the */
+					/* previous guest kernel activation */
+} gpt_regs_t;
+
+typedef struct vcpu_l_gregs {
+	local_gregs_t gregs;	/* guest user "local" global */
+				/* registers to save values updated on */
+				/* page fault */
+	u64 updated;		/* mask of updated "local" */
+				/* global registers (see above) */
+	bool valid;		/* gregs are valid */
+	int gener;		/* generation of valid gregs */
+} vcpu_l_gregs_t;
+
+typedef struct kvm_signal_context {
+	/* the signal stack area is used to store the interrupted contexts: */
+	/* one for the host thread and one for each guest thread */
+	struct signal_stack stack;
+	atomic_t traps_num;	/* counter of recursive traps on the */
+				/* signal stack, actual only for a guest */
+	atomic_t in_work;	/* counter of recursive traps in */
+				/* work, actual only for a guest */
+	atomic_t syscall_num;	/* counter of recursive system calls on the */
+				/* signal stack, actual only for a guest */
+	atomic_t in_syscall;	/* counter of recursive system calls in */
+				/* work, actual only for a guest */
+} kvm_signal_context_t;
+
+/*
+ * Guest kernel thread info structure
+ */
+typedef struct gthread_info {
+	gpid_t	*gpid;
+
+	/* guest kernel stacks pointers */
+	e2k_usd_lo_t	g_usd_lo;
+	e2k_usd_hi_t	g_usd_hi;
+	e2k_sbr_t	g_sbr;
+	e2k_psp_lo_t	g_psp_lo;
+	e2k_psp_hi_t	g_psp_hi;
+	e2k_pcsp_lo_t	g_pcsp_lo;
+	e2k_pcsp_hi_t	g_pcsp_hi;
+
+	e2k_size_t	us_size;	/* guest local data size: */
+					/* kernel stack for a kernel thread, */
+					/* user stack for a user thread */
+	guest_hw_stack_t stack_regs;	/* state of the pointers to local & */
+					/* hardware stacks of the guest */
+
+	data_stack_t	data_stack;
+	void		*stack;
+	hw_stack_t	hw_stacks;
+	vcpu_l_gregs_t	l_gregs;	/* guest user "local" global */
+					/* registers to save values updated */
+					/* on page fault */
+	kernel_gregs_t	g_gregs;	/* guest kernel global registers state */
+					/* after migration to another VCPU */
+
+	/* the following flags mark the event: */
+	/* a hardware stacks bounds trap occurred, but 'sge' on the guest */
+	/* is disabled and the trap has not yet been passed to the guest kernel */
+	bool proc_bounds_waiting;	/* procedure stack bounds */
+	bool chain_bounds_waiting;	/* chain stack bounds */
+
+	int g_stk_frame_no;	/* current number of the guest kernel */
+				/* data stack activation. */
+				/* Each kernel activation from user */
+				/* (trap, system call, signal handler) */
+				/* runs on the kernel data stack. */
+				/* Activations can be nested, so each */
+				/* new nested activation needs a new */
+				/* free frame of the data stack above */
+				/* (for a downward-growing data stack: */
+				/* below) the previous activation */
+	int k_stk_frame_no;	/* current number of the host kernel */
+				/* data stack activation */
+
+	unsigned long	flags;		/* flags (see below) */
+	struct gmm_struct *gmm;		/* pointer to the current guest mm */
+					/* agent on the host; */
+					/* NULL for guest kernel threads */
+	hpa_t	nonp_root_hpa;	/* physical base of the nonpaging root PT */
+	bool	gmm_in_release;	/* the guest mm is releasing (exit_mm()) */
+
+	/* the following fields should be updated for each multi-stack process */
+	/* (task, thread) while switching */
+	struct kvm_vcpu	*vcpu;		/* pointer to the VCPU where the */
+					/* thread is running now or was run */
+					/* the last time */
+	struct pt_regs	fork_regs;	/* pt_regs structure of the last host */
+					/* sys_fork() system call */
+	u32 intr_counter;		/* number of traps being handled */
+	atomic_t signal_counter;	/* number of signals being handled */
+	struct sw_regs	sw_regs;	/* user special registers state */
+					/* to save/restore on guest */
+					/* process switch */
+	kvm_signal_context_t signal;	/* the host kernel's signal/trap */
+					/* stack of contexts */
+
+	/* the following pointer should be updated by each recursive trap, */
+	/* system call, or signal handler run */
+	gpt_regs_t	*gpt_regs;	/* pointer to the current state of */
+					/* the guest kernel */
+
+	e2k_upsr_t	u_upsr;		/* guest user UPSR is different from */
+	e2k_upsr_t	k_upsr;		/* the guest kernel UPSR */
+
+	/* FIXME: the following fields are added only to debug correct */
+	/* save/restore of the guest UPSR state; delete them after debugging */
+	/* is complete, together with some debug statements in other kvm */
+	/* source files */
+	bool u_upsr_valid;	/* the guest user upsr value is valid */
+	bool k_upsr_valid;	/* the guest kernel upsr value is valid */
+
+	global_regs_t	gregs;	/* structure to keep the guest user */
+				/* global registers state */
+				/* FIXME: this structure duplicates a */
+				/* similar structure in sw_regs; only */
+				/* the structure in sw_regs should */
+				/* be used */
+	bool	gregs_active;	/* the gregs structure above is active */
+				/* for user global registers */
+	bool	gregs_valid;	/* the gregs structure above contains */
+				/* the actual global registers state */
+	bool	gregs_for_currents_valid; /* the gregs used for current */
+				/* pointers contain the actual global */
+				/* registers state, while the real global */
+				/* registers are set to current pointers */
+	bool	task_is_binco;	/* the guest task is a binary compiler */
+				/* (binco) application */
+	bool	task_is_protect; /* the guest task is running in */
+				/* protected mode */
+} gthread_info_t;
+
+/* guest stacks flags (fields in the structure above) */
+
+/*
+ * Guest thread flags
+ */
+#define	GTIF_VCPU_START_THREAD	0	/* this thread is the booting (start) */
+					/* guest kernel thread */
+#define	GTIF_KERNEL_THREAD	1	/* this thread is a guest kernel */
+					/* thread */
+#define	GTIF_THREAD_MIGRATED	2	/* the thread was migrated from one */
+					/* VCPU to another */
+#define	GTIF_HW_PS_LOCKED	16	/* the hardware procedure stack */
+					/* was locked by the host */
+#define	GTIF_HW_PCS_LOCKED	17	/* the hardware chain stack */
+					/* was locked by the host */
+#define	GTIF_HW_PS_PRESENTED	18	/* the hardware procedure stack */
+					/* was populated by the host */
+#define	GTIF_HW_PCS_PRESENTED	19	/* the hardware chain stack */
+					/* was populated by the host */
+#define	GTIF_DATA_STACK_HOSTED	20	/* the local data stack of the guest */
+					/* was registered on the host */
+#define	GTIF_HW_PS_HOSTED	21	/* the hardware procedure stack of */
+					/* the guest was registered on the host */
+#define	GTIF_HW_PCS_HOSTED	22	/* the hardware chain stack of the */
+					/* guest was registered on the host */
+#define	GTIF_HW_PS_HAS_GUARD_PAGE 28	/* the hardware procedure stack of */
+					/* the guest has an extra guard page */
+#define	GTIF_HW_PCS_HAS_GUARD_PAGE 29	/* the hardware chain stack of the */
+					/* guest has an extra guard page */
+
+#define	_GTIF_VCPU_START_THREAD	(1UL << GTIF_VCPU_START_THREAD)
+#define	_GTIF_KERNEL_THREAD	(1UL << GTIF_KERNEL_THREAD)
+#define	_GTIF_THREAD_MIGRATED	(1UL << GTIF_THREAD_MIGRATED)
+#define	_GTIF_HW_PS_LOCKED	(1UL << GTIF_HW_PS_LOCKED)
+#define	_GTIF_HW_PCS_LOCKED	(1UL << GTIF_HW_PCS_LOCKED)
+#define	_GTIF_HW_PS_PRESENTED	(1UL << GTIF_HW_PS_PRESENTED)
+#define	_GTIF_HW_PCS_PRESENTED	(1UL << GTIF_HW_PCS_PRESENTED)
+#define	_GTIF_DATA_STACK_HOSTED	(1UL << GTIF_DATA_STACK_HOSTED)
+#define	_GTIF_HW_PS_HOSTED	(1UL << GTIF_HW_PS_HOSTED)
+#define	_GTIF_HW_PCS_HOSTED	(1UL << GTIF_HW_PCS_HOSTED)
+#define	_GTIF_HW_PS_HAS_GUARD_PAGE	(1UL << GTIF_HW_PS_HAS_GUARD_PAGE)
+#define	_GTIF_HW_PCS_HAS_GUARD_PAGE	(1UL << GTIF_HW_PCS_HAS_GUARD_PAGE)
+
+#define	GTIF_ALL_STACKS_MASK	\
+		(_GTIF_HW_PS_LOCKED | _GTIF_HW_PCS_LOCKED |	\
+		_GTIF_HW_PS_PRESENTED | _GTIF_HW_PCS_PRESENTED |	\
+		_GTIF_DATA_STACK_HOSTED | _GTIF_HW_PS_HOSTED |	\
+		_GTIF_HW_PCS_HOSTED)
+
+static inline void set_the_flag(unsigned long *flags, int the_flag)
+{
+	set_bit(the_flag, flags);
+}
+
+static inline void clear_the_flag(unsigned long *flags, int the_flag)
+{
+	clear_bit(the_flag, flags);
+}
+
+static inline int test_and_set_the_flag(unsigned long *flags, int the_flag)
+{
+	return test_and_set_bit(the_flag, flags);
+}
+
+static inline int test_and_clear_the_flag(unsigned long *flags, int the_flag)
+{
+	return test_and_clear_bit(the_flag, flags);
+}
+
+static inline int test_the_flag(unsigned long *flags, int the_flag)
+{
+	return test_bit(the_flag, flags);
+}
+
+static inline void set_gti_thread_flag(gthread_info_t *gti, int flag)
+{
+	set_the_flag(&gti->flags, flag);
+}
+
+static inline void clear_gti_thread_flag(gthread_info_t *gti, int flag)
+{
+	clear_the_flag(&gti->flags, flag);
+}
+
+static inline int test_and_set_gti_thread_flag(gthread_info_t *gti, int flag)
+{
+	return test_and_set_the_flag(&gti->flags, flag);
+}
+
+static inline int test_and_clear_gti_thread_flag(gthread_info_t *gti, int flag)
+{
+	return test_and_clear_the_flag(&gti->flags, flag);
+}
+
+static inline int test_gti_thread_flag(gthread_info_t *gti, int flag)
+{
+	return test_the_flag(&gti->flags, flag);
+}
+
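/*
 * Usage note: the test_and_* wrappers above are atomic read-modify-write
 * operations (test_and_set_bit()/test_and_clear_bit() underneath), so they
 * support the usual once-only idiom. A hypothetical caller:
 */
	if (!test_and_set_gti_thread_flag(gti, GTIF_HW_PS_LOCKED)) {
		/* first caller: actually lock the hardware procedure stack */
	} else {
		/* the flag was already set: someone else has locked it */
	}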
+/*
+ * Hardware stacks bounds control
+ */
+static inline bool test_guest_hw_stack_bounds_waiting(thread_info_t *ti,
+						unsigned long trap_mask)
+{
+	gthread_info_t *gti = ti->gthread_info;
+	bool is_waiting = false;
+
+	if (gti == NULL)
+		return false;
+	if (trap_mask & exc_proc_stack_bounds_mask)
+		is_waiting |= gti->proc_bounds_waiting;
+	if (trap_mask & exc_chain_stack_bounds_mask)
+		is_waiting |= gti->chain_bounds_waiting;
+	return is_waiting;
+}
+static inline bool test_guest_proc_bounds_waiting(thread_info_t *ti)
+{
+	return test_guest_hw_stack_bounds_waiting(ti,
+			exc_proc_stack_bounds_mask);
+}
+static inline bool test_guest_chain_bounds_waiting(thread_info_t *ti)
+{
+	return test_guest_hw_stack_bounds_waiting(ti,
+			exc_chain_stack_bounds_mask);
+}
+static inline void set_guest_hw_stack_bounds_waiting(thread_info_t *ti,
+						unsigned long trap_mask)
+{
+	gthread_info_t *gti = ti->gthread_info;
+
+	if (gti == NULL)
+		return;
+	if (trap_mask & exc_proc_stack_bounds_mask) {
+		gti->proc_bounds_waiting = true;
+	} else if (trap_mask & exc_chain_stack_bounds_mask) {
+		gti->chain_bounds_waiting = true;
+	} else {
+		BUG_ON(true);
+	}
+}
+static inline void set_guest_proc_bounds_waiting(thread_info_t *ti)
+{
+	set_guest_hw_stack_bounds_waiting(ti, exc_proc_stack_bounds_mask);
+}
+static inline void set_guest_chain_bounds_waiting(thread_info_t *ti)
+{
+	set_guest_hw_stack_bounds_waiting(ti, exc_chain_stack_bounds_mask);
+}
+static inline bool
+test_and_set_guest_hw_stack_bounds_waiting(thread_info_t *ti,
+						unsigned long trap_mask)
+{
+	bool waiting;
+
+	waiting = test_guest_hw_stack_bounds_waiting(ti, trap_mask);
+	if (!waiting)
+		set_guest_hw_stack_bounds_waiting(ti, trap_mask);
+	return waiting;
+}
+static inline bool test_and_set_guest_proc_bounds_waiting(thread_info_t *ti)
+{
+	return test_and_set_guest_hw_stack_bounds_waiting(ti,
+			exc_proc_stack_bounds_mask);
+}
+static inline bool test_and_set_guest_chain_bounds_waiting(thread_info_t *ti)
+{
+	return test_and_set_guest_hw_stack_bounds_waiting(ti,
+			exc_chain_stack_bounds_mask);
+}
+static inline void clear_guest_hw_stack_bounds_waiting(thread_info_t *ti,
+						unsigned long trap_mask)
+{
+	gthread_info_t *gti = ti->gthread_info;
+
+	if (gti == NULL)
+		return;
+	if (trap_mask & exc_proc_stack_bounds_mask) {
+		gti->proc_bounds_waiting = false;
+	} else if (trap_mask & exc_chain_stack_bounds_mask) {
+		gti->chain_bounds_waiting = false;
+	} else {
+		BUG_ON(true);
+	}
+}
+static inline void clear_guest_proc_bounds_waiting(thread_info_t *ti)
+{
+	clear_guest_hw_stack_bounds_waiting(ti, exc_proc_stack_bounds_mask);
+}
+static inline void clear_guest_chain_bounds_waiting(thread_info_t *ti)
+{
+	clear_guest_hw_stack_bounds_waiting(ti, exc_chain_stack_bounds_mask);
+}
+static inline bool
+test_and_clear_guest_hw_stack_bounds_waiting(thread_info_t *ti,
+						unsigned long trap_mask)
+{
+	bool waiting;
+
+	waiting = test_guest_hw_stack_bounds_waiting(ti, trap_mask);
+	if (waiting)
+		clear_guest_hw_stack_bounds_waiting(ti, trap_mask);
+	return waiting;
+}
+static inline bool test_and_clear_guest_proc_bounds_waiting(thread_info_t *ti)
+{
+	return test_and_clear_guest_hw_stack_bounds_waiting(ti,
+			exc_proc_stack_bounds_mask);
+}
+static inline bool test_and_clear_guest_chain_bounds_waiting(thread_info_t *ti)
+{
+	return test_and_clear_guest_hw_stack_bounds_waiting(ti,
+			exc_chain_stack_bounds_mask);
+}
+
+/*
+ * Set of functions to manipulate gpt_regs structures
+ */
+
+#define	ATOMIC_GET_HW_PCS_INDEX()				\
+({								\
+	unsigned long pcsp_hi_val;				\
+	unsigned int pcshtp_val;				\
+	e2k_pcsp_hi_t pcsp_hi;					\
+								\
+	ATOMIC_READ_HW_PCS_SIZES(pcsp_hi_val, pcshtp_val);	\
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val;			\
+	pcsp_hi.PCSP_hi_ind + pcshtp_val;			\
+})
+
+static inline e2k_size_t
+gpt_regs_addr_to_index(thread_info_t *ti, gpt_regs_t *gregs)
+{
+	e2k_addr_t bottom;
+	e2k_addr_t addr;
+
+	if (gregs == NULL)
+		return -1UL;
+	bottom = (e2k_addr_t)thread_info_task(ti)->stack;
+	addr = (e2k_addr_t)gregs;
+	GTI_BUG_ON(addr < bottom ||
+		addr + sizeof(*gregs) > bottom + KERNEL_C_STACK_SIZE);
+	return addr - bottom;
+}
+static inline gpt_regs_t *
+gpt_regs_index_to_addr(thread_info_t *ti, e2k_size_t gregs_index)
+{
+	e2k_addr_t bottom;
+
+	if (gregs_index == -1UL)
+		return NULL;
+	bottom = (e2k_addr_t)thread_info_task(ti)->stack;
+	GTI_BUG_ON(gregs_index < 0 ||
+		gregs_index + sizeof(gpt_regs_t) > KERNEL_C_STACK_SIZE);
+	return (gpt_regs_t *)(bottom + gregs_index);
+}
+static inline gpt_regs_t *
+get_gpt_regs(thread_info_t *ti)
+{
+	gthread_info_t *gti = ti->gthread_info;
+
+	GTI_BUG_ON(gti == NULL);
+	return gti->gpt_regs;
+}
+static inline void
+set_gpt_regs(thread_info_t *ti, gpt_regs_t *gregs)
+{
+	gthread_info_t *gti = ti->gthread_info;
+
+	GTI_BUG_ON(gti == NULL);
+	gti->gpt_regs = gregs;
+}
+static inline void
+add_gpt_regs(thread_info_t *ti, gpt_regs_t *new_gregs, gpt_regs_type_t type)
+{
+	gpt_regs_t *cur_gregs;
+
+	cur_gregs = get_gpt_regs(ti);
+	new_gregs->type = type;
+	new_gregs->next_index = gpt_regs_addr_to_index(ti, cur_gregs);
+	new_gregs->pcsp_ind = ATOMIC_GET_HW_PCS_INDEX();
+	set_gpt_regs(ti, new_gregs);
+}
+static inline gpt_regs_t *
+delete_gpt_regs(thread_info_t *ti)
+{
+	gpt_regs_t *cur_gregs;
+
+	cur_gregs = get_gpt_regs(ti);
+	GTI_BUG_ON(cur_gregs == NULL);
+	set_gpt_regs(ti, gpt_regs_index_to_addr(ti, cur_gregs->next_index));
+	return cur_gregs;
+}
+static inline gpt_regs_t *
+get_next_gpt_regs(thread_info_t *ti, gpt_regs_t *gregs)
+{
+	BUG_ON(gregs == NULL);
+	return gpt_regs_index_to_addr(ti, gregs->next_index);
+}
+
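/*
 * The helpers above implement the pointer-free list promised in the
 * gpt_regs comment: links are byte offsets from the bottom of the kernel
 * stack, so the whole chain survives a block copy of the stack (e.g. on
 * fork()), which raw pointers would not. A self-contained miniature:
 */
	#include <assert.h>
	#include <stddef.h>
	#include <string.h>

	struct node { size_t next_index; };	/* offset from bottom, -1 = end */

	int main(void)
	{
		char stack_a[256], stack_b[256];
		struct node *n1 = (struct node *)&stack_a[32];
		struct node *n2 = (struct node *)&stack_a[96];

		n1->next_index = (size_t)-1;		/* list tail */
		n2->next_index = (char *)n1 - stack_a;	/* link by offset */

		/* copy the whole stack: offsets stay valid */
		memcpy(stack_b, stack_a, sizeof(stack_a));
		struct node *m2 = (struct node *)&stack_b[96];
		struct node *m1 = (struct node *)(stack_b + m2->next_index);
		assert(m1->next_index == (size_t)-1);
		return 0;
	}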
Gilyazov (atic@mcst.ru) + */ +#ifndef _E2K_KVM_TLBFLUSH_H +#define _E2K_KVM_TLBFLUSH_H + +#include +#include + + +/* + * Guest VM support on host + * TLB flushing: + * + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables + */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is a native kernel without any virtualization */ +static __always_inline bool +__flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + return false; /* no guests, hence no guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + return false; /* no guests, hence no guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + return false; /* no guests, hence no guest addresses */ +} +static __always_inline bool +__flush_guest_cpu_root_pt(void) +{ + return false; /* no guests, hence no guest addresses */ +} +#else /* CONFIG_VIRTUALIZATION */ +extern void kvm_flush_guest_tlb_mm(struct gmm_struct *gmm); +extern void kvm_flush_guest_tlb_page(struct gmm_struct *gmm, e2k_addr_t addr); +extern void kvm_flush_guest_tlb_range(struct gmm_struct *gmm, + e2k_addr_t start, e2k_addr_t end); +extern void kvm_flush_guest_tlb_pgtables(struct gmm_struct *gmm, + e2k_addr_t start, e2k_addr_t end); +extern void kvm_flush_guest_tlb_range_and_pgtables(struct gmm_struct *gmm, + e2k_addr_t start, e2k_addr_t end); + +/* + * The functions to flush guest CPU root PT on the host return a boolean: + * true if the address or MM is from guest VM space and the flush was done, + * false if the address or MM is not from guest VM space or the flush cannot + * be done + */ +extern bool kvm_do_flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, + e2k_addr_t addr); +extern bool kvm_do_flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern bool kvm_do_flush_guest_cpu_root_pt_mm(struct mm_struct *mm); +extern bool kvm_do_flush_guest_cpu_root_pt(void); +
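A minimal sketch of the intended calling pattern for this boolean contract (illustrative only; the native_flush_tlb_range() fallback name is an assumed placeholder, not declared in this header): generic flush code tries the guest CPU root PT flush first, and a false result means the range does not belong to a guest, so the native flush must be done instead.

	/* illustrative wrapper, assuming a native_flush_tlb_range() helper */
	static inline void example_flush_tlb_range(struct mm_struct *mm,
			e2k_addr_t start, e2k_addr_t end)
	{
		if (!__flush_guest_cpu_root_pt_range(mm, start, end))
			native_flush_tlb_range(mm, start, end);
	}

The __flush_guest_cpu_root_pt_*() wrappers used above are defined at the end of this header.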
+static inline bool +kvm_flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + if (MMU_IS_SEPARATE_PT()) { + /* there can be no CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not a guest VCPU process on the host, */ + /* so it cannot have a guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is a guest process on a guest; a guest has no guests of its own */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt_page(vma, addr); +} + +static inline bool +kvm_flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + if (MMU_IS_SEPARATE_PT()) { + /* there can be no CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not a guest VCPU process on the host, */ + /* so it cannot have a guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is a guest process on a guest; a guest has no guests of its own */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt_range(mm, start, end); +} + +static inline bool +kvm_flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + if (MMU_IS_SEPARATE_PT()) { + /* there can be no CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not a guest VCPU process on the host, */ + /* so it cannot have a guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is a guest process on a guest; a guest has no guests of its own */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt_mm(mm); +} + +static inline bool +kvm_flush_guest_cpu_root_pt(void) +{ + if (MMU_IS_SEPARATE_PT()) { + /* there can be no CPU root PTs */ + return false; + } else if (!test_thread_flag(TIF_VIRTUALIZED_GUEST)) { + /* it is not a guest VCPU process on the host, */ + /* so it cannot have a guest VM */ + return false; + } else if (paravirt_enabled()) { + /* it is a guest process on a guest; a guest has no guests of its own */ + return false; + } + return kvm_do_flush_guest_cpu_root_pt(); +} + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* it is a native host kernel with virtualization support, */ +/* or a paravirtualized host/guest kernel */ +static inline bool +__flush_guest_cpu_root_pt_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + return kvm_flush_guest_cpu_root_pt_page(vma, addr); +} +static inline bool +__flush_guest_cpu_root_pt_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + return kvm_flush_guest_cpu_root_pt_range(mm, start, end); +} +static inline bool +__flush_guest_cpu_root_pt_mm(struct mm_struct *mm) +{ + return kvm_flush_guest_cpu_root_pt_mm(mm); +} +static inline bool +__flush_guest_cpu_root_pt(void) +{ + return kvm_flush_guest_cpu_root_pt(); +} + +#endif /* ! CONFIG_KVM_GUEST_KERNEL */ +#endif /* ! CONFIG_VIRTUALIZATION */ +#endif /* _E2K_KVM_TLBFLUSH_H */ diff --git a/arch/e2k/include/asm/kvm/trace_kvm.h b/arch/e2k/include/asm/kvm/trace_kvm.h new file mode 100644 index 0000000..a877257 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace_kvm.h @@ -0,0 +1,1174 @@ +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoint for guest mode entry. + */ +TRACE_EVENT(kvm_entry, + TP_PROTO(unsigned int vcpu_id), + TP_ARGS(vcpu_id), + + TP_STRUCT__entry( + __field( unsigned int, vcpu_id ) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + ), + + TP_printk("vcpu %u", __entry->vcpu_id) +); + +/* + * Tracepoint for hypercall. + */ +TRACE_EVENT(kvm_hypercall, + TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3), + TP_ARGS(nr, a0, a1, a2, a3), + + TP_STRUCT__entry( + __field( unsigned long, nr ) + __field( unsigned long, a0 ) + __field( unsigned long, a1 ) + __field( unsigned long, a2 ) + __field( unsigned long, a3 ) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->a0 = a0; + __entry->a1 = a1; + __entry->a2 = a2; + __entry->a3 = a3; + ), + + TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", + __entry->nr, __entry->a0, __entry->a1, __entry->a2, + __entry->a3) +); + +/* + * Tracepoint for PIO. + */ +TRACE_EVENT(kvm_pio, + TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, + unsigned int count), + TP_ARGS(rw, port, size, count), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, port ) + __field( unsigned int, size ) + __field( unsigned int, count ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->port = port; + __entry->size = size; + __entry->count = count; + ), + + TP_printk("pio_%s at 0x%x size %d count %d", + __entry->rw ? "write" : "read", + __entry->port, __entry->size, __entry->count) +); + +/* + * Tracepoint for cpuid. 
+ */ +TRACE_EVENT(kvm_cpuid, + TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx, + unsigned long rcx, unsigned long rdx), + TP_ARGS(function, rax, rbx, rcx, rdx), + + TP_STRUCT__entry( + __field( unsigned int, function ) + __field( unsigned long, rax ) + __field( unsigned long, rbx ) + __field( unsigned long, rcx ) + __field( unsigned long, rdx ) + ), + + TP_fast_assign( + __entry->function = function; + __entry->rax = rax; + __entry->rbx = rbx; + __entry->rcx = rcx; + __entry->rdx = rdx; + ), + + TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx", + __entry->function, __entry->rax, + __entry->rbx, __entry->rcx, __entry->rdx) +); + +#define AREG(x) { APIC_##x, "APIC_" #x } + +#define kvm_trace_symbol_apic \ + AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ + AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ + AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ + AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ + AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI) +/* + * Tracepoint for apic access. + */ +TRACE_EVENT(kvm_apic, + TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val), + TP_ARGS(rw, reg, val), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, reg ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("apic_%s %s = 0x%x", + __entry->rw ? "write" : "read", + __print_symbolic(__entry->reg, kvm_trace_symbol_apic), + __entry->val) +); + +#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) +#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) + +#define CREG(x) { CEPIC_##x, "CEPIC_" #x } + +#define kvm_trace_symbol_cepic \ + CREG(CTRL), CREG(ID), CREG(CTRL2), CREG(DAT), CREG(DAT2), \ + CREG(EPIC_INT), CREG(EPIC_INT2), CREG(EPIC_INT3), CREG(CPR), \ + CREG(ESR), CREG(ESR2), CREG(EOI), CREG(CIR), CREG(GSTBASE_LO), \ + CREG(GSTBASE_HI), CREG(GSTID), CREG(PNMIRR), CREG(ICR), \ + CREG(ICR2), CREG(TIMER_LVTT), CREG(TIMER_INIT), CREG(TIMER_CUR),\ + CREG(TIMER_DIV), CREG(SVR), CREG(PNMIRR_MASK), CREG(VECT_INTA) \ + +#define kvm_trace_symbol_epic kvm_trace_symbol_cepic + +/* + * Tracepoint for 32/64 epic access + */ +TRACE_EVENT(kvm_epic, + TP_PROTO(unsigned int rw, unsigned int dword, unsigned int reg, + unsigned long val), + TP_ARGS(rw, dword, reg, val), + + TP_STRUCT__entry( + __field(unsigned int, rw) + __field(unsigned int, dword) + __field(unsigned int, reg) + __field(unsigned long, val) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->dword = dword; + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("epic_%s%s %s = 0x%lx", + __entry->rw ? "write" : "read", + __entry->dword ? 
"64" : "32", + __print_symbolic(__entry->reg, kvm_trace_symbol_epic), + __entry->val) +); + +#define trace_kvm_epic_read_w(reg, val) trace_kvm_epic(0, 0, reg, val) +#define trace_kvm_epic_write_w(reg, val) trace_kvm_epic(1, 0, reg, val) +#define trace_kvm_epic_read_d(reg, val) trace_kvm_epic(0, 1, reg, val) +#define trace_kvm_epic_write_d(reg, val) trace_kvm_epic(1, 1, reg, val) + +/* + * Tracepoint for kvm guest exit: + */ +TRACE_EVENT(kvm_exit, + TP_PROTO(unsigned int exit_reason, unsigned long guest_rip), + TP_ARGS(exit_reason, guest_rip), + + TP_STRUCT__entry( + __field( unsigned int, exit_reason ) + __field( unsigned long, guest_rip ) + ), + + TP_fast_assign( + __entry->exit_reason = exit_reason; + __entry->guest_rip = guest_rip; + ), + + TP_printk("reason %d rip 0x%lx", + __entry->exit_reason, + __entry->guest_rip) +); + +/* + * Tracepoint for kvm interrupt injection: + */ +TRACE_EVENT(kvm_inj_virq, + TP_PROTO(unsigned int irq), + TP_ARGS(irq), + + TP_STRUCT__entry( + __field( unsigned int, irq ) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("irq %u", __entry->irq) +); + +/* + * Tracepoint for page fault. + */ +TRACE_EVENT(kvm_page_fault, + TP_PROTO(unsigned long fault_address, unsigned int error_code), + TP_ARGS(fault_address, error_code), + + TP_STRUCT__entry( + __field( unsigned long, fault_address ) + __field( unsigned int, error_code ) + ), + + TP_fast_assign( + __entry->fault_address = fault_address; + __entry->error_code = error_code; + ), + + TP_printk("address %lx error_code %x", + __entry->fault_address, __entry->error_code) +); + +/* + * Tracepoint for guest MSR access. + */ +TRACE_EVENT(kvm_msr, + TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data), + TP_ARGS(rw, ecx, data), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, ecx ) + __field( unsigned long, data ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->ecx = ecx; + __entry->data = data; + ), + + TP_printk("msr_%s %x = 0x%lx", + __entry->rw ? "write" : "read", + __entry->ecx, __entry->data) +); + +#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data) +#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data) + +/* + * Tracepoint for guest CR access. + */ +TRACE_EVENT(kvm_cr, + TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), + TP_ARGS(rw, cr, val), + + TP_STRUCT__entry( + __field( unsigned int, rw ) + __field( unsigned int, cr ) + __field( unsigned long, val ) + ), + + TP_fast_assign( + __entry->rw = rw; + __entry->cr = cr; + __entry->val = val; + ), + + TP_printk("cr_%s %x = 0x%lx", + __entry->rw ? "write" : "read", + __entry->cr, __entry->val) +); + +#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) +#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) + +TRACE_EVENT(kvm_pic_set_irq, + TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), + TP_ARGS(chip, pin, elcr, imr, coalesced), + + TP_STRUCT__entry( + __field( __u8, chip ) + __field( __u8, pin ) + __field( __u8, elcr ) + __field( __u8, imr ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->chip = chip; + __entry->pin = pin; + __entry->elcr = elcr; + __entry->imr = imr; + __entry->coalesced = coalesced; + ), + + TP_printk("chip %u pin %u (%s%s)%s", + __entry->chip, __entry->pin, + (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", + (__entry->imr & (1 << __entry->pin)) ? "|masked":"", + __entry->coalesced ? 
" (coalesced)" : "") +); + +#define kvm_apic_dst_shorthand \ + {0x0, "dst"}, \ + {0x1, "self"}, \ + {0x2, "all"}, \ + {0x3, "all-but-self"} + +#define kvm_deliver_mode \ + {0x0, "Fixed"}, \ + {0x1, "LowPrio"}, \ + {0x2, "SMI"}, \ + {0x3, "Res3"}, \ + {0x4, "NMI"}, \ + {0x5, "INIT"}, \ + {0x6, "SIPI"}, \ + {0x7, "ExtINT"} + +TRACE_EVENT(kvm_apic_ipi, + TP_PROTO(__u32 icr_low, __u32 dest_id), + TP_ARGS(icr_low, dest_id), + + TP_STRUCT__entry( + __field( __u32, icr_low ) + __field( __u32, dest_id ) + ), + + TP_fast_assign( + __entry->icr_low = icr_low; + __entry->dest_id = dest_id; + ), + + TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", + __entry->dest_id, (u8)__entry->icr_low, + __print_symbolic((__entry->icr_low >> 8 & 0x7), + kvm_deliver_mode), + (__entry->icr_low & (1<<11)) ? "logical" : "physical", + (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", + (__entry->icr_low & (1<<15)) ? "level" : "edge", + __print_symbolic((__entry->icr_low >> 18 & 0x3), + kvm_apic_dst_shorthand)) +); + +TRACE_EVENT(kvm_apic_accept_irq, + TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced), + TP_ARGS(apicid, dm, tm, vec, coalesced), + + TP_STRUCT__entry( + __field( __u32, apicid ) + __field( __u16, dm ) + __field( __u8, tm ) + __field( __u8, vec ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->apicid = apicid; + __entry->dm = dm; + __entry->tm = tm; + __entry->vec = vec; + __entry->coalesced = coalesced; + ), + + TP_printk("apicid %x vec %u (%s|%s)%s", + __entry->apicid, __entry->vec, + __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), + __entry->tm ? "level" : "edge", + __entry->coalesced ? " (coalesced)" : "") +); + +TRACE_EVENT(kvm_epic_ipi, + TP_PROTO(__u32 dest_id, __u32 vector), + TP_ARGS(dest_id, vector), + + TP_STRUCT__entry( + __field(__u32, dest_id) + __field(__u32, vector) + ), + + TP_fast_assign( + __entry->dest_id = dest_id; + __entry->vector = vector; + ), + + TP_printk("dst %u vec %u", + __entry->dest_id, __entry->vector) +); + +#define kvm_epic_deliver_mode \ + {0x0, "fixed(0)"}, \ + {0x1, "fixed(1)"}, \ + {0x2, "smi"}, \ + {0x3, "nm_special"}, \ + {0x4, "nmi"}, \ + {0x5, "init"}, \ + {0x6, "startup"}, \ + {0x7, "reserved"} + +TRACE_EVENT(kvm_ioepic_set_irq, + TP_PROTO(__u32 dst, __u32 vector, __u32 dlvm, bool level, bool mask, + int pin, int pin_status, bool coalesced), + TP_ARGS(dst, vector, dlvm, level, mask, pin, pin_status, coalesced), + + TP_STRUCT__entry( + __field( __u32, dst ) + __field( __u32, vector ) + __field( __u32, dlvm ) + __field( bool, level ) + __field( bool, mask ) + __field( int, pin ) + __field( int, pin_status ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->dst = dst; + __entry->vector = vector; + __entry->dlvm = dlvm; + __entry->level = level; + __entry->mask = mask; + __entry->pin = pin; + __entry->pin = pin_status; + __entry->coalesced = coalesced; + ), + + TP_printk("pin %u %s. dst %d vec 0x%x (%s|%s%s)%s", + __entry->pin, __entry->pin_status ? "set" : "unset", + __entry->dst, __entry->vector, + __print_symbolic(__entry->dlvm, kvm_epic_deliver_mode), + __entry->level ? "level" : "edge", + __entry->mask ? "|masked" : "", + __entry->coalesced ? 
" (coalesced)" : "") +); + +TRACE_EVENT(kvm_epic_accept_irq, + TP_PROTO(__u32 epicid, __u16 dm, __u8 tm, __u32 vec, + bool coalesced), + TP_ARGS(epicid, dm, tm, vec, coalesced), + + TP_STRUCT__entry( + __field(__u32, epicid) + __field(__u16, dm) + __field(__u8, tm) + __field(__u32, vec) + __field(bool, coalesced) + ), + + TP_fast_assign( + __entry->epicid = epicid; + __entry->dm = dm; + __entry->tm = tm; + __entry->vec = vec; + __entry->coalesced = coalesced; + ), + + TP_printk("epicid %x vec %u (%s|%s)%s", + __entry->epicid, __entry->vec, + __print_symbolic(__entry->dm, kvm_epic_deliver_mode), + __entry->tm ? "level" : "edge", + __entry->coalesced ? " (coalesced)" : "") +); + +TRACE_EVENT(kvm_epic_eoi, + TP_PROTO(__u32 vector), + TP_ARGS(vector), + + TP_STRUCT__entry( + __field(__u32, vector) + ), + + TP_fast_assign( + __entry->vector = vector; + ), + + TP_printk("vector %u", __entry->vector) +); + +/* + * Tracepoint for nested VMRUN + */ +TRACE_EVENT(kvm_nested_vmrun, + TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, + __u32 event_inj, bool npt), + TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( __u64, vmcb ) + __field( __u64, nested_rip ) + __field( __u32, int_ctl ) + __field( __u32, event_inj ) + __field( bool, npt ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->vmcb = vmcb; + __entry->nested_rip = nested_rip; + __entry->int_ctl = int_ctl; + __entry->event_inj = event_inj; + __entry->npt = npt; + ), + + TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx " + "int_ctl: 0x%08x event_inj: 0x%08x npt: %s\n", + __entry->rip, __entry->vmcb, __entry->nested_rip, + __entry->int_ctl, __entry->event_inj, + __entry->npt ? "on" : "off") +); + +/* + * Tracepoint for #VMEXIT while nested + */ +TRACE_EVENT(kvm_nested_vmexit, + TP_PROTO(__u64 rip, __u32 exit_code, + __u64 exit_info1, __u64 exit_info2, + __u32 exit_int_info, __u32 exit_int_info_err), + TP_ARGS(rip, exit_code, exit_info1, exit_info2, + exit_int_info, exit_int_info_err), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( __u32, exit_code ) + __field( __u64, exit_info1 ) + __field( __u64, exit_info2 ) + __field( __u32, exit_int_info ) + __field( __u32, exit_int_info_err ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->exit_code = exit_code; + __entry->exit_info1 = exit_info1; + __entry->exit_info2 = exit_info2; + __entry->exit_int_info = exit_int_info; + __entry->exit_int_info_err = exit_int_info_err; + ), + TP_printk("rip: 0x%016llx reason: %d ext_inf1: 0x%016llx " + "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n", + __entry->rip, + __entry->exit_code, + __entry->exit_info1, __entry->exit_info2, + __entry->exit_int_info, __entry->exit_int_info_err) +); + +/* + * Tracepoint for #VMEXIT reinjected to the guest + */ +TRACE_EVENT(kvm_nested_vmexit_inject, + TP_PROTO(__u32 exit_code, + __u64 exit_info1, __u64 exit_info2, + __u32 exit_int_info, __u32 exit_int_info_err), + TP_ARGS(exit_code, exit_info1, exit_info2, + exit_int_info, exit_int_info_err), + + TP_STRUCT__entry( + __field( __u32, exit_code ) + __field( __u64, exit_info1 ) + __field( __u64, exit_info2 ) + __field( __u32, exit_int_info ) + __field( __u32, exit_int_info_err ) + ), + + TP_fast_assign( + __entry->exit_code = exit_code; + __entry->exit_info1 = exit_info1; + __entry->exit_info2 = exit_info2; + __entry->exit_int_info = exit_int_info; + __entry->exit_int_info_err = exit_int_info_err; + ), + + TP_printk("reason: %d ext_inf1: 0x%016llx " + "ext_inf2: 
0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n", + __entry->exit_code, + __entry->exit_info1, __entry->exit_info2, + __entry->exit_int_info, __entry->exit_int_info_err) +); + +/* + * Tracepoint for nested #vmexit because of interrupt pending + */ +TRACE_EVENT(kvm_nested_intr_vmexit, + TP_PROTO(__u64 rip), + TP_ARGS(rip), + + TP_STRUCT__entry( + __field( __u64, rip ) + ), + + TP_fast_assign( + __entry->rip = rip; + ), + + TP_printk("rip: 0x%016llx\n", __entry->rip) +); + +/* + * Tracepoint for INVLPGA + */ +TRACE_EVENT(kvm_invlpga, + TP_PROTO(__u64 rip, int asid, u64 address), + TP_ARGS(rip, asid, address), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( int, asid ) + __field( __u64, address ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->asid = asid; + __entry->address = address; + ), + + TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx\n", + __entry->rip, __entry->asid, __entry->address) +); + +/* + * Tracepoint for SKINIT + */ +TRACE_EVENT(kvm_skinit, + TP_PROTO(__u64 rip, __u32 slb), + TP_ARGS(rip, slb), + + TP_STRUCT__entry( + __field( __u64, rip ) + __field( __u32, slb ) + ), + + TP_fast_assign( + __entry->rip = rip; + __entry->slb = slb; + ), + + TP_printk("rip: 0x%016llx slb: 0x%08x\n", + __entry->rip, __entry->slb) +); + +#define TIRHI_EXC_MASK 0x00000fffffffffffULL +#define TIRHI_ALS_MASK 0x0003f00000000000ULL +#define TIRHI_ALS_SHIFT 44ULL +#define TIRHI_MOVA_MASK 0x00f0000000000000ULL +#define TIRHI_MOVA0_MASK 0x0010000000000000ULL +#define TIRHI_MOVA1_MASK 0x0020000000000000ULL +#define TIRHI_MOVA2_MASK 0x0040000000000000ULL +#define TIRHI_MOVA3_MASK 0x0080000000000000ULL + +#define E2K_TRACE_PRINT_TIR_HI(entry) \ + (entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK)) ? 
\ + __print_flags(entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK), "|", \ + { TIRHI_MOVA0_MASK, "mova0" }, \ + { TIRHI_MOVA1_MASK, "mova1" }, \ + { TIRHI_MOVA2_MASK, "mova2" }, \ + { TIRHI_MOVA3_MASK, "mova3" }, \ + { exc_illegal_opcode_mask, "illegal_opcode" }, \ + { exc_priv_action_mask, "priv_action" }, \ + { exc_fp_disabled_mask, "fp_disabled" }, \ + { exc_fp_stack_u_mask, "fp_stack_u" }, \ + { exc_d_interrupt_mask, "d_interrupt" }, \ + { exc_diag_ct_cond_mask, "diag_ct_cond" }, \ + { exc_diag_instr_addr_mask, "diag_instr_addr" }, \ + { exc_illegal_instr_addr_mask, "illegal_instr_addr" }, \ + { exc_instr_debug_mask, "instr_debug" }, \ + { exc_window_bounds_mask, "window_bounds" }, \ + { exc_user_stack_bounds_mask, "user_stack_bounds" }, \ + { exc_proc_stack_bounds_mask, "proc_stack_bounds" }, \ + { exc_chain_stack_bounds_mask, "chain_stack_bounds" }, \ + { exc_fp_stack_o_mask, "fp_stack_o" }, \ + { exc_diag_cond_mask, "diag_cond" }, \ + { exc_diag_operand_mask, "diag_operand" }, \ + { exc_illegal_operand_mask, "illegal_operand" }, \ + { exc_array_bounds_mask, "array_bounds" }, \ + { exc_access_rights_mask, "access_rights" }, \ + { exc_addr_not_aligned_mask, "addr_not_aligned" }, \ + { exc_instr_page_miss_mask, "instr_page_miss" }, \ + { exc_instr_page_prot_mask, "instr_page_prot" }, \ + { exc_ainstr_page_miss_mask, "ainstr_page_miss" }, \ + { exc_ainstr_page_prot_mask, "ainstr_page_prot" }, \ + { exc_last_wish_mask, "last_wish" }, \ + { exc_base_not_aligned_mask, "base_not_aligned" }, \ + { exc_software_trap_mask, "software_trap" }, \ + { exc_data_debug_mask, "data_debug" }, \ + { exc_data_page_mask, "data_page" }, \ + { exc_recovery_point_mask, "recovery_point" }, \ + { exc_interrupt_mask, "interrupt" }, \ + { exc_nm_interrupt_mask, "nm_interrupt" }, \ + { exc_div_mask, "div" }, \ + { exc_fp_mask, "fp" }, \ + { exc_mem_lock_mask, "mem_lock" }, \ + { exc_mem_lock_as_mask, "mem_lock_as" }, \ + { exc_mem_error_out_cpu_mask, "mem_error_out_cpu" }, \ + { exc_mem_error_MAU_mask, "mem_error_MAU" }, \ + { exc_mem_error_L2_mask, "mem_error_L2" }, \ + { exc_mem_error_L1_35_mask, "mem_error_L1_35" }, \ + { exc_mem_error_L1_02_mask, "mem_error_L1_02" }, \ + { exc_mem_error_ICACHE_mask, "mem_error_ICACHE" } \ + ) : "(none)" + +TRACE_EVENT( + intc_tir, + + TP_PROTO(u64 tir_lo, u64 tir_hi), + + TP_ARGS(tir_lo, tir_hi), + + TP_STRUCT__entry( + __field( u64, tir_lo ) + __field( u64, tir_hi ) + ), + + TP_fast_assign( + __entry->tir_lo = tir_lo; + __entry->tir_hi = tir_hi; + ), + + TP_printk("TIR%lld: ip 0x%llx, als 0x%llx\n" + " exceptions: %s\n" + , + __entry->tir_hi >> 56, + __entry->tir_lo & E2K_VA_MASK, + (__entry->tir_hi & TIRHI_ALS_MASK) >> TIRHI_ALS_SHIFT, + E2K_TRACE_PRINT_TIR_HI(__entry->tir_hi) + ) +); + +TRACE_EVENT( + intc_ctprs, + + TP_PROTO(u64 ctpr1, u64 ctpr1_hi, u64 ctpr2, u64 ctpr2_hi, u64 ctpr3, + u64 ctpr3_hi), + + TP_ARGS(ctpr1, ctpr1_hi, ctpr2, ctpr2_hi, ctpr3, ctpr3_hi), + + TP_STRUCT__entry( + __field( u64, ctpr1 ) + __field( u64, ctpr1_hi ) + __field( u64, ctpr2 ) + __field( u64, ctpr2_hi ) + __field( u64, ctpr3 ) + __field( u64, ctpr3_hi ) + ), + + TP_fast_assign( + __entry->ctpr1 = ctpr1; + __entry->ctpr1_hi = ctpr1_hi; + __entry->ctpr2 = ctpr2; + __entry->ctpr2_hi = ctpr2_hi; + __entry->ctpr3 = ctpr3; + __entry->ctpr3_hi = ctpr3_hi; + ), + + TP_printk("ctpr1 0x%llx, ctpr1_hi 0x%llx\n" + "ctpr2 0x%llx, ctpr2_hi 0x%llx\n" + "ctpr3 0x%llx, ctpr3_hi 0x%llx\n", + __entry->ctpr1, __entry->ctpr1_hi, + __entry->ctpr2, __entry->ctpr2_hi, + __entry->ctpr3, __entry->ctpr3_hi) +); + 
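Each TRACE_EVENT() in this header expands, through the standard kernel tracepoint machinery, to a trace_<name>() helper that the host calls at the matching point. A minimal sketch of how the intc_ctprs event above might be fired from intercept-handling code (the regs variable and its field names are assumed for illustration; AW() is the access-word macro already used throughout this header):

	/* illustrative call site; "regs" and its ctprN fields are assumed */
	trace_intc_ctprs(AW(regs->ctpr1), AW(regs->ctpr1_hi),
			 AW(regs->ctpr2), AW(regs->ctpr2_hi),
			 AW(regs->ctpr3), AW(regs->ctpr3_hi));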
+TRACE_EVENT( + intc_aau, + + TP_PROTO(const e2k_aau_t *aau_ctxt, u64 lsr, u64 lsr1, u64 ilcr, u64 ilcr1), + + TP_ARGS(aau_ctxt, lsr, lsr1, ilcr, ilcr1), + + TP_STRUCT__entry( + __field( u32, aasr ) + __field( u64, lsr ) + __field( u64, lsr1 ) + __field( u64, ilcr ) + __field( u64, ilcr1 ) + __field( u32, aafstr ) + __field( u64, aaldm ) + __field( u64, aaldv ) + + /* Synchronous part */ + __array( u64, aastis, AASTIS_REGS_NUM); + __field( u32, aasti_tags ); + + /* Asynchronous part */ + __array( u64, aainds, AAINDS_REGS_NUM); + __field( u32, aaind_tags ); + __array( u64, aaincrs, AAINCRS_REGS_NUM); + __field( u32, aaincr_tags ); + __array( u64, aads, AADS_REGS_NUM * 2); + + /* Since iset v6 */ + __array( u64, aaldi, AALDIS_REGS_NUM); + + ), + + TP_fast_assign( + int i; + + __entry->aasr = AW(aau_ctxt->guest_aasr); + __entry->lsr = lsr; + __entry->lsr1 = lsr1; + __entry->ilcr = ilcr; + __entry->ilcr1 = ilcr1; + __entry->aafstr = aau_ctxt->aafstr; + __entry->aaldm = AW(aau_ctxt->aaldm); + __entry->aaldv = AW(aau_ctxt->aaldv); + + for (i = 0; i < AADS_REGS_NUM; i++) { + __entry->aads[2 * i] = AW(aau_ctxt->aads[i]).lo; + __entry->aads[2 * i + 1] = AW(aau_ctxt->aads[i]).hi; + } + + for (i = 0; i < AAINCRS_REGS_NUM; i++) + __entry->aaincrs[i] = aau_ctxt->aaincrs[i]; + + __entry->aaincr_tags = aau_ctxt->aaincr_tags; + + for (i = 0; i < AAINDS_REGS_NUM; i++) + __entry->aainds[i] = aau_ctxt->aainds[i]; + + __entry->aaind_tags = aau_ctxt->aaind_tags; + + for (i = 0; i < AASTIS_REGS_NUM; i++) + __entry->aastis[i] = aau_ctxt->aastis[i]; + + __entry->aasti_tags = aau_ctxt->aasti_tags; + + for (i = 0; i < AALDIS_REGS_NUM; i++) + __entry->aaldi[i] = aau_ctxt->aaldi[i]; + ), + + TP_printk("aasr 0x%x, lsr 0x%llx, lsr1 0x%llx, ilcr 0x%llx, ilcr1 0x%llx\n" + "aaldv 0x%llx, aaldm = 0x%llx\n" + "aads lo/hi 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx 0x%llx/0x%llx\n" + "aaincrs 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "aaincr_tags 0x%x\n" + "aainds 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "aaind_tags 0x%x\n" + "aastis 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "aasti_tags 0x%x\n" + "aaldis 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n" + "0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n", + __entry->aasr, __entry->lsr, __entry->lsr1, + __entry->ilcr, __entry->ilcr1, + __entry->aaldv, __entry->aaldm, + __entry->aads[0], __entry->aads[1], + __entry->aads[2], __entry->aads[3], + __entry->aads[4], __entry->aads[5], + __entry->aads[6], __entry->aads[7], + __entry->aads[8], __entry->aads[9], + __entry->aads[10], __entry->aads[11], + __entry->aads[12], __entry->aads[13], + __entry->aads[14], __entry->aads[15], + __entry->aads[16], __entry->aads[17], + __entry->aads[18], __entry->aads[19], + __entry->aads[20], __entry->aads[21], + __entry->aads[22], __entry->aads[23], + 
__entry->aads[24], __entry->aads[25], + __entry->aads[26], __entry->aads[27], + __entry->aads[28], __entry->aads[29], + __entry->aads[30], __entry->aads[31], + __entry->aads[32], __entry->aads[33], + __entry->aads[34], __entry->aads[35], + __entry->aads[36], __entry->aads[37], + __entry->aads[38], __entry->aads[39], + __entry->aads[40], __entry->aads[41], + __entry->aads[42], __entry->aads[43], + __entry->aads[44], __entry->aads[45], + __entry->aads[46], __entry->aads[47], + __entry->aads[48], __entry->aads[49], + __entry->aads[50], __entry->aads[51], + __entry->aads[52], __entry->aads[53], + __entry->aads[54], __entry->aads[55], + __entry->aads[56], __entry->aads[57], + __entry->aads[58], __entry->aads[59], + __entry->aads[60], __entry->aads[61], + __entry->aads[62], __entry->aads[63], + __entry->aaincrs[0], __entry->aaincrs[1], + __entry->aaincrs[2], __entry->aaincrs[3], + __entry->aaincrs[4], __entry->aaincrs[5], + __entry->aaincrs[6], __entry->aaincrs[7], + __entry->aaincr_tags, + __entry->aainds[0], __entry->aainds[1], + __entry->aainds[2], __entry->aainds[3], + __entry->aainds[4], __entry->aainds[5], + __entry->aainds[6], __entry->aainds[7], + __entry->aainds[8], __entry->aainds[9], + __entry->aainds[10], __entry->aainds[11], + __entry->aainds[12], __entry->aainds[13], + __entry->aainds[14], __entry->aainds[15], + __entry->aaind_tags, + __entry->aastis[0], __entry->aastis[1], + __entry->aastis[2], __entry->aastis[3], + __entry->aastis[4], __entry->aastis[5], + __entry->aastis[6], __entry->aastis[7], + __entry->aastis[8], __entry->aastis[9], + __entry->aastis[10], __entry->aastis[11], + __entry->aastis[12], __entry->aastis[13], + __entry->aastis[14], __entry->aastis[15], + __entry->aasti_tags, + __entry->aaldi[0], __entry->aaldi[1], + __entry->aaldi[2], __entry->aaldi[3], + __entry->aaldi[4], __entry->aaldi[5], + __entry->aaldi[6], __entry->aaldi[7], + __entry->aaldi[8], __entry->aaldi[9], + __entry->aaldi[10], __entry->aaldi[11], + __entry->aaldi[12], __entry->aaldi[13], + __entry->aaldi[14], __entry->aaldi[15], + __entry->aaldi[16], __entry->aaldi[17], + __entry->aaldi[18], __entry->aaldi[19], + __entry->aaldi[20], __entry->aaldi[21], + __entry->aaldi[22], __entry->aaldi[23], + __entry->aaldi[24], __entry->aaldi[25], + __entry->aaldi[26], __entry->aaldi[27], + __entry->aaldi[28], __entry->aaldi[29], + __entry->aaldi[30], __entry->aaldi[31]) +); + +TRACE_EVENT( + generic_hcall, + + TP_PROTO(unsigned long hcall_num, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + unsigned long arg6, unsigned long gsbr, + unsigned long cpu), + + TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, gsbr, cpu), + + TP_STRUCT__entry( + __field( u64, hcall_num ) + __field( u64, arg1 ) + __field( u64, arg2 ) + __field( u64, arg3 ) + __field( u64, arg4 ) + __field( u64, arg5 ) + __field( u64, arg6 ) + __field( u64, gsbr ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->hcall_num = hcall_num; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + __entry->arg4 = arg4; + __entry->arg5 = arg5; + __entry->arg6 = arg6; + __entry->gsbr = gsbr; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, generic hypercall %llu\n" + "Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx; gsbr: 0x%llx\n" + , + __entry->cpu, + __entry->hcall_num, + __entry->arg1, + __entry->arg2, + __entry->arg3, + __entry->arg4, + __entry->arg5, + __entry->arg6, + __entry->gsbr) +); + +TRACE_EVENT( + light_hcall, + + TP_PROTO(unsigned long hcall_num, 
unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, + unsigned long arg6, unsigned long cpu), + + TP_ARGS(hcall_num, arg1, arg2, arg3, arg4, arg5, arg6, cpu), + + TP_STRUCT__entry( + __field( u64, hcall_num ) + __field( u64, arg1 ) + __field( u64, arg2 ) + __field( u64, arg3 ) + __field( u64, arg4 ) + __field( u64, arg5 ) + __field( u64, arg6 ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->hcall_num = hcall_num; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + __entry->arg4 = arg4; + __entry->arg5 = arg5; + __entry->arg6 = arg6; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, light hypercall %llu\n" + "Args: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + , + __entry->cpu, + __entry->hcall_num, + __entry->arg1, + __entry->arg2, + __entry->arg3, + __entry->arg4, + __entry->arg5, + __entry->arg6) +); + +TRACE_EVENT( + generic_hcall_exit, + + TP_PROTO(unsigned long ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( u64, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("Generic hypercall exit: %llu\n", __entry->ret) +); + +TRACE_EVENT( + light_hcall_exit, + + TP_PROTO(unsigned long ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( u64, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("Light hypercall exit: %llu\n", __entry->ret) +); + +TRACE_EVENT( + vcpu_put, + + TP_PROTO(int vcpu, int cpu), + + TP_ARGS(vcpu, cpu), + + TP_STRUCT__entry( + __field( int, vcpu ) + __field( int, cpu ) + ), + + TP_fast_assign( + __entry->vcpu = vcpu; + __entry->cpu = cpu; + ), + + TP_printk("vcpu %d, cpu %d\n", __entry->vcpu, __entry->cpu) +); + +TRACE_EVENT( + vcpu_load, + + TP_PROTO(int vcpu, int last_cpu, int cpu), + + TP_ARGS(vcpu, last_cpu, cpu), + + TP_STRUCT__entry( + __field( int, vcpu ) + __field( int, last_cpu ) + __field( int, cpu ) + ), + + TP_fast_assign( + __entry->vcpu = vcpu; + __entry->last_cpu = last_cpu; + __entry->cpu = cpu; + ), + + TP_printk("vcpu %d, cpu %d, last_cpu %d\n", __entry->vcpu, __entry->cpu, + __entry->last_cpu) +); + +TRACE_EVENT( + kvm_handle_rmap_range, + + TP_PROTO(u64 hva_start, u64 hva_end, u64 gpa_start, u64 gpa_end, + void *handler), + + TP_ARGS(hva_start, hva_end, gpa_start, gpa_end, handler), + + TP_STRUCT__entry( + __field( u64, hva_start ) + __field( u64, hva_end ) + __field( u64, gpa_start ) + __field( u64, gpa_end ) + __field( void *, handler ) + ), + + TP_fast_assign( + __entry->hva_start = hva_start; + __entry->hva_end = hva_end; + __entry->gpa_start = gpa_start; + __entry->gpa_end = gpa_end; + __entry->handler = handler; + ), + + TP_printk("HVA 0x%llx - 0x%llx; GPA 0x%llx - 0x%llx; handler 0x%px\n", + __entry->hva_start, __entry->hva_end, + __entry->gpa_start, __entry->gpa_end, + __entry->handler) +); + +#endif /* _TRACE_KVM_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_kvm + +/* This part must be outside protection */ +#include diff --git a/arch/e2k/include/asm/kvm/trace_kvm_hv.h b/arch/e2k/include/asm/kvm/trace_kvm_hv.h new file mode 100644 index 0000000..153b160 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace_kvm_hv.h @@ -0,0 +1,454 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm_hv + +#if !defined(_TRACE_KVM_HV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_HV_H + +#include +#include + +#define E2K_TRACE_PRINT_CU_HDR_LO(entry) \ + __print_flags(entry, "|", \ + { 
intc_cu_hdr_lo_rr_idr_mask, "rr_idr" }, \ + { intc_cu_hdr_lo_rr_clkr_mask, "rr_clkr" }, \ + { intc_cu_hdr_lo_rr_sclkr_mask, "rr_sclkr" }, \ + { intc_cu_hdr_lo_rr_dbg_mask, "rr_dbg" }, \ + { intc_cu_hdr_lo_rw_core_mode_mask, "rw_core_mode" }, \ + { intc_cu_hdr_lo_rw_clkr_mask, "rw_clkr" }, \ + { intc_cu_hdr_lo_rw_sclkr_mask, "rw_sclkr" }, \ + { intc_cu_hdr_lo_rw_sclkm3_mask, "rw_sclkm3" }, \ + { intc_cu_hdr_lo_rw_dbg_mask, "rw_dbg" }, \ + { intc_cu_hdr_lo_hcem_mask, "hcem" }, \ + { intc_cu_hdr_lo_virt_mask, "virt" }, \ + { intc_cu_hdr_lo_stop_mask, "stop" }, \ + { intc_cu_hdr_lo_hret_last_wish_mask, "hret_last_wish" }, \ + { intc_cu_hdr_lo_exc_instr_debug_mask, "exc_instr_debug" }, \ + { intc_cu_hdr_lo_exc_data_debug_mask, "exc_data_debug" }, \ + { intc_cu_hdr_lo_exc_instr_page_mask, "exc_instr_page" }, \ + { intc_cu_hdr_lo_exc_data_page_mask, "exc_data_page" }, \ + { intc_cu_hdr_lo_exc_mova_mask, "exc_mova" }, \ + { intc_cu_hdr_lo_exc_interrupt_mask, "exc_interrupt" }, \ + { intc_cu_hdr_lo_exc_nm_interrupt_mask, "exc_nm_interrupt" }, \ + { intc_cu_hdr_lo_hv_int_mask, "hv_int" }, \ + { intc_cu_hdr_lo_hv_nm_int_mask, "hv_nm_int" }, \ + { intc_cu_hdr_lo_g_tmr_mask, "g_tmr" }, \ + { intc_cu_hdr_lo_rr_mask, "rr" }, \ + { intc_cu_hdr_lo_rw_mask, "rw" }, \ + { intc_cu_hdr_lo_exc_mem_error_mask, "exc_mem_error" }, \ + { intc_cu_hdr_lo_wait_trap_mask, "wait_trap" }, \ + { intc_cu_hdr_lo_dbg_mask, "dbg" }, \ + { intc_cu_hdr_lo_tir_fz_mask, "tir_fz" }) + +#define E2K_TRACE_PRINT_CU_INFO_LO(entry) \ + __print_symbolic(intc_cu_info_lo_get_event_code(entry), \ + { ICE_FORCED, "FORCED" }, \ + { ICE_READ_CU, "READ_CU" }, \ + { ICE_WRITE_CU, "WRITE_CU" }, \ + { ICE_MASKED_HCALL, "MASKED_HCALL" }) + +#define E2K_TRACE_PRINT_MU_INFO_HDR(entry) \ + __print_symbolic(intc_mu_info_lo_get_event_code(entry), \ + { IME_FORCED, "FORCED" }, \ + { IME_FORCED_GVA, "FORCED_GVA" }, \ + { IME_SHADOW_DATA, "SHADOW_DATA" }, \ + { IME_GPA_DATA, "GPA_DATA" }, \ + { IME_GPA_INSTR, "GPA_INSTR" }, \ + { IME_GPA_AINSTR, "GPA_AINSTR" }, \ + { IME_MAS_IOADDR, "MAS_IOADDR" }, \ + { IME_READ_MU, "READ_MU" }, \ + { IME_WRITE_MU, "WRITE_MU" }, \ + { IME_CACHE_FLUSH, "CACHE_FLUSH" }, \ + { IME_CACHE_LINE_FLUSH, "CACHE_LINE_FLUSH" }, \ + { IME_ICACHE_FLUSH, "ICACHE_FLUSH" }, \ + { IME_ICACHE_LINE_FLUSH_USER, "ICACHE_LINE_FLUSH_USER" }, \ + { IME_ICACHE_LINE_FLUSH_SYSTEM, "ICACHE_LINE_FLUSH_SYSTEM" }, \ + { IME_TLB_FLUSH, "TLB_FLUSH" }, \ + { IME_TLB_PAGE_FLUSH_LAST, "TLB_PAGE_FLUSH_LAST" }, \ + { IME_TLB_PAGE_FLUSH_UPPER, "TLB_PAGE_FLUSH_UPPER" }, \ + { IME_TLB_ENTRY_PROBE, "TLB_ENTRY_PROBE" }) + +#define E2K_PRINT_INTC_CU_ENTRY(__entry, i) \ + (__entry->cu_num > i) ? \ + E2K_TRACE_PRINT_CU_INFO_LO(__entry->cu[2 * i]) : "(none)", \ + (__entry->cu_num > i) ? __entry->cu[2 * i] : 0ULL, \ + (__entry->cu_num > i) ? __entry->cu[2 * i + 1] : 0ULL + +#define E2K_PRINT_INTC_MU_ENTRY(__entry, mu_num, i) \ + (mu_num > i) ? \ + E2K_TRACE_PRINT_MU_INFO_HDR(__entry->mu[7 * i]) : "(none)", \ + (mu_num > i) ? __entry->mu[7 * i] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 1] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 2] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 3] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 4] : 0ULL, \ + (mu_num > i) ? __entry->mu[7 * i + 5] : 0ULL, \ + (mu_num > i) ? 
__entry->mu[7 * i + 6] : 0ULL + + +TRACE_EVENT( + intc, + + TP_PROTO(const struct kvm_intc_cpu_context *intc_ctxt, + u64 guest_ip, u64 cpu), + + TP_ARGS(intc_ctxt, guest_ip, cpu), + + TP_STRUCT__entry( + __field( int, cu_num ) + __field( int, mu_num ) + __field( u64, cu_hdr_lo ) + __array( u64, cu, INTC_INFO_CU_ENTRY_MAX ) + __array( u64, mu, INTC_INFO_MU_MAX ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->cu_num = intc_ctxt->cu_num; + __entry->mu_num = intc_ctxt->mu_num; + + if (__entry->cu_num >= 0) + __entry->cu_hdr_lo = AW(intc_ctxt->cu.header.lo); + + if (__entry->cu_num > 0) { + int i; + for (i = 0; i < __entry->cu_num; i++) { + __entry->cu[2 * i] = + AW(intc_ctxt->cu.entry[i].lo); + __entry->cu[2 * i + 1] = + intc_ctxt->cu.entry[i].hi; + } + } + + if (__entry->mu_num > 0) { + int i; + for (i = 0; i < __entry->mu_num; i++) { + __entry->mu[7 * i] = + AW(intc_ctxt->mu[i].hdr); + __entry->mu[7 * i + 1] = + intc_ctxt->mu[i].gpa; + __entry->mu[7 * i + 2] = + intc_ctxt->mu[i].gva; + __entry->mu[7 * i + 3] = + intc_ctxt->mu[i].data; + __entry->mu[7 * i + 4] = + AW(intc_ctxt->mu[i].condition); + __entry->mu[7 * i + 5] = + intc_ctxt->mu[i].data_ext; + __entry->mu[7 * i + 6] = + AW(intc_ctxt->mu[i].mask); + } + } + + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP 0x%llx, cu_num %d, mu_num %d\n" + "CU header: %s (0x%llx)\n" + "CU entry0: %s (0x%llx 0x%llx)\n" + "CU entry1: %s (0x%llx 0x%llx)\n" + "MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry1: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry2: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry3: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry4: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry5: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry6: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry7: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry8: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry9: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry10: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + , + __entry->cpu, __entry->guest_ip, + __entry->cu_num, __entry->mu_num, + (__entry->cu_num >= 0) ? + E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_hdr_lo) : "(none)", + (__entry->cu_num >= 0) ? 
__entry->cu_hdr_lo : 0, + E2K_PRINT_INTC_CU_ENTRY(__entry, 0), + E2K_PRINT_INTC_CU_ENTRY(__entry, 1), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 0), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 1), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 2), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 3), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 4), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 5), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 6), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 7), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 8), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 9), + E2K_PRINT_INTC_MU_ENTRY(__entry, __entry->mu_num, 10)) +); + +TRACE_EVENT( + single_mu_intc, + + TP_PROTO(const intc_info_mu_t *mu, u64 guest_ip, u64 cpu), + + TP_ARGS(mu, guest_ip, cpu), + + TP_STRUCT__entry( + __array( u64, mu, INTC_INFO_MU_ITEM_SIZE ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->mu[0] = AW(mu[0].hdr); + __entry->mu[1] = mu[0].gpa; + __entry->mu[2] = mu[0].gva; + __entry->mu[3] = mu[0].data; + __entry->mu[4] = AW(mu[0].condition); + __entry->mu[5] = mu[0].data_ext; + __entry->mu[6] = AW(mu[0].mask); + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP: 0x%llx\n" + "MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + __entry->cpu, __entry->guest_ip, + E2K_PRINT_INTC_MU_ENTRY(__entry, 1, 0)) +); + +TRACE_EVENT( + double_mu_intc, + + TP_PROTO(const intc_info_mu_t *mu, u64 guest_ip, u64 cpu), + + TP_ARGS(mu, guest_ip, cpu), + + TP_STRUCT__entry( + __array( u64, mu, 2 * INTC_INFO_MU_ITEM_SIZE ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + int i; + for (i = 0; i < 2; i++) { + __entry->mu[7 * i] = + AW(mu[i].hdr); + __entry->mu[7 * i + 1] = + mu[i].gpa; + __entry->mu[7 * i + 2] = + mu[i].gva; + __entry->mu[7 * i + 3] = + mu[i].data; + __entry->mu[7 * i + 4] = + AW(mu[i].condition); + __entry->mu[7 * i + 5] = + mu[i].data_ext; + __entry->mu[7 * i + 6] = + AW(mu[i].mask); + } + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP: 0x%llx\n" + "MU entry0: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n" + "MU entry1: %s (0x%llx), 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + __entry->cpu, __entry->guest_ip, + E2K_PRINT_INTC_MU_ENTRY(__entry, 2, 0), + E2K_PRINT_INTC_MU_ENTRY(__entry, 2, 1)) +); + +TRACE_EVENT( + single_cu_intc, + + TP_PROTO(const intc_info_cu_hdr_t cu_hdr, u64 guest_ip, u64 cpu), + + TP_ARGS(cu_hdr, guest_ip, cpu), + + TP_STRUCT__entry( + __field( u64, cu_hdr_lo ) + __field( u64, guest_ip ) + __field( u64, cpu ) + ), + + TP_fast_assign( + __entry->cu_hdr_lo = AW(cu_hdr.lo); + __entry->guest_ip = guest_ip; + __entry->cpu = cpu; + ), + + TP_printk("CPU#%llu, guest IP: 0x%llx\n" + "CU header: %s (0x%llx)\n", + __entry->cpu, __entry->guest_ip, + E2K_TRACE_PRINT_CU_HDR_LO(__entry->cu_hdr_lo), + __entry->cu_hdr_lo) + +); + +TRACE_EVENT( + intc_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("Intercept exit %s(%d)\n", + (__entry->ret) ? 
"to QEMU " : "", + __entry->ret) +); + +TRACE_EVENT( + irq_delivery, + + TP_PROTO(unsigned int vector, unsigned int dlvm, int vcpu, + bool dam_active), + + TP_ARGS(vector, dlvm, vcpu, dam_active), + + TP_STRUCT__entry( + __field( u32, vector ) + __field( u32, dlvm ) + __field( int, vcpu ) + __field( bool, dam_active ) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->dlvm = dlvm; + __entry->vcpu = vcpu; + __entry->dam_active = dam_active; + ), + + TP_printk("to vcpu %d via %s, vector 0x%x, dlvm %d\n", __entry->vcpu, + __entry->dam_active ? "icr" : "pmirr", + __entry->vector, __entry->dlvm) +); + +TRACE_EVENT( + save_pmirr, + + TP_PROTO(u32 pmirr, u64 val), + + TP_ARGS(pmirr, val), + + TP_STRUCT__entry( + __field( u32, pmirr ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->pmirr = pmirr; + __entry->val = val; + ), + + TP_printk("pmirr#%d val 0x%llx\n", __entry->pmirr, __entry->val) +); + +TRACE_EVENT( + restore_pmirr, + + TP_PROTO(u32 pmirr, u64 val), + + TP_ARGS(pmirr, val), + + TP_STRUCT__entry( + __field( u32, pmirr ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->pmirr = pmirr; + __entry->val = val; + ), + + TP_printk("pmirr#%d val 0x%llx\n", __entry->pmirr, __entry->val) +); + +TRACE_EVENT( + save_cir, + + TP_PROTO(u32 cir), + + TP_ARGS(cir), + + TP_STRUCT__entry( + __field( u32, cir ) + ), + + TP_fast_assign( + __entry->cir = cir; + ), + + TP_printk("cir 0x%x\n", __entry->cir) +); + +TRACE_EVENT( + restore_cir, + + TP_PROTO(u32 cir), + + TP_ARGS(cir), + + TP_STRUCT__entry( + __field( u32, cir ) + ), + + TP_fast_assign( + __entry->cir = cir; + ), + + TP_printk("cir 0x%x\n", __entry->cir) +); + +TRACE_EVENT( + complete_intc_info_io_read, + + TP_PROTO(unsigned long gpa, unsigned long data), + + TP_ARGS(gpa, data), + + TP_STRUCT__entry( + __field( unsigned long, gpa ) + __field( unsigned long, data ) + ), + + TP_fast_assign( + __entry->gpa = gpa; + __entry->data = data; + ), + + TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data) +); + +TRACE_EVENT( + complete_intc_info_io_write, + + TP_PROTO(unsigned long gpa, unsigned long data), + + TP_ARGS(gpa, data), + + TP_STRUCT__entry( + __field( unsigned long, gpa ) + __field( unsigned long, data ) + ), + + TP_fast_assign( + __entry->gpa = gpa; + __entry->data = data; + ), + + TP_printk("gpa 0x%lx, data 0x%lx\n", __entry->gpa, __entry->data) +); + +#endif /* _TRACE_KVM_HV_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_kvm_hv +#include diff --git a/arch/e2k/include/asm/kvm/trace_kvm_pv.h b/arch/e2k/include/asm/kvm/trace_kvm_pv.h new file mode 100644 index 0000000..44da459 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trace_kvm_pv.h @@ -0,0 +1,207 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm_pv + +#if !defined(_TRACE_KVM_PV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_PV_H + +#include +#include +#include +#include +#include + +#define E2K_TC_TYPE_STORE (1ULL << 17) +#define E2K_TC_TYPE_S_F (1ULL << 19) +#define E2K_TC_TYPE_ROOT (1ULL << 27) +#define E2K_TC_TYPE_SCAL (1ULL << 28) +#define E2K_TC_TYPE_SRU (1ULL << 29) +#define E2K_TC_TYPE_SPEC (1ULL << 30) +#define E2K_TC_TYPE_PM (1ULL << 31) +#define E2K_TC_TYPE_NUM_ALIGN (1ULL << 50) +#define E2K_TC_TYPE_EMPT (1ULL << 51) +#define E2K_TC_TYPE_CLW (1ULL << 52) + +#define E2K_TC_TYPE (E2K_TC_TYPE_STORE | E2K_TC_TYPE_S_F | E2K_TC_TYPE_ROOT | \ + E2K_TC_TYPE_SCAL | E2K_TC_TYPE_SRU | 
E2K_TC_TYPE_SPEC | \ + E2K_TC_TYPE_PM | E2K_TC_TYPE_NUM_ALIGN | \ + E2K_TC_TYPE_EMPT | E2K_TC_TYPE_CLW) + +#define E2K_FAULT_TYPE_GLOBAL_SP (1ULL << 0) +#define E2K_FAULT_TYPE_PAGE_BOUND (1ULL << 1) +#define E2K_FAULT_TYPE_EXC_MEM_LOCK (1ULL << 2) +#define E2K_FAULT_TYPE_PH_PR_PAGE (1ULL << 3) +#define E2K_FAULT_TYPE_IO_PAGE (1ULL << 4) +#define E2K_FAULT_TYPE_ISYS_PAGE (1ULL << 5) +#define E2K_FAULT_TYPE_PROT_PAGE (1ULL << 6) +#define E2K_FAULT_TYPE_PRIV_PAGE (1ULL << 7) +#define E2K_FAULT_TYPE_ILLEGAL_PAGE (1ULL << 8) +#define E2K_FAULT_TYPE_NWRITE_PAGE (1ULL << 9) +#define E2K_FAULT_TYPE_PAGE_MISS (1ULL << 10) +#define E2K_FAULT_TYPE_PH_BOUND (1ULL << 11) +#define E2K_FAULT_TYPE_INTL_RES_BITS (1ULL << 12) + +TRACE_EVENT( + intc_trap_cellar, + + TP_PROTO(const trap_cellar_t *tc, int nr), + + TP_ARGS(tc, nr), + + TP_STRUCT__entry( + __field( int, nr ) + __field( u64, address ) + __field( u64, data_val ) + __field( u64, data_ext_val ) + __field( u8, data_tag ) + __field( u8, data_ext_tag ) + __field( u64, condition ) + __field( u64, mask ) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->address = tc->address; + load_value_and_tagd(&tc->data, + &__entry->data_val, &__entry->data_tag); + load_value_and_tagd(&tc->data_ext, + &__entry->data_ext_val, &__entry->data_ext_tag); + __entry->condition = AW(tc->condition); + __entry->mask = AW(tc->mask); + ), + + TP_printk("\n" + "Entry %d: address 0x%llx data %hhx 0x%llx data_ext %hhx 0x%llx\n" + "Register: address=0x%02hhx, vl=%d, vr=%d\n" + "Opcode: fmt=%d, n_prot=%d, fmtc=%d\n" + "Info1: chan=%d, mas=0x%02hhx, miss_lvl=%d, rcv=%d, dst_rcv=0x%03x\n" + "Info2: %s\n" + "Ftype: %s" + , + __entry->nr, __entry->address, __entry->data_tag, + __entry->data_val, __entry->data_ext_tag, __entry->data_ext_val, + AS((tc_cond_t) __entry->condition).address, + AS((tc_cond_t) __entry->condition).vl, + AS((tc_cond_t) __entry->condition).vr, + AS((tc_cond_t) __entry->condition).fmt, + AS((tc_cond_t) __entry->condition).npsp, + AS((tc_cond_t) __entry->condition).fmtc, + AS((tc_cond_t) __entry->condition).chan, + AS((tc_cond_t) __entry->condition).mas, + AS((tc_cond_t) __entry->condition).miss_lvl, + AS((tc_cond_t) __entry->condition).rcv, + AS((tc_cond_t) __entry->condition).dst_rcv, + __print_flags(__entry->condition & E2K_TC_TYPE, "|", + { E2K_TC_TYPE_STORE, "store" }, + { E2K_TC_TYPE_S_F, "s_f" }, + { E2K_TC_TYPE_ROOT, "root" }, + { E2K_TC_TYPE_SCAL, "scal" }, + { E2K_TC_TYPE_SRU, "sru" }, + { E2K_TC_TYPE_SPEC, "spec" }, + { E2K_TC_TYPE_PM, "pm" }, + { E2K_TC_TYPE_NUM_ALIGN, "num_align" }, + { E2K_TC_TYPE_EMPT, "empt" }, + { E2K_TC_TYPE_CLW, "clw" } + ), + __print_flags(AS((tc_cond_t) __entry->condition).fault_type, "|", + { E2K_FAULT_TYPE_GLOBAL_SP, "global_sp" }, + { E2K_FAULT_TYPE_PAGE_BOUND, "page_bound" }, + { E2K_FAULT_TYPE_EXC_MEM_LOCK, "exc_mem_lock" }, + { E2K_FAULT_TYPE_PH_PR_PAGE, "ph_pr_page" }, + { E2K_FAULT_TYPE_IO_PAGE, "io_page" }, + { E2K_FAULT_TYPE_ISYS_PAGE, "isys_page" }, + { E2K_FAULT_TYPE_PROT_PAGE, "prot_page" }, + { E2K_FAULT_TYPE_PRIV_PAGE, "priv_page" }, + { E2K_FAULT_TYPE_ILLEGAL_PAGE, "illegal_page" }, + { E2K_FAULT_TYPE_NWRITE_PAGE, "nwrite_page" }, + { E2K_FAULT_TYPE_PAGE_MISS, "page_miss" }, + { E2K_FAULT_TYPE_PH_BOUND, "ph_bound" }, + { E2K_FAULT_TYPE_INTL_RES_BITS, "intl_res_bits" } + )) +); + +#define kvm_trace_pv_symbol_inject_caller \ + { FROM_HOST_INJECT, "From host" }, \ + { FROM_PV_VCPU_TRAP_INJECT, "From VCPU trap" }, \ + { FROM_PV_VCPU_SYSCALL_INJECT, "From VCPU syscall" } + +TRACE_EVENT( + pv_injection, + + 
TP_PROTO(inject_caller_t from, const e2k_stacks_t *stacks, const e2k_mem_crs_t *crs, + int traps_num, int syscall_num), + + TP_ARGS(from, stacks, crs, traps_num, syscall_num), + + TP_STRUCT__entry( + __field( int, from ) + /* Stacks */ + __field( unsigned long, u_top ) + __field( u64, u_usd_lo ) + __field( u64, u_usd_hi ) + __field( unsigned long, top ) + __field( u64, usd_lo ) + __field( u64, usd_hi ) + __field( u64, psp_lo ) + __field( u64, psp_hi ) + __field( u64, pcsp_lo ) + __field( u64, pcsp_hi ) + __field( u64, pshtp ) + __field( unsigned int, pcshtp ) + /* CRs */ + __field( u64, cr0_lo ) + __field( u64, cr0_hi ) + __field( u64, cr1_lo ) + __field( u64, cr1_hi ) + /* Recursion level */ + __field( int, traps_num ) + __field( int, syscall_num ) + ), + + TP_fast_assign( + __entry->from = from; + __entry->u_top = stacks->u_top; + __entry->u_usd_lo = AW(stacks->u_usd_lo); + __entry->u_usd_hi = AW(stacks->u_usd_hi); + __entry->top = stacks->top; + __entry->usd_lo = AW(stacks->usd_lo); + __entry->usd_hi = AW(stacks->usd_hi); + __entry->psp_lo = AW(stacks->psp_lo); + __entry->psp_hi = AW(stacks->psp_hi); + __entry->pcsp_lo = AW(stacks->pcsp_lo); + __entry->pcsp_hi = AW(stacks->pcsp_hi); + __entry->pshtp = AW(stacks->pshtp); + __entry->pcshtp = stacks->pcshtp; + __entry->cr0_lo = AW(crs->cr0_lo); + __entry->cr0_hi = AW(crs->cr0_hi); + __entry->cr1_lo = AW(crs->cr1_lo); + __entry->cr1_hi = AW(crs->cr1_hi); + __entry->traps_num = traps_num; + __entry->syscall_num = syscall_num; + ), + + TP_printk("\n" + "%s. traps_num %d, syscall_num %d. Stacks:\n" + "u_top 0x%lx, u_usd_lo 0x%llx, u_usd_hi 0x%llx\n" + "top 0x%lx, usd_lo 0x%llx, usd_hi 0x%llx\n" + "psp_lo 0x%llx, psp_hi 0x%llx, pcsp_lo 0x%llx, pcsp_hi 0x%llx\n" + "pshtp 0x%llx, pcshtp 0x%x\n" + "cr0_lo 0x%llx, cr0_hi 0x%llx, cr1_lo 0x%llx, cr1_hi 0x%llx\n" + , + __print_symbolic(__entry->from, kvm_trace_pv_symbol_inject_caller), + __entry->traps_num, __entry->syscall_num, + __entry->u_top, __entry->u_usd_lo, __entry->u_usd_hi, + __entry->top, __entry->usd_lo, __entry->usd_hi, + __entry->psp_lo, __entry->psp_hi, __entry->pcsp_lo, __entry->pcsp_hi, + __entry->pshtp, __entry->pcshtp, + __entry->cr0_lo, __entry->cr0_hi, __entry->cr1_lo, __entry->cr1_hi) + +); + +#endif /* _TRACE_KVM_PV_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_kvm_pv +#include diff --git a/arch/e2k/include/asm/kvm/trap_table.S.h b/arch/e2k/include/asm/kvm/trap_table.S.h new file mode 100644 index 0000000..a20e1ae --- /dev/null +++ b/arch/e2k/include/asm/kvm/trap_table.S.h @@ -0,0 +1,549 @@ +/* + * + * Copyright (C) 2015 MCST + * + * Definition of trap handling routines. + */ + +#ifndef _E2K_KVM_TRAP_TABLE_ASM_H +#define _E2K_KVM_TRAP_TABLE_ASM_H + +#ifdef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#include + +#if defined CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) 
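A short note on SMP_ONLY(): it expands its arguments only when CONFIG_SMP is defined and to nothing otherwise, so SMP-specific instructions can be written inline in the macros that follow. For example, this line taken from GOTO_GUEST_KERNEL_TTABLE below assembles to nothing on uniprocessor configurations:

	SMP_ONLY(shld,1 GCPUID, 3, GCPUOFFSET ? \ptmp0)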
+#endif + +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ + +.macro DO_SWITCH_TO_KERNEL_IMAGE_PGD rti, predN, rtmp0, rtmp1, rtmp2 + /* do switch to host kernel image */ + /* thread_info_t *ti = rti */ + /* %pred_no is true to switch to kernel image pgd */ + /* if guest and host kernel images are loaded to equal */ + /* addresses, then switch from one to another must */ + /* flush all caches, including TLB */ + /* if (%pred_no) { */ + /* native_flush_TLB_all(); */ + /* native_flush_CACHE_all(); */ + /* E2K_WAIT_FLUSH; */ + /* } */ +{ + ldd [\rti + TI_KERNEL_IMAGE_PGD_P], \rtmp1 ? \predN; + ldd [\rti + TI_KERNEL_IMAGE_PGD], \rtmp2 ? \predN; + addd 0, 0, \rtmp0 ? \predN; +} +{ + std,2 \rtmp0, [_FLUSH_TLB_ALL_OP] MAS_TLB_FLUSH ? \predN; + std \rtmp2, [\rtmp1] ? \predN; +} +{ + std,2 \rtmp0, [_FLUSH_CACHE_L12_OP] MAS_CACHE_FLUSH ? \predN; +} + wait fl_c = 1; +.endm /* DO_SWITCH_TO_KERNEL_IMAGE_PGD */ + +.macro SWITCH_TO_KERNEL_IMAGE_PGD rti predN rtmp0, rtmp1, rtmp2 + /* switch to host kernel image */ + /* thread_info_t *ti = rti; */ + /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */ + /* *ti->kernel_image_pgd_p = ti->kernel_image_pgd; */ + /* native_flush_TLB_all(); */ + /* native_flush_CACHE_all(); */ + /* E2K_WAIT_FLUSH; */ + /* } */ + /* rti: current_thread_info */ + ldd [\rti + TI_FLAGS], \rtmp0; + cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \predN; +{ + pass \predN, @p0; + landp ~@p0, ~@p0, @p4; + pass @p4, \predN; +} + DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \predN, \rtmp0, \rtmp1, \rtmp2 +.endm + +/* + * goto guest kernel system call table entry, if system call is from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1 ptmp2: temporary predicates + */ +.macro GOTO_GUEST_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* thread_info_t *ti = %dr7 */ + /* e2k_cr1_lo_t cr1_lo = NATIVE_READ_CR1_LO_REG(); */ + /* */ + /* if ((ti->flags & _TIF_VIRTUALIZED_GUEST)) && { */ + /* !(cr1_lo.CR1_lo_psr & PSR_PM)) { */ + /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */ + /* DO_SWITCH_TO_KERNEL_IMAGE_PGD() */ + /* } */ + /* goto goto_guest_kernel_ttable_C( */ + /* sys_num << 32 | entry, */ + /* arg1, arg2, arg3, arg4, */ + /* arg5, arg6); */ + /* } */ + +.global goto_guest_kernel_ttable_C; +{ + rrd %cr1.lo, \rtmp1; + ldd [\rti + TI_FLAGS], \rtmp0; + sxt 2, %r0, %dr0; +} +{ + cmpandedb \rtmp1, CR1_LO_PSR_PM_MASK, \ptmp1; + cmpandedb \rtmp0, _TIF_VIRTUALIZED_GUEST, \ptmp0; + cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \ptmp2; +} +{ + pass \ptmp0, @p0; + pass \ptmp1, @p1; + landp ~@p0, @p1, @p4; + pass @p4, \ptmp0; + pass \ptmp2, @p2; + landp ~@p2, ~@p2, @p5; + pass @p5, \ptmp1; +} +{ + puttagd %dr1, 0, %dr1 ? \ptmp0; + puttagd %dr2, 0, %dr2 ? \ptmp0; + shld %dr0, 32, %dr0 ? \ptmp0; +} +{ + SMP_ONLY(shld,1 GCPUID, 3, GCPUOFFSET ? \ptmp0) + puttagd %dr3, 0, %dr3 ? \ptmp0; + puttagd %dr4, 0, %dr4 ? \ptmp0; + ord %dr0, \entry_num, %dr0 ? \ptmp0; +} +{ + puttagd %dr5, 0, %dr5 ? \ptmp0; + puttagd %dr6, 0, %dr6 ? \ptmp0; +} + /* rti: current_thread_info */ + /* ptmp1 : predicate is true if need switch kernel image pgd */ + /* rtmp0, rtmp1, rtmp2: temporary registers */ + DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \ptmp1, \rtmp0, \rtmp1, \rtmp2 +{ + SMP_ONLY(ldd,2 [ __per_cpu_offset + GCPUOFFSET ], GCPUOFFSET ? \ptmp0) + puttagd %dr7, 0, %dr7 ? \ptmp0; + ibranch goto_guest_kernel_ttable_C ? 
\ptmp0; +} +.endm /* GOTO_GUEST_KERNEL_TTABLE */ + +/* + * goto guest kernel system call table entry, if system call is from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1: temporary predicates + */ +.macro GOTO_PV_VCPU_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 + /* thread_info_t *ti = %dr7 */ + /* e2k_cr1_lo_t cr1_lo = NATIVE_READ_CR1_LO_REG(); */ + /* */ + /* if ((ti->flags & TIF_HOST_AT_VCPU_MODE)) { */ + /* if (ti->flags & _TIF_PARAVIRT_GUEST) { */ + /* DO_SWITCH_TO_KERNEL_IMAGE_PGD() */ + /* } */ + /* goto goto_guest_kernel_ttable_C( */ + /* sys_num << 32 | entry, */ + /* arg1, arg2, arg3, arg4, */ + /* arg5, arg6); */ + /* } */ + +{ + ldd [\rti + TI_FLAGS], \rtmp0; + sxt 2, %r0, %dr0; +} +{ + cmpandedb \rtmp0, _TIF_HOST_AT_VCPU_MODE, \ptmp0; + cmpandedb \rtmp0, _TIF_PARAVIRT_GUEST, \ptmp1; +} +{ + pass \ptmp0, @p0; + landp ~@p0, ~@p0, @p4; + pass @p4, \ptmp0; + pass \ptmp1, @p2; + landp ~@p2, ~@p2, @p5; + pass @p5, \ptmp1; +} + /* rti: current_thread_info */ + /* ptmp1 : predicate is true if need switch kernel image pgd */ + /* rtmp0, rtmp1, rtmp2: temporary registers */ + DO_SWITCH_TO_KERNEL_IMAGE_PGD \rti, \ptmp1, \rtmp0, \rtmp1, \rtmp2 +.endm /* GOTO_GUEST_KERNEL_TTABLE */ + +/* + * goto guest kernel fast system call table entry, if system call is + * from guest user + * rtmpti: temporary register to read current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1 ptmp2: temporary predicates + */ +.macro GOTO_GUEST_KERNEL_FAST_TTABLE entry_num rtmpti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 +{ + setwd wsz = 8, nfx = 1; + nop 1; + rrd %osr0, \rtmpti; + ipd 2; +} + GOTO_GUEST_KERNEL_TTABLE \entry_num, \rtmpti, \rtmp0, \rtmp1, \rtmp2, \ + \ptmp0, \ptmp1, \ptmp2 +.endm /* GOTO_GUEST_KERNEL_FAST_TTABLE */ + +/* + * goto guest kernel protected system call table entry, if system call is + * from guest user + * rti: register of current_thread_info() + * rtmp0 rtmp1 rtmp2: temporary registers + * ptmp0 ptmp1 ptmp2: temporary predicates + * FIXME: is not implemented + */ +.macro GOTO_GUEST_KERNEL_PROT_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + GOTO_GUEST_KERNEL_TTABLE \entry_num, \rti, \rtmp0, \rtmp1, \rtmp2, \ + \ptmp0, \ptmp1, \ptmp2 +.endm /* GOTO_GUEST_KERNEL_PROT_TTABLE */ + +#else /* ! 
CONFIG_KVM_HOST_MODE */ +.macro SWITCH_TO_KERNEL_IMAGE_PGD rti predN rtmp0, rtmp1, rtmp2 + /* not used */ +.endm /* SWITCH_TO_KERNEL_IMAGE_PGD */ + +.macro GOTO_GUEST_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_TTABLE */ +.macro GOTO_GUEST_KERNEL_FAST_TTABLE entry_num rtmpti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_FAST_TTABLE */ +.macro GOTO_GUEST_KERNEL_PROT_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 ptmp2 + /* not used */ +.endm /* GOTO_GUEST_KERNEL_PROT_TTABLE */ +.macro GOTO_PV_VCPU_KERNEL_TTABLE entry_num rti rtmp0 rtmp1 rtmp2 \ + ptmp0 ptmp1 + /* not used */ +.endm /* GOTO_PV_VCPU_KERNEL_TTABLE */ +#endif /* CONFIG_KVM_HOST_MODE */ + +/* + * Save current state of pair of global registers with tags and extensions + * gpair_lo/gpair_hi is pair of adjacent global registers, lo is even + * and hi is odd (for example GCURTI/GCURTASK) + * kreg_lo, kreg_hi is pair of indexes of global registers into structure + * to save these k_gregs.g[kregd_lo/kreg_hi] + * rbase is register containing base address to save global + * registers pair values (for example glob_regs_t structure + * or thread_info_t thread_info->k_gregs/h_gregs) + * predSAVE conditional save on this predicate + * rtmp0/rtmp1 two temporary registers (for example %dr20, %dr21) + */ + +.macro SAVE_GREGS_PAIR_COND_V2 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \ + predSAVE, rtmp0, rtmp1 +{ + strd,2 %dg\gpair_lo, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_lo * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? \predSAVE; + strd,5 %dg\gpair_hi, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_hi * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? \predSAVE; + movfi,1 %dg\gpair_lo, \rtmp0 ? \predSAVE; + movfi,4 %dg\gpair_hi, \rtmp1 ? \predSAVE; +} +{ + sth \rtmp0, [\rbase + (\kreg_lo * GLOB_REG_SIZE + \ + GLOB_REG_EXT)] ? \predSAVE; + sth \rtmp1, [\rbase + (\kreg_hi * GLOB_REG_SIZE + \ + GLOB_REG_EXT)] ? \predSAVE; +} +.endm /* SAVE_GREGS_PAIR_COND_V2 */ + +/* Bug 116851 - all strqp must be speculative if dealing with tags */ +.macro SAVE_GREGS_PAIR_COND_V5 gpair_lo, gpair_hi, kreg_lo, kreg_hi, rbase, \ + predSAVE +{ + strqp,2,sm %dg\gpair_lo, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_lo * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? \predSAVE; + strqp,5,sm %dg\gpair_hi, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg_hi * GLOB_REG_SIZE + \ + GLOB_REG_BASE)] ? 
\predSAVE; +} +.endm /* SAVE_GREGS_PAIR_COND_V5 */ + +.macro SAVE_GREG_UNEXT greg, kreg, rbase + strqp,sm \greg, [\rbase + (TAGGED_MEM_STORE_REC_OPC + \ + \kreg * GLOB_REG_SIZE + \ + GLOB_REG_BASE)]; +.endm /* SAVE_GREG_UNEXT */ + +.macro SAVE_GREGS_PAIR_UNEXT greg1, greg2, kreg1, kreg2, rbase +{ + SAVE_GREG_UNEXT \greg1, kreg1, rbase + SAVE_GREG_UNEXT \greg2, kreg2, rbase +} +.endm /* SAVE_GREGS_PAIR_UNEXT */ + +.macro ASM_SET_KERNEL_GREGS_PAIR gpair_lo, gpair_hi, rval_lo, rval_hi +{ + addd \rval_lo, 0, %dg\gpair_lo; + addd \rval_hi, 0, %dg\gpair_hi; +} +.endm /* ASM_SET_CURRENTS_GREGS_PAIR */ + +.macro DO_ASM_SET_KERNEL_GREGS_PAIR gpair_lo, gpair_hi, rval_lo, rval_hi + ASM_SET_KERNEL_GREGS_PAIR \gpair_lo, \gpair_hi, \ + \rval_lo, \rval_hi +.endm /* DO_ASM_SET_KERNEL_GREGS_PAIR */ + +.macro SET_KERNEL_GREGS runused, rtask, rpercpu_off, rcpu + DO_ASM_SET_KERNEL_GREGS_PAIR \ + GUEST_VCPU_STATE_GREG, CURRENT_TASK_GREG, \ + \runused, \rtask + DO_ASM_SET_KERNEL_GREGS_PAIR \ + MY_CPU_OFFSET_GREG, SMP_CPU_ID_GREG, \ + \rpercpu_off, \rcpu +.endm /* SET_KERNEL_GREGS */ + +.macro ONLY_SET_KERNEL_GREGS runused, rtask, rpercpu_off, rcpu + SET_KERNEL_GREGS \runused, \rtask, \rpercpu_off, \rcpu +.endm /* ONLY_SET_KERNEL_GREGS */ + +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \ + drtmp0, drtmp1, predtmp, \ + predCUR, predVCPU, predEXTk + /* + * drti - pointer to thread_info + * predV5 - ISET is V5 + * predCUR - is now set to true (trap from user) and can be updated + * to does not save kernel global regs and set current + * Trap at host mode and host kernel currents and other global registers + * (GCURTI & GCURTASK & CPU_ID & CPU_OFF) + * should not be saved to not invalidate guest kernel or user state of + * global registers, which were or will be saved at thread info + * %predVCPU - save VCPU state pointer regs + * predEXTk - need save kernel (predCUR) & need save extention (!predV5) + * + * predCUR = test_thread_flag(TIF_HOST_AT_VCPU_MODE) && + * !test_thread_flag(TIF_LIGHT_HYPERCALL) || + * !test_thread_flag(TIF_HOST_AT_VCPU_MODE) && + * (cr0_hi.CR0_hi_IP >= NATIVE_TASK_SIZE) + * predVCPU = predCUR; + * predEXTk = predCUR & !predV5 + */ + { + rrd %cr0.hi, \drtmp0; /* %drtmp0: cr0_hi.IP */ + ldd [\drti + TI_FLAGS], \drtmp1; /* %drtmp1: ti->flags */ + } + { + cmpbdb \drtmp0, NATIVE_TASK_SIZE, \predtmp; + cmpandedb \drtmp1, _TIF_LIGHT_HYPERCALL, \predCUR; + cmpandedb \drtmp1, _TIF_HOST_AT_VCPU_MODE, \predVCPU; + } + { + nop 1; + pass \predtmp, @p2; + pass \predCUR, @p0; + pass \predVCPU, @p1; + landp @p0, ~@p1, @p4; + pass @p4, \predCUR; + } + { + nop 1; + pass \predVCPU, @p0; + pass \predCUR, @p2; + pass \predtmp, @p1; + landp @p0, ~@p1, @p4; + landp ~@p2, ~@p4, @p5; + landp ~@p2, ~@p4, @p6; + pass @p5, \predCUR; + pass @p6, \predVCPU; + } + { + pass \predV5, @p0; + pass \predCUR, @p1; + landp ~@p0, @p1, @p4; + pass @p4, \predEXTk; + } +.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */ + +.macro DO_SAVE_HOST_GREGS_V2 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp, rtmp0, rtmp1 + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? 
\predSAVE; + SAVE_GREGS_PAIR_COND_V2 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE, \ + \rtmp0, \rtmp1 +.endm /* DO_SAVE_HOST_GREGS_V2 */ + +.macro DO_SAVE_HOST_GREGS_V5 gvcpu_lo, gvcpu_hi, hvcpu_lo, hvcpu_hi \ + drti, predSAVE, drtmp + /* drtmp: thread_info->h_gregs.g */ + addd \drti, TI_HOST_GREGS_TO_VIRT, \drtmp ? \predSAVE; + SAVE_GREGS_PAIR_COND_V5 \gvcpu_lo, \gvcpu_hi, \hvcpu_lo, \hvcpu_hi, \ + \drtmp, /* thread_info->h_gregs.g base address */ \ + \predSAVE +.endm /* DO_SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + DO_SAVE_HOST_GREGS_V2 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, \rtmp0, \rtmp1 +.endm /* SAVE_HOST_GREGS_V2 */ + +.macro SAVE_HOST_GREGS_V5 drti, predSAVE, drtmp + DO_SAVE_HOST_GREGS_V5 \ + GUEST_VCPU_STATE_GREG, GUEST_VCPU_STATE_UNUSED_GREG, \ + VCPU_STATE_GREGS_PAIRS_INDEX, VCPU_STATE_GREGS_PAIRS_HI_INDEX, \ + \drti, \predSAVE, \ + \drtmp, +.endm /* SAVE_HOST_GREGS_V5 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + SAVE_HOST_GREGS_V2 \drti, \predSAVE, \drtmp, \rtmp0, \rtmp1 +.endm /* SAVE_HOST_GREGS_TO_VIRT_V2 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + SAVE_HOST_GREGS_V5 \drti, \predSAVE, \drtmp +.endm /* SAVE_HOST_GREGS_TO_VIRT_V5 */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + /* not used */ +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_KVM_HOST_MODE && ! CONFIG_KVM_GUEST_KERNEL */ +/* It is native host kernel without any virtualization */ +.macro NEED_SAVE_CUR_AND_VCPU_STATE_GREGS drti, predV5, \ + drtmp0, drtmp1, predtmp, \ + predCUR, predVCPU, predEXTk + /* + * drti - pointer to thread_info (unused) + * predV5 - ISET is V5 + * predCUR - save kernel global regs and set current (already + * calculated, don't update) + * %predVCPU - set to false (none any VCPUs) + * predEXTk - need save kernel (predCUR) & need save extention (!predV5) + */ + { + pass \predV5, @p0; + pass \predCUR, @p1; + landp ~@p0, @p1, @p4; + landp ~@p1, @p1, @p5; + pass @p4, \predEXTk; + pass @p5, \predVCPU; + } +.endm /* NEED_SAVE_CUR_AND_VCPU_STATE_GREGS */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V2 drti, predSAVE, drtmp, rtmp0, rtmp1 + /* not used */ +.endm /* SAVE_VCPU_STATE_GREGS */ + +.macro SAVE_HOST_GREGS_TO_VIRT_V5 drti, predSAVE, drtmp + /* not used */ +.endm /* SAVE_GREGS_TO_VIRT */ + +.macro SAVE_HOST_GREGS_TO_VIRT_UNEXT drti, drtmp + /* not used */ +.endm /* SAVE_HOST_GREGS_TO_VIRT_UNEXT */ + +#endif /* CONFIG_KVM_HOST_MODE */ + +/* + * same as C function above but as result should be set %pred28 + * argument sys_num_reg_no is assembler register number containing # of + * system call (for example %r0) + * %pred28 == 1 if system call is generic and doesn use pt_regs structure + */ +.macro IS_SYS_CALL_GENERIC sys_num_reg_no + cmpesb sys_num_reg_no, __NR_clone, %pred0; + cmpesb sys_num_reg_no, __NR_clone_thread, %pred1; + cmpesb sys_num_reg_no, __NR_fork, %pred2; + cmpesb sys_num_reg_no, __NR_vfork, %pred3; +{ + pass %pred0, @p0; + pass %pred1, @p1; + pass %pred2, @p2; + pass %pred3, @p3; + andp ~@p0, ~@p1, @p4; + andp ~@p2, ~@p3, @p5; + andp @p4, @p5, @p6; + pass @p6, %pred27; +} + cmpesb sys_num_reg_no, __NR_execve, %pred4; + cmpesb sys_num_reg_no, __NR_rt_sigreturn, %pred5; + 
cmpesb sys_num_reg_no, __NR_e2k_sigsetjmp, %pred6; + cmpesb sys_num_reg_no, __NR_e2k_longjmp, %pred7; +{ + pass %pred4, @p0; + pass %pred5, @p1; + pass %pred6, @p2; + pass %pred7, @p3; + andp ~@p0, ~@p1, @p4; + andp ~@p2, ~@p3, @p5; + andp @p4, @p5, @p6; + pass @p6, %pred30; +} + cmpesb sys_num_reg_no, __NR_e2k_longjmp2, %pred8; + cmpesb sys_num_reg_no, __NR_sigaltstack, %pred9; + cmpesb sys_num_reg_no, __NR_rt_sigaction, %pred10; + cmpesb sys_num_reg_no, __NR_rt_sigsuspend, %pred11; +{ + pass %pred8, @p0; + pass %pred9, @p1; + pass %pred10, @p2; + pass %pred11, @p3; + andp ~@p0, ~@p1, @p4; + andp ~@p2, ~@p3, @p5; + andp @p4, @p5, @p6; + pass @p6, %pred29; +} +{ + nop 2; + pass %pred27, @p0; + pass %pred30, @p1; + pass %pred29, @p2; + andp @p0, @p1, @p4; + andp @p2, @p4, @p5; + pass @p5, %pred28; +} + cmpesb sys_num_reg_no, __NR_ioctl, %pred13; + cmpesb sys_num_reg_no, __NR_ipc, %pred14; +{ + pass %pred13, @p0; + pass %pred14, @p1; + pass %pred28, @p2; + andp ~@p0, ~@p1, @p4; + andp @p2, @p4, @p5; + pass @p5, %pred28; +} +.endm /* IS_SYS_CALL_GENERIC */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_KVM_TRAP_TABLE_ASM_H */ diff --git a/arch/e2k/include/asm/kvm/trap_table.h b/arch/e2k/include/asm/kvm/trap_table.h new file mode 100644 index 0000000..efb0548 --- /dev/null +++ b/arch/e2k/include/asm/kvm/trap_table.h @@ -0,0 +1,848 @@ +#ifndef __KVM_E2K_TRAP_TABLE_H +#define __KVM_E2K_TRAP_TABLE_H + +/* Does not include this header directly, include */ + +#ifndef __ASSEMBLY__ + +#include +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_KVM_GUEST_TRAPS_MODE +#undef DebugKVMGT +#define DEBUG_KVM_GUEST_TRAPS_MODE 0 /* KVM guest trap debugging */ +#define DebugKVMGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_GUEST_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE +#undef DebugKVMVGT +#define DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE 0 /* KVM verbose guest */ + /* trap debugging */ +#define DebugKVMVGT(fmt, args...) \ +({ \ + if (DEBUG_KVM_VERBOSE_GUEST_TRAPS_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +# define TT_BUG_ON(cond) BUG_ON(cond) +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +# define TT_BUG_ON(cond) BUG_ON(cond) +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel or host kernel with virtualization support */
+# define TT_BUG_ON(cond)	do { } while (0)
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+/* structure of the result of trap passing to guest functions */
+#define	KVM_PASSED_TRAPS_MASK		((1ULL << (exc_max_num + 1)) - 1)
+#define	KVM_GUEST_KERNEL_ADDR_PF_BIT	(exc_max_num + 1)
+#define	KVM_SHADOW_PT_PROT_PF_BIT	(exc_max_num + 2)
+#define	KVM_PASS_RESULT_PF_BIT		KVM_SHADOW_PT_PROT_PF_BIT
+#define	KVM_PASS_RESULT_PF_MASK	\
+		((1ULL << (KVM_PASS_RESULT_PF_BIT + 1)) - 1)
+
+/* events to complete VCPU trap handling */
+#define	KVM_PV_MMU_RESTORE_CONTEXT_PF_BIT	(exc_max_num + 5)
+#define	KVM_SHADOW_NONP_PF_BIT		(exc_max_num + 6)
+
+#define	KVM_GUEST_KERNEL_ADDR_PF_MASK	\
+		(1ULL << KVM_GUEST_KERNEL_ADDR_PF_BIT)
+#define	KVM_SHADOW_PT_PROT_PF_MASK	\
+		(1ULL << KVM_SHADOW_PT_PROT_PF_BIT)
+#define	KVM_SHADOW_NONP_PF_MASK	\
+		(1ULL << KVM_SHADOW_NONP_PF_BIT)
+#define	KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK	\
+		(1ULL << KVM_PV_MMU_RESTORE_CONTEXT_PF_BIT)
+
+#define	KVM_NOT_GUEST_TRAP_RESULT	0ULL
+#define	KVM_TRAP_IS_PASSED(trap_no)	(1ULL << (trap_no))
+#define	KVM_GUEST_KERNEL_ADDR_PF	KVM_GUEST_KERNEL_ADDR_PF_MASK
+#define	KVM_SHADOW_PT_PROT_PF		KVM_SHADOW_PT_PROT_PF_MASK
+#define	KVM_SHADOW_NONP_PF		KVM_SHADOW_NONP_PF_MASK
+#define	KVM_NEED_COMPLETE_PF_MASK	\
+		(KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK)
+
+#define	KVM_IS_ERROR_RESULT_PF(hret)	((long)(hret) < 0)
+#define	KVM_GET_PASS_RESULT_PF(hret)	((hret) & KVM_PASS_RESULT_PF_MASK)
+#define	KVM_IS_NOT_GUEST_TRAP(hret)	\
+		(KVM_GET_PASS_RESULT_PF(hret) == KVM_NOT_GUEST_TRAP_RESULT)
+#define	KVM_GET_PASSED_TRAPS(hret)	\
+		(KVM_GET_PASS_RESULT_PF(hret) & KVM_PASSED_TRAPS_MASK)
+#define	KVM_IS_TRAP_PASSED(hret)	(KVM_GET_PASSED_TRAPS(hret) != 0)
+#define	KVM_IS_GUEST_KERNEL_ADDR_PF(hret)	\
+		(KVM_GET_PASS_RESULT_PF(hret) == KVM_GUEST_KERNEL_ADDR_PF)
+#define	KVM_IS_SHADOW_PT_PROT_PF(hret)	\
+		(KVM_GET_PASS_RESULT_PF(hret) == KVM_SHADOW_PT_PROT_PF)
+#define	KVM_IS_SHADOW_NONP_PF(hret)	\
+		((hret) & KVM_SHADOW_NONP_PF_MASK)
+#define	KVM_GET_NEED_COMPLETE_PF(hret)	\
+		((hret) & KVM_NEED_COMPLETE_PF_MASK)
+#define	KVM_IS_NEED_RESTORE_CONTEXT_PF(hret)	\
+		((KVM_GET_NEED_COMPLETE_PF(hret) &	\
+			KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK) != 0)
+#define	KVM_CLEAR_NEED_RESTORE_CONTEXT_PF(hret)	\
+		(KVM_GET_NEED_COMPLETE_PF(hret) &	\
+			~KVM_PV_MMU_RESTORE_CONTEXT_PF_MASK)
+
+static inline unsigned int
+kvm_host_is_kernel_data_stack_bounds(bool on_kernel, e2k_usd_lo_t usd_lo)
+{
+	return native_is_kernel_data_stack_bounds(true, usd_lo);
+}
+
+#ifdef	CONFIG_VIRTUALIZATION
+/* It is native host kernel with virtualization support */
+/* or paravirtualized host and guest kernel */
+/* or pure guest kernel */
+static inline unsigned int
+is_kernel_data_stack_bounds(bool on_kernel, e2k_usd_lo_t usd_lo)
+{
+	return kvm_host_is_kernel_data_stack_bounds(on_kernel, usd_lo);
+}
+#endif	/* CONFIG_VIRTUALIZATION */
+
+/*
+ * The hypervisor supports light hypercalls.
+ * Light hypercalls do not:
+ *	- switch to kernel stacks
+ *	- use the data stack
+ *	- call any function which can use the stack
+ * So SBR is not switched to the kernel stack, but the SBR value is used
+ * to calculate the user/kernel mode of a trap/system call.
+ * Light hypercalls can be trapped (page fault on a guest address, for
+ * example). In this case the SBR value shows user trap mode, but the trap
+ * occurred on the hypervisor, and we need to know about it so as not to
+ * save/restore the global registers which are used by the kernel to
+ * optimize access to current/current_thread_info().
+ */
+
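A minimal editorial sketch, not part of the original patch: how the PSR.PM test described above can look in C. CR1_LO_PSR_PM_SHIFT is defined immediately below; the helper name and the raw u64 argument are assumptions (the surrounding code reads this bit through e2k_cr1_lo_t accessors such as CR1_lo_pm).

/* sketch only: did the trap interrupt privileged (kernel mode) code? */
static inline bool cr1_lo_shows_privileged_mode(u64 cr1_lo_word)
{
	/* PSR is kept in CR1.lo; PSR.PM is the bit at CR1_LO_PSR_PM_SHIFT */
	return ((cr1_lo_word >> CR1_LO_PSR_PM_SHIFT) & 1) != 0;
}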
+#define CR1_LO_PSR_PM_SHIFT 57 /* privileged mode */ + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ + +#define handle_guest_traps(regs) /* none any guests */ +static __always_inline void +init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ +} +static __always_inline void +init_guest_syscalls_handling(struct pt_regs *regs) +{ +} + +static inline bool +is_guest_proc_stack_bounds(struct pt_regs *regs) +{ + return false; /* none any guest */ +} +static inline bool +is_guest_chain_stack_bounds(struct pt_regs *regs) +{ + return false; /* none any guest */ +} +static inline bool +is_guest_TIRs_frozen(struct pt_regs *regs) +{ + return false; /* none any guest */ +} +static inline bool +have_deferred_traps(struct pt_regs *regs) +{ + return native_have_deferred_traps(regs); +} + +static inline bool +handle_guest_last_wish(struct pt_regs *regs) +{ + return false; /* none any guest and any wishes from */ +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably should be passed to guest kernel to handle. + * None any guests when virtualization is off + */ +static inline unsigned long +pass_aau_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_the_trap_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + return 0; +} +static inline unsigned long +pass_coredump_trap_to_guest(struct pt_regs *regs) +{ + return 0; +} +static inline unsigned long +pass_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_nm_interrupt_to_guest(struct pt_regs *regs, int trap_no, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_virqs_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + return 0; +} +static inline unsigned long +pass_clw_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline unsigned long +pass_page_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} +static inline void +complete_page_fault_to_guest(unsigned long what_complete) +{ +} +#else /* CONFIG_VIRTUALIZATION */ + +/* + * Special section of kernel image to be common for host and guest kernel and + * to support paravirtualized host and guest running. + * Functions at this section have fixed addressees and offsets into both images + * Any function call from this section will run host image copy of this + * function because of only host image section mapped to both image - host and + * guest. 
+ */
+#define __to_paravirt_guest	\
+		__attribute__((__section__(".to_paravirt_guest")))
+#define __to_guest	\
+		__attribute__((__section__(".to_guest")))
+
+extern char __ptext_host_start[], __ptext_host_end[];
+
+/*
+ * KVM guest kernel trap handling support
+ */
+
+/* results of trap handling */
+typedef enum trap_hndl {
+	GUEST_TRAP_IMPOSSIBLE,		/* guest kernel does not support */
+					/* it, so a guest trap cannot occur */
+	GUEST_TRAP_NOT_HANDLED,		/* trap on guest, but guest kernel */
+					/* cannot handle the trap */
+	GUEST_TRAP_HANDLED,		/* guest trap was successfully */
+					/* handled */
+	GUEST_TRAP_FAILED,		/* guest trap handling failed */
+} trap_hndl_t;
+
+extern trap_hndl_t kvm_do_handle_guest_traps(struct pt_regs *regs);
+extern trap_hndl_t kvm_handle_guest_deferred_traps(struct pt_regs *regs);
+
+extern bool kvm_is_guest_TIRs_frozen(struct pt_regs *regs);
+extern bool kvm_is_guest_proc_stack_bounds(struct pt_regs *regs);
+extern bool kvm_is_guest_chain_stack_bounds(struct pt_regs *regs);
+extern unsigned long kvm_host_aau_page_fault(struct kvm_vcpu *vcpu,
+			pt_regs_t *regs,
+			unsigned long TIR_hi, unsigned long TIR_lo);
+extern unsigned long kvm_pass_the_trap_to_guest(struct kvm_vcpu *vcpu,
+			pt_regs_t *regs,
+			unsigned long TIR_hi, unsigned long TIR_lo,
+			int trap_no);
+extern unsigned long kvm_pass_stack_bounds_trap_to_guest(struct pt_regs *regs,
+			bool proc_bounds, bool chain_bounds);
+extern unsigned long kvm_pass_virqs_to_guest(struct pt_regs *regs,
+			unsigned long TIR_hi, unsigned long TIR_lo);
+extern unsigned long kvm_pass_coredump_trap_to_guest(struct kvm_vcpu *vcpu,
+			struct pt_regs *regs);
+extern unsigned long kvm_pass_clw_fault_to_guest(struct pt_regs *regs,
+			trap_cellar_t *tcellar);
+extern unsigned long kvm_pass_page_fault_to_guest(struct pt_regs *regs,
+			trap_cellar_t *tcellar);
+extern void kvm_complete_page_fault_to_guest(unsigned long what_complete);
+
+extern noinline notrace int do_hret_last_wish_intc(struct kvm_vcpu *vcpu,
+			struct pt_regs *regs);
+
+extern void trap_handler_trampoline(void);
+extern void syscall_handler_trampoline(void);
+extern void trap_handler_trampoline_continue(void);
+extern void syscall_handler_trampoline_continue(u64 sys_rval);
+extern void syscall_fork_trampoline(void);
+extern void syscall_fork_trampoline_continue(u64 sys_rval);
+extern notrace long return_pv_vcpu_trap(void);
+extern notrace long return_pv_vcpu_syscall(void);
+
+static __always_inline void
+kvm_init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap)
+{
+	regs->traps_to_guest = 0;	/* only for host */
+	regs->is_guest_user = false;	/* only for host */
+	regs->deferred_traps = 0;	/* for host and guest */
+	regs->g_stacks_valid = false;	/* only for host */
+	if (user_mode_trap &&
+			test_thread_flag(TIF_LIGHT_HYPERCALL) &&
+			(NATIVE_NV_READ_CR1_LO_REG().CR1_lo_pm)) {
+		regs->flags |= LIGHT_HYPERCALL_FLAG_PT_REGS;
+	}
+}
+
+static __always_inline void
+kvm_init_guest_syscalls_handling(struct pt_regs *regs)
+{
+	regs->traps_to_guest = 0;	/* only for host */
+	regs->is_guest_user = true;	/* only for host */
+	regs->deferred_traps = 0;	/* only for guest */
+	regs->g_stacks_valid = false;	/* only for host */
+}
+static inline bool
+kvm_have_guest_deferred_traps(struct pt_regs *regs)
+{
+	return regs->deferred_traps != 0;
+}
+
+static inline void
+kvm_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
+			e2k_usd_lo_t usd_lo, e2k_upsr_t upsr)
+{
+	KVM_WRITE_UPSR_REG_VALUE(upsr.UPSR_reg);
+	KVM_WRITE_USD_REG(usd_hi, usd_lo);
+	KVM_WRITE_SBR_REG_VALUE(sbr);
+}
+
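A hedged editorial sketch of how a caller might decode the pass-result word assembled from the KVM_*_PF masks defined earlier in this header. Only the macros and kvm_complete_page_fault_to_guest() come from this file; the function name and the pr_debug() reporting are illustrative assumptions.

/* sketch only: decode the result of passing a trap/page fault to a guest */
static inline void decode_pf_pass_result(unsigned long hret)
{
	if (KVM_IS_ERROR_RESULT_PF(hret)) {
		/* negative value: the host failed to pass the trap */
		pr_debug("trap pass error %ld\n", (long)hret);
		return;
	}
	if (KVM_IS_TRAP_PASSED(hret))
		/* the low bits are the mask of trap numbers injected */
		pr_debug("passed traps 0x%llx\n",
			(unsigned long long)KVM_GET_PASSED_TRAPS(hret));
	if (KVM_IS_NEED_RESTORE_CONTEXT_PF(hret))
		/* completion is required: restore the guest MMU context */
		kvm_complete_page_fault_to_guest(
			KVM_GET_NEED_COMPLETE_PF(hret));
}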
+/*
+ * The function should return the boolean value 'true' if the trap is a
+ * wish of the host to inject a VIRQs interrupt, and 'false' if the wish
+ * is not from the host to deal with the guest
+ */
+static inline bool
+kvm_handle_guest_last_wish(struct pt_regs *regs)
+{
+	struct kvm_vcpu *vcpu = current_thread_info()->vcpu;
+
+	if (vcpu == NULL) {
+		/* it is not guest VCPU thread, or completed */
+		return false;
+	}
+	if (vcpu->arch.virq_wish) {
+		/* trap is only to interrupt guest kernel on guest mode */
+		/* to provide injection of pending VIRQs on guest */
+		if (!vcpu->arch.virq_injected) {
+			vcpu->arch.virq_injected = true;
+			vcpu->arch.virq_wish = false;
+			return true;
+		}	/* else already injected */
+	}
+	return false;
+}
+
+/*
+ * The guest kernel (same as the host) does not use AAU, so a trap that
+ * occurred in guest kernel mode is an error. Do not pass the trap to the
+ * guest; the host will handle the trap and kill the guest.
+ * Guest user code can use AAU, so a trap that occurred in guest user mode
+ * needs to be passed to the guest kernel to be handled there
+ */
+static inline bool
+kvm_should_pass_aau_kernel_trap_to_guest(struct pt_regs *regs)
+{
+	return false;
+}
+static inline bool
+kvm_should_pass_aau_user_trap_to_guest(struct pt_regs *regs)
+{
+	return true;
+}
+
+/*
+ * Some traps need not be passed to the guest; they can be handled by the
+ * host only.
+ */
+
+#define	kvm_needless_guest_exc_mask	(0UL |		\
+		exc_interrupt_mask |			\
+		exc_nm_interrupt_mask |			\
+		exc_mem_error_mask |			\
+		exc_data_page_mask |			\
+		0UL)
+#define	kvm_guest_exc_mask	(exc_all_mask &		\
+					~kvm_needless_guest_exc_mask)
+
+static inline bool
+kvm_should_pass_the_trap_to_guest(struct pt_regs *regs, int trap_no)
+{
+	unsigned long trap_mask = (1UL << trap_no);
+
+	if (trap_no == exc_last_wish_num) {
+		struct kvm_vcpu *vcpu = current_thread_info()->vcpu;
+
+		if (vcpu->arch.is_hv) {
+			if (vcpu->arch.virq_wish || vcpu->arch.vm_exit_wish) {
+				/* it is last wish to support guest on host */
+				/* do not pass to guest */
+				return false;
+			}
+		} else if (vcpu->arch.is_pv) {
+			if (vcpu->arch.virq_wish) {
+				/* it is paravirtualized guest, pass trap */
+				/* to guest, if it is enabled */
+				;
+			} else {
+				/* there is not any wish for guest */
+				return false;
+			}
+		} else {
+			KVM_BUG_ON(true);
+		}
+	}
+	if (trap_mask & kvm_guest_exc_mask)
+		return true;
+	return false;
+}
+/*
+ * Some traps will be passed to the guest, but by the host handler
+ * of the trap.
+ */
+
+#define	kvm_defer_guest_exc_mask	(0UL |			\
+		exc_data_page_mask |				\
+		exc_mem_lock_mask |				\
+		exc_ainstr_page_miss_mask |			\
+		exc_ainstr_page_prot_mask |			\
+		0UL)
+#define	kvm_pv_defer_guest_exc_mask	(0UL)
+
+static inline bool
+kvm_defered_pass_the_trap_to_guest(struct pt_regs *regs, int trap_no)
+{
+	unsigned long trap_mask = (1UL << trap_no);
+
+	if (trap_mask & kvm_pv_defer_guest_exc_mask)
+		return true;
+	return false;
+}
+
+/*
+ * The function controls trap handling by the guest kernel.
+ * Traps were passed to the guest kernel (TIRs and trap cellar were set)
+ * before calling the function.
+ * The result of the function is the boolean 'traps were handled by guest'.
+ * If the trap is a guest user trap and was handled by the guest kernel
+ * (probably with a fault), then the function returns 'true' and handling
+ * of this trap can be completed.
+ * If the trap is not a guest user trap or cannot be handled by the guest
+ * kernel, then the function returns 'false' and handling of this trap
+ * should be continued by the host.
+ * WARNING: The function can be called only on the host kernel (a guest
+ * cannot run its own guests).
+ */ +static inline bool kvm_handle_guest_traps(struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu; + int ret; + + vcpu = current_thread_info()->vcpu; + if (!due_to_guest_trap_on_pv_hv_host(vcpu, regs)) { + DebugKVMVGT("trap occurred outside of guest user and " + "kernel\n"); + return false; + } + if (regs->traps_to_guest == 0) { + DebugKVMVGT("it is recursive trap on host and can be handled " + "only by host\n"); + return false; + } + if (vcpu == NULL) { + DebugKVMVGT("it is not VCPU thread or VCPU is not yet " + "created\n"); + return false; + } + regs->flags |= GUEST_FLAG_PT_REGS; + ret = kvm_do_handle_guest_traps(regs); + regs->traps_to_guest = 0; + if (regs->deferred_traps) { + /* New traps (VIRQs interrupt) occured to pass to guest */ + ret = kvm_handle_guest_deferred_traps(regs); + regs->deferred_traps = 0; + } + regs->flags &= ~GUEST_FLAG_PT_REGS; + + if (ret == GUEST_TRAP_HANDLED) { + DebugKVMGT("the guest trap handled\n"); + return true; + } else if (ret == GUEST_TRAP_FAILED) { + DebugKVMGT("the guest trap handled, but with fault\n"); + return true; + } else if (ret == GUEST_TRAP_NOT_HANDLED) { + DebugKVMGT("guest cannot handle the guest trap\n"); + return false; + } else if (ret == GUEST_TRAP_IMPOSSIBLE) { + DebugKVMGT("it is not guest user trap\n"); + return false; + } else { + BUG_ON(true); + } + return false; +} + +static inline bool +is_guest_proc_stack_bounds(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return kvm_is_guest_proc_stack_bounds(regs); +} +static inline bool +is_guest_chain_stack_bounds(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return kvm_is_guest_chain_stack_bounds(regs); +} + +#ifndef CONFIG_KVM_GUEST_KERNEL +/* It is native host kernel with virtualization support on */ +/* or it is paravirtualized host and guest kernel */ +/* guest cannot support hypervisor mode and create own virtual machines, */ +/* so in paravirtualized mode the following functions are called only */ +/* on host mode and should not be used on guest mode */ + +#define handle_guest_traps(regs) kvm_handle_guest_traps(regs) + +static __always_inline void +init_guest_traps_handling(struct pt_regs *regs, bool user_mode_trap) +{ + kvm_init_guest_traps_handling(regs, user_mode_trap); +} +static __always_inline void +init_guest_syscalls_handling(struct pt_regs *regs) +{ + kvm_init_guest_syscalls_handling(regs); +} + +static inline bool +is_guest_TIRs_frozen(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return kvm_is_guest_TIRs_frozen(regs); +} +static inline bool +have_deferred_traps(struct pt_regs *regs) +{ + return kvm_have_guest_deferred_traps(regs); +} + +static inline bool +handle_guest_last_wish(struct pt_regs *regs) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + return kvm_handle_guest_last_wish(regs); +} +static inline void +kvm_host_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + struct kvm_vcpu *vcpu = current_thread_info()->vcpu; + + if (!kvm_test_intc_emul_flag(regs)) { + native_instr_page_fault(regs, ftype, async_instr); + return; + } + + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, async_instr); +} + +static inline void +kvm_host_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + if (likely(!kvm_test_intc_emul_flag(regs))) { + native_do_aau_page_fault(regs, address, condition, mask, aa_no); + return; + } + + 
kvm_pv_mmu_aau_page_fault(current_thread_info()->vcpu, regs, + address, condition, aa_no); +} + +/* + * Following functions run on host, check if traps occurred on guest user + * or kernel, so probably sould be passed to guest kernel to handle. + * In some cases traps should be passed to guest, but need be preliminary + * handled by host (for example hardware stack bounds). + * Functions return flag or mask of traps which passed to guest and + * should not be handled by host + */ +static inline unsigned long +pass_aau_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo) +{ + struct kvm_vcpu *vcpu; + + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + vcpu = current_thread_info()->vcpu; + + return kvm_host_aau_page_fault(vcpu, regs, TIR_hi, TIR_lo); +} +static inline unsigned long +pass_stack_bounds_trap_to_guest(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds) +{ + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + return kvm_pass_stack_bounds_trap_to_guest(regs, + proc_bounds, chain_bounds); +} +static inline bool +pass_instr_page_fault_trap_to_guest(struct pt_regs *regs, int trap_no) +{ + if (!kvm_test_intc_emul_flag(regs)) + return false; + + return true; + +} +static inline unsigned long +pass_the_trap_to_guest(struct pt_regs *regs, + unsigned long TIR_hi, unsigned long TIR_lo, + int trap_no) +{ + struct kvm_vcpu *vcpu; + + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + vcpu = current_thread_info()->vcpu; + +/* + if (trap_no == exc_proc_stack_bounds_num) + return pass_stack_bounds_trap_to_guest(regs, true, false); + if (trap_no == exc_chain_stack_bounds_num) + return pass_stack_bounds_trap_to_guest(regs, false, true); + */ + + if (!kvm_should_pass_the_trap_to_guest(regs, trap_no)) { + DebugKVMVGT("trap #%d needs not handled by guest\n", + trap_no); + return 0; + } + if (trap_no == exc_instr_page_miss_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).page_miss = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 0); + return 1; + } + if (trap_no == exc_instr_page_prot_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 0); + return 1; + } + if (trap_no == exc_ainstr_page_miss_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).page_miss = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 1); + return 1; + } + if (trap_no == exc_ainstr_page_prot_num) { + tc_fault_type_t ftype; + + AW(ftype) = 0; + AS(ftype).illegal_page = 1; + kvm_pv_mmu_instr_page_fault(vcpu, regs, ftype, 1); + return 1; + } + if (trap_no == exc_last_wish_num) { + int r; + + r = do_hret_last_wish_intc(vcpu, regs); + if (r == 0) { + return 1; + } else { + return 0; + } + } + if (kvm_vcpu_in_hypercall(vcpu)) { + /* the trap on host, so handles it by host */ + return 0; + } + if (kvm_defered_pass_the_trap_to_guest(regs, trap_no)) { + DebugKVMVGT("trap #%d will be passed later by host " + "handler of the trap\n", trap_no); + return 0; + } + return kvm_pass_the_trap_to_guest(vcpu, regs, TIR_hi, TIR_lo, trap_no); +} +static inline unsigned long +pass_coredump_trap_to_guest(struct pt_regs *regs) +{ + struct kvm_vcpu *vcpu; + + if (!kvm_test_intc_emul_flag(regs)) + return 0; + + vcpu = current_thread_info()->vcpu; + + + return kvm_pass_coredump_trap_to_guest(vcpu, regs); +} + +/* + * Now interrupts are handled by guest only in bottom half style + * Host pass interrupts to special virtual IRQ process (activate VIRQ VCPU) + * This process activates specified for this VIRQ guest 
kernel thread
+ * to handle the interrupt.
+ * So real interrupts are not passed to the guest kernel
+ */
+static inline unsigned long
+pass_interrupt_to_guest(struct pt_regs *regs, int trap_no,
+			unsigned long TIR_hi, unsigned long TIR_lo)
+{
+	return 0;
+}
+static inline unsigned long
+pass_nm_interrupt_to_guest(struct pt_regs *regs, int trap_no,
+			unsigned long TIR_hi, unsigned long TIR_lo)
+{
+	return 0;
+}
+static inline unsigned long
+pass_virqs_to_guest(struct pt_regs *regs,
+		unsigned long TIR_hi, unsigned long TIR_lo)
+{
+	if (!kvm_test_intc_emul_flag(regs))
+		return 0;
+
+	if (test_thread_flag(TIF_PSEUDOTHREAD)) {
+		/* it is VIRQ VCPU thread, it cannot handle interrupts */
+		return 0;
+	}
+	if (!test_thread_flag(TIF_VIRQS_ACTIVE)) {
+		/* VIRQ VCPU thread is not yet active */
+		return 0;
+	}
+	return kvm_pass_virqs_to_guest(regs, TIR_hi, TIR_lo);
+}
+static inline unsigned long
+pass_clw_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar)
+{
+	if (!kvm_test_intc_emul_flag(regs))
+		return 0;
+
+	return kvm_pass_clw_fault_to_guest(regs, tcellar);
+}
+static inline unsigned long
+pass_page_fault_to_guest(struct pt_regs *regs, trap_cellar_t *tcellar)
+{
+	if (!kvm_test_intc_emul_flag(regs))
+		return 0;
+
+	return kvm_pass_page_fault_to_guest(regs, tcellar);
+}
+static inline void
+complete_page_fault_to_guest(unsigned long what_complete)
+{
+	kvm_complete_page_fault_to_guest(what_complete);
+}
+#endif	/* ! CONFIG_KVM_GUEST_KERNEL */
+#endif	/* ! CONFIG_VIRTUALIZATION */
+
+static inline bool
+native_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
+{
+	return (user_mode(regs) &&
+			regs->stacks.psp_hi.PSP_hi_ind >=
+				regs->stacks.psp_hi.PSP_hi_size) ||
+		regs->stacks.psp_hi.PSP_hi_ind <= 0;
+}
+static inline bool
+native_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
+{
+	return (user_mode(regs) &&
+			regs->stacks.pcsp_hi.PCSP_hi_ind >=
+				regs->stacks.pcsp_hi.PCSP_hi_size) ||
+		regs->stacks.pcsp_hi.PCSP_hi_ind <= 0;
+}
+
+#if	defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native guest kernel */
+#include
+#elif	defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host/guest kernel */
+#include
+#else	/* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
+/* it is native kernel with or without virtualization support */
+static inline bool
+is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
+{
+	return native_is_proc_stack_bounds(ti, regs);
+}
+static inline bool
+is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
+{
+	return native_is_chain_stack_bounds(ti, regs);
+}
+
+#ifdef	CONFIG_VIRTUALIZATION
+/* it is host kernel with virtualization support */
+static inline void
+instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype,
+			const int async_instr)
+{
+	kvm_host_instr_page_fault(regs, ftype, async_instr);
+}
+
+static inline void
+do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address,
+		const tc_cond_t condition, const tc_mask_t mask,
+		const unsigned int aa_no)
+{
+	kvm_host_do_aau_page_fault(regs, address, condition, mask, aa_no);
+}
+#endif	/* CONFIG_VIRTUALIZATION */
+
+#endif	/* CONFIG_KVM_GUEST_KERNEL */
+
+#else	/* __ASSEMBLY__ */
+#include
+#endif	/* ! 
__ASSEMBLY__ */ + +#endif /* __KVM_E2K_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/kvm/uaccess.h b/arch/e2k/include/asm/kvm/uaccess.h new file mode 100644 index 0000000..5a757fa --- /dev/null +++ b/arch/e2k/include/asm/kvm/uaccess.h @@ -0,0 +1,197 @@ +#ifndef _E2K_KVM_UACCESS_H_ +#define _E2K_KVM_UACCESS_H_ + +/* + * Host kernel access to User space memory, including guest user space + * Copyright (c) 2020, MCST. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include + +#include +#include +#include +#include +#include + +static inline unsigned long +native_copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n) +{ + return __copy_in_user_with_tags(to, from, n); +} + +static inline unsigned long +native_copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n) +{ + return __copy_to_user_with_tags(to, from, n); +} + +static inline unsigned long +native_copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n) +{ + return __copy_from_user_with_tags(to, from, n); +} + +#define native_get_user(kval, uptr) get_user(kval, uptr) +#define native_put_user(kval, uptr) put_user(kval, uptr) + +#ifdef CONFIG_KVM_HOST_MODE +/* it is host kernel with virtualization support */ + +#define host_get_user(kval, uptr, hregs) \ +({ \ + __typeof__(*(uptr)) __user *___pu_ptr = (uptr); \ + int sz_uptr = sizeof(*(uptr)); \ + \ + ___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \ + (uptr) \ + : \ + kvm_guest_ptr_to_host_ptr((uptr), sz_uptr); \ + (___pu_ptr) ? native_get_user(kval, ___pu_ptr) : -EFAULT; \ +}) + +#define host_put_user(kval, uptr, hregs) \ +({ \ + __typeof__(*(uptr)) __user *___pu_ptr = (uptr); \ + int sz_uptr = sizeof(*(uptr)); \ + \ + ___pu_ptr = (!host_test_intc_emul_mode(hregs)) ? \ + (uptr) \ + : \ + kvm_guest_ptr_to_host_ptr((uptr), sz_uptr); \ + (___pu_ptr) ? 
native_put_user(kval, ___pu_ptr) : -EFAULT; \ +}) + +extern unsigned long kvm_copy_in_user_with_tags(void __user *to, + const void __user *from, unsigned long n); +extern unsigned long kvm_copy_to_user_with_tags(void __user *to, + const void *from, unsigned long n); +extern unsigned long kvm_copy_from_user_with_tags(void *to, + const void __user *from, unsigned long n); + +static inline unsigned long +host_copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + return native_copy_in_user_with_tags(to, from, n); + } + return kvm_copy_in_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n, const struct pt_regs *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + return native_copy_to_user_with_tags(to, from, n); + } + return kvm_copy_to_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + if (likely(!host_test_intc_emul_mode(regs))) { + return native_copy_from_user_with_tags(to, from, n); + } + return kvm_copy_from_user_with_tags(to, from, n); +} + +extern int kvm_vcpu_copy_host_to_guest(struct kvm_vcpu *vcpu, + const void *host, void __user *guest, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); +extern int kvm_vcpu_copy_host_from_guest(struct kvm_vcpu *vcpu, + void *host, const void __user *guest, size_t size, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); + +static inline int +fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + struct kvm_vcpu *vcpu; + + if (likely(!host_test_intc_emul_mode(regs))) { + return native_fast_tagged_memory_copy_to_user(dst, src, + len, regs, + strd_opcode, ldrd_opcode, prefetch); + } + + vcpu = native_current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL); + return kvm_vcpu_copy_host_to_guest(vcpu, src, dst, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + struct kvm_vcpu *vcpu; + + if (likely(!host_test_intc_emul_mode(regs))) { + return native_fast_tagged_memory_copy_from_user(dst, src, + len, regs, + strd_opcode, ldrd_opcode, prefetch); + } + + vcpu = native_current_thread_info()->vcpu; + KVM_BUG_ON(vcpu == NULL); + return kvm_vcpu_copy_host_from_guest(vcpu, dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +#else /* !CONFIG_KVM_HOST_MODE */ +/* it is not host kernel, it is native kernel without virtualization */ + +#define host_get_user(kval, uptr, hregs) native_get_user(kval, uptr) +#define host_put_user(kval, uptr, hregs) native_put_user(kval, uptr) + +static inline unsigned long +host_copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + return native_copy_in_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n, const struct pt_regs *regs) +{ + return native_copy_to_user_with_tags(to, from, n); +} + +static inline unsigned long +host_copy_from_user_with_tags(void 
*to, const void __user *from, + unsigned long n, const struct pt_regs *regs) +{ + return native_copy_from_user_with_tags(to, from, n); +} +#endif /* CONFIG_KVM_HOST_MODE */ + +static inline unsigned long +host_copy_to_user(void __user *to, const void *from, + unsigned long n, const struct pt_regs *regs) +{ + return host_copy_to_user_with_tags(to, from, n, regs); +} + +#endif /* _E2K_KVM_UACCESS_H_ */ diff --git a/arch/e2k/include/asm/kvm_host.h b/arch/e2k/include/asm/kvm_host.h new file mode 100644 index 0000000..94c05bc --- /dev/null +++ b/arch/e2k/include/asm/kvm_host.h @@ -0,0 +1,1471 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * This header defines architecture specific interfaces, e2k version + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef _ASM_E2K_KVM_HOST_H +#define _ASM_E2K_KVM_HOST_H + +#include +#include +#include /* irq_handler_t */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define KVM_E2K_DEBUG + +#ifdef KVM_E2K_DEBUG +#define KVM_BUG_ON(__condition) BUG_ON(__condition) +#else /* ! KVM_E2K_DEBUG */ +#define KVM_BUG_ON(__condition) BUILD_BUG_ON_INVALID(__condition) +#endif /* KVM_E2K_DEBUG */ + +/* Avoid printk from arch-independent WARN_ON as + * KVM implementation uses __interrupt functions */ +#define KVM_WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + unlikely(__ret_warn_on); \ +}) + +#define KVM_E2K_SV_VM_TYPE_MASK (1 << KVM_E2K_SV_VM_TYPE) +#define KVM_E2K_SW_PV_VM_TYPE_MASK (1 << KVM_E2K_SW_PV_VM_TYPE) +#define KVM_E2K_HV_VM_TYPE_MASK (1 << KVM_E2K_HV_VM_TYPE) +#define KVM_E2K_HW_PV_VM_TYPE_MASK (1 << KVM_E2K_HW_PV_VM_TYPE) + +/* mask of available and supported by the hypervisor VM types */ +/* depends on hardware, CPU ISET, kernel & hypervisor configuration */ +extern unsigned int kvm_vm_types_available; + +/* atomic operations under mask values, for example kvm_vm_types_available */ +static inline void atomic_clear_mask(unsigned int mask, unsigned int *value) +{ + __api_atomic_op(mask, value, w, "andns", RELAXED_MB); +} +static inline void atomic_set_mask(unsigned int mask, unsigned int *value) +{ + __api_atomic_op(mask, value, w, "ors", RELAXED_MB); +} +static inline void atomic64_clear_mask(unsigned long mask, unsigned long *value) +{ + __api_atomic_op(mask, value, d, "andnd", RELAXED_MB); +} +static inline void atomic64_set_mask(unsigned long mask, unsigned long *value) +{ + __api_atomic_op(mask, value, d, "ord", RELAXED_MB); +} + +static inline bool +kvm_is_sv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_SV_VM_TYPE_MASK; +} +static inline bool +kvm_is_sw_pv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_SW_PV_VM_TYPE_MASK; +} +static inline bool +kvm_is_hv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_HV_VM_TYPE_MASK; +} +static inline bool +kvm_is_hw_pv_vm_available(void) +{ + return kvm_vm_types_available & KVM_E2K_HW_PV_VM_TYPE_MASK; +} + +#define KVM_USER_MEM_SLOTS 32 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + +#undef E2K_INVALID_PAGE +#define E2K_INVALID_PAGE (~(hpa_t)0) + +#define UNMAPPED_GVA (~(gpa_t)0) + +/* + * See include/linux/kvm_host.h + * For the normal pfn, the highest 12 bits should be zero, + * so we can mask 
bit 62 ~ bit 52 to indicate the error pfn, + * mask bit 63 to indicate the noslot pfn. + * Some arch e2k extensions: + * Bit 51 indicates NULL pagse + */ +#define KVM_PFN_NULL (1ULL << 51) + +/* number of page sizes supported by KVM */ +/* KVM can now support pages on each page table level, real MMU capabilities */ +/* are presenting by page table & page levels structures */ +#define KVM_NR_PAGE_SIZES MAX_HUGE_PAGES_LEVEL + +#define KVM_PT_LEVEL_HPAGE_SHIFT(ptl) get_pt_level_page_shift(ptl) +#define KVM_PT_LEVEL_HPAGE_SIZE(ptl) get_pt_level_page_size(ptl) +#define KVM_PT_LEVEL_HPAGE_MASK(ptl) get_pt_level_page_mask(ptl) +#define KVM_PT_LEVEL_PAGES_PER_HPAGE(ptl) \ + (KVM_PT_LEVEL_HPAGE_SIZE(ptl) / PAGE_SIZE) +#define KVM_PT_LEVEL_HPAGE_GFN_SHIFT(ptl) \ + (KVM_PT_LEVEL_HPAGE_SHIFT(ptl) - PAGE_SHIFT) + +static inline gfn_t +gfn_to_index(gfn_t gfn, gfn_t base_gfn, const pt_level_t *pt_level) +{ + /* KVM_PT_LEVEL_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */ + KVM_BUG_ON(!pt_level->is_pte && !pt_level->is_huge); + return (gfn >> KVM_PT_LEVEL_HPAGE_GFN_SHIFT(pt_level)) - + (base_gfn >> KVM_PT_LEVEL_HPAGE_GFN_SHIFT(pt_level)); +} + +#define E2K_INVALID_PAGE (~(hpa_t)0) +#define ERROR_PAGE(x) IS_ERR_VALUE(x) +#define TO_ERROR_PAGE(x) ((hpa_t)((long)(x))) +#define PAGE_TO_ERROR(x) ((long)(x)) +#define IS_E2K_INVALID_PAGE(x) ((x) == E2K_INVALID_PAGE) +#define VALID_PAGE(x) (!IS_E2K_INVALID_PAGE(x) && !ERROR_PAGE(x)) + +#define KVM_PERMILLE_MMU_PAGES 20 +#define KVM_MIN_ALLOC_MMU_PAGES 64 +#define KVM_MMU_HASH_SHIFT 10 +#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) +#define KVM_MIN_FREE_MMU_PAGES 5 +#define KVM_REFILL_PAGES 25 + +#define KVM_ALIAS_SLOTS 4 + +#define KVM_SHADOW_SLOTS 8 + +#define KVM_HALT_POLL_NS_DEFAULT 400000 + +#define KVM_IRQCHIP_NUM_PINS KVM_IOEPIC_NUM_PINS + +#define KVM_MAX_EIOHUB_NUM MAX_NUMNODES + +#define ASYNC_PF_PER_VCPU 64 + +bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu); +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, + struct kvm_async_pf *work); + +struct kvm_vcpu; +struct kvm; + +extern struct mutex kvm_lock; +extern struct list_head vm_list; + +enum kvm_reg { + VCPU_REGS_UPSR = 0, + NR_VCPU_REGS +}; + +enum { + VCPU_SREG_USD_lo, + VCPU_SREG_USD_hi, + VCPU_SREG_PSP_lo, + VCPU_SREG_PSP_hi, + VCPU_SREG_PCSP_lo, + VCPU_SREG_PCSP_hi, + NR_VCPU_SREG +}; + +struct kvm_vcpu_stat { + /* follow fields should have any arch and arch-independent updated */ + u32 halt_successful_poll; + u32 halt_attempted_poll; + u64 halt_poll_invalid; + u32 halt_wakeup; + /* MMU stat */ + u64 pf_fixed; + u64 pf_guest; + u64 flush_gva; + u64 mmio_exits; + u64 signal_exits; +}; + +/* + * backup hardware stacks: + * 1) to spill/fill guest stacks while intercept/glaunch + * 2) to support hypercalls + */ +typedef struct bu_hw_stack { + hw_stack_area_t ps; /* procedure stack area */ + hw_stack_area_t pcs; /* chain stack area */ + e2k_psp_lo_t psp_lo; /* Procedure stack pointer: */ + e2k_psp_hi_t psp_hi; /* base & index & size */ + e2k_pcsp_lo_t pcsp_lo; /* Procedure chain stack */ + e2k_pcsp_hi_t pcsp_hi; /* pointer: base & index & size */ + int users; /* number of active hypercalls, */ + /* handlers on these stacks */ +} bu_hw_stack_t; + +/* sizes as common kernel stacks */ +#define HYPV_BACKUP_PS_SIZE KERNEL_P_STACK_SIZE +#define HYPV_BACKUP_PCS_SIZE KERNEL_PC_STACK_SIZE + +#define GET_BACKUP_PS_SIZE(bu_stacks) ((bu_stacks)->ps.size) +#define GET_BACKUP_PCS_SIZE(bu_stacks) ((bu_stacks)->pcs.size) +#define SET_BACKUP_PS_SIZE(bu_stacks, val) \ + 
(GET_BACKUP_PS_SIZE(bu_stacks) = (val)) +#define SET_BACKUP_PCS_SIZE(bu_stacks, val) \ + (GET_BACKUP_PCS_SIZE(bu_stacks) = (val)) +#define GET_BACKUP_PS_BASE(bu_stacks) GET_PS_BASE(bu_stacks) +#define GET_BACKUP_PCS_BASE(bu_stacks) GET_PCS_BASE(bu_stacks) + +/* + * guest VCPU boot-time stacks + */ +typedef struct vcpu_boot_stack { + data_stack_t data; /* local data stack */ + void *data_stack; /* data stack pointer at user space */ + hw_stack_area_t ps; /* procedure stack area */ + hw_stack_area_t pcs; /* chain stack area */ + void *proc_stack; /* procedure stack pointer at user */ + /* space */ + void *chain_stack; /* procedure chain stack pointer at */ + /* user space */ + guest_hw_stack_t regs; /* current registers state */ +} vcpu_boot_stack_t; + +#define GET_VCPU_BOOT_CS_BASE(boot_stacks) ((boot_stacks)->data.bottom) +#define GET_VCPU_BOOT_CS_TOP(boot_stacks) ((boot_stacks)->data.top) +#define GET_VCPU_BOOT_CS_SIZE(boot_stacks) ((boot_stacks)->data.size) +#define GET_VCPU_BOOT_PS_BASE(boot_stacks) GET_PS_BASE(boot_stacks) +#define GET_VCPU_BOOT_PS_SIZE(boot_stacks) ((boot_stacks)->ps.size) +#define GET_VCPU_BOOT_PCS_BASE(boot_stacks) GET_PCS_BASE(boot_stacks) +#define GET_VCPU_BOOT_PCS_SIZE(boot_stacks) ((boot_stacks)->pcs.size) +#define SET_VCPU_BOOT_CS_BASE(boot_stacks, val) \ + (GET_VCPU_BOOT_CS_BASE(boot_stacks) = (e2k_addr_t)(val)) +#define SET_VCPU_BOOT_CS_TOP(boot_stacks, val) \ + (GET_VCPU_BOOT_CS_TOP(boot_stacks) = (e2k_addr_t)(val)) +#define SET_VCPU_BOOT_CS_SIZE(boot_stacks, val) \ + (GET_VCPU_BOOT_CS_SIZE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PS_BASE(boot_stacks, val) \ + (GET_VCPU_BOOT_PS_BASE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PS_SIZE(boot_stacks, val) \ + (GET_VCPU_BOOT_PS_SIZE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PCS_BASE(boot_stacks, val) \ + (GET_VCPU_BOOT_PCS_BASE(boot_stacks) = (val)) +#define SET_VCPU_BOOT_PCS_SIZE(boot_stacks, val) \ + (GET_VCPU_BOOT_PCS_SIZE(boot_stacks) = (val)) + +struct kvm_vm_stat { + ulong mmu_shadow_zapped; + ulong mmu_pte_write; + ulong mmu_pte_updated; + ulong mmu_pde_zapped; + ulong mmu_flooded; + ulong mmu_recycled; + ulong mmu_cache_miss; + ulong mmu_unsync; + ulong remote_tlb_flush; + ulong lpages; +}; + +/* + * Don't want allocation failures within the mmu code, so need preallocate + * enough memory for a single page fault in a cache. + */ +#define KVM_NR_MEM_OBJS 400 +#define KVM_NR_MIN_MEM_OBJS 40 + +typedef struct kvm_mmu_memory_cache { + struct kmem_cache *kmem_cache; + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +} kvm_mmu_memory_cache_t; + +/* + * the pages used as guest page table on soft mmu are tracked by + * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used + * by indirect shadow page can not be more than 15 bits. + * + * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access, + * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. + */ +typedef union kvm_mmu_page_role { + unsigned word; + struct { + unsigned level:4; + unsigned cr4_pae:1; + unsigned quadrant:2; + unsigned direct:1; + unsigned access:3; + unsigned invalid:1; + unsigned nxe:1; + unsigned cr0_wp:1; + unsigned smep_andnot_wp:1; + unsigned smap_andnot_wp:1; + unsigned unused:8; + + /* + * This is left at the top of the word so that + * kvm_memslots_for_spte_role can extract it with a + * simple shift. While there is room, give it a whole + * byte so it is also faster to load it from memory. 
+ */ + unsigned smm:8; + }; +} kvm_mmu_page_role_t; + +typedef union kvm_mmu_root_flags { + unsigned word; + struct { + unsigned has_host_pgds:1; + unsigned has_guest_pgds:1; + unsigned unused:30; + }; +} kvm_mmu_root_flags_t; + +typedef struct kvm_rmap_head { + unsigned long val; +} kvm_rmap_head_t; + +typedef struct kvm_mmu_page { + struct list_head link; + struct hlist_node hash_link; + + /* + * The following two entries are used to key the shadow page in the + * hash table. + */ + gfn_t gfn; + kvm_mmu_page_role_t role; + kvm_mmu_root_flags_t root_flags; + + pgprot_t *spt; + gva_t gva; /* the shadow PT map guest virtual addresses from */ + /* hold the gfn of each spte inside spt */ + gfn_t *gfns; + bool unsync; + bool host_synced; /* host kernel range already synced */ + bool guest_kernel_synced; /* guest kernel range already synced */ + bool guest_user_synced; /* guest user range synced */ + bool released; /* root already under release */ + atomic_t atomic_unsync; /* there are atomicaly updated of PT entries */ + int root_count; /* Currently serving as active root */ + unsigned int unsync_children; + struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ + + /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */ + unsigned long mmu_valid_gen; + + DECLARE_BITMAP(unsync_child_bitmap, 512); + +#ifdef CONFIG_X86_32 + /* + * Used out of the mmu-lock to avoid reading spte values while an + * update is in progress; see the comments in __get_spte_lockless(). + */ + int clear_spte_count; +#endif + + /* Number of writes since the last time traversal visited this page. */ + atomic_t write_flooding_count; +} kvm_mmu_page_t; + +/* page fault handling results */ +typedef enum pf_res { + PFRES_NO_ERR = 0, + PFRES_WRITE_TRACK, + PFRES_INJECTED, + PFRES_TRY_MMIO, + PFRES_ERR, + PFRES_RETRY, /* page fault is not handled and can */ + /* be retried on guest or should be handled */ + /* from begining by hypervisor */ +} pf_res_t; + +struct kvm_arch_exception; + +/* + * e2k supports 2 types of virtual space: + * - primary (native); + * - secondary (x86 emulation). + * enable 3 paging modes: + * - 4-level 64-bit, primary and secondary; + * - 3-level 64-bit, only secondary; + * - 2-level 32-bit, only secondary. + * Primary (native) virtual space can be managed as: + * - common linux standard: user and OS share single virtual space. In this + * case there is one united PT to translate user and kernel addresses and + * one PT root pointed to by MMU registers U_PPTB/U_VPTB; + * - separate user and OS virtual spaces: there are two separate PTs + * to translate user and kernel addresses and two separate PTs roots + * pointed to by MMU registers U_PPTB/U_VPTB (for users) and + * OS_PPTB/OS_VPTB (for kernel). + * + * The kvm_mmu structure abstracts the details of the current mmu mode. 
+ */
+typedef struct kvm_mmu {
+ hpa_t sh_u_root_hpa; /* shadow PT root for user (and probably OS) */
+ hpa_t sh_os_root_hpa; /* shadow PT root for OS (separate spaces) */
+ hpa_t gp_root_hpa; /* physical base of root PT to translate */
+ /* guest physical addresses */
+ gva_t sh_u_vptb; /* shadow base address to map user */
+ /* (and probably OS) PTs to virtual space */
+ gva_t sh_os_vptb; /* shadow base address to map kernel */
+ /* (separate spaces) PTs to virtual space */
+ gva_t sh_os_vab; /* common hypervisor and guest pointer to */
+ /* border of user and OS spaces */
+ hpa_t sh_root_hpa;
+ bool sep_virt_space; /* separate virtual spaces for OS and user */
+ bool direct_map;
+ bool is_spae; /* extended physical memory is enabled */
+ bool is_pse; /* 4 Mb pages are enabled for secondary space */
+ bool paging_on; /* virtual space translation mode is ON */
+ bool shadow_pt_on; /* shadow PT mode is enabled */
+ bool phys_pt_on; /* guest physical addresses PT is ON */
+ bool tdp_on; /* two dimensional paging is ON */
+ bool spt_gpa_fault; /* guest physical address access fault */
+ /* at shadow paging mode */
+ bool nx; /* not executable is ??? */
+ bool u_context_on; /* guest user MMU context created and is ON */
+ int pid; /* guest process ID (mmu context) */
+ int pid2; /* guest process ID at secondary space */
+ int root_level;
+ int shadow_root_level;
+ kvm_mmu_page_role_t base_role;
+ u64 *pae_root;
+ u64 *lm_root;
+ /* guest PT roots pointers */
+ pgprotval_t os_pptb; /* guest OS primary (native) */
+ /* page table physical base of VCPU */
+ gva_t os_vptb; /* guest OS primary (native) */
+ /* page table virtual base of VCPU */
+ pgprotval_t u_pptb; /* guest user primary (native) */
+ /* page table physical base of VCPU */
+ gva_t u_vptb; /* guest user primary (native) */
+ /* page table virtual base of VCPU */
+ pgprotval_t u2_pptb; /* guest secondary page table */
+ /* physical base of VCPU */
+ gpa_t mpt_b; /* guest protection table base */
+ /* of VCPU */
+ pgprotval_t pdptes[4]; /* current root level PTEs registers */
+ /* of VCPU for extended physical */
+ /* address mode (SPAE) */
+ gpa_t tc_gpa; /* guest 'physical address' of */
+ /* trap cellar (TRAP_POINT MMU reg.) */
+ struct page *tc_page; /* host page of guest trap cellar */
+ void *tc_kaddr; /* host virtual address of guest */
+ /* trap cellar */
+ int tc_num; /* number of entries at trap cellar */
+ /* same as TRAP_COUNT / 3 */
+ gmm_struct_t *gmm; /* host agent of current guest mm */
+ gmm_struct_t *active_gmm; /* only on host: current active */
+ /* guest mm agent on host (same as */
+ /* active_mm at native mode) */
+
+ /* MMU interceptions control registers state */
+ virt_ctrl_mu_t virt_ctrl_mu;
+ mmu_reg_t g_w_imask_mmu_cr;
+
+ /* MMU shadow control registers initial state */
+ mmu_reg_t init_sh_mmu_cr;
+ mmu_reg_t init_sh_pid;
+
+ /* Can have large pages at levels 2..last_nonleaf_level-1. */
+ u8 last_nonleaf_level;
+
+ /* MMU interface */
+ bool (*is_paging)(struct kvm_vcpu *vcpu);
+ void (*set_vcpu_u_pptb)(struct kvm_vcpu *vcpu, pgprotval_t base);
+ void (*set_vcpu_sh_u_pptb)(struct kvm_vcpu *vcpu, hpa_t root);
+ void (*set_vcpu_os_pptb)(struct kvm_vcpu *vcpu, pgprotval_t base);
+ void (*set_vcpu_sh_os_pptb)(struct kvm_vcpu *vcpu, hpa_t root);
+ void (*set_vcpu_u_vptb)(struct kvm_vcpu *vcpu, gva_t base);
+ void (*set_vcpu_sh_u_vptb)(struct kvm_vcpu *vcpu, gva_t base);
+ void (*set_vcpu_os_vptb)(struct kvm_vcpu *vcpu, gva_t base);
+ void (*set_vcpu_sh_os_vptb)(struct kvm_vcpu *vcpu, gva_t base);
+ void (*set_vcpu_os_vab)(struct kvm_vcpu *vcpu, gva_t os_virt_base);
+ void (*set_vcpu_gp_pptb)(struct kvm_vcpu *vcpu, hpa_t root);
+ pgprotval_t (*get_vcpu_u_pptb)(struct kvm_vcpu *vcpu);
+ hpa_t (*get_vcpu_sh_u_pptb)(struct kvm_vcpu *vcpu);
+ pgprotval_t (*get_vcpu_os_pptb)(struct kvm_vcpu *vcpu);
+ hpa_t (*get_vcpu_sh_os_pptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_u_vptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_sh_u_vptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_os_vptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_sh_os_vptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_os_vab)(struct kvm_vcpu *vcpu);
+ hpa_t (*get_vcpu_gp_pptb)(struct kvm_vcpu *vcpu);
+ void (*set_vcpu_pt_context)(struct kvm_vcpu *vcpu, unsigned flags);
+ void (*init_vcpu_ptb)(struct kvm_vcpu *vcpu);
+ pgprotval_t (*get_vcpu_context_u_pptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_context_u_vptb)(struct kvm_vcpu *vcpu);
+ pgprotval_t (*get_vcpu_context_os_pptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_context_os_vptb)(struct kvm_vcpu *vcpu);
+ gva_t (*get_vcpu_context_os_vab)(struct kvm_vcpu *vcpu);
+ hpa_t (*get_vcpu_context_gp_pptb)(struct kvm_vcpu *vcpu);
+ pgprotval_t (*get_vcpu_pdpte)(struct kvm_vcpu *vcpu, int index);
+ pf_res_t (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+ bool prefault, gfn_t *gfn, kvm_pfn_t *pfn);
+ void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+ struct kvm_arch_exception *fault);
+ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+ struct kvm_arch_exception *exception);
+ gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+ struct kvm_arch_exception *exception);
+ void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ pgprot_t *spte, const void *pte);
+ void (*flush_gva)(struct kvm_vcpu *vcpu, gva_t gva);
+ void (*flush_gva_range)(struct kvm_vcpu *vcpu, gva_t gva_start,
+ gva_t gva_end);
+ int (*sync_page)(struct kvm_vcpu *vcpu, kvm_mmu_page_t *sp);
+} kvm_mmu_t;
+
+typedef struct intc_mu_state {
+ unsigned long notifier_seq; /* 'mmu_notifier_seq' state before */
+ /* gfn->pfn translation */
+ pf_res_t pfres; /* page fault handling result */
+ bool may_be_retried; /* the MMU request can be retried */
+ bool ignore_notifier; /* the MMU request should ignore the
+ * MMU notifier status */
+} intc_mu_state_t;
+
+/* define exit reasons (requests) from guest to host */
+#define EXIT_REASON_VM_PANIC 0
+#define EXIT_REASON_MMIO_REQ 1
+#define EXIT_REASON_IOPORT_REQ 2
+
+#define EXIT_NOTIFY_IO 3
+
+#define EXIT_SHUTDOWN 4
+
+typedef struct kvm_ioport_req {
+ uint64_t port;
+ uint64_t data; /* single data or guest address of string */
+ u32 __user *user_data; /* user pointer to data */
+ uint32_t count;
+ uint32_t cur_count;
+ uint8_t size;
+ uint8_t is_out;
+ uint8_t string;
+ uint8_t needed;
+ uint8_t completed;
+} kvm_ioport_req_t;
+
+typedef struct kvm_lapic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 dest_mode;
+ u32 level;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
+} kvm_lapic_irq_t;
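
/*
 * A minimal sketch of how the kvm_mmu function-pointer interface above is
 * meant to be driven.  The helper below is hypothetical (not part of this
 * patch) and only illustrates the calling convention; treating a nonpaging
 * guest as identity-mapped is an assumption of the sketch.
 */
static inline gpa_t example_gva_to_gpa(kvm_mmu_t *mmu, struct kvm_vcpu *vcpu,
		gva_t gva, u32 access, struct kvm_arch_exception *exc)
{
	/* before paging is enabled, assume one-to-one translation */
	if (!mmu->is_paging(vcpu))
		return (gpa_t)gva;

	/* otherwise walk the guest page tables through the installed op */
	return mmu->gva_to_gpa(vcpu, gva, access, exc);
}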
+
+/*
+ * Unlike kvm_lapic_irq, CEPIC doesn't have dest_mode (physical/logical)
+ * and level (polarity) fields
+ */
+typedef struct kvm_cepic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
+} kvm_cepic_irq_t;
+
+typedef struct kvm_mem_alias {
+ unsigned long alias_start;
+ unsigned long target_start;
+ gfn_t alias_base_gfn;
+ unsigned long npages;
+ u32 target_slot;
+} kvm_mem_alias_t;
+
+typedef struct kvm_kernel_shadow {
+ unsigned long kernel_start;
+ unsigned long shadow_start;
+ unsigned long area_size;
+ u32 alias_slot;
+} kvm_kernel_shadow_t;
+
+struct user_area;
+
+typedef struct kvm_mem_guest {
+ struct user_area *area;
+ kvm_guest_mem_type_t type; /* type of memory: RAM, VRAM */
+} kvm_mem_guest_t;
+
+/*
+ * Delivery modes of Virtual IRQs (see field 'flags' below)
+ */
+#define DIRECT_INJ_VIRQ_FLAG 0x0010UL /* direct injection of VIRQ */
+ /* to VCPU process */
+typedef struct kvm_guest_virq {
+ int virq_id; /* VIRQ number */
+ atomic_t *count; /* pointer to atomic counter */
+ /* of unhandled VIRQs */
+ unsigned long flags; /* delivery mode and other flags of */
+ /* virtual IRQ (see above) */
+ struct kvm_vcpu *vcpu; /* Virtual guest CPU to handle VIRQ */
+ struct task_struct *host_task; /* host task structure of VIRQ */
+ int stop_handler; /* VIRQ handler should be stopped */
+} kvm_guest_virq_t;
+
+/*
+ * Context that is saved and restored by software when
+ * switching from hypervisor to guest or vice versa.
+ */
+typedef struct kvm_sw_cpu_context {
+ int osem;
+ bool in_hypercall;
+
+ e2k_usd_lo_t usd_lo;
+ e2k_usd_hi_t usd_hi;
+ e2k_sbr_t sbr;
+
+ struct to_save {
+ bool valid;
+ e2k_usd_lo_t usd_lo;
+ e2k_usd_hi_t usd_hi;
+ e2k_sbr_t sbr;
+ } saved;
+
+ e2k_mem_crs_t crs; /* only for PV guest */
+
+ /*
+ * TODO here goes stuff that does not need to be switched
+ * on hypercalls if we do not support calling QEMU from them
+ */
+ e2k_fpcr_t fpcr;
+ e2k_fpsr_t fpsr;
+ e2k_pfpfr_t pfpfr;
+
+ e2k_upsr_t upsr;
+
+#ifdef CONFIG_GREGS_CONTEXT
+ /*
+ * Guest has its own global registers context (vcpu) different
+ * from QEMU (host).
+ * The VCPU (guest) context should be restored while vcpu_load()
+ * after saving the host (QEMU) context and be saved while vcpu_put()
+ * before restoring the host (QEMU) context.
+ * Only one current VCPU or host context should be saved and restored
+ * while switch to/from other processes.
+ */
+ global_regs_t vcpu_gregs;
+ global_regs_t host_gregs;
+ kernel_gregs_t vcpu_k_gregs;
+ kernel_gregs_t host_k_gregs;
+ host_gregs_t vcpu_h_gregs;
+ host_gregs_t host_h_gregs;
+#endif /* CONFIG_GREGS_CONTEXT */
+
+ e2k_cutd_t cutd;
+
+ /* guest (hypervisor shadow) user page table bases: */
+ mmu_reg_t sh_u_pptb; /* physical */
+ mmu_reg_t sh_u_vptb; /* and virtual */
+ mmu_reg_t tc_hpa; /* host physical base of VCPU */
+ /* trap cellar */
+ mmu_reg_t trap_count;
+
+ e2k_dibcr_t dibcr;
+ e2k_ddbcr_t ddbcr;
+ e2k_dibsr_t dibsr;
+ e2k_ddbsr_t ddbsr;
+ e2k_dimcr_t dimcr;
+ e2k_ddmcr_t ddmcr;
+ u64 dimar0;
+ u64 dimar1;
+ u64 ddmar0;
+ u64 ddmar1;
+ u64 dibar0;
+ u64 dibar1;
+ u64 dibar2;
+ u64 dibar3;
+ u64 ddbar0;
+ u64 ddbar1;
+ u64 ddbar2;
+ u64 ddbar3;
+
+#ifdef CONFIG_USE_AAU
+ e2k_aau_t aau_context;
+#endif
+
+ u64 cs_lo;
+ u64 cs_hi;
+ u64 ds_lo;
+ u64 ds_hi;
+ u64 es_lo;
+ u64 es_hi;
+ u64 fs_lo;
+ u64 fs_hi;
+ u64 gs_lo;
+ u64 gs_hi;
+ u64 ss_lo;
+ u64 ss_hi;
+ u64 rpr_lo;
+ u64 rpr_hi;
+ u64 tcd;
+} kvm_sw_cpu_context_t;
+
+/*
+ * Context that is saved and restored by software _only_ for
+ * interceptions when switching from hypervisor to guest or vice versa.
+ */
+typedef struct kvm_intc_cpu_context {
+ e2k_ctpr_t ctpr1;
+ e2k_ctpr_t ctpr2;
+ e2k_ctpr_t ctpr3;
+ e2k_ctpr_hi_t ctpr1_hi;
+ e2k_ctpr_hi_t ctpr2_hi;
+ e2k_ctpr_hi_t ctpr3_hi;
+ u64 lsr;
+ u64 ilcr;
+ u64 lsr1;
+ u64 ilcr1;
+
+ int cu_num, mu_num;
+ int cur_mu; /* # of current MMU intercept in handling */
+ intc_info_cu_t cu;
+ intc_info_mu_t mu[INTC_INFO_MU_ITEM_MAX];
+ intc_mu_state_t mu_state[INTC_INFO_MU_ITEM_MAX];
+ bool mu_updated; /* the mu info was updated, so need restore */
+ /* on the registers */
+ bool cu_updated; /* the cu info was updated, so need restore */
+ /* on the registers */
+ s8 nr_TIRs;
+ u64 exceptions; /* source mask of all exceptions in the TIRs */
+ /* at the interception moment */
+ /* should be the same as INTC_INFO_CU.hi.exc */
+ u64 exc_to_create; /* mask of the new exceptions to add at TIRs */
+ u64 exc_to_delete; /* mask of the exceptions to delete from TIRs */
+ u64 exc_to_pass; /* mask of the exceptions from source */
+ /* TIRs exceptions to pass to the guest */
+ gva_t exc_IP_to_create; /* IP to create exceptions like instruction */
+ /* page faults */
+ e2k_tir_t TIRs[TIR_NUM];
+ u64 sbbp[SBBP_ENTRIES_NUM];
+ u64 intc_mu_to_move;
+ u64 cu_entry_handled;
+} kvm_intc_cpu_context_t;
+
+struct kvm_epic_page;
+
+/*
+ * here goes context that is:
+ * - saved and restored by hardware when switching
+ * from hypervisor to guest or vice versa.
+ * - belongs only to guest and isn't used by hypervisor
+ *
+ * Then this context is:
+ * 1) Initialized by hypervisor in init_hw_ctxt()
+ * 2) Written to actual registers for the first time
+ * in write_hw_ctxt_to_registers()
+ * 3) Switched in save/restore_kvm_context_v6()
+ */
+typedef struct kvm_hw_cpu_context {
+ e2k_psp_lo_t sh_psp_lo;
+ e2k_psp_hi_t sh_psp_hi;
+ e2k_pcsp_lo_t sh_pcsp_lo;
+ e2k_pcsp_hi_t sh_pcsp_hi;
+ e2k_pshtp_t sh_pshtp;
+ e2k_pcshtp_t sh_pcshtp;
+ e2k_wd_t sh_wd;
+ e2k_psp_lo_t bu_psp_lo;
+ e2k_psp_hi_t bu_psp_hi;
+ e2k_pcsp_lo_t bu_pcsp_lo;
+ e2k_pcsp_hi_t bu_pcsp_hi;
+
+ mmu_reg_t sh_mmu_cr;
+ mmu_reg_t sh_pid;
+ mmu_reg_t sh_os_pptb;
+ mmu_reg_t gp_pptb;
+ mmu_reg_t sh_os_vptb;
+ mmu_reg_t sh_os_vab;
+ mmu_reg_t gid;
+
+ e2k_oscud_lo_t sh_oscud_lo;
+ e2k_oscud_hi_t sh_oscud_hi;
+ e2k_osgd_lo_t sh_osgd_lo;
+ e2k_osgd_hi_t sh_osgd_hi;
+ e2k_cutd_t sh_oscutd;
+ e2k_cuir_t sh_oscuir;
+ u64 sh_osr0;
+ e2k_core_mode_t sh_core_mode;
+
+ virt_ctrl_cu_t virt_ctrl_cu;
+ virt_ctrl_mu_t virt_ctrl_mu;
+ mmu_reg_t g_w_imask_mmu_cr;
+
+ struct kvm_epic_page *cepic;
+
+ /* Secondary space registers */
+ u64 u2_pptb;
+ u64 pid2;
+ u64 mpt_b;
+ u64 pci_l_b;
+ u64 ph_h_b;
+ u64 ph_hi_l_b;
+ u64 ph_hi_h_b;
+ u64 pat;
+ u64 pdpte0;
+ u64 pdpte1;
+ u64 pdpte2;
+ u64 pdpte3;
+} kvm_hw_cpu_context_t;
+
+/*
+ * The structure is needed only for the paravirtualized guest mode:
+ * !vcpu->arch.is_hv && vcpu->arch.is_pv
+ *
+ * Hypervisor context which should be different for the host VCPU thread (qemu)
+ * and guest VCPU threads to allow running the VCPU kernel thread as
+ * multi-threaded.
+ * The one VCPU kernel thread runs one of the following processes:
+ * the host application qemu (or some other to control VM);
+ * the guest kernel threads;
+ * the guest user threads.
+ * This structure is to save/restore and switch from/to the one host VCPU thread
+ * and many guest threads.
+ * If the host thread is now active then the thread info contains this active
+ * context and vcpu->arch.host_ctxt contains the context of the last running
+ * guest thread.
+ * If the guest thread is active then the thread_info contains the context of
+ * this active guest thread and the structure vcpu->arch.host_ctxt contains
+ * the context of the host thread.
+ * Contexts of inactive guest threads are saved/restored in such
+ * structures in their gthread_info
+ */
+typedef struct kvm_host_context {
+ /* the task->stack pointer */
+ unsigned long *stack;
+ /* the host kernel's stack of local data */
+ /* one for host thread and one common for all guest threads */
+ e2k_usd_hi_t k_usd_hi;
+ e2k_usd_lo_t k_usd_lo;
+ e2k_sbr_t k_sbr;
+ /* the host kernel's hardware stacks */
+ /* one for host thread and one common for all guest threads */
+ e2k_psp_lo_t k_psp_lo;
+ e2k_psp_hi_t k_psp_hi;
+ e2k_pcsp_lo_t k_pcsp_lo;
+ e2k_pcsp_hi_t k_pcsp_hi;
+
+ /* pointer to the top of 'pt_regs' structures list */
+ pt_regs_t *pt_regs;
+ /* the host kernel's signal/trap stack of contexts */
+ kvm_signal_context_t signal;
+} kvm_host_context_t;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_apf_ready {
+ struct list_head link;
+ u32 apf_id;
+};
+#endif /* CONFIG_KVM_ASYNC_PF */
+
+struct kvm_vcpu_arch {
+ kvm_sw_cpu_context_t sw_ctxt;
+ kvm_intc_cpu_context_t intc_ctxt;
+ kvm_hw_cpu_context_t hw_ctxt;
+ kvm_host_context_t host_ctxt;
+
+ int launched;
+ int last_exit;
+ bool is_pv; /* VCPU is under paravirtualized */
+ /* support */
+ bool is_hv; /* VCPU is under hardware virtualized */
+ /* support */
+
+ kvm_vcpu_state_t *vcpu_state;
+ kvm_vcpu_state_t *kmap_vcpu_state; /* alias of VCPU state */
+ /* mapped into kernel VM */
+ /* space */
+ e2k_cute_t *guest_cut;
+ e2k_addr_t guest_phys_base; /* guest image (kernel) physical base */
+ char *guest_base; /* guest image (kernel) virtual base */
+ e2k_size_t guest_size; /* guest image (kernel) size */
+ e2k_size_t trap_offset; /* guest trap table #0 entry offset */
+ /* from guest image base */
+ char *trap_entry; /* guest trap table #0 base as entry */
+ /* to paravirtualized trap handler */
+
+ bu_hw_stack_t hypv_backup; /* backup hardware stacks */
+ vcpu_boot_stack_t boot_stacks; /* guest boot-time stacks */
+ guest_hw_stack_t guest_stacks; /* guest hardware stacks state */
+ /* to emulate hardware supported */
+ /* HCALLs */
+ gthread_info_t *gti; /* host agent of current active guest */
+ /* thread/process */
+
+ /*
+ * Paging state of the vcpu
+ * This context is always used to handle faults.
+ */
+ kvm_mmu_t mmu;
+
+ /*
+ * Pointer to the mmu context currently used for
+ * gva_to_gpa translations.
+ */
+ kvm_mmu_t *walk_mmu;
+
+ kvm_mmu_memory_cache_t mmu_pte_list_desc_cache;
+ kvm_mmu_memory_cache_t mmu_page_cache;
+ kvm_mmu_memory_cache_t mmu_page_header_cache;
+
+ /* FIXME: Cache MMIO info is not fully implemented */
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
+ u64 mmio_gen;
+ u64 mmio_data[1];
+ u64 __user *mmio_user_data;
+ intc_info_mu_t *io_intc_info;
+
+ /*
+ * Indicates whether the access faulted on its page table in the
+ * guest; it is set when fixing a page fault and is used to detect
+ * unhandleable instructions.
+ * FIXME: it is a field from the x86 arch, so is it needed for e2k???
+ */
+ bool write_fault_to_shadow_pgtable;
+
+ u64 apic_base;
+ struct kvm_lapic *apic; /* kernel irqchip context */
+ int32_t apic_arb_prio;
+
+ /* Software KVM CEPIC model */
+ u64 epic_base;
+ struct kvm_cepic *epic;
+
+ /* Hardware guest CEPIC support */
+ raw_spinlock_t epic_dam_lock; /* lock to update dam_active */
+ bool epic_dam_active;
+ struct hrtimer cepic_idle;
+
+ int mp_state;
+ int sipi_vector;
+ struct task_struct *guest_task; /* guest task */
+ struct task_struct *host_task; /* host task: main VCPU host */
+ /* or VIRQ VCPU host for VIRQ VCPUs */
+ struct mutex lock; /* host and guest part of VCPU */
+ /* including VIRQ VCPUs */
+ /* synchronization */
+ struct list_head vcpus_to_spin; /* list of VCPUs to support boot-time */
+ /* spin lock/unlock */
+ bool unhalted; /* VCPU was woken up by pv_kick */
+ bool halted; /* VCPU is halted */
+ bool on_idle; /* VCPU is on idle waiting for some */
+ /* events for guest */
+ bool on_spinlock; /* VCPU is on slow spinlock waiting */
+ bool on_csd_lock; /* VCPU is waiting for csd unlocking */
+ /* (IPI completion) */
+ bool should_stop; /* guest VCPU thread should be */
+ /* stopped and completed */
+ bool virq_wish; /* trap 'last wish' is injected to */
+ /* pass pending VIRQs to guest */
+ bool virq_injected; /* interrupt is injected to handle */
+ /* pending VIRQs by guest */
+ bool on_virqs_handling; /* VCPU is handling pending VIRQs */
+ bool vm_exit_wish; /* VCPU needs to do VM exit and */
+ /* exit reason handling */
+ struct completion exited; /* guest VCPU thread completed */
+ struct completion released; /* all VCPU threads completed and */
+ /* VCPU can be freed */
+ struct hrtimer hrt; /* local timer of VCPU */
+ int hrt_virq_no; /* number of VIRQ of local timer */
+ long hrt_period; /* period of hr timer */
+ long hrt_running_start; /* VCPU running time when timer */
+ /* was started */
+
+ char *entry_point; /* startup point of guest image */
+ int args_num; /* arguments number to pass to guest */
+ u64 args[KVM_VCPU_MAX_GUEST_ARGS]; /* arguments to pass */
+
+ /* Exit data for guest */
+ uint32_t exit_reason;
+ kvm_ioport_req_t ioport; /* IO port access (in()/out()) */
+ void *ioport_data; /* pointer to IO port data at */
+ /* kvm_run page (now middle) */
+ int64_t ioport_data_size; /* max size of IO port data area */
+ uint32_t notifier_io; /* IO request notifier */
+
+ bool in_exit_req; /* VCPU is waiting for exit */
+ /* request completion */
+ /* exit request in progress */
+ struct completion exit_req_done; /* exit request is completed */
+
+ struct list_head exit_reqs_list; /* exit requests list head */
+ /* used only on main VCPU */
+ struct list_head exit_req; /* the VCPU exit request */
+ raw_spinlock_t exit_reqs_lock; /* to lock list of exit */
+ /* requests */
+
+ struct work_struct dump_work; /* to schedule work to dump */
+ /* guest VCPU state */
+
+ u8 event_exit_inst_len;
+
+ uint32_t exit_shutdown_terminate;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+ struct {
+ bool enabled;
+ struct gfn_to_hva_cache reason_gpa; /* hva of guest per-cpu */
+ /* pv_apf_event.apf_reason */
+ struct gfn_to_hva_cache id_gpa; /* hva of guest per-cpu */
+ /* pv_apf_event.apf_id */
+ u32 cnt; /* Counter of async pf */
+ /* events on this vcpu. */
+ u32 host_apf_reason; /* Reason for async pf: */
+ /* page in swap or page ready. */
+ u32 apf_ready_vector; /* Irq vector number to notify */
+ /* that page is ready */
+ u32 irq_controller; /* Type of irq controller to use */
+ /* to notify guest that page is ready */
+ bool in_pm; /* Is privileged mode intercepted? */
+ } apf;
+#endif /* CONFIG_KVM_ASYNC_PF */
+
+ int node_id;
+ int hard_cpu_id;
+};
+
+#ifdef CONFIG_KVM_HV_MMU
+typedef struct kvm_lpage_info {
+ int disallow_lpage;
+} kvm_lpage_info_t;
+
+typedef struct kvm_arch_memory_slot {
+ kvm_rmap_head_t *rmap[KVM_NR_PAGE_SIZES];
+ kvm_lpage_info_t *lpage_info[KVM_NR_PAGE_SIZES - 1];
+ kvm_mem_guest_t guest_areas;
+ unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
+} kvm_arch_memory_slot_t;
+#else /* ! CONFIG_KVM_HV_MMU */
+struct kvm_lpage_info {
+ int write_count;
+};
+
+struct kvm_arch_memory_slot {
+ unsigned long *rmap;
+ struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+ kvm_mem_guest_t guest_areas;
+};
+
+extern struct file_operations kvm_vm_fops;
+#endif /* CONFIG_KVM_HV_MMU */
+
+/*
+ * e2k-arch vcpu->requests bit members
+ */
+#define KVM_REQ_TRIPLE_FAULT 10 /* FIXME: not implemented */
+#define KVM_REQ_MMU_SYNC 11 /* FIXME: not implemented */
+#define KVM_REQ_PENDING_IRQS 15 /* there are unhandled IRQs */
+ /* injected on VCPU */
+#define KVM_REQ_PENDING_VIRQS 16 /* there are unhandled VIRQs */
+ /* to inject on VCPU */
+#define KVM_REG_SHOW_STATE 17 /* bit should be cleared */
+ /* after show state of VCPU */
+ /* completion */
+#define KVM_REQ_KICK 18 /* VCPU should be kicked */
+#define KVM_REQ_VIRQS_INJECTED 20 /* pending VIRQs injected */
+#define KVM_REQ_SCAN_IOAPIC 23 /* scan IO-APIC */
+#define KVM_REQ_SCAN_IOEPIC 24 /* scan IO-EPIC */
+
+#define kvm_set_pending_virqs(vcpu) \
+ set_bit(KVM_REQ_PENDING_VIRQS, (void *)&vcpu->requests)
+#define kvm_test_and_clear_pending_virqs(vcpu) \
+ test_and_clear_bit(KVM_REQ_PENDING_VIRQS, \
+ (void *)&vcpu->requests)
+#define kvm_clear_pending_virqs(vcpu) \
+ clear_bit(KVM_REQ_PENDING_VIRQS, (void *)&vcpu->requests)
+#define kvm_test_pending_virqs(vcpu) \
+ test_bit(KVM_REQ_PENDING_VIRQS, (void *)&vcpu->requests)
+#define kvm_set_virqs_injected(vcpu) \
+ set_bit(KVM_REQ_VIRQS_INJECTED, (void *)&vcpu->requests)
+#define kvm_test_and_clear_virqs_injected(vcpu) \
+ test_and_clear_bit(KVM_REQ_VIRQS_INJECTED, \
+ (void *)&vcpu->requests)
+#define kvm_clear_virqs_injected(vcpu) \
+ clear_bit(KVM_REQ_VIRQS_INJECTED, (void *)&vcpu->requests)
+#define kvm_test_virqs_injected(vcpu) \
+ test_bit(KVM_REQ_VIRQS_INJECTED, (void *)&vcpu->requests)
+#define kvm_start_vcpu_show_state(vcpu) \
+ test_and_set_bit(KVM_REG_SHOW_STATE, (void *)&vcpu->requests)
+#define kvm_complete_vcpu_show_state(vcpu) \
+do { \
+ if (test_and_clear_bit(KVM_REG_SHOW_STATE, (void *)&vcpu->requests)) \
+ wake_up_bit((void *)&vcpu->requests, KVM_REG_SHOW_STATE); \
+} while (false)
+
+struct kvm_irq_mask_notifier {
+ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
+ int irq;
+ struct hlist_node link;
+};
+
+typedef const pt_struct_t * (*get_pt_struct_func_t)(struct kvm *kvm);
+typedef const pt_struct_t * (*get_vcpu_pt_struct_func_t)(struct kvm_vcpu *vcpu);
+
+struct irq_remap_table {
+ bool enabled;
+ unsigned int host_pin;
+ unsigned int guest_pin;
+ int host_node;
+ int guest_node;
+ /* IOEPIC passthrough page start */
+ hpa_t hpa;
+ gpa_t gpa;
+ struct pci_dev *vfio_dev;
+};
+
+struct kvm_arch {
+ unsigned long vm_type; /* virtual machine type */
+ unsigned long flags;
+ int naliases;
+ int nshadows;
+ kvm_nid_t vmid; /* VM ID */
+ unsigned int bsp_vcpu_id;
+ bool is_pv; /* paravirtualized VM */
+ bool is_hv; /* hardware virtualized VM */
+ bool shadow_pt_enable; /* shadow PT is supported and is base of */
+ /* guest MMU emulation */
+ bool phys_pt_enable; /* guest physical addresses PT is supported */
+ /* by MMU and hypervisor */
+ bool tdp_enable; /* two dimensional paging is supported */
+ /* by hardware MMU and hypervisor */
+ bool shadow_pt_set_up; /* shadow PT was set up, skip setup on other VCPUs */
+ kvm_mem_alias_t aliases[KVM_ALIAS_SLOTS];
+ kvm_kernel_shadow_t shadows[KVM_SHADOW_SLOTS];
+ kvm_nidmap_t gpid_nidmap[GPIDMAP_ENTRIES];
+ struct hlist_head gpid_hash[GPID_HASH_SIZE];
+ kvm_gpid_table_t gpid_table;
+ struct kmem_cache *gti_cachep;
+ char gti_cache_name[32];
+ kvm_nidmap_t gmmid_nidmap[GMMIDMAP_ENTRIES];
+ struct hlist_head gmmid_hash[GMMID_HASH_SIZE];
+ gmmid_table_t gmmid_table;
+ gmm_struct_t *init_gmm; /* host agent of guest kernel mm */
+
+ /* host page table structure to support guest MMU and PTs can be */
+ /* different in common case */
+ const pt_struct_t *host_pt_struct; /* abstractions for details */
+ /* of the host page table structure */
+ const pt_struct_t *guest_pt_struct; /* abstractions for details */
+ /* of the guest page table structure */
+ const pt_struct_t *gp_pt_struct; /* abstractions for details */
+ /* of the guest physical page table */
+ /* structure, if enabled */
+ get_pt_struct_func_t get_host_pt_struct;
+ get_vcpu_pt_struct_func_t get_vcpu_pt_struct;
+ get_pt_struct_func_t get_gp_pt_struct;
+
+#ifdef CONFIG_KVM_HV_MMU
+ /* MMU nonpaging mode */
+ hpa_t nonp_root_hpa; /* physical base of nonpaging root PT */
+ /* MMU pages statistic */
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
+ unsigned long mmu_valid_gen;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+
+ /* Hash table of struct kvm_mmu_page. */
+ struct list_head active_mmu_pages;
+ struct list_head zapped_obsolete_pages;
+ struct kvm_page_track_notifier_node mmu_sp_tracker;
+ struct kvm_page_track_notifier_head track_notifier_head;
+#endif /* CONFIG_KVM_HV_MMU */
+
+ kvm_host_info_t *host_info; /* host machine and kernel INFO */
+ kvm_host_info_t *kmap_host_info; /* host machine and kernel INFO */
+ /* mapped to kernel space */
+ kvm_guest_info_t guest_info; /* guest machine and kernel INFO */
+ raw_spinlock_t time_state_lock; /* lock to update VM time */
+ bool is_epic; /* 0 - paravirt LAPIC/IO-APIC model */
+ /* 1 - paravirt CEPIC/IO-EPIC model */
+ struct kvm_ioapic *vioapic[KVM_MAX_EIOHUB_NUM];
+ struct kvm_ioepic *ioepic[KVM_MAX_EIOHUB_NUM];
+ int vapics_in_nmi_mode;
+ struct page *epic_pages; /* HW CEPIC support */
+ struct list_head assigned_dev_head;
+ struct iommu_domain *iommu_domain;
+ struct irq_remap_table *irt;
+ unsigned long irq_sources_bitmap;
+ struct kvm_nbsr *nbsr;
+ struct kvm_lt *lt[KVM_MAX_EIOHUB_NUM];
+ struct kvm_spmc *spmc[KVM_MAX_EIOHUB_NUM];
+
+ /* boot-time spinlocks manage */
+ struct hlist_head boot_spinlock_hash[BOOT_SPINLOCK_HASH_SIZE];
+ boot_spinlock_unlocked_t boot_spinunlocked_list[
+ BOOT_SPINUNLOCKED_LIST_SIZE];
+ struct list_head boot_spinunlocked_head;
+ struct list_head boot_spinunlocked_free;
+ struct list_head boot_spinunlocked_wait;
+ raw_spinlock_t boot_spinlock_hash_lock;
+ bool boot_spinlock_hash_disable;
+
+/* TODO memory leak!!! */
+ /* spinlocks manage */
+ struct hlist_head spinlock_hash[SPINLOCK_HASH_SIZE];
+ spinlock_unlocked_t spinunlocked_list[SPINUNLOCKED_LIST_SIZE];
+ struct list_head spinunlocked_head;
+ struct list_head spinunlocked_free;
+ struct list_head spinunlocked_wait;
+ raw_spinlock_t spinlock_hash_lock;
+ bool spinlock_hash_disable;
+
+ /* VIRQ manage */
+ raw_spinlock_t virq_lock;
+ kvm_guest_virq_t guest_virq[KVM_MAX_NR_VIRQS];
+ int max_irq_no; /* max number of IRQ (from 0) */
+#ifdef CONFIG_SMP
+ /* guest CSD lock wait management on host */
+ raw_spinlock_t csd_spinlock;
+ struct list_head csd_lock_wait_head; /* head of list of waiters */
+ struct list_head csd_lock_free_head; /* head of list of free */
+ /* structures */
+ csd_lock_waiter_t csd_lock_free_list[KVM_MAX_CSD_LOCK_FREE_NUM];
+#endif /* CONFIG_SMP */
+
+ /* reads protected by irq_srcu, writes by irq_lock */
+ struct hlist_head irq_ack_notifier_list;
+
+ struct hlist_head mask_notifier_list;
+
+ bool halted; /* VM is halted */
+ /* sign of VM reboot, true - reboot */
+ bool reboot;
+
+ /* lock to update num_sclkr_run and common sh_sclkm3
+ * for all VCPUs of the guest */
+ raw_spinlock_t sh_sclkr_lock;
+ int num_sclkr_run;
+ s64 sh_sclkm3;
+ unsigned int num_numa_nodes;
+ unsigned int max_nr_node_cpu;
+
+ /* CEPIC timer frequency (Hz) */
+ unsigned long cepic_freq;
+
+ /* Multiplier for the watchdog timer prescaler (allows slowing down
+ * its frequency) */
+ unsigned long wd_prescaler_mult;
+
+ /* Directly map legacy VGA area (0xa0000-0xbffff) to guest */
+ bool legacy_vga_passthrough;
+};
+
+#ifdef CONFIG_KVM_ASYNC_PF
+
+/* Async page fault event descriptor */
+struct kvm_arch_async_pf {
+ u32 apf_id; /* Unique identifier of async page fault event */
+};
+
+#endif /* CONFIG_KVM_ASYNC_PF */
+
+#define arch_to_vcpu(arch_vcpu) container_of(arch_vcpu, struct kvm_vcpu, arch)
+
+/*
+ * KVM arch-dependent flags
+ */
+#define KVMF_PARAVIRT_GUEST 0 /* guest kernel is paravirtualized */
+ /* and has shadow image address */
+#define KVMF_VCPU_STARTED 1 /* VCPUs (one or more) are started, */
+ /* VM is really active */
+#define KVMF_IN_SHOW_STATE 8 /* show state of KVM (print all */
+ /* stacks) is in progress */
+#define KVMF_NATIVE_KERNEL 32 /* guest is running native */
+ /* e2k linux kernel */
+#define KVMF_PARAVIRT_KERNEL 33 /* guest is running paravirtualized */
+ /* e2k linux kernel */
+#define KVMF_LINTEL 40 /* guest is running LIntel */
+#define KVMF_PARAVIRT_GUEST_MASK (1UL << KVMF_PARAVIRT_GUEST)
+#define KVMF_VCPU_STARTED_MASK (1UL << KVMF_VCPU_STARTED)
+#define KVMF_IN_SHOW_STATE_MASK (1UL << KVMF_IN_SHOW_STATE)
+#define KVMF_NATIVE_KERNEL_MASK (1UL << KVMF_NATIVE_KERNEL)
+#define KVMF_PARAVIRT_KERNEL_MASK (1UL << KVMF_PARAVIRT_KERNEL)
+#define KVMF_LINTEL_MASK (1UL << KVMF_LINTEL)
+
+#define set_kvm_mode_flag(kvm, flag) \
+({ \
+ set_bit(flag, (unsigned long *)&(kvm)->arch.flags); \
+})
+
+#define clear_kvm_mode_flag(kvm, flag) \
+({ \
+ clear_bit(flag, (unsigned long *)&(kvm)->arch.flags); \
+})
+
+#define test_and_set_kvm_mode_flag(kvm, flag) \
+({ \
+ test_and_set_bit(flag, (unsigned long *)&(kvm)->arch.flags); \
+})
+
+#define test_and_clear_kvm_mode_flag(kvm, flag) \
+({ \
+ test_and_clear_bit(flag, (unsigned long *)&(kvm)->arch.flags); \
+})
+
+#define test_kvm_mode_flag(kvm, flag) \
+({ \
+ test_bit(flag, (unsigned long *)&(kvm)->arch.flags); \
+})
+
+#define kvm_clear_vcpu(kvm, vcpu_no) \
+({ \
+ (kvm)->vcpus[vcpu_no] = NULL; \
+ smp_wmb(); \
+})
+
+struct kvm_e2k_info {
+ struct module *module;
+};
+
+static inline void
+kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do */
+}
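
/*
 * A minimal usage sketch for the KVMF_* flags and accessor macros above.
 * struct kvm is still incomplete at this point of the header, so the
 * hypothetical caller is shown in comment form only:
 *
 *	if (!test_kvm_mode_flag(kvm, KVMF_VCPU_STARTED))
 *		set_kvm_mode_flag(kvm, KVMF_VCPU_STARTED);
 *
 * i.e. mark the VM as really active once the first VCPU has been started.
 */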
+
+/*
+ * Shadow page tables root flags to get/set/update/alloc/free
+ */
+#define U_ROOT_PT_BIT 0 /* user shadow PT root */
+#define OS_ROOT_PT_BIT 1 /* kernel (OS) PT root */
+#define GP_ROOT_PT_BIT 2 /* hypervisor PT root to translate */
+ /* guest physical addresses */
+#define SEP_VIRT_ROOT_PT_BIT 3 /* separate virtual spaces mode */
+#define DONT_SYNC_ROOT_PT_BIT 4 /* do not sync shadow PT root */
+
+#define U_ROOT_PT_FLAG (1U << U_ROOT_PT_BIT)
+#define OS_ROOT_PT_FLAG (1U << OS_ROOT_PT_BIT)
+#define GP_ROOT_PT_FLAG (1U << GP_ROOT_PT_BIT)
+#define SEP_VIRT_ROOT_PT_FLAG (1U << SEP_VIRT_ROOT_PT_BIT)
+#define DONT_SYNC_ROOT_PT_FLAG (1U << DONT_SYNC_ROOT_PT_BIT)
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+#endif /* KVM_ARCH_WANT_MMU_NOTIFIER */
+
+extern int kvm_mmu_load(struct kvm_vcpu *vcpu, unsigned flags);
+extern void kvm_mmu_unload(struct kvm_vcpu *vcpu, unsigned flags);
+
+extern void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
+
+extern int kvm_wake_up_vcpu_host(struct kvm_vcpu *vcpu, int wait);
+extern struct kvm_vcpu *kvm_get_vcpu_on_id(struct kvm *kvm, int vcpu_id);
+extern struct kvm_vcpu *kvm_get_vcpu_on_hard_cpu_id(struct kvm *kvm,
+ int hard_cpu_id);
+extern bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
+
+extern void kvm_make_scan_ioapic_request(struct kvm *kvm);
+
+#ifdef CONFIG_KVM_ASYNC_PF
+extern void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+extern void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+#endif /* CONFIG_KVM_ASYNC_PF */
+
+#if defined(CONFIG_KVM_HW_VIRTUALIZATION) && \
+ !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is hypervisor or host with virtualization support */
+extern void kvm_hv_epic_load(struct kvm_vcpu *vcpu);
+extern void kvm_epic_invalidate_dat(struct kvm_vcpu *vcpu);
+extern void kvm_epic_enable_int(void);
+extern void kvm_epic_timer_start(void);
+extern void kvm_epic_timer_stop(void);
+extern void kvm_deliver_cepic_epic_interrupt(void);
+extern void kvm_epic_check_int_status(struct kvm_vcpu_arch *vcpu);
+extern void kvm_init_cepic_idle_timer(struct kvm_vcpu *vcpu);
+extern void kvm_epic_start_idle_timer(struct kvm_vcpu *vcpu);
+extern void kvm_epic_stop_idle_timer(struct kvm_vcpu *vcpu);
+#else /* !
CONFIG_KVM_HW_VIRTUALIZATION || CONFIG_KVM_GUEST_KERNEL */
+/* it is host without virtualization support */
+/* or native paravirtualized guest */
+static inline void kvm_hv_epic_load(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_invalidate_dat(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_enable_int(void)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_timer_start(void)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_timer_stop(void)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_deliver_cepic_epic_interrupt(void)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_check_int_status(struct kvm_vcpu_arch *vcpu)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_init_cepic_idle_timer(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_start_idle_timer(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do */
+}
+
+static inline void kvm_epic_stop_idle_timer(struct kvm_vcpu *vcpu)
+{
+ /* nothing to do */
+}
+#endif /* CONFIG_KVM_HW_VIRTUALIZATION && !CONFIG_KVM_GUEST_KERNEL */
+
+extern struct work_struct kvm_dump_stacks;
+extern void wait_for_print_all_guest_stacks(struct work_struct *work);
+
+#endif /* _ASM_E2K_KVM_HOST_H */
diff --git a/arch/e2k/include/asm/l-iommu.h b/arch/e2k/include/asm/l-iommu.h
new file mode 100644
index 0000000..6d54d0a
--- /dev/null
+++ b/arch/e2k/include/asm/l-iommu.h
@@ -0,0 +1,157 @@
+#ifndef _E2K_IOMMU_H
+#define _E2K_IOMMU_H
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+
+#ifdef CONFIG_PM_SLEEP
+extern void l_iommu_stop_all(void);
+#endif
+
+#define L_IOMMU_CTRL SIC_iommu_ctrl
+#define L_IOMMU_FLUSH_ALL SIC_iommu_flush
+#define L_IOMMU_FLUSH_ADDR SIC_iommu_flushP
+#define L_IOMMU_ERROR SIC_iommu_err
+#define L_IOMMU_ERROR1 SIC_iommu_err1
+
+#define IO_PAGE_SHIFT 12
+
+#define IOMMU_TABLES_NR 2
+#define IOMMU_LOW_TABLE 0
+#define IOMMU_HIGH_TABLE 1
+
+typedef struct { unsigned iopte; } iopte_t;
+
+#define iopte_val(x) ((x).iopte)
+
+
+#define MIN_IOMMU_WINSIZE (4*1024*1024*1024UL)
+#define MAX_IOMMU_WINSIZE (512*1024*1024*1024UL)
+#define DFLT_IOMMU_WINSIZE (4*1024*1024*1024UL)
+
+#define IOPTE_PAGE_MASK 0xfffffff0
+#define IOPTE_CACHE 0x00000004 /* Cached */
+#define IOPTE_STP_PREF_IOPTE 0x00000004 /* stop prefetch iopte */
+#define IOPTE_WRITE 0x00000001 /* Writeable */
+#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
+
+#define pa_to_iopte(addr) (((unsigned long)(addr) >> 8) & IOPTE_PAGE_MASK)
+#define iopte_to_pa(iopte) (((unsigned long)(iopte) & IOPTE_PAGE_MASK) << 8)
+
+
+#define addr_to_flush(__a) ((__a) >> IO_PAGE_SHIFT)
+
+static inline void l_iommu_write(unsigned node, u32 val, unsigned long addr)
+{
+ sic_write_node_iolink_nbsr_reg(node, 0, addr, val);
+}
+
+static inline u32 l_iommu_read(unsigned node, unsigned long addr)
+{
+ return sic_read_node_iolink_nbsr_reg(node, 0, addr);
+}
+
+static inline void l_iommu_set_ba(unsigned node, unsigned long *ba)
+{
+ l_iommu_write(node, pa_to_iopte(ba[IOMMU_LOW_TABLE]), SIC_iommu_ba_lo);
+ l_iommu_write(node, pa_to_iopte(ba[IOMMU_HIGH_TABLE]), SIC_iommu_ba_hi);
+}
+
+#define l_prefetch_iopte_supported l_prefetch_iopte_supported
+static inline int l_prefetch_iopte_supported(void)
+{
+ return (int)machine.native_iset_ver >= ELBRUS_8C2_ISET;
+}
+
+static inline void l_prefetch_iopte(iopte_t *iopte, int prefetch)
+{
+ if (prefetch)
+ iopte_val(iopte[0]) &= ~IOPTE_STP_PREF_IOPTE;
+ else
+ iopte_val(iopte[0]) |= IOPTE_STP_PREF_IOPTE;
+}
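
/*
 * A minimal sketch of packing an entry from the IOPTE_* bits above; the
 * helper is hypothetical and assumes 'pa' is an IO-page-aligned physical
 * address.
 */
static inline iopte_t example_mk_iopte(unsigned long pa, int writeable)
{
	iopte_t pte;

	/* store the page frame bits and mark the entry valid */
	iopte_val(pte) = pa_to_iopte(pa) | IOPTE_VALID;
	if (writeable)
		iopte_val(pte) |= IOPTE_WRITE;
	return pte;
}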
+static inline void *l_iommu_map_table(void *va, unsigned long size)
+{
+ phys_addr_t start = __pa(va);
+ pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
+ struct page **pages;
+ phys_addr_t page_start;
+ unsigned int page_count;
+ unsigned int i;
+ void *vaddr;
+
+ if (!cpu_has(CPU_HWBUG_IOMMU))
+ return va;
+
+ page_start = start - offset_in_page(start);
+ page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ for (i = 0; i < page_count; i++) {
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+
+ vaddr = vmap(pages, page_count, VM_MAP, prot);
+ kfree(pages);
+
+ return vaddr;
+}
+
+static inline void *l_iommu_unmap_table(void *va)
+{
+ void *p;
+ if (!cpu_has(CPU_HWBUG_IOMMU))
+ return va;
+ p = page_address(vmalloc_to_page(va)) + offset_in_page(va);
+ vunmap(va);
+ return p;
+}
+
+static inline int l_iommu_get_table(unsigned long iova)
+{
+ return iova & (~0UL << 32) ? IOMMU_HIGH_TABLE : IOMMU_LOW_TABLE;
+}
+
+#define boot_l_iommu_supported() BOOT_HAS_MACHINE_L_IOMMU
+#define l_iommu_supported() HAS_MACHINE_L_IOMMU
+
+extern int l_iommu_no_numa_bug;
+extern int l_iommu_force_numa_bug_on;
+extern unsigned long l_iommu_win_sz;
+
+#define l_iommu_has_numa_bug() (l_iommu_force_numa_bug_on || \
+ (nr_online_nodes > 1 && l_iommu_no_numa_bug == 0 && \
+ cpu_has(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE)))
+
+#define L_PGSIZE_BITMAP SZ_4K
+
+/* software MMU support */
+
+#define E2K_SWIOTLB_DEFAULT_SIZE (64 * 1024 * 1024)
+#define E2K_SWIOTLB_MIN_SIZE (8 * 1024 * 1024)
+#define L_SWIOTLB_DEFAULT_SIZE E2K_SWIOTLB_DEFAULT_SIZE
+#define L_SWIOTLB_MIN_SIZE E2K_SWIOTLB_MIN_SIZE
+
+#define SWIOTLB_POOL_DEFAULT_NUM 64
+#define SWIOTLB_ISA_POOL_DEFAULT_NUM 16
+#define SWIOTLB_POOL_DEFAULT_SIZE (SWIOTLB_POOL_DEFAULT_NUM * PAGE_SIZE)
+#define SWIOTLB_ISA_POOL_DEFAULT_SIZE \
+ (SWIOTLB_ISA_POOL_DEFAULT_NUM * PAGE_SIZE)
+
+#endif /* !(_E2K_IOMMU_H) */
diff --git a/arch/e2k/include/asm/l-mcmonitor.h b/arch/e2k/include/asm/l-mcmonitor.h
new file mode 100644
index 0000000..360ae88
--- /dev/null
+++ b/arch/e2k/include/asm/l-mcmonitor.h
@@ -0,0 +1,94 @@
+#ifndef _E2K_L_MCMONITOR_H_
+#define _E2K_L_MCMONITOR_H_
+
+#include
+#include
+
+typedef e2k_mc_ecc_struct_t l_mc_ecc_struct_t;
+
+static inline u32 l_mc_get_error_cnt(l_mc_ecc_struct_t *ecc, int node,
+ int nr)
+{
+ ecc->E2K_MC_ECC_reg = sic_get_mc_ecc(node, nr);
+ return ecc->E2K_MC_ECC_secnt;
+}
+
+static inline char *l_mc_get_error_str(l_mc_ecc_struct_t *ecc, int nr,
+ char *error_msg, int error_msg_len)
+{
+ snprintf(error_msg, error_msg_len,
+ "MC%d_ECC=0x%x (ee=%d dmode=%d of=%d ue=%d secnt=%d)",
+ nr,
+ ecc->E2K_MC_ECC_reg,
+ ecc->E2K_MC_ECC_ee,
+ ecc->E2K_MC_ECC_dmode,
+ ecc->E2K_MC_ECC_of,
+ ecc->E2K_MC_ECC_ue, ecc->E2K_MC_ECC_secnt);
+ return error_msg;
+}
+
+static inline bool l_mcmonitor_eec_enabled(void)
+{
+ l_mc_ecc_struct_t ecc;
+ ecc.E2K_MC_ECC_reg = sic_get_mc_ecc(0, 0);
+ return ecc.E2K_MC_ECC_ee;
+}
+
+
+#define l_mcmonitor_supported() HAS_MACHINE_L_SIC
+
+/* CC handles 32 bytes at a time */
+#define L_MC_ECC_WORDS_NR 4
+#define L_MCMONITOR_TEST_SIZE (256 * L_MC_ECC_WORDS_NR)
+
+static inline void local_set_mc_ecc(void *node_nbsr, int num, unsigned int reg_value)
+{
+ nbsr_write(reg_value, node_nbsr + SIC_mc0_ecc + num * 0x40);
+}
+
+static inline void l_mcmonitor_fill_data(u64 *a, bool make_error)
+{
+ int i, mc = SIC_MC_COUNT;
+ int sz = L_MCMONITOR_TEST_SIZE / L_MC_ECC_WORDS_NR / sizeof(*a);
+ e2k_mc_ecc_struct_t mc_ecc[SIC_MAX_MC_COUNT];
+ void *node_nbsr = sic_get_node_nbsr_base(0);
+
+ a = (void *)__pa(a);
+
+ for (i = 0; i < mc; i++)
+ mc_ecc[i].E2K_MC_ECC_reg = sic_get_mc_ecc(0, i);
+
+ for (i = 0; i < mc; i++) {
+ l_mc_ecc_struct_t e = mc_ecc[i];
+ e.E2K_MC_ECC_dmode = 1;
+ local_set_mc_ecc(node_nbsr, i, e.E2K_MC_ECC_reg);
+ }
+ mb();
+
+ for (i = 0; i < sz; i++, a += L_MC_ECC_WORDS_NR) {
+ int j;
+ u64 d = 0;
+ for (j = 0; j < L_MC_ECC_WORDS_NR; j++) {
+ u64 v = d;
+ if (j == 0 && make_error)
+ v |= (1UL << (i % 64));
+ boot_writeq(v, a + j);
+ }
+ mb();
+ }
+
+ for (i = 0; i < mc; i++)
+ local_set_mc_ecc(node_nbsr, i, mc_ecc[i].E2K_MC_ECC_reg);
+ mb();
+}
+
+static inline int l_mcmonitor_cmp(u64 *a)
+{
+ int i;
+ for (i = 0; i < L_MCMONITOR_TEST_SIZE / sizeof(*a); i++) {
+ if (a[i] != 0)
+ return -EFAULT;
+ }
+ return 0;
+}
+#endif /* _E2K_L_MCMONITOR_H_ */
diff --git a/arch/e2k/include/asm/l_ide.h b/arch/e2k/include/asm/l_ide.h
new file mode 100644
index 0000000..bc61de7
--- /dev/null
+++ b/arch/e2k/include/asm/l_ide.h
@@ -0,0 +1,6 @@
+#ifndef _ARCH_IDE_H_
+#define _ARCH_IDE_H_
+
+#include
+
+#endif /*_ARCH_IDE_H_*/
diff --git a/arch/e2k/include/asm/l_pmc.h b/arch/e2k/include/asm/l_pmc.h
new file mode 100644
index 0000000..4e1e78b
--- /dev/null
+++ b/arch/e2k/include/asm/l_pmc.h
@@ -0,0 +1,18 @@
+#pragma once
+
+/* Available working frequencies (in kHz) */
+#define PMC_L_FREQUENCY_1 1000000
+#define PMC_L_FREQUENCY_2 400000
+#define PMC_L_FREQUENCY_3 200000
+#define PMC_L_FREQUENCY_4 143000
+
+/* PMC registers */
+#define PMC_L_COVFID_STATUS_REG 0x0
+#define PMC_L_P_STATE_CNTRL_REG 0x8
+#define PMC_L_P_STATE_STATUS_REG 0xc
+#define PMC_L_P_STATE_VALUE_0_REG 0x10
+#define PMC_L_P_STATE_VALUE_1_REG 0x14
+#define PMC_L_P_STATE_VALUE_2_REG 0x18
+#define PMC_L_P_STATE_VALUE_3_REG 0x1c
+
+#include
diff --git a/arch/e2k/include/asm/l_spmc.h b/arch/e2k/include/asm/l_spmc.h
new file mode 100644
index 0000000..4434611
--- /dev/null
+++ b/arch/e2k/include/asm/l_spmc.h
@@ -0,0 +1,6 @@
+#ifndef _ARCH_SPMC_H_
+#define _ARCH_SPMC_H_
+
+#include
+
+#endif /*_ARCH_SPMC_H_*/
diff --git a/arch/e2k/include/asm/l_timer.h b/arch/e2k/include/asm/l_timer.h
new file mode 100644
index 0000000..750e237
--- /dev/null
+++ b/arch/e2k/include/asm/l_timer.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_L_TIMER_H
+#define _ASM_L_TIMER_H
+
+#ifdef __KERNEL__
+
+#include
+#include
+
+#define L_TIMER_IS_ALLOWED() (HAS_MACHINE_E2K_IOHUB || IS_HV_GM())
+
+#include
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_L_TIMER_H */
diff --git a/arch/e2k/include/asm/l_timer_regs.h b/arch/e2k/include/asm/l_timer_regs.h
new file mode 100644
index 0000000..4c0f8cd
--- /dev/null
+++ b/arch/e2k/include/asm/l_timer_regs.h
@@ -0,0 +1,107 @@
+#ifndef _L_ASM_L_TIMER_REGS_H
+#define _L_ASM_L_TIMER_REGS_H
+
+#include
+
+/*
+ * Elbrus System timer Registers (little endian)
+ */
+
+typedef struct counter_limit_fields {
+ u32 unused : 9; /* [8:0] */
+ u32 c_l : 22; /* [30:9] */
+ u32 l : 1; /* [31] */
+} counter_limit_fields_t;
+typedef union counter_limit {
+ u32 word;
+ counter_limit_fields_t fields;
+} counter_limit_t;
+typedef struct counter_st_v_fields {
+ u32 unused : 9; /* [8:0] */
+ u32 c_st_v : 22; /* [30:9] */
+ u32 l : 1; /* [31] */
+} counter_st_v_fields_t;
+typedef union counter_st_v {
+ u32 word;
+ counter_st_v_fields_t fields;
+} counter_st_v_t;
+typedef struct counter_fields {
+ u32 unused : 9; /* [8:0] */
+ u32 c : 22; /* [30:9] */
+ u32 l : 1; /* [31] */
+} counter_fields_t;
+typedef union counter {
+ u32 word;
+ counter_fields_t fields;
+} counter_t;
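
/*
 * All timer registers in this header follow the same union pattern: raw
 * access through .word, bit-level access through .fields.  A minimal
 * sketch with a hypothetical helper (no real register I/O):
 */
static inline u32 example_make_counter_limit(u32 limit)
{
	counter_limit_t r;

	r.word = 0;
	r.fields.c_l = limit; /* 22-bit limit value, bits [30:9] */
	return r.word;
}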
+typedef struct counter_control_fields {
+ u32 s_s : 1; /* [0] */
+ u32 inv_l : 1; /* [1] */
+ u32 l_ini : 1; /* [2] */
+ u32 unused : 29; /* [31:3] */
+} counter_control_fields_t;
+typedef union counter_control {
+ u32 word;
+ counter_control_fields_t fields;
+} counter_control_t;
+typedef struct wd_counter_l_fields {
+ u32 wd_c : 32; /* [31:0] */
+} wd_counter_l_fields_t;
+typedef union wd_counter_l {
+ u32 word;
+ wd_counter_l_fields_t fields;
+} wd_counter_l_t;
+typedef struct wd_counter_h_fields {
+ u32 wd_c : 32; /* [31:0] */
+} wd_counter_h_fields_t;
+typedef union wd_counter_h {
+ u32 word;
+ wd_counter_h_fields_t fields;
+} wd_counter_h_t;
+typedef struct wd_limit_fields {
+ u32 wd_l : 32; /* [31:0] */
+} wd_limit_fields_t;
+typedef union wd_limit {
+ u32 word;
+ wd_limit_fields_t fields;
+} wd_limit_t;
+typedef struct power_counter_l_fields {
+ u32 pw_c : 32; /* [31:0] */
+} power_counter_l_fields_t;
+typedef union power_counter_l {
+ u32 word;
+ power_counter_l_fields_t fields;
+} power_counter_l_t;
+typedef struct power_counter_h_fields {
+ u32 pw_c : 32; /* [31:0] */
+} power_counter_h_fields_t;
+typedef union power_counter_h {
+ u32 word;
+ power_counter_h_fields_t fields;
+} power_counter_h_t;
+typedef struct wd_control_fields {
+ u32 w_m : 1; /* [0] */
+ u32 w_out_e : 1; /* [1] */
+ u32 w_evn : 1; /* [2] */
+ u32 unused : 29; /* [31:3] */
+} wd_control_fields_t;
+typedef union wd_control {
+ u32 word;
+ wd_control_fields_t fields;
+} wd_control_t;
+typedef struct reset_counter_l_fields {
+ u32 rst : 32; /* [31:0] */
+} reset_counter_l_fields_t;
+typedef union reset_counter_l {
+ u32 word;
+ reset_counter_l_fields_t fields;
+} reset_counter_l_t;
+typedef struct reset_counter_h_fields {
+ u32 rst : 32; /* [31:0] */
+} reset_counter_h_fields_t;
+typedef union reset_counter_h {
+ u32 word;
+ reset_counter_h_fields_t fields;
+} reset_counter_h_t;
+
+#endif /* _L_ASM_L_TIMER_REGS_H */
diff --git a/arch/e2k/include/asm/linkage.h b/arch/e2k/include/asm/linkage.h
new file mode 100644
index 0000000..186440a
--- /dev/null
+++ b/arch/e2k/include/asm/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#endif
diff --git a/arch/e2k/include/asm/linux_logo.h b/arch/e2k/include/asm/linux_logo.h
new file mode 100644
index 0000000..8031547
--- /dev/null
+++ b/arch/e2k/include/asm/linux_logo.h
@@ -0,0 +1,48 @@
+/* $Id: linux_logo.h,v 1.1 2001/05/16 13:33:12 anonymous Exp $
+ * include/asm-e2k/linux_logo.h: This is a linux logo
+ * to be displayed on boot.
+ *
+ * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * You can put anything here, but:
+ * LINUX_LOGO_COLORS has to be less than 224
+ * image size has to be 80x80
+ * values have to start from 0x20
+ * (i.e. RGB(linux_logo_red[0],
+ * linux_logo_green[0],
+ * linux_logo_blue[0]) is color 0x20)
+ * BW image has to be 80x80 as well, with MS bit
+ * on the left
+ * Serial_console ascii image can be any size,
+ * but should contain %s to display the version
+ */
+
+#include
+#include
+
+#define linux_logo_banner "Linux/E2K version " UTS_RELEASE
+
+#define LINUX_LOGO_COLORS 214
+
+#ifdef INCLUDE_LINUX_LOGO_DATA
+
+#define INCLUDE_LINUX_LOGOBW
+#define INCLUDE_LINUX_LOGO16
+
+#include
+
+#else
+
+/* prototypes only */
+extern unsigned char linux_logo_red[];
+extern unsigned char linux_logo_green[];
+extern unsigned char linux_logo_blue[];
+extern unsigned char linux_logo[];
+extern unsigned char linux_logo_bw[];
+extern unsigned char linux_logo16_red[];
+extern unsigned char linux_logo16_green[];
+extern unsigned char linux_logo16_blue[];
+extern unsigned char linux_logo16[];
+
+#endif
diff --git a/arch/e2k/include/asm/local.h b/arch/e2k/include/asm/local.h
new file mode 100644
index 0000000..003157c
--- /dev/null
+++ b/arch/e2k/include/asm/local.h
@@ -0,0 +1,175 @@
+#pragma once
+
+#include
+#include
+#include
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ */
+
+/* Use relaxed atomics if they are available */
+#if CONFIG_CPU_ISET >= 5
+
+# include
+
+/* Default implementation uses READ_ONCE and WRITE_ONCE
+ * which are too slow on e2k because of "volatile" */
+# undef __local_inc
+# undef __local_dec
+# undef __local_add
+# undef __local_sub
+
+#else
+
+typedef struct { long counter; } __attribute__ ((aligned)) local_t;
+
+# define LOCAL_INIT(i) { (i) }
+
+# define local_read(l) ((l)->counter)
+# define local_set(l, i) (((l)->counter) = (i))
+
+/*
+ * local_add - add long to local variable
+ * @i: long value to add
+ * @l: pointer of type local_t
+ */
+static inline void local_add(long i, local_t *l)
+{
+ unsigned long flags;
+
+ raw_all_irq_save(flags);
+ l->counter += i;
+ raw_all_irq_restore(flags);
+}
+
+/*
+ * local_sub - subtract long from local variable
+ * @i: long value to subtract
+ * @l: pointer of type local_t
+ */
+static inline void local_sub(long i, local_t *l)
+{
+ unsigned long flags;
+
+ raw_all_irq_save(flags);
+ l->counter -= i;
+ raw_all_irq_restore(flags);
+}
+
+# define local_inc(l) local_add(1,l)
+# define local_dec(l) local_sub(1,l)
+
+/*
+ * local_add_return - add long to local variable and return the result
+ * @i: long value to add
+ * @l: pointer of type local_t
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+ unsigned long flags;
+ register long result;
+
+ raw_all_irq_save(flags);
+ l->counter += i;
+ result = l->counter;
+ raw_all_irq_restore(flags);
+
+ return result;
+}
+
+/*
+ * local_sub_return - subtract long from local variable and return the result
+ * @i: long value to subtract
+ * @l: pointer of type local_t
+ */
+static inline long local_sub_return(long i, local_t *l)
+{
+ unsigned long flags;
+ register long result;
+
+ raw_all_irq_save(flags);
+ l->counter -= i;
+ result = l->counter;
+ raw_all_irq_restore(flags);
+
+ return result;
+}
+
+# define local_add_negative(i, l) (local_add_return(i,l) < 0)
+# define local_sub_and_test(i, l) (local_sub_return(i,l) == 0)
+# define local_inc_and_test(l) (local_add_return(1,l) == 0)
+# define local_dec_and_test(l) (local_sub_return(1,l) == 0)
+
+static inline long local_cmpxchg(local_t *l, long o, long n)
+{
+ unsigned long flags;
+ register long result;
+
+ raw_all_irq_save(flags);
+ result = l->counter;
+ if (result == o)
+ l->counter = n;
+ raw_all_irq_restore(flags);
+
+ return result;
+}
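
/*
 * A minimal sketch of the usual compare-and-swap retry loop on top of
 * local_cmpxchg() above; the helper and its saturation policy are
 * hypothetical, shown only to illustrate the API.
 */
static inline long example_local_add_capped(local_t *l, long inc, long cap)
{
	long old, val;

	do {
		old = local_read(l);
		val = old + inc;
		if (val > cap)
			return old; /* saturated: leave the counter alone */
	} while (local_cmpxchg(l, old, val) != old);

	return val;
}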
+
+static inline long local_xchg(local_t *l, long n)
+{
+ unsigned long flags;
+ register long result;
+
+ raw_all_irq_save(flags);
+ result = l->counter;
+ l->counter = n;
+ raw_all_irq_restore(flags);
+
+ return result;
+}
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static inline int local_add_unless(local_t *l, long a, long u)
+{
+ unsigned long flags;
+ register long result;
+
+ raw_all_irq_save(flags);
+ if (l->counter == u) {
+ result = 0;
+ } else {
+ l->counter += a;
+ result = 1;
+ }
+ raw_all_irq_restore(flags);
+
+ return result;
+}
+
+# define local_inc_return(l) local_add_return(1, l)
+# define local_dec_return(l) local_sub_return(1, l)
+
+# define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#endif
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. */
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i, l) ((l)->a.counter += (i))
+#define __local_sub(i, l) ((l)->a.counter -= (i))
+
+#ifdef CONFIG_HAVE_FTRACE_NMI_ENTER
+# warning For the sake of atomicity nmie's must be disabled here along with ie's
+#endif
+
diff --git a/arch/e2k/include/asm/machdep.h b/arch/e2k/include/asm/machdep.h
new file mode 100644
index 0000000..9e532e8
--- /dev/null
+++ b/arch/e2k/include/asm/machdep.h
@@ -0,0 +1,685 @@
+#ifndef _E2K_MACHDEP_H_
+#define _E2K_MACHDEP_H_
+
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+#ifdef __KERNEL__
+
+enum {
+ CPU_HWBUG_LARGE_PAGES,
+ CPU_HWBUG_LAPIC_TIMER,
+ CPU_HWBUG_PIO_READS,
+ CPU_HWBUG_ATOMIC,
+ CPU_HWBUG_CLW,
+ CPU_HWBUG_PAGE_A,
+ CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR,
+ CPU_HWBUG_UNALIGNED_LOADS,
+ CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE,
+ CPU_HWBUG_DMA_AT_APIC_ADDR,
+ CPU_HWBUG_KERNEL_DATA_MONITOR,
+ CPU_HWBUG_WRITE_MEMORY_BARRIER,
+ CPU_HWBUG_BAD_RESET,
+ CPU_HWBUG_BREAKPOINT_INSTR,
+ CPU_HWBUG_E8C_WATCHDOG,
+ CPU_HWBUG_IOMMU,
+ CPU_HWBUG_WC_DAM,
+ CPU_HWBUG_TRAP_CELLAR_S_F,
+ CPU_HWBUG_SS,
+ CPU_HWBUG_AAU_AALDV,
+ CPU_HWBUG_LEVEL_EOI,
+ CPU_HWBUG_FALSE_SS,
+ CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG,
+ CPU_HWBUG_TLB_FLUSH_L1D,
+ CPU_HWBUG_GUEST_ASYNC_PM,
+ CPU_HWBUG_L1I_STOPS_WORKING,
+ CPU_HWBUG_CLW_STALE_L1_ENTRY,
+ CPU_HWBUG_C3_WAIT_MA_C,
+ CPU_HWBUG_VIRT_SCLKM3_INTC,
+ CPU_FEAT_WC_PCI_PREFETCH,
+ CPU_FEAT_FLUSH_DC_IC,
+ CPU_FEAT_EPIC,
+ CPU_FEAT_TRAP_V5,
+ CPU_FEAT_TRAP_V6,
+ CPU_FEAT_QPREG,
+ CPU_FEAT_ISET_V3,
+ CPU_FEAT_ISET_V5,
+ CPU_FEAT_ISET_V6,
+ CPU_HWBUG_E16C_SLEEP,
+ NR_CPU_FEATURES
+};
+
+struct cpuinfo_e2k;
+struct pt_regs;
+struct seq_file;
+struct global_regs;
+struct kernel_gregs;
+struct local_gregs;
+struct e2k_aau_context;
+struct kvm_vcpu_arch;
+struct e2k_dimtp;
+struct thread_info;
+
+#include /* virtualization support */
+
+typedef struct machdep {
+ int native_id; /* machine Id */
+ int native_rev; /* cpu revision */
+ e2k_iset_ver_t native_iset_ver; /* Instruction set version */
+ bool cmdline_iset_ver; /* iset specified in cmdline */
+ bool mmu_pt_v6; /* MMU is setting up to use */
+ /* new page table structures */
+ bool mmu_separate_pt; /* MMU was set to use */
+ /* separate PTs for kernel */
+ /* and users */
+ bool L3_enable; /* cache L3 is enabled */
+ bool gmi; /* is hardware virtualized */
+ /* guest VM */
+ e2k_addr_t x86_io_area_base;
+ e2k_addr_t x86_io_area_size;
+ u8 max_nr_node_cpus;
+ u8 nr_node_cpus;
+ u8 node_iolinks;
+ e2k_addr_t pcicfg_area_phys_base;
+ e2k_size_t pcicfg_area_size;
+ e2k_addr_t nsr_area_phys_base;
+ e2k_size_t nbsr_area_offset;
+ e2k_size_t nbsr_area_size;
+ e2k_addr_t copsr_area_phys_base;
+ e2k_size_t copsr_area_size;
+ u8 mlt_size;
+ u8 tlb_lines_bits_num;
+ u64 tlb_addr_line_num;
+ u64 tlb_addr_line_num2;
+ u8 tlb_addr_line_num_shift2;
+ u8 tlb_addr_set_num;
+ u8 tlb_addr_set_num_shift;
+ e2k_size_t sic_mc_size;
+ u8 sic_mc_count;
+ u32 sic_mc1_ecc;
+ u32 sic_io_str1;
+ u32 clock_tick_rate;
+
+ unsigned long cpu_features[(NR_CPU_FEATURES + 63) / 64];
+
+ e2k_addr_t (*get_nsr_area_phys_base)(void);
+ void (*setup_apic_vector_handlers)(void);
+#ifdef CONFIG_SMP
+ void (*clk_off)(void);
+ void (*clk_on)(int);
+#endif
+ void (*C1_enter)(void);
+ void (*C3_enter)(void);
+
+ /* Often used pointers are placed close to each other */
+
+ void (*save_kernel_gregs)(struct kernel_gregs *);
+ void (*save_gregs)(struct global_regs *);
+ void (*save_local_gregs)(struct local_gregs *);
+ void (*save_gregs_dirty_bgr)(struct global_regs *);
+ void (*save_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
+ unsigned long not_save_gregs_mask);
+ void (*restore_gregs)(const struct global_regs *);
+ void (*restore_local_gregs)(const struct local_gregs *);
+ void (*restore_gregs_on_mask)(struct global_regs *, bool dirty_bgr,
+ unsigned long not_restore_gregs_mask);
+
+ void (*save_dimtp)(e2k_dimtp_t *);
+ void (*restore_dimtp)(const e2k_dimtp_t *);
+
+ void (*save_kvm_context)(struct kvm_vcpu_arch *);
+ void (*restore_kvm_context)(const struct kvm_vcpu_arch *);
+
+ void (*calculate_aau_aaldis_aaldas)(const struct pt_regs *regs,
+ struct thread_info *ti, struct e2k_aau_context *context);
+ void (*do_aau_fault)(int aa_field, struct pt_regs *regs);
+ void (*save_aaldi)(u64 *aaldis);
+ void (*get_aau_context)(struct e2k_aau_context *);
+
+ unsigned long (*rrd)(int reg);
+ void (*rwd)(int reg, unsigned long value);
+ unsigned long (*boot_rrd)(int reg);
+ void (*boot_rwd)(int reg, unsigned long value);
+
+ u64 (*get_cu_hw1)(void);
+ void (*set_cu_hw1)(u64);
+
+#ifdef CONFIG_MLT_STORAGE
+ void (*invalidate_MLT)(void);
+ void (*get_and_invalidate_MLT_context)(e2k_mlt_t *mlt_state);
+#endif
+
+ void (*flushts)(void);
+
+ void (*setup_arch)(void);
+ void (*setup_cpu_info)(struct cpuinfo_e2k *c);
+ int (*show_cpuinfo)(struct seq_file *m, void *v);
+ void (*init_IRQ)(void);
+
+ int (*set_wallclock)(unsigned long nowtime);
+ unsigned long (*get_wallclock)(void);
+
+ void (*restart)(char *cmd);
+ void (*power_off)(void);
+ void (*halt)(void);
+ void (*arch_reset)(char *cmd);
+ void (*arch_halt)(void);
+
+ int (*get_irq_vector)(void);
+
+ /* virtualization support: guest kernel and host/hypervisor */
+ host_machdep_t host; /* host additional fields (used only by */
+ /* host at arch/e2k/kvm/xxx) */
+ guest_machdep_t guest; /* guest additional fields (used only by */
+ /* guest at arch/e2k/kvm/guest/xxx) */
+} machdep_t;
+
+
+/*
+ * When executing on pure guest kernel, guest_cpu will be set to
+ * 'machine.guest.id', i.e. to what hardware the guest *thinks* it is
+ * being executed on.
+ */
+typedef void (*cpuhas_initcall_t)(int cpu, int revision, int iset_ver,
+ int guest_cpu, struct machdep *machine);
+extern cpuhas_initcall_t __cpuhas_initcalls[], __cpuhas_initcalls_end[];
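
/*
 * A minimal sketch of testing one of the collected feature/bug bits; the
 * helper is hypothetical (the kernel's real accessors live elsewhere) and
 * assumes test_bit() is visible here.
 */
static inline int example_cpu_has(const struct machdep *m, int feat)
{
	return test_bit(feat, m->cpu_features);
}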
+ */
+typedef void (*cpuhas_initcall_t)(int cpu, int revision, int iset_ver,
+		int guest_cpu, struct machdep *machine);
+extern cpuhas_initcall_t __cpuhas_initcalls[], __cpuhas_initcalls_end[];
+
+/*
+ * feature =
+ *	if ('is_static')
+ *		'static_cond' checked at build time;
+ *	else
+ *		'dynamic_cond' checked at runtime;
+ */
+#ifndef BUILD_CPUHAS_INITIALIZERS
+# define CPUHAS(feat, is_static, static_cond, dynamic_cond) \
+	static const char feat##_is_static = !!(is_static); \
+	static const char feat##_is_set_statically = !!(static_cond);
+
+#else /* #ifdef BUILD_CPUHAS_INITIALIZERS */
+# include
+# define CPUHAS(feat, _is_static, static_cond, dynamic_cond) \
+	__init \
+	static void feat##_initializer(const int cpu, const int revision, \
+			const int iset_ver, const int guest_cpu, \
+			struct machdep *const machine) { \
+		bool is_static = (_is_static); \
+		if (is_static && (static_cond) || !is_static && (dynamic_cond)) \
+			set_bit(feat, (machine)->cpu_features); \
+	} \
+	static cpuhas_initcall_t __cpuhas_initcall_##feat __used \
+			__section(".cpuhas_initcall") = &feat##_initializer;
+#endif
+
+
+/* Most of these bugs are not emulated on the simulator, but
+ * set them anyway so that a kernel running on the simulator
+ * behaves the same way as on real hardware. */
+
+/* #47176 - Large pages do not work.
+ * Workaround - do not use them. */
+CPUHAS(CPU_HWBUG_LARGE_PAGES,
+	!IS_ENABLED(CONFIG_CPU_ES2),
+	false,
+	cpu == IDR_ES2_DSP_MDL && revision < 1);
+/* #56947 - lapic timer can lose interrupts.
+ * Workaround - do not use oneshot mode. */
+CPUHAS(CPU_HWBUG_LAPIC_TIMER,
+	!IS_ENABLED(CONFIG_CPU_ES2),
+	false,
+	cpu == IDR_ES2_DSP_MDL && revision < 1);
+/* #69194 - PIO reads can hang the processor.
+ * Workaround - serialize PIO reads on every CPU. */
+CPUHAS(CPU_HWBUG_PIO_READS,
+	!IS_ENABLED(CONFIG_CPU_ES2),
+	false,
+	cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
+		cpu == IDR_ES2_RU_MDL && revision <= 1);
+/* #71610 - Atomic operations can be non-atomic.
+ * Workaround - flush data cache line.
+ * This workaround increases the count of DCACHE flushes;
+ * Turmalin has a hardware bug with flushes, so don't use
+ * this workaround on it. */
+CPUHAS(CPU_HWBUG_ATOMIC,
+	!IS_ENABLED(CONFIG_CPU_ES2),
+	false,
+	cpu == IDR_ES2_DSP_MDL);
+/* #58397, #76626 - CLW does not work.
+ * Workaround - do not use it. */
+CPUHAS(CPU_HWBUG_CLW,
+	!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S),
+	false,
+	cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
+		cpu == IDR_ES2_RU_MDL && revision <= 1 ||
+		cpu == IDR_E2S_MDL && revision == 0);
+/* #76626 - "Page accessed" bit in PTE does not work.
+ * Workaround - always set it. */
+CPUHAS(CPU_HWBUG_PAGE_A,
+	!IS_ENABLED(CONFIG_CPU_ES2),
+	false,
+	cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
+		cpu == IDR_ES2_RU_MDL && revision <= 1);
+/* #78411 - Sometimes exc_illegal_instr_addr is generated
+ * instead of exc_instr_page_miss.
+ * Workaround - always return to user from exc_illegal_instr_addr. */
+CPUHAS(CPU_HWBUG_SPURIOUS_EXC_ILL_INSTR_ADDR,
+	!IS_ENABLED(CONFIG_CPU_E2S),
+	false,
+	cpu == IDR_E2S_MDL && revision <= 1);
+/* #83160 - unaligned loads do not work.
+ * Workaround - limit the stream of unaligned loads to less
+ * than 32 bytes per cycle and put "wait ld_c" before it. */
+CPUHAS(CPU_HWBUG_UNALIGNED_LOADS,
+	!IS_ENABLED(CONFIG_CPU_ES2),
+	false,
+	cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) ||
+		cpu == IDR_ES2_RU_MDL && revision <= 1);
+/* #83884 - es2 deadlocks on DMA to neighbour node.
+ * #100984 - # DMA to neighbour node slows down. + * Workaround - allocate DMA buffers only in the device node. */ +CPUHAS(CPU_HWBUG_CANNOT_DO_DMA_IN_NEIGHBOUR_NODE, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_ES2_DSP_MDL && (revision <= 1 || revision == 6) || + cpu == IDR_ES2_RU_MDL && revision <= 1 || + cpu == IDR_E8C_MDL && revision <= 2); +/* #83884 - es2 deadlock on DMA at + * (APIC_DEFAULT_PHYS_BASE & 0x7fffFFFF) address. + * Workaround - reserve the 4K page at this address. */ +CPUHAS(CPU_HWBUG_DMA_AT_APIC_ADDR, + !IS_ENABLED(CONFIG_CPU_ES2), + false, + cpu == IDR_ES2_DSP_MDL); +/* #88644 - data profiling events are lost if overflow happens + * under closed NM interrupts. + * Workaround - disable data monitor profiling in kernel. */ +CPUHAS(CPU_HWBUG_KERNEL_DATA_MONITOR, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL); +/* #89495 - write barrier does not work (even for atomics). + * Workaround - special command sequence after every read-acquire. */ +CPUHAS(CPU_HWBUG_WRITE_MEMORY_BARRIER, + !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_E8C_MDL && revision <= 1); +/* On some processor's revisions writecombine memory + * in prefetchable PCI area is not allowed. */ +CPUHAS(CPU_FEAT_WC_PCI_PREFETCH, + !IS_ENABLED(CONFIG_CPU_ES2), + true, + !((cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL) && + revision == 0)); +/* #82499 - Instruction Cache must be handled carefully + * when flush_dc_line also flushes IC by physical address. */ +CPUHAS(CPU_FEAT_FLUSH_DC_IC, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 3, + iset_ver >= E2K_ISET_V3); +/* #89653 - some hw counter won't reset, which may cause corruption of DMA. + * Workaround - reset machine until the counter sets in good value */ +CPUHAS(CPU_HWBUG_BAD_RESET, + !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_E8C_MDL && revision <= 1); +/* #90514 - hardware hangs after modifying code with a breakpoint. + * Workaround - use HS.lng from the instruction being replaced. */ +CPUHAS(CPU_HWBUG_BREAKPOINT_INSTR, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E8C2_MDL); +/* #92834, #96516 - hang because of hardware problems. + * Workaround - boot activates watchdog, kernel should disable it */ +CPUHAS(CPU_HWBUG_E8C_WATCHDOG, + !IS_ENABLED(CONFIG_CPU_E8C), + false, + cpu == IDR_E8C_MDL && revision <= 1); +/* #94466 */ +CPUHAS(CPU_HWBUG_IOMMU, + !IS_ENABLED(CONFIG_CPU_E2S), + false, + cpu == IDR_E2S_MDL && revision <= 2); +/* #95860 - WC memory conflicts with DAM. 
+ * Workaround - "wait st_c" between WC writes and cacheable loads */ +CPUHAS(CPU_HWBUG_WC_DAM, + !IS_ENABLED(CONFIG_CPU_E2S) && !IS_ENABLED(CONFIG_CPU_E8C) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_E2S_MDL && revision <= 2 || + cpu == IDR_E8C_MDL && revision <= 1 || + cpu == IDR_E8C2_MDL && revision == 0); +/* 96719 - combination of flags s_f=0, store=1, sru=1 is possible + * Workaround - treat it as s_f=1, store=1, sru=1 */ +CPUHAS(CPU_HWBUG_TRAP_CELLAR_S_F, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0); +/* #97594 - %cr1_lo.ss flag is lost if ext. interrupt arrives faster. + * Workaround - manually set %cr1_lo.ss again in interrupt handler */ +CPUHAS(CPU_HWBUG_SS, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL && revision <= 2 || + cpu == IDR_E8C_MDL && revision <= 2 || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0); +/* #99302 - %aaldv sometimes is not restored properly. + * Workaround - insert 'wait ma_c' barrier */ +CPUHAS(CPU_HWBUG_AAU_AALDV, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL && revision == 0); +/* #103223 - LAPIC does not send EoI to IO_APIC for level interrupts. + * Workaround - wait under closed interrupts until APIC_ISR clears */ +CPUHAS(CPU_HWBUG_LEVEL_EOI, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL); +/* #104865 - hardware might generate a false single step interrupt + * Workaround - clean frame 0 of PCS during the allocation */ +CPUHAS(CPU_HWBUG_FALSE_SS, + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + false, + cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL || + cpu == IDR_E2S_MDL && revision <= 2 || + cpu == IDR_E8C_MDL && revision <= 2 || + cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL); +/* #117649 - false exc_data_debug are generated based on _previous_ + * values in ld/st address registers. + * Workaround - forbid data breakpoint on the first 31 bytes + * (hardware prefetch works with 32 bytes blocks). 
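+ *
+ * Code that programs hardware data breakpoints is expected to test the
+ * feature bit first, e.g. (illustrative sketch; the helper name is
+ * hypothetical):
+ *
+ *	if (cpu_has(CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG))
+ *		restrict_data_breakpoint_range(bp);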
 */
+CPUHAS(CPU_HWBUG_SPURIOUS_EXC_DATA_DEBUG,
+	!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
+		!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
+		!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E12C) &&
+		!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
+	false,
+	cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
+		cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
+		cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
+		cpu == IDR_E12C_MDL && revision == 0 ||
+		cpu == IDR_E16C_MDL && revision == 0 ||
+		cpu == IDR_E2C3_MDL && revision == 0);
+/* #119084 - several TLB flushes in a row might fail to flush L1D.
+ * Workaround - insert "wait fl_c" immediately after every TLB flush */
+CPUHAS(CPU_HWBUG_TLB_FLUSH_L1D,
+	!IS_ENABLED(CONFIG_CPU_E8C2),
+	false,
+	cpu == IDR_E8C2_MDL);
+/* #121311 - asynchronous entries in INTC_INFO_MU always have "pm" bit set.
+ * Workaround - use "pm" bit saved in guest's chain stack. */
+CPUHAS(CPU_HWBUG_GUEST_ASYNC_PM,
+	!IS_ENABLED(CONFIG_CPU_E12C) && !IS_ENABLED(CONFIG_CPU_E16C) &&
+		!IS_ENABLED(CONFIG_CPU_E2C3),
+	false,
+	cpu == IDR_E12C_MDL || cpu == IDR_E16C_MDL ||
+		cpu == IDR_E2C3_MDL);
+/* #122946 - a new interrupt may conflict with the sync signal turning off.
+ * Workaround - waiting for C0 after E2K_WAIT_V6 */
+CPUHAS(CPU_HWBUG_E16C_SLEEP,
+	!IS_ENABLED(CONFIG_CPU_E16C),
+	false,
+	cpu == IDR_E16C_MDL && revision == 0);
+/* #124206 - instruction buffer stops working.
+ * Workaround - prepare %ctpr's in glaunch/trap handler entry;
+ * avoid rbranch in glaunch/trap handler entry and exit. */
+CPUHAS(CPU_HWBUG_L1I_STOPS_WORKING,
+	!IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) &&
+		!IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) &&
+		!IS_ENABLED(CONFIG_CPU_E8C2) && !IS_ENABLED(CONFIG_CPU_E12C) &&
+		!IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3),
+	false,
+	cpu == IDR_ES2_DSP_MDL || cpu == IDR_ES2_RU_MDL ||
+		cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL ||
+		cpu == IDR_E1CP_MDL || cpu == IDR_E8C2_MDL ||
+		cpu == IDR_E12C_MDL || cpu == IDR_E16C_MDL ||
+		cpu == IDR_E2C3_MDL);
+/* #124947 - CLW clearing by OS must be done on the same CPU that started the
+ * hardware clearing operation to avoid creating a stale L1 entry.
+ * Workaround - forbid migration until CLW clearing is finished in software. */
+CPUHAS(CPU_HWBUG_CLW_STALE_L1_ENTRY,
+	IS_ENABLED(CONFIG_E2K_MACHINE) && !IS_ENABLED(CONFIG_CPU_E12C) &&
+		!IS_ENABLED(CONFIG_CPU_E16C),
+	IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
+		IS_ENABLED(CONFIG_CPU_E8C2),
+	cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E8C2_MDL ||
+		cpu == IDR_E12C_MDL && revision == 0 ||
+		cpu == IDR_E16C_MDL && revision == 0);
+/* #126587 - "wait ma_c=1" does not wait for all L2$ writebacks to complete
+ * when disabling CPU core with "wait trap=1" algorithm.
+ * Workaround - manually insert 66 NOPs before "wait trap=1" */
+CPUHAS(CPU_HWBUG_C3_WAIT_MA_C,
+	IS_ENABLED(CONFIG_E2K_MACHINE),
+	IS_ENABLED(CONFIG_CPU_E2S) || IS_ENABLED(CONFIG_CPU_E8C) ||
+		IS_ENABLED(CONFIG_CPU_E1CP),
+	cpu == IDR_E2S_MDL || cpu == IDR_E8C_MDL || cpu == IDR_E1CP_MDL);
+/* #128127 - Intercepting SCLKM3 write does not prevent guest from writing it.
+ * Workaround - Update SH_SCLKM3 in intercept handler */ +CPUHAS(CPU_HWBUG_VIRT_SCLKM3_INTC, + !IS_ENABLED(CONFIG_CPU_E16C) && !IS_ENABLED(CONFIG_CPU_E2C3) && + !IS_ENABLED(CONFIG_CPU_E12C), + false, + cpu == IDR_E16C_MDL && revision == 0 || + cpu == IDR_E12C_MDL && revision == 0 || + cpu == IDR_E2C3_MDL && revision == 0); +/* Rely on IDR instead of iset version to choose between APIC and EPIC. + * For guest we use it's own fake IDR so that we choose between APIC and + * EPIC based on what hardware guest *thinks* it's being executed on. */ +CPUHAS(CPU_FEAT_EPIC, + IS_ENABLED(CONFIG_E2K_MACHINE) && + !IS_ENABLED(CONFIG_KVM_GUEST_KERNEL), + !IS_ENABLED(CONFIG_CPU_ES2) && !IS_ENABLED(CONFIG_CPU_E2S) && + !IS_ENABLED(CONFIG_CPU_E8C) && !IS_ENABLED(CONFIG_CPU_E1CP) && + !IS_ENABLED(CONFIG_CPU_E8C2), + guest_cpu != IDR_ES2_DSP_MDL && guest_cpu != IDR_ES2_RU_MDL && + guest_cpu != IDR_E2S_MDL && guest_cpu != IDR_E8C_MDL && + guest_cpu != IDR_E1CP_MDL && guest_cpu != IDR_E8C2_MDL); +/* Shows which user registers must be saved upon trap entry/exit */ +CPUHAS(CPU_FEAT_TRAP_V5, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET == 5, + iset_ver == E2K_ISET_V5); +CPUHAS(CPU_FEAT_TRAP_V6, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 6, + iset_ver >= E2K_ISET_V6); +/* QP registers: only since iset V5 */ +CPUHAS(CPU_FEAT_QPREG, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 5, + iset_ver >= E2K_ISET_V5); +/* Optimized version of machine.iset check */ +CPUHAS(CPU_FEAT_ISET_V3, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 3, + iset_ver >= E2K_ISET_V3); +CPUHAS(CPU_FEAT_ISET_V5, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 5, + iset_ver >= E2K_ISET_V5); +CPUHAS(CPU_FEAT_ISET_V6, + CONFIG_CPU_ISET != 0, + CONFIG_CPU_ISET >= 6, + iset_ver >= E2K_ISET_V6); + + +static inline unsigned long test_feature_dynamic(struct machdep *machine, int feature) +{ + unsigned long *addr = machine->cpu_features; + + return 1UL & (addr[feature / 64] >> (feature & 63)); +} + +#define test_feature(machine, feature) \ + ((feature##_is_static) ? \ + (feature##_is_set_statically) : \ + test_feature_dynamic(machine, feature)) + +#define boot_machine_has(machine_p, feature) \ + test_feature(machine_p, feature) +#define boot_cpu_has(feature) boot_machine_has(&boot_machine, feature) + +#ifndef E2K_P2V +# define cpu_has(feature) test_feature(&machine, feature) +#else +# define cpu_has(feature) boot_cpu_has(feature) +#endif + +/* Normally cpu_has() is passed symbolic name of feature (e.g. CPU_FEAT_*), + * use this one instead if only numeric value of feature is known. */ +#define cpu_has_by_value(feature) test_feature_dynamic(&machine, feature) + +extern void cpu_set_feature(struct machdep *machine, int feature); +extern void cpu_clear_feature(struct machdep *machine, int feature); + + +extern __nodedata machdep_t machine; +extern __nodedata pt_struct_t pgtable_struct; + +#define boot_machine (boot_get_vo_value(machine)) +#define boot_pgtable_struct ((pt_struct_t)boot_get_vo_value(pgtable_struct)) +#define boot_pgtable_struct_p boot_vp_to_pp(&pgtable_struct) + +#if CONFIG_CPU_ISET >= 6 +# define IS_CPU_ISET_V6() true +#elif CONFIG_CPU_ISET >= 1 +# define IS_CPU_ISET_V6() false +#elif CONFIG_CPU_ISET == 0 +# ifdef E2K_P2V +# define IS_CPU_ISET_V6() \ + (boot_machine.native_iset_ver >= E2K_ISET_V6) +# else /* ! 
E2K_P2V */
+#  define IS_CPU_ISET_V6() \
+		(machine.native_iset_ver >= E2K_ISET_V6)
+# endif /* E2K_P2V */
+#else /* CONFIG_CPU_ISET undefined or negative */
+# warning "Undefined CPU ISET VERSION #, IS_CPU_ISET_V6 is defined dynamically"
+# ifdef E2K_P2V
+#  define IS_CPU_ISET_V6() \
+		(boot_machine.native_iset_ver >= E2K_ISET_V6)
+# else /* ! E2K_P2V */
+#  define IS_CPU_ISET_V6() \
+		(machine.native_iset_ver >= E2K_ISET_V6)
+# endif /* E2K_P2V */
+#endif /* CONFIG_CPU_ISET 0-6 */
+
+#define IS_HV_GM() (machine.gmi)
+
+extern void save_kernel_gregs_v2(struct kernel_gregs *);
+extern void save_kernel_gregs_v5(struct kernel_gregs *);
+extern void save_gregs_v2(struct global_regs *);
+extern void save_gregs_v5(struct global_regs *);
+extern void save_local_gregs_v2(struct local_gregs *);
+extern void save_local_gregs_v5(struct local_gregs *);
+extern void save_gregs_dirty_bgr_v2(struct global_regs *);
+extern void save_gregs_dirty_bgr_v5(struct global_regs *);
+extern void save_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
+		unsigned long mask_not_save);
+extern void save_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
+		unsigned long mask_not_save);
+extern void restore_gregs_v2(const struct global_regs *);
+extern void restore_gregs_v5(const struct global_regs *);
+extern void restore_local_gregs_v2(const struct local_gregs *);
+extern void restore_local_gregs_v5(const struct local_gregs *);
+extern void restore_gregs_on_mask_v2(struct global_regs *, bool dirty_bgr,
+		unsigned long mask_not_restore);
+extern void restore_gregs_on_mask_v5(struct global_regs *, bool dirty_bgr,
+		unsigned long mask_not_restore);
+extern void save_dimtp_v6(e2k_dimtp_t *);
+extern void restore_dimtp_v6(const e2k_dimtp_t *);
+extern void save_kvm_context_v6(struct kvm_vcpu_arch *);
+extern void restore_kvm_context_v6(const struct kvm_vcpu_arch *);
+extern void qpswitchd_sm(int);
+
+extern void calculate_aau_aaldis_aaldas_v2(const struct pt_regs *regs,
+		struct thread_info *ti, struct e2k_aau_context *context);
+extern void calculate_aau_aaldis_aaldas_v5(const struct pt_regs *regs,
+		struct thread_info *ti, struct e2k_aau_context *context);
+extern void calculate_aau_aaldis_aaldas_v6(const struct pt_regs *regs,
+		struct thread_info *ti, struct e2k_aau_context *context);
+extern void do_aau_fault_v2(int aa_field, struct pt_regs *regs);
+extern void do_aau_fault_v5(int aa_field, struct pt_regs *regs);
+extern void do_aau_fault_v6(int aa_field, struct pt_regs *regs);
+extern void save_aaldi_v2(u64 *aaldis);
+extern void save_aaldi_v5(u64 *aaldis);
+extern void get_aau_context_v2(struct e2k_aau_context *);
+extern void get_aau_context_v5(struct e2k_aau_context *);
+
+extern void flushts_v3(void);
+
+extern unsigned long boot_native_read_IDR_reg_value(void);
+
+unsigned long rrd_v2(int);
+unsigned long rrd_v3(int);
+unsigned long rrd_v6(int);
+void rwd_v2(int reg, unsigned long value);
+void rwd_v3(int reg, unsigned long value);
+void rwd_v6(int reg, unsigned long value);
+unsigned long boot_rrd_v2(int);
+unsigned long boot_rrd_v3(int);
+unsigned long boot_rrd_v6(int);
+void boot_rwd_v2(int reg, unsigned long value);
+void boot_rwd_v3(int reg, unsigned long value);
+void boot_rwd_v6(int reg, unsigned long value);
+
+/* Supported registers for machine->rrd()/rwd() */
+enum {
+	E2K_REG_CORE_MODE,
+	E2K_REG_HCEM,
+	E2K_REG_HCEB,
+	E2K_REG_OSCUTD,
+	E2K_REG_OSCUIR,
+};
+
+u64 native_get_cu_hw1_v2(void);
+u64 native_get_cu_hw1_v5(void);
+void native_set_cu_hw1_v2(u64);
+void native_set_cu_hw1_v5(u64);
+
+void
invalidate_MLT_v2(void);
+void invalidate_MLT_v3(void);
+void get_and_invalidate_MLT_context_v2(e2k_mlt_t *mlt_state);
+void get_and_invalidate_MLT_context_v3(e2k_mlt_t *mlt_state);
+void get_and_invalidate_MLT_context_v6(e2k_mlt_t *mlt_state);
+
+#ifdef CONFIG_SMP
+void clock_off_v3(void);
+void clock_on_v3(int cpu);
+#endif
+
+void C1_enter_v2(void);
+void C1_enter_v6(void);
+void C3_enter_v3(void);
+void C3_enter_v6(void);
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_MACHDEP_H_ */
diff --git a/arch/e2k/include/asm/machdep_numa.h b/arch/e2k/include/asm/machdep_numa.h
new file mode 100644
index 0000000..b36e707
--- /dev/null
+++ b/arch/e2k/include/asm/machdep_numa.h
@@ -0,0 +1,23 @@
+#ifndef _E2K_MACHDEP_NUMA_H_
+#define _E2K_MACHDEP_NUMA_H_
+
+#include
+#include
+
+#ifdef CONFIG_NUMA
+#define the_node_machine(nid) \
+	((machdep_t *)__va(vpa_to_pa( \
+			node_kernel_va_to_pa(nid, &machine))))
+#define node_machine the_node_machine(numa_node_id())
+#define the_node_pgtable_struct(nid) \
+	((pt_struct_t *)__va(vpa_to_pa(node_kernel_va_to_pa(nid, \
+			&pgtable_struct))))
+#define node_pgtable_struct the_node_pgtable_struct(numa_node_id())
+#else /* ! CONFIG_NUMA */
+#define the_node_machine(nid) (&machine)
+#define node_machine the_node_machine(0)
+#define the_node_pgtable_struct(nid) (&pgtable_struct)
+#define node_pgtable_struct the_node_pgtable_struct(0)
+#endif /* CONFIG_NUMA */
+
+#endif
diff --git a/arch/e2k/include/asm/mas.h b/arch/e2k/include/asm/mas.h
new file mode 100644
index 0000000..5430298
--- /dev/null
+++ b/arch/e2k/include/asm/mas.h
@@ -0,0 +1,106 @@
+#ifndef _E2K_MAS_H_
+#define _E2K_MAS_H_
+
+#include
+
+#include
+
+#ifndef __ASSEMBLY__
+
+/* new in iset v6 */
+typedef union {
+	struct {
+		u8 mod	: 3;
+		u8 opc	: 4;
+		u8	: 1;
+	} masf1;
+	struct {
+		u8 mod	: 3;
+		u8 be	: 1;
+		u8 m1	: 1 /* == 0 */;
+		u8 dc_ch : 2;
+		u8	: 1;
+	} masf2;
+	struct {
+		u8 mod	: 3 /* == 3,7 */;
+		u8 be	: 1;
+		u8 m1	: 1 /* == 1 */;
+		u8 m3	: 1;
+		u8 mt	: 1;
+		u8	: 1;
+	} masf3;
+	struct {
+		u8 m2	: 2;
+		u8 ch1	: 1;
+		u8 be	: 1;
+		u8 m1	: 1 /* == 1 */;
+		u8 dc_ch : 2;
+		u8	: 1;
+	} masf4;
+	u8 word;
+} e2k_v6_mas_t;
+
+# define MAS_MT_0 0
+# define MAS_MT_1 1
+
+# define MAS_STORE_RELEASE_V6(_mt) \
+({ \
+	e2k_v6_mas_t __mas = { \
+		.masf3.mod = 3, \
+		.masf3.be = 0, \
+		.masf3.m1 = 1, \
+		.masf3.m3 = 0, \
+		.masf3.mt = (_mt) \
+	}; \
+	__mas.word; \
+})
+
+# define MAS_LOAD_ACQUIRE_V6(_mt) \
+({ \
+	e2k_v6_mas_t __mas = { \
+		.masf3.mod = 3, \
+		.masf3.be = 0, \
+		.masf3.m1 = 1, \
+		.masf3.m3 = 0, \
+		.masf3.mt = (_mt) \
+	}; \
+	__mas.word; \
+})
+
+/* Only ALC0 or ALC0/ALC2 for quadro */
+# define MAS_WATCH_FOR_MODIFICATION_V6 \
+({ \
+	e2k_v6_mas_t __mas = { \
+		.masf4.m1 = 1, \
+		.masf4.m2 = 1 \
+	}; \
+	__mas.word; \
+})
+
+/* Note that 'root', 'spec' and 'store' must also be checked */
+static inline bool is_mas_secondary_lock_trap_on_store(unsigned int mas)
+{
+	return (mas & 3) == 1;
+}
+
+/* Note that 'root', 'spec' and 'store' must also be checked */
+static inline bool is_mas_secondary_lock_trap_on_load_store(unsigned int mas)
+{
+	return (mas & 3) == 2;
+}
+
+#endif
+
+#endif /* _E2K_MAS_H_ */
diff --git a/arch/e2k/include/asm/mc146818rtc.h b/arch/e2k/include/asm/mc146818rtc.h
new file mode 100644
index 0000000..8e414dd
--- /dev/null
+++ b/arch/e2k/include/asm/mc146818rtc.h
@@ -0,0 +1,12 @@
+/*
* Machine dependent access functions for RTC registers. + */ +#ifndef _ASM_MC146818RTC_H +#define _ASM_MC146818RTC_H + +#include +#include + +#define RTC_IRQ 8 + +#endif /* _ASM_MC146818RTC_H */ diff --git a/arch/e2k/include/asm/mlt.h b/arch/e2k/include/asm/mlt.h new file mode 100644 index 0000000..11f003d --- /dev/null +++ b/arch/e2k/include/asm/mlt.h @@ -0,0 +1,85 @@ +#ifndef _E2K_MLT_H_ +#define _E2K_MLT_H_ + +#include +#include +#include + + +#define NATIVE_MLT_SIZE (machine.mlt_size) +#define NATIVE_MAX_MLT_SIZE ES2_MLT_SIZE + +#define REG_MLT_N_SHIFT 7 +#define REG_MLT_DW_SHIFT 5 +#define REG_MLT_TYPE_SHIFT 0 + +#define REG_MLT_TYPE 5UL + + +typedef unsigned long e2k_mlt_line_t; + +typedef struct e2k_mlt_dw0_v2_fields +{ + e2k_mlt_line_t resc : 4; /* [3:0] */ + e2k_mlt_line_t mask : 8; /* [11:4] */ + e2k_mlt_line_t page : 28; /* [39:12]*/ + e2k_mlt_line_t opcod_size : 3; /* [42:40] */ + e2k_mlt_line_t rg : 8; /* [50:43] */ + e2k_mlt_line_t lock_mode : 1; /* [51] */ + e2k_mlt_line_t hit : 1; /* [52] */ + e2k_mlt_line_t val : 1; /* [53] */ + e2k_mlt_line_t unresolved : 10; /* [63:54] */ +} e2k_mlt_dw0_v2_fields_t; + +typedef struct e2k_mlt_dw0_v6_fields +{ + e2k_mlt_line_t val : 1; /* [0] */ + e2k_mlt_line_t hit : 1; /* [1] */ + e2k_mlt_line_t lock_mode : 1; /* [2] */ + e2k_mlt_line_t word_fst : 9; /* [11:3] */ + e2k_mlt_line_t page_fst : 36; /* [47:12]*/ + e2k_mlt_line_t mask_fst : 8; /* [55:48] */ + e2k_mlt_line_t rg : 8; /* [63:56] */ +} e2k_mlt_dw0_v6_fields_t; + +/* One reg (string) in MLT table */ +typedef struct e2k_mlt_entry { + union { + e2k_mlt_dw0_v2_fields_t v2_fields; + e2k_mlt_dw0_v6_fields_t v6_fields; + e2k_mlt_line_t word; + } dw0; + + union { + e2k_mlt_line_t word; + } dw1; + + union { + e2k_mlt_line_t word; + } dw2; +} e2k_mlt_entry_t; + +typedef struct e2k_mlt { + int num; /* number of entries in the MLT */ + e2k_mlt_entry_t mlt[NATIVE_MAX_MLT_SIZE]; /* valid MLT entries */ +} e2k_mlt_t; + +#define NATIVE_READ_MLT_REG(addr) \ + NATIVE_DO_READ_MAS(addr, MAS_MLT_REG, e2k_mlt_line_t, d, 2) + +typedef unsigned long e2k_dam_t; + +#define REG_DAM_N_SHIFT 7 +#define REG_DAM_TYPE_SHIFT 0 +#define REG_DAM_TYPE 4 + +#define NATIVE_READ_DAM_REG(addr) \ + NATIVE_DO_READ_MAS(addr, MAS_DAM_REG, e2k_dam_t, d, 2) + +#define NATIVE_SAVE_BINCO_REGS_FOR_PTRACE(regs) \ +do { \ + regs->rpr_lo = NATIVE_READ_RPR_LO_REG_VALUE(); \ + regs->rpr_hi = NATIVE_READ_RPR_HI_REG_VALUE(); \ +} while (0) + +#endif diff --git a/arch/e2k/include/asm/mm_hooks.h b/arch/e2k/include/asm/mm_hooks.h new file mode 100644 index 0000000..74e1bb1 --- /dev/null +++ b/arch/e2k/include/asm/mm_hooks.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define no-op hooks to be included in asm/mmu_context.h + * for arch e2k. 
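+ *
+ * These hooks are called from generic mm code; on e2k only
+ * arch_vma_access_permitted() below does real work, guarding the
+ * VM_PRIVILEGED hardware and signal stack areas.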
+ */ +#ifndef _ASM_E2K_MM_HOOKS_H +#define _ASM_E2K_MM_HOOKS_H + +static inline void arch_unmap(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, bool foreign) +{ + if (vma->vm_flags & VM_PRIVILEGED) { + /* We have only hardware and signal + * stacks in VM_PRIVILEGED area */ + if (execute) + return false; + + if (!test_ts_flag(TS_KERNEL_SYSCALL)) + return false; + } + + return true; +} +#endif /* _ASM_E2K_MM_HOOKS_H */ diff --git a/arch/e2k/include/asm/mman.h b/arch/e2k/include/asm/mman.h new file mode 100644 index 0000000..bf5ca54 --- /dev/null +++ b/arch/e2k/include/asm/mman.h @@ -0,0 +1,149 @@ +#ifndef _E2K_MMAN_H_ +#define _E2K_MMAN_H_ + +#include + +#include +#include + +#define MV_FLUSH 0x00000001 + +struct vm_area_struct; +struct file; +struct mm_struct; + +int make_all_vma_pages_valid(struct vm_area_struct *vma, int flags); +int make_vma_pages_valid(struct vm_area_struct *vma, + unsigned long start_addr, unsigned long end_addr); + +pte_t *get_user_address_pte(struct vm_area_struct *vma, e2k_addr_t address); + +int vm_munmap_notkillable(unsigned long start, size_t len); +unsigned long vm_mmap_notkillable(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset); + +typedef enum sma_mode { + SMA_RO, + SMA_RW, + SMA_NX, + SMA_X, + SMA_P, + SMA_NP, + SMA_WB_MT, + SMA_WC_MT, + SMA_UC_MT, +} sma_mode_t; + +int e2k_set_vmm_cui(struct mm_struct *mm, int cui, + unsigned long code_base, unsigned long code_end); + +#define VM_HW_STACK_COMMON_FLAGS (VM_PRIVILEGED | VM_DONTEXPAND) +#define VM_HW_STACK_PS_FLAGS (VM_HW_STACK_COMMON_FLAGS | VM_HW_STACK_PS) +#define VM_HW_STACK_PCS_FLAGS (VM_HW_STACK_COMMON_FLAGS | VM_HW_STACK_PCS) + +static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, + unsigned long pkey) +{ + unsigned long vm_flags; + unsigned long cui; + + /* Order of checks is important since + * 32BIT flag is set in protected mode */ + if (TASK_IS_PROTECTED(current)) + cui = GET_CUI_FROM_INT_PROT(prot); + else + cui = USER_CODES_UNPROT_INDEX(current); + + vm_flags = cui << VM_CUI_SHIFT; + + /* + * Check if we are allocating hardware stacks. + */ + if (current_thread_info()->status & TS_MMAP_DONTEXPAND) { + /* + * VM_DONTEXPAND makes sure that even if VM_MLOCK + * is set, this area won't be populated on mmap(). 
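+		 *
+		 * The TS_MMAP_* checks below all follow one pattern: the
+		 * caller sets a status bit in current_thread_info(), performs
+		 * the mapping, then clears the bit again; this hook translates
+		 * the bit into a vm_flags bit. A minimal caller sketch
+		 * (illustrative only):
+		 *
+		 *	current_thread_info()->status |= TS_MMAP_PRIVILEGED;
+		 *	addr = vm_mmap(NULL, 0, size, prot, flags, 0);
+		 *	current_thread_info()->status &= ~TS_MMAP_PRIVILEGED;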
+ */ + vm_flags |= VM_DONTEXPAND; + } + + if (current_thread_info()->status & TS_MMAP_PRIVILEGED) + vm_flags |= VM_PRIVILEGED; + + if (current_thread_info()->status & TS_MMAP_DONTCOPY) + vm_flags |= VM_DONTCOPY; + + if (current_thread_info()->status & TS_MMAP_PS) + vm_flags |= VM_HW_STACK_PS; + + if (current_thread_info()->status & TS_MMAP_PCS) + vm_flags |= VM_HW_STACK_PCS; + + if (current_thread_info()->status & TS_MMAP_SIGNAL_STACK) + vm_flags |= VM_SIGNAL_STACK; + + if (current_thread_info()->status & TS_MMAP_NOHUGEPAGE) + vm_flags |= VM_NOHUGEPAGE; + + return vm_flags; +} +#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) + +static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) +{ + unsigned long page_prot; + + page_prot = vm_flags & VM_CUI; + + if (vm_flags & VM_PRIVILEGED) + _PAGE_SET_PRIV(page_prot); + + return __pgprot(page_prot); +} +#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) + +static inline int arch_validate_prot(unsigned long prot, unsigned long addr) +{ + if (prot & PROT_CUI) + return 0; + return 1; +} +#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr) + +static inline int arch_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + if (TASK_IS_BINCO(current) && + (!ADDR_IN_SS(addr) && ADDR_IN_SS(addr + len) || + ADDR_IN_SS(addr) && !ADDR_IN_SS(addr + len))) + return -EINVAL; + + return 0; +} +#define arch_mmap_check(addr, len, flags) arch_mmap_check(addr, len, flags) + +extern int no_writecombine; + +/* + * execute_mmu_operations() return values + */ +enum exec_mmu_ret { + /* Successfully executed, go to the next trap cellar record */ + EXEC_MMU_SUCCESS = 1, + /* Stop handling trap cellar and exit */ + EXEC_MMU_STOP, + /* Trap cellar record should be executed again */ + EXEC_MMU_REPEAT +}; +extern int execute_mmu_operations(trap_cellar_t *tcellar, + trap_cellar_t *next_tcellar, struct pt_regs *regs, + int rg, int zeroing, e2k_addr_t *addr, + bool (*is_spill_fill_recovery)(tc_cond_t cond, + e2k_addr_t address, bool s_f, + struct pt_regs *regs), + int (*calculate_rf_frame)(struct pt_regs *regs, + tc_cond_t cond, u64 **radr, + bool *load_to_rf)); + +#endif /* _E2K_MMAN_H_ */ diff --git a/arch/e2k/include/asm/mmu-regs-types-v2.h b/arch/e2k/include/asm/mmu-regs-types-v2.h new file mode 100644 index 0000000..6bfbda1 --- /dev/null +++ b/arch/e2k/include/asm/mmu-regs-types-v2.h @@ -0,0 +1,156 @@ +/* + * E2K ISET V2-V5 MMU structure and common definitions. + * + * Copyright 2018 (c) MCST, Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_MMU_REGS_TYPES_V2_H +#define _ASM_E2K_MMU_REGS_TYPES_V2_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V2-V5 page tables. + * NOTE: E2K has four levels of page tables. 
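+ * (In generic Linux terms the four levels are pgd, pud, pmd and pte.)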
+ */ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/* + * DTLB entry probe format + */ + +#define DTLB_ENTRY_ERROR_MASK_V2 0xbe00000000000000ULL +#define DTLB_ENTRY_PH_BOUND_V2 0x8000000000000000ULL +#define DTLB_ENTRY_ILLEGAL_PAGE_V2 0x4000000000000000ULL +#define DTLB_ENTRY_PAGE_MISS_V2 0x2000000000000000ULL +#define DTLB_ENTRY_PROBE_DISABLED_V2 0x0400000000000000ULL +#define DTLB_ENTRY_RES_BITS_V2 0x0200000000000000ULL +#define DTLB_ENTRY_MISS_LEVEL_MASK_V2 0x1800000000000000ULL + +#define DTLB_ENTRY_WR_V2 0x0000000000000002ULL +#define DTLB_ENTRY_NON_EX_U_S_V2 0x0000000000000004ULL +#define DTLB_ENTRY_PWT_V2 0x0000000000000008ULL +#define DTLB_ENTRY_PCD1_V2 0x0000000000000010ULL +#define DTLB_ENTRY_D_V2 0x0000000000000040ULL +#define DTLB_ENTRY_G_V2 0x0000000000000100ULL +#define DTLB_ENTRY_PCD2_V2 0x0000000000000200ULL +#define DTLB_ENTRY_NWA_V2 0x0000000000000400ULL +#define DTLB_ENTRY_PHA_V2 0x000000fffffff000ULL /* phys address */ +#define DTLB_ENTRY_VVA_V2 0x0000010000000000ULL /* VVA bit */ +#define DTLB_ENTRY_PV_V2 0x0000020000000000ULL +#define DTLB_ENTRY_INT_PR_NON_EX_V2 0x0000040000000000ULL +#define DTLB_ENTRY_INTL_RD_V2 0x0000200000000000ULL +#define DTLB_ENTRY_INTL_WR_V2 0x0000400000000000ULL +#define DTLB_ENTRY_WP_V2 0x0000800000000000ULL +#define DTLB_ENTRY_UC_V2 0x0001000000000000ULL + +/* MPT flags for 2/4Mb & 1Gb pages [46:45] */ +#define DTLB_ENTRY_MPT_FLAGS_V2 0x0000600000000000ULL + +#define DTLB_EP_RES_V2 0x0001ffffffffffffULL +#define DTLB_EP_FAULT_RES_V2 (~DTLB_EP_RES_V2) + +/* + * DTLB address probe result format + */ +#define PH_ADDR_AP_RES_V2 0x000000ffffffffffULL /* Physical address */ + /* normal result of */ + /* AP [39: 0] */ +#define DISABLE_AP_RES_V2 DISABLE_EP_RES_V2 /* AP diasble result */ + /* [62] */ +#define ILLEGAL_PAGE_AP_RES_V2 ILLEGAL_PAGE_EP_RES_V2 /* illegal page */ + /* [58] */ + +/* convert physical address to page frame number for DTLB */ +#define PA_TO_DTLB_ENTRY_PHA_V2(phys_addr) \ + (((e2k_addr_t)phys_addr) & DTLB_ENTRY_PHA_V2) +/* convert the page frame number from DTLB entry to physical address */ +#define DTLB_ENTRY_PHA_TO_PA_V2(dtlb_entry) \ + ((e2k_addr_t)(dtlb_entry) & DTLB_ENTRY_PHA_V2) + +static inline probe_entry_t +covert_uni_dtlb_flags_to_dtlb_val_v2(const uni_dtlb_t uni_flags) +{ + probe_entry_t dtlb_flags = 0; + + if (uni_flags & UNI_PAGE_WRITE) + dtlb_flags |= (DTLB_ENTRY_WR_V2); + if (uni_flags & UNI_PAGE_PRIV) + dtlb_flags |= (DTLB_ENTRY_PV_V2); + if (uni_flags & UNI_PAGE_VALID) + dtlb_flags |= (DTLB_ENTRY_VVA_V2); + if (uni_flags & UNI_PAGE_PROTECT) + dtlb_flags |= (DTLB_ENTRY_INT_PR_NON_EX_V2); + if (uni_flags & UNI_PAGE_GLOBAL) + dtlb_flags |= (DTLB_ENTRY_G_V2); + if (uni_flags & UNI_PAGE_DIRTY) + dtlb_flags |= (DTLB_ENTRY_D_V2); + if (uni_flags & UNI_PAGE_NWA) + dtlb_flags |= (DTLB_ENTRY_NWA_V2); + if (uni_flags & UNI_PAGE_MEM_TYPE) + dtlb_flags |= (DTLB_ENTRY_PCD1_V2 | DTLB_ENTRY_PCD2_V2 | + DTLB_ENTRY_PWT_V2); + if (uni_flags & UNI_PAGE_NON_EX) + dtlb_flags |= (DTLB_ENTRY_NON_EX_U_S_V2); + if (uni_flags & UNI_PAGE_PFN) + dtlb_flags |= (DTLB_ENTRY_PHA_V2); + if (uni_flags & UNI_PAGE_MEM_TYPE_MA) + dtlb_flags |= (DTLB_ENTRY_PCD1_V2 | DTLB_ENTRY_PCD2_V2 | + DTLB_ENTRY_PWT_V2); + if (uni_flags & UNI_PAGE_WRITE_INT) + dtlb_flags |= (DTLB_ENTRY_WP_V2); + if (uni_flags & UNI_PAGE_INTL_RD) + dtlb_flags |= (DTLB_ENTRY_INTL_RD_V2); + if (uni_flags & UNI_PAGE_INTL_WR) + dtlb_flags |= (DTLB_ENTRY_INTL_WR_V2); + if (uni_flags & UNI_DTLB_EP_RES) + dtlb_flags |= (DTLB_EP_RES_V2); + if (uni_flags & 
UNI_DTLB_PH_ADDR_AP_RES)
+		dtlb_flags |= (PH_ADDR_AP_RES_V2);
+	if (uni_flags & UNI_DTLB_ERROR_MASK)
+		dtlb_flags |= (DTLB_ENTRY_ERROR_MASK_V2);
+	if (uni_flags & UNI_DTLB_MISS_LEVEL)
+		dtlb_flags |= (DTLB_ENTRY_MISS_LEVEL_MASK_V2);
+	if (uni_flags & UNI_DTLB_SUCCESSFUL)
+		dtlb_flags |= (DTLB_ENTRY_PROBE_DISABLED_V2);
+	if (uni_flags & UNI_DTLB_RES_BITS)
+		dtlb_flags |= (DTLB_ENTRY_RES_BITS_V2);
+
+	BUILD_BUG_ON(dtlb_flags == 0);
+
+	return dtlb_flags;
+}
+
+static inline probe_entry_t
+fill_dtlb_val_v2_flags(const uni_dtlb_t uni_flags)
+{
+	return covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
+}
+static inline probe_entry_t
+get_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
+{
+	return dtlb_val & covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
+}
+static inline bool
+test_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
+{
+	return get_dtlb_val_v2_flags(dtlb_val, uni_flags) != 0;
+}
+static inline probe_entry_t
+set_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
+{
+	return dtlb_val | covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
+}
+static inline probe_entry_t
+clear_dtlb_val_v2_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags)
+{
+	return dtlb_val & ~covert_uni_dtlb_flags_to_dtlb_val_v2(uni_flags);
+}
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* ! _ASM_E2K_MMU_REGS_TYPES_V2_H */
diff --git a/arch/e2k/include/asm/mmu-regs-types-v6.h b/arch/e2k/include/asm/mmu-regs-types-v6.h
new file mode 100644
index 0000000..8dda520
--- /dev/null
+++ b/arch/e2k/include/asm/mmu-regs-types-v6.h
@@ -0,0 +1,180 @@
+/*
+ * E2K ISET V6-... MMU structure and common definitions.
+ *
+ * Copyright 2018 (c) MCST, Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _ASM_E2K_MMU_REGS_TYPES_V6_H
+#define _ASM_E2K_MMU_REGS_TYPES_V6_H
+
+/*
+ * This file contains the functions and defines necessary to modify and
+ * use the E2K ISET V6 and later page tables.
+ * NOTE: E2K has four levels of page tables.
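+ *
+ * The *_dtlb_val_v6_flags() helpers defined below let callers test a
+ * DTLB probe result against ISET-independent UNI_* flags. A sketch
+ * (illustrative only; the probe helper name is hypothetical):
+ *
+ *	probe_entry_t val = dtlb_entry_probe(vaddr);
+ *	if (test_dtlb_val_v6_flags(val, UNI_DTLB_ERROR_MASK))
+ *		handle_probe_fault(val);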
+ */ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/* + * DTLB entry probe format + */ +#define DTLB_ENTRY_MT_exc_SHIFT_V6 8 /* shift of Memory Type field */ + /* for exceptions */ +#define DTLB_ENTRY_MT_BITS_NUM_V6 3 /* occupies 3 bits */ +#define DTLB_ENTRY_MT_ma_SHIFT_V6 48 /* shift of Memory Type field */ + /* for memory access */ +#define DTLB_ENTRY_PFN_SHIFT_V6 12 /* shift of Physical Page */ + /* Number */ + +#define DTLB_ENTRY_ERROR_MASK_V6 0xe000000000000000ULL +#define DTLB_ENTRY_PH_BOUND_V6 0x8000000000000000ULL +#define DTLB_ENTRY_ILLEGAL_PAGE_V6 0x4000000000000000ULL +#define DTLB_ENTRY_PAGE_MISS_V6 0x2000000000000000ULL +#define DTLB_ENTRY_PROBE_SUCCESSFUL_V6 0x0000000000000080ULL +#define DTLB_ENTRY_RES_BITS_V6 0x1000000000000000ULL +#define DTLB_ENTRY_MISS_LEVEL_MASK_V6 0x0000000000000007ULL +#define DTLB_ENTRY_TLB_HIT_V6 0x0000000000000020ULL + +#define DTLB_ENTRY_WR_exc_V6 0x0000000000000002ULL +#define DTLB_ENTRY_PV_or_U_S_V6 0x0000000000000004ULL +#define DTLB_ENTRY_VVA_V6 0x0000000000000008ULL +#define DTLB_ENTRY_INT_PR_V6 0x0000000000000010ULL +#define DTLB_ENTRY_G_V6 0x0000000000000020ULL +#define DTLB_ENTRY_D_V6 0x0000000000000040ULL +#define DTLB_ENTRY_NWA_V6 0x0000000000000080ULL +#define DTLB_ENTRY_MT_exc_V6 /* 0x0000000000000700ULL */ \ + (((1ULL << DTLB_ENTRY_MT_BITS_NUM_V6) - 1) << \ + DTLB_ENTRY_MT_exc_SHIFT_V6) +#define DTLB_ENTRY_NON_EX_V6 0x8000000000000000ULL +#define DTLB_ENTRY_PHA_V6 /* 0x0000fffffffff000ULL */ \ + ((((1ULL << E2K_MAX_PHYS_BITS_V6) - 1) >> \ + DTLB_ENTRY_PFN_SHIFT_V6) << \ + DTLB_ENTRY_PFN_SHIFT_V6) +#define DTLB_ENTRY_MT_ma_V6 /* 0x0007000000000000ULL */ \ + (((1ULL << DTLB_ENTRY_MT_BITS_NUM_V6) - 1) << \ + DTLB_ENTRY_MT_ma_SHIFT_V6) +#define DTLB_ENTRY_WR_int_V6 0x0008000000000000ULL +#define DTLB_ENTRY_INTL_RD_V6 0x0010000000000000ULL +#define DTLB_ENTRY_INTL_WR_V6 0x0020000000000000ULL + +/* MPT flags for 2/4Mb & 1Gb pages [46:45] */ +#define DTLB_ENTRY_MPT_FLAGS_V6 0x0000600000000000ULL + +#define DTLB_EP_RES_V6 0x003fffffffffffffULL +#define DTLB_EP_FAULT_RES_V6 (~DTLB_EP_RES_V6) + +/* + * DTLB address probe result format + */ +#define PH_ADDR_AP_RES_V6 /* 0x0000ffffffffffffULL */ \ + ((1ULL << E2K_MAX_PHYS_BITS_V6) - 1) +#define DISABLE_AP_RES_V6 DISABLE_EP_RES_V6 /* AP diasble result */ + /* [62] */ +#define ILLEGAL_PAGE_AP_RES_V6 ILLEGAL_PAGE_EP_RES_V6 /* illegal page */ + /* [58] */ + +/* convert physical address to page frame number for DTLB */ +#define PA_TO_DTLB_ENTRY_PHA_V6(phys_addr) \ + (((e2k_addr_t)phys_addr) & DTLB_ENTRY_PHA_V6) +/* convert the page frame number from DTLB entry to physical address */ +#define DTLB_ENTRY_PHA_TO_PA_V6(dtlb_entry) \ + ((e2k_addr_t)(dtlb_entry) & DTLB_ENTRY_PHA_V6) + +/* get/set Memory Type field from/to DTLB entry */ +#define DTLB_ENTRY_MT_exc_GET_VAL(x) \ + (((x) & DTLB_ENTRY_MT_exc_V6) >> DTLB_ENTRY_MT_exc_SHIFT_V6) +#define DTLB_ENTRY_MT_exc_SET_VAL(x, mt) \ + (((x) & ~DTLB_ENTRY_MT_exc_V6) | \ + (((probe_entry_t)(mt) << DTLB_ENTRY_MT_exc_SHIFT_V6) & \ + DTLB_ENTRY_MT_exc_V6)) +#define DTLB_ENTRY_MT_ma_GET_VAL(x) \ + (((x) & DTLB_ENTRY_MT_ma_V6) >> DTLB_ENTRY_MT_ma_SHIFT_V6) +#define DTLB_ENTRY_MT_ma_SET_VAL(x, mt) \ + (((x) & ~DTLB_ENTRY_MT_ma_V6) | \ + (((probe_entry_t)(mt) << DTLB_ENTRY_MT_ma_SHIFT_V6) & \ + DTLB_ENTRY_MT_ma_V6)) + +static inline probe_entry_t +covert_uni_dtlb_flags_to_dtlb_val_v6(const uni_dtlb_t uni_flags) +{ + probe_entry_t dtlb_flags = 0; + + if (uni_flags & UNI_PAGE_WRITE) + dtlb_flags |= (DTLB_ENTRY_WR_exc_V6); + if (uni_flags & UNI_PAGE_PRIV) + 
dtlb_flags |= (DTLB_ENTRY_PV_or_U_S_V6); + if (uni_flags & UNI_PAGE_VALID) + dtlb_flags |= (DTLB_ENTRY_VVA_V6); + if (uni_flags & UNI_PAGE_PROTECT) + dtlb_flags |= (DTLB_ENTRY_INT_PR_V6); + if (uni_flags & UNI_PAGE_GLOBAL) + dtlb_flags |= (DTLB_ENTRY_G_V6); + if (uni_flags & UNI_PAGE_DIRTY) + dtlb_flags |= (DTLB_ENTRY_D_V6); + if (uni_flags & UNI_PAGE_NWA) + dtlb_flags |= (DTLB_ENTRY_NWA_V6); + if (uni_flags & UNI_PAGE_MEM_TYPE) + dtlb_flags |= (DTLB_ENTRY_MT_exc_V6); + if (uni_flags & UNI_PAGE_NON_EX) + dtlb_flags |= (DTLB_ENTRY_NON_EX_V6); + if (uni_flags & UNI_PAGE_PFN) + dtlb_flags |= (DTLB_ENTRY_PHA_V6); + if (uni_flags & UNI_PAGE_MEM_TYPE_MA) + dtlb_flags |= (DTLB_ENTRY_MT_ma_V6); + if (uni_flags & UNI_PAGE_WRITE_INT) + dtlb_flags |= (DTLB_ENTRY_WR_int_V6); + if (uni_flags & UNI_PAGE_INTL_RD) + dtlb_flags |= (DTLB_ENTRY_INTL_RD_V6); + if (uni_flags & UNI_PAGE_INTL_WR) + dtlb_flags |= (DTLB_ENTRY_INTL_WR_V6); + if (uni_flags & UNI_DTLB_EP_RES) + dtlb_flags |= (DTLB_EP_RES_V6); + if (uni_flags & UNI_DTLB_PH_ADDR_AP_RES) + dtlb_flags |= (PH_ADDR_AP_RES_V6); + if (uni_flags & UNI_DTLB_ERROR_MASK) + dtlb_flags |= (DTLB_ENTRY_ERROR_MASK_V6); + if (uni_flags & UNI_DTLB_MISS_LEVEL) + dtlb_flags |= (DTLB_ENTRY_MISS_LEVEL_MASK_V6); + if (uni_flags & UNI_DTLB_SUCCESSFUL) + dtlb_flags |= (DTLB_ENTRY_PROBE_SUCCESSFUL_V6); + if (uni_flags & UNI_DTLB_RES_BITS) + dtlb_flags |= (DTLB_ENTRY_RES_BITS_V6); + + BUILD_BUG_ON(dtlb_flags == 0); + + return dtlb_flags; +} + +static inline probe_entry_t +fill_dtlb_val_v6_flags(const uni_dtlb_t uni_flags) +{ + return covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} +static inline probe_entry_t +get_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val & covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} +static inline bool +test_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return get_dtlb_val_v6_flags(dtlb_val, uni_flags) != 0; +} +static inline probe_entry_t +set_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val | covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} +static inline probe_entry_t +clear_dtlb_val_v6_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return dtlb_val & ~covert_uni_dtlb_flags_to_dtlb_val_v6(uni_flags); +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_MMU_REGS_TYPES_V6_H */ diff --git a/arch/e2k/include/asm/mmu.h b/arch/e2k/include/asm/mmu.h new file mode 100644 index 0000000..9c9ba40 --- /dev/null +++ b/arch/e2k/include/asm/mmu.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _E2K_MMU_H_ +#define _E2K_MMU_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +/* hw_context_lifetime.state possible values. + * Actual values are important because we use atomic_inc/dec to switch states. + * "state" field helps to avoid double use, and "alive" field helps to avoid + * double free. 
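+ *
+ * For example (illustrative, given the layout below): taking a READY
+ * context is a single refcount_inc(), moving state 0 -> 1
+ * (HWC_STATE_READY -> HWC_STATE_BUSY) without touching "alive", while
+ * the final release subtracts HWC_ALIVE_BIAS (1 << 16), clearing
+ * "alive" without touching "state".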
 */
+enum {
+	HWC_STATE_READY = 0U,	/* The context is free to take */
+	HWC_STATE_BUSY = 1U,	/* A thread is currently executing on the context */
+	HWC_STATE_COPYING = 2U	/* The context is being copied in fork() */
+};
+#define HWC_STATE_SHIFT	0
+#define HWC_ALIVE_BIAS	(1U << 16)
+union hw_context_lifetime {
+	refcount_t refcount;
+	struct {
+		u16 state;
+		u16 alive;
+	};
+};
+
+enum hw_context_fmt {
+	CTX_32_BIT,
+	CTX_64_BIT,
+	CTX_128_BIT
+};
+
+struct hw_context {
+	u64 key;	/* For finding this context in the hash table */
+	struct rhash_head hash_entry;
+	union hw_context_lifetime lifetime;
+
+	struct {
+		typeof_member(struct pt_regs, stacks) stacks;
+		typeof_member(struct pt_regs, crs) crs;
+		typeof_member(struct pt_regs, wd) wd;
+		typeof_member(struct pt_regs, kernel_entry) kernel_entry;
+	} regs;
+
+	/* After user_hw_stacks_copy_full() there is one user frame
+	 * left in kernel chain stack, its contents are saved here. */
+	e2k_mem_crs_t prev_crs;
+
+	/* Data from thread_info */
+	struct {
+		data_stack_t u_stack;		/* User data stack info */
+		hw_stack_t u_hw_stack;		/* User hardware stacks info */
+		struct list_head getsp_adj;
+		struct list_head old_u_pcs_list;
+#ifdef CONFIG_PROTECTED_MODE
+		global_store_t *g_list;
+		e2k_addr_t multithread_address;
+		struct rw_semaphore *lock;
+#endif /* CONFIG_PROTECTED_MODE */
+		struct signal_stack signal_stack;
+	} ti;
+
+	/* Pointer to the corresponding user context */
+	void __user *ucp;
+	/* Pointer to the next context to run */
+	void __user *p_uc_link;
+	enum hw_context_fmt ptr_format;
+
+	/* Used to free in a separate context (for better performance) */
+	struct rcu_head rcu_head;
+	struct work_struct work;
+	struct mm_struct *mm;
+} ____cacheline_aligned_in_smp;
+
+#ifdef CONFIG_PROTECTED_MODE
+	/*
+	 * The list below is used to restore descriptors from pointers
+	 * when the kernel needs to pass a descriptor back to user protected space.
+	 * The list stores pointer/descriptor pairs with some extra info if any.
+	 * When the kernel needs to pass a descriptor to a signal handler operating
+	 * in the protected mode, it looks for the given pointer in the list,
+	 * and gets the descriptor it corresponds to if available.
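+	 *
+	 * An illustrative lookup (sketch only):
+	 *
+	 *	struct sival_ptr_list *node;
+	 *	list_for_each_entry(node, &context->sival_ptr_list_head, link)
+	 *		if (node->kernel_ptr == ptr)
+	 *			break;
+	 *
+	 * after which the descriptor is rebuilt from node->user_ptr_lo/hi
+	 * and node->user_tags.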
+ */ +struct sival_ptr_list { + struct list_head link; /* connects links into the list */ + void __user *kernel_ptr; + unsigned long long int user_ptr_lo; + unsigned long long int user_ptr_hi; + unsigned char user_tags; + unsigned int signum; /* mq_notify needs keeping a single pointer + * for a particular registered signal + */ +}; +#endif + +typedef struct { + unsigned long cpumsk[NR_CPUS]; + atomic_t cur_cui; /* first free cui */ + atomic_t tstart; /* first free type for TSD */ + int tcount; + + /* + * Bit array for saving the information about + * busy and free entries in cut + */ + DECLARE_BITMAP(cut_mask, USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)); + /* + * Mutex lock for protecting of cut_mask + */ + struct mutex cut_mask_lock; + + /* + * For makecontext/swapcontext - a hash list of available contexts + */ + struct rhashtable hw_contexts; + + /* + * for multithreads coredump + * + * e2k arch has 3 stacks (2 hardware_stacks) + * for core file needed all stacks + * The threads must free pc & p stacks after finish_coredump + * The below structure are needed to delay free hardware_stacks + */ + struct list_head delay_free_stacks; + struct rw_semaphore core_lock; +#ifdef CONFIG_PROTECTED_MODE + allpools_t umpools; + struct list_head sival_ptr_list_head; + struct rw_semaphore sival_ptr_list_sem; + /* The field below controls different debug/error output + * purposed to support porting libraries to protected mode: + */ + unsigned long pm_sc_debug_mode; + /* Controls extra info and issues identified by kernel to journal. + * Use command 'dmesg' to display these messages. + * For particular controls see: + * arch/e2k/include/uapi/asm/protected_mode.h + */ +#endif /* CONFIG_PROTECTED_MODE */ + + /* List of cached user hardware stacks */ + struct list_head cached_stacks; + spinlock_t cached_stacks_lock; + size_t cached_stacks_size; +} mm_context_t; + + +/* Version for fast syscalls, so it must be inlined. + * Must be used only for current. 
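+ * (It reads the signal stack and thread_info fields of the running
+ * task without locking, which is only safe for current.)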
*/ +static inline u64 context_ti_key_fast_syscall(const struct thread_info *ti) +{ + struct pt_regs __user *u_regs = __signal_pt_regs_last(ti); + + if (u_regs) + return u_regs->stacks.top; + + return ti->u_stack.top; +} + +extern long hw_context_lookup_pcsp_and_switch(e2k_pcsp_lo_t pcsp_lo, + e2k_usd_lo_t usd_lo); +extern int hw_contexts_init(struct task_struct *p, mm_context_t *mm_context, + bool is_fork); +extern void hw_contexts_destroy(mm_context_t *mm_context); +extern long do_swapcontext(void __user *oucp, const void __user *ucp, + bool save_prev_ctx, int format); +extern void makecontext_trampoline(void); +extern void makecontext_trampoline_continue(void); +extern void hw_context_deactivate_mm(struct task_struct *dead_task); + +struct ucontext; +extern long sys_setcontext(const struct ucontext __user *ucp, + int sigsetsize); +extern long sys_makecontext(struct ucontext __user *ucp, void (*func)(void), + u64 args_size, void __user *args, int sigsetsize); +extern long sys_freecontext(struct ucontext __user *ucp); +extern long sys_swapcontext(struct ucontext __user *oucp, + const struct ucontext __user *ucp, int sigsetsize); +#ifdef CONFIG_COMPAT +struct ucontext_32; +extern long compat_sys_setcontext(const struct ucontext_32 __user *ucp, + int sigsetsize); +extern long compat_sys_makecontext(struct ucontext_32 __user *ucp, + void (*func)(void), u64 args_size, void __user *args, + int sigsetsize); +extern long compat_sys_freecontext(struct ucontext_32 __user *ucp); +extern long compat_sys_swapcontext(struct ucontext_32 __user *oucp, + const struct ucontext_32 __user *ucp, int sigsetsize); +#endif +#ifdef CONFIG_PROTECTED_MODE +struct ucontext_prot; +extern long protected_sys_setcontext( + const struct ucontext_prot __user *ucp, + int sigsetsize); +extern long protected_sys_makecontext(struct ucontext_prot __user *ucp, + void (*func)(void), u64 args_size, void __user *args, + int sigsetsize); +extern long protected_sys_freecontext(struct ucontext_prot __user *ucp); +extern long protected_sys_swapcontext(struct ucontext_prot __user *oucp, + const struct ucontext_prot __user *ucp, int sigsetsize); +#endif + +struct vm_userfaultfd_ctx; +extern unsigned long mremap_to(unsigned long addr, unsigned long old_len, + unsigned long new_addr, unsigned long new_len, bool *locked, + struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap_early, + struct list_head *uf_unmap); + +#endif /* _E2K_MMU_H_ */ diff --git a/arch/e2k/include/asm/mmu_context.h b/arch/e2k/include/asm/mmu_context.h new file mode 100644 index 0000000..777fe71 --- /dev/null +++ b/arch/e2k/include/asm/mmu_context.h @@ -0,0 +1,538 @@ +/* + * asm-e2k/mmu_context.h + */ + +#ifndef _E2K_MMU_CONTEXT_H_ +#define _E2K_MMU_CONTEXT_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * The high bits of the "context_cache" (and the "mm->context") are the + * CONTEXT _version_ code. A version of 0 is always considered invalid, + * so to invalidate another process only need to do "p->mm->context = 0". + * + * If more CONTEXT's than the processor has is needed, it invalidates all + * TLB's ('flush_tlb_all()') and starts a new CONTEXT version. + * That will automatically force a new CONTEXT for any other processes + * the next time they want to run. 
+ * + * cpu_last_context(cpuid): + * 63 0 + * +-------------------------------+------------------+ + * | asn version of this processor | hardware CONTEXT | + * +-------------------------------+------------------+ + */ + +#define CTX_HARDWARE_BITS 12 +#define CTX_HARDWARE_MASK ((1UL << CTX_HARDWARE_BITS) - 1) +#define CTX_HARDWARE_MAX CTX_HARDWARE_MASK +#define CTX_VERSION_SHIFT CTX_HARDWARE_BITS +#define CTX_VERSION_SIZE (1UL << CTX_VERSION_SHIFT) +#define CTX_VERSION_MASK (~(CTX_VERSION_SIZE - 1)) +#define CTX_FIRST_VERSION_NUM 1UL +#define CTX_FIRST_VERSION (CTX_FIRST_VERSION_NUM << CTX_VERSION_SHIFT) + +#define CTX_HARDWARE(ctx) ((ctx) & CTX_HARDWARE_MASK) +#define CTX_VERSION(ctx) ((ctx) & CTX_VERSION_MASK) + +#ifdef CONFIG_SMP +#include +//spin_lock is needed: #define cpu_last_context(cpuid) (cpu_data[cpuid].mmu_last_context) +#define my_cpu_last_context() (my_cpu_data.mmu_last_context) +#define my_cpu_last_context1(num_cpu) (my_cpu_data1(num_cpu).mmu_last_context) +#else +extern unsigned long mmu_last_context; +//#define cpu_last_context(cpuid) mmu_last_context +#define my_cpu_last_context() mmu_last_context +#define my_cpu_last_context1(num_cpu) mmu_last_context +#endif /* CONFIG_SMP */ + +extern int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); +extern void arch_exit_mmap(struct mm_struct *mm); + +static inline void +reload_context_mask(unsigned long mask) +{ + set_MMU_CONT(CTX_HARDWARE(mask)); +} + +/* + * Get process new MMU context. This is needed when the page table + * pointer is changed or when the CONTEXT of the current process is updated + * This proc is called under closed interrupts or preempt_disable() + */ + +static inline unsigned long +get_new_mmu_pid(mm_context_t *context, int num_cpu) +{ + unsigned long ctx; + unsigned long next; + + /* Interrupts should be disabled to not bother about + * async-safety (calls to this function from the same + * CPU after it was interrupted). */ + + WARN_ON_ONCE(!__raw_all_irqs_disabled()); + + ctx = my_cpu_last_context1(num_cpu); + next = ctx + 1; + if (CTX_HARDWARE(next) == E2K_KERNEL_CONTEXT) + next ++; + if (CTX_VERSION(ctx) != CTX_VERSION(next)) { + flush_TLB_all(); + flush_ICACHE_all(); + if (CTX_VERSION(next) < CTX_FIRST_VERSION) { + next = CTX_FIRST_VERSION; + if (CTX_HARDWARE(next) == E2K_KERNEL_CONTEXT) + next ++; + } + } + + /* Another CPU might have written 0 to our cpu's mm context + * while we were getting the next context. But it is OK since + * we are changing the context anyway, and if this happens we + * will just rewrite that 0 with the new context. */ + context->cpumsk[num_cpu] = next; + my_cpu_last_context1(num_cpu) = next; + + return next; +} + +static inline unsigned long +get_new_mmu_context(struct mm_struct *mm, int num_cpu) +{ + return get_new_mmu_pid(&mm->context, num_cpu); +} + +/* + * Get the process current MMU context. + */ +static inline unsigned long +get_mmu_pid(mm_context_t *context, int cpu) +{ + unsigned long next; + + /* check if our CPU MASK is of an older generation and thus invalid: */ + next = context->cpumsk[cpu]; + if (unlikely(next == 0 || CTX_VERSION(my_cpu_last_context1(cpu)) + != CTX_VERSION(next))) + next = get_new_mmu_pid(context, cpu); + + return next; +} + +static inline unsigned long +get_mmu_context(struct mm_struct *mm, int cpu) +{ + return get_mmu_pid(&mm->context, cpu); +} + +/* + * Get the process current MMU context. 
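+ *
+ * On the context-switch path this pairs with reload_context_mask();
+ * in sketch form (see do_switch_mm() below):
+ *
+ *	mask = get_mmu_context(next_mm, cpu);
+ *	reload_context_mask(mask);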
+ */ +static inline void +copy_mmu_pid(mm_context_t *pid_to, mm_context_t *pid_from) +{ + *pid_to = *pid_from; +} + +static inline void +reload_mmu_context(struct mm_struct *mm) +{ + unsigned long ctx, flags; + int cpu; + + raw_all_irq_save(flags); + cpu = smp_processor_id(); + ctx = get_new_mmu_context(mm, cpu); + reload_context_mask(ctx); + raw_all_irq_restore(flags); +} +static inline void +invalidate_mmu_context(struct mm_struct *mm) +{ + int cpu = raw_smp_processor_id(); +#ifdef CONFIG_SMP + /* + * Remove this cpu from mm_cpumask. This might be + * needed, for example, after sys_io_setup() if the + * kernel thread which was using this mm received + * flush ipi (unuse_mm() does not clear mm_cpumask). + * And maybe there are other such places where + * a kernel thread uses user mm. + */ + cpumask_clear_cpu(cpu, mm_cpumask(mm)); +#endif + mm->context.cpumsk[cpu] = 0; +} + +extern inline void +enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* + * Initialize a new mmu context. This is invoked when a new + * address space instance (unique or shared) is instantiated. + * This just needs to set mm->context[] to an invalid context. + */ +static inline int +__init_new_context(struct task_struct *p, struct mm_struct *mm, + mm_context_t *context) +{ + bool is_fork = p && (p != current); + int ret; + + memset(&context->cpumsk, 0, nr_cpu_ids * sizeof(context->cpumsk[0])); + + if (is_fork) { + /* + * Copy data on user fork + */ + mm_context_t *curr_context = ¤t->mm->context; + + /* + * Copy cut mask from the context of parent process + * to the context of new process + */ + mutex_lock(&curr_context->cut_mask_lock); + bitmap_copy((unsigned long *) &context->cut_mask, + (unsigned long *) &curr_context->cut_mask, + USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)); + mutex_unlock(&curr_context->cut_mask_lock); + } else { + /* + * Initialize by zero cut_mask of new process + */ + mutex_init(&context->cut_mask_lock); + bitmap_zero((unsigned long *) &context->cut_mask, + USER_CUT_AREA_SIZE/sizeof(e2k_cute_t)); + } + + atomic_set(&context->tstart, 1); + + init_rwsem(&context->sival_ptr_list_sem); + INIT_LIST_HEAD(&context->sival_ptr_list_head); + + INIT_LIST_HEAD(&context->delay_free_stacks); + init_rwsem(&context->core_lock); + + INIT_LIST_HEAD(&context->cached_stacks); + spin_lock_init(&context->cached_stacks_lock); + context->cached_stacks_size = 0; + + if (mm == NULL) + return 0; + + ret = hw_contexts_init(p, context, is_fork); + return ret; +} + +static inline int +init_new_context(struct task_struct *p, struct mm_struct *mm) +{ + return __init_new_context(p, mm, &mm->context); +} + +static inline int +init_new_mmu_pid(mm_context_t *context) +{ + return __init_new_context(NULL, NULL, context); +} + +extern void destroy_cached_stacks(mm_context_t *context); + +/* + * Destroy a dead context. This occurs when mmput drops the + * mm_users count to zero, the mmaps have been released, and + * all the page tables have been flushed. The function job + * is to destroy any remaining processor-specific state. + */ +static inline void destroy_context(struct mm_struct *mm) +{ + destroy_cached_stacks(&mm->context); +} + + +/* + * Force a context reload. 
This is needed when context is changed + */ +static inline void +reload_mmu_pid(mm_context_t *context, int num_cpu) +{ + unsigned long ctx = context->cpumsk[num_cpu]; + + if (!ctx) + ctx = get_new_mmu_pid(context, num_cpu); + set_MMU_CONT(CTX_HARDWARE(ctx)); +} +static inline void +reload_context(struct mm_struct *mm, int num_cpu) +{ + reload_mmu_pid(&mm->context, num_cpu); +} + +/* + * Force a root page table pointer reload. + */ +static inline void +reload_root_pgd(pgd_t *pgd) +{ + if (MMU_IS_SEPARATE_PT()) { + set_MMU_U_PPTB(__pa(pgd)); + } else { +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (!THERE_IS_DUP_KERNEL) { + set_MMU_U_PPTB(__pa(pgd)); + } +#else /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + set_MMU_U_PPTB(__pa(pgd)); +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + } +} +extern inline void +reload_root_pt(struct mm_struct *mm) +{ + pgd_t *pgd; + + if (mm == &init_mm) { + pgd = cpu_kernel_root_pt; + if ((unsigned long) pgd >= KERNEL_BASE) + pgd = __va(kernel_va_to_pa(pgd)); + } else { + pgd = mm->pgd; + } + reload_root_pgd(pgd); +} +/* + * Force the kernel root page table pointer reload. + */ +static inline void +set_root_pt(pgd_t *root_pt) +{ + BUG_ON(MMU_IS_SEPARATE_PT()); + set_MMU_U_PPTB(__pa(root_pt)); +} + +/* + * Switch a root page table pointer and context. + */ +static inline void +reload_thread(struct mm_struct *mm) +{ + unsigned long flags; + int num_cpu; + + preempt_disable(); + num_cpu = raw_smp_processor_id(); + if (!MMU_IS_SEPARATE_PT()) { +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + if (THERE_IS_DUP_KERNEL) { + spin_lock(&mm->page_table_lock); + copy_user_pgd_to_kernel_root_pt(mm->pgd); + spin_unlock(&mm->page_table_lock); + } +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + } + raw_all_irq_save(flags); + reload_root_pt(mm); + reload_context(mm, num_cpu); + raw_all_irq_restore(flags); + preempt_enable(); +} + +static inline void +do_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next, int switch_pgd); + +/* + * Activate a new MM instance for the current task. + */ +static inline void +native_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + do_switch_mm(active_mm, mm, NULL, false); +} + +static inline void call_switch_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm, struct task_struct *next, + int switch_pgd, int switch_mm) +{ + if (switch_mm || switch_pgd) + do_switch_mm(prev_mm, next_mm, next, switch_pgd); +} + +/* Virtualization support */ + +extern void native_deactivate_mm(struct task_struct *dead_task, + struct mm_struct *mm); + +#include + +/* + * Switch from address space PREV to address space NEXT. 
+ * interrupt was disabled by caller + */ +static inline void +do_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next, int switch_pgd) +{ + int cpu = raw_smp_processor_id(); + unsigned long flags, mask; + + if (likely(prev_mm != next_mm)) { + raw_all_irq_save(flags); + + if (likely(next_mm)) { +#ifdef CONFIG_SMP + /* Start receiving flush ipis for the next mm */ + cpumask_set_cpu(cpu, mm_cpumask(next_mm)); + + /* Without a memory barrier, a following race can happen + * (CPU0 executes switch_mm, CPU1 executes flush_tlb): + * + * -----------------------------+----------------------- + * CPU0 | CPU1 + * -----------------------------+----------------------- + * read next_mm->context | + * for CPU0 | + * | set next_mm->context + * | for CPU0 to 0 + * the loaded value has older | + * context version -> update it | + * with get_new_mmu_context() | + * -> 0 in next_mm->context | execute memory barrier + * is rewritten | + * | CPU0 is not set in + * | mm_cpumask(next_mm), + * | so ipi's not send + * set CPU0 bit in | + * mm_cpumask(next_mm) | + * -----------------------------+----------------------- + * + * To avoid the races both CPU1 and CPU0 execute memory + * barriers: + * -----------------------------+----------------------- + * CPU0 | CPU1 + * -----------------------------+----------------------- + * set CPU0 bit in | set next_mm->context + * mm_cpumask(next_mm) | for CPU0 to 0 + * | + * execute memory barrier | execute memory barrier + * | + * read next_mm->context | CPU0 is not set in + * for CPU0 | mm_cpumask(next_mm), + * | so ipi's not send + * -----------------------------+----------------------- + * This way either CPU0 will see 0 in next_mm or + * CPU1 will send the flush ipi to CPU0, or both. + * + * This barrier could be smp_mb__after_atomic(), but + * the membarrier syscall requires a full memory + * barrier after storing to rq->curr, before going + * back to user-space. + */ + smp_mb(); +#endif + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT + /* Load user page table */ + if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { + copy_user_pgd_to_kernel_root_pt(next_mm->pgd); + } +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + + /* Switch context */ + reload_root_pt(next_mm); + mask = get_mmu_context(next_mm, cpu); + reload_context_mask(mask); + } + +#ifdef CONFIG_SMP + /* Stop flush ipis for the previous mm */ + if (likely(prev_mm)) + cpumask_clear_cpu(cpu, mm_cpumask(prev_mm)); +#endif + raw_all_irq_restore(flags); + } else { + /* Switching between threads, nothing to do here */ + } +} + +static inline void need_switch_mm(struct task_struct *prev, + struct task_struct *next, struct mm_struct *oldmm, + struct mm_struct *mm, int *switch_pgd, int *switch_mm) +{ + *switch_pgd = false; + *switch_mm = mm != NULL; +} + +/* + * Switch from address space PREV to address space NEXT. 
+ */ +static inline void +switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + int switch_pgd, switch_mm; + + need_switch_mm(current, next, prev_mm, next_mm, + &switch_pgd, &switch_mm); + BUG_ON(switch_mm && switch_pgd); + + call_switch_mm(prev_mm, next_mm, next, switch_pgd, switch_mm); +} + +/* + * Set kernel MMU state + */ +static inline void +set_kernel_MMU_state(void) +{ + e2k_addr_t root_base = kernel_va_to_pa(cpu_kernel_root_pt); + + E2K_WAIT_ALL; + if (MMU_IS_SEPARATE_PT()) { + BUILD_BUG_ON(MMU_SEPARATE_KERNEL_VAB != PAGE_OFFSET); + WRITE_MMU_OS_VPTB(MMU_SEPARATE_KERNEL_VPTB); + WRITE_MMU_OS_PPTB(root_base); + WRITE_MMU_OS_VAB(MMU_SEPARATE_KERNEL_VAB); + WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + } else { + WRITE_MMU_U_VPTB(MMU_UNITED_KERNEL_VPTB); + WRITE_MMU_U_PPTB(root_base); + WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + } + E2K_WAIT_ALL; +} + +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT +extern inline void +set_secondary_space_MMU_state(void) +{ + unsigned long mmu_cr; + + mmu_cr = get_MMU_CR(); + mmu_cr |= _MMU_CR_UPT_EN; + if (machine.native_iset_ver >= E2K_ISET_V5) + mmu_cr |= _MMU_CR_SNXE; + set_MMU_CR(mmu_cr); +} +#else /* ! CONFIG_SECONDARY_SPACE_SUPPORT */ +#define set_secondary_space_MMU_state() +#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */ + +extern void makecontext_trampoline_switched(void); + +#endif /* _E2K_MMU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/mmu_fault.h b/arch/e2k/include/asm/mmu_fault.h new file mode 100644 index 0000000..7e49779 --- /dev/null +++ b/arch/e2k/include/asm/mmu_fault.h @@ -0,0 +1,271 @@ +#ifndef _E2K_MMU_FAULT_H_ +#define _E2K_MMU_FAULT_H_ + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_PA_MODE +#undef DebugPA +#define DEBUG_PA_MODE 0 /* page table allocation */ +#define DebugPA(fmt, args...) 
\
+({							\
+	if (DEBUG_PA_MODE)				\
+		pr_info(fmt, ##args);			\
+})
+
+static inline int
+native_guest_addr_to_host(void **addr)
+{
+	/* there are no guests, so no conversion is needed */
+	return 0;
+}
+
+static inline void *
+native_guest_ptr_to_host(void *ptr, int size)
+{
+	/* there are no guests, so no conversion is needed */
+	return ptr;
+}
+
+static inline long
+native_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
+		u32 data_tag, u64 st_rec_opc, u64 data_ext, u32 data_ext_tag,
+		u64 opc_ext, int chan, int qp_store, int atomic_store)
+{
+	if (atomic_store) {
+		NATIVE_RECOVERY_TAGGED_STORE_ATOMIC(address, wr_data, data_tag,
+			st_rec_opc, data_ext, data_ext_tag, opc_ext);
+	} else {
+		NATIVE_RECOVERY_TAGGED_STORE(address, wr_data, data_tag,
+			st_rec_opc, data_ext, data_ext_tag, opc_ext,
+			chan, qp_store);
+	}
+
+	return 0;
+}
+static inline long
+native_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
+		u64 ld_rec_opc, int chan)
+{
+	u64 val;
+	u32 tag;
+
+	NATIVE_RECOVERY_TAGGED_LOAD_TO(address, ld_rec_opc, val, tag, chan);
+	*ld_val = val;
+	*data_tag = tag;
+	return 0;
+}
+static inline long
+native_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
+		e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan,
+		int qp_load, int atomic_load)
+{
+	if (atomic_load) {
+		NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_VR_ATOMIC(addr_from, addr_to,
+				addr_to_hi, vr, ld_rec_opc);
+	} else {
+		NATIVE_MOVE_TAGGED_DWORD_WITH_OPC_CH_VR(addr_from, addr_to,
+				addr_to_hi, vr, ld_rec_opc, chan, qp_load);
+	}
+
+	return 0;
+}
+
+static inline long
+native_recovery_faulted_load_to_cpu_greg(e2k_addr_t address, u32 greg_num_d,
+		int vr, u64 ld_rec_opc, int chan_opc,
+		int qp_load, int atomic_load)
+{
+	if (atomic_load) {
+		NATIVE_RECOVERY_LOAD_TO_A_GREG_VR_ATOMIC(address,
+				ld_rec_opc, greg_num_d, vr, qp_load);
+	} else {
+		NATIVE_RECOVERY_LOAD_TO_A_GREG_CH_VR(address,
+				ld_rec_opc, greg_num_d, chan_opc, vr, qp_load);
+	}
+
+	return 0;
+}
+
+static inline long
+native_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d,
+		int vr, u64 ld_rec_opc, int chan_opc,
+		int qp_load, int atomic_load, u64 *saved_greg_lo,
+		u64 *saved_greg_hi)
+{
+	if (!saved_greg_lo) {
+		return native_recovery_faulted_load_to_cpu_greg(address,
+				greg_num_d, vr, ld_rec_opc, chan_opc, qp_load,
+				atomic_load);
+	} else {
+		return native_recovery_faulted_move(address,
+				(u64) saved_greg_lo, (u64) saved_greg_hi,
+				vr, ld_rec_opc, chan_opc, qp_load, atomic_load);
+	}
+}
+
+static inline bool
+native_is_guest_kernel_gregs(struct thread_info *ti,
+		unsigned greg_num_d, u64 **greg_copy)
+{
+	/* the native kernel does not use such registers */
+	/* the host kernel saves/restores such registers itself */
+	return false;
+}
+
+static inline long
+native_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	NATIVE_MOVE_TAGGED_WORD(addr_from, addr_to);
+
+	return 0;
+}
+static inline long
+native_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	NATIVE_MOVE_TAGGED_DWORD(addr_from, addr_to);
+
+	return 0;
+}
+static inline long
+native_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	NATIVE_MOVE_TAGGED_QWORD(addr_from, addr_from + sizeof(long),
+				addr_to, addr_to + sizeof(long));
+
+	return 0;
+}
+
+extern void native_handle_mpdma_fault(e2k_addr_t hva);
+
+extern e2k_addr_t print_address_ptes(pgd_t *pgdp, e2k_addr_t address,
+					int kernel);
+
+
+/*
+ * Paravirtualization support
+ */
+#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native
kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline long +recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, u32 data_tag, + u64 st_rec_opc, u64 data_ext, u32 data_ext_tag, u64 opc_ext, + int chan, int qp_store, int atomic_store) +{ + return native_recovery_faulted_tagged_store(address, wr_data, data_tag, + st_rec_opc, data_ext, data_ext_tag, opc_ext, + chan, qp_store, atomic_store); +} +static inline long +recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag, + u64 ld_rec_opc, int chan) +{ + return native_recovery_faulted_load(address, ld_val, data_tag, + ld_rec_opc, chan); +} +static inline long +recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, + int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load, u64 *saved_greg_lo, + u64 *saved_greg_hi) +{ + return native_recovery_faulted_load_to_greg(address, greg_num_d, + vr, ld_rec_opc, chan, qp_load, atomic_load, + saved_greg_lo, saved_greg_hi); +} +static inline long +recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to, + e2k_addr_t addr_to_hi, int vr, u64 ld_rec_opc, int chan, + int qp_load, int atomic_load) +{ + return native_recovery_faulted_move(addr_from, addr_to, addr_to_hi, vr, + ld_rec_opc, chan, qp_load, atomic_load); +} + +static inline bool +is_guest_kernel_gregs(struct thread_info *ti, + unsigned greg_num_d, u64 **greg_copy) +{ + return native_is_guest_kernel_gregs(ti, greg_num_d, greg_copy); +} +static inline void +move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_word(addr_from, addr_to); +} +static inline void +move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_dword(addr_from, addr_to); +} +static inline void +move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to) +{ + native_move_tagged_qword(addr_from, addr_to); +} +static inline void +handle_mpdma_fault(e2k_addr_t hva) +{ + native_handle_mpdma_fault(hva); +} + +# ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +static inline int guest_addr_to_host(void **addr, pt_regs_t *regs) +{ + return native_guest_addr_to_host(addr); +} + +static inline void *guest_ptr_to_host(void *ptr, int size, pt_regs_t *regs) +{ + return native_guest_ptr_to_host(ptr, size); +} +# else /* CONFIG_VIRTUALIZATION */ +/* it is native host kernel with virtualization support */ +#include +# endif /* !CONFIG_VIRTUALIZATION */ + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else + #error "Unknown virtualization type" +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +static inline long +store_tagged_dword(void *address, u64 data, u32 tag) +{ + return recovery_faulted_tagged_store((e2k_addr_t) address, data, tag, + TAGGED_MEM_STORE_REC_OPC, 0, 0, 0, 1, 0, 0); +} + +static inline long +load_value_and_tagd(const void *address, u64 *ld_val, u8 *ld_tag) +{ + return recovery_faulted_load((e2k_addr_t) address, ld_val, ld_tag, + TAGGED_MEM_LOAD_REC_OPC, 0); +} + +static inline long +load_qvalue_and_tagq(e2k_addr_t address, u64 *val_lo, u64 *val_hi, + u8 *tag_lo, u8 *tag_hi) +{ + long ret; + + ret = recovery_faulted_load(address, val_lo, tag_lo, + TAGGED_MEM_LOAD_REC_OPC, 0); + ret |= recovery_faulted_load(address + sizeof(long), val_hi, tag_hi, + TAGGED_MEM_LOAD_REC_OPC, 0); + return ret; +} + +#endif /* _E2K_MMU_FAULT_H_ */ diff --git a/arch/e2k/include/asm/mmu_regs.h b/arch/e2k/include/asm/mmu_regs.h new file mode 100644 index 0000000..8556630 --- /dev/null +++ b/arch/e2k/include/asm/mmu_regs.h @@ -0,0 +1,789 @@ +/* + * MMU structures & registers. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_MMU_REGS_H_ +#define _E2K_MMU_REGS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#endif /* __ASSEMBLY__ */ + +#include +#include +#include +#include +#include + +#undef DEBUG_MR_MODE +#undef DebugMR +#define DEBUG_MR_MODE 0 /* MMU registers access */ +#define DebugMR(...) DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__) + +#undef DEBUG_MCR_MODE +#undef DebugMCR +#define DEBUG_MCR_MODE 0 /* MMU CONTEXT registers access */ +#define DebugMCR(...) DebugPrint(DEBUG_MCR_MODE, ##__VA_ARGS__) + +#undef DEBUG_CLW_MODE +#undef DebugCLW +#define DEBUG_CLW_MODE 0 /* CLW registers access */ +#define DebugCLW(...) DebugPrint(DEBUG_CLW_MODE, ##__VA_ARGS__) + +#undef DEBUG_TLB_MODE +#undef DebugTLB +#define DEBUG_TLB_MODE 0 /* TLB registers access */ +#define DebugTLB(...) 
DebugPrint(DEBUG_TLB_MODE, ##__VA_ARGS__) + +/* + * MMU registers operations + */ + +#ifndef __ASSEMBLY__ +/* + * Write MMU register + */ +static inline void +write_MMU_reg(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + DebugMR("Write MMU reg 0x%lx value 0x%llx\n", + MMU_REG_NO_FROM_MMU_ADDR(mmu_addr), mmu_reg_val(mmu_reg)); + WRITE_MMU_REG(mmu_addr_val(mmu_addr), mmu_reg_val(mmu_reg)); +} + +static inline void +boot_write_MMU_reg(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + BOOT_WRITE_MMU_REG(mmu_addr_val(mmu_addr), mmu_reg_val(mmu_reg)); +} + +/* + * Read MMU register + */ + +static inline mmu_reg_t +read_MMU_reg(mmu_addr_t mmu_addr) +{ + DebugMR("Read MMU reg 0x%lx\n", + MMU_REG_NO_FROM_MMU_ADDR(mmu_addr)); + return __mmu_reg(READ_MMU_REG(mmu_addr_val(mmu_addr))); +} + +static inline mmu_reg_t +boot_read_MMU_reg(mmu_addr_t mmu_addr) +{ + return __mmu_reg(BOOT_READ_MMU_REG(mmu_addr_val(mmu_addr))); +} + +/* + * Read MMU Control register + */ +#define read_MMU_CR() read_MMU_reg(MMU_ADDR_CR) +#define READ_MMU_CR() \ + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO)) +static inline unsigned long +get_MMU_CR(void) +{ + unsigned long mmu_cr; + + DebugMR("Get MMU Control Register\n"); + mmu_cr = READ_MMU_CR(); + DebugMR("MMU Control Register state : 0x%lx\n", mmu_cr); + return mmu_cr; +} + +/* + * Write MMU Control register + */ +#define write_MMU_CR(mmu_cr) write_MMU_reg(MMU_ADDR_CR, mmu_cr) +#define WRITE_MMU_CR(mmu_cr) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +static inline void +set_MMU_CR(unsigned long mmu_cr) +{ + DebugMR("Set MMU Control Register to 0x%lx\n", mmu_cr); + WRITE_MMU_CR(mmu_cr); + DebugMR("Read MMU Control Register : 0x%llx\n", + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO))); +} +#define BOOT_WRITE_MMU_CR(mmu_cr) \ + BOOT_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +static inline void +boot_set_MMU_CR(unsigned long mmu_cr) +{ + BOOT_WRITE_MMU_CR(mmu_cr); +} + +/* + * Write MMU Context register + */ +#define write_MMU_CONT(mmu_cont) \ + WRITE_MMU_PID(mmu_cont) +#define WRITE_MMU_CONT(mmu_cont) \ + WRITE_MMU_PID(mmu_reg_val(mmu_cont)) +static inline void +set_MMU_CONT(unsigned long context) +{ + DebugMCR("Set MMU CONTEXT register to 0x%lx\n", context); + WRITE_MMU_PID(context); +} +#define BOOT_WRITE_MMU_CONT(mmu_cont) \ + BOOT_WRITE_MMU_PID(mmu_reg_val(mmu_cont)) +static inline void +boot_set_MMU_CONT(unsigned long context) +{ + BOOT_WRITE_MMU_CONT(context); +} + +/* + * Write MMU Control Register of secondary space table + */ +#define write_MMU_CR3_RG(mmu_page_dir) \ + write_MMU_reg(MMU_ADDR_CR3_RG, mmu_page_dir) +#define WRITE_MMU_CR3_RG(mmu_page_dir) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR3_RG_NO), \ + mmu_reg_val(mmu_page_dir)) +static inline void +set_MMU_CR3_RG(unsigned long mmu_page_dir) +{ + DebugMR("Set MMU INTEL page table base register to 0x%lx\n", + mmu_page_dir); + WRITE_MMU_CR3_RG(mmu_page_dir); +} + +#define get_MMU_CR3_RG() \ + (unsigned long)mmu_reg_val(read_MMU_reg(MMU_ADDR_CR3_RG)) +/* + * Write MMU page tables virtual base register + */ +#define WRITE_MMU_U_VPTB(mmu_virt_ptb) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +static inline void +set_MMU_U_VPTB(unsigned long mmu_virt_ptb) +{ + DebugMR("Set MMU page table virtual base register to 0x%lx\n", + mmu_virt_ptb); + WRITE_MMU_U_VPTB(mmu_virt_ptb); +} +#define BOOT_WRITE_MMU_U_VPTB(mmu_virt_ptb) \ + BOOT_WRITE_MMU_REG( \ + 
_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +static inline void +boot_set_MMU_U_VPTB(unsigned long mmu_virt_ptb) +{ + BOOT_WRITE_MMU_U_VPTB(mmu_virt_ptb); +} + +static inline void +set_MMU_OS_VPTB(unsigned long mmu_virt_ptb) +{ + DebugMR("Set MMU OS page table virtual base register to 0x%lx\n", + mmu_virt_ptb); + WRITE_MMU_OS_VPTB(mmu_virt_ptb); +} +static inline void +boot_set_MMU_OS_VPTB(unsigned long mmu_virt_ptb) +{ + BOOT_WRITE_MMU_OS_VPTB(mmu_virt_ptb); +} + +/* + * Write/read MMU root page table physical base register + */ +#define WRITE_MMU_U_PPTB(mmu_phys_ptb) \ + WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +#define READ_MMU_U_PPTB() \ + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO)) +static inline void +set_MMU_U_PPTB(unsigned long mmu_phys_ptb) +{ + DebugMR("Set MMU USER page table physical base register to 0x%lx\n", + mmu_phys_ptb); + WRITE_MMU_U_PPTB(mmu_phys_ptb); +} +#define BOOT_WRITE_MMU_U_PPTB(mmu_phys_ptb) \ + BOOT_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +static inline void +boot_set_MMU_U_PPTB(unsigned long mmu_phys_ptb) +{ + BOOT_WRITE_MMU_U_PPTB(mmu_phys_ptb); +} +static inline unsigned long +get_MMU_U_PPTB(void) +{ + return READ_MMU_U_PPTB(); +} + +static inline void +set_MMU_OS_PPTB(unsigned long mmu_phys_ptb) +{ + DebugMR("Set MMU OS root page table physical base register to 0x%lx\n", + mmu_phys_ptb); + WRITE_MMU_OS_PPTB(mmu_phys_ptb); +} +static inline void +boot_set_MMU_OS_PPTB(unsigned long mmu_phys_ptb) +{ + BOOT_WRITE_MMU_OS_PPTB(mmu_phys_ptb); +} + +/* + * Read MMU Trap Point register + */ +#define read_MMU_TRAP_POINT() read_MMU_reg(MMU_ADDR_TRAP_POINT) +#define READ_MMU_TRAP_POINT() \ + READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_POINT_NO)) + +/* + * Set MMU Trap Point register + */ +#define write_MMU_TRAP_POINT(trap_cellar) \ + write_MMU_reg(MMU_ADDR_TRAP_POINT, \ + MMU_TRAP_POINT((e2k_addr_t)trap_cellar)) +#define WRITE_MMU_TRAP_POINT(trap_cellar) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_POINT_NO), \ + _MMU_TRAP_POINT((e2k_addr_t)trap_cellar)) +static inline void +set_MMU_TRAP_POINT(void *trap_cellar) +{ + DebugMR("Set MMU Trap Point register to %px\n", trap_cellar); + WRITE_MMU_TRAP_POINT(trap_cellar); +} +#define BOOT_WRITE_MMU_TRAP_POINT(trap_cellar) \ + BOOT_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_POINT_NO), \ + _MMU_TRAP_POINT((e2k_addr_t)trap_cellar)) +static inline void +boot_set_MMU_TRAP_POINT(void *trap_cellar) +{ + BOOT_WRITE_MMU_TRAP_POINT(trap_cellar); +} + +/* + * Set MMU Trap Counter register + */ +#define NATIVE_WRITE_MMU_TRAP_COUNT(counter) \ + NATIVE_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO), \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define write_MMU_TRAP_COUNT(counter) \ + write_MMU_reg(MMU_ADDR_TRAP_COUNT, \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define WRITE_MMU_TRAP_COUNT(counter) \ + WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO), \ + (unsigned long)_MMU_TRAP_COUNT(counter)) +#define RESET_MMU_TRAP_COUNT() WRITE_MMU_TRAP_COUNT(0) +static inline void +set_MMU_TRAP_COUNT(unsigned int counter) +{ + DebugMR("Set MMU Trap Counter register to %d\n", counter); + WRITE_MMU_TRAP_COUNT(counter); +} +static inline void +reset_MMU_TRAP_COUNT(void) +{ + RESET_MMU_TRAP_COUNT(); +} +#define BOOT_WRITE_MMU_TRAP_COUNT(counter) \ + BOOT_WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \ + _MMU_TRAP_COUNT_NO), 
\
+			(unsigned long)_MMU_TRAP_COUNT(counter))
+#define BOOT_RESET_MMU_TRAP_COUNT()	BOOT_WRITE_MMU_TRAP_COUNT(0)
+static inline void
+boot_reset_MMU_TRAP_COUNT(void)
+{
+	BOOT_RESET_MMU_TRAP_COUNT();
+}
+
+/*
+ * Read MMU Trap Counter register
+ */
+#define NATIVE_get_MMU_TRAP_COUNT() \
+		((unsigned int)mmu_reg_val(native_read_MMU_reg( \
+						MMU_ADDR_TRAP_COUNT)))
+#define NATIVE_READ_MMU_TRAP_COUNT() \
+		((unsigned int)(NATIVE_READ_MMU_REG( \
+					_MMU_REG_NO_TO_MMU_ADDR_VAL( \
+						_MMU_TRAP_COUNT_NO))))
+#define get_MMU_TRAP_COUNT() \
+		((unsigned int)mmu_reg_val(read_MMU_reg(MMU_ADDR_TRAP_COUNT)))
+#define READ_MMU_TRAP_COUNT() \
+		((unsigned int)(READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \
+						_MMU_TRAP_COUNT_NO))))
+static inline unsigned int
+native_read_MMU_TRAP_COUNT(void)
+{
+	DebugMR("Read MMU Trap Counter register\n");
+	return NATIVE_READ_MMU_TRAP_COUNT();
+}
+static inline unsigned int
+read_MMU_TRAP_COUNT(void)
+{
+	DebugMR("Read MMU Trap Counter register\n");
+	return READ_MMU_TRAP_COUNT();
+}
+
+/*
+ * Set MMU Memory Protection Table Base register
+ */
+#define write_MMU_MPT_B(base) \
+		write_MMU_reg(MMU_ADDR_MPT_B, base)
+#define WRITE_MMU_MPT_B(base) \
+		WRITE_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_MPT_B_NO), \
+				mmu_reg_val(base))
+#define get_MMU_MPT_B() \
+		read_MMU_reg(MMU_ADDR_MPT_B)
+static inline void
+set_MMU_MPT_B(unsigned long base)
+{
+	DebugMR("Set MMU Memory Protection Table Base register to 0x%lx\n",
+		base);
+	WRITE_MMU_MPT_B(base);
+}
+
+/*
+ * Set MMU PCI Low Bound register
+ */
+#define write_MMU_PCI_L_B(bound) \
+		write_MMU_reg(MMU_ADDR_PCI_L_B, bound)
+#define WRITE_MMU_PCI_L_B(bound) \
+		WRITE_MMU_REG( \
+			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PCI_L_B_NO), \
+			mmu_reg_val(bound))
+static inline void
+set_MMU_PCI_L_B(unsigned long bound)
+{
+	DebugMR("Set MMU PCI low bound register to 0x%lx\n", bound);
+	WRITE_MMU_PCI_L_B(bound);
+}
+
+/*
+ * Set MMU Phys High Bound register
+ */
+#define write_MMU_PH_H_B(bound) \
+		write_MMU_reg(MMU_ADDR_PH_H_B, bound)
+#define WRITE_MMU_PH_H_B(bound) \
+		WRITE_MMU_REG( \
+			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PH_H_B_NO), \
+			mmu_reg_val(bound))
+static inline void
+set_MMU_PH_H_B(unsigned long bound)
+{
+	DebugMR("Set MMU Physical memory high bound register to 0x%lx\n",
+		bound);
+	WRITE_MMU_PH_H_B(bound);
+}
+
+/*
+ * Write User Stack Clean Window Disable register
+ */
+#define set_MMU_US_CL_D(val) \
+		write_MMU_reg(MMU_ADDR_US_CL_D, val)
+#define WRITE_MMU_US_CL_D(val) \
+		WRITE_MMU_REG( \
+			_MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_US_CL_D_NO), \
+			mmu_reg_val(val))
+static inline void
+write_MMU_US_CL_D(unsigned int disable_flag)
+{
+	DebugCLW("Set MMU US CLW Disable register to %d\n", disable_flag);
+	WRITE_MMU_US_CL_D(disable_flag);
+}
+
+/*
+ * Read User Stack Clean Window Disable register
+ */
+#define get_MMU_US_CL_D() \
+		read_MMU_reg(MMU_ADDR_US_CL_D)
+#define READ_MMU_US_CL_D() \
+		(unsigned int)READ_MMU_REG(_MMU_REG_NO_TO_MMU_ADDR_VAL( \
+						_MMU_US_CL_D_NO))
+
+/*
+ * Set Memory Type Range Registers (MTRRs)
+ */
+
+#define WRITE_MTRR_REG(no, val) \
+		WRITE_MMU_REG(MMU_ADDR_MTRR(no), mmu_reg_val(val))
+
+static inline void
+set_MMU_MTRR_REG(unsigned long no, long long value)
+{
+	DebugMR("Set MTRR#%ld register to 0x%llx\n", no, value);
+	WRITE_MTRR_REG(no, value);
+}
+
+/*
+ * Get Memory Type Range Registers (MTRRs)
+ */
+#define get_MMU_MTRR_REG(no) \
+		(unsigned long)READ_MMU_REG(MMU_ADDR_MTRR(no))
+
+static inline unsigned int
+read_MMU_US_CL_D(void)
+{
+	DebugCLW("Read MMU US CLW Disable register\n");
+	return (unsigned int)READ_MMU_US_CL_D();
+}
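+
+/*
+ * For example, _MMU_REG_NO_TO_MMU_ADDR_VAL() places the register number
+ * into bits [9:4] of the MMU address, so with _MMU_MPT_B_NO = 0x07 the
+ * accessor layering above reduces to (illustrative expansion only):
+ *
+ *	set_MMU_MPT_B(base)
+ *	  -> WRITE_MMU_MPT_B(base)
+ *	    -> WRITE_MMU_REG(0x70, mmu_reg_val(base))
+ */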
+
+
+/*
+ * Flush TLB page
+ */
+static inline void
+____flush_TLB_page(flush_op_t flush_op, flush_addr_t flush_addr)
+{
+	unsigned long flags;
+	bool fl_c_needed = cpu_has(CPU_HWBUG_TLB_FLUSH_L1D);
+
+	DebugTLB("Flush TLB page : op 0x%lx extended virtual addr 0x%lx\n",
+		flush_op_val(flush_op), flush_addr_val(flush_addr));
+
+	raw_all_irq_save(flags);
+	FLUSH_TLB_ENTRY(flush_op_val(flush_op), flush_addr_val(flush_addr));
+	if (fl_c_needed)
+		__E2K_WAIT(_fl_c);
+	raw_all_irq_restore(flags);
+}
+
+#define flush_TLB_page_begin()
+#define flush_TLB_page_end() \
+do { \
+	__E2K_WAIT(_fl_c | _ma_c); \
+} while (0)
+
+static inline void
+__flush_TLB_page(e2k_addr_t virt_addr, unsigned long context)
+{
+	____flush_TLB_page(flush_op_tlb_page_sys,
+			flush_addr_make_sys(virt_addr, context));
+}
+
+static inline void
+flush_TLB_page(e2k_addr_t virt_addr, unsigned long context)
+{
+	flush_TLB_page_begin();
+	__flush_TLB_page(virt_addr, context);
+	flush_TLB_page_end();
+}
+
+static inline void
+__flush_TLB_kernel_page(e2k_addr_t virt_addr)
+{
+	__flush_TLB_page(virt_addr, E2K_KERNEL_CONTEXT);
+}
+
+static inline void
+flush_TLB_kernel_page(e2k_addr_t virt_addr)
+{
+	flush_TLB_page_begin();
+	__flush_TLB_kernel_page(virt_addr);
+	flush_TLB_page_end();
+}
+
+static inline void
+__flush_TLB_ss_page(e2k_addr_t virt_addr, unsigned long context)
+{
+	____flush_TLB_page(flush_op_tlb_page_sys,
+			flush_addr_make_ss(virt_addr, context));
+}
+
+static inline void
+flush_TLB_ss_page(e2k_addr_t virt_addr, unsigned long context)
+{
+	flush_TLB_page_begin();
+	__flush_TLB_ss_page(virt_addr, context);
+	flush_TLB_page_end();
+}
+
+/*
+ * Flush DCACHE line
+ */
+#define flush_DCACHE_line_begin() \
+do { \
+	E2K_WAIT_ST; \
+} while (0)
+
+#define flush_DCACHE_line_end() \
+do { \
+	E2K_WAIT_FLUSH; \
+} while (0)
+
+static inline void __flush_DCACHE_line(e2k_addr_t virt_addr)
+{
+	FLUSH_DCACHE_LINE(virt_addr);
+}
+static inline void __flush_DCACHE_line_offset(e2k_addr_t virt_addr, size_t offset)
+{
+	FLUSH_DCACHE_LINE_OFFSET(virt_addr, offset);
+}
+static inline void
+flush_DCACHE_line(e2k_addr_t virt_addr)
+{
+	DebugMR("Flush DCACHE line : virtual addr 0x%lx\n", virt_addr);
+
+	flush_DCACHE_line_begin();
+	__flush_DCACHE_line(virt_addr);
+	flush_DCACHE_line_end();
+}
+
+/*
+ * Clear DCACHE L1 set
+ */
+static inline void
+clear_DCACHE_L1_set(e2k_addr_t virt_addr, unsigned long set)
+{
+	E2K_WAIT_ALL;
+	CLEAR_DCACHE_L1_SET(virt_addr, set);
+	E2K_WAIT_ST;
+}
+
+/*
+ * Clear DCACHE L1 line
+ */
+static inline void
+clear_DCACHE_L1_line(e2k_addr_t virt_addr)
+{
+	unsigned long set;
+
+	for (set = 0; set < E2K_DCACHE_L1_SETS_NUM; set++)
+		clear_DCACHE_L1_set(virt_addr, set);
+}
+/*
+ * Write DCACHE L2 registers
+ */
+static inline void
+native_write_DCACHE_L2_reg(unsigned long reg_val, int reg_num, int bank_num)
+{
+	NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num);
+}
+static inline void
+native_write_DCACHE_L2_CNTR_reg(unsigned long reg_val, int bank_num)
+{
+	native_write_DCACHE_L2_reg(reg_val, _E2K_DCACHE_L2_CTRL_REG, bank_num);
+}
+static inline void
+write_DCACHE_L2_reg(unsigned long reg_val, int reg_num, int bank_num)
+{
+	WRITE_L2_REG(reg_val, reg_num, bank_num);
+}
+static inline void
+write_DCACHE_L2_CNTR_reg(unsigned long reg_val, int bank_num)
+{
+	write_DCACHE_L2_reg(reg_val, _E2K_DCACHE_L2_CTRL_REG, bank_num);
+}
+
+/*
+ * Read DCACHE L2 registers
+ */
+static inline unsigned long
+native_read_DCACHE_L2_reg(int reg_num, int bank_num)
+{
+	return NATIVE_READ_L2_REG(reg_num, bank_num);
+}
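+
+/*
+ * For example, the per-bank accessors can be combined to scan all L2
+ * banks (illustrative sketch; E2K_L2_BANK_NUM and _E2K_DCACHE_L2_ERR_REG
+ * are defined in asm/mmu_regs_types.h):
+ *
+ *	for (bank = 0; bank < E2K_L2_BANK_NUM; bank++)
+ *		err |= native_read_DCACHE_L2_reg(_E2K_DCACHE_L2_ERR_REG,
+ *						 bank);
+ */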
+static inline unsigned long
+native_read_DCACHE_L2_CNTR_reg(int bank_num)
+{
+	return native_read_DCACHE_L2_reg(_E2K_DCACHE_L2_CTRL_REG, bank_num);
+}
+static inline unsigned long
+native_read_DCACHE_L2_ERR_reg(int bank_num)
+{
+	return native_read_DCACHE_L2_reg(_E2K_DCACHE_L2_ERR_REG, bank_num);
+}
+static inline unsigned long
+read_DCACHE_L2_reg(int reg_num, int bank_num)
+{
+	return READ_L2_REG(reg_num, bank_num);
+}
+static inline unsigned long
+read_DCACHE_L2_CNTR_reg(int bank_num)
+{
+	return read_DCACHE_L2_reg(_E2K_DCACHE_L2_CTRL_REG, bank_num);
+}
+static inline unsigned long
+read_DCACHE_L2_ERR_reg(int bank_num)
+{
+	return read_DCACHE_L2_reg(_E2K_DCACHE_L2_ERR_REG, bank_num);
+}
+
+/*
+ * Flush ICACHE line
+ */
+static inline void
+__flush_ICACHE_line(flush_op_t flush_op, flush_addr_t flush_addr)
+{
+	DebugMR("Flush ICACHE line : op 0x%lx extended virtual addr 0x%lx\n",
+		flush_op_val(flush_op), flush_addr_val(flush_addr));
+	FLUSH_ICACHE_LINE(flush_op_val(flush_op), flush_addr_val(flush_addr));
+}
+
+#define flush_ICACHE_line_begin()
+#define flush_ICACHE_line_end() \
+do { \
+	E2K_WAIT_FLUSH; \
+} while (0)
+
+static inline void
+__flush_ICACHE_line_user(e2k_addr_t virt_addr)
+{
+	__flush_ICACHE_line(flush_op_icache_line_user,
+			flush_addr_make_user(virt_addr));
+}
+
+static inline void
+flush_ICACHE_line_user(e2k_addr_t virt_addr)
+{
+	flush_ICACHE_line_begin();
+	__flush_ICACHE_line_user(virt_addr);
+	flush_ICACHE_line_end();
+}
+
+static inline void
+__flush_ICACHE_line_sys(e2k_addr_t virt_addr, unsigned long context)
+{
+	__flush_ICACHE_line(flush_op_icache_line_sys,
+			flush_addr_make_sys(virt_addr, context));
+}
+
+static inline void
+flush_ICACHE_line_sys(e2k_addr_t virt_addr, unsigned long context)
+{
+	flush_ICACHE_line_begin();
+	__flush_ICACHE_line_sys(virt_addr, context);
+	flush_ICACHE_line_end();
+}
+
+static inline void
+flush_ICACHE_kernel_line(e2k_addr_t virt_addr)
+{
+	flush_ICACHE_line_sys(virt_addr, E2K_KERNEL_CONTEXT);
+}
+
+/*
+ * Flush and invalidate CACHE(s) (invalidate all caches of the processor)
+ * WARNING: the invalidate operation was deleted from the instruction set
+ * beginning with the V3 iset
+ */
+
+static inline void
+boot_native_invalidate_CACHE_L12(void)
+{
+	int invalidate_supported;
+	unsigned long flags;
+
+	/* Invalidate operation was removed in E2S */
+	invalidate_supported = BOOT_NATIVE_IS_MACHINE_ES2;
+
+	raw_all_irq_save(flags);
+	E2K_WAIT_MA;
+	if (invalidate_supported)
+		NATIVE_FLUSH_CACHE_L12(_flush_op_invalidate_cache_L12);
+	else
+		NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
+	E2K_WAIT_FLUSH;
+	raw_all_irq_restore(flags);
+}
+
+/*
+ * Flush and write back CACHE(s) (write back and invalidate all caches
+ * of the processor)
+ * Flushing the cache is the same as writing it back
+ */
+
+static inline void
+native_raw_write_back_CACHE_L12(void)
+{
+	__E2K_WAIT(E2K_WAIT_OP_MA_C_MASK);
+	NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
+	__E2K_WAIT(E2K_WAIT_OP_FL_C_MASK | E2K_WAIT_OP_MA_C_MASK);
+}
+
+static inline void
+write_back_CACHE_L12(void)
+{
+	DebugMR("Flush : Write back all CACHEs (op 0x%lx)\n",
+		_flush_op_write_back_cache_L12);
+	FLUSH_CACHE_L12(_flush_op_write_back_cache_L12);
+}
+
+/*
+ * Flush TLB (invalidate all TLBs of the processor)
+ */
+
+static inline void
+native_raw_flush_TLB_all(void)
+{
+	__E2K_WAIT(E2K_WAIT_OP_ST_C_MASK);
+	NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all);
+	__E2K_WAIT(E2K_WAIT_OP_FL_C_MASK | E2K_WAIT_OP_MA_C_MASK);
+}
+
+static inline void
+flush_TLB_all(void)
+{
+	DebugMR("Flush all TLBs (op 0x%lx)\n", _flush_op_tlb_all);
+
FLUSH_TLB_ALL(_flush_op_tlb_all); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ +static inline void +flush_ICACHE_all(void) +{ + DebugMR("Flush all ICACHE op 0x%lx\n", _flush_op_icache_all); + FLUSH_ICACHE_ALL(_flush_op_icache_all); +} + +/* + * Read CLW register + */ + +static inline clw_reg_t +read_CLW_reg(clw_addr_t clw_addr) +{ + DebugCLW("Read CLW reg 0x%lx\n", clw_addr); + return READ_CLW_REG(clw_addr); +} + +/* + * Read CLW bottom register + */ +#define read_US_CL_B() read_CLW_reg(ADDR_US_CL_B) +#define READ_US_CL_B() READ_CLW_REG(ADDR_US_CL_B) + +/* + * Read CLW up register + */ +#define read_US_CL_UP() read_CLW_reg(ADDR_US_CL_UP) +#define READ_US_CL_UP() READ_CLW_REG(ADDR_US_CL_UP) + +/* + * Read CLW bit-mask registers + */ +#define read_US_CL_M0() read_CLW_reg(ADDR_US_CL_M0) +#define READ_US_CL_M0() READ_CLW_REG(ADDR_US_CL_M0) +#define read_US_CL_M1() read_CLW_reg(ADDR_US_CL_M1) +#define READ_US_CL_M1() READ_CLW_REG(ADDR_US_CL_M1) +#define read_US_CL_M2() read_CLW_reg(ADDR_US_CL_M2) +#define READ_US_CL_M2() READ_CLW_REG(ADDR_US_CL_M2) +#define read_US_CL_M3() read_CLW_reg(ADDR_US_CL_M3) +#define READ_US_CL_M3() READ_CLW_REG(ADDR_US_CL_M3) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_MMU_REGS_H_ */ diff --git a/arch/e2k/include/asm/mmu_regs_access.h b/arch/e2k/include/asm/mmu_regs_access.h new file mode 100644 index 0000000..7170638 --- /dev/null +++ b/arch/e2k/include/asm/mmu_regs_access.h @@ -0,0 +1,312 @@ +/* + * asm-e2k/mmu_regs_access.h: E2K MMU structures & registers. + * + * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_MMU_REGS_ACCESS_H_ +#define _E2K_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ +#include +#include +#endif /* __ASSEMBLY__ */ + +#include +#include + +#include + +extern unsigned long native_read_MMU_OS_PPTB_reg_value(void); +extern void native_write_MMU_OS_PPTB_reg_value(unsigned long value); +extern unsigned long native_read_MMU_OS_VPTB_reg_value(void); +extern void native_write_MMU_OS_VPTB_reg_value(unsigned long value); +extern unsigned long native_read_MMU_OS_VAB_reg_value(void); +extern void native_write_MMU_OS_VAB_reg_value(unsigned long value); + +extern unsigned long boot_native_read_MMU_OS_PPTB_reg_value(void); +extern void boot_native_write_MMU_OS_PPTB_reg_value(unsigned long value); +extern unsigned long boot_native_read_MMU_OS_VPTB_reg_value(void); +extern void boot_native_write_MMU_OS_VPTB_reg_value(unsigned long value); +extern unsigned long boot_native_read_MMU_OS_VAB_reg_value(void); +extern void boot_native_write_MMU_OS_VAB_reg_value(unsigned long value); + +#define NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) \ + native_write_MMU_OS_PPTB_reg_value(reg_val) +#define NATIVE_READ_MMU_OS_PPTB_REG() \ + native_read_MMU_OS_PPTB_reg_value() +#define NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) \ + native_write_MMU_OS_VPTB_reg_value(reg_val) +#define NATIVE_READ_MMU_OS_VPTB_REG() \ + native_read_MMU_OS_VPTB_reg_value() +#define NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) \ + native_write_MMU_OS_VAB_reg_value(reg_val) +#define NATIVE_READ_MMU_OS_VAB_REG() \ + native_read_MMU_OS_VAB_reg_value() +#define NATIVE_READ_MMU_PID_REG() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PID_NO)) +#define NATIVE_WRITE_MMU_PID_REG(reg_val) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_PID_NO), \ + mmu_reg_val(reg_val)) +#define NATIVE_READ_MMU_U_PPTB_REG() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO)) +#define NATIVE_WRITE_MMU_U_PPTB_REG(reg_val) \ + 
NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_PPTB_NO), \ + mmu_reg_val(reg_val)) +#define NATIVE_READ_MMU_U_VPTB_REG() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO)) +#define NATIVE_WRITE_MMU_U_VPTB_REG(reg_val) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_U_VPTB_NO), \ + mmu_reg_val(reg_val)) + +#define BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) \ + boot_native_write_MMU_OS_PPTB_reg_value(reg_val) +#define BOOT_NATIVE_READ_MMU_OS_PPTB_REG() \ + boot_native_read_MMU_OS_PPTB_reg_value() +#define BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) \ + boot_native_write_MMU_OS_VPTB_reg_value(reg_val) +#define BOOT_NATIVE_READ_MMU_OS_VPTB_REG() \ + boot_native_read_MMU_OS_VPTB_reg_value() +#define BOOT_NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) \ + boot_native_write_MMU_OS_VAB_reg_value(reg_val) +#define BOOT_NATIVE_READ_MMU_OS_VAB_REG() \ + boot_native_read_MMU_OS_VAB_reg_value() +#define BOOT_NATIVE_WRITE_MMU_PID_REG(reg_val) \ + NATIVE_WRITE_MMU_PID_REG(reg_val) +#define BOOT_NATIVE_READ_MMU_PID_REG() \ + NATIVE_READ_MMU_PID_REG() + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel (not paravirtualized based on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* it is native kernel without any virtualization */ +/* or host kernel with virtualization support */ + +/* + * MMU registers operations + */ + +#ifndef __ASSEMBLY__ +/* + * Write/read MMU register + */ +#define WRITE_MMU_REG(addr_val, reg_val) \ + NATIVE_WRITE_MMU_REG(addr_val, reg_val) +#define READ_MMU_REG(addr_val) \ + NATIVE_READ_MMU_REG(addr_val) + +#define BOOT_WRITE_MMU_REG(addr_val, reg_val) \ + BOOT_NATIVE_WRITE_MMU_REG(addr_val, reg_val) +#define BOOT_READ_MMU_REG(addr_val) \ + BOOT_NATIVE_READ_MMU_REG(addr_val) + +#define WRITE_MMU_OS_PPTB(reg_val) \ + NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) +#define READ_MMU_OS_PPTB() \ + NATIVE_READ_MMU_OS_PPTB_REG() +#define WRITE_MMU_OS_VPTB(reg_val) \ + NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) +#define READ_MMU_OS_VPTB() \ + NATIVE_READ_MMU_OS_VPTB_REG() +#define WRITE_MMU_OS_VAB(reg_val) \ + NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) +#define READ_MMU_OS_VAB() \ + NATIVE_READ_MMU_OS_VAB_REG() +#define WRITE_MMU_PID(reg_val) \ + NATIVE_WRITE_MMU_PID_REG(reg_val) +#define READ_MMU_PID() \ + NATIVE_READ_MMU_PID_REG() + +#define BOOT_WRITE_MMU_OS_PPTB(reg_val) \ + BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG(reg_val) +#define BOOT_READ_MMU_OS_PPTB() \ + BOOT_NATIVE_READ_MMU_OS_PPTB_REG() +#define BOOT_WRITE_MMU_OS_VPTB(reg_val) \ + BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG(reg_val) +#define BOOT_READ_MMU_OS_VPTB() \ + BOOT_NATIVE_READ_MMU_OS_VPTB_REG() +#define BOOT_WRITE_MMU_OS_VAB(reg_val) \ + BOOT_NATIVE_WRITE_MMU_OS_VAB_REG(reg_val) +#define BOOT_READ_MMU_OS_VAB() \ + BOOT_NATIVE_READ_MMU_OS_VAB_REG() +#define BOOT_WRITE_MMU_PID(reg_val) \ + BOOT_NATIVE_WRITE_MMU_PID_REG(reg_val) +#define BOOT_READ_MMU_PID() \ + BOOT_NATIVE_READ_MMU_PID_REG() + +/* + * Write/read Data TLB register + */ +#define WRITE_DTLB_REG(tlb_addr, tlb_value) \ + NATIVE_WRITE_DTLB_REG(tlb_addr, tlb_value) + +#define READ_DTLB_REG(tlb_addr) \ + NATIVE_READ_DTLB_REG(tlb_addr) + +/* + * Flush TLB page/entry + */ +#define FLUSH_TLB_ENTRY(flush_op, addr) \ + NATIVE_FLUSH_TLB_ENTRY(flush_op, addr) + +/* + * Flush DCACHE line + */ +#define FLUSH_DCACHE_LINE(virt_addr) \ + NATIVE_FLUSH_DCACHE_LINE(virt_addr) +#define FLUSH_DCACHE_LINE_OFFSET(virt_addr, 
offset) \ + NATIVE_FLUSH_DCACHE_LINE_OFFSET((virt_addr), (offset)) + +/* + * Clear DCACHE L1 set + */ +#define CLEAR_DCACHE_L1_SET(virt_addr, set) \ + NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set) + +/* + * Write DCACHE L2 registers + */ +#define WRITE_L2_REG(reg_val, reg_num, bank_num) \ + NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num) + +/* + * Read DCACHE L2 registers + */ +#define READ_L2_REG(reg_num, bank_num) \ + NATIVE_READ_L2_REG(reg_num, bank_num) + +/* + * Flush ICACHE line + */ +#define FLUSH_ICACHE_LINE(flush_op, addr) \ + NATIVE_FLUSH_ICACHE_LINE(flush_op, addr) + +/* + * Flush and invalidate or write back L1/L2 CACHE(s) + */ +#define FLUSH_CACHE_L12(flush_op) \ + native_write_back_CACHE_L12() + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ +#define FLUSH_TLB_ALL(flush_op) \ + native_flush_TLB_all() + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ +#define FLUSH_ICACHE_ALL(flush_op) \ + native_flush_ICACHE_all() + +/* + * Get Entry probe for virtual address + */ +#define ENTRY_PROBE_MMU_OP(addr_val) \ + NATIVE_ENTRY_PROBE_MMU_OP(addr_val) + +/* + * Get physical address for virtual address + */ +#define ADDRESS_PROBE_MMU_OP(addr_val) \ + NATIVE_ADDRESS_PROBE_MMU_OP(addr_val) + +/* + * Read CLW register + */ +#define READ_CLW_REG(clw_addr) \ + NATIVE_READ_CLW_REG(clw_addr) + +/* + * MMU DEBUG registers access + */ +#define READ_DDBAR0_REG_VALUE() NATIVE_READ_DDBAR0_REG_VALUE() +#define READ_DDBAR1_REG_VALUE() NATIVE_READ_DDBAR1_REG_VALUE() +#define READ_DDBAR2_REG_VALUE() NATIVE_READ_DDBAR2_REG_VALUE() +#define READ_DDBAR3_REG_VALUE() NATIVE_READ_DDBAR3_REG_VALUE() +#define READ_DDBCR_REG_VALUE() NATIVE_READ_DDBCR_REG_VALUE() +#define READ_DDBSR_REG_VALUE() NATIVE_READ_DDBSR_REG_VALUE() +#define READ_DDMAR0_REG_VALUE() NATIVE_READ_DDMAR0_REG_VALUE() +#define READ_DDMAR1_REG_VALUE() NATIVE_READ_DDMAR1_REG_VALUE() +#define READ_DDMCR_REG_VALUE() NATIVE_READ_DDMCR_REG_VALUE() +#define WRITE_DDBAR0_REG_VALUE(value) NATIVE_WRITE_DDBAR0_REG_VALUE(value) +#define WRITE_DDBAR1_REG_VALUE(value) NATIVE_WRITE_DDBAR1_REG_VALUE(value) +#define WRITE_DDBAR2_REG_VALUE(value) NATIVE_WRITE_DDBAR2_REG_VALUE(value) +#define WRITE_DDBAR3_REG_VALUE(value) NATIVE_WRITE_DDBAR3_REG_VALUE(value) +#define WRITE_DDBCR_REG_VALUE(value) NATIVE_WRITE_DDBCR_REG_VALUE(value) +#define WRITE_DDBSR_REG_VALUE(value) NATIVE_WRITE_DDBSR_REG_VALUE(value) +#define WRITE_DDMAR0_REG_VALUE(value) NATIVE_WRITE_DDMAR0_REG_VALUE(value) +#define WRITE_DDMAR1_REG_VALUE(value) NATIVE_WRITE_DDMAR1_REG_VALUE(value) +#define WRITE_DDMCR_REG_VALUE(value) NATIVE_WRITE_DDMCR_REG_VALUE(value) + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#ifndef __ASSEMBLY__ + +#define READ_DDBAR0_REG() \ + READ_DDBAR0_REG_VALUE() +#define READ_DDBAR1_REG() \ + READ_DDBAR1_REG_VALUE() +#define READ_DDBAR2_REG() \ + READ_DDBAR2_REG_VALUE() +#define READ_DDBAR3_REG() \ + READ_DDBAR3_REG_VALUE() +#define READ_DDBCR_REG() \ +({ \ + e2k_ddbcr_t ddbcr; \ + \ + ddbcr.DDBCR_reg = READ_DDBCR_REG_VALUE(); \ + ddbcr; \ +}) +#define READ_DDBSR_REG() \ +({ \ + e2k_ddbsr_t ddbsr; \ + \ + ddbsr.DDBSR_reg = READ_DDBSR_REG_VALUE(); \ + ddbsr; \ +}) +#define READ_DDMAR0_REG() \ + READ_DDMAR0_REG_VALUE() +#define READ_DDMAR1_REG() \ + READ_DDMAR1_REG_VALUE() +#define READ_DDMCR_REG() \ +({ \ + e2k_ddmcr_t ddmcr; \ + \ + ddmcr.DDMCR_reg = READ_DDMCR_REG_VALUE(); \ + ddmcr; \ +}) +#define WRITE_DDBAR0_REG(value) \ + WRITE_DDBAR0_REG_VALUE(value) +#define WRITE_DDBAR1_REG(value) \ + WRITE_DDBAR1_REG_VALUE(value) +#define WRITE_DDBAR2_REG(value) \ + WRITE_DDBAR2_REG_VALUE(value) +#define WRITE_DDBAR3_REG(value) \ + WRITE_DDBAR3_REG_VALUE(value) +#define WRITE_DDBCR_REG(value) \ + WRITE_DDBCR_REG_VALUE(value.DDBCR_reg) +#define WRITE_DDBSR_REG(value) \ + WRITE_DDBSR_REG_VALUE(value.DDBSR_reg) +#define WRITE_DDMAR0_REG(value) \ + WRITE_DDMAR0_REG_VALUE(value) +#define WRITE_DDMAR1_REG(value) \ + WRITE_DDMAR1_REG_VALUE(value) +#define WRITE_DDMCR_REG(value) \ + WRITE_DDMCR_REG_VALUE(value.DDMCR_reg) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/mmu_regs_types.h b/arch/e2k/include/asm/mmu_regs_types.h new file mode 100644 index 0000000..a47f6b6 --- /dev/null +++ b/arch/e2k/include/asm/mmu_regs_types.h @@ -0,0 +1,936 @@ +/* + * asm-e2k/mmu_regs.h: E2K MMU structures & registers. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_MMU_REGS_TYPES_H_ +#define _E2K_MMU_REGS_TYPES_H_ + +#include + +/* + * MMU registers structures + */ + +/* MMU address to access to MMU internal registers */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t mmu_addr_t; +#define mmu_addr_val(mmu_addr) (mmu_addr) +#define __mmu_addr(mmu_addr_val) (mmu_addr_val) +#endif /* __ASSEMBLY__ */ + +#define _MMU_ADDR_REG_NO_SHIFT 4 /* [ 9: 4] */ + +#define _MMU_ADDR_REG_NO 0x0000000000000ff0 /* # of register */ + +#define _MMU_CR_NO 0x00 /* Control register */ +#define _MMU_CONT_NO 0x01 /* Context register */ +#define _MMU_PID_NO _MMU_CONT_NO /* renamed name of CONT */ +#define _MMU_CR3_RG_NO 0x02 /* CR3 register for secondary space */ +#define _MMU_U2_PPTB_NO _MMU_CR3_RG_NO /* renamed name of CR3 */ +#define _MMU_ELB_PTB_NO 0x03 /* ELBRUS page table virtual base */ +#define _MMU_U_VPTB_NO _MMU_ELB_PTB_NO /* renamed name of ELB_PTB */ +#define _MMU_ROOT_PTB_NO 0x04 /* Root Page Table Base register */ +#define _MMU_U_PPTB_NO _MMU_ROOT_PTB_NO /* renamed name of ROOT_PTB */ +#define _MMU_TRAP_POINT_NO 0x05 /* Trap Pointer register */ +#define _MMU_TRAP_COUNT_NO 0x06 /* Trap Counter register */ +#define _MMU_MPT_B_NO 0x07 /* Phys Protection Table Base */ + /* register for secondary space */ +#define _MMU_PCI_L_B_NO 0x08 /* PCI Low Bound register */ + /* for secondary space */ +#define _MMU_US_CL_D_NO 0x09 /* User Stack Clearing Disable */ + /* register */ +#define _MMU_PH_H_B_NO 0x0a /* Low Phys memory High Bound */ + /* for secondary space */ +#define _MMU_USED_4KB_DTLB_NO 0x0b /* ??? */ +#define _MMU_USED_DTLB_MPT_DW_NO 0x0c /* ??? 
*/
+#define _MMU_APIC_BASE_NO	0x0d	/* local APIC/EPIC registers base */
+#define _MMU_HW0_NO		0x0e	/* MMU hardware register #0 */
+#define _MMU_PID2_NO		0x0f	/* secondary process ID */
+#define _MMU_MTRR_START_NO	0x10	/* Memory Type Range Register */
+					/* (first register) */
+					/* for secondary space */
+#define _MMU_MTRR_PAIRS_END_NO	0x1f	/* Memory Type Range Register */
+					/* (mtrr15 - last pairs register) */
+					/* for secondary space */
+#define _MMU_MTRR_END_NO	0x30	/* Memory Type Range Register */
+					/* (last register) */
+					/* for secondary space */
+#define _MMU_PAT_NO		0x31	/* page attribute table */
+#define _MMU_PH_HI_L_B_NO	0x32	/* High Phys memory Low Bound */
+					/* for secondary space */
+#define _MMU_PH_HI_H_B_NO	0x33	/* High Phys memory High Bound */
+					/* for secondary space */
+#define _MMU_OS_VPTB_NO		0x34	/* virtual base of kernel PTs */
+#define _MMU_OS_PPTB_NO		0x35	/* physical base of kernel PTs */
+#define _MMU_PDPTE0_NO		0x38	/* pud[0] 3rd-level PT entry #0 */
+#define _MMU_PDPTE1_NO		0x39	/* pud[1] 3rd-level PT entry #1 */
+#define _MMU_PDPTE2_NO		0x3a	/* pud[2] 3rd-level PT entry #2 */
+#define _MMU_PDPTE3_NO		0x3b	/* pud[3] 3rd-level PT entry #3 */
+#define _MMU_TLU_CACHE_NO	0x3c	/* TLU cache ??? */
+#define _MMU_OS_VAB_NO		0x4c	/* kernel virtual space base */
+					/* PAGE_OFFSET */
+#define MMU_REGS_NUM		(_MMU_MTRR_END_NO + 1)
+#define MTRR_LAST_DEFAULT	0x806	/* Default value of last MTRR */
+
+#define _MMU_REG_NO_TO_MMU_ADDR_VAL(reg_no) \
+		(((reg_no) << _MMU_ADDR_REG_NO_SHIFT) & _MMU_ADDR_REG_NO)
+#define MMU_REG_NO_TO_MMU_ADDR(reg_no) \
+		__mmu_addr(_MMU_REG_NO_TO_MMU_ADDR_VAL(reg_no))
+#define MMU_REG_NO_FROM_MMU_ADDR(mmu_addr) \
+		((mmu_addr_val(mmu_addr) & _MMU_ADDR_REG_NO) >> \
+			_MMU_ADDR_REG_NO_SHIFT)
+
+#define MMU_ADDR_CR		MMU_REG_NO_TO_MMU_ADDR(_MMU_CR_NO)
+#define MMU_ADDR_CONT		MMU_REG_NO_TO_MMU_ADDR(_MMU_CONT_NO)
+#define MMU_ADDR_PID		MMU_ADDR_CONT		/* renamed name */
+#define MMU_ADDR_CR3_RG		MMU_REG_NO_TO_MMU_ADDR(_MMU_CR3_RG_NO)
+#define MMU_ADDR_U2_PGTB	MMU_ADDR_CR3_RG		/* renamed name */
+#define MMU_ADDR_ELB_PTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_ELB_PTB_NO)
+#define MMU_ADDR_U_VPTB		MMU_ADDR_ELB_PTB	/* renamed name */
+#define MMU_ADDR_ROOT_PTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_ROOT_PTB_NO)
+#define MMU_ADDR_U_PPTB		MMU_ADDR_ROOT_PTB	/* renamed name */
+#define MMU_ADDR_TRAP_POINT	MMU_REG_NO_TO_MMU_ADDR(_MMU_TRAP_POINT_NO)
+#define MMU_ADDR_TRAP_COUNT	MMU_REG_NO_TO_MMU_ADDR(_MMU_TRAP_COUNT_NO)
+#define MMU_ADDR_MPT_B		MMU_REG_NO_TO_MMU_ADDR(_MMU_MPT_B_NO)
+#define MMU_ADDR_PCI_L_B	MMU_REG_NO_TO_MMU_ADDR(_MMU_PCI_L_B_NO)
+#define MMU_ADDR_US_CL_D	MMU_REG_NO_TO_MMU_ADDR(_MMU_US_CL_D_NO)
+#define MMU_ADDR_PH_H_B		MMU_REG_NO_TO_MMU_ADDR(_MMU_PH_H_B_NO)
+#define MMU_ADDR_WATCH_POINT	MMU_REG_NO_TO_MMU_ADDR(_MMU_WATCH_POINT_NO)
+#define MMU_ADDR_MTRR_START	MMU_REG_NO_TO_MMU_ADDR(_MMU_MTRR_START_NO)
+#define MMU_ADDR_MTRR_END	MMU_REG_NO_TO_MMU_ADDR(_MMU_MTRR_END_NO)
+#define MMU_ADDR_MTRR(no)	MMU_REG_NO_TO_MMU_ADDR(no)
+#define MMU_ADDR_OS_VPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_OS_VPTB_NO)
+#define MMU_ADDR_OS_PPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_OS_PPTB_NO)
+#define MMU_ADDR_SH_OS_VPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_VPTB_NO)
+#define MMU_ADDR_SH_OS_PPTB	MMU_REG_NO_TO_MMU_ADDR(_MMU_SH_OS_PPTB_NO)
+#define MMU_ADDR_OS_VAB		MMU_REG_NO_TO_MMU_ADDR(_MMU_OS_VAB_NO)
+
+/* MMU internal register contents */
+
+#ifndef __ASSEMBLY__
+typedef unsigned long long	mmu_reg_t;
+#define mmu_reg_val(mmu_reg)	(mmu_reg)
+#define __mmu_reg(mmu_reg_val)	(mmu_reg_val)
+#endif /* __ASSEMBLY__ */
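+
+/*
+ * For example, MMU_REG_NO_TO_MMU_ADDR(_MMU_TRAP_POINT_NO) places register
+ * number 0x05 into bits [9:4], giving MMU address 0x050, and
+ * MMU_REG_NO_FROM_MMU_ADDR(0x050) recovers 0x05 again.
+ */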
+/* Size of trap_cellar */
+
+/* while the bug with the size of the setjmp env persists, use TC_INTO_PT_REGS == 0 */
+#define MAX_TC_SIZE	10
+
+/*
+ * MMU Control Register MMU_CR
+ */
+
+#define _MMU_CR_CD_SHIFT	1
+#define _MMU_CR_IPD_SHIFT	11
+
+#define _MMU_CR_TLB_EN	0x0000000000000001	/* translation enable */
+#define _MMU_CR_CD_MASK	0x0000000000000006	/* cache disable bits */
+#define _MMU_CR_SET1	0x0000000000000008	/* set #1 enable for */
+						/* 4 MB pages */
+#define _MMU_CR_SET2	0x0000000000000010	/* set #2 enable for */
+						/* 4 MB pages */
+#define _MMU_CR_SET3	0x0000000000000020	/* set #3 enable for */
+						/* 4 MB pages */
+#define _MMU_CR_CR0_PG	0x0000000000000040	/* paging enable for */
+						/* secondary space INTEL */
+#define _MMU_CR_CR4_PSE	0x0000000000000080	/* 4 Mb page size */
+						/* enable for secondary */
+						/* space INTEL */
+#define _MMU_CR_CR0_CD	0x0000000000000100	/* cache disable for */
+						/* secondary space */
+						/* INTEL */
+#define _MMU_CR_TLU2_EN	0x0000000000000200	/* TLU enable for */
+						/* secondary space */
+						/* INTEL */
+#define _MMU_CR_LD_MPT	0x0000000000000400	/* memory protection */
+						/* table enable for */
+						/* LD from secondary */
+						/* space INTEL */
+#define _MMU_CR_IPD_MASK 0x0000000000000800	/* Instruction */
+						/* Prefetch Depth */
+#define _MMU_CR_UPT_EN	0x0000000000001000	/* enable UPT */
+#define _MMU_CR_SNXE	0x0000000000008000	/* enable SNXE */
+
+#define _MMU_CR_CD_VAL(x) (((x) << _MMU_CR_CD_SHIFT) & _MMU_CR_CD_MASK)
+#define _MMU_CD_EN	_MMU_CR_CD_VAL(0UL)	/* all caches enabled */
+#define _MMU_CD_D1_DIS	_MMU_CR_CD_VAL(1UL)	/* DCACHE1 disabled */
+#define _MMU_CD_D_DIS	_MMU_CR_CD_VAL(2UL)	/* DCACHE1, DCACHE2 disabled */
+#define _MMU_CD_DIS	_MMU_CR_CD_VAL(3UL)	/* DCACHE1, DCACHE2, ECACHE */
+						/* disabled */
+#define _MMU_CR_IPD_VAL(x) (((x) << _MMU_CR_IPD_SHIFT) & _MMU_CR_IPD_MASK)
+#define _MMU_IPD_DIS	_MMU_CR_IPD_VAL(0UL)	/* no prefetch */
+#define _MMU_IPD_2_LINE	_MMU_CR_IPD_VAL(1UL)	/* 2 lines of ICACHE prefetch */
+
+#ifdef CONFIG_IPD_DISABLE
+#define KERNEL_MMU_IPD	_MMU_IPD_DIS		/* no prefetch */
+#else
+#define KERNEL_MMU_IPD	_MMU_IPD_2_LINE		/* 2 lines of ICACHE prefetch */
+#endif /* CONFIG_IPD_DISABLE */
+
+#ifndef CONFIG_SECONDARY_SPACE_SUPPORT
+#define _MMU_CR_SEC_SPACE_EN
+#define _MMU_CR_SEC_SPACE_DIS
+#else /* CONFIG_SECONDARY_SPACE_SUPPORT */
+#define _MMU_CR_SEC_SPACE_EN	(_MMU_CR_CR0_PG | _MMU_CR_TLU2_EN)
+#define _MMU_CR_SEC_SPACE_DIS	(_MMU_CR_CR0_CD)
+#endif /* ! CONFIG_SECONDARY_SPACE_SUPPORT */
+
+#define __MMU_CR_KERNEL		(_MMU_CR_TLB_EN | _MMU_CD_EN | KERNEL_MMU_IPD)
+#define __MMU_CR_KERNEL_OFF	(_MMU_CD_DIS | _MMU_IPD_DIS)
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define _MMU_CR_KERNEL	(__MMU_CR_KERNEL | _MMU_CR_SET3)
+#else
+# define _MMU_CR_KERNEL	(boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ?
\ + (__MMU_CR_KERNEL) : (__MMU_CR_KERNEL | _MMU_CR_SET3)) +#endif /* CONFIG_HUGETLB_PAGE */ + +#define MMU_CR_KERNEL __mmu_reg(_MMU_CR_KERNEL) +#define MMU_CR_KERNEL_OFF __mmu_reg(__MMU_CR_KERNEL_OFF) + +#define mmu_cr_set_tlb_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) | _MMU_CR_TLB_EN) + +#define mmu_cr_set_vaddr_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) | _MMU_CR_TLB_EN) + +#define mmu_cr_reset_tlb_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) & ~(_MMU_CR_TLB_EN)) + +#define mmu_cr_reset_vaddr_enable(mmu_reg) \ + (mmu_reg_val(mmu_reg) & ~(_MMU_CR_TLB_EN)) + +# define mmu_cr_set_large_pages(mmu_reg) \ + (mmu_reg_val(mmu_reg) | _MMU_CR_SET3) +# define mmu_cr_reset_large_pages(mmu_reg) \ + (mmu_reg_val(mmu_reg) & ~_MMU_CR_SET3) + +/* + * MMU Process ID Register MMU_PID (renamed name from MMU_CONT) + */ + +#define _MMU_CONTEXT 0x0000000000000fff +#define _MMU_CONTEXT_SIZE (_MMU_CONTEXT + 1) +#define MMU_PID_SIZE _MMU_CONTEXT_SIZE + +/* + * Kernel virtual memory context + */ +#define E2K_KERNEL_CONTEXT 0x000 +#define E2K_KERNEL_PID E2K_KERNEL_CONTEXT /* renamed name */ + +#define MMU_CONTEXT(context) __mmu_reg(context) +#define MMU_KERNEL_CONTEXT MMU_CONTEXT(E2K_KERNEL_CONTEXT) +#define MMU_PID(pid) MMU_CONTEXT(pid) /* renamed name */ +#define MMU_KERNEL_PID MMU_KERNEL_CONTEXT /* renamed name */ + +/* + * MMU Control Register of secondary space table MMU_CR3_RG + * The physical address of the INTEL page directory base, + * aligned to table size + */ + +#define _MMU_CR3_PAGE_DIR 0x0000000fffff000UL +#define _MMU_CR3_PCD 0x000000000000010UL +#define _MMU_CR3_PWT 0x000000000000008UL + +#define MMU_CR3_KERNEL(page_dir) \ + (((e2k_addr_t)(page_dir)) & _MMU_CR3_PAGE_DIR) + +/* + * MMU Page Table virtual Base Registers MMU_OS_VPTB & MMU_U_VPTB + * (renamed MMU_ELB_PTB) + * The virtual address of the page table beginning, aligned to table size + */ +/* OS page table virtual base for separate virtual spaces */ +#define _MMU_VPTB_MASK (PGDIR_MASK & E2K_VA_PAGE_MASK) +#define MMU_ADDR_TO_VPTB(virt_addr) \ + __mmu_reg((virt_addr) & _MMU_VPTB_MASK) + +/* Separate Page Tables virtual bases */ +#define MMU_SEPARATE_KERNEL_VPTB MMU_ADDR_TO_VPTB(KERNEL_VPTB_BASE_ADDR) +#define MMU_SEPARATE_USER_VPTB MMU_ADDR_TO_VPTB(USER_VPTB_BASE_ADDR) +/* United Page Tables virtual base */ +#define MMU_UNITED_KERNEL_VPTB MMU_ADDR_TO_VPTB(KERNEL_VPTB_BASE_ADDR) +#define MMU_UNITED_USER_VPTB MMU_UNITED_KERNEL_VPTB + +/* + * MMU Root Page Table physical Bases register MMU_OS_PPTB & MMU_U_PPTB + * (renamed MMU_ROOT_PTB) + * The physical address of the root elbrus page table beginning, + * aligned to table size + */ + +#define _MMU_PPTB_MASK (MAX_PA_MASK & PAGE_MASK) +#define MMU_ADDR_TO_PPTB(phys_addr) __mmu_reg((phys_addr) & _MMU_PPTB_MASK) +#define MMU_KERNEL_PPTB MMU_ADDR_TO_PPTB(KERNEL_PPTB_BASE_ADDR) + +/* Separate Page Tables physical bases */ +#define MMU_SEPARATE_KERNEL_PPTB MMU_KERNEL_PPTB +#define MMU_SEPARATE_USER_PPTB(phys_addr) MMU_ADDR_TO_PPTB(phys_addr) +/* United Page Tables virtual base */ +#define MMU_UNITED_KERNEL_PPTB MMU_KERNEL_PPTB +#define MMU_UNITED_USER_PPTB(phys_addr) MMU_ADDR_TO_PPTB(phys_addr) + +/* + * MMU Base address of virtual space of kernel MMU_OS_VAB + * The virtual address of the kernel space should be aligned to 2**44 + */ +#define _MMU_VAB_MASK 0x0000f00000000000 +#define MMU_ADDR_TO_VAB(virt_addr) __mmu_reg((virt_addr) & _MMU_VAB_MASK) +#define MMU_SEPARATE_KERNEL_VAB MMU_ADDR_TO_VAB(PAGE_OFFSET) + +/* + * MMU Trap Pointer register MMU_TRAP_POINT + * The physical address of the beginning of an 
area where the attributes of unexecuted requests to memory
+ * are stored when an exception arises on them (the "cellar")
+ */
+
+#define MMU_ALIGN_TRAP_POINT_BASE_V2	9
+#define MMU_ALIGN_TRAP_POINT_BASE_MASK_V2 \
+		((1UL << MMU_ALIGN_TRAP_POINT_BASE_V2) - 1)
+#define MMU_TRAP_POINT_MASK_V2	~MMU_ALIGN_TRAP_POINT_BASE_MASK_V2
+#define MMU_TRAP_CELLAR_MAX_SIZE_V2	64	/* double-words */
+
+#define MMU_ALIGN_TRAP_POINT_BASE	10
+#define MMU_ALIGN_TRAP_POINT_BASE_MASK	((1UL << MMU_ALIGN_TRAP_POINT_BASE) - 1)
+#define MMU_TRAP_POINT_MASK	~MMU_ALIGN_TRAP_POINT_BASE_MASK
+#define MMU_TRAP_CELLAR_MAX_SIZE \
+		((TC_EXT_OFFSET + 7)/8 + 64)	/* double-words */
+
+#define _MMU_TRAP_POINT(phys_addr)	((phys_addr) & MMU_TRAP_POINT_MASK)
+#define MMU_TRAP_POINT(phys_addr)	__mmu_reg(_MMU_TRAP_POINT(phys_addr))
+#define MMU_KERNEL_TRAP_POINT		MMU_TRAP_POINT(KERNEL_TRAP_CELLAR)
+
+/*
+ * MMU Trap Counter register MMU_TRAP_COUNT
+ * Number of double-words in the "cellar" of the trap
+ */
+
+#define _MMU_TRAP_COUNT_MASK	0x000000000000002f
+#define _MMU_TRAP_COUNT(counter) (counter & _MMU_TRAP_COUNT_MASK)
+#define MMU_TRAP_COUNT(counter)	__mmu_reg(_MMU_TRAP_COUNT(counter))
+#define MMU_TRAP_COUNT_GET(mmu_reg) _MMU_TRAP_COUNT(mmu_reg_val(mmu_reg))
+
+#define mmu_trap_count_get(mmu_reg) MMU_TRAP_COUNT_GET(mmu_reg)
+
+/*
+ * MMU Memory Protection Table Base MMU_MPT_B
+ * The base address of the Memory Protection Table,
+ * aligned to the table size
+ */
+
+#define _MMU_MPT_B	0x000000fffffff000UL
+
+/*
+ * MMU PCI Low Bound MMU_PCI_L_B
+ * Fixes the boundary between PCI and main memory addresses
+ * for Intel accesses
+ */
+
+#define _MMU_PCI_L_B		0x00000000ffc00000UL
+#define _MMU_PCI_L_B_ALIGN_MASK	0x00000000003fffffUL
+
+/*
+ * MMU Phys High Bound MMU_PH_H_B
+ * Fixes the high boundary of Intel physical memory
+ * for Intel accesses
+ */
+
+#define _MMU_PH_H_B		0x00000000ffc00000UL
+#define _MMU_PH_H_B_ALIGN_MASK	0x00000000003fffffUL
+
+/*
+ * CACHEs (DCACHE & ICACHE) structure
+ */
+
+#define E2K_DCACHE_L1_LINES_BITS_NUM	9
+#define E2K_DCACHE_L1_LINES_NUM		(1 << E2K_DCACHE_L1_LINES_BITS_NUM)
+#define E2K_DCACHE_L1_SETS_BITS_NUM	2
+#define E2K_DCACHE_L1_SETS_NUM		(1 << E2K_DCACHE_L1_SETS_BITS_NUM)
+
+#define E2K_DCACHE_L2_LINES_BITS_NUM	10
+#define E2K_DCACHE_L2_LINES_NUM		(1 << E2K_DCACHE_L2_LINES_BITS_NUM)
+#define E2K_DCACHE_L2_SETS_BITS_NUM	2
+#define E2K_DCACHE_L2_SETS_NUM		(1 << E2K_DCACHE_L2_SETS_BITS_NUM)
+
+#define E2K_ICACHE_SETS_NUM		4
+#define E2K_ICACHE_SET_SIZE		256
+#define E2K_ICACHE_SET_MASK		(E2K_ICACHE_SET_SIZE - 1)
+#define E2K_ICACHE_LINES_NUM		64
+
+/*
+ * CACHEs (DCACHE & ICACHE) registers operations
+ */
+
+/* CACHEs (DCACHE & ICACHE) registers access operations address */
+
+#ifndef __ASSEMBLY__
+typedef e2k_addr_t	dcache_addr_t;
+typedef dcache_addr_t	dcache_l1_addr_t;
+typedef dcache_addr_t	dcache_l2_addr_t;
+#endif /* ! __ASSEMBLY__ */
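+
+/*
+ * For example, with E2K_DCACHE_L1_LINES_BITS_NUM = 9 and
+ * E2K_DCACHE_L1_SETS_BITS_NUM = 2 the L1 data cache is addressed as
+ * 512 lines x 4 sets; mk_dcache_l1_addr() below packs a line address,
+ * a set and a word offset into one such access address.
+ */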
+
+#define dcache_addr_val(dcache_addr)		(dcache_addr)
+#define dcache_l1_addr_val(dcache_l1_addr)	dcache_addr_val(dcache_l1_addr)
+#define dcache_l2_addr_val(dcache_l2_addr)	dcache_addr_val(dcache_l2_addr)
+
+#define __dcache_addr(dcache_addr_val)		(dcache_addr_val)
+#define __dcache_l1_addr(dcache_l1_addr_val)	__dcache_addr(dcache_l1_addr_val)
+#define __dcache_l2_addr(dcache_l2_addr_val)	__dcache_addr(dcache_l2_addr_val)
+
+#define _E2K_DCACHE_L1_SET	0x00000000C0000000
+#define _E2K_DCACHE_L1_TYPE	0x0000000020000000
+#define _E2K_DCACHE_L1_LINE	0x0000000000003FE0
+#define _E2K_DCACHE_L1_WORD	0x0000000000000018
+
+#define _E2K_DCACHE_L1_SET_SHIFT	30
+#define _E2K_DCACHE_L1_TYPE_SHIFT	29
+#define _E2K_DCACHE_L1_LINE_SHIFT	5
+#define _E2K_DCACHE_L1_WORD_SHIFT	3
+
+#define DCACHE_L1_VADDR_TO_ADDR(virt_addr) \
+		((virt_addr) & _E2K_DCACHE_L1_LINE)
+
+#define dcache_l1_set_set(addr, set) \
+		(__dcache_l1_addr( \
+			(dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_SET) | \
+			((set) << _E2K_DCACHE_L1_SET_SHIFT) & \
+				_E2K_DCACHE_L1_SET))
+#define dcache_l1_get_set(addr) \
+		(dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_SET)
+
+#define dcache_l1_set_type(addr, type) \
+		(__dcache_l1_addr( \
+			(dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_TYPE) | \
+			((type) << _E2K_DCACHE_L1_TYPE_SHIFT) & \
+				_E2K_DCACHE_L1_TYPE))
+#define dcache_l1_get_type(addr) \
+		(dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_TYPE)
+
+#define dcache_l1_set_line(addr, line) \
+		(__dcache_l1_addr( \
+			(dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_LINE) | \
+			((line) << _E2K_DCACHE_L1_LINE_SHIFT) & \
+				_E2K_DCACHE_L1_LINE))
+#define dcache_l1_get_line(addr) \
+		(dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_LINE)
+
+#define dcache_l1_set_word(addr, word) \
+		(__dcache_l1_addr( \
+			(dcache_l1_addr_val(addr) & ~_E2K_DCACHE_L1_WORD) | \
+			((word) << _E2K_DCACHE_L1_WORD_SHIFT) & \
+				_E2K_DCACHE_L1_WORD))
+#define dcache_l1_get_word(addr) \
+		(dcache_l1_addr_val(addr) & _E2K_DCACHE_L1_WORD)
+
+#define mk_dcache_l1_addr(virt_addr, set, type, word) \
+({ \
+	dcache_l1_addr_t addr; \
+	addr = __dcache_l1_addr(DCACHE_L1_VADDR_TO_ADDR(virt_addr)); \
+	addr = dcache_l1_set_set(addr, set); \
+	addr = dcache_l1_set_type(addr, type); \
+	addr = dcache_l1_set_word(addr, word); \
+	addr; \
+})
+
+#define _E2K_DCACHE_L2_TYPE		0x0000000030000000
+ #define _E2K_DCACHE_L2_DATA_TYPE	0x0
+ #define _E2K_DCACHE_L2_REGS_TYPE	0x1
+ #define _E2K_DCACHE_L2_TAG_TYPE	0x2
+ #define _E2K_DCACHE_L2_REGS_TYPE2	0x3
+#define _E2K_DCACHE_L2_LINE		0x000000000007ffc0
+#define _E2K_DCACHE_L2_REG_NUM		0x000000000000ff00
+ #define _E2K_DCACHE_L2_BIST_SIG1_REG	0x00
+ #define _E2K_DCACHE_L2_BIST_SIG2_REG	0x01
+ #define _E2K_DCACHE_L2_BISR_CTRL_REG	0x02
+ #define _E2K_DCACHE_L2_CTRL_REG	0x03
+ #define _E2K_DCACHE_L2_ECC_DBG_REG	0x04
+ #define _E2K_DCACHE_L2_ERR_REG	0x05
+ #define _E2K_DCACHE_L2_CNT_ERR1_REG	0x06
+ #define _E2K_DCACHE_L2_CNT_ERR2_REG	0x07
+ #define _E2K_DCACHE_L2_CTRL_EXT_REG	0x08
+#define _E2K_DCACHE_L2_BANK_NUM	0x00000000000000c0
+#define _E2K_DCACHE_L2_WORD		0x0000000000000038
+
+#define _E2K_DCACHE_L2_TYPE_SHIFT	28
+#define _E2K_DCACHE_L2_LINE_SHIFT	6
+#define _E2K_DCACHE_L2_REG_NUM_SHIFT	8
+#define _E2K_DCACHE_L2_BANK_NUM_SHIFT	6
+#define _E2K_DCACHE_L2_WORD_SHIFT	3
+
+#define E2K_L2_BANK_NUM		4
+
+#define E2K_L2_CNTR_EN_CORR	0x0000000000000001
+#define E2K_L2_CNTR_EN_DET	0x0000000000000002
+#define E2K_L2_CNTR_EN_CINT	0x0000000000000004
+
+#define DCACHE_L2_PADDR_TO_ADDR(phys_addr) \
+		((phys_addr) & _E2K_DCACHE_L2_LINE)
+
+#define dcache_l2_set_type(addr, type) \
+		(__dcache_l2_addr( \
+			(dcache_l2_addr_val(addr) & ~_E2K_DCACHE_L2_TYPE) | \
+			((type) << _E2K_DCACHE_L2_TYPE_SHIFT) & \
+				_E2K_DCACHE_L2_TYPE))
+#define dcache_l2_get_type(addr) \
+		(dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_TYPE)
+
+#define dcache_l2_set_line(addr, line) \
+		(__dcache_l2_addr( \
+			(dcache_l2_addr_val(addr) & ~_E2K_DCACHE_L2_LINE) | \
+			((line) << _E2K_DCACHE_L2_LINE_SHIFT) & \
+				_E2K_DCACHE_L2_LINE))
+#define dcache_l2_get_line(addr) \
+		(dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_LINE)
+
+#define dcache_l2_set_reg_num(addr, reg_num) \
+		(__dcache_l2_addr( \
+			(dcache_l2_addr_val(addr) & \
+					~_E2K_DCACHE_L2_REG_NUM) | \
+			((reg_num) << _E2K_DCACHE_L2_REG_NUM_SHIFT) & \
+				_E2K_DCACHE_L2_REG_NUM))
+#define dcache_l2_get_reg_num(addr) \
+		(dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_REG_NUM)
+
+#define dcache_l2_set_bank_num(addr, bank_num) \
+		(__dcache_l2_addr( \
+			(dcache_l2_addr_val(addr) & \
+					~_E2K_DCACHE_L2_BANK_NUM) | \
+			((bank_num) << _E2K_DCACHE_L2_BANK_NUM_SHIFT) & \
+				_E2K_DCACHE_L2_BANK_NUM))
+#define dcache_l2_get_bank_num(addr) \
+		(dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_BANK_NUM)
+
+#define dcache_l2_set_word(addr, word) \
+		(__dcache_l2_addr( \
+			(dcache_l2_addr_val(addr) & ~_E2K_DCACHE_L2_WORD) | \
+			((word) << _E2K_DCACHE_L2_WORD_SHIFT) & \
+				_E2K_DCACHE_L2_WORD))
+#define dcache_l2_get_word(addr) \
+		(dcache_l2_addr_val(addr) & _E2K_DCACHE_L2_WORD)
+
+#define mk_dcache_l2_addr(phys_addr, type, word) \
+({ \
+	dcache_l2_addr_t addr = 0; \
+	addr = __dcache_l2_addr(DCACHE_L2_PADDR_TO_ADDR(phys_addr)); \
+	addr = dcache_l2_set_type(addr, type); \
+	addr = dcache_l2_set_word(addr, word); \
+	addr; \
+})
+
+#define mk_dcache_l2_reg_addr(reg_num, bank_num) \
+({ \
+	dcache_l2_addr_t addr = 0; \
+	addr = dcache_l2_set_type(addr, _E2K_DCACHE_L2_REGS_TYPE); \
+	addr = dcache_l2_set_reg_num(addr, reg_num); \
+	addr = dcache_l2_set_bank_num(addr, bank_num); \
+	addr; \
+})
+
+/*
+ * ICACHE/DTLB/ITLB line flush operations
+ */
+
+/* ICACHE/DTLB/ITLB line flush operations address */
+
+#ifndef __ASSEMBLY__
+typedef	e2k_addr_t	flush_op_t;
+#endif /* ! __ASSEMBLY__ */
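
Editor's sketch (not part of the original header): composing the two kinds of L2 diagnostic addresses with the builder macros above. The chosen set/type/bank/word values are arbitrary illustrations, and the load/store primitives that actually consume these addresses are defined elsewhere.

static inline void example_dcache_diag_addrs(e2k_addr_t vaddr)
{
	/* L1: line bits taken from vaddr; set 2, type 1, word 0 */
	dcache_l1_addr_t l1 = mk_dcache_l1_addr(vaddr, 2, 1, 0);
	/* L2: address of the control register of bank 1 */
	dcache_l2_addr_t l2 =
		mk_dcache_l2_reg_addr(_E2K_DCACHE_L2_CTRL_REG, 1);

	(void) l1; (void) l2;
}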
+
+#define flush_op_val(flush_op)		(flush_op)
+
+#define __flush_op(flush_op_val)	(flush_op_val)
+
+#define _FLUSH_OP_TYPE		0x0000000000000007	/* type of */
+							/* operation */
+#define _FLUSH_ICACHE_LINE_USER_OP	0x0000000000000000
+#define _FLUSH_TLB_PAGE_SYS_OP		0x0000000000000001
+#define _FLUSH_ICACHE_LINE_SYS_OP	0x0000000000000002
+
+#define flush_op_get_type(flush_op) \
+		(flush_op_val(flush_op) & _FLUSH_OP_TYPE)
+#define flush_op_set_type(flush_op, type) \
+		(__flush_op((flush_op_val(flush_op) & ~_FLUSH_OP_TYPE) | \
+				((type) & _FLUSH_OP_TYPE)))
+#define flush_op_set_icache_line_user(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_ICACHE_LINE_USER_OP)
+#define flush_op_set_icache_line_sys(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_ICACHE_LINE_SYS_OP)
+#define flush_op_set_tlb_page_sys(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_TLB_PAGE_SYS_OP)
+#define _flush_op_icache_line_user	((long)_FLUSH_ICACHE_LINE_USER_OP)
+#define _flush_op_icache_line_sys	((long)_FLUSH_ICACHE_LINE_SYS_OP)
+#define _flush_op_tlb_page_sys		((long)_FLUSH_TLB_PAGE_SYS_OP)
+#define flush_op_icache_line_user	__flush_op(_flush_op_icache_line_user)
+#define flush_op_icache_line_sys	__flush_op(_flush_op_icache_line_sys)
+#define flush_op_tlb_page_sys		__flush_op(_flush_op_tlb_page_sys)
+
+/* ICACHE/DTLB/ITLB line flush extended virtual address structure */
+
+#ifndef __ASSEMBLY__
+typedef	e2k_addr_t	flush_addr_t;
+#endif /* ! __ASSEMBLY__ */
+
+#define flush_addr_val(flush_addr)	(flush_addr)
+
+#define __flush_addr(flush_addr_val)	(flush_addr_val)
+
+#define _FLUSH_ADDR_CONTEXT_SHIFT	50	/* [61:50] */
+
+#define _FLUSH_ADDR_VA		0x0000ffffffffffff	/* virtual address */
+							/* [47: 0] */
+#define _FLUSH_ADDR_CONTEXT	0x3ffc000000000000	/* context # */
+#define _FLUSH_ADDR_ROOT	0x4000000000000000	/* should be 0 */
+#define _FLUSH_ADDR_PHYS	0x8000000000000000	/* should be 0 */
+
+#define FLUSH_VADDR_TO_VA(virt_addr)	((virt_addr) & _FLUSH_ADDR_VA)
+
+#define _FLUSH_ADDR_KERNEL(virt_addr)	(FLUSH_VADDR_TO_VA(virt_addr) | \
+		((long)E2K_KERNEL_CONTEXT << _FLUSH_ADDR_CONTEXT_SHIFT))
+
+#define FLUSH_ADDR_KERNEL(virt_addr) \
+		__flush_addr(_FLUSH_ADDR_KERNEL(virt_addr))
+
+#define flush_addr_get_va(flush_addr) \
+		(flush_addr_val(flush_addr) & _FLUSH_ADDR_VA)
+#define flush_addr_set_va(flush_addr, virt_addr) \
+		(__flush_addr((flush_addr_val(flush_addr) & ~_FLUSH_ADDR_VA) | \
+				((virt_addr) & _FLUSH_ADDR_VA)))
+
+#define flush_addr_get_pid(flush_addr) \
+		((flush_addr_val(flush_addr) & _FLUSH_ADDR_CONTEXT) >> \
+						_FLUSH_ADDR_CONTEXT_SHIFT)
+#define flush_addr_get_context(flush_addr) \
+		(flush_addr_val(flush_addr) & _FLUSH_ADDR_CONTEXT)
+#define flush_addr_set_context(flush_addr, context) \
+		(__flush_addr((flush_addr_val(flush_addr) & \
+						~_FLUSH_ADDR_CONTEXT) | \
+			((long)(context) << _FLUSH_ADDR_CONTEXT_SHIFT) & \
+				_FLUSH_ADDR_CONTEXT))
+#define _flush_addr_make_sys(virt_addr, context, root) \
+({ \
+	e2k_addr_t __addr_val = FLUSH_VADDR_TO_VA(virt_addr); \
+	__addr_val |= (((long)(context) << \
+					_FLUSH_ADDR_CONTEXT_SHIFT) & \
+				_FLUSH_ADDR_CONTEXT); \
+	if (root) \
+		__addr_val |= _FLUSH_ADDR_ROOT; \
+	__addr_val; \
+})
+#define _flush_addr_make_user(virt_addr) \
+		FLUSH_VADDR_TO_VA(virt_addr)
+#define flush_addr_make_sys(virt_addr, context) \
+		__flush_addr(_flush_addr_make_sys(virt_addr, context, 0))
+#define flush_addr_make_user(virt_addr) \
+		__flush_addr(_flush_addr_make_user(virt_addr))
+#define flush_addr_make_ss(virt_addr, context) \
+		__flush_addr(_flush_addr_make_sys(virt_addr, context, 1))
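
Editor's sketch (not part of the original header): a DTLB line flush needs both an operation selector and an extended address, built from the helpers above; ctx is a hypothetical context number, and the instruction that consumes the pair lives elsewhere.

static inline void example_flush_tlb_page(e2k_addr_t virt_addr,
					  unsigned long ctx)
{
	/* operation selector: flush one DTLB page, system variant */
	flush_op_t op = flush_op_tlb_page_sys;
	/* extended address: VA plus context; root/phys bits stay 0 */
	flush_addr_t fa = flush_addr_make_sys(virt_addr, ctx);
	/* the secondary-space variant sets the root bit instead */
	flush_addr_t fs = flush_addr_make_ss(virt_addr, ctx);

	(void) op; (void) fa; (void) fs;
}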
+/*
+ * CACHE(s) flush operations
+ */
+
+/* CACHE(s) flush operations address */
+
+#define _FLUSH_INVALIDATE_CACHE_L12_OP	0x0000000000000000
+#define _FLUSH_WRITE_BACK_CACHE_L12_OP	0x0000000000000001
+
+/* instruction sets beginning with V3 have no invalidate operation, */
+/* only flush of all caches (same as write back) */
+#define _FLUSH_CACHE_L12_OP	_FLUSH_WRITE_BACK_CACHE_L12_OP
+
+#define flush_op_set_invalidate_cache_L12(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_INVALIDATE_CACHE_L12_OP)
+#define flush_op_set_write_back_cache_L12(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_WRITE_BACK_CACHE_L12_OP)
+#define flush_op_set_cache_all(flush_op) \
+		flush_op_set_write_back_cache_L12(flush_op)
+#define _flush_op_invalidate_cache_L12	((long)_FLUSH_INVALIDATE_CACHE_L12_OP)
+#define _flush_op_write_back_cache_L12	((long)_FLUSH_WRITE_BACK_CACHE_L12_OP)
+#define _flush_op_cache_all		_flush_op_write_back_cache_L12
+#define flush_op_invalidate_cache_L12 \
+		__flush_op(_flush_op_invalidate_cache_L12)
+#define flush_op_write_back_cache_L12 \
+		__flush_op(_flush_op_write_back_cache_L12)
+#define flush_op_cache_all	flush_op_write_back_cache_L12
+
+/*
+ * ICACHE/TLB flush operations
+ */
+
+/* ICACHE/TLB flush operations address */
+
+#define _FLUSH_ICACHE_ALL_OP	0x0000000000000000
+#define _FLUSH_TLB_ALL_OP	0x0000000000000001
+
+#define flush_op_set_icache_all(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_ICACHE_ALL_OP)
+#define flush_op_set_tlb_all(flush_op) \
+		flush_op_set_type(flush_op, _FLUSH_TLB_ALL_OP)
+#define _flush_op_icache_all	((long)_FLUSH_ICACHE_ALL_OP)
+#define _flush_op_tlb_all	((long)_FLUSH_TLB_ALL_OP)
+#define flush_op_icache_all	__flush_op(_flush_op_icache_all)
+#define flush_op_tlb_all	__flush_op(_flush_op_tlb_all)
+
+
+/*
+ * MU address to access CLW internal registers
+ */
+
+#ifndef __ASSEMBLY__
+typedef	e2k_addr_t	clw_addr_t;
+#endif /* ! __ASSEMBLY__ */
+
+#define US_CL_B_NO	0x024	/* User stack bottom to clean */
+#define US_CL_UP_NO	0x124	/* User stack up to clean */
+#define US_CL_M0_NO	0x004	/* User stack bit-mask [0:63] */
+#define US_CL_M1_NO	0x084	/* User stack bit-mask [64:127] */
+#define US_CL_M2_NO	0x104	/* User stack bit-mask [128:191] */
+#define US_CL_M3_NO	0x184	/* User stack bit-mask [192:255] */
+
+/* CLW internal register contents */
+
+#ifndef __ASSEMBLY__
+typedef	unsigned long	clw_reg_t;
+#endif /* ! __ASSEMBLY__ */
+
+/*
+ * User Stack Window clean bit-mask structure
+ */
+
+#define CLW_MASK_WORD_NUM	4	/* number of words in bit-mask */
+#define CLW_BITS_PER_MASK_WORD	64	/* number of bits in one bit-mask */
+					/* word */
+#define CLW_BYTES_PER_BIT	32	/* one bit describes 32 bytes of */
+					/* stack area */
+#define CLW_BYTES_PER_MASK	/* number of bytes in full bit-mask */ \
+		(CLW_BYTES_PER_BIT * CLW_MASK_WORD_NUM * CLW_BITS_PER_MASK_WORD)
+
+/*
+ * MMU DEBUG registers
+ */
+
+#define _MMU_ADDR_REGS_TYPE	0x0000000000000007	/* [2:0] type of op. */
+#define _MMU_ADDR_DEBUG_REG_NO	0x00000000000001e0	/* [8:5] # of reg.
*/ +#define _MMU_ADDR_DEBUG_REG_NO_SHIFT 5 /* [8:5] */ +#define _MMU_ADDR_DEBUG_REG_TYPE 7 /* access to DEBUG */ + /* registers */ + +#define MMU_DDBAR0_REG_NO 0 /* Data access breakpoint address */ +#define MMU_DDBAR1_REG_NO 1 /* registers # 0 - 3 */ +#define MMU_DDBAR2_REG_NO 2 +#define MMU_DDBAR3_REG_NO 3 +#define MMU_DDBCR_REG_NO 4 /* Data access breakpoint control */ +#define MMU_DDBSR_REG_NO 5 /* Data access breakpoint status */ +#define MMU_DDMAR0_REG_NO 6 /* Data monitor accumulator */ +#define MMU_DDMAR1_REG_NO 7 /* registers # 0 - 1 */ +#define MMU_DDMCR_REG_NO 8 /* Data monitor control register */ +#define MMU_DEBUG_REGS_NUM (MMU_DDMCR_REG_NO + 1) + +#define _DEBUG_REG_NO_TO_MMU_ADDR(reg_no) \ + ((((reg_no) << _MMU_ADDR_DEBUG_REG_NO_SHIFT) & \ + _MMU_ADDR_DEBUG_REG_NO) | _MMU_ADDR_DEBUG_REG_TYPE) + +#ifndef __ASSEMBLY__ + +typedef union { + struct { /* structure of register */ + u32 user : 1; /* [ 0: 0] */ + u32 system : 1; /* [ 1: 1] */ + u32 trap : 1; /* [ 2: 2] */ + u32 unused : 13; /* [15: 3] */ + u32 event : 7; /* [22:16] */ + u32 unused2 : 9; /* [31:23] */ + } fields[2]; + u64 word; +} e2k_ddmcr_t; +#define DDMCR_reg word + +typedef union { + struct { + u64 b0 : 8; + u64 unus7 : 4; + u64 b1 : 8; + u64 unus8 : 4; + u64 b2 : 8; + u64 unus9 : 4; + u64 b3 : 8; + u64 unu10 : 4; + u64 unu11 : 16; + }; + union { + struct { + u64 sprg0 : 1; + u64 spec0 : 1; + u64 aprg0 : 1; + u64 psf0 : 1; + u64 csf0 : 1; + u64 cut0 : 1; + u64 pt0 : 1; + u64 clw0 : 1; + u64 unus1 : 4; + + u64 sprg1 : 1; + u64 spec1 : 1; + u64 aprg1 : 1; + u64 psf1 : 1; + u64 csf1 : 1; + u64 cut1 : 1; + u64 pt1 : 1; + u64 clw1 : 1; + u64 unus2 : 4; + + u64 sprg2 : 1; + u64 spec2 : 1; + u64 aprg2 : 1; + u64 psf2 : 1; + u64 csf2 : 1; + u64 cut2 : 1; + u64 pt2 : 1; + u64 clw2 : 1; + u64 unus3 : 4; + + u64 sprg3 : 1; + u64 spec3 : 1; + u64 aprg3 : 1; + u64 psf3 : 1; + u64 csf3 : 1; + u64 cut3 : 1; + u64 pt3 : 1; + u64 clw3 : 1; + u64 unus4 : 4; + + u64 unus5 : 1; + u64 m0 : 1; /* [49] */ + u64 m1 : 1; /* [50] */ + u64 unus6 : 13; + }; + struct { + u64 b0 : 8; + u64 unus7 : 4; + u64 b1 : 8; + u64 unus8 : 4; + u64 b2 : 8; + u64 unus9 : 4; + u64 b3 : 8; + u64 unu10 : 4; + u64 unu11 : 16; + }; + } fields; + u64 word; +} e2k_ddbsr_t; +#define DDBSR_reg word + +#define E2K_DDBSR_MASK(cp_num) (0xffULL << ((cp_num) * 12)) +#define E2K_DDBSR_MASK_ALL_BP 0xff0ff0ff0ffULL + +typedef union { + struct { + u64 v0 : 1; + u64 root0 : 1; + u64 rw0 : 2; + u64 lng0 : 3; + u64 sync0 : 1; + u64 spec0 : 1; + u64 ap0 : 1; + u64 sf0 : 1; + u64 hw0 : 1; + u64 t0 : 1; + u64 __x0 : 1; + u64 v1 : 1; + u64 root1 : 1; + u64 rw1 : 2; + u64 lng1 : 3; + u64 sync1 : 1; + u64 spec1 : 1; + u64 ap1 : 1; + u64 sf1 : 1; + u64 hw1 : 1; + u64 t1 : 1; + u64 __x1 : 1; + u64 v2 : 1; + u64 root2 : 1; + u64 rw2 : 2; + u64 lng2 : 3; + u64 sync2 : 1; + u64 spec2 : 1; + u64 ap2 : 1; + u64 sf2 : 1; + u64 hw2 : 1; + u64 t2 : 1; + u64 __x2 : 1; + u64 v3 : 1; + u64 root3 : 1; + u64 rw3 : 2; + u64 lng3 : 3; + u64 sync3 : 1; + u64 spec3 : 1; + u64 ap3 : 1; + u64 sf3 : 1; + u64 hw3 : 1; + u64 t3 : 1; + u64 __x3 : 1; + u64 gm : 1; + }; + struct { + u64 v0 : 1; + u64 root0 : 1; + u64 rw0 : 2; + u64 lng0 : 3; + u64 sync0 : 1; + u64 spec0 : 1; + u64 ap0 : 1; + u64 sf0 : 1; + u64 hw0 : 1; + u64 t0 : 1; + u64 __x0 : 1; + u64 v1 : 1; + u64 root1 : 1; + u64 rw1 : 2; + u64 lng1 : 3; + u64 sync1 : 1; + u64 spec1 : 1; + u64 ap1 : 1; + u64 sf1 : 1; + u64 hw1 : 1; + u64 t1 : 1; + u64 __x1 : 1; + u64 v2 : 1; + u64 root2 : 1; + u64 rw2 : 2; + u64 lng2 : 3; + u64 sync2 : 1; + u64 spec2 : 
1;
+		u64 ap2		: 1;
+		u64 sf2		: 1;
+		u64 hw2		: 1;
+		u64 t2		: 1;
+		u64 __x2	: 1;
+		u64 v3		: 1;
+		u64 root3	: 1;
+		u64 rw3		: 2;
+		u64 lng3	: 3;
+		u64 sync3	: 1;
+		u64 spec3	: 1;
+		u64 ap3		: 1;
+		u64 sf3		: 1;
+		u64 hw3		: 1;
+		u64 t3		: 1;
+		u64 __x3	: 1;
+		u64 gm		: 1;
+	} fields;
+	u64 word;
+} e2k_ddbcr_t;
+#define DDBCR_reg	word
+
+#define E2K_DDBCR_MASK(cp_num)	(0x3FFFULL << ((cp_num) * 14))
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _E2K_MMU_REGS_TYPES_H_ */
diff --git a/arch/e2k/include/asm/mmu_types.h b/arch/e2k/include/asm/mmu_types.h
new file mode 100644
index 0000000..1e60924
--- /dev/null
+++ b/arch/e2k/include/asm/mmu_types.h
@@ -0,0 +1,795 @@
+#ifndef _E2K_MMU_TYPES_H_
+#define _E2K_MMU_TYPES_H_
+
+#include
+
+#include
+
+#ifndef __ASSEMBLY__
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef unsigned long	pteval_t;
+typedef unsigned long	pmdval_t;
+typedef unsigned long	pudval_t;
+typedef unsigned long	pgdval_t;
+typedef unsigned long	pgprotval_t;
+
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pudval_t pud; } pud_t;
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pgprotval_t pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pud_val(x)	((x).pud)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pud(x)	((pud_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#endif	/* ! __ASSEMBLY__ */
+
+/* one page table occupies one 4K page and has 512 entries */
+#define PT_ENTRIES_SHIFT	3	/* 8 bytes, 3 bits */
+#define PT_ENTRIES_BITS		(PAGE_SHIFT - PT_ENTRIES_SHIFT)	/* 9 bits */
+#define PT_ENTRIES_PER_PAGE	(1 << PT_ENTRIES_BITS)		/* 512 ptes */
+
+/*
+ * Definitions for the 4th (root) level:
+ *
+ * PGDIR_SHIFT determines what a root-level page table entry
+ * can map:
+ *		pages of 3rd-level page table entries
+ *
+ * Cannot use the top 0xffff ff00 0000 0000 - 0xffff ffff ffff ffff addresses
+ * because the virtual page table lives there.
+ */
+#define PGDIR_SHIFT	(PAGE_SHIFT + 3 * PT_ENTRIES_BITS)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PAGE_PGD_SIZE	PGDIR_SIZE
+#define PTRS_PER_PGD	(1UL << PT_ENTRIES_BITS)
+#define PGD_TABLE_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS	0
+
+/*
+ * Definitions for the 3rd level:
+ *
+ * PUD_SHIFT determines the size of the area a 3rd-level page table
+ * can map:
+ *		pages of second-level page table entries
+ */
+#define PUD_SHIFT	(PAGE_SHIFT + 2 * PT_ENTRIES_BITS)
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+#define PAGE_PUD_SIZE	PUD_SIZE
+#define PTRS_PER_PUD	(1UL << PT_ENTRIES_BITS)
+#define PUD_TABLE_SIZE	(PTRS_PER_PUD * sizeof(pud_t))
+
+/*
+ * Definitions for the 2nd level:
+ *
+ * PMD_SHIFT determines the size of the area a middle-level page table
+ * can map:
+ *		pages of first-level page table entries
+ */
+#define PMD_SHIFT	(PAGE_SHIFT + 1 * PT_ENTRIES_BITS)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+#define PAGE_PMD_SIZE	PMD_SIZE
+#define PTRS_PER_PMD	(1UL << PT_ENTRIES_BITS)
+#define PMD_TABLE_SIZE	(PTRS_PER_PMD * sizeof(pmd_t))
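
For orientation, the level shifts are easiest to see with concrete numbers (an editor's worked example, not in the original header):

/*
 * With 4 Kb pages (PAGE_SHIFT = 12, consistent with the "one page table
 * occupies one 4K page" comment above) and PT_ENTRIES_BITS = 9, the
 * shifts evaluate to:
 *
 *	PMD_SHIFT   = 12 + 1*9 = 21	PMD_SIZE   =   2 Mb
 *	PUD_SHIFT   = 12 + 2*9 = 30	PUD_SIZE   =   1 Gb
 *	PGDIR_SHIFT = 12 + 3*9 = 39	PGDIR_SIZE = 512 Gb
 *
 * so each of the 512 pgd entries covers 512 Gb, i.e. a full pgd spans
 * the 48-bit virtual address space of 256 Tb.
 */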
+
+/*
+ * Definitions for the first (page table entries) level:
+ *
+ * PTE - entries that map user pages.
+ */
+#define PTE_SHIFT	(PAGE_SHIFT)		/* PAGE_SHIFT */
+#define PTE_SIZE	(1UL << PTE_SHIFT)	/* PAGE_SIZE */
+#define PTE_MASK	(~(PTE_SIZE-1))		/* PAGE_MASK */
+#define PTRS_PER_PTE	(1UL << PT_ENTRIES_BITS)
+#define PTE_TABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
+
+/*
+ * The index in the 4th (root-level) page table directory.
+ */
+#define pgd_index(virt_addr)	(((virt_addr) >> PGDIR_SHIFT) & \
+					(PTRS_PER_PGD - 1))
+
+/* Additional trap cellar fields are located at this offset */
+#define TC_EXT_OFFSET	512
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Hardware MMU page tables differ somewhat from one ISET to another;
+ * moreover, each MMU supports a few different page table types:
+ *	native (primary)
+ *	secondary page tables for several modes (VA32, VA48, PA32, PA48 ...)
+ * The following structures and tables describe these differences for each
+ * instance, so that page tables can be managed as a common item.
+ */
+
+/* max possible number of page table levels for all ISETs, types, modes, */
+/* to be able to describe any type of page tables supported by the MMUs */
+#define ARCH_MAX_PT_LEVELS	4	/* for 48 bits virtual address */
+					/* and 48 bits physical address */
+
+#define E2K_PT_LEVELS_NUM	4	/* native mode page tables have */
+					/* an equal number of levels up to now */
+#define E2K_PAGES_LEVEL_NUM	0	/* level number of physical pages */
+#define E2K_PTE_LEVEL_NUM	1	/* level number of native pte */
+#define E2K_PMD_LEVEL_NUM	2	/* level number of native pmd */
+#define E2K_PUD_LEVEL_NUM	3	/* level number of native pud */
+#define E2K_PGD_LEVEL_NUM	4	/* level number of native pgd */
+
+/* max level # on which huge pages can be mapped: pmd, pud */
+#define MAX_HUGE_PAGES_LEVEL	E2K_PUD_LEVEL_NUM
+
+#define MAX_NUM_HUGE_PTES	2	/* max number of page table entries */
+					/* to present one huge page on any */
+					/* PT level */
+					/* Now the e2c+ MMU needs 2 pmd */
+					/* entries to present one 4 Mb page */
+
+typedef enum dtlb_type {
+	UNUSED_DTLB_TYPE,		/* DTLB unused for page table entries */
+	COMMON_DTLB_TYPE,		/* common DTLB N-lines x M-sets */
+	FULL_ASSOCIATIVE_DTLB_TYPE,	/* fully associative buffer, */
+					/* used for 1 Gb pages */
+} dtlb_type_t;
+
+typedef enum pt_type {
+	UNDEFINED_PT_TYPE,	/* undefined (not known) PT type */
+	E2K_PT_TYPE,		/* arch native 4-level PT */
+	X86_2_LEVELS_PT_TYPE,	/* secondary PT VA32 - PA32 */
+	X86_3_LEVELS_PT_TYPE,	/* secondary PT VA32 - PA48 */
+	X86_4_LEVELS_PT_TYPE,	/* secondary PT VA48 - PA48 */
+} pt_type_t;
+
+typedef struct pt_level {
+	int		id;		/* level ID (now is level #) */
+	e2k_size_t	pt_size;	/* page size that one entry of */
+					/* this level can map */
+	e2k_size_t	page_size;	/* page or huge page size that the */
+					/* level really maps; can differ */
+					/* from pt_size above, because a few */
+					/* entries may be needed to present */
+					/* one huge page */
+	int		pt_shift;	/* youngest bit of the page address */
+					/* that one entry of this level */
+					/* can map */
+	int		page_shift;	/* youngest bit of the huge page */
+					/* address that the level maps */
+	e2k_addr_t	pt_mask;	/* mask of the page address that one */
+					/* entry of this level can map */
+	e2k_addr_t	pt_offset;	/* mask of the offset into the level */
+					/* page address */
+	e2k_addr_t	pt_index_mask;	/* mask of the index bits in the */
+					/* level page address */
+	e2k_addr_t	page_mask;	/* mask of the huge page address */
+					/* that the level maps */
+	e2k_addr_t	page_offset;	/* mask of the offset into the level */
+					/* huge page address */
+	int		ptrs_per_pt;	/* number of entries in one table of */
+					/* this level */
+	bool		is_pte;		/* page table entries level */
+	bool		is_huge;	/* the
level can be as huge page */ + /* table entries */ + unsigned char huge_ptes; /* number of the level page table */ + /* entries to present one huge page */ + dtlb_type_t dtlb_type; /* DTLB cache type for page tables */ + /* entries on the level */ + + /* interface function to handle some things on the level */ + pte_t * (*boot_get_huge_pte)(e2k_addr_t virt_addr, pgprot_t *ptp); + pte_t * (*init_get_huge_pte)(e2k_addr_t virt_addr, pgprot_t *ptp); + pte_t * (*get_huge_pte)(e2k_addr_t virt_addr, pgprot_t *ptp); + void (*boot_set_pte)(e2k_addr_t addr, pte_t *ptep, pte_t pte, + bool host_map); + void (*init_pte_clear)(pte_t *ptep); + void (*split_pt_page)(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]); + void (*map_pt_huge_page_to_prev_level)(pgprot_t *pt_page, + e2k_addr_t phys_page, pgprot_t pgprot); + +} pt_level_t; + +typedef struct pt_struct { + pt_type_t type; /* PT type */ + const char *name; /* PT type name */ + bool pt_v6; /* PT entry structure V6 or above */ + int levels_num; /* real number of page table levels */ + + /* some useful PT entries structure bit mask */ + /* can be different for some PT types (depend on fields type, pt_v6) */ + pgprotval_t pfn_mask; /* pfn # */ + pgprotval_t accessed_mask; /* page accessed flags */ + pgprotval_t dirty_mask; /* page was updated */ + pgprotval_t present_mask; /* page is present */ + pgprotval_t valid_mask; /* page is present */ + pgprotval_t user_mask; /* page of user */ + pgprotval_t priv_mask; /* page is privileged */ + pgprotval_t non_exec_mask; /* page is not executable */ + pgprotval_t exec_mask; /* page is executable */ + + /* mask of bits available for software */ + pgprotval_t sw_bit1_mask; /* # 1 */ + pgprotval_t sw_bit2_mask; /* # 2 */ + pgprotval_t sw_mmio_mask; /* shadow pte is for MMIO */ + + /* some useful PT entries page protection base values */ + pgprotval_t ptd_kernel_prot; /* kernel PT directories protection */ + pgprotval_t ptd_user_prot; /* user PT directories protection */ + + /* interface function to get/set some protections */ + unsigned int (*get_pte_val_memory_type)(pgprot_t pte_val); + pgprot_t (*set_pte_val_memory_type)(pgprot_t pte_val, + unsigned int memory_type); + unsigned int (*get_pte_val_memory_type_rule)(pgprot_t pte_val); + pgprot_t (*set_pte_val_memory_type_rule)(pgprot_t pte_val, + unsigned int mtcr); + + /* level #0 is always physical page */ + pt_level_t levels[ARCH_MAX_PT_LEVELS + 1]; +} pt_struct_t; + +static inline int +get_pt_level_id(const pt_level_t *pt_level) +{ + /* now PT level is number of the level */ + return pt_level->id; +} + +static inline bool +is_page_pt_level(const pt_level_t *pt_level) +{ + return pt_level->is_pte; +} + +static inline bool +is_huge_pt_level(const pt_level_t *pt_level) +{ + return pt_level->is_huge; +} + +static inline e2k_size_t +get_pt_level_size(const pt_level_t *pt_level) +{ + return pt_level->pt_size; +} + +static inline e2k_size_t +get_pt_level_page_size(const pt_level_t *pt_level) +{ + return pt_level->page_size; +} + +static inline int +get_pt_level_shift(const pt_level_t *pt_level) +{ + return pt_level->pt_shift; +} + +static inline int +get_pt_level_page_shift(const pt_level_t *pt_level) +{ + return pt_level->page_shift; +} + +static inline e2k_addr_t +get_pt_level_mask(const pt_level_t *pt_level) +{ + return pt_level->pt_mask; +} +static inline e2k_addr_t +get_pt_level_offset(const pt_level_t *pt_level) +{ + return pt_level->pt_offset; +} +static inline e2k_addr_t +get_pt_level_addr_index(e2k_addr_t addr, const pt_level_t *pt_level) +{ + return (addr & 
pt_level->pt_index_mask) >> + get_pt_level_shift(pt_level); +} +static inline e2k_addr_t +set_pt_level_addr_index(e2k_addr_t addr, e2k_addr_t index, const pt_level_t *pt_level) +{ + return (addr & ~pt_level->pt_index_mask) | + ((index << get_pt_level_shift(pt_level)) & + pt_level->pt_index_mask); +} + +static inline e2k_addr_t +get_pt_level_page_mask(const pt_level_t *pt_level) +{ + return pt_level->page_mask; +} + +static inline e2k_addr_t +get_pt_level_page_offset(const pt_level_t *pt_level) +{ + return pt_level->page_offset; +} + +static inline int +get_ptrs_per_pt_level(const pt_level_t *pt_level) +{ + return pt_level->ptrs_per_pt; +} + +static inline int +get_pt_level_huge_ptes_num(const pt_level_t *pt_level) +{ + return pt_level->huge_ptes; +} + +static inline const pt_level_t * +get_pt_struct_level_on_id(const pt_struct_t *pt_struct, int level_id) +{ + /* now PT level is number of the level */ + return &pt_struct->levels[level_id]; +} + +static inline bool +is_page_pt_struct_level(const pt_struct_t *pt_struct, int level_id) +{ + return is_page_pt_level(&pt_struct->levels[level_id]); +} + +static inline bool +is_huge_pt_struct_level(const pt_struct_t *pt_struct, int level_id) +{ + return is_huge_pt_level(&pt_struct->levels[level_id]); +} + +static inline e2k_size_t +get_pt_struct_level_size(const pt_struct_t *pt_struct, int level_id) +{ + return get_pt_level_size(&pt_struct->levels[level_id]); +} + +static inline e2k_size_t +get_pt_struct_level_page_size(const pt_struct_t *pt_struct, int level_id) +{ + return get_pt_level_page_size(&pt_struct->levels[level_id]); +} + +static inline int +get_pt_struct_level_huge_ptes_num(const pt_struct_t *pt_struct, int level_id) +{ + return get_pt_level_huge_ptes_num(&pt_struct->levels[level_id]); +} + +/* This is definition of MMU TRAP_CELLAR types */ + +struct mmu_tc_dst { + unsigned address :8; /* [0-7] */ + unsigned vr :1; /* [8] */ + unsigned vl :1; /* [9] */ +}; + +typedef union { + unsigned word; + struct mmu_tc_dst fields; +} tc_dst_t; + +/* Maximum size for memory access from single channel is 8 + * (16 since e8c2) */ +#define E2K_MAX_FORMAT 16 + +struct mmu_tc_opcode { + unsigned fmt :3; /* [0-2] */ + unsigned npsp :1; /* [3] */ + unsigned fmtc :2; /* [4-5] */ +}; + +#endif /* ! 
__ASSEMBLY__ */
+
+#define LDST_BYTE_FMT		1UL	/* load/store byte (8 bits) */
+#define LDST_HALF_FMT		2UL	/* load/store halfword (16 bits) */
+#define LDST_WORD_FMT		3UL	/* load/store word (32 bits) */
+#define LDST_DWORD_FMT		4UL	/* load/store double-word (64 bits) */
+#define LDST_QWORD_FMT		5UL	/* load/store quad-word (128 bits) */
+#define LDST_QP_FMT		7UL	/* load/store qpacked word (128 bits) */
+
+#define LDRD_FMT_QWORD_A	0xdUL
+#define LDRD_FMT_QPWORD_A	0xfUL
+
+#define TC_FMT_QPWORD_Q		0xdUL	/* Single 16 position, tags as for Q */
+#define TC_FMT_QWORD_QP		0xfUL	/* Two 16 positions, tags as for QP */
+#define TC_FMT_DWORD_Q		0x15UL	/* Single 8 position, tags as for Q */
+#define TC_FMT_DWORD_QP		0x1fUL	/* Single 8 position, tag as for QP */
+
+
+#ifndef __ASSEMBLY__
+
+typedef union {
+	unsigned word;
+	struct mmu_tc_opcode fields;
+} tc_opcode_t;
+
+#define TC_OPCODE_FMT_FULL(opcode)	(AS(opcode).fmt | (AS(opcode).fmtc << 3))
+
+struct mmu_tc_fault_type {
+	unsigned global_sp	:1;	/* [35] */
+	unsigned page_bound	:1;	/* [36] */
+	unsigned exc_mem_lock	:1;	/* [37] */
+	unsigned ph_pr_page	:1;	/* [38] */
+	unsigned io_page	:1;	/* [39] */
+	unsigned isys_page	:1;	/* [40] */
+	unsigned prot_page	:1;	/* [41] */
+	unsigned priv_page	:1;	/* [42] */
+	unsigned illegal_page	:1;	/* [43] */
+	unsigned nwrite_page	:1;	/* [44] */
+	unsigned page_miss	:1;	/* [45] */
+	unsigned ph_bound	:1;	/* [46] */
+	unsigned intl_res_bits	:1;	/* [47] */
+};
+
+typedef union {
+	unsigned word;
+	struct mmu_tc_fault_type fields;
+} tc_fault_type_t;
+
+typedef union mmu_tc_cond_dword {
+	struct {
+		u64 dst		:10;	// [0-9]
+		u64 opcode	:6;	// [10-15]
+		u64 r0		:1;	// [16]
+		u64 store	:1;	// [17]
+		u64 mode_80	:1;	// [18]
+		u64 s_f		:1;	// [19]
+		u64 mas		:7;	// [20-26]
+		u64 root	:1;	// [27]
+		u64 scal	:1;	// [28]
+		u64 sru		:1;	// [29]
+		u64 spec	:1;	// [30]
+		u64 pm		:1;	// [31]
+		u64 chan	:2;	// [32-33]
+		u64 r1		:1;	// [34]
+		u64 fault_type	:13;	// [35-47]
+		u64 miss_lvl	:2;	// [48-49]
+		u64 num_align	:1;	// [50]
+		u64 empt	:1;	// [51]
+		u64 clw		:1;	// [52]
+		u64 dst_rcv	:10;	// [53-62]
+		u64 rcv		:1;	// [63]
+	};
+	struct {
+		u64 address	:8;	// [0-7]
+		u64 vr		:1;	// [8]
+		u64 vl		:1;	// [9]
+		u64 fmt		:3;	// [10-12]
+		/* Be careful: npsp=1 => access is not protected,
+		 * but npsp=0 does not mean that access is protected. */
+		u64 npsp	:1;	// [13]
+		u64 fmtc	:2;	// [14-15]
+		u64 ___x1	:19;	// [16-34]
+		u64 global_sp	:1;	/* [35] */
+		u64 page_bound	:1;	/* [36] */
+		u64 exc_mem_lock :1;	/* [37] */
+		u64 ph_pr_page	:1;	/* [38] */
+		u64 io_page	:1;	/* [39] */
+		u64 isys_page	:1;	/* [40] */
+		u64 prot_page	:1;	/* [41] */
+		u64 priv_page	:1;	/* [42] */
+		u64 illegal_page :1;	/* [43] */
+		u64 nwrite_page	:1;	/* [44] */
+		u64 page_miss	:1;	/* [45] */
+		u64 ph_bound	:1;	/* [46] */
+		u64 intl_res_bits :1;	/* [47] */
+		u64 ___x0	:5;	/* [52:48] */
+		u64 dst_ind	:8;	/* [60:53] */
+		u64 ___x2	:3;	/* [63-61] */
+	};
+} mmu_tc_cond_dword_t;
+
+typedef union {
+	u64 word;
+	union mmu_tc_cond_dword fields;
+} tc_cond_t;
+
+#define TC_COND_FMT_FULL(cond)	(AS(cond).fmt | (AS(cond).fmtc << 3))
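
The "full" format code simply places fmtc [15:14] above fmt [12:10]: for example, LDST_QWORD_FMT (5) with fmtc = 1 gives 0x5 | (1 << 3) = 0xd, which is exactly TC_FMT_QPWORD_Q. A small editor-added helper (hypothetical, not in the original header) shows the intended decoding:

static inline bool tc_fmt_is_quad_example(tc_cond_t cond)
{
	const u64 fmt = TC_COND_FMT_FULL(cond);

	/* the four 16-byte access encodings listed above; see
	 * tc_cond_to_size() below for the channel-dependent sizes */
	return fmt == LDST_QWORD_FMT || fmt == LDST_QP_FMT ||
			fmt == TC_FMT_QPWORD_Q || fmt == TC_FMT_QWORD_QP;
}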
+
+/*
+ * Caveat: for qword accesses this will return 16 bytes for
+ * the first entry in trap cellar and 8 bytes for the second one.
+ */
+static inline int tc_cond_to_size(tc_cond_t cond)
+{
+	const int fmt = TC_COND_FMT_FULL(cond);
+	int size;
+
+	if (fmt == LDST_QP_FMT || fmt == TC_FMT_QPWORD_Q) {
+		size = 16;
+	} else if (fmt == LDST_QWORD_FMT || fmt == TC_FMT_QWORD_QP) {
+		if (AS(cond).chan == 0 || AS(cond).chan == 2)
+			size = 16;
+		else
+			size = 8;
+	} else if (fmt == TC_FMT_DWORD_Q || fmt == TC_FMT_DWORD_QP) {
+		size = 8;
+	} else {
+		size = 1 << ((fmt & 0x7) - 1);
+	}
+
+	return size;
+}
+
+typedef union {
+	u64 word;
+	struct {
+		u64 mask_lo	: 8;	/* [7-0] */
+		u64 mask_hi	: 8;	/* [15-8] */
+		u64 __reserved1	: 48;	/* [63-16] */
+	};
+	struct {
+		u64 mask	: 16;	/* [15-0] */
+		u64 __reserved2	: 48;	/* [63-16] */
+	};
+} tc_mask_t;
+
+static inline int
+ldst_chan_opc_to_chan_num(int chan_opc)
+{
+	switch (chan_opc) {
+	case 0: return 0;
+	case 1: return 2;
+	case 2: return 3;
+	case 3: return 5;
+	default: return -1;
+	}
+}
+static inline int
+ldst_chan_num_to_chan_opc(int chan_num)
+{
+	switch (chan_num) {
+	case 0: return 0;
+	case 2: return 1;
+	case 3: return 2;
+	case 5: return 3;
+	default: return -1;
+	}
+}
+
+static inline bool
+tc_cond_load_has_store_semantics(tc_cond_t condition, unsigned iset_ver)
+{
+	const unsigned mas = AS(condition).mas;
+	const unsigned mod = (mas & MAS_MOD_MASK) >> MAS_MOD_SHIFT;
+	const unsigned chan = AS(condition).chan;
+	const bool root = AS(condition).root;
+	const bool spec = AS(condition).spec;
+
+	if (chan != 0)
+		return false;
+	if (spec)
+		return false;
+	return mod == _MAS_MODE_LOAD_OP_WAIT
+		|| (iset_ver < E2K_ISET_V3 &&
+			mod == _MAS_MODE_LOAD_OP_TRAP_ON_LD)
+		|| (root && iset_ver >= E2K_ISET_V3 &&
+			((mas & MAS_TRAP_ON_LD_ST_MASK) ==
+					MAS_LOAD_SEC_TRAP_ON_LD_ST
+				|| mas == MAS_SEC_SLT))
+		|| (iset_ver >= E2K_ISET_V5 &&
+			mod == _MAS_MODE_LOAD_OP_WAIT_1 &&
+			tc_cond_to_size(condition) == 16);
+}
+
+static inline bool
+tc_cond_is_store(tc_cond_t condition, unsigned iset_ver)
+{
+	const unsigned mas = AS(condition).mas;
+
+	if (AS(condition).store && (mas != MAS_DCACHE_LINE_FLUSH))
+		return true;
+	return tc_cond_load_has_store_semantics(condition, iset_ver);
+}
+
+/* Trap cellar flags */
+
+#define TC_DONE_FLAG		0x01
+#define TC_NESTED_EXC_FLAG	0x02
+#define TC_IS_HVA_FLAG		0x10	/* address in trap cellar is already */
+					/* converted GVA->HVA */
+
+/*
+ * Trap cellar as it is in hardware plus additional fields
+ */
+typedef struct {
+	unsigned long	address;
+	unsigned long	data;
+	tc_cond_t	condition;
+	unsigned long	data_ext;
+	tc_mask_t	mask;
+	unsigned char	flags;
+} trap_cellar_t;
+
+/* bug 96719: the combination s_f = 0, store = 1, sru = 1 is considered
+ * as s_f = 1, store = 1, sru = 1
+ */
+#define IS_SPILL(tc) (cpu_has(CPU_HWBUG_TRAP_CELLAR_S_F) && \
+		AS((tc).condition).store && AS((tc).condition).sru)
+
+/*
+ * Trap cellar as it is in hardware
+ */
+typedef struct {
+	unsigned long	address;
+	unsigned long	data;
+	tc_cond_t	condition;
+} kernel_trap_cellar_t;
+
+typedef struct {
+	unsigned long	__reserved;
+	unsigned long	data;
+	tc_mask_t	mask;
+} kernel_trap_cellar_ext_t;
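
As a usage sketch (editor's addition; the function name and calling convention are hypothetical), a handler walking the saved records might filter them like this, with iset_ver being the current instruction-set version:

static inline void example_walk_trap_cellar(trap_cellar_t *tcellar,
					    int count, unsigned iset_ver)
{
	int i;

	for (i = 0; i < count; i++) {
		trap_cellar_t *tc = &tcellar[i];

		if (tc->flags & TC_DONE_FLAG)
			continue;	/* record already completed */

		if (tc_cond_is_store(tc->condition, iset_ver)) {
			/* a store (or a load with store semantics) would
			 * be replayed from tc->address / tc->data */
		}
	}
}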
+
+/**
+ * is_record_asynchronous - return true if the record is asynchronous
+ * @cond: cond mask of record
+ *
+ * Asynchronous records are the ones that did not originate from a wide
+ * instruction in user code, i.e. hardware-generated records.
+ *
+ * In current processor models (and probably in all future ones) only
+ * CLW records can mix with synchronous ones.
+ */
+static inline bool is_record_asynchronous(tc_cond_t cond)
+{
+	/* We use bitwise OR for performance */
+	return AS(cond).mode_80 | AS(cond).s_f | AS(cond).sru | AS(cond).clw;
+}
+
+/**
+ * tc_record_asynchronous - return true if the record
+ *			    in tcellar is asynchronous
+ * @tcellar: record in question
+ */
+static inline int tc_record_asynchronous(trap_cellar_t *tcellar)
+{
+	tc_cond_t cond = tcellar->condition;
+
+	return is_record_asynchronous(cond);
+}
+
+#endif	/* ! __ASSEMBLY__ */
+
+/*
+ * Second operand of Load and Store recovery instruction (LDRD & STRD):
+ *
+ * operation code and MAS flags
+ */
+
+#define LDST_REC_OPC_INDEX_SHIFT	0
+#define LDST_REC_OPC_INDEX_SIZE		32	/* [31- 0] byte index */
+#define LDST_REC_OPC_MAS_SHIFT		32
+#define LDST_REC_OPC_MAS_SIZE		7	/* [38-32] MAS */
+#define LDST_REC_OPC_PROT_SHIFT		39
+#define LDST_REC_OPC_PROT_SIZE		1	/* [39] protected access */
+#define LDST_REC_OPC_FMT_SHIFT		40
+#define LDST_REC_OPC_FMT_SIZE		3	/* [42-40] format of access */
+#define LDST_REC_OPC_ROOT_SHIFT		43
+#define LDST_REC_OPC_ROOT_SIZE		1	/* [43] virtual space */
+						/* type flag */
+#define LDST_REC_OPC_RG_SHIFT		44
+#define LDST_REC_OPC_RG_SIZE		8	/* [51-44] physical address */
+						/* of an NR (in terms */
+						/* of single-NR) used */
+						/* for handling */
+						/* memory locks */
+						/* conflicts */
+#define LDST_REC_OPC_FMT_H_SHIFT	52
+#define LDST_REC_OPC_FMT_H_SIZE		1	/* [52] format of access */
+#define LDST_REC_OPC_MODE_H_SHIFT	53
+#define LDST_REC_OPC_MODE_H_SIZE	1	/* [53] mode (hi) of access */
+#define LDST_REC_OPC_UNUSED_SHIFT	54
+#define LDST_REC_OPC_UNUSED_SIZE	2	/* [55-54] unused bits */
+
+#define LDST_REC_OPC_MASK_SHIFT		56
+#define LDST_REC_OPC_MASK_SIZE		8
+
+#define LDST_REC_OPC_PROT	(1UL << LDST_REC_OPC_PROT_SHIFT)
+#define LDST_REC_OPC_ROOT	(1UL << LDST_REC_OPC_ROOT_SHIFT)
+#define LDST_REC_OPC_MODE_H	(1UL << LDST_REC_OPC_MODE_H_SHIFT)
+#define LDST_REC_OPC_MODE_MASK \
+		(LDST_REC_OPC_PROT | LDST_REC_OPC_ROOT | LDST_REC_OPC_MODE_H)
+
+#define LDST_REC_OPC_GET_MODE(ldst_rec_opc) \
+		((((ldst_rec_opc) & LDST_REC_OPC_PROT) >> \
+				(LDST_REC_OPC_PROT_SHIFT - 0)) | \
+			(((ldst_rec_opc) & LDST_REC_OPC_ROOT) >> \
+				(LDST_REC_OPC_ROOT_SHIFT - 1)) | \
+			(((ldst_rec_opc) & LDST_REC_OPC_MODE_H) >> \
+				(LDST_REC_OPC_MODE_H_SHIFT - 2)))
+#define LDST_REC_OPC_SET_MODE(ldst_rec_opc, mode) \
+		(((ldst_rec_opc) & ~LDST_REC_OPC_MODE_MASK) | \
+			(((mode) & 0x01) << (LDST_REC_OPC_PROT_SHIFT - 0)) | \
+			(((mode) & 0x02) << (LDST_REC_OPC_ROOT_SHIFT - 1)) | \
+			(((mode) & 0x04) << (LDST_REC_OPC_MODE_H_SHIFT - 2)))
+
+#ifndef __ASSEMBLY__
+typedef union {
+	struct {
+		u64 index	: 32;	// [31- 0]
+		u64 mas		: 7;	// [38-32]
+		u64 prot	: 1;	// [39]
+		u64 fmt		: 3;	// [42-40]
+		u64 root	: 1;	// [43]
+		/* Used only on ES2 (E2K_ISET_V2), deprecated everywhere else.
*/ + u64 rg_deprecated : 8; // [51-44] + u64 fmt_h : 1; // [52] + u64 mode_h : 1; // [53] + u64 unused : 2; // [55-54] + u64 mask : 8; // [63-56] + }; + unsigned long word; +} ldst_rec_op_t; +#define LD_ST_REC_OPC_index(ld_st_rec) (ld_st_rec.index) +#define LD_ST_REC_OPC_mas(ld_st_rec) (ld_st_rec.mas) +#define LD_ST_REC_OPC_prot(ld_st_rec) (ld_st_rec.prot) +#define LD_ST_REC_OPC_fmt(ld_st_rec) (ld_st_rec.fmt) +#define LD_ST_REC_OPC_root(ld_st_rec) (ld_st_rec.root) +#define LD_ST_REC_OPC_fmt_h(ld_st_rec) (ld_st_rec.fmt_h) +#define LD_ST_REC_OPC_mode_h(ld_st_rec) (ld_st_rec.mode_h) +#define LD_ST_REC_OPC_mask(ld_st_rec) (ld_st_rec.mask) +#define LD_ST_REC_OPC_reg(ld_st_rec) (ld_st_rec.word) + +typedef enum ld_st_rec_mode { + primary_rec_mode = 0, /* primary, privileged, */ + primary_prot_rec_mode = 1, /* primary, privileged, protected */ + secondary_rec_mode = 2, /* secondary, privileged */ + guest_physical_rec_mode = 3, /* guest, physical, privileged */ + primary_user_rec_mode = 4, /* primary */ + guest_primary_rec_mode = 5, /* guest, primary, privileged, prot */ + secondary_user_rec_mode = 6, /* secondary, not privileged */ +} ld_st_rec_mode_t; + +static inline ld_st_rec_mode_t +get_ld_st_rec_opc_mode(ldst_rec_op_t rec_opcode) +{ + unsigned mode = 0; + + mode |= LD_ST_REC_OPC_prot(rec_opcode) ? 0x01 : 0x00; + mode |= LD_ST_REC_OPC_root(rec_opcode) ? 0x02 : 0x00; + mode |= LD_ST_REC_OPC_mode_h(rec_opcode) ? 0x04 : 0x00; + return (ld_st_rec_mode_t)mode; +} +static inline ldst_rec_op_t +set_ld_st_rec_opc_mode(ldst_rec_op_t rec_opcode, ld_st_rec_mode_t mode) +{ + LD_ST_REC_OPC_prot(rec_opcode) = (mode & 0x01) ? 1 : 0; + LD_ST_REC_OPC_root(rec_opcode) = (mode & 0x02) ? 1 : 0; + LD_ST_REC_OPC_mode_h(rec_opcode) = (mode & 0x04) ? 1 : 0; + return rec_opcode; +} + +#endif /* ! __ASSEMBLY__ */ + +#define LDST_REC_OPC_BYPASS_L1 (MAS_BYPASS_L1_CACHE << \ + LDST_REC_OPC_MAS_SHIFT) +#define LDST_REC_OPC_BYPASS_CACHE (MAS_BYPASS_ALL_CACHES << \ + LDST_REC_OPC_MAS_SHIFT) + +#define TAGGED_MEM_LOAD_REC_OPC (0UL | \ + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_FILL_OPERATION << LDST_REC_OPC_MAS_SHIFT) +#define TAGGED_MEM_LOAD_REC_OPC_W (0UL | \ + LDST_WORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_FILL_OPERATION << LDST_REC_OPC_MAS_SHIFT) +#define TAGGED_MEM_STORE_REC_OPC (LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT) +#define TAGGED_MEM_STORE_REC_OPC_W (LDST_WORD_FMT << LDST_REC_OPC_FMT_SHIFT) + + +#endif /* _E2K_MMU_TYPES_H_ */ diff --git a/arch/e2k/include/asm/mmzone.h b/arch/e2k/include/asm/mmzone.h new file mode 100644 index 0000000..6758e22 --- /dev/null +++ b/arch/e2k/include/asm/mmzone.h @@ -0,0 +1,19 @@ +#ifndef _E2K_MMZONE_H_ +#define _E2K_MMZONE_H_ + +#include +#include + +#include +#include + +#define __NODE_DATA(ndata, nid) ((ndata)[(nid)]) + +#ifdef CONFIG_NEED_MULTIPLE_NODES +extern pg_data_t *node_data[]; +#define NODE_DATA(nid) __NODE_DATA(node_data, nid) +#endif + +#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) + +#endif /* _E2K_MMZONE_H_ */ diff --git a/arch/e2k/include/asm/module.h b/arch/e2k/include/asm/module.h new file mode 100644 index 0000000..7cac067 --- /dev/null +++ b/arch/e2k/include/asm/module.h @@ -0,0 +1,13 @@ +#ifndef _E2K_MODULE_H_ +#define _E2K_MODULE_H_ +/* + * This file contains the E2K architecture specific module code. 
+ */ + +struct mod_arch_specific { }; + +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Ehdr Elf64_Ehdr + +#endif /* _E2K_MODULE_H_ */ diff --git a/arch/e2k/include/asm/monitors.h b/arch/e2k/include/asm/monitors.h new file mode 100644 index 0000000..2193755 --- /dev/null +++ b/arch/e2k/include/asm/monitors.h @@ -0,0 +1,49 @@ +/* + * arch/e2k/kernel/monitors.h + * + * This file contains declarations of interface functions for working with + * monitors. + * + * Copyright (C) 2009-2013 Pavel V. Panteleev (panteleev_p@mcst.ru) + */ + +#ifndef _MONITORS_H_ +#define _MONITORS_H_ + +#ifdef CONFIG_MONITORS + +#define MONITORS_COUNT 4 + +#define SICMONITORS_COUNT_PER_NODE 2 +#define SICMONITORS_COUNT \ + (MAX_NUMNODES * SICMONITORS_COUNT_PER_NODE) + +#define IPCCMONITORS_COUNT 1 +#define IOCCMONITORS_COUNT 1 + +typedef struct { + unsigned char cpu_num; + unsigned long dim0; + unsigned long dim1; + unsigned long ddm0; + unsigned long ddm1; +} monitor_registers_delta_t; + +extern const struct file_operations proc_pid_monitors_events_operations; +extern int monitors_used; + +void process_monitors(struct task_struct *task); +void init_monitors(struct task_struct *task); +void store_monitors_delta(struct task_struct *task); +void add_dead_proc_events(struct task_struct *task); +unsigned char get_monitors_mask(char *title); + +#define MONITORING_IS_ACTIVE unlikely(monitors_used) + +#else /* !CONFIG_MONITORS*/ +#define MONITORING_IS_ACTIVE false +static inline void init_monitors(struct task_struct *task) { }; +#endif /* CONFIG_MONITORS */ + +#endif /* _MONITORS_H_ */ + diff --git a/arch/e2k/include/asm/mpspec.h b/arch/e2k/include/asm/mpspec.h new file mode 100644 index 0000000..74cd056 --- /dev/null +++ b/arch/e2k/include/asm/mpspec.h @@ -0,0 +1,35 @@ +#ifndef __ASM_MPSPEC_H +#define __ASM_MPSPEC_H + +#ifdef __KERNEL__ + +#include +#include + +#include /* For __LITTLE_ENDIAN definition */ + +/* all addresses in MP table is physical so do not change them */ +#define mpc_addr_to_virt(addr) phys_to_virt(boot_vpa_to_pa(addr)) +#define mpc_addr_to_phys(addr) (addr) +#define mpc_addr(addr) \ + ((READ_OSCUD_LO_REG().OSCUD_lo_base >= PAGE_OFFSET) ? \ + (u64)mpc_addr_to_virt(addr) : mpc_addr_to_phys(addr)) + +static inline int +boot_mpf_do_checksum(unsigned char *mp, int len) +{ + int sum = 0; + + while (len--) + sum += *mp++; + + return 0x100 - (sum & 0xFF); +} + +#endif /* __KERNEL__ */ + +#if defined(__KERNEL__) || defined(__KVM_MPSPEC_SUPPORT__) +#include +#endif /* __KERNEL__ || __KVM_MPSPEC_SUPPORT__ */ + +#endif /* __ASM_MPSPEC_H */ diff --git a/arch/e2k/include/asm/msgbuf.h b/arch/e2k/include/asm/msgbuf.h new file mode 100644 index 0000000..ffbd1ef --- /dev/null +++ b/arch/e2k/include/asm/msgbuf.h @@ -0,0 +1,27 @@ +#ifndef _E2K_MSGBUF_H_ +#define _E2K_MSGBUF_H_ + +/* + * The msqid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_MSGBUF_H_ */ diff --git a/arch/e2k/include/asm/msidef.h b/arch/e2k/include/asm/msidef.h new file mode 100644 index 0000000..dba1666 --- /dev/null +++ b/arch/e2k/include/asm/msidef.h @@ -0,0 +1,6 @@ +#ifndef __ASM_MSIDEF_H +#define __ASM_MSIDEF_H + +#include + +#endif diff --git a/arch/e2k/include/asm/mtrr.h b/arch/e2k/include/asm/mtrr.h new file mode 100644 index 0000000..b8f10a3 --- /dev/null +++ b/arch/e2k/include/asm/mtrr.h @@ -0,0 +1,67 @@ +/* Generic MTRR (Memory Type Range Register) ioctls. + + Copyright (C) 1997-1999 Richard Gooch + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + Richard Gooch may be reached by email at rgooch@atnf.csiro.au + The postal address is: + Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. +*/ +#ifndef _LINUX_MTRR_H +#define _LINUX_MTRR_H + +#include + + +/* The following functions are for use by other drivers */ +# ifdef CONFIG_MTRR +extern int mtrr_add (unsigned long base, unsigned long size, + unsigned int type, char increment); +extern int mtrr_add_page (unsigned long base, unsigned long size, + unsigned int type, char increment); +extern int mtrr_del (int reg, unsigned long base, unsigned long size); +extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); +# else +static __inline__ int mtrr_add (unsigned long base, unsigned long size, + unsigned int type, char increment) +{ + return -ENODEV; +} +static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, + unsigned int type, char increment) +{ + return -ENODEV; +} +static __inline__ int mtrr_del (int reg, unsigned long base, + unsigned long size) +{ + return -ENODEV; +} +static __inline__ int mtrr_del_page (int reg, unsigned long base, + unsigned long size) +{ + return -ENODEV; +} +# endif + +/* The following functions are for initialisation: don't use them! 
*/ +extern int mtrr_init (void); +# if defined(CONFIG_SMP) && defined(CONFIG_MTRR) +extern void mtrr_init_boot_cpu (void); +extern void mtrr_init_secondary_cpu (void); +# endif + +#endif /* _LINUX_MTRR_H */ diff --git a/arch/e2k/include/asm/namei.h b/arch/e2k/include/asm/namei.h new file mode 100644 index 0000000..eaad75d --- /dev/null +++ b/arch/e2k/include/asm/namei.h @@ -0,0 +1,17 @@ +/* $Id: namei.h,v 1.1 2001/05/16 13:33:16 anonymous Exp $ + * linux/include/asm-e2k/namei.h + * + * Included from linux/fs/namei.c + */ + +#ifndef _E2K_NAMEI_H_ +#define _E2K_NAMEI_H_ + +/* This dummy routine maybe changed to something useful + * for /usr/gnemul/ emulation stuff. + * Look at asm-sparc/namei.h for details. + */ + +#define __emul_prefix() NULL + +#endif /* _E2K_NAMEI_H_ */ diff --git a/arch/e2k/include/asm/native_aau_regs_access.h b/arch/e2k/include/asm/native_aau_regs_access.h new file mode 100644 index 0000000..5094967 --- /dev/null +++ b/arch/e2k/include/asm/native_aau_regs_access.h @@ -0,0 +1,1488 @@ +/* + * Native hardware AAU registers access + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... , AASTA15); + */ + +#ifndef _NATIVE_AAU_REGS_ACCESS_H_ +#define _NATIVE_AAU_REGS_ACCESS_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef DEBUG_AAU_REG_MODE + #define BUG_AAU() BUG_ON(true) +#else /* ! 
DEBUG_AAU_REG_MODE */ + #define BUG_AAU() +#endif /* DEBUG_AAU_REG_MODE */ + +/* macros to access to native hardware E2K AAU context */ + +#define NATIVE_GET_AAU_AAD(reg_mn, mem_p) \ + NATIVE_GET_AAUQREG(&AWP(mem_p), reg_mn) +#define NATIVE_GET_AAU_AADS(reg1, reg2, reg3, reg4, mem_p) \ + NATIVE_GET_AAUQREGS(&AWP(mem_p), reg1, reg2, reg3, reg4) +#define NATIVE_GET_AAU_AAIND_V2(reg_mnemonic) \ + NATIVE_GET_AAUREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAIND_V5(reg_mnemonic) \ + NATIVE_GET_AAUDREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAINDS_V2(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAINDS_V5(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAIND_TAG() \ + NATIVE_GET_AAUREG(aaind_tag, 2) +#define NATIVE_GET_AAU_AAINCR_V2(reg_mnemonic) \ + NATIVE_GET_AAUREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAINCR_V5(reg_mnemonic) \ + NATIVE_GET_AAUDREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AAINCRS_V2(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAINCRS_V5(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AAINCR_TAG() \ + NATIVE_GET_AAUREG(aaincr_tag, 2) +#define NATIVE_GET_AAU_AASTI_V2(reg_mnemonic) \ + NATIVE_GET_AAUREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AASTI_V5(reg_mnemonic) \ + NATIVE_GET_AAUDREG(reg_mnemonic, 2) +#define NATIVE_GET_AAU_AASTIS_V2(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AASTIS_V5(reg1, reg2, val1, val2) \ + NATIVE_GET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_GET_AAU_AASTI_TAG() \ + NATIVE_GET_AAUREG(aasti_tag, 2) +#define NATIVE_GET_AAU_AASR() \ + NATIVE_GET_AAUREG(aasr, 2) +#define NATIVE_GET_AAU_AAFSTR() \ + NATIVE_GET_AAUREG(aafstr, 5) +#define NATIVE_GET_AAU_AALDI_V2(reg_mn, lval, rval) \ + NATIVE_GET_AAUREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_GET_AAU_AALDI_V5(reg_mn, lval, rval) \ + NATIVE_GET_AAUDREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_GET_AAU_AALDA(reg_mn, lval, rval) \ + NATIVE_GET_AAUREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_GET_AAU_AALDV(lo, hi) \ + NATIVE_GET_AAUREGS(aaldv, aaldv, lo, hi) +#define NATIVE_GET_AAU_AALDM(lo, hi) \ + NATIVE_GET_AAUREGS(aaldm, aaldm, lo, hi) + +#define NATIVE_SET_AAU_AAD(reg_mn, mem_p) \ + NATIVE_SET_AAUQREG(reg_mn, &AWP(mem_p)) +#define NATIVE_SET_AAU_AADS(reg1, reg2, reg3, reg4, mem_p) \ + NATIVE_SET_AAUQREGS(&AWP(mem_p), reg1, reg2, reg3, reg4) +#define NATIVE_SET_AAU_AAIND(reg_mn, val) \ + NATIVE_SET_AAUDREG(reg_mn, val, 2) +#define NATIVE_SET_AAU_AAINDS(reg1, reg2, val1, val2) \ + NATIVE_SET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_SET_AAU_AAIND_TAG(val) \ + NATIVE_SET_AAUREG(aaind_tag, val, 2) +#define NATIVE_SET_AAU_AAIND_REG_AND_TAGS(reg, reg_tag, val, tags_val) \ + NATIVE_SET_AAUREGS(reg, reg_tag, val, tags_val) +#define NATIVE_SET_AAU_AAIND_AAINCR_TAGS(aaind, aaincr) \ + NATIVE_SET_AAUREGS(aaind_tag, aaincr_tag, (aaind), (aaincr)) +#define NATIVE_SET_AAU_AAINCR(reg_mn, val) \ + NATIVE_SET_AAUDREG(reg_mn, val, 5) +#define NATIVE_SET_AAU_AAINCRS(reg1, reg2, val1, val2) \ + NATIVE_SET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_SET_AAU_AAINCR_TAG(val) \ + NATIVE_SET_AAUREG(aaincr_tag, val, 5) +#define NATIVE_SET_AAU_AAINCR_REG_AND_TAGS(reg, reg_tag, val, tags_val) \ + NATIVE_SET_AAUREGS(reg, reg_tag, val, tags_val) +#define NATIVE_SET_AAU_AASTI(reg_mn, val) \ + NATIVE_SET_AAUDREG(reg_mn, val, 2) +#define 
NATIVE_SET_AAU_AASTIS(reg1, reg2, val1, val2) \ + NATIVE_SET_AAUDREGS(reg1, reg2, val1, val2) +#define NATIVE_SET_AAU_AASTI_TAG(val) \ + NATIVE_SET_AAUREG(aasti_tag, val, 2) +#define NATIVE_SET_AAU_AASR(val) \ + NATIVE_SET_AAUREG(aasr, val, 2) +#define NATIVE_SET_AAU_AAFSTR(val) \ + NATIVE_SET_AAUREG(aafstr, val, 5) +#define NATIVE_SET_AAU_AALDI(reg_mn, lval, rval) \ + NATIVE_SET_AAUDREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_SET_AAU_AALDA(reg_mn, lval, rval) \ + NATIVE_SET_AAUREGS(reg_mn, reg_mn, lval, rval) +#define NATIVE_SET_AAU_AALDV(lo, hi) \ + NATIVE_SET_AAUREGS(aaldv, aaldv, lo, hi) +#define NATIVE_SET_AAU_AALDM(lo, hi) \ + NATIVE_SET_AAUREGS(aaldm, aaldm, lo, hi) + +/* + * Native hardware AAU registers access function (can be paravirtualized) + * WARNING: please use only following functions to access to AAU context, + * do not use macroses above directly, because of macroses cannot be + * paravirtualized + */ + +static __always_inline u32 native_read_aasr_reg_value(void) +{ + return NATIVE_GET_AAU_AASR(); +} +static __always_inline void native_write_aasr_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AASR(reg_value); +} +static inline u32 native_read_aafstr_reg_value(void) +{ + return NATIVE_GET_AAU_AAFSTR(); +} +static __always_inline void native_write_aafstr_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AAFSTR(reg_value); +} + +static __always_inline e2k_aasr_t native_read_aasr_reg(void) +{ + e2k_aasr_t aasr; + + AW(aasr) = native_read_aasr_reg_value(); + return aasr; +} +static __always_inline void native_write_aasr_reg(e2k_aasr_t aasr) +{ + NATIVE_SET_AAU_AASR(AW(aasr)); +} + +static inline u32 native_read_aaind_reg_value_v2(int AAIND_no) +{ + switch (AAIND_no) { + case 0: return NATIVE_GET_AAU_AAIND_V2(aaind0); + case 1: return NATIVE_GET_AAU_AAIND_V2(aaind1); + case 2: return NATIVE_GET_AAU_AAIND_V2(aaind2); + case 3: return NATIVE_GET_AAU_AAIND_V2(aaind3); + case 4: return NATIVE_GET_AAU_AAIND_V2(aaind4); + case 5: return NATIVE_GET_AAU_AAIND_V2(aaind5); + case 6: return NATIVE_GET_AAU_AAIND_V2(aaind6); + case 7: return NATIVE_GET_AAU_AAIND_V2(aaind7); + case 8: return NATIVE_GET_AAU_AAIND_V2(aaind8); + case 9: return NATIVE_GET_AAU_AAIND_V2(aaind9); + case 10: return NATIVE_GET_AAU_AAIND_V2(aaind10); + case 11: return NATIVE_GET_AAU_AAIND_V2(aaind11); + case 12: return NATIVE_GET_AAU_AAIND_V2(aaind12); + case 13: return NATIVE_GET_AAU_AAIND_V2(aaind13); + case 14: return NATIVE_GET_AAU_AAIND_V2(aaind14); + case 15: return NATIVE_GET_AAU_AAIND_V2(aaind15); + default: + BUG_AAU(); + return 0; + } +} + +static inline u64 native_read_aaind_reg_value_v5(int AAIND_no) +{ + switch (AAIND_no) { + case 0: return NATIVE_GET_AAU_AAIND_V5(aaind0); + case 1: return NATIVE_GET_AAU_AAIND_V5(aaind1); + case 2: return NATIVE_GET_AAU_AAIND_V5(aaind2); + case 3: return NATIVE_GET_AAU_AAIND_V5(aaind3); + case 4: return NATIVE_GET_AAU_AAIND_V5(aaind4); + case 5: return NATIVE_GET_AAU_AAIND_V5(aaind5); + case 6: return NATIVE_GET_AAU_AAIND_V5(aaind6); + case 7: return NATIVE_GET_AAU_AAIND_V5(aaind7); + case 8: return NATIVE_GET_AAU_AAIND_V5(aaind8); + case 9: return NATIVE_GET_AAU_AAIND_V5(aaind9); + case 10: return NATIVE_GET_AAU_AAIND_V5(aaind10); + case 11: return NATIVE_GET_AAU_AAIND_V5(aaind11); + case 12: return NATIVE_GET_AAU_AAIND_V5(aaind12); + case 13: return NATIVE_GET_AAU_AAIND_V5(aaind13); + case 14: return NATIVE_GET_AAU_AAIND_V5(aaind14); + case 15: return NATIVE_GET_AAU_AAIND_V5(aaind15); + default: + BUG_AAU(); + return 0; + } +} +static inline void 
native_write_aaind_reg_value(int AAIND_no, u64 reg_value) +{ + switch (AAIND_no) { + case 0: + NATIVE_SET_AAU_AAIND(aaind0, reg_value); + break; + case 1: + NATIVE_SET_AAU_AAIND(aaind1, reg_value); + break; + case 2: + NATIVE_SET_AAU_AAIND(aaind2, reg_value); + break; + case 3: + NATIVE_SET_AAU_AAIND(aaind3, reg_value); + break; + case 4: + NATIVE_SET_AAU_AAIND(aaind4, reg_value); + break; + case 5: + NATIVE_SET_AAU_AAIND(aaind5, reg_value); + break; + case 6: + NATIVE_SET_AAU_AAIND(aaind6, reg_value); + break; + case 7: + NATIVE_SET_AAU_AAIND(aaind7, reg_value); + break; + case 8: + NATIVE_SET_AAU_AAIND(aaind8, reg_value); + break; + case 9: + NATIVE_SET_AAU_AAIND(aaind9, reg_value); + break; + case 10: + NATIVE_SET_AAU_AAIND(aaind10, reg_value); + break; + case 11: + NATIVE_SET_AAU_AAIND(aaind11, reg_value); + break; + case 12: + NATIVE_SET_AAU_AAIND(aaind12, reg_value); + break; + case 13: + NATIVE_SET_AAU_AAIND(aaind13, reg_value); + break; + case 14: + NATIVE_SET_AAU_AAIND(aaind14, reg_value); + break; + case 15: + NATIVE_SET_AAU_AAIND(aaind15, reg_value); + break; + default: + BUG_AAU(); + } +} + +#define PREFIX_READ_AAINDS_PAIR_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AAINDs_pair, value1, value2) \ +({ \ + switch (AAINDs_pair) { \ + case 0: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind0, aaind1, \ + value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind1, aaind2, \ + value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind2, aaind3, \ + value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind3, aaind4, \ + value1, value2); \ + break; \ + case 4: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind4, aaind5, \ + value1, value2); \ + break; \ + case 5: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind5, aaind6, \ + value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind6, aaind7, \ + value1, value2); \ + break; \ + case 7: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind7, aaind8, \ + value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind8, aaind9, \ + value1, value2); \ + break; \ + case 9: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind9, aaind10, \ + value1, value2); \ + break; \ + case 10: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind10, aaind11, \ + value1, value2); \ + break; \ + case 11: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind11, aaind12, \ + value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind12, aaind13, \ + value1, value2); \ + break; \ + case 13: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind13, aaind14, \ + value1, value2); \ + break; \ + case 14: \ + PV_TYPE##_GET_AAU_AAINDS_##ISET(aaind14, aaind15, \ + value1, value2); \ + break; \ + case 15: \ + value1 = PV_TYPE##_GET_AAU_AAIND_##ISET(aaind15); \ + value2 = PV_TYPE##_GET_AAU_AAIND_TAG(); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, lo_value, hi_value) \ + PREFIX_READ_AAINDS_PAIR_VALUE(NATIVE, native, V2, v2, \ + AAINDs_pair, lo_value, hi_value) +#define NATIVE_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, lo_value, hi_value) \ + PREFIX_READ_AAINDS_PAIR_VALUE(NATIVE, native, V5, v5, \ + AAINDs_pair, lo_value, hi_value) +#define PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V2(PV_TYPE, pv_type, \ + reg_value, tags_value) \ +({ \ + PV_TYPE##_GET_AAU_AAINDS_V2(aaind15, aaind_tag, \ + reg_value, tags_value); \ +}) +#define NATIVE_READ_AAIND_REG15_AND_TAGS_VALUE_V2(reg_value, tags_value) \ + 
PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V2(NATIVE, native, \ + reg_value, tags_value) +#define PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V5(PV_TYPE, pv_type, \ + reg_value, tags_value) \ +({ \ + reg_value = PV_TYPE##_GET_AAU_AAIND_V5(aaind15); \ + tags_value = PV_TYPE##_GET_AAU_AAIND_TAG(); \ +}) +#define NATIVE_READ_AAIND_REG15_AND_TAGS_VALUE_V5(reg_value, tags_value) \ + PREFIX_READ_AAIND_REG15_AND_TAGS_VALUE_V5(NATIVE, native, \ + reg_value, tags_value) + +static __always_inline void +native_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value) +{ + switch (AAINDs_pair) { + case 0: + NATIVE_SET_AAU_AAINDS(aaind0, aaind1, lo_value, hi_value); + break; + case 1: + NATIVE_SET_AAU_AAINDS(aaind1, aaind2, lo_value, hi_value); + break; + case 2: + NATIVE_SET_AAU_AAINDS(aaind2, aaind3, lo_value, hi_value); + break; + case 3: + NATIVE_SET_AAU_AAINDS(aaind3, aaind4, lo_value, hi_value); + break; + case 4: + NATIVE_SET_AAU_AAINDS(aaind4, aaind5, lo_value, hi_value); + break; + case 5: + NATIVE_SET_AAU_AAINDS(aaind5, aaind6, lo_value, hi_value); + break; + case 6: + NATIVE_SET_AAU_AAINDS(aaind6, aaind7, lo_value, hi_value); + break; + case 7: + NATIVE_SET_AAU_AAINDS(aaind7, aaind8, lo_value, hi_value); + break; + case 8: + NATIVE_SET_AAU_AAINDS(aaind8, aaind9, lo_value, hi_value); + break; + case 9: + NATIVE_SET_AAU_AAINDS(aaind9, aaind10, lo_value, hi_value); + break; + case 10: + NATIVE_SET_AAU_AAINDS(aaind10, aaind11, lo_value, hi_value); + break; + case 11: + NATIVE_SET_AAU_AAINDS(aaind11, aaind12, lo_value, hi_value); + break; + case 12: + NATIVE_SET_AAU_AAINDS(aaind12, aaind13, lo_value, hi_value); + break; + case 13: + NATIVE_SET_AAU_AAINDS(aaind13, aaind14, lo_value, hi_value); + break; + case 14: + NATIVE_SET_AAU_AAINDS(aaind14, aaind15, lo_value, hi_value); + break; + default: + BUG_AAU(); + } +} + +static inline u32 native_read_aaind_tags_reg_value(void) +{ + return NATIVE_GET_AAU_AAIND_TAG(); +} +static inline void native_write_aaind_tags_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AAIND_TAG(reg_value); +} +static inline u32 native_read_aaincr_reg_value_v2(int AAINCR_no) +{ + switch (AAINCR_no) { + case 0: return NATIVE_GET_AAU_AAINCR_V2(aaincr0); + case 1: return NATIVE_GET_AAU_AAINCR_V2(aaincr1); + case 2: return NATIVE_GET_AAU_AAINCR_V2(aaincr2); + case 3: return NATIVE_GET_AAU_AAINCR_V2(aaincr3); + case 4: return NATIVE_GET_AAU_AAINCR_V2(aaincr4); + case 5: return NATIVE_GET_AAU_AAINCR_V2(aaincr5); + case 6: return NATIVE_GET_AAU_AAINCR_V2(aaincr6); + case 7: return NATIVE_GET_AAU_AAINCR_V2(aaincr7); + default: + BUG_AAU(); + return 0; + } +} +static inline u64 native_read_aaincr_reg_value_v5(int AAINCR_no) +{ + switch (AAINCR_no) { + case 0: return NATIVE_GET_AAU_AAINCR_V5(aaincr0); + case 1: return NATIVE_GET_AAU_AAINCR_V5(aaincr1); + case 2: return NATIVE_GET_AAU_AAINCR_V5(aaincr2); + case 3: return NATIVE_GET_AAU_AAINCR_V5(aaincr3); + case 4: return NATIVE_GET_AAU_AAINCR_V5(aaincr4); + case 5: return NATIVE_GET_AAU_AAINCR_V5(aaincr5); + case 6: return NATIVE_GET_AAU_AAINCR_V5(aaincr6); + case 7: return NATIVE_GET_AAU_AAINCR_V5(aaincr7); + default: + BUG_AAU(); + return 0; + } +} +static inline void native_write_aaincr_reg_value(int AAINCR_no, u64 reg_value) +{ + switch (AAINCR_no) { + case 0: + NATIVE_SET_AAU_AAINCR(aaincr0, reg_value); + break; + case 1: + NATIVE_SET_AAU_AAINCR(aaincr1, reg_value); + break; + case 2: + NATIVE_SET_AAU_AAINCR(aaincr2, reg_value); + break; + case 3: + NATIVE_SET_AAU_AAINCR(aaincr3, reg_value); + break; + case 4: + 
NATIVE_SET_AAU_AAINCR(aaincr4, reg_value);
+ break;
+ case 5:
+ NATIVE_SET_AAU_AAINCR(aaincr5, reg_value);
+ break;
+ case 6:
+ NATIVE_SET_AAU_AAINCR(aaincr6, reg_value);
+ break;
+ case 7:
+ NATIVE_SET_AAU_AAINCR(aaincr7, reg_value);
+ break;
+ default:
+ BUG_AAU();
+ }
+}
+static inline u32 native_read_aaincr_tags_reg_value(void)
+{
+ return NATIVE_GET_AAU_AAINCR_TAG();
+}
+static inline void native_write_aaincr_tags_reg_value(u32 reg_value)
+{
+ NATIVE_SET_AAU_AAINCR_TAG(reg_value);
+}
+
+#define PREFIX_READ_AAINCRS_PAIR_VALUE(PV_TYPE, pv_type, ISET, iset, \
+ AAINCRs_pair, value1, value2) \
+({ \
+ switch (AAINCRs_pair) { \
+ case 0: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr0, aaincr1, \
+ value1, value2); \
+ break; \
+ case 1: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr1, aaincr2, \
+ value1, value2); \
+ break; \
+ case 2: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr2, aaincr3, \
+ value1, value2); \
+ break; \
+ case 3: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr3, aaincr4, \
+ value1, value2); \
+ break; \
+ case 4: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr4, aaincr5, \
+ value1, value2); \
+ break; \
+ case 5: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr5, aaincr6, \
+ value1, value2); \
+ break; \
+ case 6: \
+ PV_TYPE##_GET_AAU_AAINCRS_##ISET(aaincr6, aaincr7, \
+ value1, value2); \
+ break; \
+ case 7: \
+ value1 = PV_TYPE##_GET_AAU_AAINCR_##ISET(aaincr7); \
+ value2 = PV_TYPE##_GET_AAU_AAINCR_TAG(); \
+ break; \
+ default: \
+ BUG_AAU(); \
+ value1 = 0; \
+ value2 = 0; \
+ } \
+})
+#define NATIVE_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, lo_value, hi_value) \
+ PREFIX_READ_AAINCRS_PAIR_VALUE(NATIVE, native, V2, v2, \
+ AAINCRs_pair, lo_value, hi_value)
+#define NATIVE_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, lo_value, hi_value) \
+ PREFIX_READ_AAINCRS_PAIR_VALUE(NATIVE, native, V5, v5, \
+ AAINCRs_pair, lo_value, hi_value)
+#define PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(PV_TYPE, pv_type, \
+ reg_value, tags_value) \
+({ \
+ PV_TYPE##_GET_AAU_AAINCRS_V2(aaincr7, aaincr_tag, \
+ reg_value, tags_value); \
+})
+#define NATIVE_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(reg_value, tags_value) \
+ PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V2(NATIVE, native, \
+ reg_value, tags_value)
+#define PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(PV_TYPE, pv_type, \
+ reg_value, tags_value) \
+({ \
+ reg_value = PV_TYPE##_GET_AAU_AAINCR_V5(aaincr7); \
+ tags_value = PV_TYPE##_GET_AAU_AAINCR_TAG(); \
+})
+#define NATIVE_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(reg_value, tags_value) \
+ PREFIX_READ_AAINCR_REG7_AND_TAGS_VALUE_V5(NATIVE, native, \
+ reg_value, tags_value)
+
+static __always_inline void
+native_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value)
+{
+ switch (AAINCRs_pair) {
+ case 0:
+ NATIVE_SET_AAU_AAINCRS(aaincr0, aaincr1, lo_value, hi_value);
+ break;
+ case 1:
+ NATIVE_SET_AAU_AAINCRS(aaincr1, aaincr2, lo_value, hi_value);
+ break;
+ case 2:
+ NATIVE_SET_AAU_AAINCRS(aaincr2, aaincr3, lo_value, hi_value);
+ break;
+ case 3:
+ NATIVE_SET_AAU_AAINCRS(aaincr3, aaincr4, lo_value, hi_value);
+ break;
+ case 4:
+ NATIVE_SET_AAU_AAINCRS(aaincr4, aaincr5, lo_value, hi_value);
+ break;
+ case 5:
+ NATIVE_SET_AAU_AAINCRS(aaincr5, aaincr6, lo_value, hi_value);
+ break;
+ case 6:
+ NATIVE_SET_AAU_AAINCRS(aaincr6, aaincr7, lo_value, hi_value);
+ break;
+ default:
+ BUG_AAU();
+ }
+}
+
+static inline u32 native_read_aasti_reg_value_v2(int AASTI_no)
+{
+ switch (AASTI_no) {
+ case 0: return NATIVE_GET_AAU_AASTI_V2(aasti0);
+ case 1: return NATIVE_GET_AAU_AASTI_V2(aasti1);
+ case 2:
return NATIVE_GET_AAU_AASTI_V2(aasti2); + case 3: return NATIVE_GET_AAU_AASTI_V2(aasti3); + case 4: return NATIVE_GET_AAU_AASTI_V2(aasti4); + case 5: return NATIVE_GET_AAU_AASTI_V2(aasti5); + case 6: return NATIVE_GET_AAU_AASTI_V2(aasti6); + case 7: return NATIVE_GET_AAU_AASTI_V2(aasti7); + case 8: return NATIVE_GET_AAU_AASTI_V2(aasti8); + case 9: return NATIVE_GET_AAU_AASTI_V2(aasti9); + case 10: return NATIVE_GET_AAU_AASTI_V2(aasti10); + case 11: return NATIVE_GET_AAU_AASTI_V2(aasti11); + case 12: return NATIVE_GET_AAU_AASTI_V2(aasti12); + case 13: return NATIVE_GET_AAU_AASTI_V2(aasti13); + case 14: return NATIVE_GET_AAU_AASTI_V2(aasti14); + case 15: return NATIVE_GET_AAU_AASTI_V2(aasti15); + default: + BUG_AAU(); + return 0; + } +} +static inline u64 native_read_aasti_reg_value_v5(int AASTI_no) +{ + switch (AASTI_no) { + case 0: return NATIVE_GET_AAU_AASTI_V5(aasti0); + case 1: return NATIVE_GET_AAU_AASTI_V5(aasti1); + case 2: return NATIVE_GET_AAU_AASTI_V5(aasti2); + case 3: return NATIVE_GET_AAU_AASTI_V5(aasti3); + case 4: return NATIVE_GET_AAU_AASTI_V5(aasti4); + case 5: return NATIVE_GET_AAU_AASTI_V5(aasti5); + case 6: return NATIVE_GET_AAU_AASTI_V5(aasti6); + case 7: return NATIVE_GET_AAU_AASTI_V5(aasti7); + case 8: return NATIVE_GET_AAU_AASTI_V5(aasti8); + case 9: return NATIVE_GET_AAU_AASTI_V5(aasti9); + case 10: return NATIVE_GET_AAU_AASTI_V5(aasti10); + case 11: return NATIVE_GET_AAU_AASTI_V5(aasti11); + case 12: return NATIVE_GET_AAU_AASTI_V5(aasti12); + case 13: return NATIVE_GET_AAU_AASTI_V5(aasti13); + case 14: return NATIVE_GET_AAU_AASTI_V5(aasti14); + case 15: return NATIVE_GET_AAU_AASTI_V5(aasti15); + default: + BUG_AAU(); + return 0; + } +} +static inline void native_write_aasti_reg_value(int AASTI_no, u64 reg_value) +{ + switch (AASTI_no) { + case 0: + NATIVE_SET_AAU_AASTI(aasti0, reg_value); + break; + case 1: + NATIVE_SET_AAU_AASTI(aasti1, reg_value); + break; + case 2: + NATIVE_SET_AAU_AASTI(aasti2, reg_value); + break; + case 3: + NATIVE_SET_AAU_AASTI(aasti3, reg_value); + break; + case 4: + NATIVE_SET_AAU_AASTI(aasti4, reg_value); + break; + case 5: + NATIVE_SET_AAU_AASTI(aasti5, reg_value); + break; + case 6: + NATIVE_SET_AAU_AASTI(aasti6, reg_value); + break; + case 7: + NATIVE_SET_AAU_AASTI(aasti7, reg_value); + break; + case 8: + NATIVE_SET_AAU_AASTI(aasti8, reg_value); + break; + case 9: + NATIVE_SET_AAU_AASTI(aasti9, reg_value); + break; + case 10: + NATIVE_SET_AAU_AASTI(aasti10, reg_value); + break; + case 11: + NATIVE_SET_AAU_AASTI(aasti11, reg_value); + break; + case 12: + NATIVE_SET_AAU_AASTI(aasti12, reg_value); + break; + case 13: + NATIVE_SET_AAU_AASTI(aasti13, reg_value); + break; + case 14: + NATIVE_SET_AAU_AASTI(aasti14, reg_value); + break; + case 15: + NATIVE_SET_AAU_AASTI(aasti15, reg_value); + break; + default: + BUG_AAU(); + } +} +static inline u32 native_read_aasti_tags_reg_value(void) +{ + return NATIVE_GET_AAU_AASTI_TAG(); +} +static inline void native_write_aasti_tags_reg_value(u32 reg_value) +{ + NATIVE_SET_AAU_AASTI_TAG(reg_value); +} + +#define PREFIX_READ_AASTIS_PAIR_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AASTIs_pair, value1, value2) \ +({ \ + switch (AASTIs_pair) { \ + case 0: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti0, aasti1, \ + value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti1, aasti2, \ + value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti2, aasti3, \ + value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti3, aasti4, \ + value1, value2);\ + 
break; \ + case 4: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti4, aasti5, \ + value1, value2); \ + break; \ + case 5: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti5, aasti6, \ + value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti6, aasti7, \ + value1, value2); \ + break; \ + case 7: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti7, aasti8, \ + value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti8, aasti9, \ + value1, value2); \ + break; \ + case 9: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti9, aasti10, \ + value1, value2); \ + break; \ + case 10: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti10, aasti11, \ + value1, value2); \ + break; \ + case 11: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti11, aasti12, \ + value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti12, aasti13, \ + value1, value2); \ + break; \ + case 13: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti13, aasti14, \ + value1, value2); \ + break; \ + case 14: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti14, aasti15, \ + value1, value2); \ + break; \ + case 15: \ + PV_TYPE##_GET_AAU_AASTIS_##ISET(aasti15, aasti_tag, \ + value1, value2); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, lo_value, hi_value) \ + PREFIX_READ_AASTIS_PAIR_VALUE(NATIVE, native, V2, v2, \ + AASTIs_pair, lo_value, hi_value) +#define NATIVE_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, lo_value, hi_value) \ + PREFIX_READ_AASTIS_PAIR_VALUE(NATIVE, native, V5, v5, \ + AASTIs_pair, lo_value, hi_value) + +static __always_inline void +native_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value) +{ + switch (AASTIs_pair) { + case 0: + NATIVE_SET_AAU_AASTIS(aasti0, aasti1, lo_value, hi_value); + break; + case 1: + NATIVE_SET_AAU_AASTIS(aasti1, aasti2, lo_value, hi_value); + break; + case 2: + NATIVE_SET_AAU_AASTIS(aasti2, aasti3, lo_value, hi_value); + break; + case 3: + NATIVE_SET_AAU_AASTIS(aasti3, aasti4, lo_value, hi_value); + break; + case 4: + NATIVE_SET_AAU_AASTIS(aasti4, aasti5, lo_value, hi_value); + break; + case 5: + NATIVE_SET_AAU_AASTIS(aasti5, aasti6, lo_value, hi_value); + break; + case 6: + NATIVE_SET_AAU_AASTIS(aasti6, aasti7, lo_value, hi_value); + break; + case 7: + NATIVE_SET_AAU_AASTIS(aasti7, aasti8, lo_value, hi_value); + break; + case 8: + NATIVE_SET_AAU_AASTIS(aasti8, aasti9, lo_value, hi_value); + break; + case 9: + NATIVE_SET_AAU_AASTIS(aasti9, aasti10, lo_value, hi_value); + break; + case 10: + NATIVE_SET_AAU_AASTIS(aasti10, aasti11, lo_value, hi_value); + break; + case 11: + NATIVE_SET_AAU_AASTIS(aasti11, aasti12, lo_value, hi_value); + break; + case 12: + NATIVE_SET_AAU_AASTIS(aasti12, aasti13, lo_value, hi_value); + break; + case 13: + NATIVE_SET_AAU_AASTIS(aasti13, aasti14, lo_value, hi_value); + break; + case 14: + NATIVE_SET_AAU_AASTIS(aasti14, aasti15, lo_value, hi_value); + break; + case 15: + NATIVE_SET_AAU_AASTIS(aasti15, aasti_tag, lo_value, hi_value); + break; + default: + BUG_AAU(); + } +} + +#define PREFIX_READ_AALDI_REG_VALUE(PV_TYPE, pv_type, ISET, iset, \ + AALDI_no, value1, value2) \ +({ \ + switch (AALDI_no) { \ + case 0: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi0, value1, value2); \ + break; \ + case 1: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi1, value1, value2); \ + break; \ + case 2: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi2, value1, value2); \ + break; \ + case 3: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi3, value1, value2); \ + break; \ + case 4: \ + 
PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi4, value1, value2); \ + break; \ + case 5: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi5, value1, value2); \ + break; \ + case 6: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi6, value1, value2); \ + break; \ + case 7: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi7, value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi8, value1, value2); \ + break; \ + case 9: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi9, value1, value2); \ + break; \ + case 10: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi10, value1, value2); \ + break; \ + case 11: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi11, value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi12, value1, value2); \ + break; \ + case 13: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi13, value1, value2); \ + break; \ + case 14: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi14, value1, value2); \ + break; \ + case 15: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi15, value1, value2); \ + break; \ + case 16: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi16, value1, value2); \ + break; \ + case 17: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi17, value1, value2); \ + break; \ + case 18: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi18, value1, value2); \ + break; \ + case 19: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi19, value1, value2); \ + break; \ + case 20: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi20, value1, value2); \ + break; \ + case 21: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi21, value1, value2); \ + break; \ + case 22: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi22, value1, value2); \ + break; \ + case 23: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi23, value1, value2); \ + break; \ + case 24: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi24, value1, value2); \ + break; \ + case 25: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi25, value1, value2); \ + break; \ + case 26: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi26, value1, value2); \ + break; \ + case 27: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi27, value1, value2); \ + break; \ + case 28: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi28, value1, value2); \ + break; \ + case 29: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi29, value1, value2); \ + break; \ + case 30: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi30, value1, value2); \ + break; \ + case 31: \ + PV_TYPE##_GET_AAU_AALDI_##ISET(aaldi31, value1, value2); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AALDI_REG_VALUE_V2(AALDI_no, value1, value2) \ + PREFIX_READ_AALDI_REG_VALUE(NATIVE, native, V2, v2, \ + AALDI_no, value1, value2) +#define NATIVE_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2) \ + PREFIX_READ_AALDI_REG_VALUE(NATIVE, native, V5, v5, \ + AALDI_no, value1, value2) + +static inline void +native_read_aaldi_reg_value_v2(int AALDI_no, u64 *l_value, u64 *r_value) +{ + u32 value1, value2; + + NATIVE_READ_AALDI_REG_VALUE_V2(AALDI_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +native_read_aaldi_reg_value_v5(int AALDI_no, u64 *l_value, u64 *r_value) +{ + u64 value1, value2; + + NATIVE_READ_AALDI_REG_VALUE_V5(AALDI_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +native_write_aaldi_reg_value(int AALDI_no, u64 l_value, u64 r_value) +{ + switch (AALDI_no) { + case 0: + NATIVE_SET_AAU_AALDI(aaldi0, l_value, r_value); + break; + case 1: + NATIVE_SET_AAU_AALDI(aaldi1, l_value, r_value); + break; + case 2: + NATIVE_SET_AAU_AALDI(aaldi2, l_value, r_value); + break; + case 3: + NATIVE_SET_AAU_AALDI(aaldi3, 
l_value, r_value); + break; + case 4: + NATIVE_SET_AAU_AALDI(aaldi4, l_value, r_value); + break; + case 5: + NATIVE_SET_AAU_AALDI(aaldi5, l_value, r_value); + break; + case 6: + NATIVE_SET_AAU_AALDI(aaldi6, l_value, r_value); + break; + case 7: + NATIVE_SET_AAU_AALDI(aaldi7, l_value, r_value); + break; + case 8: + NATIVE_SET_AAU_AALDI(aaldi8, l_value, r_value); + break; + case 9: + NATIVE_SET_AAU_AALDI(aaldi9, l_value, r_value); + break; + case 10: + NATIVE_SET_AAU_AALDI(aaldi10, l_value, r_value); + break; + case 11: + NATIVE_SET_AAU_AALDI(aaldi11, l_value, r_value); + break; + case 12: + NATIVE_SET_AAU_AALDI(aaldi12, l_value, r_value); + break; + case 13: + NATIVE_SET_AAU_AALDI(aaldi13, l_value, r_value); + break; + case 14: + NATIVE_SET_AAU_AALDI(aaldi14, l_value, r_value); + break; + case 15: + NATIVE_SET_AAU_AALDI(aaldi15, l_value, r_value); + break; + case 16: + NATIVE_SET_AAU_AALDI(aaldi16, l_value, r_value); + break; + case 17: + NATIVE_SET_AAU_AALDI(aaldi17, l_value, r_value); + break; + case 18: + NATIVE_SET_AAU_AALDI(aaldi18, l_value, r_value); + break; + case 19: + NATIVE_SET_AAU_AALDI(aaldi19, l_value, r_value); + break; + case 20: + NATIVE_SET_AAU_AALDI(aaldi20, l_value, r_value); + break; + case 21: + NATIVE_SET_AAU_AALDI(aaldi21, l_value, r_value); + break; + case 22: + NATIVE_SET_AAU_AALDI(aaldi22, l_value, r_value); + break; + case 23: + NATIVE_SET_AAU_AALDI(aaldi23, l_value, r_value); + break; + case 24: + NATIVE_SET_AAU_AALDI(aaldi24, l_value, r_value); + break; + case 25: + NATIVE_SET_AAU_AALDI(aaldi25, l_value, r_value); + break; + case 26: + NATIVE_SET_AAU_AALDI(aaldi26, l_value, r_value); + break; + case 27: + NATIVE_SET_AAU_AALDI(aaldi27, l_value, r_value); + break; + case 28: + NATIVE_SET_AAU_AALDI(aaldi28, l_value, r_value); + break; + case 29: + NATIVE_SET_AAU_AALDI(aaldi29, l_value, r_value); + break; + case 30: + NATIVE_SET_AAU_AALDI(aaldi30, l_value, r_value); + break; + case 31: + NATIVE_SET_AAU_AALDI(aaldi31, l_value, r_value); + break; + default: + BUG_AAU(); + l_value = 0; + r_value = 0; + } +} + +#define PREFIX_READ_AALDAS_REG_VALUE(PV_TYPE, pv_type, \ + AALDAs_no, value1, value2) \ +({ \ + switch (AALDAs_no) { \ + case 0: \ + PV_TYPE##_GET_AAU_AALDA(aalda0, value1, value2); \ + break; \ + case 4: \ + PV_TYPE##_GET_AAU_AALDA(aalda4, value1, value2); \ + break; \ + case 8: \ + PV_TYPE##_GET_AAU_AALDA(aalda8, value1, value2); \ + break; \ + case 12: \ + PV_TYPE##_GET_AAU_AALDA(aalda12, value1, value2); \ + break; \ + case 16: \ + PV_TYPE##_GET_AAU_AALDA(aalda16, value1, value2); \ + break; \ + case 20: \ + PV_TYPE##_GET_AAU_AALDA(aalda20, value1, value2); \ + break; \ + case 24: \ + PV_TYPE##_GET_AAU_AALDA(aalda24, value1, value2); \ + break; \ + case 28: \ + PV_TYPE##_GET_AAU_AALDA(aalda28, value1, value2); \ + break; \ + default: \ + BUG_AAU(); \ + value1 = 0; \ + value2 = 0; \ + } \ +}) +#define NATIVE_READ_AALDAS_REG_VALUE(AALDAs_no, value1, value2) \ + PREFIX_READ_AALDAS_REG_VALUE(NATIVE, native, \ + AALDAs_no, value1, value2) + +static inline void +native_read_aaldas_reg_value(int AALDAs_no, u32 *l_value, u32 *r_value) +{ + u32 value1, value2; + + NATIVE_READ_AALDAS_REG_VALUE(AALDAs_no, value1, value2); + *l_value = value1; + *r_value = value2; +} + +static inline void +native_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value) +{ + switch (AALDAs_no) { + case 0: + NATIVE_SET_AAU_AALDA(aalda0, l_value, r_value); + break; + case 4: + NATIVE_SET_AAU_AALDA(aalda4, l_value, r_value); + break; + case 8: + NATIVE_SET_AAU_AALDA(aalda8, l_value, 
r_value); + break; + case 12: + NATIVE_SET_AAU_AALDA(aalda12, l_value, r_value); + break; + case 16: + NATIVE_SET_AAU_AALDA(aalda16, l_value, r_value); + break; + case 20: + NATIVE_SET_AAU_AALDA(aalda20, l_value, r_value); + break; + case 24: + NATIVE_SET_AAU_AALDA(aalda24, l_value, r_value); + break; + case 28: + NATIVE_SET_AAU_AALDA(aalda28, l_value, r_value); + break; + default: + BUG_AAU(); + l_value = 0; + r_value = 0; + } +} +static inline void native_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + NATIVE_GET_AAU_AALDM(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static __always_inline void native_write_aaldm_reg_value(u32 lo_value, + u32 hi_value) +{ + NATIVE_SET_AAU_AALDM(lo_value, hi_value); +} +static inline void native_read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + native_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi); +} +static __always_inline void native_write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + native_write_aaldm_reg_value(aaldm->lo, aaldm->hi); +} +static inline void native_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value) +{ + u32 value1, value2; + + NATIVE_GET_AAU_AALDV(value1, value2); + *lo_value = value1; + *hi_value = value2; +} +static __always_inline void native_write_aaldv_reg_value(u32 lo_value, + u32 hi_value) +{ + NATIVE_SET_AAU_AALDV(lo_value, hi_value); +} +static inline void native_read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + native_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi); +} +static __always_inline void native_write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + native_write_aaldv_reg_value(aaldv->lo, aaldv->hi); +} + +static inline void native_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + switch (AAD_no) { + case 0: + NATIVE_GET_AAU_AAD(aadr0, mem_p); + break; + case 1: + NATIVE_GET_AAU_AAD(aadr1, mem_p); + break; + case 2: + NATIVE_GET_AAU_AAD(aadr2, mem_p); + break; + case 3: + NATIVE_GET_AAU_AAD(aadr3, mem_p); + break; + case 4: + NATIVE_GET_AAU_AAD(aadr4, mem_p); + break; + case 5: + NATIVE_GET_AAU_AAD(aadr5, mem_p); + break; + case 6: + NATIVE_GET_AAU_AAD(aadr6, mem_p); + break; + case 7: + NATIVE_GET_AAU_AAD(aadr7, mem_p); + break; + case 8: + NATIVE_GET_AAU_AAD(aadr8, mem_p); + break; + case 9: + NATIVE_GET_AAU_AAD(aadr9, mem_p); + break; + case 10: + NATIVE_GET_AAU_AAD(aadr10, mem_p); + break; + case 11: + NATIVE_GET_AAU_AAD(aadr11, mem_p); + break; + case 12: + NATIVE_GET_AAU_AAD(aadr12, mem_p); + break; + case 13: + NATIVE_GET_AAU_AAD(aadr13, mem_p); + break; + case 14: + NATIVE_GET_AAU_AAD(aadr14, mem_p); + break; + case 15: + NATIVE_GET_AAU_AAD(aadr15, mem_p); + break; + case 16: + NATIVE_GET_AAU_AAD(aadr16, mem_p); + break; + case 17: + NATIVE_GET_AAU_AAD(aadr17, mem_p); + break; + case 18: + NATIVE_GET_AAU_AAD(aadr18, mem_p); + break; + case 19: + NATIVE_GET_AAU_AAD(aadr19, mem_p); + break; + case 20: + NATIVE_GET_AAU_AAD(aadr20, mem_p); + break; + case 21: + NATIVE_GET_AAU_AAD(aadr21, mem_p); + break; + case 22: + NATIVE_GET_AAU_AAD(aadr22, mem_p); + break; + case 23: + NATIVE_GET_AAU_AAD(aadr23, mem_p); + break; + case 24: + NATIVE_GET_AAU_AAD(aadr24, mem_p); + break; + case 25: + NATIVE_GET_AAU_AAD(aadr25, mem_p); + break; + case 26: + NATIVE_GET_AAU_AAD(aadr26, mem_p); + break; + case 27: + NATIVE_GET_AAU_AAD(aadr27, mem_p); + break; + case 28: + NATIVE_GET_AAU_AAD(aadr28, mem_p); + break; + case 29: + NATIVE_GET_AAU_AAD(aadr29, mem_p); + break; + case 30: + NATIVE_GET_AAU_AAD(aadr30, mem_p); + break; + case 31: + NATIVE_GET_AAU_AAD(aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +static inline 
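+/*
+ * Editor's sketch (not part of the original source): context-save code
+ * could dump all 32 AAD registers four at a time with the group
+ * accessor defined below, instead of 32 native_read_aad_reg() calls
+ * (the "aads" buffer is illustrative; AADs_no must be a multiple of 4,
+ * anything else hits BUG_AAU()):
+ *
+ *	e2k_aadj_t aads[32];
+ *	int i;
+ *
+ *	for (i = 0; i < 32; i += 4)
+ *		native_read_aads_4_reg(i, &aads[i]);
+ */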
void native_write_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + switch (AAD_no) { + case 0: + NATIVE_SET_AAU_AAD(aadr0, mem_p); + break; + case 1: + NATIVE_SET_AAU_AAD(aadr1, mem_p); + break; + case 2: + NATIVE_SET_AAU_AAD(aadr2, mem_p); + break; + case 3: + NATIVE_SET_AAU_AAD(aadr3, mem_p); + break; + case 4: + NATIVE_SET_AAU_AAD(aadr4, mem_p); + break; + case 5: + NATIVE_SET_AAU_AAD(aadr5, mem_p); + break; + case 6: + NATIVE_SET_AAU_AAD(aadr6, mem_p); + break; + case 7: + NATIVE_SET_AAU_AAD(aadr7, mem_p); + break; + case 8: + NATIVE_SET_AAU_AAD(aadr8, mem_p); + break; + case 9: + NATIVE_SET_AAU_AAD(aadr9, mem_p); + break; + case 10: + NATIVE_SET_AAU_AAD(aadr10, mem_p); + break; + case 11: + NATIVE_SET_AAU_AAD(aadr11, mem_p); + break; + case 12: + NATIVE_SET_AAU_AAD(aadr12, mem_p); + break; + case 13: + NATIVE_SET_AAU_AAD(aadr13, mem_p); + break; + case 14: + NATIVE_SET_AAU_AAD(aadr14, mem_p); + break; + case 15: + NATIVE_SET_AAU_AAD(aadr15, mem_p); + break; + case 16: + NATIVE_SET_AAU_AAD(aadr16, mem_p); + break; + case 17: + NATIVE_SET_AAU_AAD(aadr17, mem_p); + break; + case 18: + NATIVE_SET_AAU_AAD(aadr18, mem_p); + break; + case 19: + NATIVE_SET_AAU_AAD(aadr19, mem_p); + break; + case 20: + NATIVE_SET_AAU_AAD(aadr20, mem_p); + break; + case 21: + NATIVE_SET_AAU_AAD(aadr21, mem_p); + break; + case 22: + NATIVE_SET_AAU_AAD(aadr22, mem_p); + break; + case 23: + NATIVE_SET_AAU_AAD(aadr23, mem_p); + break; + case 24: + NATIVE_SET_AAU_AAD(aadr24, mem_p); + break; + case 25: + NATIVE_SET_AAU_AAD(aadr25, mem_p); + break; + case 26: + NATIVE_SET_AAU_AAD(aadr26, mem_p); + break; + case 27: + NATIVE_SET_AAU_AAD(aadr27, mem_p); + break; + case 28: + NATIVE_SET_AAU_AAD(aadr28, mem_p); + break; + case 29: + NATIVE_SET_AAU_AAD(aadr29, mem_p); + break; + case 30: + NATIVE_SET_AAU_AAD(aadr30, mem_p); + break; + case 31: + NATIVE_SET_AAU_AAD(aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +static __always_inline void native_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + switch (AADs_no) { + case 0: + NATIVE_GET_AAU_AADS(aadr0, aadr1, aadr2, aadr3, mem_p); + break; + case 4: + NATIVE_GET_AAU_AADS(aadr4, aadr5, aadr6, aadr7, mem_p); + break; + case 8: + NATIVE_GET_AAU_AADS(aadr8, aadr9, aadr10, aadr11, mem_p); + break; + case 12: + NATIVE_GET_AAU_AADS(aadr12, aadr13, aadr14, aadr15, mem_p); + break; + case 16: + NATIVE_GET_AAU_AADS(aadr16, aadr17, aadr18, aadr19, mem_p); + break; + case 20: + NATIVE_GET_AAU_AADS(aadr20, aadr21, aadr22, aadr23, mem_p); + break; + case 24: + NATIVE_GET_AAU_AADS(aadr24, aadr25, aadr26, aadr27, mem_p); + break; + case 28: + NATIVE_GET_AAU_AADS(aadr28, aadr29, aadr30, aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +static __always_inline void native_write_aads_4_reg(int AADs_no, + e2k_aadj_t *mem_p) +{ + switch (AADs_no) { + case 0: + NATIVE_SET_AAU_AADS(aadr0, aadr1, aadr2, aadr3, mem_p); + break; + case 4: + NATIVE_SET_AAU_AADS(aadr4, aadr5, aadr6, aadr7, mem_p); + break; + case 8: + NATIVE_SET_AAU_AADS(aadr8, aadr9, aadr10, aadr11, mem_p); + break; + case 12: + NATIVE_SET_AAU_AADS(aadr12, aadr13, aadr14, aadr15, mem_p); + break; + case 16: + NATIVE_SET_AAU_AADS(aadr16, aadr17, aadr18, aadr19, mem_p); + break; + case 20: + NATIVE_SET_AAU_AADS(aadr20, aadr21, aadr22, aadr23, mem_p); + break; + case 24: + NATIVE_SET_AAU_AADS(aadr24, aadr25, aadr26, aadr27, mem_p); + break; + case 28: + NATIVE_SET_AAU_AADS(aadr28, aadr29, aadr30, aadr31, mem_p); + break; + default: + BUG_AAU(); + } +} + +/* Clear AAU to prepare it for restoring. 
+ * Make this a macro to avoid include hell - it uses cpu_has() inside... */
+#define native_clear_apb() NATIVE_CLEAR_APB()
+
+#endif /* _NATIVE_AAU_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/native_cpu_regs_access.h b/arch/e2k/include/asm/native_cpu_regs_access.h
new file mode 100644
index 0000000..1146f39
--- /dev/null
+++ b/arch/e2k/include/asm/native_cpu_regs_access.h
@@ -0,0 +1,557 @@
+
+#ifndef _E2K_NATIVE_CPU_REGS_ACCESS_H_
+#define _E2K_NATIVE_CPU_REGS_ACCESS_H_
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include
+#include
+
+/*
+ * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD)
+ */
+
+#define NATIVE_READ_OSCUD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(oscud.lo)
+#define NATIVE_READ_OSCUD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(oscud.hi)
+
+#define NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(oscud.lo, OSCUD_lo_value, 5)
+#define NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(oscud.hi, OSCUD_hi_value, 5)
+#define NATIVE_WRITE_OSCUD_LO_REG(OSCUD_lo) \
+({ \
+ NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo.OSCUD_lo_half); \
+})
+#define NATIVE_NV_NOIRQ_WRITE_OSCUD_HI_REG(OSCUD_hi) \
+({ \
+ NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi.OSCUD_hi_half); \
+})
+#define NATIVE_WRITE_OSCUD_REG_VALUE(OSCUD_hi_value, OSCUD_lo_value) \
+({ \
+ NATIVE_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value); \
+ NATIVE_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value); \
+})
+#define NATIVE_WRITE_OSCUD_REG(OSCUD_hi, OSCUD_lo) \
+({ \
+ NATIVE_WRITE_OSCUD_REG_VALUE(OSCUD_hi.OSCUD_hi_half, \
+ OSCUD_lo.OSCUD_lo_half); \
+})
+
+/*
+ * Read/write low/high double-word OS Globals Register (OSGD)
+ */
+
+#define NATIVE_READ_OSGD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(osgd.lo)
+#define NATIVE_READ_OSGD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(osgd.hi)
+
+#define NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(osgd.lo, OSGD_lo_value, 5)
+#define NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(osgd.hi, OSGD_hi_value, 5)
+#define NATIVE_WRITE_OSGD_LO_REG(OSGD_lo) \
+({ \
+ NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo.OSGD_lo_half); \
+})
+#define NATIVE_WRITE_OSGD_HI_REG(OSGD_hi) \
+({ \
+ NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi.OSGD_hi_half); \
+})
+#define NATIVE_WRITE_OSGD_REG_VALUE(OSGD_hi_value, OSGD_lo_value) \
+({ \
+ NATIVE_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value); \
+ NATIVE_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value); \
+})
+#define NATIVE_WRITE_OSGD_REG(OSGD_hi, OSGD_lo) \
+({ \
+ NATIVE_WRITE_OSGD_REG_VALUE(OSGD_hi.OSGD_hi_half, \
+ OSGD_lo.OSGD_lo_half); \
+})
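+
+/*
+ * Editor's note with a usage sketch (not part of the original source):
+ * the composite *_WRITE_*_REG_VALUE() macros above always write the
+ * .hi half before the .lo half. A caller updating OSGD through the
+ * typed wrappers would look like this (the local variable names are
+ * illustrative only; the *_half field names follow the convention the
+ * macros themselves use):
+ *
+ *	osgd_hi.OSGD_hi_half = new_hi_value;
+ *	osgd_lo.OSGD_lo_half = new_lo_value;
+ *	NATIVE_WRITE_OSGD_REG(osgd_hi, osgd_lo);
+ */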
+
+/*
+ * Read/write low/high double-word Compilation Unit Register (CUD)
+ */
+
+#define NATIVE_READ_CUD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cud.lo)
+#define NATIVE_READ_CUD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cud.hi)
+
+#define NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(cud.lo, CUD_lo_value, 4)
+#define NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(cud.hi, CUD_hi_value, 4)
+#define NATIVE_WRITE_CUD_LO_REG(CUD_lo) \
+({ \
+ NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo.CUD_lo_half); \
+})
+#define NATIVE_WRITE_CUD_HI_REG(CUD_hi) \
+({ \
+ NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi.CUD_hi_half); \
+})
+#define NATIVE_WRITE_CUD_REG_VALUE(CUD_hi_value, CUD_lo_value) \
+({ \
+ NATIVE_WRITE_CUD_HI_REG_VALUE(CUD_hi_value); \
+ NATIVE_WRITE_CUD_LO_REG_VALUE(CUD_lo_value); \
+})
+#define NATIVE_WRITE_CUD_REG(CUD_hi, CUD_lo) \
+({ \
+ NATIVE_WRITE_CUD_REG_VALUE(CUD_hi.CUD_hi_half, CUD_lo.CUD_lo_half); \
+})
+
+/*
+ * Read/write low/high double-word Globals Register (GD)
+ */
+
+#define NATIVE_READ_GD_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(gd.lo)
+#define NATIVE_READ_GD_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(gd.hi)
+
+#define NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(gd.lo, GD_lo_value, 4)
+#define NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(gd.hi, GD_hi_value, 4)
+#define NATIVE_WRITE_GD_LO_REG(GD_lo) \
+ NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo.GD_lo_half)
+#define NATIVE_WRITE_GD_HI_REG(GD_hi) \
+ NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi.GD_hi_half)
+#define NATIVE_WRITE_GD_REG_VALUE(GD_hi_value, GD_lo_value) \
+({ \
+ NATIVE_WRITE_GD_HI_REG_VALUE(GD_hi_value); \
+ NATIVE_WRITE_GD_LO_REG_VALUE(GD_lo_value); \
+})
+#define NATIVE_WRITE_GD_REG(GD_hi, GD_lo) \
+({ \
+ NATIVE_WRITE_GD_REG_VALUE(GD_hi.GD_hi_half, GD_lo.GD_lo_half); \
+})
+
+/*
+ * Read/write low/high quad-word Procedure Stack Pointer Register (PSP)
+ */
+
+#define NATIVE_NV_READ_PSP_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(psp.lo)
+#define NATIVE_NV_READ_PSP_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(psp.hi)
+
+#define NATIVE_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \
+ NATIVE_SET_DSREG_OPEN(psp.lo, PSP_lo_value)
+#define NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(psp.hi, PSP_hi_value)
+
+/*
+ * Read/write word Procedure Stack Hardware Top Pointer (PSHTP)
+ */
+#define NATIVE_NV_READ_PSHTP_REG_VALUE() NATIVE_GET_DSREG_OPEN(pshtp)
+
+#define NATIVE_WRITE_PSHTP_REG_VALUE(PSHTP_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(pshtp, PSHTP_value, 5)
+
+/*
+ * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP)
+ */
+#define NATIVE_NV_READ_PCSP_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(pcsp.lo)
+#define NATIVE_NV_READ_PCSP_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(pcsp.hi)
+
+#define NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \
+ NATIVE_SET_DSREG_OPEN(pcsp.lo, PCSP_lo_value)
+#define NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(pcsp.hi, PCSP_hi_value)
+
+/*
+ * Read/write low/high quad-word Current Chain Register (CR0/CR1)
+ */
+#define NATIVE_NV_READ_CR0_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr0.lo)
+#define NATIVE_NV_READ_CR0_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr0.hi)
+#define NATIVE_NV_READ_CR1_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr1.lo)
+#define NATIVE_NV_READ_CR1_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(cr1.hi)
+
+#define NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(cr0.lo, CR0_lo_value)
+#define NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(cr0.hi, CR0_hi_value)
+#define NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(cr1.lo, CR1_lo_value)
+#define NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(cr1.hi, CR1_hi_value)
+
+/*
+ * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP)
+ */
+#define NATIVE_READ_PCSHTP_REG_SVALUE() \
+ PCSHTP_SIGN_EXTEND(NATIVE_GET_SREG_OPEN(pcshtp))
+
+#define NATIVE_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(pcshtp, PCSHTP_svalue, 5)
+
+/*
+ * Read/write double-word Control Transfer Preparation Registers
+ * (CTPR1/CTPR2/CTPR3)
+ */
+#define NATIVE_NV_READ_CTPR_REG_VALUE(reg_no) \
+ NATIVE_GET_DSREG_OPEN(ctpr##reg_no)
+
+#define NATIVE_READ_CTPR_HI_REG_VALUE(reg_no) \
+
NATIVE_GET_DSREG_CLOSED_CLOBBERS(ctpr##reg_no.hi, \ + __stringify(ctpr##reg_no)) + +#define NATIVE_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + NATIVE_SET_DSREG_OPEN(ctpr##reg_no, CTPR_value) + +#define NATIVE_WRITE_CTPR_HI_REG_VALUE(reg_no, value) \ + NATIVE_SET_DSREG_CLOSED_EXC_CLOBBERS(ctpr##reg_no.hi, value, \ + 4, __stringify(ctpr##reg_no)) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define NATIVE_READ_TIR_LO_REG_VALUE() NATIVE_GET_DSREG_CLOSED(tir.lo) +#define NATIVE_READ_TIR_HI_REG_VALUE() NATIVE_GET_DSREG_CLOSED(tir.hi) + +#define NATIVE_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(tir.lo, TIR_lo_value, 4) +#define NATIVE_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + NATIVE_SET_DSREG_CLOSED_NOEXC(tir.hi, TIR_hi_value, 4) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + * as the low/high word structure + */ +#define NATIVE_READ_TIR_LO_REG() \ +({ \ + e2k_tir_lo_t TIR_lo; \ + TIR_lo.TIR_lo_reg = NATIVE_READ_TIR_LO_REG_VALUE(); \ + TIR_lo; \ +}) +#define NATIVE_READ_TIR_HI_REG() \ +({ \ + e2k_tir_hi_t TIR_hi; \ + TIR_hi.TIR_hi_reg = NATIVE_READ_TIR_HI_REG_VALUE(); \ + TIR_hi; \ +}) +static inline e2k_tir_lo_t +native_read_TIR_lo_reg(void) +{ + return NATIVE_READ_TIR_LO_REG(); +} +static inline e2k_tir_hi_t +native_read_TIR_hi_reg(void) +{ + return NATIVE_READ_TIR_HI_REG(); +} + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define NATIVE_NV_READ_USD_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.lo) +#define NATIVE_NV_READ_USD_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.hi) + +#define NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + NATIVE_SET_DSREG_OPEN(usd.lo, USD_lo_value) +#define NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + NATIVE_SET_DSREG_OPEN(usd.hi, USD_hi_value) +#define NATIVE_NV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define NATIVE_NV_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + NATIVE_NV_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +#define NATIVE_NV_WRITE_USBR_USD_REG_VALUE(usbr, usd_hi, usd_lo) \ +do { \ + NATIVE_NV_WRITE_USBR_REG_VALUE(usbr); \ + NATIVE_NV_WRITE_USD_HI_REG_VALUE(usd_hi); \ + NATIVE_NV_WRITE_USD_LO_REG_VALUE(usd_lo); \ +} while (0) + +#define NATIVE_NV_WRITE_USBR_USD_REG(usbr, usd_hi, usd_lo) \ +do { \ + NATIVE_NV_WRITE_USBR_REG(usbr); \ + NATIVE_NV_WRITE_USD_HI_REG(usd_hi); \ + NATIVE_NV_WRITE_USD_LO_REG(usd_lo); \ +} while (0) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define NATIVE_NV_READ_PUSD_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.lo) +#define NATIVE_NV_READ_PUSD_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(usd.hi) + +#define NATIVE_NV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + NATIVE_SET_DSREG_OPEN(usd.lo, PUSD_lo_value) +#define NATIVE_NV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + NATIVE_SET_DSREG_OPEN(usd.hi, PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define NATIVE_NV_READ_USBR_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbr) +#define NATIVE_NV_READ_SBR_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbr) + +#define NATIVE_NV_WRITE_USBR_REG_VALUE(USBR_value) \ + NATIVE_SET_DSREG_OPEN(sbr, USBR_value) +#define NATIVE_NV_WRITE_SBR_REG_VALUE(SBR_value) \ + NATIVE_SET_DSREG_OPEN(sbr, SBR_value) +#define NATIVE_NV_WRITE_USBR_REG(USBR) \ + NATIVE_NV_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +/* + * Read/write 
double-word Window Descriptor Register (WD)
+ */
+#define NATIVE_READ_WD_REG_VALUE() NATIVE_GET_DSREG_OPEN(wd)
+
+#define NATIVE_WRITE_WD_REG_VALUE(WD_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(wd, WD_value, 5)
+
+/*
+ * Read/write double-word Loop Status Register (LSR/LSR1)
+ */
+#define NATIVE_READ_LSR_REG_VALUE() NATIVE_GET_DSREG_OPEN(lsr)
+#define NATIVE_READ_LSR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(lsr1)
+
+#define NATIVE_WRITE_LSR_REG_VALUE(LSR_value) \
+ NATIVE_SET_DSREG_OPEN(lsr, LSR_value)
+#define NATIVE_WRITE_LSR1_REG_VALUE(LSR1_value) \
+ NATIVE_SET_DSREG_OPEN(lsr1, LSR1_value)
+
+/*
+ * Read/write double-word Initial Loop Counters Register (ILCR/ILCR1)
+ */
+#define NATIVE_READ_ILCR_REG_VALUE() NATIVE_GET_DSREG_OPEN(ilcr)
+#define NATIVE_READ_ILCR1_REG_VALUE() NATIVE_GET_DSREG_CLOSED(ilcr1)
+
+#define NATIVE_WRITE_ILCR_REG_VALUE(ILCR_value) \
+ NATIVE_SET_DSREG_OPEN(ilcr, ILCR_value)
+#define NATIVE_WRITE_ILCR1_REG_VALUE(ILCR1_value) \
+ NATIVE_SET_DSREG_CLOSED_EXC(ilcr1, ILCR1_value, 4)
+
+/*
+ * Read/write OS register which points to the current process thread info
+ * structure (OSR0)
+ */
+#define NATIVE_NV_READ_OSR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(osr0)
+
+#define NATIVE_NV_WRITE_OSR0_REG_VALUE(osr0_value) \
+ NATIVE_SET_DSREG_OPEN(osr0, osr0_value)
+
+/*
+ * Read/write OS Entries Mask (OSEM)
+ */
+#define NATIVE_READ_OSEM_REG_VALUE() NATIVE_GET_SREG_CLOSED(osem)
+
+#define NATIVE_WRITE_OSEM_REG_VALUE(osem_value) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(osem, osem_value, 5)
+
+/*
+ * Read/write word Base Global Register (BGR)
+ */
+#define NATIVE_READ_BGR_REG_VALUE() NATIVE_GET_SREG_OPEN(bgr)
+
+#define NATIVE_WRITE_BGR_REG_VALUE(BGR_value) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(bgr, BGR_value, 5)
+
+/*
+ * Read/write CPU current clock register (CLKR)
+ */
+#define NATIVE_READ_CLKR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(clkr)
+#define NATIVE_WRITE_CLKR_REG_VALUE() \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(clkr, 0, 4)
+
+/*
+ * Read/Write system clock registers (SCLKR/SCLKM1/SCLKM2/SCLKM3)
+ */
+#define NATIVE_READ_SCLKR_REG_VALUE() NATIVE_GET_DSREG_OPEN(sclkr)
+#define NATIVE_READ_SCLKM1_REG_VALUE() NATIVE_GET_DSREG_OPEN(sclkm1)
+#define NATIVE_READ_SCLKM2_REG_VALUE() NATIVE_GET_DSREG_OPEN(sclkm2)
+#define NATIVE_READ_SCLKM3_REG_VALUE() NATIVE_GET_DSREG_CLOSED(sclkm3)
+
+#define NATIVE_WRITE_SCLKR_REG_VALUE(reg_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(sclkr, reg_value, 4)
+#define NATIVE_WRITE_SCLKM1_REG_VALUE(reg_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(sclkm1, reg_value, 4)
+#define NATIVE_WRITE_SCLKM2_REG_VALUE(reg_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(sclkm2, reg_value, 4)
+#define NATIVE_WRITE_SCLKM3_REG_VALUE(reg_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(sclkm3, reg_value, 4)
+
+extern unsigned long native_read_SCLKR_reg_value(void);
+extern unsigned long native_read_SCLKM1_reg_value(void);
+extern unsigned long native_read_SCLKM2_reg_value(void);
+extern void native_write_SCLKR_reg_value(unsigned long reg_value);
+extern void native_write_SCLKM1_reg_value(unsigned long reg_value);
+extern void native_write_SCLKM2_reg_value(unsigned long reg_value);
+
+/*
+ * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1)
+ */
+#define NATIVE_READ_CU_HW0_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cu_hw0)
+#define NATIVE_READ_CU_HW1_REG_VALUE() NATIVE_GET_DSREG_CLOSED(cu_hw1)
+
+#define NATIVE_WRITE_CU_HW0_REG_VALUE(reg) \
+ NATIVE_SET_DSREG_CLOSED_EXC(cu_hw0, reg, 5)
+#define NATIVE_WRITE_CU_HW1_REG_VALUE(reg) \
+ NATIVE_SET_DSREG_CLOSED_EXC(cu_hw1, reg, 5)
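+
+/*
+ * Editor's sketch (not part of the original source): CLKR above holds
+ * the current CPU clock count, so a crude cycle measurement can be
+ * built directly on the read macro:
+ *
+ *	u64 start = NATIVE_READ_CLKR_REG_VALUE();
+ *	... code under measurement ...
+ *	u64 cycles = NATIVE_READ_CLKR_REG_VALUE() - start;
+ *
+ * Note that NATIVE_WRITE_CLKR_REG_VALUE() takes no argument and always
+ * stores 0, i.e. it can only reset the counter.
+ */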
+
+/*
+ * Read/write low/high double-word Recovery point register (RPR)
+ */
+#define NATIVE_READ_RPR_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.lo)
+#define NATIVE_READ_RPR_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(rpr.hi)
+#define NATIVE_READ_SBBP_REG_VALUE() NATIVE_GET_DSREG_OPEN(sbbp)
+#define NATIVE_WRITE_SBBP_REG_VALUE(x) \
+ NATIVE_SET_DSREG_CLOSED_EXC(sbbp, (x), 0)
+
+#define NATIVE_WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \
+ NATIVE_SET_DSREG_OPEN(rpr.lo, RPR_lo_value)
+#define NATIVE_WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \
+ NATIVE_SET_DSREG_OPEN(rpr.hi, RPR_hi_value)
+
+/*
+ * Read double-word CPU current Instruction Pointer register (IP)
+ */
+#define NATIVE_READ_IP_REG_VALUE() NATIVE_GET_DSREG_CLOSED(ip)
+#define NATIVE_NV_READ_IP_REG_VALUE() NATIVE_GET_DSREG_OPEN(ip)
+
+/*
+ * Read/write debug and monitor registers
+ */
+#define NATIVE_READ_DIBCR_REG_VALUE() NATIVE_GET_SREG_CLOSED(dibcr)
+#define NATIVE_READ_DIBSR_REG_VALUE() NATIVE_GET_SREG_CLOSED(dibsr)
+#define NATIVE_READ_DIMCR_REG_VALUE() NATIVE_GET_DSREG_CLOSED(dimcr)
+#define NATIVE_READ_DIBAR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar0)
+#define NATIVE_READ_DIBAR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar1)
+#define NATIVE_READ_DIBAR2_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar2)
+#define NATIVE_READ_DIBAR3_REG_VALUE() NATIVE_GET_DSREG_OPEN(dibar3)
+#define NATIVE_READ_DIMAR0_REG_VALUE() NATIVE_GET_DSREG_OPEN(dimar0)
+#define NATIVE_READ_DIMAR1_REG_VALUE() NATIVE_GET_DSREG_OPEN(dimar1)
+
+#define NATIVE_WRITE_DIBCR_REG_VALUE(DIBCR_value) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(dibcr, DIBCR_value, 4)
+#define NATIVE_WRITE_DIBSR_REG_VALUE(DIBSR_value) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(dibsr, DIBSR_value, 4)
+#define NATIVE_WRITE_DIMCR_REG_VALUE(DIMCR_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dimcr, DIMCR_value, 4)
+#define NATIVE_WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dibar0, DIBAR0_value, 4)
+#define NATIVE_WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dibar1, DIBAR1_value, 4)
+#define NATIVE_WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dibar2, DIBAR2_value, 4)
+#define NATIVE_WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dibar3, DIBAR3_value, 4)
+#define NATIVE_WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dimar0, DIMAR0_value, 4)
+#define NATIVE_WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(dimar1, DIMAR1_value, 4)
+
+/*
+ * Read/write double-word Compilation Unit Table Register (CUTD/OSCUTD)
+ */
+#define NATIVE_NV_READ_CUTD_REG_VALUE() NATIVE_GET_DSREG_OPEN(cutd)
+#define NATIVE_READ_OSCUTD_REG_VALUE() NATIVE_GET_DSREG_CLOSED(oscutd)
+
+#define NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value) \
+ NATIVE_SET_DSREG_OPEN_NOIRQ(cutd, CUTD_value)
+#define NATIVE_WRITE_OSCUTD_REG_VALUE(CUTD_value) \
+ NATIVE_SET_DSREG_CLOSED_NOEXC(oscutd, CUTD_value, 5)
+
+/*
+ * Read/write word Compilation Unit Index Register (CUIR/OSCUIR)
+ */
+#define NATIVE_READ_CUIR_REG_VALUE() NATIVE_GET_SREG_CLOSED(cuir)
+#define NATIVE_READ_OSCUIR_REG_VALUE() NATIVE_GET_SREG_CLOSED(oscuir)
+#define NATIVE_WRITE_CUIR_REG_VALUE(x) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(cuir, (x), 5)
+#define NATIVE_WRITE_OSCUIR_REG_VALUE(x) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(oscuir, (x), 5)
+
+/*
+ * Read/write word Processor State Register (PSR)
+ */
+#define NATIVE_NV_READ_PSR_REG_VALUE() NATIVE_GET_SREG_OPEN(psr)
+
+#define NATIVE_WRITE_PSR_REG_VALUE(PSR_value) \
+ NATIVE_SET_SREG_CLOSED_EXC(psr, PSR_value, 5)
+#define NATIVE_WRITE_PSR_IRQ_BARRIER(psr_val) \
+ NATIVE_SET_PSR_IRQ_BARRIER(psr_val)
+
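+/*
+ * Editor's sketch (not part of the original source): PSR is read via
+ * NATIVE_GET_SREG_OPEN() but written via NATIVE_SET_SREG_CLOSED_EXC(),
+ * mirroring the pattern of the other state registers in this file.
+ * Assuming a wrapper union analogous to e2k_tir_lo_t earlier in this
+ * header (the e2k_psr_t/PSR_reg names here are hypothetical), a typed
+ * read helper would follow the same shape as native_read_TIR_lo_reg():
+ *
+ *	e2k_psr_t psr;
+ *	psr.PSR_reg = NATIVE_NV_READ_PSR_REG_VALUE();
+ */
+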
+/*
+ * Read/write word User Processor State Register (UPSR)
+ */
+/* UPSR is a byte-sized register, but Linux keeps the IRQ flags of
+ * arch_local_irq_save() in an unsigned long. To avoid casting to long
+ * (a redundant stx instruction) we can read it as a long register.
+ * Surprisingly, this also decreased the size of image.boot by 4096 bytes.
+ */
+#define NATIVE_NV_READ_UPSR_REG_VALUE() NATIVE_GET_DSREG_OPEN(upsr)
+#define NATIVE_WRITE_UPSR_REG_VALUE(UPSR_value) \
+ NATIVE_SET_SREG_CLOSED_EXC(upsr, UPSR_value, 4)
+#define NATIVE_WRITE_UPSR_IRQ_BARRIER(upsr_val) \
+ NATIVE_SET_UPSR_IRQ_BARRIER(upsr_val)
+
+/*
+ * Read/write word floating point control registers (PFPFR/FPCR/FPSR)
+ */
+#define NATIVE_NV_READ_PFPFR_REG_VALUE() NATIVE_GET_SREG_OPEN(pfpfr)
+#define NATIVE_NV_READ_FPCR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpcr)
+#define NATIVE_NV_READ_FPSR_REG_VALUE() NATIVE_GET_SREG_OPEN(fpsr)
+
+#define NATIVE_NV_WRITE_PFPFR_REG_VALUE(PFPFR_value) \
+ NATIVE_SET_SREG_OPEN(pfpfr, PFPFR_value)
+#define NATIVE_NV_WRITE_FPCR_REG_VALUE(FPCR_value) \
+ NATIVE_SET_SREG_OPEN(fpcr, FPCR_value)
+#define NATIVE_NV_WRITE_FPSR_REG_VALUE(FPSR_value) \
+ NATIVE_SET_SREG_OPEN(fpsr, FPSR_value)
+
+/*
+ * Read/write low/high double-word Intel segment registers (xS)
+ */
+
+#define NATIVE_READ_CS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(cs.lo)
+#define NATIVE_READ_CS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(cs.hi)
+#define NATIVE_READ_DS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(ds.lo)
+#define NATIVE_READ_DS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(ds.hi)
+#define NATIVE_READ_ES_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(es.lo)
+#define NATIVE_READ_ES_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(es.hi)
+#define NATIVE_READ_FS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(fs.lo)
+#define NATIVE_READ_FS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(fs.hi)
+#define NATIVE_READ_GS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(gs.lo)
+#define NATIVE_READ_GS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(gs.hi)
+#define NATIVE_READ_SS_LO_REG_VALUE() NATIVE_GET_DSREG_OPEN(ss.lo)
+#define NATIVE_READ_SS_HI_REG_VALUE() NATIVE_GET_DSREG_OPEN(ss.hi)
+
+#define NATIVE_CL_WRITE_CS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(cs.lo, sd)
+#define NATIVE_CL_WRITE_CS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(cs.hi, sd)
+#define NATIVE_CL_WRITE_DS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ds.lo, sd)
+#define NATIVE_CL_WRITE_DS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ds.hi, sd)
+#define NATIVE_CL_WRITE_ES_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(es.lo, sd)
+#define NATIVE_CL_WRITE_ES_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(es.hi, sd)
+#define NATIVE_CL_WRITE_FS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(fs.lo, sd)
+#define NATIVE_CL_WRITE_FS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(fs.hi, sd)
+#define NATIVE_CL_WRITE_GS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(gs.lo, sd)
+#define NATIVE_CL_WRITE_GS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(gs.hi, sd)
+#define NATIVE_CL_WRITE_SS_LO_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ss.lo, sd)
+#define NATIVE_CL_WRITE_SS_HI_REG_VALUE(sd) NATIVE_SET_DSREG_OPEN(ss.hi, sd)
+
+/*
+ * Read doubleword User Processor Identification Register (IDR)
+ */
+#define NATIVE_READ_IDR_REG_VALUE() NATIVE_GET_DSREG_OPEN(idr)
+
+/*
+ * Read/Write Processor Core Mode Register (CORE_MODE)
+ */
+#if __LCC__ > 125 || __LCC__ == 125 && __LCC_MINOR__ >= 8
+# define NATIVE_READ_CORE_MODE_REG_VALUE() NATIVE_GET_SREG_OPEN(core_mode)
+#else
+# define NATIVE_READ_CORE_MODE_REG_VALUE() NATIVE_GET_SREG_CLOSED(core_mode)
+#endif
+#define NATIVE_WRITE_CORE_MODE_REG_VALUE(modes) \
+ NATIVE_SET_SREG_CLOSED_NOEXC(core_mode, modes, 5)
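+
+/*
+ * Editor's sketch (not part of the original source): the UPSR
+ * convention described above keeps IRQ-flag handling cast-free even
+ * though UPSR itself is byte-sized:
+ *
+ *	unsigned long flags;
+ *
+ *	flags = NATIVE_NV_READ_UPSR_REG_VALUE();
+ *	... run with UPSR temporarily modified ...
+ *	NATIVE_WRITE_UPSR_REG_VALUE(flags);
+ *
+ * This only sketches the shape of arch_local_irq_save()/restore();
+ * the real implementation presumably also relies on the
+ * NATIVE_WRITE_UPSR_IRQ_BARRIER() form defined above.
+ */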
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_NATIVE_CPU_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/native_dcache_regs_access.h b/arch/e2k/include/asm/native_dcache_regs_access.h
new file mode 100644
index 0000000..383478d
--- /dev/null
+++ b/arch/e2k/include/asm/native_dcache_regs_access.h
@@ -0,0 +1,78 @@
+/*
+ * native E2K DCACHE structures & registers.
+ *
+ * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_NATIVE_DCACHE_REGS_ACCESS_H_
+#define _E2K_NATIVE_DCACHE_REGS_ACCESS_H_
+
+#ifndef __ASSEMBLY__
+#include
+
+#endif /* __ASSEMBLY__ */
+
+#include
+#include
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Flush DCACHE line
+ */
+static inline void NATIVE_FLUSH_DCACHE_LINE(unsigned long addr)
+{
+ ldst_rec_op_t opc = {
+ .fmt = 4,
+ .mas = MAS_DCACHE_LINE_FLUSH,
+ .prot = 1
+ };
+
+ NATIVE_RECOVERY_STORE(addr, 0x0, AW(opc), 2);
+}
+
+static inline void NATIVE_FLUSH_DCACHE_LINE_OFFSET(unsigned long addr, size_t offset)
+{
+ ldst_rec_op_t opc = {
+ .fmt = 4,
+ .mas = MAS_DCACHE_LINE_FLUSH,
+ .prot = 1
+ };
+
+ NATIVE_RECOVERY_STORE(addr, 0x0, AW(opc) | offset, 2);
+}
+
+/* This can be used in non-privileged mode (e.g. guest kernel) but
+ * must not be used on user addresses (this does not have .prot = 1) */
+#define NATIVE_FLUSH_DCACHE_LINE_UNPRIV(virt_addr) \
+ NATIVE_WRITE_MAS_D((virt_addr), 0, MAS_DCACHE_LINE_FLUSH)
+
+
+/*
+ * Clear DCACHE L1 set
+ */
+#define NATIVE_CLEAR_DCACHE_L1_SET(virt_addr, set) \
+ NATIVE_WRITE_MAS_D( \
+ mk_dcache_l1_addr((virt_addr), set, 1, 0), \
+ 0, MAS_DCACHE_L1_REG)
+
+/*
+ * Write DCACHE L2 registers
+ */
+#define NATIVE_WRITE_L2_REG(reg_val, reg_num, bank_num) \
+ NATIVE_WRITE_MAS_D( \
+ mk_dcache_l2_reg_addr(reg_num, bank_num), \
+ (reg_val), \
+ MAS_DCACHE_L2_REG)
+
+/*
+ * Read DCACHE L2 registers
+ */
+#define NATIVE_READ_L2_REG(reg_num, bank_num) \
+ NATIVE_READ_MAS_D( \
+ mk_dcache_l2_reg_addr(reg_num, bank_num), \
+ MAS_DCACHE_L2_REG)
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _E2K_NATIVE_DCACHE_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/native_mmu_regs_access.h b/arch/e2k/include/asm/native_mmu_regs_access.h
new file mode 100644
index 0000000..a949b76
--- /dev/null
+++ b/arch/e2k/include/asm/native_mmu_regs_access.h
@@ -0,0 +1,327 @@
+/*
+ * native E2K MMU structures & registers.
+ *
+ * Copyright 2014 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_NATIVE_MMU_REGS_ACCESS_H_
+#define _E2K_NATIVE_MMU_REGS_ACCESS_H_
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include
+#include
+#endif /* __ASSEMBLY__ */
+
+#include
+#include
+#include
+
+
+#undef DEBUG_MR_MODE
+#undef DebugMR
+#define DEBUG_MR_MODE 0 /* MMU registers access */
+#define DebugMR(...) \
DebugPrint(DEBUG_MR_MODE, ##__VA_ARGS__) + + +#ifndef __ASSEMBLY__ + +/* + * Write/read MMU register + */ +#define NATIVE_WRITE_MMU_REG(addr_val, reg_val) \ + NATIVE_WRITE_MAS_D((addr_val), (reg_val), MAS_MMU_REG) + +#define NATIVE_READ_MMU_REG(addr_val) \ + NATIVE_READ_MAS_D((addr_val), MAS_MMU_REG) +#define NATIVE_WRITE_MMU_CR(mmu_cr) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +#define NATIVE_WRITE_MMU_TRAP_POINT(mmu_tc) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO), \ + mmu_reg_val(mmu_tc)) +#define NATIVE_READ_MMU_TRAP_POINT() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO)) +#define NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(mmu_phys_ptb) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +#define NATIVE_READ_MMU_OS_PPTB_REG_VALUE() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO)) +#define NATIVE_WRITE_MMU_OS_VPTB_REG_VALUE(mmu_virt_ptb) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +#define NATIVE_READ_MMU_OS_VPTB_REG_VALUE() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO)) +#define NATIVE_WRITE_MMU_OS_VAB_REG_VALUE(kernel_offset) \ + NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO), \ + mmu_reg_val(kernel_offset)) +#define NATIVE_READ_MMU_OS_VAB_REG_VALUE() \ + NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO)) + +#define BOOT_NATIVE_WRITE_MMU_REG(addr_val, reg_val) \ + NATIVE_WRITE_MMU_REG(addr_val, reg_val) +#define BOOT_NATIVE_READ_MMU_REG(addr_val) \ + NATIVE_READ_MMU_REG(addr_val) + +#define BOOT_NATIVE_WRITE_MMU_CR(mmu_cr) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_CR_NO), \ + mmu_reg_val(mmu_cr)) +#define BOOT_NATIVE_WRITE_MMU_TRAP_POINT(mmu_tc) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO), \ + mmu_reg_val(mmu_tc)) +#define BOOT_NATIVE_READ_MMU_TRAP_POINT() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_TRAP_POINT_NO)) +#define BOOT_NATIVE_WRITE_MMU_OS_PPTB_REG_VALUE(mmu_phys_ptb) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO), \ + mmu_reg_val(mmu_phys_ptb)) +#define BOOT_NATIVE_READ_MMU_OS_PPTB_REG_VALUE() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_PPTB_NO)) +#define BOOT_NATIVE_WRITE_MMU_OS_VPTB_REG_VALUE(mmu_virt_ptb) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO), \ + mmu_reg_val(mmu_virt_ptb)) +#define BOOT_NATIVE_READ_MMU_OS_VPTB_REG_VALUE() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VPTB_NO)) +#define BOOT_NATIVE_WRITE_MMU_OS_VAB_REG_VALUE(kernel_offset) \ + BOOT_NATIVE_WRITE_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO), \ + mmu_reg_val(kernel_offset)) +#define BOOT_NATIVE_READ_MMU_OS_VAB_REG_VALUE() \ + BOOT_NATIVE_READ_MMU_REG( \ + _MMU_REG_NO_TO_MMU_ADDR_VAL(_MMU_OS_VAB_NO)) + +/* + * Write/read Data TLB register + */ +#define NATIVE_WRITE_DTLB_REG(tlb_addr, tlb_value) \ + NATIVE_WRITE_MAS_D((tlb_addr), (tlb_value), MAS_DTLB_REG) + +#define NATIVE_READ_DTLB_REG(tlb_addr) \ + NATIVE_READ_MAS_D((tlb_addr), MAS_DTLB_REG) + +/* + * Flush TLB page/entry + */ +#define NATIVE_FLUSH_TLB_ENTRY(flush_op, addr) \ + NATIVE_WRITE_MAS_D((flush_op), (addr), MAS_TLB_PAGE_FLUSH) + +/* + * Flush ICACHE line + */ +#define NATIVE_FLUSH_ICACHE_LINE(flush_op, addr) \ 
+ NATIVE_WRITE_MAS_D((flush_op), (addr), MAS_ICACHE_LINE_FLUSH) + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +#define NATIVE_FLUSH_CACHE_L12(flush_op) \ + NATIVE_WRITE_MAS_D((flush_op), (0), MAS_CACHE_FLUSH) + +static inline void +native_invalidate_CACHE_L12(void) +{ + int invalidate_supported; + unsigned long flags; + + DebugMR("Flush : Invalidate all CACHEs (op 0x%lx)\n", + _flush_op_invalidate_cache_L12); + + /* Invalidate operation was removed in E2S */ + invalidate_supported = NATIVE_IS_MACHINE_ES2; + + raw_all_irq_save(flags); + E2K_WAIT_MA; + if (invalidate_supported) + NATIVE_FLUSH_CACHE_L12(_flush_op_invalidate_cache_L12); + else + NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12); + E2K_WAIT_FLUSH; + raw_all_irq_restore(flags); +} + +static inline void +native_write_back_CACHE_L12(void) +{ + unsigned long flags; + + DebugMR("Flush : Write back all CACHEs (op 0x%lx)\n", + _flush_op_write_back_cache_L12); + raw_all_irq_save(flags); + E2K_WAIT_MA; + NATIVE_FLUSH_CACHE_L12(_flush_op_write_back_cache_L12); + E2K_WAIT_FLUSH; + raw_all_irq_restore(flags); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +#define NATIVE_FLUSH_TLB_ALL(flush_op) \ + NATIVE_WRITE_MAS_D((flush_op), (0), MAS_TLB_FLUSH) + +static inline void +native_flush_TLB_all(void) +{ + unsigned long flags; + + DebugMR("Flush all TLBs (op 0x%lx)\n", _flush_op_tlb_all); + raw_all_irq_save(flags); + E2K_WAIT_ST; + NATIVE_FLUSH_TLB_ALL(_flush_op_tlb_all); + E2K_WAIT(_fl_c | _ma_c); + raw_all_irq_restore(flags); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +#define NATIVE_FLUSH_ICACHE_ALL(flush_op) \ + NATIVE_WRITE_MAS_D((flush_op), (0), MAS_ICACHE_FLUSH) + +static inline void +native_flush_ICACHE_all(void) +{ + DebugMR("Flush all ICACHE op 0x%lx\n", _flush_op_icache_all); + E2K_WAIT_ST; + NATIVE_FLUSH_ICACHE_ALL(_flush_op_icache_all); + E2K_WAIT_FLUSH; +} + +/* + * Get Entry probe for virtual address + */ +#define NATIVE_ENTRY_PROBE_MMU_OP(addr_val) \ + NATIVE_READ_MAS_D((addr_val), MAS_ENTRY_PROBE) + +/* + * Get physical address for virtual address + */ +#define NATIVE_ADDRESS_PROBE_MMU_OP(addr_val) \ + NATIVE_READ_MAS_D((addr_val), MAS_VA_PROBE) + +/* + * Read CLW register + */ +#define NATIVE_READ_CLW_REG(clw_addr) \ + NATIVE_READ_MAS_D_5((clw_addr), MAS_CLW_REG) + +/* + * native MMU DEBUG registers access + */ +#define NATIVE_READ_MMU_DEBUG_REG(reg_mnemonic) \ + NATIVE_GET_MMUREG(reg_mnemonic) +#define NATIVE_WRITE_MMU_DEBUG_REG(reg_mnemonic, reg_value) \ + NATIVE_SET_MMUREG(reg_mnemonic, reg_value) +#define NATIVE_READ_DDBAR0_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddbar0) +#define NATIVE_READ_DDBAR1_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddbar1) +#define NATIVE_READ_DDBAR2_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddbar2) +#define NATIVE_READ_DDBAR3_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddbar3) +#define NATIVE_READ_DDBCR_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddbcr) +#define NATIVE_READ_DDBSR_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddbsr) +#define NATIVE_READ_DDMAR0_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddmar0) +#define NATIVE_READ_DDMAR1_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddmar1) +#define NATIVE_READ_DDMCR_REG_VALUE() \ + NATIVE_READ_MMU_DEBUG_REG(ddmcr) +#define NATIVE_WRITE_DDBAR0_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddbar0, value) +#define NATIVE_WRITE_DDBAR1_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddbar1, value) +#define 
NATIVE_WRITE_DDBAR2_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddbar2, value) +#define NATIVE_WRITE_DDBAR3_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddbar3, value) +#define NATIVE_WRITE_DDBCR_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddbcr, value) +#define NATIVE_WRITE_DDBSR_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddbsr, value) +#define NATIVE_WRITE_DDMAR0_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddmar0, value) +#define NATIVE_WRITE_DDMAR1_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddmar1, value) +#define NATIVE_WRITE_DDMCR_REG_VALUE(value) \ + NATIVE_WRITE_MMU_DEBUG_REG(ddmcr, value) + +#define NATIVE_READ_DDBAR0_REG() \ + NATIVE_READ_DDBAR0_REG_VALUE() +#define NATIVE_READ_DDBAR1_REG() \ + NATIVE_READ_DDBAR1_REG_VALUE() +#define NATIVE_READ_DDBAR2_REG() \ + NATIVE_READ_DDBAR2_REG_VALUE() +#define NATIVE_READ_DDBAR3_REG() \ + NATIVE_READ_DDBAR3_REG_VALUE() +#define NATIVE_READ_DDBCR_REG() \ +({ \ + e2k_ddbcr_t ddbcr; \ + \ + ddbcr.DDBCR_reg = NATIVE_READ_DDBCR_REG_VALUE(); \ + ddbcr; \ +}) +#define NATIVE_READ_DDBSR_REG() \ +({ \ + e2k_ddbsr_t ddbsr; \ + \ + ddbsr.DDBSR_reg = NATIVE_READ_DDBSR_REG_VALUE(); \ + ddbsr; \ +}) +#define NATIVE_READ_DDMAR0_REG() \ + NATIVE_READ_DDMAR0_REG_VALUE() +#define NATIVE_READ_DDMAR1_REG() \ + NATIVE_READ_DDMAR1_REG_VALUE() +#define NATIVE_READ_DDMCR_REG() \ +({ \ + e2k_ddmcr_t ddmcr; \ + \ + ddmcr.DDMCR_reg = NATIVE_READ_DDMCR_REG_VALUE(); \ + ddmcr; \ +}) +#define NATIVE_WRITE_DDBAR0_REG(value) \ + NATIVE_WRITE_DDBAR0_REG_VALUE(value) +#define NATIVE_WRITE_DDBAR1_REG(value) \ + NATIVE_WRITE_DDBAR1_REG_VALUE(value) +#define NATIVE_WRITE_DDBAR2_REG(value) \ + NATIVE_WRITE_DDBAR2_REG_VALUE(value) +#define NATIVE_WRITE_DDBAR3_REG(value) \ + NATIVE_WRITE_DDBAR3_REG_VALUE(value) +#define NATIVE_WRITE_DDBCR_REG(value) \ + NATIVE_WRITE_DDBCR_REG_VALUE(value.DDBCR_reg) +#define NATIVE_WRITE_DDBSR_REG(value) \ + NATIVE_WRITE_DDBSR_REG_VALUE(value.DDBSR_reg) +#define NATIVE_WRITE_DDMAR0_REG(value) \ + NATIVE_WRITE_DDMAR0_REG_VALUE(value) +#define NATIVE_WRITE_DDMAR1_REG(value) \ + NATIVE_WRITE_DDMAR1_REG_VALUE(value) +#define NATIVE_WRITE_DDMCR_REG(value) \ + NATIVE_WRITE_DDMCR_REG_VALUE(value.DDMCR_reg) + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_NATIVE_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/nbsr_v6_regs.h b/arch/e2k/include/asm/nbsr_v6_regs.h new file mode 100644 index 0000000..41f8e1c --- /dev/null +++ b/arch/e2k/include/asm/nbsr_v6_regs.h @@ -0,0 +1,294 @@ +#pragma once + +/* HC monitors */ +#define HC_MCR 0x360 +#define HC_MID 0x364 +#define HC_MAR0_LO 0x368 +#define HC_MAR0_HI 0x36c +#define HC_MAR1_LO 0x370 +#define HC_MAR1_HI 0x374 + +/* IOMMU monitors - all processors */ +#define IOMMU_MCR 0x3c0 +#define IOMMU_MID 0x3c4 +#define IOMMU_MAR0_LO 0x3c8 +#define IOMMU_MAR0_HI 0x3cc +#define IOMMU_MAR1_LO 0x3d0 +#define IOMMU_MAR1_HI 0x3d4 + +/* Additional IOMMU monitors - e2c3 only. + * EDBC_IOMMU_* registers are used only to broadcast + * writing into ED{26-31}_IOMMU_* registers. 
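+ *
+ * For example (an illustrative sketch; sic_write_node_nbsr_reg() is an
+ * assumed accessor, not defined in this file), a single broadcast write
+ *
+ *	sic_write_node_nbsr_reg(node, EDBC_IOMMU_MCR, mcr.word);
+ *
+ * would have the same effect as writing mcr.word into each of the
+ * ED26_IOMMU_MCR ... ED31_IOMMU_MCR registers in turn.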
*/ +#define EDBC_IOMMU_MCR 0x50c0 +#define EDBC_IOMMU_MID 0x50c4 +#define EDBC_IOMMU_MAR0_LO 0x50c8 +#define EDBC_IOMMU_MAR0_HI 0x50cc +#define EDBC_IOMMU_MAR1_LO 0x50d0 +#define EDBC_IOMMU_MAR1_HI 0x50d4 +#define ED26_IOMMU_MCR 0x5d40 +#define ED26_IOMMU_MID 0x5d44 +#define ED26_IOMMU_MAR0_LO 0x5d48 +#define ED26_IOMMU_MAR0_HI 0x5d4c +#define ED26_IOMMU_MAR1_LO 0x5d50 +#define ED26_IOMMU_MAR1_HI 0x5d54 +#define ED27_IOMMU_MCR 0x5dc0 +#define ED27_IOMMU_MID 0x5dc4 +#define ED27_IOMMU_MAR0_LO 0x5dc8 +#define ED27_IOMMU_MAR0_HI 0x5dcc +#define ED27_IOMMU_MAR1_LO 0x5dd0 +#define ED27_IOMMU_MAR1_HI 0x5dd4 +#define ED28_IOMMU_MCR 0x5e40 +#define ED28_IOMMU_MID 0x5e44 +#define ED28_IOMMU_MAR0_LO 0x5e48 +#define ED28_IOMMU_MAR0_HI 0x5e4c +#define ED28_IOMMU_MAR1_LO 0x5e50 +#define ED28_IOMMU_MAR1_HI 0x5e54 +#define ED29_IOMMU_MCR 0x5ec0 +#define ED29_IOMMU_MID 0x5ec4 +#define ED29_IOMMU_MAR0_LO 0x5ec8 +#define ED29_IOMMU_MAR0_HI 0x5ecc +#define ED29_IOMMU_MAR1_LO 0x5ed0 +#define ED29_IOMMU_MAR1_HI 0x5ed4 +#define ED30_IOMMU_MCR 0x5f40 +#define ED30_IOMMU_MID 0x5f44 +#define ED30_IOMMU_MAR0_LO 0x5f48 +#define ED30_IOMMU_MAR0_HI 0x5f4c +#define ED30_IOMMU_MAR1_LO 0x5f50 +#define ED30_IOMMU_MAR1_HI 0x5f54 +#define ED31_IOMMU_MCR 0x5fc0 +#define ED31_IOMMU_MID 0x5fc4 +#define ED31_IOMMU_MAR0_LO 0x5fc8 +#define ED31_IOMMU_MAR0_HI 0x5fcc +#define ED31_IOMMU_MAR1_LO 0x5fd0 +#define ED31_IOMMU_MAR1_HI 0x5fd4 + +/* MC monitors */ +#define MC_CH 0x400 +#define MC_STATUS 0x44c +#define MC_MON_CTL 0x450 +#define MC_MON_CTR0 0x454 +#define MC_MON_CTR1 0x458 +#define MC_MON_CTRext 0x45c + +/* HMU monitors */ +#define HMU_MIC 0xd00 +#define HMU_MCR 0xd14 +#define HMU0_MAR0_LO 0xd44 +#define HMU0_MAR0_HI 0xd48 +#define HMU0_MAR1_LO 0xd4c +#define HMU0_MAR1_HI 0xd50 +#define HMU1_MAR0_LO 0xd74 +#define HMU1_MAR0_HI 0xd78 +#define HMU1_MAR1_LO 0xd7c +#define HMU1_MAR1_HI 0xd80 +#define HMU2_MAR0_LO 0xda4 +#define HMU2_MAR0_HI 0xda8 +#define HMU2_MAR1_LO 0xdac +#define HMU2_MAR1_HI 0xdb0 +#define HMU3_MAR0_LO 0xdd4 +#define HMU3_MAR0_HI 0xdd8 +#define HMU3_MAR1_LO 0xddc +#define HMU3_MAR1_HI 0xde0 + +/* PREPIC monitors */ +#define PREPIC_MCR 0x8070 +#define PREPIC_MID 0x8074 +#define PREPIC_MAR0_LO 0x8080 +#define PREPIC_MAR0_HI 0x8084 +#define PREPIC_MAR1_LO 0x8090 +#define PREPIC_MAR1_HI 0x8094 + +/* + * HC monitor control register (HC_MCR) + */ +typedef union { + struct { + u32 v0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 v1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 __unused3 : 16; + }; + u32 word; +} e2k_hc_mcr_t; + +/* + * HC monitor ID register (HC_MID) + */ +typedef union { + struct { + u32 id0 : 16; + u32 id1 : 16; + }; + u32 word; +} e2k_hc_mid_t; + +/* + * IOMMU monitor control register (IOMMU_MCR) + */ +typedef union { + struct { + u32 v0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 v1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 __unused3 : 16; + }; + u32 word; +} e2k_iommu_mcr_t; + +/* + * IOMMU monitor ID register (IOMMU_MID) + */ +typedef union { + struct { + u32 id0 : 16; + u32 id1 : 16; + }; + u32 word; +} e2k_iommu_mid_t; + +/* + * MC status register (MC_STATUS) + */ +typedef union { + struct { + u32 ecc_err : 1; + u32 ddrint_err : 1; + u32 phyccm_par_err : 1; + u32 dmem_par_err : 1; + u32 bridge_par_err : 1; + u32 phy_interrupt : 1; + u32 phy_init_complete : 1; + u32 dfi_par_err : 1; + u32 meminit_finish : 1; + u32 mon0_of : 1; + u32 mon1_of : 1; + u32 dfi_err : 1; + u32 dfi_err_info : 1; + u32 par_alert_delay : 6; + u32 rst_done : 1; + u32 wrcrc_aleert_delay : 6; + u32 __unused : 
6; + }; + u32 word; +} e2k_mc_status_t; + +/* + * MC channel select register (MC_CH) + */ +typedef union { + struct { + u32 n : 4; + u32 __unused : 28; + }; + u32 word; +} e2k_mc_ch_t; + +/* + * MC monitor control register (MC_MON_CTL) + */ +typedef union { + struct { + u32 rst0 : 1; + u32 rst1 : 1; + u32 frz0 : 1; + u32 frz1 : 1; + u32 ld0 : 1; + u32 ld1 : 1; + u32 es0 : 5; + u32 es1 : 5; + u32 lb0 : 8; + u32 lb1 : 8; + }; + struct { + u32 __pad : 16; + u32 ba0 : 2; + u32 bg0 : 2; + u32 cid0 : 3; + u32 all0 : 1; + u32 ba1 : 2; + u32 bg1 : 2; + u32 cid1 : 3; + u32 all1 : 1; + }; + u32 word; +} e2k_mc_mon_ctl_t; + +/* + * MC monitor #0,1 counter high (MC_MON_CTRext) + */ +typedef union { + u16 cnt[2]; + u32 word; +} e2k_mc_mon_ctrext_t; + + +/* + * HMU memory interleaving control register (HMU_MIC) + */ +typedef union { + struct { + u32 mcil_bit0 : 6; + u32 mcil_bit1 : 6; + u32 mcil_bit2 : 6; + u32 mcil_bit3 : 6; + u32 mcen : 8; + }; + u32 word; +} e2k_hmu_mic_t; + +/* + * HMU monitor control register (HMU_MCR) + */ +typedef union { + struct { + u32 v0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 v1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 flt0_off : 1; + u32 flt0_rqid : 7; + u32 flt0_cid : 1; + u32 flt0_bid : 1; + u32 flt0_xid : 1; + u32 flt1_off : 1; + u32 flt1_node : 2; + u32 flt1_rnode : 1; + u32 __unused3 : 1; + }; + u32 word; +} e2k_hmu_mcr_t; + +/* + * PREPIC monitor control register (PREPIC_MCR) + */ +typedef union { + struct { + u32 vc0 : 1; + u32 __unused1 : 1; + u32 es0 : 6; + u32 vc1 : 1; + u32 __unused2 : 1; + u32 es1 : 6; + u32 __unused3 : 16; + }; + u32 word; +} e2k_prepic_mcr_t; + +/* + * PREPIC monitor ID register (PREPIC_MID) + */ +typedef union { + struct { + u32 id0 : 16; + u32 id1 : 16; + }; + u32 word; +} e2k_prepic_mid_t; diff --git a/arch/e2k/include/asm/nmi.h b/arch/e2k/include/asm/nmi.h new file mode 100644 index 0000000..0984dba --- /dev/null +++ b/arch/e2k/include/asm/nmi.h @@ -0,0 +1,95 @@ +#ifndef _ASM_E2K_NMI_H +#define _ASM_E2K_NMI_H + +#include + +/* + * ATTENTION nmi_call_function_xxx() are actually more limited + * than smp_call_function_xxx(). + * + * 1) You cannot use ANY drivers (since they are usually NOT async-safe). + * + * 2) You cannot use printk() (as a consequence of 1). + * + * 3) Function must be fast and non-blocking. + * + * So instead of using printk() it is better to save your message + * into a temporary buffer and later print that buffer from the function + * which called nmi_call_function_xxx(). 
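+ *
+ * A minimal sketch of that buffer pattern (illustrative only: the
+ * handler, buffer and timeout below are invented for this example):
+ *
+ *	static char nmi_buf[NR_CPUS][64];
+ *
+ *	static void nmi_collect(void *unused)
+ *	{
+ *		int cpu = raw_smp_processor_id();
+ *
+ *		snprintf(nmi_buf[cpu], sizeof(nmi_buf[cpu]), "cpu %d", cpu);
+ *	}
+ *
+ *	...
+ *	nmi_on_each_cpu(nmi_collect, NULL, 1, 30000);
+ *	for_each_online_cpu(cpu)
+ *		pr_info("%s\n", nmi_buf[cpu]);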
+ */ +extern void nmi_call_function_init(void); +extern void nmi_call_function_interrupt(void); +#ifdef CONFIG_SMP +extern int nmi_call_function(void (*func)(void *), void *info, int wait, + int timeout_msec); +extern int nmi_call_function_mask(const cpumask_t *mask, void (*func)(void *), + void *info, int wait, int timeout_msec); +extern int nmi_call_function_single(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec); +extern int nmi_call_function_single_offline(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec); +# define nmi_on_each_cpu(func, info, wait, timeout_msec) \ + ({ \ + unsigned long __flags; \ + WARN_ON_ONCE(raw_nmi_irqs_disabled()); \ + raw_local_irq_save(__flags); \ + nmi_call_function(func, info, wait, timeout_msec); \ + raw_all_irq_disable(); \ + func(info); \ + if (!raw_irqs_disabled_flags(__flags)) \ + trace_hardirqs_on(); \ + raw_all_irq_restore(__flags); \ + 0; \ + }) +#else +static inline int nmi_call_function_mask(const cpumask_t *mask, + void (*func)(void *), void *info, int wait, int timeout_msec) +{ + unsigned long flags; + + if (cpumask_test_cpu(0, mask)) { + raw_all_irq_save(flags); + func(info); + raw_all_irq_restore(flags); + } + + return 0; +} + +static inline int nmi_call_function_single(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec) +{ + unsigned long flags; + + WARN_ON(cpu != 0); + + raw_all_irq_save(flags); + func(info); + raw_all_irq_restore(flags); + + return 0; +} + +static inline int nmi_call_function_single_offline(int cpu, void (*func)(void *), + void *info, int wait, int timeout_msec) +{ + BUG(); +} + +static inline int up_nmi_call_function(void (*func)(void *), void *info) +{ + return 0; +} +# define nmi_call_function(func, info, wait, timeout) \ + (up_nmi_call_function(func, info)) +# define nmi_on_each_cpu(func, info, wait, timeout) \ + ({ \ + unsigned long __flags; \ + raw_all_irq_save(__flags); \ + func(info); \ + raw_all_irq_restore(__flags); \ + 0; \ + }) +#endif + +#endif /* _ASM_E2K_NMI_H */ diff --git a/arch/e2k/include/asm/numnodes.h b/arch/e2k/include/asm/numnodes.h new file mode 100644 index 0000000..1a06803 --- /dev/null +++ b/arch/e2k/include/asm/numnodes.h @@ -0,0 +1,124 @@ +#ifndef _E2K_NUMNODES_H +#define _E2K_NUMNODES_H + +#include +#include + +#if NODES_SHIFT > 0 + +#ifndef DEBUG_NODES_MODE +#define DEBUG_NODES_MODE 0 /* for each nodes from */ +#endif /* ! DEBUG_NODES_MODE */ +#define DebugN(...) DebugPrintCont(DEBUG_NODES_MODE ,##__VA_ARGS__) + +/* + * The define can be used only for preempt disable mode + * or argument 'from' is not dinamicaly recalculated expression + * as numa_node_id() + */ +#define for_each_node_mask_from_not_preempt(node, from, mask) \ + for ((node) = (from), \ + ({DebugN("for node %d = from %d, mask " \ + "0x%lx\n", node, from, mask.bits[0]); }), \ + ((!node_isset((node), (mask))) ? \ + ({DebugN(" node is not set "); \ + (node) = next_node((node), (mask)) \ + ; DebugN("so node is next %d\n", node);}) \ + : \ + ({DebugN(" node is set\n"); \ + (node);})); \ + ( ({DebugN("while node %d >= from %d ", node, from); \ + ((node) >= (from));}) ? ({DebugN(" ? node %d < " \ + "MAX_NUMNODES %d\n", \ + node, MAX_NUMNODES); \ + ((node) < MAX_NUMNODES);}) \ + : \ + ({DebugN(" : node < from\n"); \ + ((node) < (from));})); \ + (({DebugN("next: node %d >= from %d", node, from); \ + ((node) >= (from));}) ? \ + ({DebugN(" ? 
"); \ + ((node) = next_node((node), (mask))); \ + DebugN("node = next %d", node); \ + DebugN(" node %d >= MAX_NUMNODES %d ", \ + node, MAX_NUMNODES); \ + (node) >= MAX_NUMNODES;}) ? \ + ({DebugN(" ? "); \ + (({((node) = first_node((mask))); \ + DebugN("node = first %d >= from %d", \ + node, from); \ + (node) >= (from);}) ? \ + ({DebugN(" ? node = MAX_NUMNODES " \ + "%d\n", MAX_NUMNODES); \ + (node) = MAX_NUMNODES;}) \ + : \ + ({ DebugN(" : node %d\n", node); \ + (node);}));}) \ + : \ + ({DebugN(" : node %d\n", node); \ + (node);}) \ + : \ + ({DebugN(" : "); \ + ({((node) = next_node((node), (mask))); \ + DebugN("node = next %d", node); \ + (node) >= (from);}) ? \ + ({DebugN(" ? node = MAX_NUMNODES %d\n", \ + MAX_NUMNODES); \ + (node) = MAX_NUMNODES;}) \ + : \ + ({DebugN(" : node %d\n", node); \ + (node);});}))) +/* + * The define can be used at preempt enable mode, but you should pass + * additional temporary variable to keep 'from' value + */ +#define for_each_node_mask_from_preempt(node, from, mask, tmp_from) \ + tmp_from = (from); \ + for_each_node_mask_from_not_preempt((node), tmp_from, (mask)) +#else /* NODES_SHIFT == 0 */ +#define for_each_node_mask_from_not_preempt(node, from, mask) \ + for ((node) = (from); (node) < 1; (node)++) +#define for_each_node_mask_from_preempt(node, from, mask, tmp_from) \ + for_each_node_mask_from_not_preempt((node), (from), (mask)) +#endif /* NODES_SHIFT > 0 */ + +#define for_each_cpu_of_node(node, cpu, cpu_mask) \ + cpu_mask = node_to_present_cpumask(node); \ + for_each_cpu(cpu, &cpu_mask) + +#define for_each_node_from_not_preempt(node, from) \ + for_each_node_mask_from_not_preempt((node), (from), \ + node_possible_map) +#define for_each_node_from_preempt(node, from, tmp_from) \ + for_each_node_mask_from_preempt((node), (from), \ + node_possible_map, (tmp_from)) +#define for_each_online_node_from_not_preempt(node, from) \ + for_each_node_mask_from_not_preempt((node), (from), \ + node_online_map) +#define for_each_online_node_from_preempt(node, from, tmp_from) \ + for_each_node_mask_from_preempt((node), (from), \ + node_online_map, (tmp_from)) +#ifdef CONFIG_NUMA +#define calculate_node_has_not_dup_kernel_map(node_mask) \ +({ \ + nodemask_t node_present_map; \ + nodes_clear(node_present_map); \ + memcpy(node_present_map.bits, &phys_nodes_map, \ + sizeof(phys_nodes_map)); \ + nodes_andnot((node_mask), node_present_map, \ + node_has_dup_kernel_map); \ +}) +#define node_has_dup_kernel(nid) \ + node_isset((nid), node_has_dup_kernel_map) +#define for_each_node_has_dup_kernel(node) \ + for_each_node_mask((node), node_has_dup_kernel_map) +#define for_each_node_has_not_dup_kernel(node, node_mask) \ + calculate_node_has_not_dup_kernel_map((node_mask)); \ + for_each_node_mask((node), (node_mask)) +#else /* ! 
CONFIG_NUMA */ +#define node_has_dup_kernel(nid) ((nid) == 0) +#define for_each_node_has_dup_kernel(node) \ + for ((node) = 0; (node) < 1; (node)++) +#endif /* CONFIG_NUMA */ + +#endif /* _E2K_NUMNODES_H */ diff --git a/arch/e2k/include/asm/of_device.h b/arch/e2k/include/asm/of_device.h new file mode 100644 index 0000000..342357a --- /dev/null +++ b/arch/e2k/include/asm/of_device.h @@ -0,0 +1,8 @@ + +#ifndef E2K_OF_DEVICE_H +#define E2K_OF_DEVICE_H + +#include + +#endif + diff --git a/arch/e2k/include/asm/of_platform.h b/arch/e2k/include/asm/of_platform.h new file mode 100644 index 0000000..d0abb27 --- /dev/null +++ b/arch/e2k/include/asm/of_platform.h @@ -0,0 +1,4 @@ + +/* + * It is empty file just because it required to be included + */ diff --git a/arch/e2k/include/asm/openprom.h b/arch/e2k/include/asm/openprom.h new file mode 100644 index 0000000..e7f6988 --- /dev/null +++ b/arch/e2k/include/asm/openprom.h @@ -0,0 +1,47 @@ +/* $Id: openprom.h,v 1.1 2005/12/22 16:14:19 alexmipt Exp $ + * openprom.h: Prom structures and defines for access to the OPENBOOT + * prom routines and data areas. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2005 Alexander Shmelev (ashmelev@task.sun.mcst.ru) + */ + +#ifndef __OPENPROM_H +#define __OPENPROM_H + +/* Routines for traversing the prom device tree. */ +struct linux_nodeops { + int (*no_nextnode)(int node); + int (*no_child)(int node); + int (*no_proplen)(int node, char *name); + int (*no_getprop)(int node, char *name, char *val); + int (*no_setprop)(int node, char *name, char *val, int len); + char * (*no_nextprop)(int node, char *name); +}; + +/* More fun PROM structures for device probing. */ +#define PROMREG_MAX 16 +#define PROMVADDR_MAX 16 +#define PROMINTR_MAX 15 + +struct linux_prom_registers { + u32 which_io; /* Registers space */ + u32 phys_addr; /* The physical address of this register */ + u32 reg_size; /* How many bytes does this register take up? */ +}; + +/* Element of the "ranges" vector */ +struct linux_prom_ranges { + u32 ot_child_space; + u32 ot_child_base; /* Bus feels this */ + u32 ot_parent_space; + u32 ot_parent_base; /* CPU looks from here */ + u32 or_size; +}; + +struct linux_prom_irqs { + u32 pri; /* IRQ priority */ + u32 vector; /* This is foobar, what does it do? */ +}; + +#endif /* !(__OPENPROM_H) */ diff --git a/arch/e2k/include/asm/oplib.h b/arch/e2k/include/asm/oplib.h new file mode 100644 index 0000000..b9b49c8 --- /dev/null +++ b/arch/e2k/include/asm/oplib.h @@ -0,0 +1,90 @@ +/* $Id: oplib.h,v 1.2 2007/09/05 12:05:52 kostin Exp $ + * oplib.h: Describes the interface and available routines in the + * Linux Prom library. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2005 Alexander Shmelev (ashmelev@task.sun.mcst.ru) + */ + +#ifndef __OPLIB_H +#define __OPLIB_H + +#include "openprom.h" +#include + +/* Root node of the prom device tree, this stays constant after + * initialization is complete. + */ +extern int prom_root_node; + +/* The functions... */ + +/* PROM device tree traversal functions... */ + +/* Get the child node of the given node, or zero if no child exists. */ +extern int prom_getchild(int parent_node); + +/* Get the next sibling node of the given node, or zero if no further + * siblings exist. + */ +extern int prom_getsibling(int node); + +/* Get the length, at the passed node, of the given property type. + * Returns -1 on error (ie. no such property at this node). 
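+ *
+ * A typical pairing with prom_getproperty() might look like this
+ * (illustrative sketch; "reg" and buf are example values):
+ *
+ *	int len = prom_getproplen(node, "reg");
+ *
+ *	if (len > 0 && len <= sizeof(buf))
+ *		prom_getproperty(node, "reg", buf, sizeof(buf));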
+ */
+extern int prom_getproplen(int thisnode, const char *property);
+
+/* Fetch the requested property using the given buffer. Returns
+ * the number of bytes the prom put into your buffer or -1 on error.
+ */
+extern int prom_getproperty(int thisnode, const char *property,
+			char *prop_buffer, int propbuf_size);
+
+/* Acquire an integer property. */
+extern int prom_getint(int node, char *property);
+
+/* Acquire an integer property, with a default value. */
+extern int prom_getintdefault(int node, char *property, int defval);
+
+/* Acquire a boolean property, 0=FALSE 1=TRUE. */
+extern int prom_getbool(int node, char *prop);
+
+/* Acquire a string property, null string on error. */
+extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
+
+/* Does the passed node have the given "name"? YES=1 NO=0 */
+extern int prom_nodematch(int thisnode, char *name);
+
+/* Puts in the buffer a prom name in the form name@x,y or name (x for which_io
+ * and y for the first regs phys address).
+ */
+extern int prom_getname(int node, char *buf, int buflen);
+
+/* Search all siblings starting at the passed node for "name" matching
+ * the given string. Returns the node on success, zero on failure.
+ */
+extern int prom_searchsiblings(int node_start, char *name);
+
+/* Return the first property type, as a string, for the given node.
+ * Returns a null string on error.
+ */
+extern char *prom_firstprop(int node, char *buffer);
+
+/* Returns the next property after the passed property for the given
+ * node. Returns null string on failure.
+ */
+extern char *prom_nextprop(int node, char *prev_property, char *buffer);
+
+/* Returns phandle of the path specified */
+extern int prom_finddevice(char *name);
+
+/* Returns 1 if the specified node has the given property. */
+extern int prom_node_has_property(int node, char *property);
+
+/* Set the indicated property at the given node with the passed value.
+ * Returns the number of bytes of your value that the prom took.
+ */
+extern int prom_setprop(int node, const char *prop_name, char *prop_value,
+			int value_size);
+
+#endif /* !(__OPLIB_H) */
diff --git a/arch/e2k/include/asm/ord_rwlock.h b/arch/e2k/include/asm/ord_rwlock.h
new file mode 100644
index 0000000..6c31a80
--- /dev/null
+++ b/arch/e2k/include/asm/ord_rwlock.h
@@ -0,0 +1,403 @@
+/*
+ * New SMP ordered read/write spinlock mechanism.
+ * Locking is ordered: later readers cannot outrun earlier writers.
+ * The locking order is based on coupons (tickets) received on the first
+ * attempt to take the lock, if the lock is already taken by somebody else.
+ *
+ * The initial state of a read/write spinlock allows 2^32 active readers
+ * and only one active writer. But the coupon discipline allows only 2^16
+ * simultaneously registered users of the lock: active + waiters.
+ */
+
+
+#ifndef __ASM_ORD_RWLOCK_H
+#define __ASM_ORD_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+#include
+
+#undef	DEBUG_RWLOCK_MODE
+#undef	DebugRW
+#define	DEBUG_RWLOCK_MODE	0	/* RW spinlocks debugging */
+#define	DebugRW(fmt, args...) \
+({ \
+	if (DEBUG_RWLOCK_MODE) \
+		host_printk("%s(): " fmt, __func__, ##args); \
+})
+
+#undef	DEBUG_SLOW_RWLOCK_MODE
+#undef	DebugSLRW
+#define	DEBUG_SLOW_RWLOCK_MODE	0	/* RW spinlocks slow path debugging */
+#define	DebugSLRW(fmt, args...) \
+({ \
+	if (DEBUG_SLOW_RWLOCK_MODE) \
+		host_printk("%s(): " fmt, __func__, ##args); \
+})
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
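+ *
+ * A worked example of the coupon discipline (editor's illustration,
+ * based on the field comments in ord_rwlock_types.h): with head == 5
+ * and ticket == 8, the users holding coupons 5..7 are still registered
+ * (active or waiting); a new reader is handed coupon 8 and ticket
+ * becomes 9. A waiter may enter only when head reaches its coupon
+ * value; while readers are active, count is negative (one unit per
+ * active reader), and an active writer is counted as 1.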
+ */
+
+static inline void
+native_ord_wait_read_lock_slow(arch_rwlock_t *rw)
+{
+	/* waiting is always done on the CPU, so there is nothing more to do */
+}
+static inline void
+native_ord_wait_write_lock_slow(arch_rwlock_t *rw)
+{
+	/* waiting is always done on the CPU, so there is nothing more to do */
+}
+static inline void
+native_ord_arch_read_locked_slow(arch_rwlock_t *rw)
+{
+	/* waiting is always done on the CPU, so there is nothing more to do */
+}
+static inline void
+native_ord_arch_write_locked_slow(arch_rwlock_t *rw)
+{
+	/* waiting is always done on the CPU, so there is nothing more to do */
+}
+static inline void
+native_ord_arch_read_unlock_slow(arch_rwlock_t *rw)
+{
+	/* waiting is always done on the CPU, so there is nothing more to do */
+}
+static inline void
+native_ord_arch_write_unlock_slow(arch_rwlock_t *rw)
+{
+	/* waiting is always done on the CPU, so there is nothing more to do */
+}
+
+#if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is paravirtualized host and guest kernel */
+/* or native guest kernel */
+#include
+#else	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel */
+/* or native kernel with virtualization support */
+
+static inline void
+ord_wait_read_lock_slow(arch_rwlock_t *rw)
+{
+	native_ord_wait_read_lock_slow(rw);
+}
+static inline void
+ord_wait_write_lock_slow(arch_rwlock_t *rw)
+{
+	native_ord_wait_write_lock_slow(rw);
+}
+static inline void
+ord_arch_read_locked_slow(arch_rwlock_t *rw)
+{
+	native_ord_arch_read_locked_slow(rw);
+}
+static inline void
+ord_arch_write_locked_slow(arch_rwlock_t *rw)
+{
+	native_ord_arch_write_locked_slow(rw);
+}
+static inline void
+ord_arch_read_unlock_slow(arch_rwlock_t *rw)
+{
+	native_ord_arch_read_unlock_slow(rw);
+}
+static inline void
+ord_arch_write_unlock_slow(arch_rwlock_t *rw)
+{
+	native_ord_arch_write_unlock_slow(rw);
+}
+
+#endif	/* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */
+
+/*
+ * would read_trylock() succeed?
+ * @rw: the rwlock in question.
+ */
+static inline bool
+ord_arch_read_can_lock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	bool success;
+
+	rwlock.lock = __api_atomic_can_lock_reader(rw, success);
+
+	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+	if (likely(success)) {
+		DebugRW("lock can be taken\n");
+	} else {
+		DebugRW("lock cannot be taken\n");
+	}
+	return success;
+}
+
+/*
+ * would write_trylock() succeed?
+ * @rw: the rwlock in question.
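+ *
+ * As with the read variant above, the answer is only a snapshot and may
+ * already be stale when the caller acts on it; a typical use is an
+ * opportunistic check before the trylock (editor's illustration):
+ *
+ *	if (ord_arch_write_can_lock(rw) && ord_arch_write_trylock(rw))
+ *		... the lock is held here ...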
+ */
+static inline bool
+ord_arch_write_can_lock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	bool success;
+
+	rwlock.lock = __api_atomic_can_lock_writer(rw, success);
+
+	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+	if (likely(success)) {
+		DebugRW("lock can be taken\n");
+	} else {
+		DebugRW("lock cannot be taken\n");
+	}
+	return success;
+}
+
+static inline void
+ord_arch_read_lock_slow(arch_rwlock_t *rw, rwlock_val_t coupon)
+{
+	arch_rwlock_t rwcoupon;
+	arch_rwlock_t rwlock;
+	u16 ticket;
+	bool success;
+
+	rwcoupon.lock = coupon;
+	ticket = rwcoupon.ticket;
+	DebugSLRW("coupon value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwcoupon.lock, ticket, rwcoupon.head, rwcoupon.count);
+
+	do {
+		/* wait for waking up after some unlocking */
+		ord_wait_read_lock_slow(rw);
+		/* try to take the lock again */
+		rwlock.lock = __api_atomic_add_slow_reader(rw, ticket, success);
+		DebugSLRW("current lock value 0x%lx: ticket 0x%x head 0x%x "
+			"count %d\n",
+			rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+		if (!likely(success)) {
+			DebugSLRW("lock is again not taken\n");
+		}
+	} while (!success);
+	DebugSLRW("lock is taken\n");
+	if (rwlock.ticket != rwlock.head) {
+		/* there are other waiters to take the lock; */
+		/* probably the next one in the queue is a reader and */
+		/* it can take the lock too */
+		DebugSLRW("ticket 0x%x head 0x%x there are other waiters "
+			"to wake up\n",
+			rwlock.ticket, rwlock.head);
+	}
+	if (rwlock.count < -1) {
+		/* a previous active reader has already woken up the */
+		/* following readers and does not wait for notification */
+		/* from the readers it activated */
+		DebugSLRW("count %d: there is a previous active reader, so do "
+			"not wake once again, enter the critical section "
+			"immediately\n",
+			rwlock.count);
+		return;
+	}
+	ord_arch_read_locked_slow(rw);
+	DebugSLRW("entering the critical section\n");
+}
+
+static inline void
+ord_arch_read_lock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	bool success;
+
+	rwlock.lock = __api_atomic_add_new_reader(rw, success);
+
+	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+	if (likely(success)) {
+		DebugRW("lock is taken\n");
+		return;
+	}
+	DebugRW("lock is not taken, go to the slow path\n");
+
+	/* slow path to take the read spinlock (as a mutex) */
+	ord_arch_read_lock_slow(rw, rwlock.lock);
+}
+
+static inline bool
+ord_arch_read_trylock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	bool success;
+
+	rwlock.lock = __api_atomic_try_add_new_reader(rw, success);
+
+	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+	if (likely(success)) {
+		DebugRW("lock is taken\n");
+	} else {
+		DebugRW("lock is not taken\n");
+	}
+	return success;
+}
+
+static inline void
+ord_arch_read_unlock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	u16 ticket, head;
+	int count;
+
+	rwlock.lock = __api_atomic_free_lock_reader(rw);
+
+	ticket = rwlock.ticket;
+	head = rwlock.head;
+	count = rwlock.count;
+	DebugRW("current lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, ticket, head, count);
+	if (count < 0) {
+		DebugRW("there are %d other readers, do not wake up now\n",
+			-count);
+		return;
+	} else if (count != 0) {
+		pr_err("%s(): nonzero readers lock counter %d\n",
+			__func__, count);
+		BUG_ON(true);
+		return;
+	}
+	if (ticket == head) {
+		DebugRW("there are no other waiters, nothing to wake up\n");
+		return;
+	}
+	DebugSLRW("there are other waiters, wake them up now\n");
+
+	/* slow path to unlock the read spinlock: */
+	/* need to wake up other threads waiting for unlocking */
+	ord_arch_read_unlock_slow(rw);
+}
+
+static inline void
+ord_arch_write_lock_slow(arch_rwlock_t *rw, rwlock_val_t coupon)
+{
+	arch_rwlock_t rwcoupon;
+	arch_rwlock_t rwlock;
+	u16 ticket;
+	bool success;
+
+	rwcoupon.lock = coupon;
+	ticket = rwcoupon.ticket;
+	DebugSLRW("coupon value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwcoupon.lock, ticket, rwcoupon.head, rwcoupon.count);
+
+	do {
+		/* wait for waking up after some unlocking */
+		ord_wait_write_lock_slow(rw);
+		/* try to take the lock again */
+		rwlock.lock = __api_atomic_add_slow_writer(rw, ticket, success);
+		DebugSLRW("current lock value 0x%lx: ticket 0x%x "
+			"head 0x%x count %d\n",
+			rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+		if (!likely(success)) {
+			DebugSLRW("lock is again not taken\n");
+		}
+	} while (!success);
+	DebugSLRW("lock is taken\n");
+	if (rwlock.ticket != rwlock.head) {
+		/* there are other waiters to take the lock */
+		DebugSLRW("there are other waiters to wake up\n");
+	}
+	ord_arch_write_locked_slow(rw);
+	DebugSLRW("entering the critical section\n");
+}
+
+static inline void
+ord_arch_write_lock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	bool success;
+
+	rwlock.lock = __api_atomic_add_new_writer(rw, success);
+
+	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+	if (likely(success)) {
+		DebugRW("lock is taken\n");
+		return;
+	}
+	DebugRW("lock is not taken, go to the slow path\n");
+
+	/* slow path to take the write spinlock (as a mutex) */
+	ord_arch_write_lock_slow(rw, rwlock.lock);
+}
+
+static inline bool
+ord_arch_write_trylock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	bool success;
+
+	rwlock.lock = __api_atomic_try_add_new_writer(rw, success);
+
+	DebugRW("source lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, rwlock.ticket, rwlock.head, rwlock.count);
+	if (likely(success)) {
+		DebugRW("lock is taken\n");
+	} else {
+		DebugRW("lock is not taken\n");
+	}
+	return success;
+}
+
+static inline void
+ord_arch_write_unlock(arch_rwlock_t *rw)
+{
+	arch_rwlock_t rwlock;
+	u16 ticket, head;
+	int count;
+
+	rwlock.lock = __api_atomic_free_lock_writer(rw);
+
+	ticket = rwlock.ticket;
+	head = rwlock.head;
+	count = rwlock.count;
+	DebugRW("current lock value 0x%lx: ticket 0x%x head 0x%x count %d\n",
+		rwlock.lock, ticket, head, count);
+	if (count != 0) {
+		pr_err("%s(): nonzero writer counter %d\n",
+			__func__, count);
+		BUG_ON(true);
+		return;
+	}
+	if (ticket == head) {
+		DebugRW("ticket 0x%x head 0x%x there are no other waiters, "
+			"nothing to wake up\n",
+			ticket, head);
+		return;
+	}
+	DebugSLRW("ticket 0x%x head 0x%x there are other waiters, "
+		"wake them up now\n",
+		ticket, head);
+
+	/* slow path to unlock the write spinlock: */
+	/* need to wake up other threads waiting for unlocking */
+	ord_arch_write_unlock_slow(rw);
+}
+
+#define arch_read_can_lock(rw)		ord_arch_read_can_lock(rw)
+#define arch_write_can_lock(rw)		ord_arch_write_can_lock(rw)
+#define arch_read_lock(rw)		ord_arch_read_lock(rw)
+#define arch_write_lock(rw)		ord_arch_write_lock(rw)
+
+#define arch_read_unlock(rw)		ord_arch_read_unlock(rw)
+#define arch_write_unlock(rw)		ord_arch_write_unlock(rw)
+#define arch_read_trylock(rw)		ord_arch_read_trylock(rw)
+#define arch_write_trylock(rw)		ord_arch_write_trylock(rw)
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* ! __ASM_ORD_RWLOCK_H */
diff --git a/arch/e2k/include/asm/ord_rwlock_types.h b/arch/e2k/include/asm/ord_rwlock_types.h
new file mode 100644
index 0000000..b974717
--- /dev/null
+++ b/arch/e2k/include/asm/ord_rwlock_types.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_ORD_RWLOCK_TYPES_H
+#define __ASM_ORD_RWLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#include
+
+typedef unsigned long rwlock_val_t;
+typedef struct {
+	union {
+		rwlock_val_t lock;	/* as a single whole atomic value */
+		struct {	/* fields of the lock value; each field */
+				/* (or all of them together) must be */
+				/* updated only atomically: */
+			u16 head;	/* coupon # of the current active */
+					/* user(s) of the lock */
+			u16 ticket;	/* last coupon # issued to a */
+					/* potential (active or waiting) */
+					/* user of the lock */
+			s32 count;	/* current counter of active users: */
+					/* for readers it is negative and */
+					/* runs from 0, -1, -2, ... down to */
+					/* the maximum negative value; */
+					/* for writers it can be only 0 or 1 */
+		};
+	};
+} arch_rwlock_t;
+#define __ARCH_RW_LOCK_UNLOCKED	{ \
+	{ \
+		.lock = 0 \
+	} \
+}
+
+#endif	/* __ASM_ORD_RWLOCK_TYPES_H */
diff --git a/arch/e2k/include/asm/override-lcc-warnings.h b/arch/e2k/include/asm/override-lcc-warnings.h
new file mode 100644
index 0000000..d973752
--- /dev/null
+++ b/arch/e2k/include/asm/override-lcc-warnings.h
@@ -0,0 +1,13 @@
+/* identifier-list parameters may only be used in a function definition */
+#pragma diag_suppress 92
+
+#pragma diag_suppress 1717
+
+/* in 'goto *expr', expr must have type 'void *' (lcc bug #121409) */
+#pragma diag_suppress 1101
+
+/* array of elements containing a flexible array member is nonstandard */
+#pragma diag_suppress 1717
+
+/* a reduction in alignment without the 'packed' attribute is ignored */
+#pragma diag_suppress 1160
diff --git a/arch/e2k/include/asm/p2v/boot_bitops.h b/arch/e2k/include/asm/p2v/boot_bitops.h
new file mode 100644
index 0000000..6670feb
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_bitops.h
@@ -0,0 +1,71 @@
+#ifndef _E2K_P2V_BOOT_BITOPS_H_
+#define _E2K_P2V_BOOT_BITOPS_H_
+
+#include
+
+#include
+#include
+#include
+
+#define bitops_get_mask(nr) (1UL << (nr & 63))
+
+static inline void boot_set_bit(int nr, volatile void *addr)
+{
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long mask = bitops_get_mask(nr);
+	__api_atomic_op(mask, m, d, "ord", RELAXED_MB);
+}
+
+static inline void boot_clear_bit(int nr, volatile void *addr)
+{
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long mask = bitops_get_mask(nr);
+	__api_atomic_op(mask, m, d, "andnd", RELAXED_MB);
+}
+
+static inline void boot_change_bit(int nr, volatile void *addr)
+{
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long mask = bitops_get_mask(nr);
+	__api_atomic_op(mask, m, d, "xord", RELAXED_MB);
+}
+
+static inline int boot_test_and_set_bit(int nr, volatile void *addr)
+{
+	long retval;
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long mask = bitops_get_mask(nr);
+
+	retval = __api_atomic_fetch_op(mask, m, d, "ord", STRONG_MB);
+
+	return (retval & mask) != 0;
+}
+
+static inline int boot_test_and_clear_bit(int nr, volatile void *addr)
+{
+	long retval;
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long mask = bitops_get_mask(nr);
+
+	retval = __api_atomic_fetch_op(mask, m, d, "andnd", STRONG_MB);
+
+	return (retval & mask) != 0;
+}
+
+static inline int boot_test_and_change_bit(int nr, volatile void *addr)
+{
+	long retval;
+	unsigned long *m =
((unsigned long *)addr) + (nr >> 6);
+	unsigned long mask = bitops_get_mask(nr);
+
+	retval = __api_atomic_fetch_op(mask, m, d, "xord", STRONG_MB);
+
+	return (retval & mask) != 0;
+}
+
+static inline int boot_test_bit(int nr, const volatile void *addr)
+{
+	return (1UL & (((unsigned long *)addr)[nr >> 6] >> (nr & 63))) != 0UL;
+}
+
+#endif /* _E2K_P2V_BOOT_BITOPS_H_ */
diff --git a/arch/e2k/include/asm/p2v/boot_cacheflush.h b/arch/e2k/include/asm/p2v/boot_cacheflush.h
new file mode 100644
index 0000000..fa89713
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_cacheflush.h
@@ -0,0 +1,57 @@
+#pragma once
+
+#include
+#include
+
+static __always_inline void boot_wait_for_flush_v5_L3(unsigned char *node_nbsr)
+{
+	l3_ctrl_t l3_ctrl;
+
+	/* waiting for flush completion */
+	do {
+		boot_cpu_relax();
+		l3_ctrl.E2K_L3_CTRL_reg =
+			boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_ctrl);
+	} while (l3_ctrl.E2K_L3_CTRL_fl != 0);
+}
+
+static __always_inline void boot_wait_for_flush_v4_L3(unsigned char *node_nbsr)
+{
+	l3_reg_t l3_diag;
+
+	/* waiting for flush completion */
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b0_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b1_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b2_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b3_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b4_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b5_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b6_diag_dw);
+	l3_diag = boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_b7_diag_dw);
+
+	__E2K_WAIT_ALL;
+}
+
+static __always_inline void boot_native_flush_L3(int iset_ver,
+						unsigned char *node_nbsr)
+{
+	l3_ctrl_t l3_ctrl;
+
+	if (iset_ver < E2K_ISET_V4)
+		/* cache L3 is absent */
+		return;
+
+	/* set the bit of the L3 control register to flush L3 */
+	l3_ctrl.E2K_L3_CTRL_reg =
+		boot_do_sic_read_node_nbsr_reg(node_nbsr, SIC_l3_ctrl);
+	l3_ctrl.E2K_L3_CTRL_fl = 1;
+	boot_do_sic_write_node_nbsr_reg(node_nbsr, SIC_l3_ctrl,
+					l3_ctrl.E2K_L3_CTRL_reg);
+
+	/* waiting for flush completion */
+	if (iset_ver > E2K_ISET_V4)
+		boot_wait_for_flush_v5_L3(node_nbsr);
+	else
+		boot_wait_for_flush_v4_L3(node_nbsr);
+}
+
diff --git a/arch/e2k/include/asm/p2v/boot_console.h b/arch/e2k/include/asm/p2v/boot_console.h
new file mode 100644
index 0000000..e462387
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_console.h
@@ -0,0 +1,67 @@
+#ifndef _E2K_P2V_BOOT_CONSOLE_H_
+#define _E2K_P2V_BOOT_CONSOLE_H_
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_SERIAL_BOOT_PRINTK
+
+# define boot_serial_boot_console_opts \
+		boot_get_vo_value(serial_boot_console_opts)
+# define boot_opts_entry(opts, member) \
+({ \
+	serial_console_opts_t *opts_p = boot_vp_to_pp(opts); \
+	typeof(opts_p->member) entry; \
+	entry = opts_p->member; \
+	((typeof(opts_p->member))boot_vp_to_pp(entry)); \
+})
+# define boot_opts_func_entry(opts, func) \
+({ \
+	serial_console_opts_t *opts_p = boot_vp_to_pp(opts); \
+	typeof(opts_p->func) entry; \
+	entry = opts_p->func; \
+	((typeof(opts_p->func))boot_func_to_pp(entry)); \
+})
+# define boot_serial_boot_console_opts_entry(entry) \
+		boot_opts_entry(boot_serial_boot_console_opts, entry)
+# define boot_serial_boot_console_opts_func_entry(func) \
+		boot_opts_func_entry(boot_serial_boot_console_opts, func)
+
+extern unsigned char serial_dump_console_num;
+#define
boot_serial_boot_console_num boot_get_vo_value(serial_dump_console_num) + +extern void __init_recv boot_setup_serial_console(bool bsp, boot_info_t *); +#endif /* CONFIG_SERIAL_BOOT_PRINTK */ + +# ifdef CONFIG_SERIAL_AM85C30_BOOT_CONSOLE +extern serial_console_opts_t am85c30_serial_boot_console; +# endif + +#ifdef CONFIG_BOOT_PRINTK +extern void do_boot_printk(char const *fmt_v, ...); +extern void boot_vprintk(char const *fmt_v, va_list ap_v); +extern void boot_bug(const char *fmt_v, ...); +extern void boot_warning(const char *fmt_v, ...); + +#else /* !CONFIG_BOOT_PRINTK */ +# define do_boot_printk(...) +# define boot_vprintk(...) +static inline void boot_bug(const char *fmt_v, ...) +{ +} +static inline void boot_warning(const char *fmt_v, ...) +{ +} +#endif /* CONFIG_BOOT_PRINTK */ + +#define boot_printk if (DEBUG_BOOT_MODE) do_boot_printk + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _E2K_P2V_BOOT_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/p2v/boot_head.h b/arch/e2k/include/asm/p2v/boot_head.h new file mode 100644 index 0000000..76ba796 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_head.h @@ -0,0 +1,321 @@ +/* $Id: boot_head.h,v 1.21 2009/06/29 11:53:53 atic Exp $ + * + * Heading of boot-time initialization. + * + * Copyright (C) 2001 Salavat Guiliazov + */ + +#ifndef _E2K_P2V_BOOT_HEAD_H +#define _E2K_P2V_BOOT_HEAD_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ + +#ifndef CONFIG_SMP +extern unsigned char boot_init_started; /* boot-time initialization */ + /* has been started */ +extern unsigned char _va_support_on; /* virtual addressing support */ + /* has turned on */ +#define boot_boot_init_started boot_get_vo_value(boot_init_started) +#define boot_va_support_on boot_get_vo_value(_va_support_on) +#define va_support_on _va_support_on +#else +extern unsigned char boot_init_started[NR_CPUS]; + /* boot-time initialization */ + /* has been started */ +extern unsigned char _va_support_on[NR_CPUS]; + /* virtual addressing support */ + /* has turned on */ +#define boot_boot_init_started \ + (boot_vp_to_pp((unsigned char *)boot_init_started)) \ + [boot_smp_processor_id()] +#define boot_va_support_on \ + (boot_vp_to_pp((unsigned char *)_va_support_on)) \ + [boot_smp_processor_id()] +#define va_support_on _va_support_on[boot_smp_processor_id()] +#endif /* CONFIG_SMP */ + +extern bootblock_struct_t *bootblock_phys; /* bootblock structure */ + /* physical pointer */ +extern bootblock_struct_t *bootblock_virt; /* bootblock structure */ + /* virtual pointer */ +#define boot_bootblock_phys boot_get_vo_value(bootblock_phys) +#define boot_bootblock_virt boot_get_vo_value(bootblock_virt) + +#ifdef CONFIG_E2K_MACHINE +# define boot_native_machine_id (native_machine_id) +#else +# if defined(CONFIG_ES2) || defined(CONFIG_E2S) || \ + defined(CONFIG_E8C) || defined(CONFIG_E1CP) || \ + defined(CONFIG_E8C2) || defined(CONFIG_E12C) || \ + defined(CONFIG_E16C) || defined(CONFIG_E2C3) +# define boot_native_machine_id (native_machine_id) +# else +# define boot_native_machine_id boot_get_vo_value(native_machine_id) +# endif + +# ifdef CONFIG_NUMA +# define boot_the_node_machine_id(nid) \ + boot_the_node_get_vo_value(nid, machine_id) +# define boot_node_machine_id \ + boot_the_node_machine_id(boot_numa_node_id()) +# endif +#endif + +#define boot_machine (boot_get_vo_value(machine)) + +#ifdef CONFIG_NUMA +#define boot_the_node_machine(nid) \ + ((machdep_t *)boot_the_node_vp_to_pp(nid, &machine)) +#define 
boot_node_machine(nid) \ + boot_the_node_machine(boot_numa_node_id()) +#else /* ! CONFIG_NUMA */ +#define boot_the_node_machine(nid) \ + ((machdep_t *)boot_vp_to_pp(&machine)) +#define boot_node_machine(nid) \ + boot_the_node_machine(0) +#endif /* CONFIG_NUMA */ + +extern e2k_addr_t start_of_phys_memory; /* start address of physical memory */ +extern e2k_addr_t end_of_phys_memory; /* end address + 1 of physical memory */ +extern e2k_size_t pages_of_phys_memory; /* number of pages of physical memory */ +extern e2k_addr_t kernel_image_size; /* size of full kernel image in the */ + /* memory ("text" + "data" + "bss") */ +#define boot_start_of_phys_memory boot_get_vo_value(start_of_phys_memory) +#define boot_end_of_phys_memory boot_get_vo_value(end_of_phys_memory) +#define boot_pages_of_phys_memory boot_get_vo_value(pages_of_phys_memory) +#define boot_kernel_image_size boot_get_vo_value(kernel_image_size) + +extern int phys_nodes_num; /* total number of online */ + /* nodes */ +extern unsigned long phys_nodes_map; /* map of all online nodes */ +extern int phys_mem_nodes_num; /* number of online nodes */ + /* only with memory */ +extern unsigned long phys_mem_nodes_map; /* map of online nodes */ + /* only with memory */ +#define boot_phys_nodes_num boot_get_vo_value(phys_nodes_num) +#define boot_phys_nodes_map boot_get_vo_value(phys_nodes_map) +#define boot_phys_mem_nodes_num boot_get_vo_value(phys_mem_nodes_num) +#define boot_phys_mem_nodes_map boot_get_vo_value(phys_mem_nodes_map) + +#ifdef CONFIG_NUMA +extern e2k_addr_t node_kernel_phys_base[MAX_NUMNODES]; +#define boot_node_kernel_phys_base(node_id) \ + boot_get_vo_value(node_kernel_phys_base[(node_id)]) +#define boot_kernel_phys_base \ + boot_node_kernel_phys_base(boot_numa_node_id()) +#define init_node_kernel_phys_base(node_id) \ + (node_kernel_phys_base[(node_id)]) +#define BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(node_id) \ + ((unsigned long)(boot_node_kernel_phys_base(node_id)) != \ + (unsigned long)-1) +#define BOOT_EARLY_NODE_HAS_DUP_KERNEL() \ + BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(boot_numa_node_id()) + +#define BOOT_TEST_AND_SET_NODE_LOCK(node_lock, node_done) \ +({ \ + int was_done; \ + boot_node_spin_lock((node_lock)); \ + was_done = (node_done); \ + if ((was_done)) { \ + boot_node_spin_unlock((node_lock)); \ + } \ + was_done; \ +}) +#define BOOT_NODE_UNLOCK(node_lock, node_done) \ +({ \ + (node_done) = 1; \ + boot_node_spin_unlock((node_lock)); \ +}) +#else /* ! CONFIG_NUMA */ +extern e2k_addr_t kernel_phys_base; /* physical address of kernel Image */ + /* begining */ +#define BOOT_IS_BSP_ID (boot_smp_processor_id() == 0) +#define boot_kernel_phys_base boot_get_vo_value(kernel_phys_base) +#define BOOT_TEST_AND_SET_NODE_LOCK(node_lock, node_done) (!BOOT_IS_BSP_ID) +#define BOOT_NODE_UNLOCK(node_lock, node_done) +#endif /* CONFIG_NUMA */ + +/* + * MMU Trap Cellar + */ +#ifndef CONFIG_SMP +extern unsigned long kernel_trap_cellar[MMU_TRAP_CELLAR_MAX_SIZE]; + +#define KERNEL_TRAP_CELLAR kernel_trap_cellar + +#define boot_kernel_trap_cellar boot_vp_to_pp((u64 *)kernel_trap_cellar) +#define boot_trap_cellar boot_kernel_trap_cellar +#define BOOT_KERNEL_TRAP_CELLAR boot_kernel_trap_cellar +#else /* CONFIG_SMP */ +extern unsigned long kernel_trap_cellar; + +/* + * Don't use hard_smp_processor_id() here to avoid function call in + * NATIVE_SAVE_TRAP_CELLAR(). 
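+ *
+ * The per-CPU cellar is a slice of one flat array: for example
+ * (editor's illustration), the CPU with cpuid 2 uses the range
+ * starting at
+ *
+ *	&kernel_trap_cellar + 2 * MMU_TRAP_CELLAR_MAX_SIZE
+ *
+ * exactly as computed by KERNEL_TRAP_CELLAR below.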
+ */ +#define KERNEL_TRAP_CELLAR \ + ((&kernel_trap_cellar) + MMU_TRAP_CELLAR_MAX_SIZE * \ + cpu_to_cpuid(raw_smp_processor_id())) + +#define boot_trap_cellar \ + boot_vp_to_pp((u64 *)(&kernel_trap_cellar) + \ + MMU_TRAP_CELLAR_MAX_SIZE * boot_smp_processor_id()) +#define boot_kernel_trap_cellar \ + boot_node_vp_to_pp((u64 *)(&kernel_trap_cellar) + \ + MMU_TRAP_CELLAR_MAX_SIZE * boot_smp_processor_id()) +#define BOOT_KERNEL_TRAP_CELLAR \ + ((&kernel_trap_cellar) + \ + MMU_TRAP_CELLAR_MAX_SIZE * boot_smp_processor_id()) +#endif /* ! CONFIG_SMP */ + +/* + * Native/guest VM indicator + */ +#define BOOT_IS_HV_GM() (boot_machine.gmi) + +/* + * Kernel Compilation units table + */ +extern e2k_cute_t kernel_CUT[MAX_KERNEL_CODES_UNITS]; +#define boot_kernel_CUT boot_node_vp_to_pp((e2k_cute_t *)kernel_CUT) + +/* + * Control process of boot-time initialization. + */ + +extern void boot_native_setup_machine_id(bootblock_struct_t *bootblock); +extern void boot_startup(bool bsp, bootblock_struct_t *bootblock); +extern void boot_native_clear_bss(void); +extern void __init boot_native_check_bootblock(bool bsp, + bootblock_struct_t *bootblock); +extern void boot_setup_iset_features(struct machdep *machine); +extern void boot_common_setup_arch_mmu(struct machdep *machine, + pt_struct_t *pt_struct); +extern void init_native_terminate_boot_init(bool bsp, int cpuid); +extern void init_start_kernel_init(bool bsp, int cpuid); + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ +static inline void boot_setup_machine_id(bootblock_struct_t *bootblock) +{ + boot_native_setup_machine_id(bootblock); +} +static inline void boot_clear_bss(void) +{ + boot_native_clear_bss(); +} +static inline void __init +boot_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + boot_native_check_bootblock(bsp, bootblock); +} + +static inline void init_terminate_boot_init(bool bsp, int cpuid) +{ + init_native_terminate_boot_init(bsp, cpuid); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +/* + * Convert virtual address of kernel item to the consistent physical address, + * while booting process is continued into virtual memory space. + */ + +#ifndef CONFIG_NUMA +#define kernel_va_to_pa(virt_addr) \ + ((e2k_addr_t)(virt_addr) - KERNEL_BASE + kernel_phys_base) +#else /* CONFIG_NUMA */ +#define kernel_va_to_pa(virt_addr) \ + node_kernel_va_to_pa(numa_node_id(), virt_addr) +#endif /* ! CONFIG_NUMA */ + +/* + * Convert virtual address of kernel item to the consistent physical address + * on the given node, while booting process is continued into virtual memory + * space. 
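+ *
+ * For example (editor's illustration), in the non-NUMA case the
+ * conversion is a plain rebase of the offset: for a kernel object at
+ * virtual address KERNEL_BASE + 0x1000,
+ *
+ *	pa = (KERNEL_BASE + 0x1000) - KERNEL_BASE + kernel_phys_base
+ *	   = kernel_phys_base + 0x1000
+ *
+ * The NUMA variant below differs only in which per-node kernel base
+ * is added.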
+ */
+
+#ifndef CONFIG_NUMA
+#define node_kernel_va_to_pa(node_id, virt_addr) \
+		((e2k_addr_t)(virt_addr) - KERNEL_BASE + kernel_phys_base)
+#else /* CONFIG_NUMA */
+#define node_kernel_va_to_pa(node_id, virt_addr) \
+({ \
+	unsigned long virt_offset = (e2k_addr_t)(virt_addr) - \
+					KERNEL_BASE; \
+	unsigned long kernel_base; \
+	if ((e2k_addr_t)(virt_addr) >= (e2k_addr_t)__node_data_end) { \
+		kernel_base = node_kernel_phys_base[BOOT_BS_NODE_ID]; \
+	} else if (node_has_dup_kernel(node_id)) { \
+		kernel_base = node_kernel_phys_base[node_id]; \
+	} else { \
+		kernel_base = node_kernel_phys_base[ \
+				node_dup_kernel_nid(node_id)]; \
+	} \
+	kernel_base + virt_offset; \
+})
+#endif /* ! CONFIG_NUMA */
+
+#ifdef CONFIG_NUMA
+/*
+ * The following macros should be used in NUMA mode to convert addresses
+ * on the current node
+ */
+static inline void *
+boot_node_kernel_va_to_pa(int node_id, void *virt_pnt)
+{
+	unsigned long node_base;
+
+	node_base = boot_node_kernel_phys_base(node_id);
+	if (node_base == (unsigned long)-1) {
+		node_base = boot_node_kernel_phys_base(BOOT_BS_NODE_ID);
+	}
+	return boot_kernel_va_to_pa(virt_pnt, node_base);
+}
+#define boot_the_node_vp_to_pp(node_id, virt_pnt) \
+		boot_node_kernel_va_to_pa((node_id), (void *)(virt_pnt))
+#define boot_the_node_get_vo_value(node_id, virt_value_name) \
+		*(typeof ( virt_value_name)*) \
+			boot_the_node_vp_to_pp((node_id), \
+						&(virt_value_name))
+#define boot_the_node_get_vo_name(node_id, virt_value_name) \
+		*(typeof ( virt_value_name)*) \
+			boot_the_node_vp_to_pp((node_id), \
+						&(virt_value_name))
+#define boot_node_vp_to_pp(virt_pnt) \
+		boot_the_node_vp_to_pp(boot_numa_node_id(), virt_pnt)
+#define boot_node_get_vo_value(virt_value_name) \
+		boot_the_node_get_vo_value(boot_numa_node_id(), \
+						virt_value_name)
+#define boot_node_get_vo_name(virt_value_name) \
+		boot_the_node_get_vo_name(boot_numa_node_id(), \
+						virt_value_name)
+#else /* ! CONFIG_NUMA */
+#define boot_node_vp_to_pp(virt_pnt) boot_vp_to_pp(virt_pnt)
+#define boot_node_get_vo_value(virt_value_name) \
+		boot_get_vo_value(virt_value_name)
+#define boot_node_get_vo_name(virt_value_name) \
+		boot_node_get_vo_name(virt_value_name)
+#endif /* CONFIG_NUMA */
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* !(_E2K_P2V_BOOT_HEAD_H) */
diff --git a/arch/e2k/include/asm/p2v/boot_init.h b/arch/e2k/include/asm/p2v/boot_init.h
new file mode 100644
index 0000000..07320e3
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_init.h
@@ -0,0 +1,372 @@
+/* $Id: boot_init.h,v 1.18 2009/06/29 11:53:32 atic Exp $
+ *
+ * Boot-time initialization of Virtual memory support and switch
+ * from boot execution on physical memory to boot continuation
+ * on virtual memory
+ */
+
+#ifndef _E2K_P2V_BOOT_INIT_H
+#define _E2K_P2V_BOOT_INIT_H
+
+#include
+#include
+
+#include
+#include
+#include
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The structures below describe the list of the memory areas used by
+ * boot-time initialization. The item 'phys' points to the physical base
+ * address of an area, while the item 'virt' points to the virtual base
+ * address of the same area. All the used memory areas are enumerated
+ * below. If some new area is to be used, then it should be added to the
+ * list of already known ones.
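+ *
+ * For instance (illustrative values only), an area with
+ * phys == 0x10000, virt == KERNEL_BASE + 0x10000 and size == 0x4000
+ * describes one 16 Kb region that is reachable through both addresses
+ * while the switch from physical to virtual addressing is in progress.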
+ */ + +typedef struct mem_area_desc { /* an area descriptor */ + e2k_addr_t phys; /* physical base address area */ + e2k_addr_t virt; /* virtual base address of same area */ + e2k_size_t size; /* bytes size of the area */ + e2k_size_t phys_offset; /* physical offset of the area */ + e2k_size_t virt_offset; /* virtual offset of the area */ +} mem_area_desc_t; + +typedef struct node_mem_area_desc { /* node an area descriptor */ + mem_area_desc_t nodes[L_MAX_MEM_NUMNODES]; +} node_mem_area_desc_t; + +typedef struct bootmem_areas { /* list of all areas */ +#ifndef CONFIG_NUMA + mem_area_desc_t text; /* segment 'text' of kernel */ + mem_area_desc_t data; /* segment 'data' of kernel */ +#else /* CONFIG_NUMA */ + node_mem_area_desc_t text; /* nodes segment 'text' of kernel */ + node_mem_area_desc_t dup_data; /* nodes duplicated 'data' segment */ + node_mem_area_desc_t data; /* node segment 'data' of kernel */ +#endif /* ! CONFIG_NUMA */ +#ifndef CONFIG_SMP + /* + * Boot-time stacks to switch from physical memory to virtual memory + */ + mem_area_desc_t boot_ps; /* procedure stack of kernel */ + mem_area_desc_t boot_pcs; /* procedure chain stack of kernel */ + mem_area_desc_t boot_stack; /* kernel procedure local data stack */ +#else + /* + * Boot-time stacks to switch from physical memory to virtual memory + */ + mem_area_desc_t boot_ps[NR_CPUS]; + mem_area_desc_t boot_pcs[NR_CPUS]; + mem_area_desc_t boot_stack[NR_CPUS]; +#endif /* CONFIG_SMP */ + mem_area_desc_t bootinfo; /* boot-time information from loader */ +#ifdef CONFIG_BLK_DEV_INITRD + mem_area_desc_t initrd; /* initial disk info */ +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_L_IO_APIC + mem_area_desc_t mpf; /* MP floating table */ + mem_area_desc_t mpc; /* MP configuration table */ +#endif /* CONFIG_L_IO_APIC */ + mem_area_desc_t symtab; /* kernel symbols table */ + mem_area_desc_t strtab; /* kernel strings table */ + mem_area_desc_t x86_hw; /* PA 640K - 1M are reserved for PC's */ + /* integrated hardware: BIOS, VGA,... 
*/ +} bootmem_areas_t; + +extern unsigned long totalram_real_pages; +#define boot_totalram_real_pages boot_get_vo_value(totalram_real_pages) + +extern bootmem_areas_t kernel_bootmem; +#define boot_kernel_bootmem boot_vp_to_pp(&kernel_bootmem) + +#ifndef CONFIG_NUMA +#define boot_text_phys_base boot_get_vo_value(kernel_bootmem.text.phys) +#define boot_text_virt_base boot_get_vo_value(kernel_bootmem.text.virt) +#define boot_text_size boot_get_vo_value(kernel_bootmem.text.size) + +#define boot_data_phys_base boot_get_vo_value(kernel_bootmem.data.phys) +#define boot_data_virt_base boot_get_vo_value(kernel_bootmem.data.virt) +#define boot_data_size boot_get_vo_value(kernel_bootmem.data.size) +#else /* CONFIG_NUMA */ +#define boot_node_text_phys_base(nid) \ + boot_get_vo_value(kernel_bootmem.text.nodes[(nid)].phys) +#define boot_node_text_virt_base(nid) \ + boot_get_vo_value(kernel_bootmem.text.nodes[(nid)].virt) +#define boot_node_text_size(nid) \ + boot_get_vo_value(kernel_bootmem.text.nodes[(nid)].size) + +#define boot_node_dup_data_phys_base(nid) \ + boot_get_vo_value(kernel_bootmem.dup_data.nodes[(nid)].phys) +#define boot_node_dup_data_virt_base(nid) \ + boot_get_vo_value(kernel_bootmem.dup_data.nodes[(nid)].virt) +#define boot_node_dup_data_size(nid) \ + boot_get_vo_value(kernel_bootmem.dup_data.nodes[(nid)].size) +#define boot_node_data_phys_base(nid) \ + boot_get_vo_value(kernel_bootmem.data.nodes[(nid)].phys) +#define boot_node_data_virt_base(nid) \ + boot_get_vo_value(kernel_bootmem.data.nodes[(nid)].virt) +#define boot_node_data_size(nid) \ + boot_get_vo_value(kernel_bootmem.data.nodes[(nid)].size) + +#define boot_text_phys_base boot_node_text_phys_base(boot_numa_node_id()) +#define boot_text_virt_base boot_node_text_virt_base(boot_numa_node_id()) +#define boot_text_size boot_node_text_size(boot_numa_node_id()) + +#define boot_dup_data_phys_base \ + boot_node_dup_data_phys_base(boot_numa_node_id()) +#define boot_dup_data_virt_base \ + boot_node_dup_data_virt_base(boot_numa_node_id()) +#define boot_dup_data_size \ + boot_node_dup_data_size(boot_numa_node_id()) +#define boot_data_phys_base boot_node_data_phys_base(boot_numa_node_id()) +#define boot_data_virt_base boot_node_data_virt_base(boot_numa_node_id()) +#define boot_data_size boot_node_data_size(boot_numa_node_id()) +#endif /* ! 
CONFIG_NUMA */ + +#ifndef CONFIG_SMP +#define boot_boot_ps_phys_base boot_get_vo_value(kernel_bootmem.boot_ps.phys) +#define boot_boot_ps_virt_base boot_get_vo_value(kernel_bootmem.boot_ps.virt) +#define boot_boot_ps_size boot_get_vo_value(kernel_bootmem.boot_ps.size) +#define kernel_boot_ps_phys_base(cpuid) kernel_bootmem.boot_ps.phys +#define kernel_boot_ps_virt_base(cpuid) kernel_bootmem.boot_ps.virt +#define kernel_boot_ps_size(cpuid) kernel_bootmem.boot_ps.size +#else +#define boot_boot_ps_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_ps[boot_smp_processor_id()].phys) +#define boot_boot_ps_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_ps[boot_smp_processor_id()].virt) +#define boot_boot_ps_size \ + boot_get_vo_value(kernel_bootmem.boot_ps[boot_smp_processor_id()].size) +#define kernel_boot_ps_phys_base(cpuid) kernel_bootmem.boot_ps[cpuid].phys +#define kernel_boot_ps_virt_base(cpuid) kernel_bootmem.boot_ps[cpuid].virt +#define kernel_boot_ps_size(cpuid) kernel_bootmem.boot_ps[cpuid].size +#endif /* CONFIG_SMP */ + +#ifndef CONFIG_SMP +#define boot_boot_pcs_phys_base boot_get_vo_value(kernel_bootmem.boot_pcs.phys) +#define boot_boot_pcs_virt_base boot_get_vo_value(kernel_bootmem.boot_pcs.virt) +#define boot_boot_pcs_size boot_get_vo_value(kernel_bootmem.boot_pcs.size) +#define kernel_boot_pcs_phys_base(cpuid) kernel_bootmem.boot_pcs.phys +#define kernel_boot_pcs_virt_base(cpuid) kernel_bootmem.boot_pcs.virt +#define kernel_boot_pcs_size(cpuid) kernel_bootmem.boot_pcs.size +#else +#define boot_boot_pcs_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_pcs[boot_smp_processor_id()].phys) +#define boot_boot_pcs_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_pcs[boot_smp_processor_id()].virt) +#define boot_boot_pcs_size \ + boot_get_vo_value(kernel_bootmem.boot_pcs[boot_smp_processor_id()].size) +#define kernel_boot_pcs_phys_base(cpuid) \ + kernel_bootmem.boot_pcs[cpuid].phys +#define kernel_boot_pcs_virt_base(cpuid) \ + kernel_bootmem.boot_pcs[cpuid].virt +#define kernel_boot_pcs_size(cpuid) \ + kernel_bootmem.boot_pcs[cpuid].size +#endif /* CONFIG_SMP */ + +#ifndef CONFIG_SMP +#define boot_boot_stack_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_stack.phys) +#define boot_boot_stack_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_stack.virt) +#define boot_boot_stack_size \ + boot_get_vo_value(kernel_bootmem.boot_stack.size) +#define boot_boot_stack_phys_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack.phys_offset) +#define boot_boot_stack_virt_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack.virt_offset) + +#define kernel_boot_stack_phys_base(cpuid) kernel_bootmem.boot_stack.phys +#define kernel_boot_stack_virt_base(cpuid) kernel_bootmem.boot_stack.virt +#define kernel_boot_stack_virt_offset(cpuid) \ + kernel_bootmem.boot_stack.virt_offset +#define kernel_boot_stack_size(cpuid) kernel_bootmem.boot_stack.size +#else +#define boot_boot_stack_phys_base \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + phys) +#define boot_boot_stack_virt_base \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + virt) +#define boot_boot_stack_size \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + size) +#define boot_boot_stack_phys_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. \ + phys_offset) +#define boot_boot_stack_virt_offset \ + boot_get_vo_value(kernel_bootmem.boot_stack[boot_smp_processor_id()]. 
\ + virt_offset) +#define kernel_boot_stack_phys_base(cpuid) \ + kernel_bootmem.boot_stack[cpuid].phys +#define kernel_boot_stack_virt_base(cpuid) \ + kernel_bootmem.boot_stack[cpuid].virt +#define kernel_boot_stack_virt_offset(cpuid) \ + kernel_bootmem.boot_stack[cpuid].virt_offset +#define kernel_boot_stack_size(cpuid) \ + kernel_bootmem.boot_stack[cpuid].size +#endif /* CONFIG_SMP */ + +#define boot_bootinfo_phys_base boot_get_vo_value(kernel_bootmem.bootinfo.phys) +#define boot_bootinfo_virt_base boot_get_vo_value(kernel_bootmem.bootinfo.virt) +#define boot_bootinfo_size boot_get_vo_value(kernel_bootmem.bootinfo.size) + +#define init_bootinfo_phys_base kernel_bootmem.bootinfo.phys +#define init_bootinfo_virt_base kernel_bootmem.bootinfo.virt +#define init_bootinfo_size kernel_bootmem.bootinfo.size + +#ifdef CONFIG_BLK_DEV_INITRD +#define boot_initrd_phys_base boot_get_vo_value(kernel_bootmem.initrd.phys) +#define boot_initrd_virt_base boot_get_vo_value(kernel_bootmem.initrd.virt) +#define boot_initrd_size boot_get_vo_value(kernel_bootmem.initrd.size) + +#define init_initrd_phys_base kernel_bootmem.initrd.phys +#define init_initrd_virt_base kernel_bootmem.initrd.virt +#define init_initrd_size kernel_bootmem.initrd.size +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_L_IO_APIC +#define boot_mpf_phys_base boot_get_vo_value(kernel_bootmem.mpf.phys) +#define boot_mpf_virt_base boot_get_vo_value(kernel_bootmem.mpf.virt) +#define boot_mpf_size boot_get_vo_value(kernel_bootmem.mpf.size) + +#define init_mpf_phys_base kernel_bootmem.mpf.phys +#define init_mpf_virt_base kernel_bootmem.mpf.virt +#define init_mpf_size kernel_bootmem.mpf.size + +#define boot_mpc_phys_base boot_get_vo_value(kernel_bootmem.mpc.phys) +#define boot_mpc_virt_base boot_get_vo_value(kernel_bootmem.mpc.virt) +#define boot_mpc_size boot_get_vo_value(kernel_bootmem.mpc.size) + +#define init_mpc_phys_base kernel_bootmem.mpc.phys +#define init_mpc_virt_base kernel_bootmem.mpc.virt +#define init_mpc_size kernel_bootmem.mpc.size +#endif /* CONFIG_L_IO_APIC */ + +#define boot_symtab_phys_base boot_get_vo_value(kernel_bootmem.symtab.phys) +#define boot_symtab_virt_base boot_get_vo_value(kernel_bootmem.symtab.virt) +#define boot_symtab_size boot_get_vo_value(kernel_bootmem.symtab.size) + +#define init_symtab_phys_base kernel_bootmem.symtab.phys +#define init_symtab_virt_base kernel_bootmem.symtab.virt +#define init_symtab_size kernel_bootmem.symtab.size + +#define boot_strtab_phys_base boot_get_vo_value(kernel_bootmem.strtab.phys) +#define boot_strtab_virt_base boot_get_vo_value(kernel_bootmem.strtab.virt) +#define boot_strtab_size boot_get_vo_value(kernel_bootmem.strtab.size) + +#define init_strtab_phys_base kernel_bootmem.strtab.phys +#define init_strtab_virt_base kernel_bootmem.strtab.virt +#define init_strtab_size kernel_bootmem.strtab.size + +#define boot_x86_hw_phys_base boot_get_vo_value(kernel_bootmem.x86_hw.phys) +#define boot_x86_hw_size boot_get_vo_value(kernel_bootmem.x86_hw.size) + +#define init_x86_hw_phys_base kernel_bootmem.x86_hw.phys +#define init_x86_hw_size kernel_bootmem.x86_hw.size + +extern unsigned long disable_caches; +extern unsigned long disable_secondary_caches; +extern unsigned long disable_IP; + +#ifdef CONFIG_NUMA +extern boot_spinlock_t __initdata boot_node_map_lock[MAX_NUMNODES]; +extern int __initdata node_mem_mapped[MAX_NUMNODES]; +#define boot_node_mem_mapped \ + boot_get_vo_value(node_mem_mapped[boot_numa_node_id()]) +#else /* ! 
CONFIG_NUMA */
+#define boot_node_map_lock SPIN_LOCK_UNLOCKED
+#define boot_node_mem_mapped 0
+#endif /* CONFIG_NUMA */
+
+/*
+ * Forwards of functions for virtual memory support initialization
+ */
+
+extern void boot_mem_init(bool bsp, int cpuid, boot_info_t *boot_info,
+		void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus));
+extern int boot_native_loader_probe_memory(
+		node_phys_mem_t *nodes_phys_mem,
+		boot_info_t *bootblock);
+extern int boot_biosx86_probe_memory(
+		node_phys_mem_t *nodes_phys_mem,
+		boot_info_t *bootblock);
+extern e2k_size_t boot_native_get_bootblock_size(boot_info_t *bblock);
+extern void boot_native_reserve_all_bootmem(bool bsp, boot_info_t *boot_info);
+extern void boot_reserve_stacks(boot_info_t *boot_info);
+extern void boot_reserve_kernel_image(bool bsp, boot_info_t *boot_info);
+extern void boot_reserve_bootblock(bool bsp, boot_info_t *boot_info);
+extern void boot_native_map_all_bootmem(bool bsp, boot_info_t *boot_info);
+extern void boot_map_kernel_image(bool populate_on_host);
+extern void boot_map_kernel_boot_stacks(void);
+extern void boot_map_all_phys_memory(void);
+extern void boot_map_all_bootinfo_areas(boot_info_t *boot_info);
+extern void init_mem_term(int cpuid);
+extern void boot_native_map_needful_to_equal_virt_area(
+		e2k_addr_t stack_top_addr);
+extern void boot_native_switch_to_virt(bool bsp, int cpuid,
+		void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus));
+extern void __init_recv switch_to_phys(void (*restart_sequel_func)(int));
+extern void __init_recv switch_to_phys_end(void);
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#else /* native kernel */
+/* it is native kernel without any virtualization */
+/* or it is native host kernel with virtualization support */
+static inline int __init
+boot_loader_probe_memory(node_phys_mem_t *nodes_phys_mem,
+				boot_info_t *bootblock)
+{
+	return boot_native_loader_probe_memory(nodes_phys_mem, bootblock);
+}
+
+static inline e2k_size_t __init
+boot_get_bootblock_size(boot_info_t *bootblock)
+{
+	return boot_native_get_bootblock_size(bootblock);
+}
+
+static inline void __init
+boot_reserve_all_bootmem(bool bsp, boot_info_t *boot_info)
+{
+	boot_native_reserve_all_bootmem(bsp, boot_info);
+}
+
+static inline void __init
+boot_map_all_bootmem(bool bsp, boot_info_t *boot_info)
+{
+	boot_native_map_all_bootmem(bsp, boot_info);
+}
+
+static inline void __init_recv
+boot_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr)
+{
+	boot_native_map_needful_to_equal_virt_area(stack_top_addr);
+}
+
+static inline void __init_recv
+boot_kernel_switch_to_virt(bool bsp, int cpuid,
+	void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync))
+{
+	boot_native_switch_to_virt(bsp, cpuid, boot_init_sequel_func);
+}
+
+/* pv_ops is not used in native host/guest mode */
+static inline void native_pv_ops_to_boot_ops(void)
+{
+}
+static inline void native_boot_pv_ops_to_ops(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* !(__ASSEMBLY__) */
+#endif /* _E2K_P2V_BOOT_INIT_H */
diff --git a/arch/e2k/include/asm/p2v/boot_map.h b/arch/e2k/include/asm/p2v/boot_map.h
new file mode 100644
index 0000000..174715c
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_map.h
@@ -0,0 +1,95 @@
+/* $Id: boot_map.h,v 1.5 2008/12/19 12:57:15 atic Exp $
+ *
+ * boot-time mapping of physical memory areas to virtual kernel space.
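+ *
+ * A minimal usage sketch (for illustration only; 'phys_base' and
+ * 'virt_base' are hypothetical addresses and PAGE_KERNEL protections
+ * are assumed):
+ *
+ *	long ret = boot_map_phys_area(phys_base, 4 * 1024 * 1024,
+ *			virt_base, PAGE_KERNEL, E2K_SMALL_PAGE_SIZE,
+ *			false, false);
+ *
+ * maps a 4 Mbytes physical area to kernel virtual space with small
+ * pages; the two trailing flags are 'ignore_busy' and 'host_map'
+ * (see boot_map_phys_area() declaration below).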
+ */ +#ifndef _E2K_P2V_BOOT_MAP_H +#define _E2K_P2V_BOOT_MAP_H + +#include +#include +#include +#include +#include +#include + +/* + * Structures to simulate TLB contents + */ + +typedef struct e2k_tlb_item { /* an item of TLB */ + e2k_addr_t virt_addr; /* virtual address - tag */ + int valid_bit; /* the item valid flag */ + int pt_level_id; /* page table level # to calculate */ + /* page size */ +} e2k_tlb_item_t; +typedef struct e2k_tlb_line { /* a line of TLB */ + e2k_tlb_item_t sets[NATIVE_TLB_SETS_NUM]; + int sets_num; /* number of valid entries in */ + /* the line */ +} e2k_tlb_line_t; +typedef struct e2k_tlb { /* all TLB */ + e2k_tlb_line_t lines[NATIVE_MAX_TLB_LINES_NUM]; + int entries_num; /* number of valid entries in */ + /* the TLB */ +} e2k_tlb_t; + +#ifndef CONFIG_SMP +extern e2k_tlb_t dtlb_contents; +extern e2k_tlb_t itlb_contents; +#define boot_dtlb_contents boot_vp_to_pp(&dtlb_contents) +#define boot_itlb_contents boot_vp_to_pp(&itlb_contents) +#else +extern e2k_tlb_t dtlb_contents[NR_CPUS]; +extern e2k_tlb_t itlb_contents[NR_CPUS]; +#define boot_dtlb_contents \ + boot_vp_to_pp(&dtlb_contents[boot_smp_processor_id()]) +#define boot_itlb_contents \ + boot_vp_to_pp(&itlb_contents[boot_smp_processor_id()]) +#endif /* CONFIG_SMP */ + +#define DTLB_ACCESS_MASK 0x01 +#define ITLB_ACCESS_MASK 0x02 +#define ALL_TLB_ACCESS_MASK (DTLB_ACCESS_MASK | ITLB_ACCESS_MASK) + +/* + * Forwards of boot-time functions to map physical areas to kernel virtual space + */ + +extern void boot_init_mapping(void); +#ifdef CONFIG_NUMA +extern void boot_node_init_mapping(void); +#endif /* CONFIG_NUMA */ + +/* Page Tables common structure interface's functions */ +extern pte_t * __init_recv +boot_get_double_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern pte_t * __init_recv +boot_get_common_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern void __init_recv +boot_set_double_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, bool host_map); +extern void __init_recv +boot_set_common_pte(e2k_addr_t addr, pte_t *ptep, pte_t pte, bool host_map); +extern pte_t * __init_recv +init_get_double_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern pte_t * __init_recv +init_get_common_huge_pte(e2k_addr_t addr, pgprot_t *ptp); +extern void __init_recv +init_double_pte_clear(pte_t *ptep); +extern void __init_recv +init_common_pte_clear(pte_t *ptep); + +extern long boot_map_phys_area(e2k_addr_t phys_area_addr, + e2k_size_t phys_area_size, e2k_addr_t area_virt_addr, + pgprot_t prot_flags, e2k_size_t page_size, + bool ignore_busy, bool host_map); +extern long boot_do_map_phys_area(e2k_addr_t phys_area_addr, + e2k_size_t phys_area_size, e2k_addr_t area_virt_addr, + pgprot_t prot_flags, const pt_level_t *pt_level, + bool ignore_busy, bool host_map); +extern int boot_map_to_equal_virt_area(e2k_addr_t area_addr, + e2k_size_t area_size, + pgprot_t prot_flags, tlb_tag_t tlb_prot_flags, + e2k_size_t max_page_size, int tlb_mask, int va); +extern int init_clear_temporary_ptes(int tlb_mask, int cpuid); + +#endif /* _E2K_P2V_BOOT_MAP_H */ diff --git a/arch/e2k/include/asm/p2v/boot_mmu_context.h b/arch/e2k/include/asm/p2v/boot_mmu_context.h new file mode 100644 index 0000000..0c22b12 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_mmu_context.h @@ -0,0 +1,91 @@ +/* + * boot-time mmu_context.h support + */ + +#ifndef _E2K_P2V_BOOT_MMU_CONTEXT_H_ +#define _E2K_P2V_BOOT_MMU_CONTEXT_H_ + +#include + +#include +#include +#include +#include + +/* + * Set kernel MMU state + */ + +static inline void boot_native_set_kernel_MMU_state_before(void) 
+{ + e2k_addr_t root_base = MMU_KERNEL_PPTB; + + E2K_WAIT_ALL; + + BOOT_WRITE_OSCUIR_REG_VALUE(0); + BOOT_WRITE_OSCUTD_REG_VALUE((unsigned long) boot_kernel_CUT); + + if (MMU_IS_SEPARATE_PT()) { + e2k_core_mode_t core_mode; + +#ifndef CONFIG_PARAVIRT_GUEST + BUILD_BUG_ON(MMU_SEPARATE_KERNEL_VAB != PAGE_OFFSET); +#endif /* ! CONFIG_PARAVIRT_GUEST */ + BOOT_WRITE_MMU_OS_VPTB(MMU_SEPARATE_KERNEL_VPTB); + BOOT_WRITE_MMU_OS_PPTB(root_base); + BOOT_WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + /* set user PT to kernel PT too/ as initial state */ + BOOT_WRITE_MMU_U_VPTB(MMU_SEPARATE_USER_VPTB); + BOOT_WRITE_MMU_U_PPTB(root_base); + + /* + * How to enable separate virt spaces: + * 1) On phys. memory set OS_VAB = 0 + * 2) Set CORE_MODE.sep_virt_space = 1 + * 3) Enable virtual memory in MMU_CR + * 4) Jump out from short address by calling any function + * by its absolute virtual address + * 5) Set proper virtual OS_VAB + */ + BOOT_WRITE_MMU_OS_VAB(0UL); + core_mode.CORE_MODE_reg = BOOT_READ_CORE_MODE_REG_VALUE(); + core_mode.CORE_MODE_sep_virt_space = 1; + BOOT_WRITE_CORE_MODE_REG_VALUE(core_mode.CORE_MODE_reg); + } else { + BOOT_WRITE_MMU_U_VPTB(MMU_UNITED_KERNEL_VPTB); + BOOT_WRITE_MMU_U_PPTB(root_base); + BOOT_WRITE_MMU_CONT(MMU_KERNEL_CONTEXT); + } + E2K_WAIT_ALL; +} + +static inline void boot_native_set_kernel_MMU_state_after(void) +{ + E2K_WAIT_ALL; + BOOT_WRITE_OSCUTD_REG_VALUE((unsigned long) kernel_CUT); + if (MMU_IS_SEPARATE_PT()) { + BOOT_WRITE_MMU_OS_VAB(MMU_SEPARATE_KERNEL_VAB); + } + E2K_WAIT_ALL; +} + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* it is native host kernel with virtualization support */ + +static inline void boot_set_kernel_MMU_state_before(void) +{ + boot_native_set_kernel_MMU_state_before(); +} + +static inline void boot_set_kernel_MMU_state_after(void) +{ + boot_native_set_kernel_MMU_state_after(); +} + +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_P2V_BOOT_MMU_CONTEXT_H_ */ diff --git a/arch/e2k/include/asm/p2v/boot_param.h b/arch/e2k/include/asm/p2v/boot_param.h new file mode 100644 index 0000000..c56be02 --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_param.h @@ -0,0 +1,107 @@ +/* + * Boot-time command line parsing. + * + * Copyright (C) 2011-2013 Pavel V. 
Panteleev (panteleev_p@mcst.ru) + */ + +#ifndef __E2K_P2V_BOOT_PARAM_H +#define __E2K_P2V_BOOT_PARAM_H + +#include + +#include + +#define _boot_ctype (boot_vp_to_pp((unsigned char *)_ctype)) +#define __boot_ismask(x) (_boot_ctype[(int)(unsigned char)(x)]) + +#define boot_isalnum(c) ((__boot_ismask(c)&(_U|_L|_D)) != 0) +#define boot_isalpha(c) ((__boot_ismask(c)&(_U|_L)) != 0) +#define boot_iscntrl(c) ((__boot_ismask(c)&(_C)) != 0) +#define boot_isdigit(c) ((__boot_ismask(c)&(_D)) != 0) +#define boot_isgraph(c) ((__boot_ismask(c)&(_P|_U|_L|_D)) != 0) +#define boot_islower(c) ((__boot_ismask(c)&(_L)) != 0) +#define boot_isprint(c) ((__boot_ismask(c)&(_P|_U|_L|_D|_SP)) != 0) +#define boot_ispunct(c) ((__boot_ismask(c)&(_P)) != 0) +/* Note: isspace() must return false for %NUL-terminator */ +#define boot_isspace(c) ((__boot_ismask(c)&(_S)) != 0) +#define boot_isupper(c) ((__boot_ismask(c)&(_U)) != 0) +#define boot_isxdigit(c) ((__boot_ismask(c)&(_D|_X)) != 0) + +/* Works only for digits and letters, but small and fast */ +#define BOOT_TOLOWER(x) ((x) | 0x20) + +/* + * Example of usage: + * + * int test = 0; + * ..... + * int boot_test(char *str) + * { + * boot_get_option(&str, boot_vp_to_pp(&test)); + * return 0; + * } + * + * boot_param("test", boot_test); + * ..... + * Function 'boot_test' would be called in case of kernel command line + * contains parameter 'test'. Input argument 'str' would point to the + * value of 'test' parameter. + */ + +typedef struct boot_kernel_param { + const char *str; + int (*setup_func)(char *); +} boot_kernel_param_t; + +extern boot_kernel_param_t __boot_setup_start[], __boot_setup_end[]; + +/* + * Only for really core code. See moduleparam.h for the normal way. + * + * Force the alignment so the compiler doesn't space elements of the + * boot_kernel_param "array" too far apart in .boot.setup. 
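+ *
+ * For example, boot_param("test", boot_test) expands (roughly, per the
+ * macro defined below) to:
+ *
+ *	static const char __boot_setup_str_boot_test[] __initconst
+ *		__aligned(1) = "test";
+ *	static struct boot_kernel_param __boot_setup_boot_test
+ *		__used __section(.boot.setup)
+ *		__attribute__((aligned(sizeof(long))))
+ *		= { __boot_setup_str_boot_test, boot_test };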
+ */
+#define __boot_setup_param(str, unique_id, fn) \
+	static const char __boot_setup_str_##unique_id[] __initconst \
+		__aligned(1) = str; \
+	static struct boot_kernel_param __boot_setup_##unique_id \
+		__used __section(.boot.setup) \
+		__attribute__((aligned((sizeof(long))))) \
+		= { __boot_setup_str_##unique_id, fn }
+
+#define boot_param(str, fn) \
+	__boot_setup_param(str, fn, fn)
+
+char *boot_skip_spaces(const char *str);
+int boot_get_option(char **str, int *pint);
+long long boot_simple_strtoll(const char *cp, char **endp, unsigned int base);
+long boot_simple_strtol(const char *cp, char **endp, unsigned int base);
+unsigned long boot_simple_strtoul(
+		const char *cp, char **endp, unsigned int base);
+unsigned long long boot_simple_strtoull(
+		const char *cp, char **endp, unsigned int base);
+void boot_native_parse_param(bootblock_struct_t *bootblock);
+
+struct kernel_param;
+
+extern char saved_boot_cmdline[];
+#define boot_saved_boot_cmdline \
+		boot_vp_to_pp((char *)saved_boot_cmdline)
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#else /* native kernel */
+/* it is native kernel without any virtualization */
+/* or it is native host kernel with virtualization support */
+static inline void
+boot_parse_param(bootblock_struct_t *bootblock)
+{
+	boot_native_parse_param(bootblock);
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __E2K_P2V_BOOT_PARAM_H */
diff --git a/arch/e2k/include/asm/p2v/boot_phys.h b/arch/e2k/include/asm/p2v/boot_phys.h
new file mode 100644
index 0000000..8019225
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_phys.h
@@ -0,0 +1,275 @@
+/* $Id: boot_phys.h,v 1.5 2009/06/29 11:53:06 atic Exp $
+ *
+ * Simple boot-time physical memory accounting and allocator.
+ * Discontiguous memory is supported at the level of physical memory banks.
+ */
+
+#ifndef _E2K_P2V_BOOT_PHYS_H
+#define _E2K_P2V_BOOT_PHYS_H
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/*
+ * The structure 'boot_phys_bank_t' is the same as the common kernel structure
+ * 'e2k_phys_bank_t' (see 'page.h' header). This structure is a physical
+ * memory bank specifier and is used to hold the boot-time physical memory
+ * configuration of the machine.
+ * The array 'boot_phys_banks[]' contains base addresses and sizes of all
+ * physical memory banks.
+ * To reduce the boot-time map size, the boot map represents only the first
+ * 'BOOT_MAX_PHYS_MEM_SIZE' bytes of the real physical memory configuration
+ * that are needed for the boot tasks. Creation of the full physical memory
+ * map can be completed later, when virtual memory support is ready.
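+ *
+ * For example, in the !NUMA case with NR_CPUS == 64 (an illustrative
+ * value), BOOT_MAX_PHYS_MEM_SIZE below is 64 * 16 Mbytes == 1 Gbyte,
+ * plus 16 Mbytes of BIOS-reserved memory when CONFIG_RECOVERY is enabled.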
+ */ + +typedef e2k_mem_map_t boot_mem_map_t; /* The same as common map */ + /* item : double-word */ + /* (64 bits == 64 pages) */ +typedef e2k_phys_bank_t boot_phys_bank_t; /* the same as common */ + /* memory bank structure */ +typedef node_phys_mem_t boot_phys_mem_t; /* The same as common */ + /* structure */ + +#define boot_phys_mem nodes_phys_mem /* The same as common banks */ + /* array */ + +#ifndef CONFIG_NUMA +#define BOOT_MAX_CPU_PHYS_MEM_SIZE (16UL * (1024 * 1024)) /* 16 Mbytes */ +/* some memory reserved by BIOS */ +#define BOOT_MAX_BIOS_PHYS_MEM_SIZE (16UL * (1024 * 1024)) /* 16 Mbytes */ + +#ifndef CONFIG_RECOVERY +#define BOOT_MAX_PHYS_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * NR_CPUS) +#else /* CONFIG_RECOVERY */ +#define BOOT_MAX_PHYS_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * NR_CPUS + \ + BOOT_MAX_BIOS_PHYS_MEM_SIZE) +#endif /* ! CONFIG_RECOVERY */ + +#else /* CONFIG_NUMA */ + +#define BOOT_MAX_CPU_PHYS_MEM_SIZE (16 * (1024 * 1024)) /* 16 Mbytes */ +/* some memory reserved by BIOS */ +#define BOOT_MAX_BIOS_PHYS_MEM_SIZE (16 * (1024 * 1024)) /* 16 Mbytes */ + +#ifndef CONFIG_RECOVERY +#define BOOT_MAX_NODE_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * MAX_NODE_CPUS) +#else /* CONFIG_RECOVERY */ +#define BOOT_MAX_NODE_MEM_SIZE (BOOT_MAX_CPU_PHYS_MEM_SIZE * MAX_NODE_CPUS + \ + BOOT_MAX_BIOS_PHYS_MEM_SIZE) +#endif /* ! CONFIG_RECOVERY */ + +typedef struct boot_node_mem_map { + boot_mem_map_t bitmap[(1UL * BOOT_MAX_NODE_MEM_SIZE * + L_MAX_NODE_PHYS_BANKS / PAGE_SIZE + + (sizeof(boot_mem_map_t) * 8 - 1)) / + (sizeof(boot_mem_map_t) * 8) + + L_MAX_NODE_PHYS_BANKS]; +} boot_node_mem_map_t; + +#endif /* ! CONFIG_NUMA */ + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM + +# define LOW_MEMORY_ENABLED_DEFAULT true + +extern bool low_memory_enabled; +# define boot_low_memory_enabled boot_get_vo_value(low_memory_enabled) +# define LOW_MEMORY_ENABLED() low_memory_enabled +# define BOOT_LOW_MEMORY_ENABLED() boot_low_memory_enabled +# define BOOT_SET_LOW_MEMORY_ENABLED() (BOOT_LOW_MEMORY_ENABLED() = true) +# define BOOT_LOW_MEMORY_DISABLED() !boot_low_memory_enabled +# define BOOT_SET_LOW_MEMORY_DISABLED() (BOOT_LOW_MEMORY_ENABLED() = false) + +extern void * __init_recv boot_pa_to_high_pa(void *lo_pa, + boot_info_t *boot_info); +extern bool __init boot_has_lo_bank_remap_to_hi(boot_phys_bank_t *phys_bank, + boot_info_t *bootblock); +extern void __init boot_remap_low_memory(bool bsp, boot_info_t *boot_info); + +#else /* ! 
CONFIG_ONLY_HIGH_PHYS_MEM */
+
+# define LOW_MEMORY_ENABLED()		true
+# define BOOT_LOW_MEMORY_ENABLED()	true
+# define BOOT_SET_LOW_MEMORY_ENABLED()
+# define BOOT_SET_LOW_MEMORY_DISABLED()
+
+static inline __init_recv void *
+boot_pa_to_high_pa(void *lo_pa, boot_info_t *boot_info)
+{
+	/* no conversion of physical addresses is needed */
+	return lo_pa;
+}
+static inline __init bool
+boot_has_lo_bank_remap_to_hi(boot_phys_bank_t *phys_bank,
+				boot_info_t *bootblock)
+{
+	return false;
+}
+
+static inline __init void
+boot_reserve_low_memory(boot_info_t *boot_info)
+{
+	/* low physical memory can be used in full, */
+	/* so no areas need to be reserved in advance */
+}
+
+static inline __init void
+boot_remap_low_memory(bool bsp, boot_info_t *boot_info)
+{
+	/* both low and high memory can be used, */
+	/* so no remapping from one to the other is needed */
+}
+#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */
+
+static inline __init void *
+boot_pa_end_to_high(void *lo_pa_end, boot_info_t *boot_info)
+{
+	/* the end address of a bank can be the start address of the next, */
+	/* so transform the end address into the last address inside a bank */
+	return boot_pa_to_high_pa(lo_pa_end - 1, boot_info) + 1;
+}
+
+/*
+ * Forwards of functions to allocate boot-time physical memory
+ */
+
+extern e2k_size_t boot_do_create_physmem_maps(boot_info_t *bootblock,
+						bool create);
+static inline __init e2k_size_t
+boot_create_physmem_maps(boot_info_t *bootblock)
+{
+	return boot_do_create_physmem_maps(bootblock, true);
+}
+static inline __init e2k_size_t
+boot_update_physmem_maps(boot_info_t *bootblock)
+{
+	return boot_do_create_physmem_maps(bootblock, false);
+}
+extern short __init_recv boot_init_new_phys_bank(int node,
+			node_phys_mem_t *node_mem,
+			e2k_addr_t bank_start, e2k_size_t bank_size);
+extern short __init boot_create_new_phys_bank(int node,
+			node_phys_mem_t *node_mem,
+			e2k_addr_t bank_start, e2k_size_t bank_size);
+extern void __init_recv boot_add_new_phys_bank(int node,
+			node_phys_mem_t *node_mem,
+			e2k_phys_bank_t *new_phys_bank,
+			short new_bank_ind);
+extern short __init_recv boot_delete_phys_bank_part(int node_id,
+			node_phys_mem_t *node_mem,
+			short bank, e2k_phys_bank_t *phys_bank,
+			e2k_addr_t from_addr, e2k_addr_t to_addr);
+extern short __init boot_create_phys_bank_part(int node_id,
+			node_phys_mem_t *node_mem,
+			short bank, e2k_phys_bank_t *phys_bank,
+			e2k_addr_t from_addr, e2k_addr_t to_addr);
+
+/* reserved memory flags (see following function arguments) */
+#define BOOT_NOT_IGNORE_BUSY_BANK	0x0000	/* area cannot intersect with */
+						/* any other areas while */
+						/* being reserved */
+#define BOOT_IGNORE_BUSY_BANK		0x0001	/* area can intersect with */
+						/* other such areas while */
+						/* being reserved */
+#define BOOT_IGNORE_BANK_NOT_FOUND	0x0002	/* area can point to pages */
+						/* outside of present banks */
+#define BOOT_CAN_BE_INTERSECTIONS	0x0004	/* area can intersect with */
+						/* other such areas after */
+						/* it was reserved */
+#define BOOT_DELETE_PHYS_MEM		0x0008	/* delete area from available */
+						/* physical memory */
+#define BOOT_ONLY_LOW_PHYS_MEM		0x0010	/* area can be always only */
+						/* at low memory ( < 2**32) */
+#define BOOT_IGNORE_AT_HIGH_PHYS_MEM	0x0020	/* the area does not need to */
+						/* be remapped from low to */
+						/* high physical memory */
+#define BOOT_EXCLUDE_AT_HIGH_PHYS_MEM	0x0040	/* area should be mapped */
+						/* only at low memory */
+						/* and excluded from high */
+						/* physical memory */
+#define BOOT_RESERVED_TO_FREE_PHYS_MEM	0x0080	/* area is reserved to be */
+						/* freed while bootmem freeing 
*/ +#define BOOT_ONLY_HIGH_PHYS_MEM 0x0100 /* area should be always */ + /* only at high memory */ +#define BOOT_FIRST_HIGH_PHYS_MEM 0x0200 /* area should be preferably */ + /* at high memory */ +#define BOOT_ONLY_ON_NODE_ALLOC_MEM 0x1000 /* allocate memory only on */ + /* the specified node */ +#define BOOT_IS_TRY_ALLOC_MEM 0x2000 /* it is try to allocate, */ + /* so miss is not BUG */ +#define BOOT_MERGEABLE_ALLOC_MEM 0x4000 /* allocated area can be */ + /* merged with other areas */ + /* if memory type is equal */ + +extern bool __init_recv boot_has_node_low_memory(int node, + boot_info_t *bootblock); +extern bool __init boot_has_node_high_memory(int node, boot_info_t *bootblock); +extern bool __init_recv boot_has_high_memory(boot_info_t *bootblock); + +extern int boot_reserve_physmem(e2k_addr_t virt_phys_addr, + e2k_size_t mem_size, busy_mem_type_t mem_type, + unsigned short flags); +extern int boot_delete_physmem(e2k_addr_t virt_phys_addr, + e2k_size_t mem_size); +extern void __init boot_rereserve_bank_area(int node_id, + boot_phys_mem_t *node_mem, + short bank, short new_bank, + short area, e2k_busy_mem_t *busy_area); +extern void *boot_alloc_node_mem(int node_id, e2k_size_t mem_size, + e2k_size_t align, e2k_size_t page_size, + busy_mem_type_t mem_type, unsigned short flags); + +#ifndef CONFIG_NUMA +#define boot_alloc_phys_mem(mem_size, align, type) \ + boot_alloc_node_mem(0, (mem_size), (align), \ + PAGE_SIZE, (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_MERGEABLE_ALLOC_MEM | \ + BOOT_FIRST_HIGH_PHYS_MEM) +#define boot_node_alloc_physmem(node_id, mem_size, align, type) \ + boot_alloc_phys_mem(mem_size, align, type) + +#else /* CONFIG_NUMA */ +#define boot_node_alloc_physmem(node_id, mem_size, align, type) \ + boot_alloc_node_mem((node_id), (mem_size), (align), \ + PAGE_SIZE, (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_MERGEABLE_ALLOC_MEM | \ + BOOT_FIRST_HIGH_PHYS_MEM) +#define boot_node_alloc_large_physpages(node_id, mem_size, align, type, flags) \ + boot_alloc_node_mem((node_id), (mem_size), (align), \ + BOOT_E2K_LARGE_PAGE_SIZE, (type), (flags)) +#define boot_alloc_phys_mem(mem_size, align, type) \ + boot_node_alloc_physmem(boot_numa_node_id(), \ + (mem_size), (align), (type)) +#endif /* ! CONFIG_NUMA */ + +#define boot_node_try_alloc_low_mem(mem_size, align, page_size, type) \ + boot_alloc_node_mem(boot_numa_node_id(), (mem_size), \ + (align), (page_size), (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_ONLY_ON_NODE_ALLOC_MEM | \ + BOOT_ONLY_LOW_PHYS_MEM | \ + BOOT_EXCLUDE_AT_HIGH_PHYS_MEM | \ + BOOT_RESERVED_TO_FREE_PHYS_MEM | \ + BOOT_IS_TRY_ALLOC_MEM) + +#define boot_the_node_try_alloc_pages(node_id, mem_size, page_size, type) \ + boot_alloc_node_mem((node_id), (mem_size), (page_size), \ + (page_size), (type), \ + BOOT_NOT_IGNORE_BUSY_BANK | \ + BOOT_ONLY_ON_NODE_ALLOC_MEM | \ + BOOT_FIRST_HIGH_PHYS_MEM | \ + BOOT_MERGEABLE_ALLOC_MEM | \ + BOOT_IS_TRY_ALLOC_MEM) + +extern long boot_map_physmem(pgprot_t prot_flags, e2k_size_t max_page_size); +extern void boot_expand_phys_banks_reserved_areas(void); + +#endif /* _E2K_P2V_BOOT_PHYS_H */ diff --git a/arch/e2k/include/asm/p2v/boot_smp.h b/arch/e2k/include/asm/p2v/boot_smp.h new file mode 100644 index 0000000..25f55db --- /dev/null +++ b/arch/e2k/include/asm/p2v/boot_smp.h @@ -0,0 +1,524 @@ +/* $Id: boot_smp.h,v 1.11 2008/06/11 20:02:07 atic Exp $ + * + * Heading of SMP boot-time initialization. 
+ *
+ * Copyright (C) 2001 Salavat Guiliazov
+ */
+
+#ifndef _E2K_P2V_BOOT_SMP_H
+#define _E2K_P2V_BOOT_SMP_H
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Atomic operations for boot-time initialization
+ */
+
+#define boot_mb()	mb()
+
+#define boot_atomic_read(value_p) \
+		atomic_read((atomic_t *)boot_vp_to_pp(value_p))
+#define boot_atomic_set(value_p, count) \
+		atomic_set((atomic_t *)boot_vp_to_pp(value_p), count)
+#define boot_atomic_inc(value_p) \
+		atomic_inc((atomic_t *)boot_vp_to_pp(value_p))
+#define boot_atomic_dec(value_p) \
+		atomic_dec((atomic_t *)boot_vp_to_pp(value_p))
+#define boot_atomic_inc_return(value_p) \
+		atomic_inc_return((atomic_t *)boot_vp_to_pp(value_p))
+
+/*
+ * Current CPU logical # and total number of active CPUs
+ */
+extern atomic_t boot_cpucount;
+#define boot_smp_get_processor_id() \
+({ \
+	int cpu_id = boot_early_pic_read_id(); \
+	boot_atomic_inc(&boot_cpucount); \
+	cpu_id; \
+})
+#define boot_smp_processors_num() \
+		boot_atomic_read(&boot_cpucount)
+#define init_smp_processors_num() \
+		atomic_read(&boot_cpucount)
+#define boot_reset_smp_processors_num() \
+		boot_atomic_set(&boot_cpucount, 0)
+#define init_reset_smp_processors_num() \
+		atomic_set(&boot_cpucount, 0)
+#define boot_set_smp_processors_num(num) \
+		boot_atomic_set(&boot_cpucount, num)
+#define init_set_smp_processors_num(num) \
+		atomic_set(&boot_cpucount, num)
+
+/*
+ * Special system register 'OSR0' is used to hold the logical processor number
+ * during boot-time initialization.
+ * Later this register will be used to hold the pointer to the 'current' task
+ * structure
+ */
+
+#define boot_smp_set_processor_id(cpuid) \
+		boot_set_current_thread_info(cpuid)
+#define boot_smp_processor_id() \
+({ \
+	long cpuid = (long)boot_current_thread_info(); \
+ \
+	if (cpuid >= BOOT_TASK_SIZE) \
+		cpuid = raw_smp_processor_id(); \
+	cpuid; \
+})
+
+#ifdef CONFIG_SMP
+#define BOOT_IS_BSP(__bsp)	(__bsp)
+#define INIT_IS_BSP(__bsp)	(__bsp)
+#else /* ! CONFIG_SMP */
+#define BOOT_IS_BSP(__bsp)	true
+#define INIT_IS_BSP(__bsp)	true
+#endif /* CONFIG_SMP */
+
+/*
+ * Simple IRQ save/restore operations for SMP boot-time initialization
+ */
+
+#define boot_raw_local_irq_restore(x)	BOOT_UPSR_RESTORE(x)
+#define boot_raw_local_irq_disable()	BOOT_UPSR_ALL_CLI()
+#define boot_raw_local_save_flags(x)	BOOT_UPSR_SAVE(x)
+#define boot_raw_irqs_disabled_flags(x)	__raw_irqs_disabled_flags(x)
+#define boot_raw_local_irq_enable()	BOOT_UPSR_ALL_STI()
+#define boot_raw_local_irq_save(x)	BOOT_UPSR_ALL_SAVE_AND_CLI(x)
+
+/*
+ * Simple spin lock operations for SMP boot-time initialization
+ */
+
+#ifdef CONFIG_SMP
+
+# include
+
+# define boot_spin_trylock(lock) arch_boot_spin_trylock(boot_vp_to_pp(lock))
+# define boot_spin_lock(lock)	arch_boot_spin_lock(boot_vp_to_pp(lock))
+# define boot_spin_unlock(lock)	arch_boot_spin_unlock(boot_vp_to_pp(lock))
+# define init_spin_trylock(lock) arch_boot_spin_trylock(lock)
+# define init_spin_lock(lock)	arch_boot_spin_lock(lock)
+# define init_spin_unlock(lock)	arch_boot_spin_unlock(lock)
+#else /* ! CONFIG_SMP */
+# define boot_spin_trylock(lock_p)
+# define boot_spin_lock(lock_p)
+# define boot_spin_unlock(lock_p)
+# define init_spin_trylock(lock_p)
+# define init_spin_lock(lock_p)
+# define init_spin_unlock(lock_p)
+#endif /* CONFIG_SMP */
+
+#define boot_spin_lock_irqsave(lock_p, flags) \
+({ \
+	boot_raw_local_irq_save(flags); \
+	boot_spin_lock(lock_p); \
+})
+
+#define boot_spin_unlock_irqrestore(lock_p, flags) \
+({ \
+	boot_spin_unlock(lock_p); \
+	boot_raw_local_irq_restore(flags); \
+})
+
+/*
+ * Simple spin lock operations for the CPU node boot-time initialization
+ */
+
+#define boot_node_spin_trylock(lock_p) \
+		boot_spin_trylock(&lock_p[boot_numa_node_id()])
+#define boot_node_spin_lock(lock_p) \
+		boot_spin_lock(&lock_p[boot_numa_node_id()])
+#define boot_node_spin_unlock(lock_p) \
+		boot_spin_unlock(&lock_p[boot_numa_node_id()])
+
+#define boot_dup_node_spin_trylock(lock_p) \
+		boot_spin_trylock(&lock_p[boot_my_node_dup_kernel_nid])
+#define boot_dup_node_spin_lock(lock_p) \
+		boot_spin_lock(&lock_p[boot_my_node_dup_kernel_nid])
+#define boot_dup_node_spin_unlock(lock_p) \
+		boot_spin_unlock(&lock_p[boot_my_node_dup_kernel_nid])
+
+#define init_dup_node_spin_trylock(lock_p) \
+		init_spin_trylock(&lock_p[init_my_node_dup_kernel_nid])
+#define init_dup_node_spin_lock(lock_p) \
+		init_spin_lock(&lock_p[init_my_node_dup_kernel_nid])
+#define init_dup_node_spin_unlock(lock_p) \
+		init_spin_unlock(&lock_p[init_my_node_dup_kernel_nid])
+
+/*
+ * Simple event maintenance for boot-time initialization
+ */
+
+#define boot_wait_for_event(event_p) \
+({ \
+	atomic_t *error_flag_p = boot_vp_to_pp(&boot_error_flag); \
+	while (!boot_atomic_read(event_p)) { \
+		if (unlikely(atomic_read(error_flag_p))) { \
+			BOOT_BUG_POINT(__func__); \
+			BOOT_BUG("detected BOOT ERROR FLAG while " \
+				"waiting for event\n"); \
+		} \
+		boot_mb(); \
+	} \
+})
+#define boot_read_event(event_p) \
+		boot_atomic_read(event_p)
+#define boot_set_event(event_p) \
+		boot_atomic_set(event_p, 1)
+#define boot_reset_event(event_p) \
+		boot_atomic_set(event_p, 0)
+#define boot_wait_for_boot_event(boot_event_p, error_flag_p) \
+({ \
+	while (!atomic_read(boot_event_p)) { \
+		if (unlikely(atomic_read(error_flag_p))) { \
+			BOOT_BUG_POINT(__func__); \
+			BOOT_BUG("detected BOOT ERROR FLAG while " \
+				"waiting for event\n"); \
+		} \
+		boot_mb(); \
+	} \
+})
+#define boot_read_boot_event(boot_event_p)	atomic_read(boot_event_p)
+#define boot_set_boot_event(boot_event_p)	atomic_set(boot_event_p, 1)
+#define boot_reset_boot_event(boot_event_p)	atomic_set(boot_event_p, 0)
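+
+/*
+ * A minimal usage sketch of the event macros above (illustrative only;
+ * 'boot_step_done' is a hypothetical event flag):
+ *
+ *	static atomic_t boot_step_done;
+ *	.......
+ *	boot_set_event(&boot_step_done);	(on the BSP)
+ *	boot_wait_for_event(&boot_step_done);	(on every other CPU)
+ *
+ * The macros convert the virtual pointer with boot_vp_to_pp() internally,
+ * so the event flag is passed by its image (virtual) address.
+ */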
+
+/*
+ * Physical number and map of live CPUs passed by loader/BIOS through
+ * the bootinfo structure
+ */
+
+extern int phys_cpu_present_num;	/* number of present CPUs */
+					/* (passed by BIOS thru */
+					/* MP table) */
+extern int cpu_to_sync_num;		/* real number of CPUs to */
+					/* synchronize */
+
+#define boot_set_phys_cpu(cpuid, mask)	physid_set(cpuid, mask)
+#define boot_test_phys_cpu(cpuid, mask)	physid_isset(cpuid, mask)
+
+#define boot_phys_cpu_present_map_p	boot_vp_to_pp(&phys_cpu_present_map)
+
+#define boot_set_phys_cpu_present(cpu) \
+		boot_set_phys_cpu(cpu, *boot_phys_cpu_present_map_p)
+#define boot_phys_cpu_present(cpu) \
+		boot_test_phys_cpu(cpu, *boot_phys_cpu_present_map_p)
+
+#define boot_phys_cpu_present_num boot_get_vo_value(phys_cpu_present_num)
+#define boot_cpu_to_sync_num	boot_get_vo_value(cpu_to_sync_num)
+
+#ifdef CONFIG_NUMA
+extern atomic_t early_node_has_dup_kernel_num;
+
+#define boot_physid_to_cpu_mask(physid_mask_p) \
+({ \
cpumask_t cpu_mask; \ + bitmap_copy(cpumask_bits(&cpu_mask), physid_mask_p->bits, \ + nr_cpumask_bits); \ + cpu_mask; \ +}) + +#define boot_node_to_cpumask(node) \ +({ \ + cpumask_t cpumask; \ + cpumask_t node_cpumask; \ + cpumask_t boot_main_cpu_mask = boot_physid_to_cpu_mask( \ + boot_phys_cpu_present_map_p); \ + bitmap_fill(cpumask_bits(&cpumask), boot_machine.nr_node_cpus); \ + cpumask_shift_left(&node_cpumask, (const cpumask_t *)&cpumask, \ + node * boot_machine.max_nr_node_cpus); \ + cpumask_and(&cpumask, &node_cpumask, &boot_main_cpu_mask); \ + cpumask; \ +}) + +#define boot___apicid_to_node boot_get_vo_value(__apicid_to_node) + +#define boot_cpu_to_node(cpu) ((cpu) / boot_machine.max_nr_node_cpus) +#define boot_numa_node_id() boot_cpu_to_node(boot_smp_processor_id()) +#define BOOT_BS_NODE_ID (0) +#define BOOT_IS_BS_NODE (boot_numa_node_id() == BOOT_BS_NODE_ID) + +#define boot_node_is_online(node) \ + (boot_phys_nodes_map & (1 << (node))) +#define boot_node_has_online_mem(nid) \ + (boot_nodes_phys_mem[nid].pfns_num != 0) + +#define boot_early_node_has_dup_kernel_from(node_from) \ +({ \ + int node = (node_from); \ + while (node < MAX_NUMNODES && \ + !BOOT_EARLY_THE_NODE_HAS_DUP_KERNEL(node)) { \ + node ++; \ + } \ + node; \ +}) +#define boot_early_next_node_has_dup_kernel(node_prev) \ + boot_early_node_has_dup_kernel_from((node_prev) + 1) +#define boot_node_has_dup_kernel_from(node_from) \ +({ \ + int node = (node_from); \ + while (node < MAX_NUMNODES && \ + !boot_the_node_has_dup_kernel(boot_numa_node_id(), \ + node)) { \ + node ++; \ + } \ + node; \ +}) +#define boot_next_node_has_dup_kernel(node_prev) \ +({ \ + int node_from = (node_prev) + 1; \ + boot_node_has_dup_kernel_from(node_from); \ +}) +#define boot_node_has_not_dup_kernel_from(node_from) \ +({ \ + int node = (node_from); \ + while (node < MAX_NUMNODES && (!boot_node_is_online(node) || \ + boot_the_node_has_dup_kernel(boot_numa_node_id(), \ + node))) { \ + node ++; \ + } \ + node; \ +}) +#define boot_next_node_has_not_dup_kernel(node_prev) \ +({ \ + int node_from = (node_prev) + 1; \ + boot_node_has_not_dup_kernel_from(node_from); \ +}) + +/* + * Get a next node which has own duplicated kernel image + * We start from the follow node and search in direct of increasing + * node number. 
If there are no more nodes, we restart the search
+ * from node #1 and take node #0 only last, so the same algorithm is used
+ * as when building zone lists on each node (see mm/page_alloc.c)
+ */
+#define boot_early_get_next_node_has_dup_kernel(node_prev) \
+({ \
+	int node_next = boot_early_next_node_has_dup_kernel(node_prev); \
+	if (node_next >= MAX_NUMNODES) { \
+		node_next = boot_early_next_node_has_dup_kernel(0); \
+		if (node_next >= MAX_NUMNODES) { \
+			node_next = 0;	/* BS node */ \
+		} \
+	} \
+	node_next; \
+})
+
+#define boot_for_each_node_has_online_mem(node) \
+	for ((node) = 0, \
+		({while ((node) < MAX_NUMNODES && \
+				!boot_node_has_online_mem(node)) \
+			(node) ++;}); \
+		(node) < MAX_NUMNODES; \
+		({ (node) ++; while ((node) < MAX_NUMNODES && \
+				!boot_node_has_online_mem(node)) \
+			(node) ++;}))
+
+#define boot_for_each_node_has_dup_kernel(node) \
+	for ((node) = boot_node_has_dup_kernel_from(0); \
+		(node) < MAX_NUMNODES; \
+		(node) = boot_next_node_has_dup_kernel(node))
+
+#define boot_for_each_node_has_not_dup_kernel(node) \
+	for ((node) = boot_node_has_not_dup_kernel_from(0); \
+		(node) < MAX_NUMNODES; \
+		(node) = boot_next_node_has_not_dup_kernel(node))
+
+#define boot_for_each_cpu(cpu, mask) \
+	for ((cpu) = -1; \
+		(cpu) = cpumask_next((cpu), (mask)), \
+		(cpu) < NR_CPUS;)
+
+#define boot_for_each_online_cpu_of_node(node, cpu, cpu_mask) \
+	cpu_mask = boot_node_to_cpumask(node); \
+	boot_for_each_cpu(cpu, &cpu_mask)
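+
+/*
+ * Example of usage (an illustrative sketch; 'node' and 'do_something()'
+ * are hypothetical):
+ *
+ *	cpumask_t cpu_mask;
+ *	int cpu;
+ *	boot_for_each_online_cpu_of_node(node, cpu, cpu_mask) {
+ *		do_something(cpu);
+ *	}
+ */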
+
+/*
+ * The next variables, arrays and structures have their own copy on each node
+ */
+/* map of nodes which have a duplicated kernel image and own page tables */
+#define boot_the_node_has_dup_kernel_map(nid) \
+		boot_the_node_get_vo_value(nid, node_has_dup_kernel_map)
+#define boot_the_node_has_dup_kernel(nid_where, nid_which) \
+		boot_test_bit(nid_which, \
+			boot_the_node_vp_to_pp(nid_where, \
+						&(node_has_dup_kernel_map)))
+#define boot_node_has_dup_kernel() \
+		boot_the_node_has_dup_kernel(boot_numa_node_id(), \
+						boot_numa_node_id())
+#define boot_the_node_set_has_dup_kernel(nid_where, nid_which) \
+		boot_set_bit(nid_which, \
+			boot_the_node_vp_to_pp(nid_where, \
+						&(node_has_dup_kernel_map)))
+
+/* number of nodes which have a duplicated kernel image and own page tables */
+#define boot_early_node_has_dup_kernel_num \
+		boot_get_vo_value(early_node_has_dup_kernel_num)
+#define boot_the_node_has_dup_kernel_num(nid) \
+		boot_the_node_get_vo_value(nid, node_has_dup_kernel_num)
+#define BOOT_EARLY_THERE_IS_DUP_KERNEL \
+		boot_atomic_read(&boot_early_node_has_dup_kernel_num)
+#define BOOT_EARLY_DUP_KERNEL_NUM \
+		(boot_atomic_read(&boot_early_node_has_dup_kernel_num) + 1)
+#define BOOT_THE_NODE_THERE_IS_DUP_KERNEL(nid) \
+		boot_atomic_read(&boot_the_node_has_dup_kernel_num(nid))
+#define BOOT_THE_NODE_DUP_KERNEL_NUM(nid) \
+		(boot_atomic_read(&boot_the_node_has_dup_kernel_num(nid)) + 1)
+#define BOOT_NODE_THERE_IS_DUP_KERNEL() \
+		BOOT_THE_NODE_THERE_IS_DUP_KERNEL(boot_numa_node_id())
+#define BOOT_NODE_DUP_KERNEL_NUM() \
+		BOOT_THE_NODE_DUP_KERNEL_NUM(boot_numa_node_id())
+
+/* array of node IDs of the node whose kernel image and page tables */
+/* this node uses: if the node has its own copy of the kernel, the ID */
+/* is its own ID; if the node has no own copy of the image and page */
+/* tables, the ID is that of the node whose kernel image and page */
+/* tables it uses (currently the ID of the BS node) */
+#define boot_the_node_dup_kernel_nid(nid) \
+		((int *)(boot_the_node_vp_to_pp(nid, \
+						all_nodes_dup_kernel_nid)))
+#define boot_dup_kernel_nid \
+		boot_the_node_dup_kernel_nid(boot_numa_node_id())
+#define boot_node_dup_kernel_nid(node) \
+		(boot_dup_kernel_nid[node])
+#define boot_my_node_dup_kernel_nid \
+		boot_node_dup_kernel_nid(boot_numa_node_id())
+#define init_my_node_dup_kernel_nid \
+		node_dup_kernel_nid(init_numa_node_id())
+
+/* array of pointers to pg_dir (root page table) on each node */
+#define boot_the_node_pg_dir(nid) \
+		((pgd_t **)(boot_the_node_vp_to_pp(nid, \
+						all_nodes_pg_dir)))
+#define boot_node_pg_dir \
+		boot_the_node_pg_dir(boot_numa_node_id())
+
+#define init_cpu_to_node(cpu)	((cpu) / machine.max_nr_node_cpus)
+#define init_numa_node_id()	init_cpu_to_node(boot_early_pic_read_id())
+#else /* ! CONFIG_NUMA */
+#define BOOT_IS_BS_NODE		1
+#define boot_numa_node_id()	0
+#define boot_for_each_node_has_dup_kernel(node) \
+		for ((node) = 0; (node) < 1; (node) ++)
+#define boot_node_has_online_mem(nid)	1
+
+#define init_numa_node_id()	0
+#endif /* CONFIG_NUMA */
+
+extern void boot_setup_smp_cpu_config(boot_info_t *boot_info);
+
+/*
+ * Flag of an error that occurred during boot-time initialization
+ */
+
+extern atomic_t boot_error_flag;
+
+/*
+ * Synchronize all active processors at the specified point during boot-time
+ * initialization
+ */
+
+#define BOOT_NO_ERROR_FLAG 0
+
+#ifdef CONFIG_VIRTUALIZATION
+#include	/* to redefine synchronization times */
+#endif /* CONFIG_VIRTUALIZATION */
+
+/*
+ * number of iterations of waiting for completion of synchronization
+ */
+#ifndef BOOT_WAITING_FOR_SYNC_ITER
+#define BOOT_WAITING_FOR_SYNC_ITER	(1000 * NR_CPUS)
+#endif /* ! BOOT_WAITING_FOR_SYNC_ITER */
+
+/*
+ * number of loops in each iteration of waiting for
+ * synchronization completion
+ */
+
+#ifndef BOOT_WAITING_FOR_SYNC_LOOPS
+#if defined(CONFIG_MEMLIMIT) && defined(CONFIG_EXT_MEMLIMIT)
+#define BOOT_WAITING_FOR_SYNC_LOOPS	(NR_CPUS * 64 * \
+					(CONFIG_MEMLIMIT+CONFIG_EXT_MEMLIMIT))
+#else
+#define BOOT_WAITING_FOR_SYNC_LOOPS	(NR_CPUS * 16000)
+#endif
+#endif /* ! BOOT_WAITING_FOR_SYNC_LOOPS */
+
+#ifdef CONFIG_SMP
+typedef union cpu_sync_count {
+	atomic_t num_arrived;
+	u64 pad;
+} cpu_sync_count_t;
+
+extern void __boot_sync_all_processors(atomic_t *num_arrived);
+extern void __init_sync_all_processors(atomic_t *num_arrived, int cpus_to_sync);
+
+extern cpu_sync_count_t __cacheline_aligned_in_smp num_arrived;
+# define boot_sync_all_processors() \
+do { \
+	__boot_sync_all_processors(&num_arrived.num_arrived); \
+} while (0)
+
+/* number of CPUs arrived to sync while boot-time init completion */
+extern cpu_sync_count_t __cacheline_aligned_in_smp init_num_arrived;
+# define init_sync_all_processors(cpus) \
+do { \
+	__init_sync_all_processors(&init_num_arrived.num_arrived, cpus); \
+} while (0)
+#else
+# define boot_sync_all_processors()		do { } while (0)
+# define init_sync_all_processors(cpus)		do { } while (0)
+#endif
+
+extern int boot_native_smp_cpu_config(boot_info_t *bootblock);
+extern int boot_biosx86_smp_cpu_config(boot_info_t *bootblock);
+extern void boot_native_smp_node_config(boot_info_t *bootblock);
+extern void boot_biosx86_smp_node_config(boot_info_t *bootblock);
+
+static inline void boot_native_cpu_relax(void)
+{
+	/* nothing to do */
+}
+
+#ifdef CONFIG_RECOVERY
+extern void boot_recover_smp_cpu_config(boot_info_t *boot_info);
+#endif /* CONFIG_RECOVERY */
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#else /* native kernel */
+/* it is native kernel without any virtualization */
+/* or it is native host kernel with virtualization support */
+static inline e2k_size_t __init
+boot_smp_cpu_config(boot_info_t *bootblock)
+{
+	return boot_native_smp_cpu_config(bootblock);
+}
+static inline void __init
+boot_smp_node_config(boot_info_t *bootblock)
+{
+	boot_native_smp_node_config(bootblock);
+}
+static inline void
+boot_cpu_relax(void)
+{
+	boot_native_cpu_relax();
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* !(__ASSEMBLY__) */
+#endif /* !(_E2K_P2V_BOOT_SMP_H) */
diff --git a/arch/e2k/include/asm/p2v/boot_spinlock.h b/arch/e2k/include/asm/p2v/boot_spinlock.h
new file mode 100644
index 0000000..199e7be
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_spinlock.h
@@ -0,0 +1,97 @@
+#pragma once
+
+#include
+#include
+
+
+# if defined(CONFIG_PARAVIRT_GUEST) || defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is paravirtualized host and guest kernel */
+/* or native guest kernel */
+# include
+# define arch_boot_spin_lock_slow(lock) \
+		kvm_arch_boot_spin_lock_slow((lock))
+# define arch_boot_spin_locked_slow(lock) \
+		kvm_arch_boot_spin_locked_slow((lock))
+# else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel without any virtualization */
+/* or it is native host kernel with virtualization support */
+# define arch_boot_spin_lock_slow(lock)		do { } while (0)
+# define arch_boot_spin_locked_slow(lock)	do { } while (0)
+# endif /* CONFIG_PARAVIRT_GUEST || CONFIG_KVM_GUEST_KERNEL */
+
+
+static inline void boot_native_spin_unlock_wait(boot_spinlock_t *lock)
+{
+	boot_spinlock_t val;
+	u16 next;
+
+	val.lock = READ_ONCE(lock->lock);
+
+	if (likely(val.head == val.tail))
+		return;
+
+	next = val.tail;
+
+	do {
+		val.lock = READ_ONCE(lock->lock);
+	} while (val.head != val.tail && ((s16) (next - val.head) > 0));
+}
+
+static inline int boot_native_spin_is_locked(boot_spinlock_t *lock)
+{
+	boot_spinlock_t val;
+
+	val.lock = READ_ONCE(lock->lock);
+
+	return val.head != val.tail;
+}
+
+static __always_inline int boot_native_spin_value_unlocked(boot_spinlock_t lock)
+{
+	return lock.head == lock.tail;
+}
+
+static inline int boot_native_spin_is_contended(boot_spinlock_t *lock)
+{
+	boot_spinlock_t val;
+
+	val.lock = READ_ONCE(lock->lock);
+
+	return val.tail - val.head > 1;
+}
+
+static inline int arch_boot_spin_trylock(boot_spinlock_t *lock)
+{
+	return __api_atomic_ticket_trylock(&lock->lock,
+					BOOT_SPINLOCK_TAIL_SHIFT);
+}
+
+static inline void arch_boot_spin_lock(boot_spinlock_t *lock)
+{
+	boot_spinlock_t val;
+	u16 ticket, ready;
+
+	/* Tail must be in the high 16 bits, otherwise this atomic
+	 * addition will corrupt head. */
+	val.lock = __api_atomic32_add_oldval_lock(1 << BOOT_SPINLOCK_TAIL_SHIFT,
+			&lock->lock);
+	ticket = val.tail;
+	ready = val.head;
+
+	if (likely(ticket == ready))
+		return;
+
+	do {
+		arch_boot_spin_lock_slow(lock);
+	} while (unlikely(ticket != (ready = READ_ONCE(lock->head))));
+
+	arch_boot_spin_locked_slow(lock);
+}
+
+#ifndef arch_boot_spin_unlock
+#define arch_boot_spin_unlock arch_boot_spin_unlock
+static inline void arch_boot_spin_unlock(boot_spinlock_t *lock)
+{
+	smp_store_release(&lock->head, lock->head + 1);
+}
+#endif
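+
+/*
+ * A short worked example of the ticket protocol above (the values are
+ * illustrative): two CPUs call arch_boot_spin_lock() on an unlocked
+ * lock (head == tail == 0).  CPU0 atomically increments tail and gets
+ * ticket == 0 == head, so it acquires the lock at once.  CPU1 gets
+ * ticket == 1 != head and spins reading lock->head.  When CPU0 calls
+ * arch_boot_spin_unlock(), it stores head + 1 == 1 with release
+ * semantics; this matches CPU1's ticket and CPU1 enters the critical
+ * section.
+ */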
diff --git a/arch/e2k/include/asm/p2v/boot_spinlock_types.h b/arch/e2k/include/asm/p2v/boot_spinlock_types.h
new file mode 100644
index 0000000..f2115c2
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_spinlock_types.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include
+
+#define BOOT_SPINLOCK_HEAD_SHIFT	0
+#define BOOT_SPINLOCK_TAIL_SHIFT	16
+typedef union {
+	u32 lock;
+	struct {
+		u16 head;
+		u16 tail;
+	};
+} boot_spinlock_t;
+
+#define __BOOT_SPIN_LOCK_UNLOCKED	(boot_spinlock_t) { .lock = 0 }
diff --git a/arch/e2k/include/asm/p2v/boot_v2p.h b/arch/e2k/include/asm/p2v/boot_v2p.h
new file mode 100644
index 0000000..ca7f538
--- /dev/null
+++ b/arch/e2k/include/asm/p2v/boot_v2p.h
@@ -0,0 +1,297 @@
+/*
+ *
+ * Heading of boot-time initialization.
+ *
+ * Copyright (C) 2001 Salavat Guiliazov
+ */
+
+#ifndef _E2K_P2V_BOOT_V2P_H
+#define _E2K_P2V_BOOT_V2P_H
+
+#include
+#include
+
+#include
+#include
+#include
+
+#define EOS_RAM_BASE_LABEL	_data
+#define KERNEL_START_LABEL	_start	/* start label of Linux Image */
+#define KERNEL_END_LABEL	_end	/* end label of Linux Image */
+
+#ifdef __ASSEMBLY__
+
+#define KERNEL_BASE	[KERNEL_START_LABEL]	/* virtual address of Linux */
+						/* Image beginning */
+#define KERNEL_END	[KERNEL_END_LABEL]	/* virtual address of Linux */
+						/* Image end */
+#define EOS_RAM_BASE	[EOS_RAM_BASE_LABEL]
+
+#else /* !(__ASSEMBLY__) */
+
+#define EOS_RAM_BASE	((e2k_addr_t)&EOS_RAM_BASE_LABEL)
+
+#define KERNEL_BASE	((e2k_addr_t)&KERNEL_START_LABEL)
+#define KERNEL_END	((e2k_addr_t)&KERNEL_END_LABEL)
+
+#define HIGH_PHYS_MEM_SHIFT	32	/* above 2**32 */
+#define HIGH_PHYS_MEM_BASE	(1UL << HIGH_PHYS_MEM_SHIFT)
+#define LOW_PHYS_MEM_MASK	((1UL << HIGH_PHYS_MEM_SHIFT) - 1)
+
+static inline bool
+is_addr_from_low_memory(e2k_addr_t addr)
+{
+	return (addr < HIGH_PHYS_MEM_BASE) ? true : false;
+}
+
+static inline bool
+is_addr_from_high_memory(e2k_addr_t addr)
+{
+	return (addr >= HIGH_PHYS_MEM_BASE) ? true : false;
+}
+
+/*
+ * Convert the virtual address (pointer) of a global or static variable,
+ * array, structure, string or other item of the Linux image to the
+ * corresponding physical address, while the boot process is in progress
+ * and virtual memory support is not yet ready.
+ * The linker loads the Linux image into virtual space and all of the items
+ * enumerated above have virtual addresses inside the image. The BIOS loader
+ * loads the image into some existing area of physical memory, virtual
+ * addressing is off and direct access to the items is impossible.
+ * The loader should write the pointer to the image text segment location
+ * in physical memory to the 'OSCUD' register:
+ *	OSCUD.OSCUD_base
+ *	OSCUD.OSCUD_size
+ * and the pointer to the image data & bss segments location in physical
+ * memory to the 'OSGD' register:
+ *	OSGD.OSGD_base
+ *	OSGD.OSGD_size
+ * These areas can intersect.
+ * If some item of the image (see above) is located in the text, data or
+ * bss segment, then to access it by absolute address (pointer) you should
+ * call this function to convert the absolute virtual address to the real
+ * physical address.
+ *
+ * Example:
+ *
+ *	char	boot_buf[81];
+ *	int	boot_buf_size = 80;
+ *	.......
+ *	void
+ *	xxx_func()
+ *	{
+ *		char *buf = (char *)boot_va_to_pa((void *)boot_buf);
+ *		int buf_size = *((int *)boot_va_to_pa(
+ *					(e2k_addr_t)&boot_buf_size));
+ *	.......
+ *	}
+ *
+ * NOTE !!!!! It is better to use the macros defined below to access image
+ * objects instead of this function. The macros have more convenient
+ * interfaces
+ */
+
+static inline void *
+boot_native_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base)
+{
+	unsigned long os_base;
+
+	os_base = NATIVE_READ_OSCUD_LO_REG_VALUE() & OSCUD_lo_base_mask;
+	if (os_base >= NATIVE_KERNEL_VIRTUAL_SPACE_BASE) {
+		return virt_pnt;
+	} else if ((e2k_addr_t)virt_pnt >= KERNEL_BASE) {
+		if (kernel_base == -1)
+			kernel_base = os_base;
+		return (void *)(kernel_base +
+				((e2k_addr_t)virt_pnt - KERNEL_BASE));
+	} else {
+		return virt_pnt;
+	}
+}
+
+static inline void *
+boot_native_va_to_pa(void *virt_pnt)
+{
+	return boot_native_kernel_va_to_pa(virt_pnt, -1);
+}
+
+static inline void *
+boot_native_func_to_pa(void *virt_pnt)
+{
+	return boot_native_va_to_pa(virt_pnt);
+}
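+
+/*
+ * A worked example of the conversion above (all values are illustrative):
+ * if the loader placed the image at physical address 0x10000000 and
+ * KERNEL_BASE is the linked virtual address of _start, then for any
+ * pointer p inside the image boot_native_va_to_pa(p) returns
+ *	0x10000000 + ((e2k_addr_t)p - KERNEL_BASE),
+ * i.e. the same offset within the image, rebased from the linked
+ * virtual area onto the physical area actually used by the loader.
+ */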
For example KVM guest kernel booting on physical memory mapped + * to virtual space with PAGE_OFFSET + * So it needs convert a virtual physical address to real physical. + * Native kernel booting on real physical memory, so convertion does not need + */ +static inline e2k_addr_t +boot_native_vpa_to_pa(e2k_addr_t vpa) +{ + return vpa; +} +static inline e2k_addr_t +boot_native_pa_to_vpa(e2k_addr_t pa) +{ + return pa; +} +static inline e2k_addr_t +native_vpa_to_pa(e2k_addr_t vpa) +{ + return vpa; +} +static inline e2k_addr_t +native_pa_to_vpa(e2k_addr_t pa) +{ + return pa; +} + +/* + * Convert pointer of global or static variable, array, structure, string or + * other item of linux image, which is located into the virtual linux text, + * data or bss segment to the consistent pointer with physical address of + * object, while booting process is in the progress and virtual memory + * support is not yet ready. + * See comments above ('boot_va_to_pa()' function declaration). + * + * Example of usage: + * + * char boot_buf[81]; + * + * ....... + * void + * xxx_func() + * { + * char *buf = boot_vp_to_pp(boot_buf); + * + * ....... + * } + */ + +#define boot_native_vp_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_native_va_to_pa((void *)(virt_pnt))) +#define boot_native_func_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_native_va_to_pa((void *)(virt_pnt))) +#define boot_vp_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_va_to_pa((void *)(virt_pnt))) +#define boot_func_to_pp(virt_pnt) \ + ((typeof(virt_pnt))boot_func_to_pa((void *)(virt_pnt))) + +/* + * Get value of object (variable, array, structure, string or other item of + * linux image) which is located into the virtual linux text, data or bss + * segment, while booting process is in the progress and virtual memory support + * is not yet ready. + * See comments above ('boot_va_to_pa()' function declaration). + * + * Example of usage: + * + * static long *boot_long_p; + * int boot_buf_size = 80; + * + * ....... + * void + * xxx_func() + * { + * int buf_size = boot_get_vo_value(boot_buf_size); + * long *long_p = boot_get_vo_value(boot_long_p); + * + * long_p[0] = buf_size; + * ....... + * } + */ + +#define boot_native_get_vo_value(virt_value_name) \ + (*(boot_native_vp_to_pp(&virt_value_name))) +#define boot_get_vo_value(virt_value_name) \ + (*(boot_vp_to_pp(&virt_value_name))) + +/* + * Get name of object (variable, array, structure, string or other item of + * linux image) which is located into the virtual linux text, data or bss + * segment, while booting process is in the progress and virtual memory support + * is not yet ready. This name can be used to assign a value to the object. + * See comments above ('boot_va_to_pa()' function declaration). + * + * Example of usage: + * + * static int boot_memory_size; + * + * ....... + * void + * xxx_func() + * { + * int total_memory_size = 0; + * + * ....... + * boot_get_vo_name(boot_memory_size) = total_memory_size; + * ....... 
+ * } + */ + +#define boot_native_get_vo_name(virt_value_name) \ + (*(typeof(virt_value_name) *)boot_native_vp_to_pp( \ + &virt_value_name)) +#define boot_get_vo_name(virt_value_name) \ + (*(typeof(virt_value_name) *)boot_vp_to_pp( \ + &virt_value_name)) + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops */ +#include +#else /* native kernel */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ + +static inline void * +boot_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return boot_native_kernel_va_to_pa(virt_pnt, kernel_base); +} + +static inline void * +boot_func_to_pa(void *virt_pnt) +{ + return boot_native_va_to_pa(virt_pnt); +} + +static inline void * +boot_va_to_pa(void *virt_pnt) +{ + return boot_native_va_to_pa(virt_pnt); +} + +static inline e2k_addr_t +boot_vpa_to_pa(e2k_addr_t vpa) +{ + return boot_native_vpa_to_pa(vpa); +} + +static inline e2k_addr_t +boot_pa_to_vpa(e2k_addr_t pa) +{ + return boot_native_pa_to_vpa(pa); +} + +static inline e2k_addr_t +vpa_to_pa(e2k_addr_t vpa) +{ + return native_vpa_to_pa(vpa); +} + +static inline e2k_addr_t +pa_to_vpa(e2k_addr_t pa) +{ + return native_pa_to_vpa(pa); +} +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* __ASSEMBLY__ */ + +#endif /* !(_E2K_P2V_BOOT_V2P_H) */ diff --git a/arch/e2k/include/asm/p2v/io.h b/arch/e2k/include/asm/p2v/io.h new file mode 100644 index 0000000..67a0036 --- /dev/null +++ b/arch/e2k/include/asm/p2v/io.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#define BOOT_E2K_X86_IO_PAGE_SIZE (boot_cpu_has(CPU_HWBUG_LARGE_PAGES) ? \ + E2K_SMALL_PAGE_SIZE : BOOT_E2K_LARGE_PAGE_SIZE) +#define BOOT_X86_IO_AREA_PHYS_BASE (boot_machine.x86_io_area_base) +#define BOOT_X86_IO_AREA_PHYS_SIZE (boot_machine.x86_io_area_size) + +static inline void boot_native_writeb(u8 b, void __iomem *addr) +{ + NATIVE_WRITE_MAS_B((e2k_addr_t)addr, b, MAS_IOADDR); +} + +static inline void boot_native_writew(u16 w, void __iomem *addr) +{ + NATIVE_WRITE_MAS_H((e2k_addr_t)addr, w, MAS_IOADDR); +} + +static inline void boot_native_writel(u32 l, void __iomem *addr) +{ + NATIVE_WRITE_MAS_W((e2k_addr_t)addr, l, MAS_IOADDR); +} + +static inline void boot_native_writeq(u64 q, void __iomem *addr) +{ + NATIVE_WRITE_MAS_D((e2k_addr_t)addr, q, MAS_IOADDR); +} + +static inline u8 boot_native_readb(void __iomem *addr) +{ + return NATIVE_READ_MAS_B((e2k_addr_t)addr, MAS_IOADDR); +} + +static inline u16 boot_native_readw(void __iomem *addr) +{ + return NATIVE_READ_MAS_H((e2k_addr_t)addr, MAS_IOADDR); +} + +static inline u32 boot_native_readl(void __iomem *addr) +{ + return NATIVE_READ_MAS_W((e2k_addr_t)addr, MAS_IOADDR); +} + +static inline u64 boot_native_readq(void __iomem *addr) +{ + return NATIVE_READ_MAS_D((e2k_addr_t)addr, MAS_IOADDR); +} + +//TODO seems like these are unused, probably should delete them + +static inline void boot_native_outb(u8 byte, u16 port) +{ + NATIVE_WRITE_MAS_B(BOOT_X86_IO_AREA_PHYS_BASE + port, byte, MAS_IOADDR); +} +static inline u8 boot_native_inb(u16 port) +{ + return (u8) NATIVE_READ_MAS_B(BOOT_X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); +} +static inline u32 boot_native_inl(u16 port) +{ + return (u32) NATIVE_READ_MAS_W(BOOT_X86_IO_AREA_PHYS_BASE + port, MAS_IOADDR); +} diff --git a/arch/e2k/include/asm/page.h b/arch/e2k/include/asm/page.h new file mode 100644 index 0000000..bb17b84 --- 
/dev/null +++ b/arch/e2k/include/asm/page.h @@ -0,0 +1,432 @@ +/* $Id: page.h,v 1.41 2009/07/24 12:02:54 kravtsunov_e Exp $ + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PAGE_H +#define _E2K_PAGE_H + +#include + +#define IOREMAP_MAX_ORDER PMD_SHIFT +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA + +#define E2K_4K_PAGE_SHIFT 12 /* 4 KBytes page */ +#define E2K_2M_PAGE_SHIFT 21 /* 2 MBytes page */ +#define E2K_4M_PAGE_SHIFT 22 /* 4 MBytes page */ + +#define E2K_SMALL_PAGE_SHIFT E2K_4K_PAGE_SHIFT /* 4 KBytes page */ + +/* + * large page: 4MBytes for E2C+ and 2MBytes for others + */ + +#define E2K_LARGE_PAGE_SHIFT \ + (!cpu_has(CPU_FEAT_ISET_V3) ? E2K_4M_PAGE_SHIFT : E2K_2M_PAGE_SHIFT) + +#define BOOT_E2K_LARGE_PAGE_SHIFT \ + (!boot_cpu_has(CPU_FEAT_ISET_V3) ? E2K_4M_PAGE_SHIFT : E2K_2M_PAGE_SHIFT) + +/* 4 KBytes page */ +#define PAGE_SHIFT E2K_SMALL_PAGE_SHIFT + +/* large page */ +#define LARGE_PAGE_SHIFT E2K_LARGE_PAGE_SHIFT + +#ifndef __ASSEMBLY__ +#define E2K_2M_PAGE_SIZE (1UL << E2K_2M_PAGE_SHIFT) +#define E2K_4M_PAGE_SIZE (1UL << E2K_4M_PAGE_SHIFT) +#endif /* !(__ASSEMBLY__) */ + +#define E2K_SMALL_PAGE_SIZE (1 << E2K_SMALL_PAGE_SHIFT) + +#define E2K_LARGE_PAGE_SIZE (1 << E2K_LARGE_PAGE_SHIFT) +#define BOOT_E2K_LARGE_PAGE_SIZE (1 << BOOT_E2K_LARGE_PAGE_SHIFT) + +#define PAGE_SIZE _BITUL(PAGE_SHIFT) +#define LARGE_PAGE_SIZE E2K_LARGE_PAGE_SIZE + +#if defined(CONFIG_CPU_ES2) +# define E2K_MAX_PAGE_SIZE (1 << E2K_4M_PAGE_SHIFT) +#else +# define E2K_MAX_PAGE_SIZE (1 << E2K_2M_PAGE_SHIFT) +#endif + +#define E2K_SMALL_PAGE_MASK (~(E2K_SMALL_PAGE_SIZE - 1)) + +#define E2K_LARGE_PAGE_MASK (~(E2K_LARGE_PAGE_SIZE - 1)) +#define BOOT_E2K_LARGE_PAGE_MASK (~(BOOT_E2K_LARGE_PAGE_SIZE - 1)) + +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define LARGE_PAGE_MASK E2K_LARGE_PAGE_MASK + +#define HPAGE_SHIFT E2K_LARGE_PAGE_SHIFT +#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HPAGE_PMD_MAX_ORDER (E2K_4M_PAGE_SHIFT - PAGE_SHIFT) + + +#ifdef __KERNEL__ + +#include +#include + +#include +#include +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#endif /* !(__ASSEMBLY__) */ + + +/* + * To align the pointer to the (next) page boundary or to the beginning of + * the page + */ + +/* + * Round up the beginning of the address. + */ +#define _PAGE_ALIGN_UP(addr, page_size) \ + ((e2k_addr_t)(addr) & ~(page_size-1)) + +/* + * Round down the end of the address. + */ +#define _PAGE_ALIGN_DOWN(addr, page_size) \ + (((e2k_addr_t)(addr) + (page_size-1)) & ~(page_size-1)) + +#define E2K_SMALL_PAGE_ALIGN_DOWN(addr) \ + _PAGE_ALIGN_DOWN(addr, E2K_SMALL_PAGE_SIZE) +#define E2K_SMALL_PAGE_ALIGN_UP(addr) \ + _PAGE_ALIGN_UP(addr, E2K_SMALL_PAGE_SIZE) +#define E2K_SMALL_PAGE_ALIGN(addr) E2K_SMALL_PAGE_ALIGN_DOWN(addr) + +#define E2K_LARGE_PAGE_ALIGN_DOWN(addr) \ + _PAGE_ALIGN_DOWN(addr, E2K_LARGE_PAGE_SIZE) +#define E2K_LARGE_PAGE_ALIGN_UP(addr) \ + _PAGE_ALIGN_UP(addr, E2K_LARGE_PAGE_SIZE) +#define E2K_LARGE_PAGE_ALIGN(addr) E2K_LARGE_PAGE_ALIGN_DOWN(addr) + + +#define PAGE_ALIGN_DOWN(addr) _PAGE_ALIGN_DOWN(addr, PAGE_SIZE) +#define PAGE_ALIGN_UP(addr) _PAGE_ALIGN_UP(addr, PAGE_SIZE) +#define LARGE_PAGE_ALIGN_DOWN(addr) _PAGE_ALIGN_DOWN(addr, LARGE_PAGE_SIZE) +#define LARGE_PAGE_ALIGN_UP(addr) _PAGE_ALIGN_UP(addr, LARGE_PAGE_SIZE) + +#define E2K_ALIGN_SIZE_UP(addr, size) \ + (((size) == 0) ? 
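+	/* \
+	 * Worked example (assuming a 4 KB page, so size - 1 == 0xfff): the \
+	 * zero-size guard just above avoids masking with ~(0 - 1) == 0, \
+	 * which would clear the address. Mind the inverted naming used \
+	 * throughout this header: the *_UP macros mask an address down to \
+	 * the start of its page, the *_DOWN macros round it up to the next \
+	 * boundary: \
+	 * \
+	 *	_PAGE_ALIGN_UP(0x1234, 0x1000)   == 0x1000 \
+	 *	_PAGE_ALIGN_DOWN(0x1234, 0x1000) == 0x2000 \
+	 */ \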
(addr) : ALIGN_MASK_UP(addr, ((size)-1))) +#define E2K_ALIGN_SIZE_DOWN(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_DOWN(addr, ((size)-1))) +#define E2K_ALIGN_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK(addr, ((size)-1))) + +#define ALIGN_MASK_UP(addr, mask) ((addr) & ~(mask)) +#define ALIGN_MASK_DOWN(addr, mask) (((addr) + (mask)) & ~(mask)) +#define ALIGN_TO_MASK(addr, mask) ALIGN_MASK_DOWN(addr, mask) +#define ALIGN_SIZE_UP(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_UP(addr, ((size)-1))) +#define ALIGN_SIZE_DOWN(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_MASK_DOWN(addr, ((size)-1))) +#define ALIGN_TO_SIZE(addr, size) \ + (((size) == 0) ? (addr) : ALIGN_TO_MASK(addr, ((size)-1))) + +#ifndef __ASSEMBLY__ + +#define CLEAR_MEMORY_TAG ETAGNUM /* memory filling mode: zeroing */ +/* #define CLEAR_MEMORY_TAG ETAGEWD / * memory filling mode: emptying */ + +/* + * A _lot_ of the kernel time is spent clearing pages, so + * do this as fast as it possibly can. + * + * #95931: try to keep small pages in cache, but avoid cache trashing + * when clearing huge pages. + */ + +#define clear_memory_8(addr, size, tag) \ + fast_tagged_memory_set(addr, 0, tag, size, \ + LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT) + +#define clear_page(addr) clear_memory_8((addr), PAGE_SIZE, CLEAR_MEMORY_TAG) + +#define clear_user_page(addr, vaddr, page) \ +do { \ + u64 strd_opcode; \ + /* Use WC stores to clear huge pages. \ + * e4c does not have shared L3 so cacheable stores are not _that_ \ + * bad and it also has hardware bug which forces to issue memory \ + * barrier after WC stores, so we avoid WC there. */ \ + if (!IS_MACHINE_E2S && PageCompound(page)) { \ + strd_opcode = LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_BYPASS_ALL_CACHES << LDST_REC_OPC_MAS_SHIFT; \ + } else { \ + strd_opcode = LDST_QWORD_FMT << LDST_REC_OPC_FMT_SHIFT | \ + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT; \ + } \ + fast_tagged_memory_set((addr), 0, CLEAR_MEMORY_TAG, \ + PAGE_SIZE, strd_opcode); \ +} while (0) + +#define clear_user_highpage(page, vaddr) \ + clear_user_page(page_address(page), (vaddr), (page)) + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +#define copy_user_highpage(to, from, vaddr, vma) \ + copy_page(page_address(to), page_address(from)) + +#define copy_tagged_page(to, from) __tagged_memcpy_8(to, from, PAGE_SIZE) + +#define copy_page(to, from) copy_tagged_page(to, from) +#define copy_user_page(to, from, vaddr, page) copy_page(to, from) + +typedef struct page *pgtable_t; + +#define __pa(x) ((e2k_addr_t)(x) - PAGE_OFFSET) +#define __va(x) ((void *)((e2k_addr_t) (x) + PAGE_OFFSET)) +#define __boot_pa(x) ((e2k_addr_t)(x) - BOOT_PAGE_OFFSET) +#define __boot_va(x) ((void *)((e2k_addr_t) (x) + BOOT_PAGE_OFFSET)) + +#define __pa_symbol(x) vpa_to_pa(kernel_va_to_pa((unsigned long) (x))) + +/* + * PFNs are real physical page numbers. However, mem_map only begins to record + * per-page information starting at pfn_base. + * This is to handle systems where the first physical page in the machine + * is not 0. + */ + +struct page; + +extern struct page *e2k_virt_to_page(const void *kaddr); + +#define phys_to_page(kaddr) pfn_to_page((kaddr) >> PAGE_SHIFT) +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define virt_to_page(kaddr) \ + (((e2k_addr_t)(kaddr) >= PAGE_OFFSET && \ + (e2k_addr_t)(kaddr) < PAGE_OFFSET + MAX_PM_SIZE) ? 
\ + phys_to_page(__pa(kaddr)) \ + : \ + e2k_virt_to_page((void *) (kaddr))) + + +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_to_phys __pa +#define phys_to_virt __va + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) + +#define page_valid(page) pfn_valid(page_to_pfn(page)) +#define phys_addr_valid(addr) pfn_valid((addr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) ((e2k_addr_t)(kaddr) >= PAGE_OFFSET && \ + pfn_valid(virt_to_pfn(kaddr))) +#define kern_addr_valid(kaddr) page_valid(virt_to_page(kaddr)) + +#define boot_pa(x) ((BOOT_READ_OSCUD_LO_REG().OSCUD_lo_base >= \ + PAGE_OFFSET) \ + ? \ + __pa(x) : (void *)(boot_pa_to_vpa(x))) +#define boot_va(x) ((BOOT_READ_OSCUD_LO_REG().OSCUD_lo_base >= \ + PAGE_OFFSET) \ + ? \ + __va(x) : (void *)(boot_pa_to_vpa(x))) + +/* + * E2K architecture additional vm_flags + */ + +#define VM_HW_STACK_PS 0x00100000000UL /* procedure stack area */ +#define VM_TAGMAPPED 0x00200000000UL /* the tags area appropriate */ + /* to this data VM area was mapped */ +#define VM_HW_STACK_PCS 0x00400000000UL /* chain stack area */ +#define VM_WRITECOMBINED 0x00800000000UL +#define VM_PRIVILEGED 0x04000000000UL /* pages are privileged */ +#define VM_GUARD 0x08000000000UL /* guard page(s) mapping */ +#define VM_MPDMA 0x10000000000UL /* pages are under MPDMA */ + /* hardware protection */ +#define VM_SIGNAL_STACK 0x20000000000UL /* Signal stack area */ +#define VM_CUI 0xffff000000000000UL /* CUI for pages in VMA */ +#define VM_CUI_SHIFT 48 + +/* + * E2K architecture additional gup_flags + */ +#define FOLL_MPDMA 0x01000000 /* page is not writable only for DMA */ + +/* + * We don't set the valid bit for PROT_NONE areas, otherwise + * semi-speculative loads will cause page faults which is bad + * for performance when such loads come from an unrolled loop. + */ +#define VM_PAGESVALID (VM_READ | VM_WRITE | VM_EXEC) + +/* + * The following structure is used to hold the physical memory configuration + * of the machine. This is filled in 'boot_probe_memory()' and is later + * used by 'boot_mem_init()' to setup boot-time memory map and by 'mem_init()' + * to set up 'mem_map[]'. + * A base address of a bank should be page aligned. + * The structure item 'mem_bitmap' is a map pointer. The map bits represent + * the physical memory on the bank in terms of small pages (4 KB). + * To reduce the boot-time map size, the boot map represents only needed + * to boot tasks first 'BOOT_MAX_PHYS_MEM_SIZE' bytes of real physical memory + * configuration. Some of memory areas are prereserved ('text' & 'data' + * segments, stack, boot information etc) and have been allocated by BIOS or + * boot loaders. All these areas are known and listed in the header + * 'asm/boot_init.h' Such area can be allocated on any physical address and + * can be out of the boot map, which represents reserved memory + * The structure 'e2k_busy_mem_t' represents occupied memory areas in a bank, + * which can not be described by the boot map. + * Array of 'E2K_MAX_PHYS_BANKS' of these structures is statically allocated + * into the kernel image. + * The entry after the last valid one has 'pages_num == 0'. 
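+ *
+ * As an illustration only (names taken from the structures declared below;
+ * 'bank' is assumed to point to a valid e2k_phys_bank_t), the ordered list
+ * of busy areas of a bank can be walked as:
+ *
+ *	short i;
+ *	for (i = bank->first_area; i >= 0; i = bank->busy_areas[i].next) {
+ *		e2k_busy_mem_t *area = &bank->busy_areas[i];
+ *		... pages [area->start_page,
+ *		           area->start_page + area->pages_num) are occupied ...
+ *	}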
+ */ + +typedef unsigned long e2k_mem_map_t; /* double-word (64 bits == 64 pages) */ + +typedef enum busy_mem_type { + undefined_mem_type = 0, /* unknown data area */ + boot_loader_mem_type, /* data and binary of boot loader */ + kernel_image_mem_type, /* kernel image (text & data) */ + data_stack_mem_type, /* local data stack */ + hw_stack_mem_type, /* hardware procedure or chain stack */ + page_table_mem_type, /* page tables */ + kernel_data_mem_type, /* kernel data/structures/tables */ + /* and other areas of memory */ + bootblock_mem_type, /* boot block (common data with boot */ + /* loader) */ + boot_time_data_mem_type, /* kernel data & structures allocated */ + /* while boot-time init */ + hw_reserved_mem_type, /* reserved for hardware purposes */ + /* (for example: bugs workaround) */ + hw_stripped_mem_type, /* hardware stripped out physical */ + /* memory (IO, ROM, VGA ...) */ + dma32_mem_type, /* low memory reserved for DMA and */ + /* bounce buffers */ +} busy_mem_type_t; + +typedef struct e2k_busy_mem { + e2k_size_t start_page; /* start page # of an area in a bank */ + e2k_size_t pages_num; /* number of occupied pages by the */ + /* area in the bank */ + short next; /* index of next area (last = -1) */ + /* busy areas is ordered list */ + /* on increase of addresses */ + unsigned short flags; /* boot-time busy area flags */ + /* (see asm/boot_phys.h) */ + busy_mem_type_t type; /* memory type */ +} e2k_busy_mem_t; + +#define BOOT_RESERVED_AREAS_SIZE (2 * PAGE_SIZE) + +/* max number of prereserved areas at boot-time */ +#define BOOT_MAX_PRERESERVED_AREAS \ + (1 + /* 0-page (hardware bug workaround) */ \ + 3 * NR_CPUS + /* 3 stacks (data/procedure/chain) */ \ + /* on each CPU */ \ + 1 + /* kernel image 'text' section */ \ + 1 + /* kernel image 'protexted text' */ \ + 1 + /* kernel '.data..ro_after_init' */ \ + 1 + /* kernel 'data/bss' */ \ + 1 + /* kernel 'init' */ \ + 1 + /* low IO memory (VGA memory) */ \ + 1 + /* bootblock */ \ + 1 + /* INITRD */ \ + 1 + /* mp table */ \ + 1 + /* MP floating table */ \ + 1 * L_MAX_BUSY_AREAS + /* boot loader busy memory */ \ + 1 + /* list of all occupied areas */ \ + 0) + +#ifdef CONFIG_ONLY_HIGH_PHYS_MEM +/* max number of prereserved areas at low physical memory */ +#define MAX_PRERESERVED_LOW_AREAS 2 +#else /* ! 
CONFIG_ONLY_HIGH_PHYS_MEM */ +#define MAX_PRERESERVED_LOW_AREAS 0 /* none such areas */ +#endif /* CONFIG_ONLY_HIGH_PHYS_MEM */ + +#define E2K_MAX_PRERESERVED_AREAS \ + (BOOT_MAX_PRERESERVED_AREAS + MAX_PRERESERVED_LOW_AREAS) + +typedef struct e2k_phys_bank { + e2k_addr_t base_addr; /* base physical address of the start */ + /* page of the bank */ + e2k_size_t pages_num; /* total number of pages in the bank */ + bool maybe_remapped_to_hi; /* the low bank can be remapped to */ + /* high range bank */ + short next; /* index of next bank (last = -1) */ + /* node bnaks is ordered list */ + /* on increase of addresses */ + atomic64_t free_pages_num; /* current number of free pages */ + /* in the map */ + short busy_areas_num; /* number of areas in the list of */ + /* occupied areas in the bank */ + short first_area; /* index of first busy area */ + e2k_busy_mem_t busy_areas_prereserved[E2K_MAX_PRERESERVED_AREAS]; + /* list of all occupied areas in the */ + /* bank, which not included to the */ + /* memory bitmap */ + e2k_busy_mem_t *busy_areas; /* pointer to list of all occupied */ + /* areas in the bank */ + bool mapped[L_MAX_MEM_NUMNODES]; + /* the bank was already mapped */ + /* on node [node_id] */ +} e2k_phys_bank_t; + +typedef struct node_phys_mem { + e2k_size_t start_pfn; /* start page number on the node */ + e2k_size_t pfns_num; /* number of pages on the node */ + /* including holes between banks */ + short banks_num; /* total number of banks in the list */ + short first_bank; /* index of first bank on the list */ + /* (starts from start_pfn) */ + e2k_phys_bank_t banks[L_MAX_NODE_PHYS_BANKS]; +} node_phys_mem_t; + +extern node_phys_mem_t nodes_phys_mem[L_MAX_MEM_NUMNODES]; + +#define boot_nodes_phys_mem \ + (boot_vp_to_pp((node_phys_mem_t *)nodes_phys_mem)) + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#ifndef CONFIG_E2K_HAS_OPT_BITOPS +#include +#else +#include +static inline int get_order(unsigned long size) +{ + int lz = 0; + + size = (size - 1) >> PAGE_SHIFT; + lz = E2K_LZCNTD(size); + + return BITS_PER_LONG - lz; +} +#endif /* CONFIG_E2K_HAS_OPT_BITOPS */ + +struct task_struct; + +extern e2k_addr_t node_kernel_address_to_phys(int node, e2k_addr_t address); +extern e2k_addr_t user_address_to_pva(struct task_struct *tsk, + e2k_addr_t address); +extern e2k_addr_t kernel_address_to_pva(e2k_addr_t address); + +#define ARCH_ZONE_DMA_BITS 32 + +#endif /* !(__ASSEMBLY__) */ + +#endif /* !(__KERNEL__) */ + +#include + +#endif /* !(_E2K_PAGE_H) */ diff --git a/arch/e2k/include/asm/page_io.h b/arch/e2k/include/asm/page_io.h new file mode 100644 index 0000000..46dfe7d --- /dev/null +++ b/arch/e2k/include/asm/page_io.h @@ -0,0 +1,51 @@ +/* $Id: page_io.h,v 1.6 2007/09/05 12:05:52 kostin Exp $ + * + */ + +#ifndef _E2K_PAGE_IO_H +#define _E2K_PAGE_IO_H + +#include +#include +#include +#include +#include + +#include +#include +#include + +#undef DEBUG_TAG_MODE +#undef DebugTM +#define DEBUG_TAG_MODE 0 /* Tag memory */ +#define DebugTM(...) 
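+/* \
+ * Sizing sketch (assuming 4 KB pages and 64-bit longs): each long of data \
+ * needs TAGS_BITS_PER_LONG = 4 bits of tag storage, so the \
+ * TAGS_BYTES_PER_PAGE macro below works out to \
+ * 4096 / 8 * 4 / 8 = 256 bytes of tags per data page, i.e. one 4 KB tag \
+ * page holds the tags of 16 data pages. \
+ */ \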
DebugPrint(DEBUG_TAG_MODE ,##__VA_ARGS__) + + +#define TAGS_BITS_PER_LONG 4 +#define TAGS_BYTES_PER_PAGE (PAGE_SIZE / sizeof(long) * \ + TAGS_BITS_PER_LONG / 8) + +#define TAGS_PAGES 0xfff +#define TAGS_READ_PAGES 0xff + +#define TAGS_PAGES_FOR_COMPRESS 0xff +#define TAGS_READ_PAGES_FOR_COMPRESS 0xff + +struct tags_swap_page_table { + struct page **pages; + struct page **read_pages; + int index; /* last used page */ + int index_read; /* last used page for readpage */ + int size[2]; + spinlock_t lock_pages; + spinlock_t lock_read_pages; +}; + +extern void tags_swap_init(unsigned type, unsigned long *map); +extern void e2k_remove_swap(struct swap_info_struct *sis); +extern void restore_tags_for_data(u64 *, u8 *); +extern u32 save_tags_from_data(u64 *, u8 *); +extern void get_page_with_tags(u8 *, u8 *, int *); +extern int check_tags(unsigned type, unsigned long beg, unsigned long end); + +#endif //_E2K_PAGE_IO_H diff --git a/arch/e2k/include/asm/param.h b/arch/e2k/include/asm/param.h new file mode 100644 index 0000000..816f4ee --- /dev/null +++ b/arch/e2k/include/asm/param.h @@ -0,0 +1,12 @@ +/* $Id: param.h,v 1.4 2008/12/19 12:44:14 atic Exp $ */ +#ifndef _E2K_PARAM_H_ +#define _E2K_PARAM_H_ + +#include + + +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ +# define USER_HZ HZ /* some user interfaces are in */ + /* "ticks" */ +# define CLOCKS_PER_SEC (USER_HZ) +#endif /* _E2K_PARAM_H_ */ diff --git a/arch/e2k/include/asm/paravirt.h b/arch/e2k/include/asm/paravirt.h new file mode 100644 index 0000000..77ebeb0 --- /dev/null +++ b/arch/e2k/include/asm/paravirt.h @@ -0,0 +1,8 @@ + +#ifndef __ASM_E2K_PARAVIRT_H +#define __ASM_E2K_PARAVIRT_H + +#include +#include + +#endif /* __ASM_E2K_PARAVIRT_H */ diff --git a/arch/e2k/include/asm/paravirt/aau_context.h b/arch/e2k/include/asm/paravirt/aau_context.h new file mode 100644 index 0000000..1cabf8b --- /dev/null +++ b/arch/e2k/include/asm/paravirt/aau_context.h @@ -0,0 +1,255 @@ +/* + * AAU registers description, macroses for load/store AAU context + * paravirtualized case + * + * array access descriptors (AAD0, ... , AAD31); + * initial indices (AIND0, ... , AAIND15); + * indices increment values (AAINCR0, ... , AAINCR7); + * current values of "prefetch" indices (AALDI0, ... , AALDI63); + * array prefetch initialization mask (AALDV); + * prefetch attributes (AALDA0, ... , AALDA63); + * array prefetch advance mask (AALDM); + * array access status register (AASR); + * array access fault status register (AAFSTR); + * current values of "store" indices (AASTI0, ... , AASTI15); + * store attributes (AASTA0, ... 
, AASTA15); + */ + +#ifndef _E2K_ASM_PARAVIRT_AAU_CONTEXT_H_ +#define _E2K_ASM_PARAVIRT_AAU_CONTEXT_H_ + +#include +#include + +#ifdef CONFIG_KVM_GUEST +#include +#include + +#define PV_SAVE_AAU_MASK_REGS(aau_context, aasr) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AAU_MASK_REGS(aau_context, aasr); \ + } else { \ + KVM_SAVE_AAU_MASK_REGS(aau_context, aasr); \ + } \ +}) + +#define PV_RESTORE_AAU_MASK_REGS(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_AAU_MASK_REGS(aau_context); \ + } else { \ + KVM_RESTORE_AAU_MASK_REGS(aau_context); \ + } \ +}) + +#define PV_SAVE_AADS(aau_regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AADS(aau_regs); \ + } else { \ + KVM_SAVE_AADS(aau_regs); \ + } \ +}) + +#define PV_RESTORE_AADS(aau_regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_AADS(aau_regs); \ + } else { \ + KVM_RESTORE_AADS(aau_regs); \ + } \ +}) + +#define PV_SAVE_AALDIS_V2(regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AALDIS_V2(regs); \ + } else { \ + KVM_SAVE_AALDIS(regs); \ + } \ +}) +#define PV_SAVE_AALDIS_V5(regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AALDIS_V5(regs); \ + } else { \ + KVM_SAVE_AALDIS(regs); \ + } \ +}) + +#define PV_SAVE_AALDAS(aaldas_p) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AALDAS(aaldas_p); \ + } else { \ + KVM_SAVE_AALDAS(aaldas_p); \ + } \ +}) + +#define PV_SAVE_AAFSTR(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AAFSTR(aau_context); \ + } else { \ + KVM_SAVE_AAFSTR(aau_context); \ + } \ +}) + +#define PV_SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SAVE_AAU_REGS_FOR_PTRACE(regs, ti); \ + } else { \ + KVM_SAVE_AAU_REGS_FOR_PTRACE(regs, ti); \ + } \ +}) + +#define PV_GET_ARRAY_DESCRIPTORS_V2(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V2(aau_context); \ + } else { \ + kvm_get_array_descriptors(aau_context); \ + } \ +}) +#define PV_GET_ARRAY_DESCRIPTORS_V5(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_ARRAY_DESCRIPTORS_V5(aau_context); \ + } else { \ + kvm_get_array_descriptors(aau_context); \ + } \ +}) + +#define PV_SET_ARRAY_DESCRIPTORS(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_SET_ARRAY_DESCRIPTORS(aau_context); \ + } else { \ + kvm_set_array_descriptors(aau_context); \ + } \ +}) + +#define PV_GET_SYNCHRONOUS_PART_V2(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V2(aau_context); \ + } else { \ + kvm_get_synchronous_part(aau_context); \ + } \ +}) +#define PV_GET_SYNCHRONOUS_PART_V5(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_SYNCHRONOUS_PART_V5(aau_context); \ + } else { \ + kvm_get_synchronous_part(aau_context); \ + } \ +}) + +#define PV_GET_AAU_CONTEXT_V2(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_AAU_CONTEXT_V2(aau_context); \ + } else { \ + kvm_get_aau_context(aau_context); \ + } \ +}) +#define PV_GET_AAU_CONTEXT_V5(aau_context) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_GET_AAU_CONTEXT_V5(aau_context); \ + } else { \ + kvm_get_aau_context(aau_context); \ + } \ +}) + +static inline void +pv_get_array_descriptors_v2(e2k_aau_t *context) +{ + PV_GET_ARRAY_DESCRIPTORS_V2(context); +} +static inline void +pv_get_array_descriptors_v5(e2k_aau_t *context) +{ + PV_GET_ARRAY_DESCRIPTORS_V5(context); +} + +static inline void +pv_get_synchronous_part_v2(e2k_aau_t *context) +{ + PV_GET_SYNCHRONOUS_PART_V2(context); +} +static inline void 
+pv_get_synchronous_part_v5(e2k_aau_t *context)
+{
+	PV_GET_SYNCHRONOUS_PART_V5(context);
+}
+
+/*
+ * It is assumed that aasr was read earlier (by the get_aau_context caller)
+ * and that the check of aasr.iab has already been done.
+ */
+static inline void
+pv_get_aau_context_v2(e2k_aau_t *context)
+{
+	PV_GET_AAU_CONTEXT_V2(context);
+}
+static inline void
+pv_get_aau_context_v5(e2k_aau_t *context)
+{
+	PV_GET_AAU_CONTEXT_V5(context);
+}
+
+#else	/* ! CONFIG_KVM_GUEST */
+ #error	"Unknown virtualization type"
+#endif	/* CONFIG_KVM_GUEST */
+
+#ifdef	CONFIG_PARAVIRT_GUEST
+/* It is paravirtualized host and guest kernel */
+
+#define SAVE_AAU_MASK_REGS(aau_context, aasr) \
+		PV_SAVE_AAU_MASK_REGS(aau_context, aasr)
+
+#define RESTORE_AAU_MASK_REGS(aau_context) \
+		PV_RESTORE_AAU_MASK_REGS(aau_context)
+
+#define SAVE_AADS(aau_regs) \
+		PV_SAVE_AADS(aau_regs)
+
+#define RESTORE_AADS(aau_regs) \
+		PV_RESTORE_AADS(aau_regs)
+
+#define SAVE_AALDIS_V2(regs) \
+		PV_SAVE_AALDIS_V2(regs)
+#define SAVE_AALDIS_V5(regs) \
+		PV_SAVE_AALDIS_V5(regs)
+
+#define SAVE_AALDA(aaldas) \
+		PV_SAVE_AALDAS(aaldas)
+
+#define SAVE_AAFSTR(regs) \
+		PV_SAVE_AAFSTR(regs)
+
+#define SAVE_AAU_REGS_FOR_PTRACE(regs, ti) \
+		PV_SAVE_AAU_REGS_FOR_PTRACE(regs, ti)
+
+#define GET_ARRAY_DESCRIPTORS_V2(aau_context) \
+		PV_GET_ARRAY_DESCRIPTORS_V2(aau_context)
+#define GET_ARRAY_DESCRIPTORS_V5(aau_context) \
+		PV_GET_ARRAY_DESCRIPTORS_V5(aau_context)
+
+#define GET_SYNCHRONOUS_PART_V2(aau_context) \
+		PV_GET_SYNCHRONOUS_PART_V2(aau_context)
+#define GET_SYNCHRONOUS_PART_V5(aau_context) \
+		PV_GET_SYNCHRONOUS_PART_V5(aau_context)
+
+#define GET_AAU_CONTEXT_V2(context) \
+		PV_GET_AAU_CONTEXT_V2(context)
+#define GET_AAU_CONTEXT_V5(context) \
+		PV_GET_AAU_CONTEXT_V5(context)
+
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _E2K_ASM_PARAVIRT_AAU_CONTEXT_H_ */
diff --git a/arch/e2k/include/asm/paravirt/aau_regs_access.h b/arch/e2k/include/asm/paravirt/aau_regs_access.h
new file mode 100644
index 0000000..bb135ed
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/aau_regs_access.h
@@ -0,0 +1,511 @@
+/*
+ * AAU registers description, macros for load/store of the AAU context,
+ * paravirtualized case:
+ *
+ * array access descriptors (AAD0, ... , AAD31);
+ * initial indices (AAIND0, ... , AAIND15);
+ * indices increment values (AAINCR0, ... , AAINCR7);
+ * current values of "prefetch" indices (AALDI0, ... , AALDI63);
+ * array prefetch initialization mask (AALDV);
+ * prefetch attributes (AALDA0, ... , AALDA63);
+ * array prefetch advance mask (AALDM);
+ * array access status register (AASR);
+ * array access fault status register (AAFSTR);
+ * current values of "store" indices (AASTI0, ... , AASTI15);
+ * store attributes (AASTA0, ...
, AASTA15); + */ + +/* do not include this header directly, only through */ + +#ifndef _E2K_ASM_PARAVIRT_AAU_REGS_ACCESS_H_ +#define _E2K_ASM_PARAVIRT_AAU_REGS_ACCESS_H_ + +#include +#include + +static inline unsigned int pv_read_aasr_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aasr_reg_value(); + else + return pv_cpu_ops.read_aasr_reg_value(); +} +static inline void pv_write_aasr_reg_value(unsigned int reg_value) +{ + if (!paravirt_enabled()) + native_write_aasr_reg_value(reg_value); + else + pv_cpu_ops.write_aasr_reg_value(reg_value); +} +static inline unsigned int pv_read_aafstr_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aafstr_reg_value(); + else + return pv_cpu_ops.read_aafstr_reg_value(); +} +static inline void pv_write_aafstr_reg_value(unsigned int reg_value) +{ + if (!paravirt_enabled()) + native_write_aafstr_reg_value(reg_value); + else + pv_cpu_ops.write_aafstr_reg_value(reg_value); +} + +static __always_inline e2k_aasr_t +pv_read_aasr_reg(void) +{ + e2k_aasr_t aasr; + + AW(aasr) = pv_read_aasr_reg_value(); + return aasr; +} +static __always_inline void +pv_write_aasr_reg(e2k_aasr_t aasr) +{ + pv_write_aasr_reg_value(AW(aasr)); +} + +#ifdef CONFIG_KVM_GUEST +#include + +static inline u32 +pv_read_aaind_reg_value_v2(int AAIND_no) +{ + if (!paravirt_enabled()) + return native_read_aaind_reg_value_v2(AAIND_no); + else + return kvm_read_aaind_reg_value(AAIND_no); +} +static inline u64 +pv_read_aaind_reg_value_v5(int AAIND_no) +{ + if (!paravirt_enabled()) + return native_read_aaind_reg_value_v5(AAIND_no); + else + return kvm_read_aaind_reg_value(AAIND_no); +} +static inline void +pv_write_aaind_reg_value(int AAIND_no, u64 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaind_reg_value(AAIND_no, reg_value); + else + kvm_write_aaind_reg_value(AAIND_no, reg_value); +} + +static inline void +pv_read_aainds_pair_value_v2(int AAINDs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u32 value1, value2; + + NATIVE_READ_AAINDS_PAIR_VALUE_V2(AAINDs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aainds_pair_value(AAINDs_pair, lo_value, hi_value); + } +} +static inline void +pv_read_aainds_pair_value_v5(int AAINDs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u64 value1, value2; + + NATIVE_READ_AAINDS_PAIR_VALUE_V5(AAINDs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aainds_pair_value(AAINDs_pair, lo_value, hi_value); + } +} + +static inline void +pv_write_aainds_pair_value(int AAINDs_pair, u64 lo_value, u64 hi_value) +{ + if (!paravirt_enabled()) + native_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value); + else + kvm_write_aainds_pair_value(AAINDs_pair, lo_value, hi_value); +} +static inline u32 +pv_read_aaind_tags_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aaind_tags_reg_value(); + else + return kvm_read_aaind_tags_reg_value(); +} +static inline void +pv_write_aaind_tags_reg_value(u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaind_tags_reg_value(reg_value); + else + kvm_write_aaind_tags_reg_value(reg_value); +} +static inline u32 +pv_read_aaincr_reg_value(int AAINCR_no) +{ + if (!paravirt_enabled()) + return native_read_aaincr_reg_value_v2(AAINCR_no); + else + return kvm_read_aaincr_reg_value(AAINCR_no); +} +static inline u64 +pv_read_aaincr_reg_value_v5(int AAINCR_no) +{ + if (!paravirt_enabled()) + return native_read_aaincr_reg_value_v5(AAINCR_no); + else + return 
kvm_read_aaincr_reg_value(AAINCR_no); +} +static inline void +pv_write_aaincr_reg_value(int AAINCR_no, u64 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaincr_reg_value(AAINCR_no, reg_value); + else + kvm_write_aaincr_reg_value(AAINCR_no, reg_value); +} +static inline u32 +pv_read_aaincr_tags_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aaincr_tags_reg_value(); + else + return kvm_read_aaincr_tags_reg_value(); +} +static inline void +pv_write_aaincr_tags_reg_value(u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aaincr_tags_reg_value(reg_value); + else + kvm_write_aaincr_tags_reg_value(reg_value); +} + +static inline void +pv_read_aaincrs_pair_value_v2(int AAINCRs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u32 value1, value2; + + NATIVE_READ_AAINCRS_PAIR_VALUE_V2(AAINCRs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); + } +} +static inline void +pv_read_aaincrs_pair_value_v5(int AAINCRs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u64 value1, value2; + + NATIVE_READ_AAINCRS_PAIR_VALUE_V5(AAINCRs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); + } +} + +static inline void +pv_write_aaincrs_pair_value(int AAINCRs_pair, u64 lo_value, u64 hi_value) +{ + if (!paravirt_enabled()) + native_write_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); + else + kvm_write_aaincrs_pair_value(AAINCRs_pair, + lo_value, hi_value); +} +static inline u32 +pv_read_aasti_reg_value_v2(int AASTI_no) +{ + if (!paravirt_enabled()) + return native_read_aasti_reg_value_v2(AASTI_no); + else + return kvm_read_aasti_reg_value(AASTI_no); +} +static inline u64 +pv_read_aasti_reg_value_v5(int AASTI_no) +{ + if (!paravirt_enabled()) + return native_read_aasti_reg_value_v5(AASTI_no); + else + return kvm_read_aasti_reg_value(AASTI_no); +} +static inline void +pv_write_aasti_reg_value(int AASTI_no, u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aasti_reg_value(AASTI_no, reg_value); + else + kvm_write_aasti_reg_value(AASTI_no, reg_value); +} +static inline u32 +pv_read_aasti_tags_reg_value(void) +{ + if (!paravirt_enabled()) + return native_read_aasti_tags_reg_value(); + else + return kvm_read_aasti_tags_reg_value(); +} +static inline void +pv_write_aasti_tags_reg_value(u32 reg_value) +{ + if (!paravirt_enabled()) + native_write_aasti_tags_reg_value(reg_value); + else + kvm_write_aasti_tags_reg_value(reg_value); +} + +static inline void +pv_read_aastis_pair_value_v2(int AASTIs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u32 value1, value2; + + NATIVE_READ_AASTIS_PAIR_VALUE_V2(AASTIs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aastis_pair_value(AASTIs_pair, lo_value, hi_value); + } +} +static inline void +pv_read_aastis_pair_value_v5(int AASTIs_pair, u64 *lo_value, u64 *hi_value) +{ + if (!paravirt_enabled()) { + u64 value1, value2; + + NATIVE_READ_AASTIS_PAIR_VALUE_V5(AASTIs_pair, value1, value2); + *lo_value = value1; + *hi_value = value2; + } else { + kvm_read_aastis_pair_value(AASTIs_pair, lo_value, hi_value); + } +} + +static inline void +pv_write_aastis_pair_value(int AASTIs_pair, u64 lo_value, u64 hi_value) +{ + if (!paravirt_enabled()) + native_write_aastis_pair_value(AASTIs_pair, lo_value, hi_value); + else + kvm_write_aastis_pair_value(AASTIs_pair, 
lo_value, hi_value); +} + +static inline void +pv_read_aaldi_reg_value_v2(int AALDI_no, u64 *l_value, u64 *r_value) +{ + if (!paravirt_enabled()) + native_read_aaldi_reg_value_v2(AALDI_no, l_value, r_value); + else + kvm_read_aaldi_reg_value(AALDI_no, l_value, r_value); +} +static inline void +pv_read_aaldi_reg_value_v5(int AALDI_no, u64 *l_value, u64 *r_value) +{ + if (!paravirt_enabled()) + native_read_aaldi_reg_value_v5(AALDI_no, l_value, r_value); + else + kvm_read_aaldi_reg_value(AALDI_no, l_value, r_value); +} + +static inline void +pv_write_aaldi_reg_value(int AALDI_no, u64 l_value, u64 r_value) +{ + if (!paravirt_enabled()) + native_write_aaldi_reg_value(AALDI_no, l_value, r_value); + else + kvm_write_aaldi_reg_value(AALDI_no, l_value, r_value); +} + +static inline void +pv_read_aaldas_reg_value(int AALDAs_no, u32 *l_value, u32 *r_value) +{ + if (!paravirt_enabled()) + native_read_aaldas_reg_value(AALDAs_no, l_value, r_value); + else + kvm_read_aaldas_reg_value(AALDAs_no, l_value, r_value); +} + +static inline void +pv_write_aaldas_reg_value(int AALDAs_no, u32 l_value, u32 r_value) +{ + if (!paravirt_enabled()) + native_write_aaldas_reg_value(AALDAs_no, l_value, r_value); + else + kvm_write_aaldas_reg_value(AALDAs_no, l_value, r_value); +} +static inline void +pv_read_aaldm_reg_value(u32 *lo_value, u32 *hi_value) +{ + if (!paravirt_enabled()) + native_read_aaldm_reg_value(lo_value, hi_value); + else + kvm_read_aaldm_reg_value(lo_value, hi_value); +} +static inline void +pv_write_aaldm_reg_value(u32 lo_value, u32 hi_value) +{ + if (!paravirt_enabled()) + native_write_aaldm_reg_value(lo_value, hi_value); + else + kvm_write_aaldm_reg_value(lo_value, hi_value); +} +static inline void +pv_read_aaldm_reg(e2k_aaldm_t *aaldm) +{ + if (!paravirt_enabled()) + native_read_aaldm_reg(aaldm); + else + kvm_read_aaldm_reg(aaldm); +} +static inline void +pv_write_aaldm_reg(e2k_aaldm_t *aaldm) +{ + if (!paravirt_enabled()) + native_write_aaldm_reg(aaldm); + else + kvm_write_aaldm_reg(aaldm); +} +static inline void +pv_read_aaldv_reg_value(u32 *lo_value, u32 *hi_value) +{ + if (!paravirt_enabled()) + native_read_aaldv_reg_value(lo_value, hi_value); + else + kvm_read_aaldv_reg_value(lo_value, hi_value); +} +static inline void +pv_write_aaldv_reg_value(u32 lo_value, u32 hi_value) +{ + if (!paravirt_enabled()) + native_write_aaldv_reg_value(lo_value, hi_value); + else + kvm_write_aaldv_reg_value(lo_value, hi_value); +} +static inline void +pv_read_aaldv_reg(e2k_aaldv_t *aaldv) +{ + if (!paravirt_enabled()) + native_read_aaldv_reg(aaldv); + else + kvm_read_aaldv_reg(aaldv); +} +static inline void +pv_write_aaldv_reg(e2k_aaldv_t *aaldv) +{ + if (!paravirt_enabled()) + native_write_aaldv_reg(aaldv); + else + kvm_write_aaldv_reg(aaldv); +} + +static inline void +pv_read_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_read_aad_reg(AAD_no, mem_p); + else + kvm_read_aad_reg(AAD_no, mem_p); +} + +static inline void +pv_write_aad_reg(int AAD_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_write_aad_reg(AAD_no, mem_p); + else + kvm_write_aad_reg(AAD_no, mem_p); +} + +static inline void +pv_read_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_read_aads_4_reg(AADs_no, mem_p); + else + kvm_read_aads_4_reg(AADs_no, mem_p); +} + +static inline void +pv_write_aads_4_reg(int AADs_no, e2k_aadj_t *mem_p) +{ + if (!paravirt_enabled()) + native_write_aads_4_reg(AADs_no, mem_p); + else + kvm_write_aads_4_reg(AADs_no, mem_p); +} + +#else /* ! 
CONFIG_KVM_GUEST */
+ #error	"Unknown virtualization type"
+#endif	/* CONFIG_KVM_GUEST */
+
+#ifdef	CONFIG_PARAVIRT_GUEST
+/* It is paravirtualized host and guest kernel */
+
+#include
+
+static __always_inline u32
+read_aasr_reg_value(void)
+{
+	return pv_read_aasr_reg_value();
+}
+static __always_inline void
+write_aasr_reg_value(u32 reg_value)
+{
+	pv_write_aasr_reg_value(reg_value);
+}
+static __always_inline e2k_aasr_t
+read_aasr_reg(void)
+{
+	return pv_read_aasr_reg();
+}
+static __always_inline void
+write_aasr_reg(e2k_aasr_t aasr)
+{
+	pv_write_aasr_reg(aasr);
+}
+static inline u32
+read_aafstr_reg_value(void)
+{
+	return pv_read_aafstr_reg_value();
+}
+static inline void
+write_aafstr_reg_value(u32 reg_value)
+{
+	pv_write_aafstr_reg_value(reg_value);
+}
+static inline void
+read_aaldm_reg(e2k_aaldm_t *aaldm)
+{
+	pv_read_aaldm_reg_value(&aaldm->lo, &aaldm->hi);
+}
+static inline void
+write_aaldm_reg(e2k_aaldm_t *aaldm)
+{
+	pv_write_aaldm_reg_value(aaldm->lo, aaldm->hi);
+}
+static inline void
+read_aaldv_reg(e2k_aaldv_t *aaldv)
+{
+	pv_read_aaldv_reg_value(&aaldv->lo, &aaldv->hi);
+}
+static inline void
+write_aaldv_reg(e2k_aaldv_t *aaldv)
+{
+	pv_write_aaldv_reg_value(aaldv->lo, aaldv->hi);
+}
+
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _E2K_ASM_PARAVIRT_AAU_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/paravirt/apic.h b/arch/e2k/include/asm/paravirt/apic.h
new file mode 100644
index 0000000..39131a4
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/apic.h
@@ -0,0 +1,56 @@
+#ifndef __ASM_PARAVIRT_APIC_H
+#define __ASM_PARAVIRT_APIC_H
+
+#ifdef __KERNEL__
+#include
+#include
+
+/*
+ * Basic functions accessing virtual Local APICs on guest.
+ */
+
+static inline unsigned int pv_arch_apic_read(unsigned int reg)
+{
+	return pv_apic_ops.apic_read(reg);
+}
+
+static inline void pv_arch_apic_write(unsigned int reg, unsigned int v)
+{
+	pv_apic_ops.apic_write(reg, v);
+}
+
+static inline unsigned int boot_pv_arch_apic_read(unsigned int reg)
+{
+	return BOOT_PARAVIRT_APIC_READ(reg);
+}
+
+static inline void boot_pv_arch_apic_write(unsigned int reg, unsigned int v)
+{
+	BOOT_PARAVIRT_APIC_WRITE(reg, v);
+}
+
+#ifdef	CONFIG_PARAVIRT_GUEST
+
+static inline void arch_apic_write(unsigned int reg, unsigned int v)
+{
+	pv_arch_apic_write(reg, v);
+}
+
+static inline unsigned int arch_apic_read(unsigned int reg)
+{
+	return pv_arch_apic_read(reg);
+}
+static inline void boot_arch_apic_write(unsigned int reg, unsigned int v)
+{
+	boot_pv_arch_apic_write(reg, v);
+}
+
+static inline unsigned int boot_arch_apic_read(unsigned int reg)
+{
+	return boot_pv_arch_apic_read(reg);
+}
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARAVIRT_APIC_H */
diff --git a/arch/e2k/include/asm/paravirt/area_alloc.h b/arch/e2k/include/asm/paravirt/area_alloc.h
new file mode 100644
index 0000000..271220b
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/area_alloc.h
@@ -0,0 +1,152 @@
+#ifndef __ASM_PARAVIRT_AREA_ALLOC_H
+#define __ASM_PARAVIRT_AREA_ALLOC_H
+
+#ifdef __KERNEL__
+
+#include
+#include
+
+static inline int
+pv_register_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (pv_cpu_ops.register_kernel_hw_stack == NULL)
+		return 0;
+	return pv_cpu_ops.register_kernel_hw_stack(stack_base, stack_size);
+}
+
+static inline int
+boot_pv_register_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(register_kernel_hw_stack) == NULL)
+		return 0;
+	return BOOT_PARAVIRT_GET_CPU_FUNC(
+			register_kernel_hw_stack)(stack_base, stack_size);
+}
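+
+/*
+ * All pv_/boot_pv_ helpers in this header treat their pv_cpu_ops entry as
+ * optional: a NULL hook means "nothing to do on this kernel flavour" and
+ * the helper silently succeeds. A backend would wire the hooks up as in
+ * this illustrative sketch (kvm_register_kernel_hw_stack is an assumed
+ * name, not taken from this patch):
+ *
+ *	pv_cpu_ops.register_kernel_hw_stack = kvm_register_kernel_hw_stack;
+ */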
+
+static inline int
+pv_register_kernel_data_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (pv_cpu_ops.register_kernel_data_stack == NULL)
+		return 0;
+	return pv_cpu_ops.register_kernel_data_stack(stack_base, stack_size);
+}
+
+static inline int
+boot_pv_register_kernel_data_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(register_kernel_data_stack) == NULL)
+		return 0;
+	return BOOT_PARAVIRT_GET_CPU_FUNC(
+			register_kernel_data_stack)(stack_base, stack_size);
+}
+
+static inline void
+pv_unregister_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (pv_cpu_ops.unregister_kernel_hw_stack == NULL)
+		return;
+	pv_cpu_ops.unregister_kernel_hw_stack(stack_base, stack_size);
+}
+
+static inline void
+boot_pv_unregister_kernel_hw_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(unregister_kernel_hw_stack) == NULL)
+		return;
+	BOOT_PARAVIRT_GET_CPU_FUNC(
+			unregister_kernel_hw_stack)(stack_base, stack_size);
+}
+
+static inline void
+pv_unregister_kernel_data_stack(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (pv_cpu_ops.unregister_kernel_data_stack == NULL)
+		return;
+	pv_cpu_ops.unregister_kernel_data_stack(stack_base, stack_size);
+}
+
+static inline void
+boot_pv_unregister_kernel_data_stack(e2k_addr_t stack_base,
+					e2k_size_t stack_size)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(unregister_kernel_data_stack) == NULL)
+		return;
+	BOOT_PARAVIRT_GET_CPU_FUNC(
+			unregister_kernel_data_stack)(stack_base, stack_size);
+}
+
+static inline int
+pv_kmem_area_host_chunk(e2k_addr_t stack_base, e2k_size_t stack_size,
+				int hw_stack)
+{
+	if (pv_cpu_ops.kmem_area_host_chunk == NULL)
+		return 0;
+	return pv_cpu_ops.kmem_area_host_chunk(stack_base, stack_size,
+						hw_stack);
+}
+
+static inline void
+pv_kmem_area_unhost_chunk(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	if (pv_cpu_ops.kmem_area_unhost_chunk == NULL)
+		return;
+	pv_cpu_ops.kmem_area_unhost_chunk(stack_base, stack_size);
+}
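+
+/*
+ * Note the two-layer shape used by every paravirt header in this patch:
+ * the pv_*() and boot_pv_*() functions above always exist, while the short
+ * generic names below (register_kernel_hw_stack() and friends) are bound
+ * to them only under CONFIG_PARAVIRT_GUEST; a native kernel binds the same
+ * generic names to its native implementations elsewhere.
+ */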
+
+#ifdef	CONFIG_PARAVIRT_GUEST
+static inline int register_kernel_hw_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	return pv_register_kernel_hw_stack(stack_base, stack_size);
+}
+static inline int register_kernel_data_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	return pv_register_kernel_data_stack(stack_base, stack_size);
+}
+static inline int boot_register_kernel_hw_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	return boot_pv_register_kernel_hw_stack(stack_base, stack_size);
+}
+static inline int boot_register_kernel_data_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	return boot_pv_register_kernel_data_stack(stack_base, stack_size);
+}
+static inline void unregister_kernel_hw_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	pv_unregister_kernel_hw_stack(stack_base, stack_size);
+}
+static inline void unregister_kernel_data_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	pv_unregister_kernel_data_stack(stack_base, stack_size);
+}
+static inline void boot_unregister_kernel_hw_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	boot_pv_unregister_kernel_hw_stack(stack_base, stack_size);
+}
+static inline void boot_unregister_kernel_data_stack(e2k_addr_t stack_base,
+						e2k_size_t stack_size)
+{
+	boot_pv_unregister_kernel_data_stack(stack_base, stack_size);
+}
+static inline int
+kmem_area_host_chunk(e2k_addr_t stack_base,
+			e2k_size_t stack_size, int hw_flag)
+{
+	return pv_kmem_area_host_chunk(stack_base, stack_size, hw_flag);
+}
+static inline void
+kmem_area_unhost_chunk(e2k_addr_t stack_base, e2k_size_t stack_size)
+{
+	pv_kmem_area_unhost_chunk(stack_base, stack_size);
+}
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARAVIRT_AREA_ALLOC_H */
diff --git a/arch/e2k/include/asm/paravirt/atomic_api.h b/arch/e2k/include/asm/paravirt/atomic_api.h
new file mode 100644
index 0000000..a80fe02
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/atomic_api.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_E2K_PARAVIRT_ATOMIC_API_H_
+#define _ASM_E2K_PARAVIRT_ATOMIC_API_H_
+
+#include
+#include
+
+#ifdef __KERNEL__
+
+#ifndef	__ASSEMBLY__
+
+/* FIXME: hardware bug workarounds are not implemented on the guest, */
+/* because such workarounds contain privileged actions and */
+/* can be done only on the host using appropriate hypercalls */
+
+#ifdef	CONFIG_KVM_GUEST
+#include
+
+#define	PV_HWBUG_AFTER_LD_ACQ()		NATIVE_HWBUG_AFTER_LD_ACQ()
+#else	/* ! CONFIG_KVM_GUEST */
+ #error	"Unknown virtualization type"
+#endif	/* CONFIG_KVM_GUEST */
+
+#ifdef	CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+/* FIXME: only bare-hardware bugs of the host are examined here; */
+/* a guest virtual machine should examine host machine bugs too, but this */
+/* is not implemented yet */
+#define	virt_cpu_has(hwbug)	((!paravirt_enabled()) && cpu_has(hwbug))
+
+#define	VIRT_HWBUG_AFTER_LD_ACQ()	PV_HWBUG_AFTER_LD_ACQ()
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_E2K_PARAVIRT_ATOMIC_API_H_ */
diff --git a/arch/e2k/include/asm/paravirt/boot.h b/arch/e2k/include/asm/paravirt/boot.h
new file mode 100644
index 0000000..3e3cd36
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/boot.h
@@ -0,0 +1,201 @@
+/*
+ * E2K boot-time initialization virtualization for paravirtualized kernel
+ *
+ * Copyright 2016 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_ASM_PARAVIRT_BOOT_H_ +#define _E2K_ASM_PARAVIRT_BOOT_H_ + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +static inline void +boot_pv_setup_machine_id(bootblock_struct_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_setup_machine_id)(bootblock); +} +static inline int +boot_pv_loader_probe_memory(struct node_phys_mem *nodes_phys_mem, + boot_info_t *bootblock) +{ + return BOOT_PARAVIRT_GET_BOOT_FUNC(boot_loader_probe_memory) + (nodes_phys_mem, bootblock); +} + +static inline e2k_size_t +boot_pv_get_bootblock_size(boot_info_t *bootblock) +{ + return BOOT_PARAVIRT_GET_BOOT_FUNC(boot_get_bootblock_size) + (bootblock); +} + +static inline void +boot_pv_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_reserve_all_bootmem)(bsp, boot_info); +} + +static inline void +boot_pv_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_map_all_bootmem)(bsp, boot_info); +} +static inline void +boot_pv_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_map_needful_to_equal_virt_area) + (stack_top_addr); +} +static inline void +boot_pv_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_kernel_switch_to_virt) + (bsp, cpuid, boot_init_sequel_func); +} +static inline void +boot_pv_cpu_relax(void) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_cpu_relax)(); +} + +#ifdef CONFIG_SMP +static inline int +boot_pv_smp_cpu_config(boot_info_t *bootblock) +{ + return BOOT_PARAVIRT_GET_BOOT_FUNC(boot_smp_cpu_config)(bootblock); +} +static inline void +boot_pv_smp_node_config(boot_info_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_smp_node_config)(bootblock); +} +#endif /* CONFIG_SMP */ + +static inline void +boot_pv_clear_bss(void) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_clear_bss)(); +} +static inline void __init +boot_pv_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_check_bootblock)(bsp, bootblock); +} +static inline void +pv_init_terminate_boot_init(bool bsp, int cpuid) +{ + pv_boot_ops.init_terminate_boot_init(bsp, cpuid); +} + +static inline void +boot_pv_parse_param(bootblock_struct_t *bootblock) +{ + BOOT_PARAVIRT_GET_BOOT_FUNC(boot_parse_param)(bootblock); +} + +#define boot_pv_panic(fmt, args...) \ + BOOT_PARAVIRT_GET_BOOT_FUNC(do_boot_panic)(fmt, ##args); + +extern void native_pv_ops_to_boot_ops(void); +extern void native_boot_pv_ops_to_ops(void); + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void +boot_setup_machine_id(bootblock_struct_t *bootblock) +{ + boot_pv_setup_machine_id(bootblock); +} +static inline int __init +boot_loader_probe_memory(node_phys_mem_t *nodes_phys_mem, + boot_info_t *bootblock) +{ + return boot_pv_loader_probe_memory(nodes_phys_mem, bootblock); +} + +static inline e2k_size_t __init +boot_get_bootblock_size(boot_info_t *bootblock) +{ + return boot_pv_get_bootblock_size(bootblock); +} + +#define boot_panic(fmt, args...) 
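+	/* the ', ##args' form relies on the GNU ", ##" preprocessor \
+	 * extension: when the variadic part is empty, the comma that \
+	 * precedes it is dropped, so boot_panic("msg") expands cleanly \
+	 * with no dangling ',' */ \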
boot_pv_panic(fmt, ##args) + +static inline void +boot_cpu_relax(void) +{ + boot_pv_cpu_relax(); +} + +#ifdef CONFIG_SMP +static inline e2k_size_t __init +boot_smp_cpu_config(boot_info_t *bootblock) +{ + return boot_pv_smp_cpu_config(bootblock); +} + +static inline void __init +boot_smp_node_config(boot_info_t *bootblock) +{ + boot_pv_smp_node_config(bootblock); +} +#endif /* CONFIG_SMP */ + +static inline void __init +boot_reserve_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_pv_reserve_all_bootmem(bsp, boot_info); +} + +static inline void __init +boot_map_all_bootmem(bool bsp, boot_info_t *boot_info) +{ + boot_pv_map_all_bootmem(bsp, boot_info); +} + +static inline void __init_recv +boot_map_needful_to_equal_virt_area(e2k_addr_t stack_top_addr) +{ + boot_pv_map_needful_to_equal_virt_area(stack_top_addr); +} + +static inline void __init_recv +boot_kernel_switch_to_virt(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, int cpus_to_sync)) +{ + boot_pv_kernel_switch_to_virt(bsp, cpuid, boot_init_sequel_func); +} + +static inline void __init +init_terminate_boot_init(bool bsp, int cpuid) +{ + pv_init_terminate_boot_init(bsp, cpuid); +} + +static inline void __init +boot_parse_param(bootblock_struct_t *bootblock) +{ + boot_pv_parse_param(bootblock); +} + +static inline void __init +boot_clear_bss(void) +{ + boot_pv_clear_bss(); +} +static inline void __init +boot_check_bootblock(bool bsp, bootblock_struct_t *bootblock) +{ + boot_pv_check_bootblock(bsp, bootblock); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_ASM_PARAVIRT_BOOT_H_ */ diff --git a/arch/e2k/include/asm/paravirt/boot_flags.h b/arch/e2k/include/asm/paravirt/boot_flags.h new file mode 100644 index 0000000..a721935 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/boot_flags.h @@ -0,0 +1,52 @@ +/* + * E2K boot info flags support on paravirtualized kernel + * + * Copyright 2016 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_ASM_PARAVIRT_BOOT_FLAGS_H_ +#define _E2K_ASM_PARAVIRT_BOOT_FLAGS_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +#ifdef CONFIG_KVM_GUEST +#include +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +/* + * bootblock manipulations (read/write/set/reset) in virtual kernel mode + * on physical level: + * write through and uncachable access on virtual "physical" address + * bootblock virtual address can be only read + */ + +#define PV_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + ((!paravirt_enabled()) ? \ + NATIVE_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + : \ + GUEST_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field)) +#define PV_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + ((!paravirt_enabled()) ? \ + NATIVE_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) \ + : \ + GUEST_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value)) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#define READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) \ + PV_READ_BOOTBLOCK_FIELD(bootblock_p, blk_field) +#define WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, field_value) \ + PV_WRITE_BOOTBLOCK_FIELD(bootblock_p, blk_field, \ + field_value) +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! 
_E2K_ASM_PARAVIRT_BOOT_FLAGS_H_ */ diff --git a/arch/e2k/include/asm/paravirt/cacheflush.h b/arch/e2k/include/asm/paravirt/cacheflush.h new file mode 100644 index 0000000..9802339 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/cacheflush.h @@ -0,0 +1,88 @@ +#ifndef __ASM_PARAVIRT_CACHEFLUSH_H +#define __ASM_PARAVIRT_CACHEFLUSH_H + +#include +#include + +#ifdef CONFIG_SMP +static inline void +pv_smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_icache_range(start, end); +} +static inline void +pv_smp_flush_icache_range_array(void *icache_range_arr) +{ + pv_cpu_ops.smp_flush_icache_range_array(icache_range_arr); +} +static inline void +pv_smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + pv_cpu_ops.smp_flush_icache_page(vma, page); +} +static inline void +pv_smp_flush_icache_all(void) +{ + pv_cpu_ops.smp_flush_icache_all(); +} +static inline void +pv_smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + pv_cpu_ops.smp_flush_icache_kernel_line(addr); +} +#endif /* CONFIG_SMP */ + +static inline void +pv_flush_DCACHE_range(void *addr, size_t len) +{ + pv_mmu_ops.flush_dcache_range(addr, len); +} +static inline void +pv_clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + pv_mmu_ops.clear_dcache_l1_range(virt_addr, len); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#ifdef CONFIG_SMP +static inline void +smp_flush_icache_range(e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_icache_range(start, end); +} +static inline void +smp_flush_icache_range_array(struct icache_range_array *icache_range_arr) +{ + pv_smp_flush_icache_range_array(icache_range_arr); +} +static inline void +smp_flush_icache_page(struct vm_area_struct *vma, struct page *page) +{ + pv_smp_flush_icache_page(vma, page); +} +static inline void +smp_flush_icache_all(void) +{ + pv_smp_flush_icache_all(); +} +static inline void +smp_flush_icache_kernel_line(e2k_addr_t addr) +{ + pv_smp_flush_icache_kernel_line(addr); +} +#endif /* CONFIG_SMP */ + +static inline void +flush_DCACHE_range(void *addr, size_t len) +{ + pv_flush_DCACHE_range(addr, len); +} +static inline void +clear_DCACHE_L1_range(void *virt_addr, size_t len) +{ + pv_clear_DCACHE_L1_range(virt_addr, len); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* __ASM_PARAVIRT_CACHEFLUSH_H */ diff --git a/arch/e2k/include/asm/paravirt/clkr.h b/arch/e2k/include/asm/paravirt/clkr.h new file mode 100644 index 0000000..0e4d54c --- /dev/null +++ b/arch/e2k/include/asm/paravirt/clkr.h @@ -0,0 +1,20 @@ +#ifndef _ASM_E2K_PARAVIRT_CLKR_H +#define _ASM_E2K_PARAVIRT_CLKR_H + +#include +#include + +static inline unsigned long long pv_do_sched_clock(void) +{ + return pv_time_ops.do_sched_clock(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline unsigned long long do_sched_clock(void) +{ + return pv_do_sched_clock(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_PARAVIRT_CLKR_H */ diff --git a/arch/e2k/include/asm/paravirt/console.h b/arch/e2k/include/asm/paravirt/console.h new file mode 100644 index 0000000..85d4768 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/console.h @@ -0,0 +1,39 @@ + +#ifndef _ASM_E2K_PARAVIRT_CONSOLE_H_ +#define _ASM_E2K_PARAVIRT_CONSOLE_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include + +#ifdef CONFIG_KVM_GUEST +#include +#include + +static inline void +pv_virt_console_dump_putc(char c) +{ + if (!paravirt_enabled()) + native_virt_console_dump_putc(c); + else + 
kvm_virt_console_dump_putc(c); +} +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline void +virt_console_dump_putc(char c) +{ + pv_virt_console_dump_putc(c); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_E2K_PARAVIRT_CONSOLE_H_ */ diff --git a/arch/e2k/include/asm/paravirt/cpu.h b/arch/e2k/include/asm/paravirt/cpu.h new file mode 100644 index 0000000..1da54e4 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/cpu.h @@ -0,0 +1,99 @@ +#ifndef __ASM_E2K_PARAVIRT_CPU_H +#define __ASM_E2K_PARAVIRT_CPU_H + +#ifdef __KERNEL__ +#include +#include +#include + +#ifdef CONFIG_KVM_GUEST +#include + +#define PV_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } else { \ + KVM_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } \ +}) +#define PV_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } else { \ + KVM_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } \ +}) +#define PV_IS_ID_VIRQ_VCPU_NO(cpu_id) \ +({ \ + bool pv_is; \ + if (!paravirt_enabled()) { \ + pv_is = NATIVE_IS_ID_VIRQ_VCPU_NO(cpu_id); \ + } else { \ + pv_is = KVM_IS_ID_VIRQ_VCPU_NO(cpu_id); \ + } \ + pv_is; \ +}) +#define PV_IS_ID_VCPU_ID(cpu_id) \ +({ \ + bool pv_is; \ + if (!paravirt_enabled()) { \ + pv_is = NATIVE_IS_ID_VCPU_ID(cpu_id); \ + } else { \ + pv_is = KVM_IS_ID_VCPU_ID(cpu_id); \ + } \ + pv_is; \ +}) +#define PV_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } else { \ + KVM_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id); \ + } \ +}) +#define PV_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } else { \ + KVM_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id); \ + } \ +}) +#else /* ! 
CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +static inline unsigned long +pv_get_cpu_running_cycles(void) +{ + return pv_time_ops.get_cpu_running_cycles(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ + +#define VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ + PV_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) +#define VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ + PV_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) +#define IS_ID_VIRQ_VCPU_NO(cpu_id) \ + PV_IS_ID_VIRQ_VCPU_NO(cpu_id) +#define IS_ID_VCPU_ID(cpu_id) \ + PV_IS_ID_VCPU_ID(cpu_id) +#define CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) \ + PV_CONVERT_VIRQ_VCPU_NO_TO_VCPU_ID(virq_vcpu_id) +#define CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) \ + PV_CONVERT_VCPU_ID_TO_VIRQ_VCPU_NO(vcpu_id) + +static inline unsigned long +get_cpu_running_cycles(void) +{ + return pv_get_cpu_running_cycles(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_E2K_PARAVIRT_CPU_H */ diff --git a/arch/e2k/include/asm/paravirt/cpu_regs_access.h b/arch/e2k/include/asm/paravirt/cpu_regs_access.h new file mode 100644 index 0000000..b9a258b --- /dev/null +++ b/arch/e2k/include/asm/paravirt/cpu_regs_access.h @@ -0,0 +1,1828 @@ + +#ifndef _E2K_PARAVIRT_CPU_REGS_ACCESS_H_ +#define _E2K_PARAVIRT_CPU_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +static inline void PV_PUT_UPDATED_CPU_REGS_FLAGS(unsigned long flags) +{ + if (pv_cpu_ops.put_updated_cpu_regs_flags == NULL) + return; + pv_cpu_ops.put_updated_cpu_regs_flags(flags); +} + +static inline unsigned long long PV_READ_OSCUD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSCUD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_OSCUD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSCUD_hi_reg_value(); +} + +static inline void PV_WRITE_OSCUD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSCUD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_OSCUD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSCUD_hi_reg_value(reg_value); +} + +static inline unsigned long long BOOT_PV_READ_OSCUD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSCUD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_OSCUD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSCUD_hi_reg_value); +} + +static inline void BOOT_PV_WRITE_OSCUD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSCUD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_OSCUD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSCUD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_OSGD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSGD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_OSGD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSGD_hi_reg_value(); +} +static inline unsigned long long BOOT_PV_READ_OSGD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSGD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_OSGD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSGD_hi_reg_value); +} + +static inline void PV_WRITE_OSGD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSGD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_OSGD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSGD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_OSGD_LO_REG_VALUE(unsigned long reg_value) +{ + 
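+	/*
+	 * The BOOT_PV_* and BOOT_PARAVIRT_* variants are for boot-time code
+	 * that runs while virtual memory support is not yet ready (compare
+	 * the boot_vp_to_pp() machinery earlier in this patch); the plain
+	 * PV_* accessors above are their post-paging counterparts.
+	 */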
BOOT_PARAVIRT_WRITE_REG(write_OSGD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_OSGD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSGD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_CUD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CUD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUD_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_CUD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_CUD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_CUD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_CUD_hi_reg_value); +} + +static inline void PV_WRITE_CUD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CUD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CUD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CUD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_CUD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_CUD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_CUD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_CUD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_GD_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_GD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_GD_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_GD_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_GD_LO_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_GD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_GD_HI_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_GD_hi_reg_value); +} + +static inline void PV_WRITE_GD_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_GD_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_GD_LO_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_GD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_GD_HI_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_GD_hi_reg_value, reg_value); +} + +static inline unsigned long long PV_READ_PSP_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSP_LO_REG_VALUE(); + else + return pv_cpu_ops.read_PSP_lo_reg_value(); +} + +static inline unsigned long long PV_READ_PSP_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSP_HI_REG_VALUE(); + else + return pv_cpu_ops.read_PSP_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_PSP_LO_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PSP_LO_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PSP_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_PSP_HI_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PSP_HI_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PSP_hi_reg_value); +} + +static inline void PV_WRITE_PSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSP_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_PSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSP_hi_reg_value(reg_value); +} + +static 
inline void BOOT_PV_WRITE_PSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_PSP_LO_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PSP_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_PSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PSP_hi_reg_value, reg_value); +} +#define PV_NV_READ_PSP_LO_REG_VALUE() PV_READ_PSP_LO_REG_VALUE() +#define PV_NV_READ_PSP_HI_REG_VALUE() PV_READ_PSP_HI_REG_VALUE() +#define PV_READ_PSP_LO_REG() \ +({ \ + e2k_psp_lo_t PSP_lo; \ + PSP_lo.PSP_lo_half = PV_READ_PSP_LO_REG_VALUE(); \ + PSP_lo; \ +}) +#define PV_READ_PSP_HI_REG() \ +({ \ + e2k_psp_hi_t PSP_hi; \ + PSP_hi.PSP_hi_half = PV_READ_PSP_HI_REG_VALUE(); \ + PSP_hi; \ +}) +#define PV_READ_PSP_REG() \ +({ \ + psp_struct_t PSP; \ + PSP.PSP_hi_struct = PV_READ_PSP_HI_REG(); \ + PSP.PSP_lo_struct = PV_READ_PSP_LO_REG(); \ + PSP; \ +}) +#define PV_NV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define PV_NV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define PV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ +({ \ + PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value); \ + PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value); \ +}) +#define PV_NV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) \ + PV_WRITE_PSP_REG_VALUE(PSP_hi_value, PSP_lo_value) +#define PV_WRITE_PSP_REG(PSP_hi, PSP_lo) \ +({ \ + PV_WRITE_PSP_REG_VALUE(PSP_hi.PSP_hi_half, PSP_lo.PSP_lo_half); \ +}) + +static inline unsigned long long PV_READ_PSHTP_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSHTP_REG_VALUE(); + else + return pv_cpu_ops.read_PSHTP_reg_value(); +} + +static inline void PV_WRITE_PSHTP_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PSHTP_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSHTP_reg_value(reg_value); +} +#define PV_NV_READ_PSHTP_REG_VALUE() PV_READ_PSHTP_REG_VALUE() +#define PV_READ_PSHTP_REG() \ +({ \ + e2k_pshtp_t PSHTP_reg; \ + PSHTP_reg.word = PV_READ_PSHTP_REG_VALUE(); \ + PSHTP_reg; \ +}) +#define PV_WRITE_PSHTP_REG(PSHTP_reg) \ +({ \ + PV_WRITE_PSHTP_REG_VALUE(AS_WORD(PSHTP_reg)); \ +}) +#define PV_STRIP_PSHTP_WINDOW() PV_WRITE_PSHTP_REG_VALUE(0) + +static inline unsigned long long PV_READ_PCSP_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PCSP_LO_REG_VALUE(); + else + return pv_cpu_ops.read_PCSP_lo_reg_value(); +} + +static inline unsigned long long PV_READ_PCSP_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PCSP_HI_REG_VALUE(); + else + return pv_cpu_ops.read_PCSP_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_PCSP_LO_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PCSP_LO_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PCSP_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_PCSP_HI_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PCSP_HI_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PCSP_hi_reg_value); +} + +static inline void PV_WRITE_PCSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PCSP_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_PCSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + 
NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PCSP_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_PCSP_LO_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_PCSP_LO_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PCSP_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_PCSP_HI_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PCSP_hi_reg_value, reg_value); +} +#define PV_NV_READ_PCSP_LO_REG_VALUE() PV_READ_PCSP_LO_REG_VALUE() +#define PV_NV_READ_PCSP_HI_REG_VALUE() PV_READ_PCSP_HI_REG_VALUE() +#define PV_READ_PCSP_LO_REG() \ +({ \ + e2k_pcsp_lo_t PCSP_lo; \ + PCSP_lo.PCSP_lo_half = PV_READ_PCSP_LO_REG_VALUE(); \ + PCSP_lo; \ +}) +#define PV_READ_PCSP_HI_REG() \ +({ \ + e2k_pcsp_hi_t PCSP_hi; \ + PCSP_hi.PCSP_hi_half = PV_READ_PCSP_HI_REG_VALUE(); \ + PCSP_hi; \ +}) +#define PV_READ_PCSP_REG() \ +({ \ + pcsp_struct_t PCSP; \ + PCSP.PCSP_hi_struct = PV_READ_PCSP_HI_REG(); \ + PCSP.PCSP_lo_struct = PV_READ_PCSP_LO_REG(); \ + PCSP; \ +}) +#define PV_READ_PCSP_REG_TO(PCSP) \ +({ \ + *PCSP = PV_READ_PCSP_REG(); \ +}) +#define PV_NV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define PV_NV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define PV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ +({ \ + PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value); \ + PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value); \ +}) +#define PV_NV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) \ + PV_WRITE_PCSP_REG_VALUE(PCSP_hi_value, PCSP_lo_value) +#define PV_WRITE_PCSP_REG(PCSP_hi, PCSP_lo) \ + PV_WRITE_PCSP_REG_VALUE(PCSP_hi.PCSP_hi_half, \ + PCSP_lo.PCSP_lo_half) + +static inline int PV_READ_PCSHTP_REG_SVALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_READ_PCSHTP_REG_SVALUE(); + else + return pv_cpu_ops.read_PCSHTP_reg_value(); +} + +static inline void PV_WRITE_PCSHTP_REG_SVALUE(int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PCSHTP_REG_SVALUE(reg_value); + else + pv_cpu_ops.write_PCSHTP_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_CR0_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR0_LO_REG_VALUE(); + else + return pv_cpu_ops.read_CR0_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CR0_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR0_HI_REG_VALUE(); + else + return pv_cpu_ops.read_CR0_hi_reg_value(); +} + +static inline unsigned long long PV_READ_CR1_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR1_LO_REG_VALUE(); + else + return pv_cpu_ops.read_CR1_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CR1_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_CR1_HI_REG_VALUE(); + else + return pv_cpu_ops.read_CR1_hi_reg_value(); +} + +static inline void PV_WRITE_CR0_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR0_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CR0_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR0_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_CR1_LO_REG_VALUE(unsigned long reg_value) +{ + if 
(!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR1_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CR1_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_CR1_hi_reg_value(reg_value); +} +#define PV_NV_READ_CR0_LO_REG_VALUE() PV_READ_CR0_LO_REG_VALUE() +#define PV_NV_READ_CR0_HI_REG_VALUE() PV_READ_CR0_HI_REG_VALUE() +#define PV_NV_READ_CR1_LO_REG_VALUE() PV_READ_CR1_LO_REG_VALUE() +#define PV_NV_READ_CR1_HI_REG_VALUE() PV_READ_CR1_HI_REG_VALUE() +#define PV_READ_CR0_LO_REG() \ +({ \ + e2k_cr0_lo_t CR0_lo; \ + CR0_lo.CR0_lo_half = PV_READ_CR0_LO_REG_VALUE(); \ + CR0_lo; \ +}) +#define PV_READ_CR0_HI_REG() \ +({ \ + e2k_cr0_hi_t CR0_hi; \ + CR0_hi.CR0_hi_half = PV_READ_CR0_HI_REG_VALUE(); \ + CR0_hi; \ +}) +#define PV_READ_CR1_LO_REG() \ +({ \ + e2k_cr1_lo_t CR1_lo; \ + CR1_lo.CR1_lo_half = PV_READ_CR1_LO_REG_VALUE(); \ + CR1_lo; \ +}) +#define PV_READ_CR1_HI_REG() \ +({ \ + e2k_cr1_hi_t CR1_hi; \ + CR1_hi.CR1_hi_half = PV_READ_CR1_HI_REG_VALUE(); \ + CR1_hi; \ +}) +#define PV_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + PV_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define PV_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ + PV_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define PV_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + PV_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define PV_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + PV_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +static inline unsigned long long PV_READ_CTPR_REG_VALUE(int reg_no) +{ + return pv_cpu_ops.read_CTPR_reg_value(reg_no); +} + +static inline void PV_WRITE_CTPR_REG_VALUE(int reg_no, unsigned long reg_value) +{ + pv_cpu_ops.write_CTPR_reg_value(reg_no, reg_value); +} + +static inline unsigned long long PV_READ_USD_LO_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_USD_LO_REG_VALUE(); + else + return pv_cpu_ops.read_USD_lo_reg_value(); +} + +static inline unsigned long long PV_READ_USD_HI_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_USD_HI_REG_VALUE(); + else + return pv_cpu_ops.read_USD_hi_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_USD_LO_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_USD_LO_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_USD_lo_reg_value); +} + +static inline unsigned long long BOOT_PV_READ_USD_HI_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_USD_HI_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_USD_hi_reg_value); +} + +static inline void PV_WRITE_USD_LO_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_USD_LO_REG_VALUE(reg_value); + else + pv_cpu_ops.write_USD_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_USD_HI_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_NV_WRITE_USD_HI_REG_VALUE(reg_value); + else + pv_cpu_ops.write_USD_hi_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_USD_LO_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_USD_LO_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_USD_lo_reg_value, reg_value); +} + +static inline void BOOT_PV_WRITE_USD_HI_REG_VALUE(unsigned long reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_NV_WRITE_USD_HI_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_USD_hi_reg_value, reg_value); +} +#define PV_READ_USD_LO_REG() \ 
+({ \ + e2k_usd_lo_t USD_lo; \ + USD_lo.USD_lo_half = PV_READ_USD_LO_REG_VALUE(); \ + USD_lo; \ +}) +#define PV_READ_USD_HI_REG() \ +({ \ + e2k_usd_hi_t USD_hi; \ + USD_hi.USD_hi_half = PV_READ_USD_HI_REG_VALUE(); \ + USD_hi; \ +}) +#define PV_READ_USD_REG() \ +({ \ + usd_struct_t USD; \ + USD.USD_hi_struct = PV_READ_USD_HI_REG(); \ + USD.USD_lo_struct = PV_READ_USD_LO_REG(); \ + USD; \ +}) +#define PV_NV_WRITE_USD_LO_REG_VALUE(usd_lo_value) \ + PV_WRITE_USD_LO_REG_VALUE(usd_lo_value) +#define PV_NV_WRITE_USD_HI_REG_VALUE(usd_hi_value) \ + PV_WRITE_USD_HI_REG_VALUE(usd_hi_value) +#define PV_WRITE_USD_LO_REG(USD_lo) \ + PV_WRITE_USD_LO_REG_VALUE(USD_lo.USD_lo_half) +#define PV_WRITE_USD_HI_REG(USD_hi) \ + PV_WRITE_USD_HI_REG_VALUE(USD_hi.USD_hi_half) +#define PV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ +({ \ + PV_WRITE_USD_HI_REG_VALUE(USD_hi_value); \ + PV_WRITE_USD_LO_REG_VALUE(USD_lo_value); \ +}) +#define PV_NV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) \ + PV_WRITE_USD_REG_VALUE(USD_hi_value, USD_lo_value) +#define PV_WRITE_USD_REG(USD_hi, USD_lo) \ +({ \ + PV_WRITE_USD_REG_VALUE(USD_hi.USD_hi_half, USD_lo.USD_lo_half); \ +}) + +static inline unsigned long long PV_READ_PUSD_LO_REG_VALUE(void) +{ + return PV_READ_USD_LO_REG_VALUE(); +} + +static inline unsigned long long PV_READ_PUSD_HI_REG_VALUE(void) +{ + return PV_READ_USD_HI_REG_VALUE(); +} + +static inline void PV_WRITE_PUSD_LO_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_USD_LO_REG_VALUE(reg_value); +} + +static inline void PV_WRITE_PUSD_HI_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_USD_HI_REG_VALUE(reg_value); +} +#define PV_READ_PUSD_LO_REG() \ +({ \ + e2k_pusd_lo_t PUSD_lo; \ + PUSD_lo.PUSD_lo_half = PV_READ_PUSD_LO_REG_VALUE(); \ + PUSD_lo; \ +}) +#define PV_READ_PUSD_HI_REG() \ +({ \ + e2k_pusd_hi_t PUSD_hi; \ + PUSD_hi.PUSD_hi_half = PV_READ_PUSD_HI_REG_VALUE(); \ + PUSD_hi; \ +}) +#define PV_READ_PUSD_REG() \ +({ \ + pusd_struct_t PUSD; \ + PUSD.PUSD_hi_struct = PV_READ_PUSD_HI_REG(); \ + PUSD.PUSD_lo_struct = PV_READ_PUSD_LO_REG(); \ + PUSD; \ +}) +#define PV_READ_PUSD_REG_TO(PUSD) \ +({ \ + *PUSD = PV_READ_PUSD_REG(); \ +}) +#define PV_WRITE_PUSD_LO_REG(PUSD_lo) \ + PV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo.PUSD_lo_half) +#define PV_WRITE_PUSD_HI_REG(PUSD_hi) \ + PV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi.PUSD_hi_half) +#define PV_WRITE_PUSD_REG_VALUE(PUSD_hi_value, PUSD_lo_value) \ +({ \ + PV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value); \ + PV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value); \ +}) +#define PV_WRITE_PUSD_REG(PUSD_hi, PUSD_lo) \ +({ \ + PV_WRITE_PUSD_REG_VALUE(PUSD_hi.PUSD_hi_half, \ + PUSD_lo.PUSD_lo_half); \ +}) + +static inline unsigned long long PV_READ_SBR_REG_VALUE(void) +{ + return pv_cpu_ops.read_SBR_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_SBR_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_SBR_reg_value); +} + +static inline void PV_WRITE_SBR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SBR_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_SBR_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_SBR_reg_value, reg_value); +} +#define PV_NV_READ_SBR_REG_VALUE() PV_READ_SBR_REG_VALUE() +#define PV_READ_SBR_REG() \ +({ \ + e2k_sbr_t SBR; \ + SBR.SBR_reg = PV_READ_SBR_REG_VALUE(); \ + SBR; \ +}) +#define PV_NV_WRITE_SBR_REG_VALUE(SBR_value) \ + PV_WRITE_SBR_REG_VALUE(SBR_value) + +static inline unsigned long long PV_READ_USBR_REG_VALUE(void) +{ + return PV_READ_SBR_REG_VALUE(); +} + +static inline unsigned long long 
BOOT_PV_READ_USBR_REG_VALUE(void) +{ + return BOOT_PV_READ_SBR_REG_VALUE(); +} + +static inline void PV_WRITE_USBR_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_SBR_REG_VALUE(reg_value); +} + +static inline void BOOT_PV_WRITE_USBR_REG_VALUE(unsigned long reg_value) +{ + BOOT_PV_WRITE_SBR_REG_VALUE(reg_value); +} +#define PV_READ_USBR_REG() \ +({ \ + e2k_usbr_t USBR; \ + USBR.USBR_reg = PV_READ_USBR_REG_VALUE(); \ + USBR; \ +}) +#define PV_WRITE_USBR_REG(USBR) \ + PV_WRITE_USBR_REG_VALUE(USBR.USBR_reg) + +static inline unsigned long long PV_READ_WD_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_READ_WD_REG_VALUE(); + else + return pv_cpu_ops.read_WD_reg_value(); +} + +static inline void PV_WRITE_WD_REG_VALUE(unsigned long reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_WD_REG_VALUE(reg_value); + else + pv_cpu_ops.write_WD_reg_value(reg_value); +} +#define PV_READ_WD_REG() \ +({ \ + e2k_wd_t WD; \ + WD.WD_reg = PV_READ_WD_REG_VALUE(); \ + WD; \ +}) +#define PV_WRITE_WD_REG(WD) \ + PV_WRITE_WD_REG_VALUE(WD.WD_reg) + +#ifdef NEED_PARAVIRT_LOOP_REGISTERS +static inline unsigned long long PV_READ_LSR_REG_VALUE(void) +{ + return pv_cpu_ops.read_LSR_reg_value(); +} + +static inline void PV_WRITE_LSR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_LSR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_ILCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_ILCR_reg_value(); +} + +static inline void PV_WRITE_ILCR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_ILCR_reg_value(reg_value); +} +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + +static inline unsigned long long PV_READ_OSR0_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSR0_reg_value(); +} + +static inline unsigned long long BOOT_PV_READ_OSR0_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_OSR0_reg_value); +} + +static inline void PV_WRITE_OSR0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_OSR0_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_OSR0_REG_VALUE(unsigned long reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_OSR0_reg_value, reg_value); +} + +static inline unsigned int PV_READ_OSEM_REG_VALUE(void) +{ + return pv_cpu_ops.read_OSEM_reg_value(); +} + +static inline void PV_WRITE_OSEM_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_OSEM_reg_value(reg_value); +} + +static inline unsigned int PV_READ_BGR_REG_VALUE(void) +{ + return pv_cpu_ops.read_BGR_reg_value(); +} + +static inline unsigned int BOOT_PV_READ_BGR_REG_VALUE(void) +{ + return BOOT_PARAVIRT_READ_REG(read_BGR_reg_value); +} + +static inline void PV_WRITE_BGR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_BGR_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_BGR_REG_VALUE(unsigned int reg_value) +{ + BOOT_PARAVIRT_WRITE_REG(write_BGR_reg_value, reg_value); +} +#define PV_READ_BGR_REG() \ +({ \ + e2k_bgr_t BGR; \ + BGR.BGR_reg = PV_READ_BGR_REG_VALUE(); \ + BGR; \ +}) +#define PV_WRITE_BGR_REG(BGR) \ + PV_WRITE_BGR_REG_VALUE(BGR.BGR_reg) + +static inline unsigned long long PV_READ_CLKR_REG_VALUE(void) +{ + return pv_cpu_ops.read_CLKR_reg_value(); +} + +static inline void PV_WRITE_CLKR_REG_VALUE(void) +{ + pv_cpu_ops.write_CLKR_reg_value(); +} + +static inline unsigned long long PV_READ_SCLKR_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKR_reg_value(); +} + +static inline void PV_WRITE_SCLKR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SCLKR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_SCLKM1_REG_VALUE(void) +{ + return 
pv_cpu_ops.read_SCLKM1_reg_value(); +} + +static inline void PV_WRITE_SCLKM1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SCLKM1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_SCLKM2_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKM2_reg_value(); +} + +static inline unsigned long long PV_READ_SCLKM3_REG_VALUE(void) +{ + return pv_cpu_ops.read_SCLKM3_reg_value(); +} + +static inline void PV_WRITE_SCLKM2_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_SCLKM2_reg_value(reg_value); +} + +/* + * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1) + */ +static inline unsigned long long PV_READ_CU_HW0_REG_VALUE(void) +{ + return pv_cpu_ops.read_CU_HW0_reg_value(); +} +static inline unsigned long long PV_READ_CU_HW1_REG_VALUE(void) +{ + return pv_cpu_ops.read_CU_HW1_reg_value(); +} +static inline void PV_WRITE_CU_HW0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CU_HW0_reg_value(reg_value); +} +static inline void PV_WRITE_CU_HW1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CU_HW1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_RPR_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_RPR_lo_reg_value(); +} + +static inline unsigned long long PV_READ_RPR_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_RPR_hi_reg_value(); +} + +static inline void PV_WRITE_RPR_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_RPR_lo_reg_value(reg_value); +} +static inline void PV_CL_WRITE_RPR_LO_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_RPR_LO_REG_VALUE(reg_value); +} + +static inline void PV_WRITE_RPR_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_RPR_hi_reg_value(reg_value); +} +static inline void PV_CL_WRITE_RPR_HI_REG_VALUE(unsigned long reg_value) +{ + PV_WRITE_RPR_HI_REG_VALUE(reg_value); +} +#define PV_READ_RPR_LO_REG() \ +({ \ + e2k_rpr_lo_t RPR_lo; \ + RPR_lo.RPR_lo_half = PV_READ_RPR_LO_REG_VALUE(); \ + RPR_lo; \ +}) +#define PV_READ_RPR_HI_REG() \ +({ \ + e2k_rpr_hi_t RPR_hi; \ + RPR_hi.RPR_hi_half = PV_READ_RPR_HI_REG_VALUE(); \ + RPR_hi; \ +}) +#define PV_WRITE_RPR_LO_REG(RPR_lo) \ + PV_WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define PV_CL_WRITE_RPR_LO_REG(RPR_lo) \ + PV_CL_WRITE_RPR_LO_REG_VALUE(RPR_lo.RPR_lo_half) +#define PV_WRITE_RPR_HI_REG(RPR_hi) \ + PV_WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) +#define PV_CL_WRITE_RPR_HI_REG(RPR_hi) \ + PV_CL_WRITE_RPR_HI_REG_VALUE(RPR_hi.RPR_hi_half) + +static inline unsigned long long PV_READ_SBBP_REG_VALUE(void) +{ + return pv_cpu_ops.read_SBBP_reg_value(); +} + +static inline unsigned long long PV_READ_IP_REG_VALUE(void) +{ + return pv_cpu_ops.read_IP_reg_value(); +} + +static inline unsigned int PV_READ_DIBCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBCR_reg_value(); +} + +static inline void PV_WRITE_DIBCR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_DIBCR_reg_value(reg_value); +} + +static inline unsigned int PV_READ_DIBSR_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBSR_reg_value(); +} + +static inline void PV_WRITE_DIBSR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_DIBSR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIMCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIMCR_reg_value(); +} + +static inline void PV_WRITE_DIMCR_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIMCR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR0_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR0_reg_value(); +} + +static inline void PV_WRITE_DIBAR0_REG_VALUE(unsigned long 
reg_value) +{ + pv_cpu_ops.write_DIBAR0_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR1_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR1_reg_value(); +} + +static inline void PV_WRITE_DIBAR1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR2_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR2_reg_value(); +} + +static inline void PV_WRITE_DIBAR2_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR2_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIBAR3_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIBAR3_reg_value(); +} + +static inline void PV_WRITE_DIBAR3_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIBAR3_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIMAR0_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIMAR0_reg_value(); +} + +static inline void PV_WRITE_DIMAR0_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIMAR0_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_DIMAR1_REG_VALUE(void) +{ + return pv_cpu_ops.read_DIMAR1_reg_value(); +} + +static inline void PV_WRITE_DIMAR1_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DIMAR1_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_CUTD_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUTD_reg_value(); +} + +static inline void PV_NV_NOIRQ_WRITE_CUTD_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CUTD_reg_value(reg_value); +} + +static inline unsigned int PV_READ_CUIR_REG_VALUE(void) +{ + return pv_cpu_ops.read_CUIR_reg_value(); +} + +static inline unsigned int PV_READ_UPSR_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_UPSR_REG_VALUE(); + else + return pv_cpu_ops.read_UPSR_reg_value(); +} + +static inline unsigned int BOOT_PV_READ_UPSR_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_UPSR_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_UPSR_reg_value); +} + +static inline void PV_WRITE_UPSR_REG_VALUE(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_UPSR_REG_VALUE(reg_value); + else + pv_cpu_ops.write_UPSR_reg_value(reg_value); +} +static inline void PV_WRITE_UPSR_REG(e2k_upsr_t UPSR) +{ + PV_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg); +} + +static inline void BOOT_PV_WRITE_UPSR_REG_VALUE(unsigned int reg_value) +{ + if (!boot_paravirt_enabled()) + NATIVE_WRITE_UPSR_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_UPSR_reg_value, reg_value); +} +static inline void BOOT_PV_WRITE_UPSR_REG(e2k_upsr_t UPSR) +{ + BOOT_PV_WRITE_UPSR_REG_VALUE(UPSR.UPSR_reg); +} + +static inline void PV_WRITE_UPSR_IRQ_BARRIER(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_UPSR_IRQ_BARRIER(reg_value); + else + pv_cpu_ops.write_UPSR_irq_barrier(reg_value); +} + +static inline unsigned int PV_READ_PSR_REG_VALUE(void) +{ + if (!paravirt_enabled()) + return NATIVE_NV_READ_PSR_REG_VALUE(); + else + return pv_cpu_ops.read_PSR_reg_value(); +} + +static inline unsigned int BOOT_PV_READ_PSR_REG_VALUE(void) +{ + if (!boot_paravirt_enabled()) + return NATIVE_NV_READ_PSR_REG_VALUE(); + else + return BOOT_PARAVIRT_READ_REG(read_PSR_reg_value); +} + +static inline void PV_WRITE_PSR_REG_VALUE(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PSR_REG_VALUE(reg_value); + else + pv_cpu_ops.write_PSR_reg_value(reg_value); +} + +static inline void BOOT_PV_WRITE_PSR_REG_VALUE(unsigned int reg_value) +{ + if 
(!boot_paravirt_enabled()) + NATIVE_WRITE_PSR_REG_VALUE(reg_value); + else + BOOT_PARAVIRT_WRITE_REG(write_PSR_reg_value, reg_value); +} + +static inline void PV_WRITE_PSR_IRQ_BARRIER(unsigned int reg_value) +{ + if (!paravirt_enabled()) + NATIVE_WRITE_PSR_IRQ_BARRIER(reg_value); + else + pv_cpu_ops.write_PSR_irq_barrier(reg_value); +} + +static inline unsigned int PV_READ_PFPFR_REG_VALUE(void) +{ + return pv_cpu_ops.read_PFPFR_reg_value(); +} + +static inline void PV_WRITE_PFPFR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_PFPFR_reg_value(reg_value); +} + +static inline unsigned int PV_READ_FPCR_REG_VALUE(void) +{ + return pv_cpu_ops.read_FPCR_reg_value(); +} + +static inline void PV_WRITE_FPCR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_FPCR_reg_value(reg_value); +} + +static inline unsigned int PV_READ_FPSR_REG_VALUE(void) +{ + return pv_cpu_ops.read_FPSR_reg_value(); +} + +static inline void PV_WRITE_FPSR_REG_VALUE(unsigned int reg_value) +{ + pv_cpu_ops.write_FPSR_reg_value(reg_value); +} + +static inline unsigned long long PV_READ_CS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_CS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_CS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_CS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_DS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_DS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_DS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_DS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_ES_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_ES_lo_reg_value(); +} + +static inline unsigned long long PV_READ_ES_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_ES_hi_reg_value(); +} + +static inline unsigned long long PV_READ_FS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_FS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_FS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_FS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_GS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_GS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_GS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_GS_hi_reg_value(); +} + +static inline unsigned long long PV_READ_SS_LO_REG_VALUE(void) +{ + return pv_cpu_ops.read_SS_lo_reg_value(); +} + +static inline unsigned long long PV_READ_SS_HI_REG_VALUE(void) +{ + return pv_cpu_ops.read_SS_hi_reg_value(); +} + +static inline void PV_WRITE_CS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_CS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_CS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_DS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_DS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_DS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_ES_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_ES_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_ES_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_ES_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_FS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_FS_lo_reg_value(reg_value); +} + +static inline void PV_WRITE_FS_HI_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_FS_hi_reg_value(reg_value); +} + +static inline void PV_WRITE_GS_LO_REG_VALUE(unsigned long reg_value) +{ + pv_cpu_ops.write_GS_lo_reg_value(reg_value); +} + 
+static inline void PV_WRITE_GS_HI_REG_VALUE(unsigned long reg_value)
+{
+	pv_cpu_ops.write_GS_hi_reg_value(reg_value);
+}
+
+static inline void PV_WRITE_SS_LO_REG_VALUE(unsigned long reg_value)
+{
+	pv_cpu_ops.write_SS_lo_reg_value(reg_value);
+}
+
+static inline void PV_WRITE_SS_HI_REG_VALUE(unsigned long reg_value)
+{
+	pv_cpu_ops.write_SS_hi_reg_value(reg_value);
+}
+
+static inline unsigned long long PV_READ_IDR_REG_VALUE(void)
+{
+	if (pv_cpu_ops.read_IDR_reg_value)
+		return pv_cpu_ops.read_IDR_reg_value();
+	return 0;
+}
+static inline unsigned long long BOOT_PV_READ_IDR_REG_VALUE(void)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(boot_read_IDR_reg_value)) {
+		return BOOT_PARAVIRT_READ_REG(boot_read_IDR_reg_value);
+	}
+	return 0;
+}
+
+static inline unsigned int PV_READ_CORE_MODE_REG_VALUE(void)
+{
+	if (pv_cpu_ops.read_CORE_MODE_reg_value)
+		return pv_cpu_ops.read_CORE_MODE_reg_value();
+	return 0;
+}
+static inline unsigned int BOOT_PV_READ_CORE_MODE_REG_VALUE(void)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(boot_read_CORE_MODE_reg_value)) {
+		return BOOT_PARAVIRT_READ_REG(boot_read_CORE_MODE_reg_value);
+	}
+	return 0;
+}
+static inline void PV_WRITE_CORE_MODE_REG_VALUE(unsigned int modes)
+{
+	if (pv_cpu_ops.write_CORE_MODE_reg_value)
+		pv_cpu_ops.write_CORE_MODE_reg_value(modes);
+}
+static inline void BOOT_PV_WRITE_CORE_MODE_REG_VALUE(unsigned int modes)
+{
+	if (BOOT_PARAVIRT_GET_CPU_FUNC(boot_write_CORE_MODE_reg_value))
+		BOOT_PARAVIRT_WRITE_REG(boot_write_CORE_MODE_reg_value, modes);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+/*
+ * Set flags of updated VCPU registers
+ */
+
+static inline void PUT_UPDATED_CPU_REGS_FLAGS(unsigned long flags)
+{
+	PV_PUT_UPDATED_CPU_REGS_FLAGS(flags);
+}
+
+/*
+ * Read/write word Procedure Stack Hardware Top Pointer (PSHTP)
+ */
+#define READ_PSHTP_REG_VALUE() PV_READ_PSHTP_REG_VALUE()
+
+#define WRITE_PSHTP_REG_VALUE(PSHTP_value) \
+	PV_WRITE_PSHTP_REG_VALUE(PSHTP_value)
+
+/*
+ * Read/write word Procedure Chain Stack Hardware Top Pointer (PCSHTP)
+ */
+#define READ_PCSHTP_REG_SVALUE() PV_READ_PCSHTP_REG_SVALUE()
+
+#define WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue) \
+	PV_WRITE_PCSHTP_REG_SVALUE(PCSHTP_svalue)
+
+/*
+ * Read/write low/high double-word OS Compilation Unit Descriptor (OSCUD)
+ */
+
+#define READ_OSCUD_LO_REG_VALUE() PV_READ_OSCUD_LO_REG_VALUE()
+#define READ_OSCUD_HI_REG_VALUE() PV_READ_OSCUD_HI_REG_VALUE()
+#define BOOT_READ_OSCUD_LO_REG_VALUE() BOOT_PV_READ_OSCUD_LO_REG_VALUE()
+#define BOOT_READ_OSCUD_HI_REG_VALUE() BOOT_PV_READ_OSCUD_HI_REG_VALUE()
+
+#define WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+	PV_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value)
+#define WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+	PV_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value)
+#define BOOT_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value) \
+	BOOT_PV_WRITE_OSCUD_LO_REG_VALUE(OSCUD_lo_value)
+#define BOOT_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value) \
+	BOOT_PV_WRITE_OSCUD_HI_REG_VALUE(OSCUD_hi_value)
+
+/*
+ * Read/write low/high double-word OS Globals Register (OSGD)
+ */
+
+#define READ_OSGD_LO_REG_VALUE() PV_READ_OSGD_LO_REG_VALUE()
+#define READ_OSGD_HI_REG_VALUE() PV_READ_OSGD_HI_REG_VALUE()
+#define BOOT_READ_OSGD_LO_REG_VALUE() BOOT_PV_READ_OSGD_LO_REG_VALUE()
+#define BOOT_READ_OSGD_HI_REG_VALUE() BOOT_PV_READ_OSGD_HI_REG_VALUE()
+
+#define WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \
+	PV_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value)
+#define WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \
+	PV_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value)
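+/*
+ * Note on the BOOT_* forms (an illustrative aside, using only names
+ * defined in this header): they are the boot-time variants of the same
+ * accessors and dispatch through BOOT_PARAVIRT_READ_REG() /
+ * BOOT_PARAVIRT_WRITE_REG() rather than through pv_cpu_ops, and for some
+ * registers (e.g. PSP above) the wrapper first tests
+ * boot_paravirt_enabled() and falls back to the NATIVE_* access.
+ * A sketch: BOOT_WRITE_OSGD_LO_REG_VALUE(x) resolves to
+ * BOOT_PV_WRITE_OSGD_LO_REG_VALUE(x).
+ */
+#define 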
BOOT_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) \ + BOOT_PV_WRITE_OSGD_LO_REG_VALUE(OSGD_lo_value) +#define BOOT_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) \ + BOOT_PV_WRITE_OSGD_HI_REG_VALUE(OSGD_hi_value) + + /* + * Read/write low/high double-word Compilation Unit Register (CUD) + */ + +#define READ_CUD_LO_REG_VALUE() PV_READ_CUD_LO_REG_VALUE() +#define READ_CUD_HI_REG_VALUE() PV_READ_CUD_HI_REG_VALUE() +#define BOOT_READ_CUD_LO_REG_VALUE() BOOT_PV_READ_CUD_LO_REG_VALUE() +#define BOOT_READ_CUD_HI_REG_VALUE() BOOT_PV_READ_CUD_HI_REG_VALUE() + +#define WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + PV_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + PV_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) +#define BOOT_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) \ + BOOT_PV_WRITE_CUD_LO_REG_VALUE(CUD_lo_value) +#define BOOT_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) \ + BOOT_PV_WRITE_CUD_HI_REG_VALUE(CUD_hi_value) + +/* + * Read/write low/high double-word Globals Register (GD) + */ + +#define READ_GD_LO_REG_VALUE() PV_READ_GD_LO_REG_VALUE() +#define READ_GD_HI_REG_VALUE() PV_READ_GD_HI_REG_VALUE() +#define BOOT_READ_GD_LO_REG_VALUE() BOOT_PV_READ_GD_LO_REG_VALUE() +#define BOOT_READ_GD_HI_REG_VALUE() BOOT_PV_READ_GD_HI_REG_VALUE() + +#define WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + PV_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + PV_WRITE_GD_HI_REG_VALUE(GD_hi_value) +#define BOOT_WRITE_GD_LO_REG_VALUE(GD_lo_value) \ + BOOT_PV_WRITE_GD_LO_REG_VALUE(GD_lo_value) +#define BOOT_WRITE_GD_HI_REG_VALUE(GD_hi_value) \ + BOOT_PV_WRITE_GD_HI_REG_VALUE(GD_hi_value) + +/* + * Read/write low/high quad-word Procedure Stack Pointer Register (PSP) + */ + +#define READ_PSP_LO_REG_VALUE() PV_READ_PSP_LO_REG_VALUE() +#define READ_PSP_HI_REG_VALUE() PV_READ_PSP_HI_REG_VALUE() +#define BOOT_READ_PSP_LO_REG_VALUE() BOOT_PV_READ_PSP_LO_REG_VALUE() +#define BOOT_READ_PSP_HI_REG_VALUE() BOOT_PV_READ_PSP_HI_REG_VALUE() + +#define WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) +#define BOOT_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) \ + BOOT_PV_WRITE_PSP_LO_REG_VALUE(PSP_lo_value) +#define BOOT_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) \ + BOOT_PV_WRITE_PSP_HI_REG_VALUE(PSP_hi_value) + +/* + * Read/write low/high quad-word Procedure Chain Stack Pointer Register (PCSP) + */ +#define READ_PCSP_LO_REG_VALUE() PV_READ_PCSP_LO_REG_VALUE() +#define READ_PCSP_HI_REG_VALUE() PV_READ_PCSP_HI_REG_VALUE() +#define BOOT_READ_PCSP_LO_REG_VALUE() BOOT_PV_READ_PCSP_LO_REG_VALUE() +#define BOOT_READ_PCSP_HI_REG_VALUE() BOOT_PV_READ_PCSP_HI_REG_VALUE() + +#define WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) +#define BOOT_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) \ + BOOT_PV_WRITE_PCSP_LO_REG_VALUE(PCSP_lo_value) +#define BOOT_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) \ + BOOT_PV_WRITE_PCSP_HI_REG_VALUE(PCSP_hi_value) + +/* + * Read/write low/high quad-word Current Chain Register (CR0/CR1) + */ +#define READ_CR0_LO_REG_VALUE() PV_READ_CR0_LO_REG_VALUE() +#define READ_CR0_HI_REG_VALUE() PV_READ_CR0_HI_REG_VALUE() +#define READ_CR1_LO_REG_VALUE() PV_READ_CR1_LO_REG_VALUE() +#define READ_CR1_HI_REG_VALUE() PV_READ_CR1_HI_REG_VALUE() + +#define WRITE_CR0_LO_REG_VALUE(CR0_lo_value) \ + PV_WRITE_CR0_LO_REG_VALUE(CR0_lo_value) +#define WRITE_CR0_HI_REG_VALUE(CR0_hi_value) \ 
+ PV_WRITE_CR0_HI_REG_VALUE(CR0_hi_value) +#define WRITE_CR1_LO_REG_VALUE(CR1_lo_value) \ + PV_WRITE_CR1_LO_REG_VALUE(CR1_lo_value) +#define WRITE_CR1_HI_REG_VALUE(CR1_hi_value) \ + PV_WRITE_CR1_HI_REG_VALUE(CR1_hi_value) + +/* + * Read/write double-word Control Transfer Preparation Registers + * (CTPR1/CTPR2/CTPR3) + */ +#define READ_CTPR_REG_VALUE(reg_no) PV_READ_CTPR_REG_VALUE(reg_no) + +#define WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) \ + PV_WRITE_CTPR_REG_VALUE(reg_no, CTPR_value) + +/* + * Read/write low/high double-word Trap Info Registers (TIRs) + */ +#define READ_TIR_LO_REG_VALUE() PV_READ_TIR_LO_REG_VALUE() +#define READ_TIR_HI_REG_VALUE() PV_READ_TIR_HI_REG_VALUE() + +#define WRITE_TIR_LO_REG_VALUE(TIR_lo_value) \ + PV_WRITE_TIR_LO_REG_VALUE(TIR_lo_value) +#define WRITE_TIR_HI_REG_VALUE(TIR_hi_value) \ + PV_WRITE_TIR_HI_REG_VALUE(TIR_hi_value) + +/* + * Read/write low/high double-word Non-Protected User Stack Descriptor + * Register (USD) + */ +#define READ_USD_LO_REG_VALUE() PV_READ_USD_LO_REG_VALUE() +#define READ_USD_HI_REG_VALUE() PV_READ_USD_HI_REG_VALUE() +#define BOOT_READ_USD_LO_REG_VALUE() BOOT_PV_READ_USD_LO_REG_VALUE() +#define BOOT_READ_USD_HI_REG_VALUE() BOOT_PV_READ_USD_HI_REG_VALUE() + +#define WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + PV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + PV_WRITE_USD_HI_REG_VALUE(USD_hi_value) +#define BOOT_WRITE_USD_LO_REG_VALUE(USD_lo_value) \ + BOOT_PV_WRITE_USD_LO_REG_VALUE(USD_lo_value) +#define BOOT_WRITE_USD_HI_REG_VALUE(USD_hi_value) \ + BOOT_PV_WRITE_USD_HI_REG_VALUE(USD_hi_value) + +/* + * Read/write low/high double-word Protected User Stack Descriptor + * Register (PUSD) + */ +#define READ_PUSD_LO_REG_VALUE() PV_READ_PUSD_LO_REG_VALUE() +#define READ_PUSD_HI_REG_VALUE() PV_READ_PUSD_HI_REG_VALUE() + +#define WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) \ + PV_WRITE_PUSD_LO_REG_VALUE(PUSD_lo_value) +#define WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) \ + PV_WRITE_PUSD_HI_REG_VALUE(PUSD_hi_value) + +/* + * Read/write double-word User Stacks Base Register (USBR) + */ +#define READ_SBR_REG_VALUE() PV_READ_SBR_REG_VALUE() +#define READ_USBR_REG_VALUE() PV_READ_USBR_REG_VALUE() +#define BOOT_READ_USBR_REG_VALUE() BOOT_PV_READ_USBR_REG_VALUE() +#define BOOT_READ_SBR_REG_VALUE() BOOT_PV_READ_SBR_REG_VALUE() + +#define WRITE_SBR_REG_VALUE(SBR_value) \ + PV_WRITE_SBR_REG_VALUE(SBR_value) +#define WRITE_USBR_REG_VALUE(USBR_value) \ + PV_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_USBR_REG_VALUE(USBR_value) \ + BOOT_PV_WRITE_USBR_REG_VALUE(USBR_value) +#define BOOT_WRITE_SBR_REG_VALUE(SBR_value) \ + BOOT_PV_WRITE_SBR_REG_VALUE(SBR_value) + +/* + * Read/write double-word Window Descriptor Register (WD) + */ +#define READ_WD_REG_VALUE() PV_READ_WD_REG_VALUE() +#define WRITE_WD_REG_VALUE(WD_value) PV_WRITE_WD_REG_VALUE(WD_value) + +/* + * Read/write double-word Loop Status Register (LSR) + */ +#define READ_LSR_REG_VALUE() \ + PV_READ_LSR_REG_VALUE() +#define WRITE_LSR_REG_VALUE(LSR_value) \ + PV_WRITE_LSR_REG_VALUE(LSR_value) + +/* + * Read/write double-word Initial Loop Counters Register (ILCR) + */ +#define READ_ILCR_REG_VALUE() \ + PV_READ_ILCR_REG_VALUE() +#define WRITE_ILCR_REG_VALUE(ILCR_value) \ + PV_WRITE_ILCR_REG_VALUE(ILCR_value) + +/* + * Read/write OS register which point to current process thread info + * structure (OSR0) + */ +#define READ_OSR0_REG_VALUE() PV_READ_OSR0_REG_VALUE() +#define BOOT_READ_OSR0_REG_VALUE() BOOT_PV_READ_OSR0_REG_VALUE() + +#define 
WRITE_OSR0_REG_VALUE(osr0_value) \
+	PV_WRITE_OSR0_REG_VALUE(osr0_value)
+#define BOOT_WRITE_OSR0_REG_VALUE(osr0_value) \
+	BOOT_PV_WRITE_OSR0_REG_VALUE(osr0_value)
+
+/*
+ * Read/write OS Entries Mask (OSEM)
+ */
+#define READ_OSEM_REG_VALUE() \
+	PV_READ_OSEM_REG_VALUE()
+#define WRITE_OSEM_REG_VALUE(OSEM_value) \
+	PV_WRITE_OSEM_REG_VALUE(OSEM_value)
+
+/*
+ * Read/write word Base Global Register (BGR)
+ */
+#define READ_BGR_REG_VALUE() PV_READ_BGR_REG_VALUE()
+#define BOOT_READ_BGR_REG_VALUE() BOOT_PV_READ_BGR_REG_VALUE()
+
+#define WRITE_BGR_REG_VALUE(BGR_value) \
+	PV_WRITE_BGR_REG_VALUE(BGR_value)
+#define BOOT_WRITE_BGR_REG_VALUE(BGR_value) \
+	BOOT_PV_WRITE_BGR_REG_VALUE(BGR_value)
+
+/*
+ * Read CPU current clock register (CLKR)
+ */
+#define READ_CLKR_REG_VALUE() PV_READ_CLKR_REG_VALUE()
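+
+/*
+ * A minimal usage sketch (illustrative; it assumes CLKR behaves as the
+ * CPU's current clock counter, per the comment above):
+ *
+ *	unsigned long long t0 = READ_CLKR_REG_VALUE();
+ */
+
+/*
+ * Read/Write system clock registers (SCLKM)
+ */
+#define READ_SCLKR_REG_VALUE() PV_READ_SCLKR_REG_VALUE()
+#define READ_SCLKM1_REG_VALUE() PV_READ_SCLKM1_REG_VALUE()
+#define READ_SCLKM2_REG_VALUE() PV_READ_SCLKM2_REG_VALUE()
+#define READ_SCLKM3_REG_VALUE() PV_READ_SCLKM3_REG_VALUE()
+
+#define WRITE_SCLKR_REG_VALUE(reg_value) \
+	PV_WRITE_SCLKR_REG_VALUE(reg_value)
+#define WRITE_SCLKM1_REG_VALUE(reg_value) \
+	PV_WRITE_SCLKM1_REG_VALUE(reg_value)
+#define WRITE_SCLKM2_REG_VALUE(reg_value) \
+	PV_WRITE_SCLKM2_REG_VALUE(reg_value)
+#define WRITE_SCLKM3_REG_VALUE(reg_value) \
+	PV_WRITE_SCLKM3_REG_VALUE(reg_value)
+
+/*
+ * Read/Write Control Unit HardWare registers (CU_HW0/CU_HW1)
+ */
+#define READ_CU_HW0_REG_VALUE() PV_READ_CU_HW0_REG_VALUE()
+#define READ_CU_HW1_REG_VALUE() PV_READ_CU_HW1_REG_VALUE()
+#define WRITE_CU_HW0_REG_VALUE(reg_value) \
+	PV_WRITE_CU_HW0_REG_VALUE(reg_value)
+#define WRITE_CU_HW1_REG_VALUE(reg_value) \
+	PV_WRITE_CU_HW1_REG_VALUE(reg_value)
+
+/*
+ * Read/write low/high double-word Recovery point register (RPR)
+ */
+#define READ_RPR_LO_REG_VALUE() PV_READ_RPR_LO_REG_VALUE()
+#define READ_RPR_HI_REG_VALUE() PV_READ_RPR_HI_REG_VALUE()
+#define READ_SBBP_REG_VALUE() PV_READ_SBBP_REG_VALUE()
+
+#define WRITE_RPR_LO_REG_VALUE(RPR_lo_value) \
+	PV_WRITE_RPR_LO_REG_VALUE(RPR_lo_value)
+#define WRITE_RPR_HI_REG_VALUE(RPR_hi_value) \
+	PV_WRITE_RPR_HI_REG_VALUE(RPR_hi_value)
+
+/*
+ * Read double-word CPU current Instruction Pointer register (IP)
+ */
+#define READ_IP_REG_VALUE() PV_READ_IP_REG_VALUE()
+
+/*
+ * Read debug and monitor registers
+ */
+#define READ_DIBCR_REG_VALUE() PV_READ_DIBCR_REG_VALUE()
+#define READ_DIBSR_REG_VALUE() PV_READ_DIBSR_REG_VALUE()
+#define READ_DIMCR_REG_VALUE() PV_READ_DIMCR_REG_VALUE()
+#define READ_DIBAR0_REG_VALUE() PV_READ_DIBAR0_REG_VALUE()
+#define READ_DIBAR1_REG_VALUE() PV_READ_DIBAR1_REG_VALUE()
+#define READ_DIBAR2_REG_VALUE() PV_READ_DIBAR2_REG_VALUE()
+#define READ_DIBAR3_REG_VALUE() PV_READ_DIBAR3_REG_VALUE()
+#define READ_DIMAR0_REG_VALUE() PV_READ_DIMAR0_REG_VALUE()
+#define READ_DIMAR1_REG_VALUE() PV_READ_DIMAR1_REG_VALUE()
+
+#define WRITE_DIBCR_REG_VALUE(DIBCR_value) \
+	PV_WRITE_DIBCR_REG_VALUE(DIBCR_value)
+#define WRITE_DIBSR_REG_VALUE(DIBSR_value) \
+	PV_WRITE_DIBSR_REG_VALUE(DIBSR_value)
+#define WRITE_DIMCR_REG_VALUE(DIMCR_value) \
+	PV_WRITE_DIMCR_REG_VALUE(DIMCR_value)
+#define WRITE_DIBAR0_REG_VALUE(DIBAR0_value) \
+	PV_WRITE_DIBAR0_REG_VALUE(DIBAR0_value)
+#define WRITE_DIBAR1_REG_VALUE(DIBAR1_value) \
+	PV_WRITE_DIBAR1_REG_VALUE(DIBAR1_value)
+#define WRITE_DIBAR2_REG_VALUE(DIBAR2_value) \
+	PV_WRITE_DIBAR2_REG_VALUE(DIBAR2_value)
+#define WRITE_DIBAR3_REG_VALUE(DIBAR3_value) \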
+	PV_WRITE_DIBAR3_REG_VALUE(DIBAR3_value)
+#define WRITE_DIMAR0_REG_VALUE(DIMAR0_value) \
+	PV_WRITE_DIMAR0_REG_VALUE(DIMAR0_value)
+#define WRITE_DIMAR1_REG_VALUE(DIMAR1_value) \
+	PV_WRITE_DIMAR1_REG_VALUE(DIMAR1_value)
+
+/*
+ * Read/write double-word Compilation Unit Table Register (CUTD)
+ */
+#define READ_CUTD_REG_VALUE() \
+	PV_READ_CUTD_REG_VALUE()
+#define WRITE_CUTD_REG_VALUE(CUTD_value) \
+	PV_NV_NOIRQ_WRITE_CUTD_REG_VALUE(CUTD_value)
+
+/*
+ * Read/write word Compilation Unit Index Register (CUIR)
+ */
+#define READ_CUIR_REG_VALUE() \
+	PV_READ_CUIR_REG_VALUE()
+
+/*
+ * Read/write double-word Compilation Unit Types Descriptor (TSD)
+ */
+#define READ_TSD_REG_VALUE() \
+	PV_READ_TSD_REG_VALUE()
+#define WRITE_TSD_REG_VALUE(TSD_value) \
+	PV_WRITE_TSD_REG_VALUE(TSD_value)
+
+/*
+ * Read/write word Processor State Register (PSR)
+ */
+#define READ_PSR_REG_VALUE() PV_READ_PSR_REG_VALUE()
+#define BOOT_READ_PSR_REG_VALUE() BOOT_PV_READ_PSR_REG_VALUE()
+
+#define WRITE_PSR_REG_VALUE(PSR_value) \
+	PV_WRITE_PSR_REG_VALUE(PSR_value)
+#define BOOT_WRITE_PSR_REG_VALUE(PSR_value) \
+	BOOT_PV_WRITE_PSR_REG_VALUE(PSR_value)
+#define WRITE_PSR_IRQ_BARRIER(PSR_value) \
+	PV_WRITE_PSR_IRQ_BARRIER(PSR_value)
+
+/*
+ * Read/write word User Processor State Register (UPSR)
+ */
+#define READ_UPSR_REG_VALUE() PV_READ_UPSR_REG_VALUE()
+#define BOOT_READ_UPSR_REG_VALUE() BOOT_PV_READ_UPSR_REG_VALUE()
+
+#define WRITE_UPSR_REG_VALUE(UPSR_value) \
+	PV_WRITE_UPSR_REG_VALUE(UPSR_value)
+#define BOOT_WRITE_UPSR_REG_VALUE(UPSR_value) \
+	BOOT_PV_WRITE_UPSR_REG_VALUE(UPSR_value)
+#define WRITE_UPSR_IRQ_BARRIER(PSR_value) \
+	PV_WRITE_UPSR_IRQ_BARRIER(PSR_value)
+
+/*
+ * Read/write word floating point control registers (PFPFR/FPCR/FPSR)
+ */
+#define READ_PFPFR_REG_VALUE() PV_READ_PFPFR_REG_VALUE()
+#define READ_FPCR_REG_VALUE() PV_READ_FPCR_REG_VALUE()
+#define READ_FPSR_REG_VALUE() PV_READ_FPSR_REG_VALUE()
+
+#define WRITE_PFPFR_REG_VALUE(PFPFR_value) \
+	PV_WRITE_PFPFR_REG_VALUE(PFPFR_value)
+#define WRITE_FPCR_REG_VALUE(FPCR_value) \
+	PV_WRITE_FPCR_REG_VALUE(FPCR_value)
+#define WRITE_FPSR_REG_VALUE(FPSR_value) \
+	PV_WRITE_FPSR_REG_VALUE(FPSR_value)
+
+/*
+ * Read/write low/high double-word Intel segment registers (xS)
+ */
+
+#define READ_CS_LO_REG_VALUE() PV_READ_CS_LO_REG_VALUE()
+#define READ_CS_HI_REG_VALUE() PV_READ_CS_HI_REG_VALUE()
+#define READ_DS_LO_REG_VALUE() PV_READ_DS_LO_REG_VALUE()
+#define READ_DS_HI_REG_VALUE() PV_READ_DS_HI_REG_VALUE()
+#define READ_ES_LO_REG_VALUE() PV_READ_ES_LO_REG_VALUE()
+#define READ_ES_HI_REG_VALUE() PV_READ_ES_HI_REG_VALUE()
+#define READ_FS_LO_REG_VALUE() PV_READ_FS_LO_REG_VALUE()
+#define READ_FS_HI_REG_VALUE() PV_READ_FS_HI_REG_VALUE()
+#define READ_GS_LO_REG_VALUE() PV_READ_GS_LO_REG_VALUE()
+#define READ_GS_HI_REG_VALUE() PV_READ_GS_HI_REG_VALUE()
+#define READ_SS_LO_REG_VALUE() PV_READ_SS_LO_REG_VALUE()
+#define READ_SS_HI_REG_VALUE() PV_READ_SS_HI_REG_VALUE()
+
+#define WRITE_CS_LO_REG_VALUE(sd) PV_WRITE_CS_LO_REG_VALUE(sd)
+#define WRITE_CS_HI_REG_VALUE(sd) PV_WRITE_CS_HI_REG_VALUE(sd)
+#define WRITE_DS_LO_REG_VALUE(sd) PV_WRITE_DS_LO_REG_VALUE(sd)
+#define WRITE_DS_HI_REG_VALUE(sd) PV_WRITE_DS_HI_REG_VALUE(sd)
+#define WRITE_ES_LO_REG_VALUE(sd) PV_WRITE_ES_LO_REG_VALUE(sd)
+#define WRITE_ES_HI_REG_VALUE(sd) PV_WRITE_ES_HI_REG_VALUE(sd)
+#define WRITE_FS_LO_REG_VALUE(sd) PV_WRITE_FS_LO_REG_VALUE(sd)
+#define WRITE_FS_HI_REG_VALUE(sd) PV_WRITE_FS_HI_REG_VALUE(sd)
+#define WRITE_GS_LO_REG_VALUE(sd) PV_WRITE_GS_LO_REG_VALUE(sd)
+#define WRITE_GS_HI_REG_VALUE(sd) PV_WRITE_GS_HI_REG_VALUE(sd)
+#define WRITE_SS_LO_REG_VALUE(sd) PV_WRITE_SS_LO_REG_VALUE(sd)
+#define WRITE_SS_HI_REG_VALUE(sd) PV_WRITE_SS_HI_REG_VALUE(sd)
+
+/*
+ * Read double-word User Processor Identification Register (IDR)
+ */
+#define READ_IDR_REG_VALUE() PV_READ_IDR_REG_VALUE()
+#define BOOT_READ_IDR_REG_VALUE() BOOT_PV_READ_IDR_REG_VALUE()
+
+/*
+ * Read/write Processor Core Mode Register (CORE_MODE)
+ */
+#define READ_CORE_MODE_REG_VALUE() PV_READ_CORE_MODE_REG_VALUE()
+#define BOOT_READ_CORE_MODE_REG_VALUE() BOOT_PV_READ_CORE_MODE_REG_VALUE()
+#define WRITE_CORE_MODE_REG_VALUE(modes) \
+	PV_WRITE_CORE_MODE_REG_VALUE(modes)
+#define BOOT_WRITE_CORE_MODE_REG_VALUE(modes) \
+	BOOT_PV_WRITE_CORE_MODE_REG_VALUE(modes)
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _E2K_PARAVIRT_CPU_REGS_ACCESS_H_ */
diff --git a/arch/e2k/include/asm/paravirt/e2k.h b/arch/e2k/include/asm/paravirt/e2k.h
new file mode 100644
index 0000000..aff4671
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/e2k.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_PARAVIRT_E2K_H_
+#define _ASM_PARAVIRT_E2K_H_
+
+/* Do not include the header directly, only through asm/e2k.h */
+
+
+#include
+#include
+#include
+#include
+
+#define pv_get_machine_id() \
+	((paravirt_enabled()) ? guest_machine_id : native_machine_id)
+#define boot_pv_get_machine_id() \
+	((boot_paravirt_enabled()) ? \
+		guest_machine_id : boot_native_machine_id)
+#define pv_set_machine_id(mach_id) \
+({ \
+	if (paravirt_enabled()) \
+		guest_machine_id = (mach_id); \
+	else \
+		native_machine_id = (mach_id); \
+})
+#define boot_pv_set_machine_id(mach_id) \
+({ \
+	if (boot_paravirt_enabled()) \
+		guest_machine_id = (mach_id); \
+	else \
+		boot_native_machine_id = (mach_id); \
+})
+
+static inline void
+pv_set_mach_type_id(void)
+{
+	pv_init_ops.set_mach_type_id();
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+#define get_machine_id() pv_get_machine_id()
+#define boot_get_machine_id() boot_pv_get_machine_id()
+#define set_machine_id(mach_id) pv_set_machine_id(mach_id)
+#define boot_set_machine_id(mach_id) boot_pv_set_machine_id(mach_id)
+
+static inline void set_mach_type_id(void)
+{
+	pv_set_mach_type_id();
+}
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _ASM_PARAVIRT_E2K_H_ */
diff --git a/arch/e2k/include/asm/paravirt/epic.h b/arch/e2k/include/asm/paravirt/epic.h
new file mode 100644
index 0000000..2f891fb
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/epic.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_PARAVIRT_EPIC_H
+#define __ASM_PARAVIRT_EPIC_H
+
+#ifdef __KERNEL__
+#include
+#include
+
+/*
+ * Basic functions accessing virtual CEPICs on guest.
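+ *
+ * Each accessor below simply forwards to the pv_epic_ops hook of the
+ * same name, while the boot_pv_* forms use the boot-time
+ * BOOT_PARAVIRT_EPIC_* dispatch instead. A minimal usage sketch (the
+ * register offset "reg" is illustrative):
+ *
+ *	unsigned int val = pv_epic_read_w(reg);
+ *	pv_epic_write_w(reg, val);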
+ */
+
+static inline unsigned int pv_epic_read_w(unsigned int reg)
+{
+	return pv_epic_ops.epic_read_w(reg);
+}
+
+static inline void pv_epic_write_w(unsigned int reg, unsigned int v)
+{
+	pv_epic_ops.epic_write_w(reg, v);
+}
+
+static inline unsigned long pv_epic_read_d(unsigned int reg)
+{
+	return pv_epic_ops.epic_read_d(reg);
+}
+
+static inline void pv_epic_write_d(unsigned int reg, unsigned long v)
+{
+	pv_epic_ops.epic_write_d(reg, v);
+}
+
+static inline unsigned int boot_pv_epic_read_w(unsigned int reg)
+{
+	return BOOT_PARAVIRT_EPIC_READ_W(reg);
+}
+
+static inline void boot_pv_epic_write_w(unsigned int reg, unsigned int v)
+{
+	BOOT_PARAVIRT_EPIC_WRITE_W(reg, v);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+
+static inline void epic_write_w(unsigned int reg, unsigned int v)
+{
+	pv_epic_write_w(reg, v);
+}
+
+static inline unsigned int epic_read_w(unsigned int reg)
+{
+	return pv_epic_read_w(reg);
+}
+
+static inline void epic_write_d(unsigned int reg, unsigned long v)
+{
+	pv_epic_write_d(reg, v);
+}
+
+static inline unsigned long epic_read_d(unsigned int reg)
+{
+	return pv_epic_read_d(reg);
+}
+
+static inline void boot_epic_write_w(unsigned int reg, unsigned int v)
+{
+	boot_pv_epic_write_w(reg, v);
+}
+
+static inline unsigned int boot_epic_read_w(unsigned int reg)
+{
+	return boot_pv_epic_read_w(reg);
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARAVIRT_EPIC_H */
diff --git a/arch/e2k/include/asm/paravirt/fast_syscalls.h b/arch/e2k/include/asm/paravirt/fast_syscalls.h
new file mode 100644
index 0000000..85bb758
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/fast_syscalls.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_E2K_PARAVIRT_FAST_SYSCALLS_H
+#define _ASM_E2K_PARAVIRT_FAST_SYSCALLS_H
+
+#include
+#include
+
+static inline int
+pv_do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp)
+{
+	return pv_cpu_ops.do_fast_clock_gettime(which_clock, tp);
+}
+
+static inline int
+pv_fast_sys_clock_gettime(const clockid_t which_clock,
+				struct timespec __user *tp)
+{
+	return pv_cpu_ops.fast_sys_clock_gettime(which_clock, tp);
+}
+
+static inline int
+pv_do_fast_gettimeofday(struct timeval *tv)
+{
+	return pv_cpu_ops.do_fast_gettimeofday(tv);
+}
+
+static inline int
+pv_fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize)
+{
+	return pv_cpu_ops.fast_sys_siggetmask(oset, sigsetsize);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+static inline int
+do_fast_clock_gettime(const clockid_t which_clock, struct timespec *tp)
+{
+	return pv_do_fast_clock_gettime(which_clock, tp);
+}
+
+static inline int
+fast_sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+{
+	return pv_fast_sys_clock_gettime(which_clock, tp);
+}
+
+static inline int
+do_fast_gettimeofday(struct timeval *tv)
+{
+	return pv_do_fast_gettimeofday(tv);
+}
+static inline int
+fast_sys_siggetmask(u64 __user *oset, size_t sigsetsize)
+{
+	return pv_fast_sys_siggetmask(oset, sigsetsize);
+}
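+
+/*
+ * A minimal usage sketch (illustrative): the fast-syscall path can query
+ * the clock without going through the full system call entry, e.g.
+ *
+ *	struct timespec ts;
+ *	int ret = do_fast_clock_gettime(CLOCK_MONOTONIC, &ts);
+ *
+ * which resolves here to pv_cpu_ops.do_fast_clock_gettime().
+ */
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _ASM_E2K_PARAVIRT_FAST_SYSCALLS_H */
+
diff --git a/arch/e2k/include/asm/paravirt/host_printk.h b/arch/e2k/include/asm/paravirt/host_printk.h
new file mode 100644
index 0000000..d10d6f7
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/host_printk.h
@@ -0,0 +1,21 @@
+/*
+ * KVM guest printk() on host support
+ *
+ * Copyright 2015 Salavat S. Guiliazov (atic@mcst.ru)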
+ */
+
+#ifndef _E2K_PARAVIRT_HOST_PRINTK_H
+#define _E2K_PARAVIRT_HOST_PRINTK_H
+
+#include
+#include
+#include
+
+#define pv_host_printk(fmt, args...) (pv_cpu_ops.host_printk(fmt, ##args))
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+#define host_printk(fmt, args...) pv_host_printk(fmt, ##args)
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _E2K_PARAVIRT_HOST_PRINTK_H */
diff --git a/arch/e2k/include/asm/paravirt/hw_stacks.h b/arch/e2k/include/asm/paravirt/hw_stacks.h
new file mode 100644
index 0000000..5bc6a84
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/hw_stacks.h
@@ -0,0 +1,141 @@
+/*
+ * KVM guest hardware stacks access support
+ *
+ * Copyright (C) 2016 MCST
+ */
+
+#ifndef _E2K_PARAVIRT_HW_STACKS_H_
+#define _E2K_PARAVIRT_HW_STACKS_H_
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+
+/* procedure chain stack items access */
+
+static inline unsigned long
+pv_get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr0_lo_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr0_lo_value(base, cr_ind);
+}
+static inline unsigned long
+pv_get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr0_hi_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr0_hi_value(base, cr_ind);
+}
+static inline unsigned long
+pv_get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr1_lo_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr1_lo_value(base, cr_ind);
+}
+static inline unsigned long
+pv_get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr1_hi_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr1_hi_value(base, cr_ind);
+}
+static inline void
+pv_put_active_cr0_lo_value(unsigned long cr_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr0_lo_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr0_lo_value(cr_value, base, cr_ind);
+}
+static inline void
+pv_put_active_cr0_hi_value(unsigned long cr_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr0_hi_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr0_hi_value(cr_value, base, cr_ind);
+}
+static inline void
+pv_put_active_cr1_lo_value(unsigned long cr_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr1_lo_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr1_lo_value(cr_value, base, cr_ind);
+}
+static inline void
+pv_put_active_cr1_hi_value(unsigned long cr_value,
+			e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr1_hi_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr1_hi_value(cr_value, base, cr_ind);
+}
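+
+/*
+ * A minimal usage sketch (illustrative): given the base address of an
+ * active chain stack and the index of a frame within it, the saved CR
+ * values of that frame can be read and updated:
+ *
+ *	unsigned long cr0_hi = pv_get_active_cr0_hi_value(base, cr_ind);
+ *	pv_put_active_cr0_hi_value(cr0_hi, base, cr_ind);
+ */
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+/*
+ * Procedure chain stack items access
+ */
+static inline unsigned long
+get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return pv_get_active_cr0_lo_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return pv_get_active_cr0_hi_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return 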
diff --git a/arch/e2k/include/asm/paravirt/hw_stacks.h b/arch/e2k/include/asm/paravirt/hw_stacks.h
new file mode 100644
index 0000000..5bc6a84
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/hw_stacks.h
@@ -0,0 +1,141 @@
+/*
+ * KVM guest hardware stacks access support
+ *
+ * Copyright (C) 2016 MCST
+ */
+
+#ifndef _E2K_PARAVIRT_HW_STACKS_H_
+#define _E2K_PARAVIRT_HW_STACKS_H_
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+
+/* procedure chain stack items access */
+
+static inline unsigned long
+pv_get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr0_lo_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr0_lo_value(base, cr_ind);
+}
+static inline unsigned long
+pv_get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr0_hi_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr0_hi_value(base, cr_ind);
+}
+static inline unsigned long
+pv_get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr1_lo_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr1_lo_value(base, cr_ind);
+}
+static inline unsigned long
+pv_get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		return native_get_active_cr1_hi_value(base, cr_ind);
+	else
+		return pv_cpu_ops.get_active_cr1_hi_value(base, cr_ind);
+}
+static inline void
+pv_put_active_cr0_lo_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr0_lo_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr0_lo_value(cr_value, base, cr_ind);
+}
+static inline void
+pv_put_active_cr0_hi_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr0_hi_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr0_hi_value(cr_value, base, cr_ind);
+}
+static inline void
+pv_put_active_cr1_lo_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr1_lo_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr1_lo_value(cr_value, base, cr_ind);
+}
+static inline void
+pv_put_active_cr1_hi_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	if (!paravirt_enabled())
+		native_put_active_cr1_hi_value(cr_value, base, cr_ind);
+	else
+		pv_cpu_ops.put_active_cr1_hi_value(cr_value, base, cr_ind);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* pure guest kernel (not paravirtualized based on pv_ops) */
+
+/*
+ * Procedure chain stack items access
+ */
+static inline unsigned long
+get_active_cr0_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return pv_get_active_cr0_lo_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr0_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return pv_get_active_cr0_hi_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr1_lo_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return pv_get_active_cr1_lo_value(base, cr_ind);
+}
+static inline unsigned long
+get_active_cr1_hi_value(e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	return pv_get_active_cr1_hi_value(base, cr_ind);
+}
+static inline void
+put_active_cr0_lo_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	pv_put_active_cr0_lo_value(cr_value, base, cr_ind);
+}
+static inline void
+put_active_cr0_hi_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	pv_put_active_cr0_hi_value(cr_value, base, cr_ind);
+}
+static inline void
+put_active_cr1_lo_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	pv_put_active_cr1_lo_value(cr_value, base, cr_ind);
+}
+static inline void
+put_active_cr1_hi_value(unsigned long cr_value,
+				e2k_addr_t base, e2k_addr_t cr_ind)
+{
+	pv_put_active_cr1_hi_value(cr_value, base, cr_ind);
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* _E2K_PARAVIRT_HW_STACKS_H_ */
+
+
diff --git a/arch/e2k/include/asm/paravirt/io.h b/arch/e2k/include/asm/paravirt/io.h
new file mode 100644
index 0000000..c8904c7
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/io.h
@@ -0,0 +1,294 @@
+
+#ifndef _E2K_ASM_PARAVIRT_IO_H_
+#define _E2K_ASM_PARAVIRT_IO_H_
+
+#include
+#include
+
+
+static inline u8 boot_pv_readb(void __iomem *addr)
+{
+	return BOOT_PARAVIRT_IO_READ(boot_readb, addr);
+}
+static inline u16 boot_pv_readw(void __iomem *addr)
+{
+	return BOOT_PARAVIRT_IO_READ(boot_readw, addr);
+}
+static inline u32 boot_pv_readl(void __iomem *addr)
+{
+	return BOOT_PARAVIRT_IO_READ(boot_readl, addr);
+}
+static inline u64 boot_pv_readll(void __iomem *addr)
+{
+	return BOOT_PARAVIRT_IO_READ(boot_readll, addr);
+}
+static inline void boot_pv_writeb(u8 b, void __iomem *addr)
+{
+	BOOT_PARAVIRT_IO_WRITE(boot_writeb, b, addr);
+}
+static inline void boot_pv_writew(u16 w, void __iomem *addr)
+{
+	BOOT_PARAVIRT_IO_WRITE(boot_writew, w, addr);
+}
+static inline void boot_pv_writel(u32 l, void __iomem *addr)
+{
+	BOOT_PARAVIRT_IO_WRITE(boot_writel, l, addr);
+}
+static inline void boot_pv_writell(u64 q, void __iomem *addr)
+{
+	BOOT_PARAVIRT_IO_WRITE(boot_writell, q, addr);
+}
+
+static inline u8 pv_readb(void __iomem *addr)
+{
+	return pv_io_ops.readb(addr);
+}
+static inline u16 pv_readw(void __iomem *addr)
+{
+	return pv_io_ops.readw(addr);
+}
+static inline u32 pv_readl(void __iomem *addr)
+{
+	return pv_io_ops.readl(addr);
+}
+static inline u64 pv_readll(void __iomem *addr)
+{
+	return pv_io_ops.readll(addr);
+}
+static inline void pv_writeb(u8 b, void __iomem *addr)
+{
+	pv_io_ops.writeb(b, addr);
+}
+static inline void pv_writew(u16 w, void __iomem *addr)
+{
+	pv_io_ops.writew(w, addr);
+}
+static inline void pv_writel(u32 l, void __iomem *addr)
+{
+	pv_io_ops.writel(l, addr);
+}
+static inline void pv_writeq(u64 q, void __iomem *addr)
+{
+	pv_io_ops.writell(q, addr);
+}
+
+static inline u8 pv_inb(unsigned long port)
+{
+	return pv_io_ops.inb(port);
+}
+static inline void pv_outb(unsigned char byte, unsigned long port)
+{
+	pv_io_ops.outb(byte, port);
+}
+static inline void pv_outw(u16 halfword, unsigned long port)
+{
+	pv_io_ops.outw(halfword, port);
+}
+static inline u16 pv_inw(unsigned long port)
+{
+	return pv_io_ops.inw(port);
+}
+static inline void pv_outl(u32 word, unsigned long port)
+{
+	pv_io_ops.outl(word, port);
+}
+static inline u32 pv_inl(unsigned long port)
+{
+	return pv_io_ops.inl(port);
+}
+
+static inline void pv_outsb(unsigned long port, const void *src, unsigned long count)
+{
+	pv_io_ops.outsb(port, src,
count); +} +static inline void pv_outsw(unsigned long port, const void *src, unsigned long count) +{ + pv_io_ops.outsw(port, src, count); +} +static inline void pv_outsl(unsigned long port, const void *src, unsigned long count) +{ + pv_io_ops.outsl(port, src, count); +} +static inline void pv_insb(unsigned long port, void *dst, unsigned long count) +{ + pv_io_ops.insb(port, dst, count); +} +static inline void pv_insw(unsigned long port, void *dst, unsigned long count) +{ + pv_io_ops.insw(port, dst, count); +} +static inline void pv_insl(unsigned long port, void *dst, unsigned long count) +{ + pv_io_ops.insl(port, dst, count); +} + +static inline void +pv_conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + pv_io_ops.conf_inb(domain, bus, port, byte); +} +static inline void +pv_conf_inw(unsigned int domain, unsigned int bus, unsigned long port, + u16 *hword) +{ + pv_io_ops.conf_inw(domain, bus, port, hword); +} +static inline void +pv_conf_inl(unsigned int domain, unsigned int bus, unsigned long port, + u32 *word) +{ + pv_io_ops.conf_inl(domain, bus, port, word); +} +static inline void +pv_conf_outb(unsigned int domain, unsigned int bus, unsigned long port, + u8 byte) +{ + pv_io_ops.conf_outb(domain, bus, port, byte); +} +static inline void +pv_conf_outw(unsigned int domain, unsigned int bus, unsigned long port, + u16 hword) +{ + pv_io_ops.conf_outw(domain, bus, port, hword); +} +static inline void +pv_conf_outl(unsigned int domain, unsigned int bus, unsigned long port, + u32 word) +{ + pv_io_ops.conf_outl(domain, bus, port, word); +} + +static inline void boot_pv_debug_cons_outb(u8 byte, u16 port) +{ + BOOT_PARAVIRT_OUT_OP(boot_debug_cons_outb, byte, port); +} + +static inline u8 boot_pv_debug_cons_inb(u16 port) +{ + return BOOT_PARAVIRT_IN_OP(boot_debug_cons_inb, port); +} + +static inline u32 boot_pv_debug_cons_inl(u16 port) +{ + return BOOT_PARAVIRT_IN_OP(boot_debug_cons_inl, port); +} + +static inline void pv_debug_cons_outb(u8 byte, u16 port) +{ + pv_boot_ops.debug_cons_outb(byte, port); +} + +static inline void pv_debug_cons_outb_p(u8 byte, u16 port) +{ + pv_boot_ops.debug_cons_outb(byte, port); +} + +static inline u8 pv_debug_cons_inb(u16 port) +{ + return pv_boot_ops.debug_cons_inb(port); +} + +static inline u32 pv_debug_cons_inl(u16 port) +{ + return pv_boot_ops.debug_cons_inl(port); +} + +static inline int __init pv_arch_pci_init(void) +{ + return pv_io_ops.pci_init(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void boot_writeb(u8 b, void __iomem *addr) +{ + boot_pv_writeb(b, addr); +} +static inline void boot_writew(u16 w, void __iomem *addr) +{ + boot_pv_writew(w, addr); +} +static inline void boot_writel(u32 l, void __iomem *addr) +{ + boot_pv_writel(l, addr); +} +static inline u8 boot_readb(void __iomem *addr) +{ + return boot_pv_readb(addr); +} +static inline u16 boot_readw(void __iomem *addr) +{ + return boot_pv_readw(addr); +} +static inline u32 boot_readl(void __iomem *addr) +{ + return boot_pv_readl(addr); +} + +static inline void +conf_inb(unsigned int domain, unsigned int bus, unsigned long port, u8 *byte) +{ + pv_conf_inb(domain, bus, port, byte); +} +static inline void +conf_inw(unsigned int domain, unsigned int bus, unsigned long port, u16 *hword) +{ + pv_conf_inw(domain, bus, port, hword); +} +static inline void +conf_inl(unsigned int domain, unsigned int bus, unsigned long port, u32 *word) +{ + pv_conf_inl(domain, bus, port, word); +} +static inline void +conf_outb(unsigned int 
domain, unsigned int bus, unsigned long port, u8 byte) +{ + pv_conf_outb(domain, bus, port, byte); +} +static inline void +conf_outw(unsigned int domain, unsigned int bus, unsigned long port, u16 hword) +{ + pv_conf_outw(domain, bus, port, hword); +} +static inline void +conf_outl(unsigned int domain, unsigned int bus, unsigned long port, u32 word) +{ + pv_conf_outl(domain, bus, port, word); +} + +static inline void boot_debug_cons_outb(u8 byte, u16 port) +{ + boot_pv_debug_cons_outb(byte, port); +} +static inline u8 boot_debug_cons_inb(u16 port) +{ + return boot_pv_debug_cons_inb(port); +} +static inline u32 boot_debug_cons_inl(u16 port) +{ + return boot_pv_debug_cons_inl(port); +} +static inline void debug_cons_outb(u8 byte, u16 port) +{ + pv_debug_cons_outb(byte, port); +} +static inline void debug_cons_outb_p(u8 byte, u16 port) +{ + pv_debug_cons_outb(byte, port); +} +static inline u8 debug_cons_inb(u16 port) +{ + return pv_debug_cons_inb(port); +} +static inline u32 debug_cons_inl(u16 port) +{ + return pv_debug_cons_inl(port); +} + +static inline int __init arch_pci_init(void) +{ + return pv_arch_pci_init(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_ASM_PARAVIRT_IO_H_ */ diff --git a/arch/e2k/include/asm/paravirt/mman.h b/arch/e2k/include/asm/paravirt/mman.h new file mode 100644 index 0000000..c713735 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mman.h @@ -0,0 +1,203 @@ +#ifndef __ASM_PARAVIRT_MMAN_H +#define __ASM_PARAVIRT_MMAN_H + +#ifdef __KERNEL__ + +#include +#include + +struct mm_struct; +struct vm_area_struct; +struct task_struct; +struct vmap_area; + +/* Memory management mman */ + +static inline int +pv_remap_area_pages(unsigned long address, unsigned long phys_addr, + unsigned long size, unsigned long flags) +{ + return pv_mmu_ops.remap_area_pages(address, phys_addr, size, flags); +} + +static inline int +pv_host_guest_vmap_area(e2k_addr_t start, e2k_addr_t end) +{ + if (pv_mmu_ops.host_guest_vmap_area != NULL) + return pv_mmu_ops.host_guest_vmap_area(start, end); + else + return 0; +} +static inline int +pv_unhost_guest_vmap_area(e2k_addr_t start, e2k_addr_t end) +{ + if (pv_mmu_ops.unhost_guest_vmap_area != NULL) + return pv_mmu_ops.unhost_guest_vmap_area(start, end); + else + return 0; +} + +/* + * Memory management mman + */ +extern inline void pv_free_mm(struct mm_struct *mm) +{ + pv_mmu_ops.free_mm(mm); +} +extern inline struct mm_struct *pv_mm_init(struct mm_struct *mm, + struct task_struct *p, + struct user_namespace *user_ns) +{ + return pv_mmu_ops.mm_init(mm, p, user_ns); +} +static inline int pv_make_host_pages_valid(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, + bool chprot, bool flush) +{ + return pv_mmu_ops.make_host_pages_valid(vma, start_addr, end_addr, + chprot, flush); +} +static inline int +pv_set_memory_attr_on_host(e2k_addr_t start, e2k_addr_t end, + enum sma_mode mode) +{ + return pv_mmu_ops.set_memory_attr_on_host(start, end, mode); +} +static inline int pv_access_process_vm(struct task_struct *tsk, + unsigned long addr, void *buf, int len, + unsigned int gup_flags) +{ + return pv_mmu_ops.access_process_vm(tsk, addr, buf, len, gup_flags); +} + +extern inline struct vmap_area *pv_alloc_vmap_area(unsigned long size, + unsigned long align, + unsigned long vstart, unsigned long vend, + int node, gfp_t gfp_mask) +{ + return pv_mmu_ops.alloc_vmap_area(size, align, vstart, vend, + node, gfp_mask); +} +extern inline void pv__free_vmap_area(struct vmap_area *va) +{ + pv_mmu_ops.__free_vmap_area(va); +} +#ifdef 
CONFIG_SMP
+extern inline struct vm_struct **
+pv_pcpu_get_vm_areas(const unsigned long *offsets,
+				const size_t *sizes, int nr_vms,
+				size_t align)
+{
+	return pv_mmu_ops.pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
+}
+#endif /* CONFIG_SMP */
+static inline void
+pv_free_pgd_range(struct mmu_gather *tlb,
+			unsigned long addr, unsigned long end,
+			unsigned long floor, unsigned long ceiling)
+{
+	pv_mmu_ops.free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+extern inline void pv_free_unmap_vmap_area(struct vmap_area *va)
+{
+	pv_mmu_ops.free_unmap_vmap_area(va);
+}
+
+extern inline void pv_unmap_initmem(void *start, void *end)
+{
+	if (pv_mmu_ops.unmap_initmem) {
+		pv_mmu_ops.unmap_initmem(start, end);
+	}
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* Memory management mman */
+static inline void free_mm(struct mm_struct *mm)
+{
+	pv_free_mm(mm);
+}
+static inline struct mm_struct *mm_init(struct mm_struct *mm,
+					struct task_struct *p,
+					struct user_namespace *user_ns)
+{
+	return pv_mm_init(mm, p, user_ns);
+}
+static inline int make_host_pages_valid(struct vm_area_struct *vma,
+				e2k_addr_t start_addr, e2k_addr_t end_addr,
+				bool chprot, bool flush)
+{
+	return pv_make_host_pages_valid(vma, start_addr, end_addr,
+					chprot, flush);
+}
+static inline int
+set_memory_attr_on_host(e2k_addr_t start, e2k_addr_t end,
+				enum sma_mode mode)
+{
+	return pv_set_memory_attr_on_host(start, end, mode);
+}
+static inline int access_process_vm(struct task_struct *tsk,
+				unsigned long addr, void *buf, int len,
+				unsigned int gup_flags)
+{
+	return pv_access_process_vm(tsk, addr, buf, len, gup_flags);
+}
+
+static inline struct vmap_area *alloc_vmap_area(unsigned long size,
+				unsigned long align,
+				unsigned long vstart, unsigned long vend,
+				int node, gfp_t gfp_mask)
+{
+	return pv_alloc_vmap_area(size, align, vstart, vend,
+					node, gfp_mask);
+}
+static inline void __free_vmap_area(struct vmap_area *va)
+{
+	pv__free_vmap_area(va);
+}
+#ifdef CONFIG_SMP
+static inline struct vm_struct **
+pcpu_get_vm_areas(const unsigned long *offsets,
+			const size_t *sizes, int nr_vms,
+			size_t align)
+{
+	return pv_pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
+}
+#endif /* CONFIG_SMP */
+static inline void
+free_pgd_range(struct mmu_gather *tlb,
+		unsigned long addr, unsigned long end,
+		unsigned long floor, unsigned long ceiling)
+{
+	pv_free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+static inline void free_unmap_vmap_area(struct vmap_area *va)
+{
+	pv_free_unmap_vmap_area(va);
+}
+static inline void unmap_initmem(void *start, void *end)
+{
+	pv_unmap_initmem(start, end);
+}
+static inline int
+remap_area_pages(unsigned long address, unsigned long phys_addr,
+		unsigned long size, unsigned long flags)
+{
+	return pv_remap_area_pages(address, phys_addr, size, flags);
+}
+
+static inline int
+host_guest_vmap_area(e2k_addr_t start, e2k_addr_t end)
+{
+	return pv_host_guest_vmap_area(start, end);
+}
+static inline int
+unhost_guest_vmap_area(e2k_addr_t start, e2k_addr_t end)
+{
+	return pv_unhost_guest_vmap_area(start, end);
+}
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARAVIRT_MMAN_H */
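[Editor's note: several hooks in mman.h above (host_guest_vmap_area, unhost_guest_vmap_area, unmap_initmem, fix_process_pt_regs later in this patch) are optional: the wrapper NULL-checks the slot and falls back to a no-op or 0 when no backend was installed. A minimal stand-alone sketch of that "optional hook" pattern follows; all demo_* names are hypothetical, not part of this patch.]

#include <stddef.h>
#include <stdio.h>

struct demo_mmu_ops {
	/* Mandatory hook: always installed at init. */
	int (*remap_area)(unsigned long addr, unsigned long size);
	/* Optional hook: may legitimately stay NULL. */
	int (*host_guest_vmap_area)(unsigned long start, unsigned long end);
};

static int demo_remap_area(unsigned long addr, unsigned long size)
{
	printf("remap %#lx..%#lx\n", addr, addr + size);
	return 0;
}

/* Only the mandatory slot is filled in; the optional one stays NULL. */
static struct demo_mmu_ops demo_mmu_ops = {
	.remap_area = demo_remap_area,
};

/* Same shape as the wrapper above: an absent hook means "nothing to do". */
static inline int pv_host_guest_vmap_area(unsigned long start,
					unsigned long end)
{
	if (demo_mmu_ops.host_guest_vmap_area != NULL)
		return demo_mmu_ops.host_guest_vmap_area(start, end);
	else
		return 0;
}

int main(void)
{
	demo_mmu_ops.remap_area(0x1000, 0x2000);
	printf("optional hook -> %d\n", pv_host_guest_vmap_area(0x1000, 0x3000));
	return 0;
}

[This keeps callers unconditional: only backends that actually need the extra work, such as a host tracking guest vmalloc areas, pay for it.]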
diff --git a/arch/e2k/include/asm/paravirt/mmu.h b/arch/e2k/include/asm/paravirt/mmu.h
new file mode 100644
index 0000000..0848346
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/mmu.h
@@ -0,0 +1,130 @@
+#ifndef __ASM_PARAVIRT_MMU_H
+#define __ASM_PARAVIRT_MMU_H
+
+#ifdef __KERNEL__
+
+#include
+#include
+
+static inline long
+pv_recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data, u32 data_tag,
+		u64 st_rec_opc, int chan)
+{
+	return pv_mmu_ops.recovery_faulted_tagged_store(address, wr_data,
+			data_tag, st_rec_opc, chan);
+}
+static inline long
+pv_recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
+		u64 ld_rec_opc, int chan)
+{
+	return pv_mmu_ops.recovery_faulted_load(address, ld_val, data_tag,
+			ld_rec_opc, chan);
+}
+static inline long
+pv_recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
+		int format, int vr, u64 ld_rec_opc, int chan)
+{
+	return pv_mmu_ops.recovery_faulted_move(addr_from, addr_to,
+			format, vr, ld_rec_opc, chan);
+}
+static inline long
+pv_recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d, int format,
+		int vr, u64 ld_rec_opc, int chan, void *saved_greg)
+{
+	return pv_mmu_ops.recovery_faulted_load_to_greg(address, greg_num_d,
+			format, vr, ld_rec_opc, chan, saved_greg);
+}
+
+static inline bool
+pv_is_guest_kernel_gregs(struct thread_info *ti,
+			unsigned greg_num_d, u64 **greg_copy)
+{
+	if (!paravirt_enabled())
+		return native_is_guest_kernel_gregs(ti, greg_num_d, greg_copy);
+	else
+		return kvm_is_guest_kernel_gregs(ti, greg_num_d, greg_copy);
+}
+static inline void
+pv_move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	pv_mmu_ops.move_tagged_word(addr_from, addr_to);
+}
+static inline void
+pv_move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	pv_mmu_ops.move_tagged_dword(addr_from, addr_to);
+}
+static inline void
+pv_move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	pv_mmu_ops.move_tagged_qword(addr_from, addr_to);
+}
+static inline void
+pv_save_DAM(unsigned long long dam[DAM_ENTRIES_NUM])
+{
+	pv_mmu_ops.save_DAM(dam);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+static inline long
+recovery_faulted_tagged_store(e2k_addr_t address, u64 wr_data,
+		u32 data_tag, u64 st_rec_opc, int chan)
+{
+	return pv_recovery_faulted_tagged_store(address, wr_data, data_tag,
+			st_rec_opc, chan);
+}
+static inline long
+recovery_faulted_load(e2k_addr_t address, u64 *ld_val, u8 *data_tag,
+		u64 ld_rec_opc, int chan)
+{
+	return pv_recovery_faulted_load(address, ld_val, data_tag,
+			ld_rec_opc, chan);
+}
+static inline long
+recovery_faulted_move(e2k_addr_t addr_from, e2k_addr_t addr_to,
+		int format, int vr, u64 ld_rec_opc, int chan)
+{
+	return pv_recovery_faulted_move(addr_from, addr_to, format, vr,
+			ld_rec_opc, chan);
+}
+static inline long
+recovery_faulted_load_to_greg(e2k_addr_t address, u32 greg_num_d,
+		int format, int vr, u64 ld_rec_opc, int chan, void *saved_greg)
+{
+	return pv_recovery_faulted_load_to_greg(address, greg_num_d,
+			format, vr, ld_rec_opc, chan, saved_greg);
+}
+static inline bool
+is_guest_kernel_gregs(struct thread_info *ti,
+			unsigned greg_num_d, u64 **greg_copy)
+{
+	return pv_is_guest_kernel_gregs(ti, greg_num_d, greg_copy);
+}
+static inline void
+move_tagged_word(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	pv_move_tagged_word(addr_from, addr_to);
+}
+static inline void
+move_tagged_dword(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	pv_move_tagged_dword(addr_from, addr_to);
+}
+static inline void
+move_tagged_qword(e2k_addr_t addr_from, e2k_addr_t addr_to)
+{
+	pv_move_tagged_qword(addr_from, addr_to);
+}
+
+static inline void
+save_DAM(unsigned long long dam[DAM_ENTRIES_NUM])
+{
+	pv_save_DAM(dam);
+}
+#define SAVE_DAM(dam)	save_DAM(dam)
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARAVIRT_MMU_H */
diff --git a/arch/e2k/include/asm/paravirt/mmu_context.h
b/arch/e2k/include/asm/paravirt/mmu_context.h new file mode 100644 index 0000000..9203ea1 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mmu_context.h @@ -0,0 +1,37 @@ +#ifndef __ASM_PARAVIRT_MMU_CONTEXT_H +#define __ASM_PARAVIRT_MMU_CONTEXT_H + +#ifdef __KERNEL__ + +#include +#include + +static inline void +pv_activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + pv_mmu_ops.activate_mm(active_mm, mm); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void +activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +{ + pv_activate_mm(active_mm, mm); +} + +static inline void call_switch_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm, struct task_struct *next, + int switch_pgd, int switch_mm) +{ + if (!paravirt_enabled() || IS_HV_GM()) + native_call_switch_mm(prev_mm, next_mm, next, + switch_pgd, switch_mm); + else + kvm_call_switch_mm(prev_mm, next_mm, next, + switch_pgd, switch_mm); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_PARAVIRT_MMU_CONTEXT_H */ diff --git a/arch/e2k/include/asm/paravirt/mmu_regs_access.h b/arch/e2k/include/asm/paravirt/mmu_regs_access.h new file mode 100644 index 0000000..212ef96 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/mmu_regs_access.h @@ -0,0 +1,632 @@ +/* + * E2K MMU registers access paravirtualization + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_MMU_REGS_ACCESS_H_ +#define _E2K_PARAVIRT_MMU_REGS_ACCESS_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +/* + * Write/read MMU register + */ +#ifdef CONFIG_KVM_GUEST +#include + +static inline void +PV_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + pv_mmu_ops.write_mmu_reg(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t +PV_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return pv_mmu_ops.read_mmu_reg(mmu_addr); +} + +static inline void PV_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long PV_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(PV_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void PV_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long PV_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(PV_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void PV_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline unsigned long PV_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(PV_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} + +static inline void +BOOT_PV_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + BOOT_PARAVIRT_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t +BOOT_PV_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return BOOT_PARAVIRT_READ_MMU_REG(mmu_addr); +} + +static inline void BOOT_PV_WRITE_MMU_OS_PPTB_REG(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_REG(MMU_ADDR_OS_PPTB, reg_val); +} +static inline unsigned long BOOT_PV_READ_MMU_OS_PPTB_REG(void) +{ + return mmu_reg_val(BOOT_PV_READ_MMU_REG(MMU_ADDR_OS_PPTB)); +} +static inline void BOOT_PV_WRITE_MMU_OS_VPTB_REG(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_REG(MMU_ADDR_OS_VPTB, reg_val); +} +static inline unsigned long BOOT_PV_READ_MMU_OS_VPTB_REG(void) +{ + return mmu_reg_val(BOOT_PV_READ_MMU_REG(MMU_ADDR_OS_VPTB)); +} +static inline void BOOT_PV_WRITE_MMU_OS_VAB_REG(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_REG(MMU_ADDR_OS_VAB, reg_val); +} +static inline unsigned long 
BOOT_PV_READ_MMU_OS_VAB_REG(void) +{ + return mmu_reg_val(BOOT_PV_READ_MMU_REG(MMU_ADDR_OS_VAB)); +} + +/* + * Write/read Data TLB register + */ + +static inline void +PV_WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + pv_mmu_ops.write_dtlb_reg(tlb_addr, mmu_reg); +} + +static inline mmu_reg_t +PV_READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return pv_mmu_ops.read_dtlb_reg(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static inline void +PV_FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + pv_mmu_ops.flush_tlb_entry(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static inline void +PV_FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + pv_mmu_ops.flush_dcache_line(virt_addr); +} + +/* + * Clear DCACHE L1 set + */ +static inline void +PV_CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + pv_mmu_ops.clear_dcache_l1_set(virt_addr, set); +} + +/* + * Write/read DCACHE L2 registers + */ +static inline void +PV_WRITE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + pv_mmu_ops.write_dcache_l2_reg(reg_val, reg_num, bank_num); +} +static inline unsigned long +PV_READ_L2_REG(int reg_num, int bank_num) +{ + return pv_mmu_ops.read_dcache_l2_reg(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ +static inline void +PV_FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + pv_mmu_ops.flush_icache_line(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ +static inline void +PV_FLUSH_CACHE_L12(flush_op_t flush_op) +{ + pv_mmu_ops.flush_cache_all(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ +static inline void +PV_FLUSH_TLB_ALL(flush_op_t flush_op) +{ + pv_mmu_ops.do_flush_tlb_all(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ +static inline void +PV_FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + pv_mmu_ops.flush_icache_all(flush_op); +} + +/* + * Get Entry probe for virtual address + */ +static inline probe_entry_t +PV_ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return pv_mmu_ops.entry_probe_mmu_op(virt_addr); +} + +/* + * Get physical address for virtual address + */ +static inline probe_entry_t +PV_ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return pv_mmu_ops.address_probe_mmu_op(virt_addr); +} + +/* + * Read CLW register + */ +static inline clw_reg_t +PV_READ_CLW_REG(clw_addr_t clw_addr) +{ + return pv_mmu_ops.read_clw_reg(clw_addr); +} + +/* + * MMU DEBUG registers access + */ +static inline void +PV_WRITE_MMU_DEBUG_REG_VALUE(int reg_no, mmu_reg_t mmu_reg) +{ + pv_mmu_ops.write_mmu_debug_reg(reg_no, mmu_reg); +} +static inline mmu_reg_t +PV_READ_MMU_DEBUG_REG_VALUE(int reg_no) +{ + return pv_mmu_ops.read_mmu_debug_reg(reg_no); +} + +static inline mmu_reg_t +PV_READ_DDBAR0_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBAR1_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBAR2_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBAR3_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBCR_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDBSR_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO); +} +static inline mmu_reg_t 
+PV_READ_DDMAR0_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDMAR1_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO); +} +static inline mmu_reg_t +PV_READ_DDMCR_REG_VALUE(void) +{ + return PV_READ_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO); +} +static inline void +PV_WRITE_DDBAR0_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR0_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBAR1_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR1_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBAR2_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR2_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBAR3_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBAR3_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBCR_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBCR_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDBSR_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDBSR_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDMAR0_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR0_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDMAR1_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMAR1_REG_NO, mmu_reg); +} +static inline void +PV_WRITE_DDMCR_REG_VALUE(mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_DEBUG_REG_VALUE(MMU_DDMCR_REG_NO, mmu_reg); +} +#else /* ! CONFIG_KVM_GUEST */ + #error "Unknown virtualization type" +#endif /* CONFIG_KVM_GUEST */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +static inline void WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + PV_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return (mmu_reg_t)PV_READ_MMU_REG(mmu_addr); +} + +static inline void WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_PPTB(void) +{ + return PV_READ_MMU_OS_PPTB_REG(); +} +static inline void WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VPTB(void) +{ + return PV_READ_MMU_OS_VPTB_REG(); +} +static inline void WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + PV_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long READ_MMU_OS_VAB(void) +{ + return PV_READ_MMU_OS_VAB_REG(); +} + +static inline void +BOOT_WRITE_MMU_REG(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg) +{ + BOOT_PV_WRITE_MMU_REG(mmu_addr, mmu_reg); +} + +static inline mmu_reg_t +BOOT_READ_MMU_REG(mmu_addr_t mmu_addr) +{ + return BOOT_PV_READ_MMU_REG(mmu_addr); +} + +static inline void BOOT_WRITE_MMU_OS_PPTB(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_OS_PPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_PPTB(void) +{ + return BOOT_PV_READ_MMU_OS_PPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VPTB(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_OS_VPTB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VPTB(void) +{ + return BOOT_PV_READ_MMU_OS_VPTB_REG(); +} +static inline void BOOT_WRITE_MMU_OS_VAB(mmu_reg_t reg_val) +{ + BOOT_PV_WRITE_MMU_OS_VAB_REG(reg_val); +} +static inline unsigned long BOOT_READ_MMU_OS_VAB(void) +{ + return BOOT_PV_READ_MMU_OS_VAB_REG(); +} + +/* + * Write/read Data TLB register + */ + +static inline void +WRITE_DTLB_REG(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg) +{ + 
PV_WRITE_DTLB_REG(tlb_addr, mmu_reg); +} + +static inline mmu_reg_t +READ_DTLB_REG(tlb_addr_t tlb_addr) +{ + return PV_READ_DTLB_REG(tlb_addr); +} + +/* + * Flush TLB page/entry + */ + +static inline void +FLUSH_TLB_ENTRY(flush_op_t flush_op, flush_addr_t flush_addr) +{ + PV_FLUSH_TLB_ENTRY(flush_op, flush_addr); +} + +/* + * Flush DCACHE line + */ + +static inline void FLUSH_DCACHE_LINE(e2k_addr_t virt_addr) +{ + PV_FLUSH_DCACHE_LINE(virt_addr); +} +static inline void FLUSH_DCACHE_LINE_OFFSET(e2k_addr_t virt_addr, size_t offset) +{ + PV_FLUSH_DCACHE_LINE(virt_addr + offset); +} + +/* + * Clear DCACHE L1 set + */ +static inline void +CLEAR_DCACHE_L1_SET(e2k_addr_t virt_addr, unsigned long set) +{ + PV_CLEAR_DCACHE_L1_SET(virt_addr, set); +} + +/* + * Write/read DCACHE L2 registers + */ +static inline void +WRITE_L2_REG(unsigned long reg_val, int reg_num, int bank_num) +{ + PV_WRITE_L2_REG(reg_val, reg_num, bank_num); +} +static inline unsigned long +READ_L2_REG(int reg_num, int bank_num) +{ + return PV_READ_L2_REG(reg_num, bank_num); +} + +/* + * Flush ICACHE line + */ + +static inline void +FLUSH_ICACHE_LINE(flush_op_t flush_op, flush_addr_t flush_addr) +{ + PV_FLUSH_ICACHE_LINE(flush_op, flush_addr); +} + +/* + * Flush and invalidate or write back CACHE(s) (invalidate all caches + * of the processor) + */ + +static inline void +FLUSH_CACHE_L12(flush_op_t flush_op) +{ + PV_FLUSH_CACHE_L12(flush_op); +} + +/* + * Flush TLB (invalidate all TLBs of the processor) + */ + +static inline void +FLUSH_TLB_ALL(flush_op_t flush_op) +{ + PV_FLUSH_TLB_ALL(flush_op); +} + +/* + * Flush ICACHE (invalidate instruction caches of the processor) + */ + +static inline void +FLUSH_ICACHE_ALL(flush_op_t flush_op) +{ + PV_FLUSH_ICACHE_ALL(flush_op); +} + +/* + * Get Entry probe for virtual address + */ + +static inline probe_entry_t +ENTRY_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return PV_ENTRY_PROBE_MMU_OP(virt_addr); +} + +/* + * Get physical address for virtual address + */ + +static inline probe_entry_t +ADDRESS_PROBE_MMU_OP(e2k_addr_t virt_addr) +{ + return PV_ADDRESS_PROBE_MMU_OP(virt_addr); +} + +/* + * Read CLW register + */ + +static inline clw_reg_t +READ_CLW_REG(clw_addr_t clw_addr) +{ + return PV_READ_CLW_REG(clw_addr); +} + +/* + * KVM MMU DEBUG registers access + */ +static inline mmu_reg_t +READ_DDBAR0_REG_VALUE(void) +{ + return PV_READ_DDBAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR1_REG_VALUE(void) +{ + return PV_READ_DDBAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR2_REG_VALUE(void) +{ + return PV_READ_DDBAR2_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBAR3_REG_VALUE(void) +{ + return PV_READ_DDBAR3_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBCR_REG_VALUE(void) +{ + return PV_READ_DDBCR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDBSR_REG_VALUE(void) +{ + return PV_READ_DDBSR_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR0_REG_VALUE(void) +{ + return PV_READ_DDMAR0_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMAR1_REG_VALUE(void) +{ + return PV_READ_DDMAR1_REG_VALUE(); +} +static inline mmu_reg_t +READ_DDMCR_REG_VALUE(void) +{ + return PV_READ_DDMCR_REG_VALUE(); +} +static inline void +WRITE_DDBAR0_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR0_REG_VALUE(value); +} +static inline void +WRITE_DDBAR1_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR1_REG_VALUE(value); +} +static inline void +WRITE_DDBAR2_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBAR2_REG_VALUE(value); +} +static inline void +WRITE_DDBAR3_REG_VALUE(mmu_reg_t value) +{ + 
PV_WRITE_DDBAR3_REG_VALUE(value); +} +static inline void +WRITE_DDBCR_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBCR_REG_VALUE(value); +} +static inline void +WRITE_DDBSR_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDBSR_REG_VALUE(value); +} +static inline void +WRITE_DDMAR0_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDMAR0_REG_VALUE(value); +} +static inline void +WRITE_DDMAR1_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDMAR1_REG_VALUE(value); +} +static inline void +WRITE_DDMCR_REG_VALUE(mmu_reg_t value) +{ + PV_WRITE_DDMCR_REG_VALUE(value); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* _E2K_PARAVIRT_MMU_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/paravirt/pgatomic.h b/arch/e2k/include/asm/paravirt/pgatomic.h new file mode 100644 index 0000000..426fd86 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pgatomic.h @@ -0,0 +1,127 @@ +/* + * E2K paravirtualized page table atomic operations. + * + * Copyright 2018 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_PGATOMIC_H +#define _E2K_PARAVIRT_PGATOMIC_H + +#include + +#include +#include +#include +#include + +static inline pgprotval_t +pv_pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_set_wrprotect_atomic(mm, addr, pgprot); + } else { + return native_pt_set_wrprotect_atomic(&pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_get_and_clear_atomic(mm, addr, pgprot); + } else { + return native_pt_get_and_clear_atomic(&pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_get_and_xchg_atomic(mm, addr, newval, pgprot); + } else { + return native_pt_get_and_xchg_atomic(newval, &pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_clear_relaxed_atomic(mask, pgprot); + } else { + return native_pt_clear_relaxed_atomic(mask, &pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_clear_young_atomic(mm, addr, pgprot); + } else { + return native_pt_clear_young_atomic(&pgprot->pgprot); + } +} + +static inline pgprotval_t +pv_pt_modify_prot_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + if (paravirt_enabled()) { + return kvm_pt_modify_prot_atomic(mm, addr, pgprot); + } else { + return native_pt_modify_prot_atomic(&pgprot->pgprot); + } +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ + +static inline pgprotval_t +pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_set_wrprotect_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_get_and_clear_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + return pv_pt_get_and_xchg_atomic(mm, addr, newval, pgprot); +} + +static inline pgprotval_t +pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) 
+{ + return pv_pt_clear_relaxed_atomic(mask, pgprot); +} + +static inline pgprotval_t +pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_clear_young_atomic(mm, addr, pgprot); +} + +static inline pgprotval_t +pt_modify_prot_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return pv_pt_modify_prot_atomic(mm, addr, pgprot); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! _E2K_PARAVIRT_PGATOMIC_H */ diff --git a/arch/e2k/include/asm/paravirt/pgtable.h b/arch/e2k/include/asm/paravirt/pgtable.h new file mode 100644 index 0000000..a030a64 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pgtable.h @@ -0,0 +1,244 @@ +#ifndef __ASM_PARAVIRT_PGTABLE_H +#define __ASM_PARAVIRT_PGTABLE_H + +#ifdef __KERNEL__ + +#include +#include + +static inline void +pv_write_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval, + bool only_validate, bool to_move) +{ + pv_mmu_ops.write_pte_at(mm, addr, ptep, pteval, only_validate, to_move); +} +static inline void +pv_set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_write_pte_at(mm, addr, ptep, pteval, false, false); +} +static inline void +pv_set_pte_to_move_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_write_pte_at(mm, addr, ptep, pteval, false, true); +} +static inline void +pv_validate_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_write_pte_at(mm, addr, ptep, pteval, true, false); +} +static inline void boot_pv_set_pte_at(unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + BOOT_PARAVIRT_GET_MMU_FUNC(boot_set_pte_at)(addr, ptep, pteval); +} +/* private case set pte for guest kernel address */ +static inline void pv_set_pte(pte_t *ptep, pte_t pteval) +{ + pv_mmu_ops.set_pte(ptep, pteval); +} + +static inline void +pv_write_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval, + bool only_validate) +{ + pv_mmu_ops.write_pmd_at(mm, addr, pmdp, pmdval, only_validate); +} +static inline void +pv_set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + pv_write_pmd_at(mm, addr, pmdp, pmdval, false); +} +static inline void +pv_validate_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + pv_write_pmd_at(mm, addr, pmdp, pmdval, true); +} + +static inline void +pv_write_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval, + bool only_validate) +{ + pv_mmu_ops.write_pud_at(mm, addr, pudp, pudval, only_validate); +} +static inline void +pv_set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval) +{ + pv_write_pud_at(mm, addr, pudp, pudval, false); +} +static inline void +pv_validate_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval) +{ + pv_write_pud_at(mm, addr, pudp, pudval, true); +} + +static inline void +pv_write_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval, + bool only_validate) +{ + pv_mmu_ops.write_pgd_at(mm, addr, pgdp, pgdval, only_validate); +} +static inline void +pv_set_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval) +{ + pv_write_pgd_at(mm, addr, pgdp, pgdval, false); +} +static inline void +pv_validate_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval) +{ + pv_write_pgd_at(mm, addr, pgdp, pgdval, true); +} + +static inline pte_t pv_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, 
pte_t *ptep) +{ + return pv_mmu_ops.ptep_get_and_clear(mm, addr, ptep, false); +} +static inline pte_t pv_ptep_get_and_clear_to_move(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return pv_mmu_ops.ptep_get_and_clear(mm, addr, ptep, true); +} +static inline pte_t pv_ptep_get_and_clear_as_valid(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return pv_mmu_ops.ptep_get_and_clear_as_valid(mm, addr, ptep); +} +static inline void pv_ptep_wrprotect_atomic(struct mm_struct *mm, + e2k_addr_t addr, pte_t *ptep) +{ + if (pv_mmu_ops.ptep_wrprotect_atomic) + pv_mmu_ops.ptep_wrprotect_atomic(mm, addr, ptep); +} + +static inline pte_t pv_get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return pv_mmu_ops.get_pte_for_address(vma, address); +} + +#ifdef CONFIG_PARAVIRT_GUEST + +#include + +#define set_pte_not_present_at set_pte_at +static inline void +set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_set_pte_at(mm, addr, ptep, pteval); +} +static inline void +set_pte_to_move_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_set_pte_to_move_at(mm, addr, ptep, pteval); +} +static inline void +validate_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + pv_validate_pte_at(mm, addr, ptep, pteval); +} +static inline void boot_set_pte_at(unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + boot_pv_set_pte_at(addr, ptep, pteval); +} +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + pv_set_pte(ptep, pteval); +} + +static inline void +set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + pv_set_pmd_at(mm, addr, pmdp, pmdval); +} +static inline void +validate_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, + pmd_t pmdval) +{ + pv_validate_pmd_at(mm, addr, pmdp, pmdval); +} + +static inline void +set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval) +{ + pv_set_pud_at(mm, addr, pudp, pudval); +} +static inline void +validate_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp) +{ + pv_validate_pud_at(mm, addr, pudp, __pud(_PAGE_INIT_VALID)); +} +static inline void +invalidate_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp) +{ + pv_validate_pud_at(mm, addr, pudp, __pud(0)); +} + +static inline void +set_pgd_at(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval) +{ + pv_set_pgd_at(mm, addr, pgdp, pgdval); +} +static inline void +validate_pgd_at(struct mm_struct *mm, unsigned long addr, pgd_t *pgdp) +{ + pv_validate_pgd_at(mm, addr, pgdp, __pgd(_PAGE_INIT_VALID)); +} +static inline void +invalidate_pgd_at(struct mm_struct *mm, unsigned long addr, pgd_t *pgdp) +{ + pv_validate_pgd_at(mm, addr, pgdp, __pgd(0)); +} + +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return pv_ptep_get_and_clear(mm, addr, ptep); +} +static inline pte_t ptep_get_and_clear_to_move(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return pv_ptep_get_and_clear_to_move(mm, addr, ptep); +} +static inline pte_t ptep_get_and_clear_as_valid(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return pv_ptep_get_and_clear_as_valid(mm, addr, ptep); +} +static inline void ptep_wrprotect_atomic(struct mm_struct *mm, + e2k_addr_t addr, pte_t *ptep) +{ + pv_ptep_wrprotect_atomic(mm, addr, ptep); +} + +static inline pte_t get_pte_for_address(struct vm_area_struct *vma, + e2k_addr_t address) +{ + return 
pv_get_pte_for_address(vma, address); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_PARAVIRT_PGTABLE_H */ diff --git a/arch/e2k/include/asm/paravirt/process.h b/arch/e2k/include/asm/paravirt/process.h new file mode 100644 index 0000000..a78adf7 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/process.h @@ -0,0 +1,344 @@ +/* + * KVM paravirtualized kernel processes support + * Copyright 2011 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PARAVIRT_PROCESS_H +#define _E2K_PARAVIRT_PROCESS_H + +#include +#include +#include + +static inline void PV_FLUSH_STACKS(void) +{ + if (!paravirt_enabled()) + native_flush_stacks(); + else + pv_cpu_ops.flush_stacks(); +} + +static inline void PV_FLUSH_REGS_STACK(void) +{ + if (!paravirt_enabled()) + native_flush_regs_stack(); + else + pv_cpu_ops.flush_regs_stack(); +} + +static inline void PV_FLUSH_CHAIN_STACK(void) +{ + if (!paravirt_enabled()) + native_flush_chain_stack(); + else + pv_cpu_ops.flush_chain_stack(); +} + +static inline void BOOT_PV_FLUSH_STACKS(void) +{ + if (!boot_paravirt_enabled()) + native_flush_stacks(); + else + BOOT_PARAVIRT_FLUSH(flush_stacks); +} + +static inline void BOOT_PV_FLUSH_REGS_STACK(void) +{ + if (!boot_paravirt_enabled()) + native_flush_regs_stack(); + else + BOOT_PARAVIRT_FLUSH(flush_regs_stack); +} + +static inline void BOOT_PV_FLUSH_CHAIN_STACK(void) +{ + if (!boot_paravirt_enabled()) + native_flush_chain_stack(); + else + BOOT_PARAVIRT_FLUSH(flush_chain_stack); +} + +static inline void PV_COPY_STACKS_TO_MEMORY(void) +{ + pv_cpu_ops.copy_stacks_to_memory(); +} + +#define PV_FLUSHCPU PV_FLUSH_STACKS() +#define PV_FLUSHR PV_FLUSH_REGS_STACK() +#define PV_FLUSHC PV_FLUSH_CHAIN_STACK() + +/* see arch/e2k/include/asm/process.h for more details why and how */ +#define PV_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \ +({ \ + if (paravirt_enabled()) { \ + KVM_GUEST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu); \ + } else { \ + KVM_HOST_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu); \ + } \ +}) +#define PV_CHECK_VCPU_THREAD_CONTEXT(__ti) \ +({ \ + if (paravirt_enabled()) { \ + KVM_GUEST_CHECK_VCPU_THREAD_CONTEXT(__ti); \ + } else { \ + KVM_HOST_CHECK_VCPU_THREAD_CONTEXT(__ti); \ + } \ +}) + +#define PV_ONLY_SET_GUEST_GREGS(__ti) \ +({ \ + if (paravirt_enabled()) { \ + KVM_ONLY_SET_GUEST_GREGS(__ti); \ + } else { \ + NATIVE_ONLY_SET_GUEST_GREGS(__ti); \ + } \ +}) + +static inline void +pv_virt_cpu_thread_init(struct task_struct *boot_task) +{ + if (pv_cpu_ops.virt_cpu_thread_init) + pv_cpu_ops.virt_cpu_thread_init(boot_task); +} + +static inline int +pv_copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + return pv_cpu_ops.copy_kernel_stacks(new_task, fn, arg); +} + +static inline int +pv_copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, struct pt_regs *regs) +{ + return pv_cpu_ops.copy_user_stacks(clone_flags, + new_stk_base, new_stk_sz, new_task, regs); +} + +static inline void +pv_define_kernel_hw_stacks_sizes(struct hw_stack *hw_stacks) +{ + pv_cpu_ops.define_kernel_hw_stacks_sizes(hw_stacks); +} +static inline void +boot_pv_define_kernel_hw_stacks_sizes(struct hw_stack *hw_stacks) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(define_kernel_hw_stacks_sizes)(hw_stacks); +} + +static inline void +pv_define_user_hw_stacks_sizes(struct hw_stack *hw_stacks) +{ + pv_cpu_ops.define_user_hw_stacks_sizes(hw_stacks); 
+} + +static inline void +pv_release_hw_stacks(struct thread_info *dead_ti) +{ + pv_cpu_ops.release_hw_stacks(dead_ti); +} + +static inline void +pv_release_kernel_stacks(struct thread_info *dead_ti) +{ + pv_cpu_ops.release_kernel_stacks(dead_ti); +} + +static inline int +pv_switch_to_new_user(struct e2k_stacks *stacks, struct hw_stack *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return pv_cpu_ops.switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); +} + +static inline int +pv_do_map_user_hard_stack_to_kernel(int nid, + e2k_addr_t kernel_start, e2k_addr_t user_stack_base, + e2k_size_t kernel_size) +{ + return pv_cpu_ops.do_map_user_hard_stack_to_kernel(nid, + kernel_start, user_stack_base, kernel_size); +} +static __always_inline int +pv_complete_switch_to_kernel_hardware_stacks(e2k_addr_t ps, e2k_addr_t cs, + unsigned long *delta_proc, unsigned long *delta_chain, + bool to_exit) +{ + if (!paravirt_enabled()) + return native_complete_switch_to_kernel_hardware_stacks(ps, cs, + delta_proc, delta_chain, to_exit); + else + return kvm_complete_switch_to_kernel_hardware_stacks(ps, cs, + delta_proc, delta_chain, to_exit); +} +static __always_inline int +pv_complete_switch_from_kernel_hardware_stacks( + unsigned long delta_proc, unsigned long delta_chain) +{ + if (!paravirt_enabled()) { + native_complete_switch_from_kernel_hardware_stacks( + delta_proc, delta_chain); + return 0; + } else { + return kvm_complete_switch_from_kernel_hardware_stacks( + delta_proc, delta_chain); + } +} +static inline void +pv_free_old_kernel_hardware_stacks(void) +{ + pv_cpu_ops.free_old_kernel_hardware_stacks(); +} + +static inline void +pv_fix_process_pt_regs(struct thread_info *ti, struct e2k_stacks *stacks, + struct pt_regs *regs, struct pt_regs *old_regs) +{ + if (pv_cpu_ops.fix_process_pt_regs) + pv_cpu_ops.fix_process_pt_regs(ti, stacks, regs, old_regs); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* It is paravirtualized host and guest kernel */ + +#define E2K_FLUSHCPU PV_FLUSHCPU +#define E2K_FLUSHR PV_FLUSHR +#define E2K_FLUSHC PV_FLUSHC +#define BOOT_FLUSHCPU BOOT_PV_FLUSH_STACKS() +#define BOOT_FLUSHR BOOT_PV_FLUSH_REGS_STACK() +#define BOOT_FLUSHC BOOT_PV_FLUSH_CHAIN_STACK() + +static inline void COPY_STACKS_TO_MEMORY(void) +{ + PV_COPY_STACKS_TO_MEMORY(); +} + +#define UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu) \ + PV_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, \ + __gti, __vcpu) +#define CHECK_VCPU_THREAD_CONTEXT(__ti) \ + PV_CHECK_VCPU_THREAD_CONTEXT(__ti) +#define ONLY_SET_GUEST_GREGS(ti) PV_ONLY_SET_GUEST_GREGS(ti) + +#ifdef CONFIG_KVM_GUEST + #define GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) \ + KVM_GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value) + #define COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) \ + KVM_COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value) + #define GOTO_DONE_TO_PARAVIRT_GUEST() \ + KVM_GOTO_DONE_TO_PARAVIRT_GUEST() + #define COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) \ + KVM_COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond) +#else + #error "undefined guest kernel type" +#endif /* CONFIG_KVM_GUEST */ + +static inline int +do_map_user_hard_stack_to_kernel(int nid, + e2k_addr_t kernel_start, e2k_addr_t user_stack_base, + e2k_size_t kernel_size) +{ + return pv_do_map_user_hard_stack_to_kernel(nid, + kernel_start, user_stack_base, kernel_size); +} + +static inline void +virt_cpu_thread_init(struct task_struct *boot_task) +{ + 
pv_virt_cpu_thread_init(boot_task); +} + +static inline int +copy_kernel_stacks(struct task_struct *new_task, + unsigned long fn, unsigned long arg) +{ + return pv_copy_kernel_stacks(new_task, fn, arg); +} +static inline int +copy_user_stacks(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, pt_regs_t *regs) +{ + return pv_copy_user_stacks(clone_flags, new_stk_base, new_stk_sz, + new_task, regs); +} + +static inline void +define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + pv_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +boot_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + boot_pv_define_kernel_hw_stacks_sizes(hw_stacks); +} + +static inline void +define_user_hw_stacks_sizes(hw_stack_t *hw_stacks) +{ + pv_define_user_hw_stacks_sizes(hw_stacks); +} + +static inline void +release_hw_stacks(struct thread_info *dead_ti) +{ + pv_release_hw_stacks(dead_ti); +} +static inline void +release_kernel_stacks(struct thread_info *dead_ti) +{ + pv_release_kernel_stacks(dead_ti); +} + +static inline int +switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel) +{ + return pv_switch_to_new_user(stacks, hw_stacks, + cut_base, cut_size, entry_point, cui, flags, kernel); +} + +static __always_inline int +complete_switch_to_kernel_hardware_stacks(e2k_addr_t ps, e2k_addr_t cs, + unsigned long *delta_proc, unsigned long *delta_chain, + bool to_exit) +{ + return pv_complete_switch_to_kernel_hardware_stacks(ps, cs, + delta_proc, delta_chain, to_exit); +} + +static __always_inline int +complete_switch_from_kernel_hardware_stacks( + unsigned long delta_proc, unsigned long delta_chain) +{ + return pv_complete_switch_from_kernel_hardware_stacks( + delta_proc, delta_chain); +} + +static inline void +free_old_kernel_hardware_stacks(void) +{ + pv_free_old_kernel_hardware_stacks(); +} + +static inline void +fix_process_pt_regs(thread_info_t *ti, e2k_stacks_t *stacks, + pt_regs_t *regs, pt_regs_t *old_regs) +{ + pv_fix_process_pt_regs(ti, stacks, regs, old_regs); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* !_E2K_PARAVIRT_PROCESS_H */ diff --git a/arch/e2k/include/asm/paravirt/processor.h b/arch/e2k/include/asm/paravirt/processor.h new file mode 100644 index 0000000..63cd484 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/processor.h @@ -0,0 +1,72 @@ +/* + * KVM guest processor and processes support + * + * Copyright (C) 2014 MCST + */ + +#ifndef _E2K_PARAVIRT_PROCESSOR_H_ +#define _E2K_PARAVIRT_PROCESSOR_H_ + +#ifndef __ASSEMBLY__ + +#include +#include + +static inline int +pv_prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + return pv_cpu_ops.prepare_start_thread_frames(entry, sp); +} + +#define pv_default_idle (pv_cpu_ops.cpu_default_idle) +#define pv_cpu_relax() (pv_cpu_ops.cpu_relax()) +#define pv_cpu_relax_no_resched() (pv_cpu_ops.cpu_relax_no_resched()) + +static inline void +pv_print_machine_type_info(void) +{ + pv_init_ops.print_machine_type_info(); +} + +static inline void +pv_paravirt_banner(void) +{ + if (pv_init_ops.banner) + pv_init_ops.banner(); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* pure guest kernel (not paravirtualized based on pv_ops) */ + +static inline int +prepare_start_thread_frames(unsigned long entry, unsigned long sp) +{ + return pv_prepare_start_thread_frames(entry, sp); +} + +#define default_idle pv_default_idle + +static inline void 
cpu_relax(void) +{ + pv_cpu_relax(); +} +static inline void cpu_relax_no_resched(void) +{ + pv_cpu_relax_no_resched(); +} + +static inline void print_machine_type_info(void) +{ + pv_print_machine_type_info(); +} + +static inline void paravirt_banner(void) +{ + pv_paravirt_banner(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! __ASSEMBLY__ */ +#endif /* _E2K_PARAVIRT_PROCESSOR_H_ */ + + diff --git a/arch/e2k/include/asm/paravirt/pv_info.h b/arch/e2k/include/asm/paravirt/pv_info.h new file mode 100644 index 0000000..b47fee7 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pv_info.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2016 MCST, Salavat Gilyazov atic@mcst.ru + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_PARAVIRT_INFO_H +#define __ASM_E2K_PARAVIRT_INFO_H + +#ifndef __ASSEMBLY__ + +#include + +/* + * general info + */ +typedef struct pv_info { + int paravirt_enabled; + unsigned long page_offset; + unsigned long vmalloc_start; + unsigned long vmalloc_end; + unsigned long vmemmap_start; + unsigned long vmemmap_end; + const char *name; +} pv_info_t; + +extern pv_info_t pv_info; + +/* + * general info + */ +#define PARAVIRT_ENABLED (pv_info.paravirt_enabled) +#define BOOT_PARAVIRT_ENABLED boot_get_vo_value(PARAVIRT_ENABLED) + +#ifndef CONFIG_BOOT_E2K +#define pv_paravirt_enabled() PARAVIRT_ENABLED +#define boot_pv_paravirt_enabled() BOOT_PARAVIRT_ENABLED +#else /* CONFIG_BOOT_E2K */ +#define pv_paravirt_enabled() false +#define boot_pv_paravirt_enabled() false +#endif /* ! CONFIG_BOOT_E2K */ + +#define PV_PAGE_OFFSET (pv_info.page_offset) +#define PV_TASK_SIZE PV_PAGE_OFFSET +#define PV_VMALLOC_START (pv_info.vmalloc_start) +#define PV_VMALLOC_END (pv_info.vmalloc_end) +#define PV_VMEMMAP_START (pv_info.vmemmap_start) +#define PV_VMEMMAP_END (pv_info.vmemmap_end) + +#define BOOT_PV_PAGE_OFFSET \ + boot_get_vo_value(PV_PAGE_OFFSET) +#define BOOT_PV_TASK_SIZE BOOT_PV_PAGE_OFFSET + +#ifdef CONFIG_PARAVIRT_GUEST + +static inline int paravirt_enabled(void) +{ + return pv_paravirt_enabled(); +} +#define boot_paravirt_enabled() boot_pv_paravirt_enabled() + +#define PAGE_OFFSET PV_PAGE_OFFSET +#define TASK_SIZE PV_TASK_SIZE +#define VMALLOC_START PV_VMALLOC_START +#define VMALLOC_END PV_VMALLOC_END +#define VMEMMAP_START PV_VMEMMAP_START +#define VMEMMAP_END PV_VMEMMAP_END + +#define BOOT_PAGE_OFFSET BOOT_PV_PAGE_OFFSET +#define BOOT_TASK_SIZE BOOT_PV_TASK_SIZE +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* ! 
__ASSEMBLY__ */ + +#endif /* __ASM_E2K_PARAVIRT_INFO_H */ diff --git a/arch/e2k/include/asm/paravirt/pv_ops.h b/arch/e2k/include/asm/paravirt/pv_ops.h new file mode 100644 index 0000000..546c770 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/pv_ops.h @@ -0,0 +1,732 @@ +/****************************************************************************** + * Copyright (c) 2008 MCST (C) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_PARAVIRT_PV_OPS_H +#define __ASM_E2K_PARAVIRT_PV_OPS_H + +#ifdef CONFIG_PARAVIRT + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Should use call_single_data_t instead, but there is a problem with forward + * declaration of typedef. + */ +struct __call_single_data; + +struct pt_regs; +struct e2k_stacks; +struct hw_stack; +struct hw_stack_area; +struct thread_info; +struct as_sa_handler_arg; +struct global_regs; +struct local_gregs; +struct page; +struct trap_cellar; +struct user_namespace; +enum pte_mem_type; +struct mmu_gather; + +#define INLINE_FUNC_CALL ((void *)-1UL) /* inline function should be */ + /* called here */ + +/* + * booting hooks. 
+ */ + +struct node_phys_mem; +struct mm_struct; +struct vm_area_struct; +struct thread_info; +struct timespec; +struct timeval; + +typedef struct pv_boot_ops { + void (*boot_setup_machine_id)(bootblock_struct_t *bootblock); + int (*boot_loader_probe_memory)( + struct node_phys_mem *nodes_phys_mem, + boot_info_t *bootblock); + e2k_size_t (*boot_get_bootblock_size)(boot_info_t *bblock); + void (*boot_cpu_relax)(void); +#ifdef CONFIG_SMP + int (*boot_smp_cpu_config)(boot_info_t *bootblock); + void (*boot_smp_node_config)(boot_info_t *bootblock); +#endif /* CONFIG_SMP */ + void (*boot_reserve_all_bootmem)(bool bsp, boot_info_t *boot_info); + void (*boot_map_all_bootmem)(bool bsp, boot_info_t *boot_info); + void (*boot_map_needful_to_equal_virt_area) + (e2k_addr_t stack_top_addr); + void (*boot_kernel_switch_to_virt)(bool bsp, int cpuid, + void (*boot_init_sequel_func)(bool bsp, int cpuid, + int cpus_to_sync)); + void (*boot_clear_bss)(void); + void (*boot_check_bootblock)(bool bsp, + bootblock_struct_t *bootblock); + + void (*init_terminate_boot_init)(bool bsp, int cpuid); + void (*boot_parse_param)(bootblock_struct_t *bootblock); + void (*boot_debug_cons_outb)(u8 byte, u16 port); + u8 (*boot_debug_cons_inb)(u16 port); + u32 (*boot_debug_cons_inl)(u16 port); + void (*debug_cons_outb)(u8 byte, u16 port); + u8 (*debug_cons_inb)(u16 port); + u32 (*debug_cons_inl)(u16 port); + void (*do_boot_panic)(const char *fmt_v, ...); +} pv_boot_ops_t; + +extern pv_boot_ops_t pv_boot_ops; +extern pv_boot_ops_t *cur_pv_boot_ops; /* pointer to boot-time tables of OPs */ + +#define BOOT_PARAVIRT_GET_BOOT_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_boot_ops, func_name) + +#define BOOT_PARAVIRT_OUT_OP(op_func, value, port) \ +({ \ + BOOT_PARAVIRT_GET_BOOT_FUNC(op_func)(value, port); \ +}) +#define BOOT_PARAVIRT_IN_OP(op_func, port) \ +({ \ + BOOT_PARAVIRT_GET_BOOT_FUNC(op_func)(port); \ +}) + +/* + * initialization hooks. 
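+ *
+ * Every entry below is a plain function pointer filled in with either
+ * the native or the guest implementation. Generic code calls through
+ * the table roughly as in this sketch (the real wrappers live in the
+ * headers above, e.g. pv_paravirt_banner() in asm/paravirt/processor.h):
+ *
+ *	static inline void paravirt_banner(void)
+ *	{
+ *		pv_init_ops.banner();
+ *	}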
+ */ + +typedef struct pv_init_ops { + void (*banner)(void); + void (*set_mach_type_id)(void); + void (*print_machine_type_info)(void); +} pv_init_ops_t; + +extern pv_init_ops_t pv_init_ops; + +extern unsigned long return_to_paravirt_guest(unsigned long ret_value); +extern void done_to_paravirt_guest(void); +extern void __init kvm_init_paravirt_guest(void); + +typedef struct pv_cpu_ops { + unsigned long (*read_OSCUD_lo_reg_value)(void); + unsigned long (*read_OSCUD_hi_reg_value)(void); + void (*write_OSCUD_lo_reg_value)(unsigned long reg_value); + void (*write_OSCUD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_OSGD_lo_reg_value)(void); + unsigned long long (*read_OSGD_hi_reg_value)(void); + void (*write_OSGD_lo_reg_value)(unsigned long reg_value); + void (*write_OSGD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_CUD_lo_reg_value)(void); + unsigned long long (*read_CUD_hi_reg_value)(void); + void (*write_CUD_lo_reg_value)(unsigned long reg_value); + void (*write_CUD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_GD_lo_reg_value)(void); + unsigned long long (*read_GD_hi_reg_value)(void); + void (*write_GD_lo_reg_value)(unsigned long reg_value); + void (*write_GD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_PSP_lo_reg_value)(void); + unsigned long long (*read_PSP_hi_reg_value)(void); + void (*write_PSP_lo_reg_value)(unsigned long reg_value); + void (*write_PSP_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_PSHTP_reg_value)(void); + void (*write_PSHTP_reg_value)(unsigned long reg_value); + unsigned long long (*read_PCSP_lo_reg_value)(void); + unsigned long long (*read_PCSP_hi_reg_value)(void); + void (*write_PCSP_lo_reg_value)(unsigned long reg_value); + void (*write_PCSP_hi_reg_value)(unsigned long reg_value); + int (*read_PCSHTP_reg_value)(void); + void (*write_PCSHTP_reg_value)(int reg_value); + unsigned long long (*read_CR0_lo_reg_value)(void); + unsigned long long (*read_CR0_hi_reg_value)(void); + unsigned long long (*read_CR1_lo_reg_value)(void); + unsigned long long (*read_CR1_hi_reg_value)(void); + void (*write_CR0_lo_reg_value)(unsigned long reg_value); + void (*write_CR0_hi_reg_value)(unsigned long reg_value); + void (*write_CR1_lo_reg_value)(unsigned long reg_value); + void (*write_CR1_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_CTPR_reg_value)(int reg_no); + void (*write_CTPR_reg_value)(int reg_no, unsigned long reg_value); + unsigned long long (*read_USD_lo_reg_value)(void); + unsigned long long (*read_USD_hi_reg_value)(void); + void (*write_USD_lo_reg_value)(unsigned long reg_value); + void (*write_USD_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_SBR_reg_value)(void); + void (*write_SBR_reg_value)(unsigned long reg_value); + unsigned long long (*read_WD_reg_value)(void); + void (*write_WD_reg_value)(unsigned long reg_value); +#ifdef NEED_PARAVIRT_LOOP_REGISTERS + unsigned long long (*read_LSR_reg_value)(void); + void (*write_LSR_reg_value)(unsigned long reg_value); + unsigned long long (*read_ILCR_reg_value)(void); + void (*write_ILCR_reg_value)(unsigned long reg_value); +#endif /* NEED_PARAVIRT_LOOP_REGISTERS */ + unsigned long long (*read_OSR0_reg_value)(void); + void (*write_OSR0_reg_value)(unsigned long reg_value); + unsigned int (*read_OSEM_reg_value)(void); + void (*write_OSEM_reg_value)(unsigned int reg_value); + unsigned int (*read_BGR_reg_value)(void); + void (*write_BGR_reg_value)(unsigned int reg_value); + unsigned 
long long (*read_CLKR_reg_value)(void); + void (*write_CLKR_reg_value)(void); + unsigned long long (*read_SCLKR_reg_value)(void); + void (*write_SCLKR_reg_value)(unsigned long reg_value); + unsigned long long (*read_SCLKM1_reg_value)(void); + void (*write_SCLKM1_reg_value)(unsigned long reg_value); + unsigned long long (*read_SCLKM2_reg_value)(void); + unsigned long long (*read_SCLKM3_reg_value)(void); + void (*write_SCLKM2_reg_value)(unsigned long reg_value); + unsigned long long (*read_CU_HW0_reg_value)(void); + void (*write_CU_HW0_reg_value)(unsigned long reg_value); + unsigned long long (*read_CU_HW1_reg_value)(void); + void (*write_CU_HW1_reg_value)(unsigned long reg_value); + unsigned long long (*read_RPR_lo_reg_value)(void); + unsigned long long (*read_RPR_hi_reg_value)(void); + void (*write_RPR_lo_reg_value)(unsigned long reg_value); + void (*write_RPR_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_SBBP_reg_value)(void); + unsigned long long (*read_IP_reg_value)(void); + unsigned int (*read_DIBCR_reg_value)(void); + unsigned int (*read_DIBSR_reg_value)(void); + unsigned long long (*read_DIMCR_reg_value)(void); + unsigned long long (*read_DIBAR0_reg_value)(void); + unsigned long long (*read_DIBAR1_reg_value)(void); + unsigned long long (*read_DIBAR2_reg_value)(void); + unsigned long long (*read_DIBAR3_reg_value)(void); + unsigned long long (*read_DIMAR0_reg_value)(void); + unsigned long long (*read_DIMAR1_reg_value)(void); + void (*write_DIBCR_reg_value)(unsigned int reg_value); + void (*write_DIBSR_reg_value)(unsigned int reg_value); + void (*write_DIMCR_reg_value)(unsigned long reg_value); + void (*write_DIBAR0_reg_value)(unsigned long reg_value); + void (*write_DIBAR1_reg_value)(unsigned long reg_value); + void (*write_DIBAR2_reg_value)(unsigned long reg_value); + void (*write_DIBAR3_reg_value)(unsigned long reg_value); + void (*write_DIMAR0_reg_value)(unsigned long reg_value); + void (*write_DIMAR1_reg_value)(unsigned long reg_value); + unsigned long long (*read_CUTD_reg_value)(void); + void (*write_CUTD_reg_value)(unsigned long reg_value); + unsigned int (*read_CUIR_reg_value)(void); + unsigned int (*read_PSR_reg_value)(void); + void (*write_PSR_reg_value)(unsigned int reg_value); + unsigned int (*read_UPSR_reg_value)(void); + void (*write_UPSR_reg_value)(unsigned int reg_value); + void (*write_PSR_irq_barrier)(unsigned int reg_value); + void (*write_UPSR_irq_barrier)(unsigned int reg_value); + unsigned int (*read_PFPFR_reg_value)(void); + void (*write_PFPFR_reg_value)(unsigned int reg_value); + unsigned int (*read_FPCR_reg_value)(void); + void (*write_FPCR_reg_value)(unsigned int reg_value); + unsigned int (*read_FPSR_reg_value)(void); + void (*write_FPSR_reg_value)(unsigned int reg_value); + unsigned long long (*read_CS_lo_reg_value)(void); + unsigned long long (*read_CS_hi_reg_value)(void); + unsigned long long (*read_DS_lo_reg_value)(void); + unsigned long long (*read_DS_hi_reg_value)(void); + unsigned long long (*read_ES_lo_reg_value)(void); + unsigned long long (*read_ES_hi_reg_value)(void); + unsigned long long (*read_FS_lo_reg_value)(void); + unsigned long long (*read_FS_hi_reg_value)(void); + unsigned long long (*read_GS_lo_reg_value)(void); + unsigned long long (*read_GS_hi_reg_value)(void); + unsigned long long (*read_SS_lo_reg_value)(void); + unsigned long long (*read_SS_hi_reg_value)(void); + void (*write_CS_lo_reg_value)(unsigned long reg_value); + void (*write_CS_hi_reg_value)(unsigned long reg_value); + void (*write_DS_lo_reg_value)(unsigned 
long reg_value); + void (*write_DS_hi_reg_value)(unsigned long reg_value); + void (*write_ES_lo_reg_value)(unsigned long reg_value); + void (*write_ES_hi_reg_value)(unsigned long reg_value); + void (*write_FS_lo_reg_value)(unsigned long reg_value); + void (*write_FS_hi_reg_value)(unsigned long reg_value); + void (*write_GS_lo_reg_value)(unsigned long reg_value); + void (*write_GS_hi_reg_value)(unsigned long reg_value); + void (*write_SS_lo_reg_value)(unsigned long reg_value); + void (*write_SS_hi_reg_value)(unsigned long reg_value); + unsigned long long (*read_IDR_reg_value)(void); + unsigned long long (*boot_read_IDR_reg_value)(void); + unsigned int (*read_CORE_MODE_reg_value)(void); + unsigned int (*boot_read_CORE_MODE_reg_value)(void); + void (*write_CORE_MODE_reg_value)(unsigned int modes); + void (*boot_write_CORE_MODE_reg_value)(unsigned int modes); + + void (*put_updated_cpu_regs_flags)(unsigned long flags); + + unsigned int (*read_aasr_reg_value)(void); + void (*write_aasr_reg_value)(unsigned int reg_value); + unsigned int (*read_aafstr_reg_value)(void); + void (*write_aafstr_reg_value)(unsigned int reg_value); + + void (*flush_stacks)(void); + void (*flush_regs_stack)(void); + void (*flush_chain_stack)(void); + void (*copy_stacks_to_memory)(void); + unsigned long long (*get_active_cr0_lo_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + unsigned long long (*get_active_cr0_hi_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + unsigned long long (*get_active_cr1_lo_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + unsigned long long (*get_active_cr1_hi_value)(e2k_addr_t base, + e2k_addr_t cr_ind); + void (*put_active_cr0_lo_value)(unsigned long cr0_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*put_active_cr0_hi_value)(unsigned long cr0_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*put_active_cr1_lo_value)(unsigned long cr1_lo_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*put_active_cr1_hi_value)(unsigned long cr1_hi_value, + e2k_addr_t base, e2k_addr_t cr_ind); + void (*correct_trap_psp_pcsp)(struct pt_regs *regs, + struct thread_info *thread_info); + void (*correct_scall_psp_pcsp)(struct pt_regs *regs, + struct thread_info *thread_info); + void (*correct_trap_return_ip)(struct pt_regs *regs, + unsigned long return_ip); + void *(*nested_kernel_return_address)(int n); + void (*virt_cpu_thread_init)(struct task_struct *boot_task); + int (*prepare_start_thread_frames)(unsigned long entry, + unsigned long sp); + int (*copy_kernel_stacks)(struct task_struct *new_task, + unsigned long fn, unsigned long arg); + int (*copy_user_stacks)(unsigned long clone_flags, + e2k_addr_t new_stk_base, e2k_size_t new_stk_sz, + struct task_struct *new_task, + struct pt_regs *regs); + void (*define_kernel_hw_stacks_sizes)(struct hw_stack *hw_stacks); + void (*define_user_hw_stacks_sizes)(struct hw_stack *hw_stacks); + void (*switch_to_expanded_proc_stack)(long delta_size, + long delta_offset, bool decr_k_ps); + void (*switch_to_expanded_chain_stack)(long delta_size, + long delta_offset, bool decr_k_pcs); + void (*stack_bounds_trap_enable)(void); + bool (*is_proc_stack_bounds)(struct thread_info *ti, + struct pt_regs *regs); + bool (*is_chain_stack_bounds)(struct thread_info *ti, + struct pt_regs *regs); + + void (*release_hw_stacks)(struct thread_info *dead_ti); + void (*release_kernel_stacks)(struct thread_info *dead_ti); + int (*register_kernel_hw_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + int (*register_kernel_data_stack)(e2k_addr_t stack_base, + e2k_size_t 
stack_size); + void (*unregister_kernel_hw_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + void (*unregister_kernel_data_stack)(e2k_addr_t stack_base, + e2k_size_t stack_size); + int (*kmem_area_host_chunk)(e2k_addr_t stack_base, + e2k_size_t stack_size, int hw_stack); + void (*kmem_area_unhost_chunk)(e2k_addr_t stack_base, + e2k_size_t stack_size); + int (*switch_to_new_user)(struct e2k_stacks *stacks, + struct hw_stack *hw_stacks, + e2k_addr_t cut_base, e2k_size_t cut_size, + e2k_addr_t entry_point, int cui, + unsigned long flags, bool kernel); + int (*do_map_user_hard_stack_to_kernel)(int nid, + e2k_addr_t kernel_start, e2k_addr_t user_stack_base, + e2k_size_t kernel_size); + int (*do_switch_to_kernel_hardware_stacks)(void); + void (*free_old_kernel_hardware_stacks)(void); + void (*instr_page_fault)(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr); + unsigned long (*mmio_page_fault)(struct pt_regs *regs, + struct trap_cellar *tcellar); + int (*do_hw_stack_bounds)(struct pt_regs *regs, + bool proc_bounds, bool chain_bounds); + irqreturn_t (*handle_interrupt)(struct pt_regs *regs); + void (*init_guest_system_handlers_table)(void); + void (*handle_deferred_traps_in_syscall)(struct pt_regs *regs, + bool use_pt_regs, bool new_hs); + void (*fix_process_pt_regs)(struct thread_info *ti, + struct e2k_stacks *stacks, struct pt_regs *regs, + struct pt_regs *old_regs); + int (*run_user_handler)(struct as_sa_handler_arg *arg); + long (*trap_table_entry1)(int sys_num, ...); + long (*trap_table_entry3)(int sys_num, ...); + long (*trap_table_entry4)(int sys_num, ...); + + int (*do_fast_clock_gettime)(const clockid_t which_clock, + struct timespec *tp); + int (*fast_sys_clock_gettime)(const clockid_t which_clock, + struct timespec __user *tp); + int (*do_fast_gettimeofday)(struct timeval *tv); + int (*fast_sys_siggetmask)(u64 __user *oset, size_t sigsetsize); + + unsigned long (*fast_tagged_memory_copy)(void *dst, const void *src, + size_t len, unsigned long strd_opcode, + unsigned long ldrd_opcode, int prefetch); + unsigned long (*fast_tagged_memory_set)(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode); + unsigned long (*extract_tags_32)(u16 *dst, const void *src); + void (*save_local_glob_regs)(struct local_gregs *l_gregs); + void (*restore_local_glob_regs)(struct local_gregs *l_gregs); + void (*restore_kernel_gregs_in_syscall)(struct thread_info *ti); + void (*get_all_user_glob_regs)(struct global_regs *gregs); + void (*arch_setup_machine)(void); + void (*cpu_default_idle)(void); + void (*cpu_relax)(void); + void (*cpu_relax_no_resched)(void); +#ifdef CONFIG_SMP + void (*wait_for_cpu_booting)(void); + void (*wait_for_cpu_wake_up)(void); + int (*activate_cpu)(int cpu_id); + int (*activate_all_cpus)(void); + void (*csd_lock_wait)(struct __call_single_data *data); + void (*csd_lock)(struct __call_single_data *data); + void (*arch_csd_lock_async)(struct __call_single_data *data); + void (*csd_unlock)(struct __call_single_data *data); + void (*setup_local_pic_virq)(unsigned int cpuid); + void (*startup_local_pic_virq)(unsigned int cpuid); + void (*smp_flush_tlb_all)(void); + void (*smp_flush_tlb_mm)(struct mm_struct *mm); + void (*smp_flush_tlb_page)(struct vm_area_struct *vma, + e2k_addr_t addr); + void (*smp_flush_tlb_range)(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); + void (*smp_flush_pmd_tlb_range)(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); + void (*smp_flush_tlb_range_and_pgtables)(struct mm_struct *mm, + e2k_addr_t start, 
e2k_addr_t end); + void (*smp_flush_icache_range)(e2k_addr_t start, e2k_addr_t end); + void (*smp_flush_icache_range_array)( + void *icache_range_arr); + void (*smp_flush_icache_page)(struct vm_area_struct *vma, + struct page *page); + void (*smp_flush_icache_all)(void); + void (*smp_flush_icache_kernel_line)(e2k_addr_t addr); +#endif /* CONFIG_SMP */ + int (*host_printk)(const char *fmt, ...); + + void (*arch_spin_lock_slow)(void *lock); + void (*arch_spin_locked_slow)(void *lock); + void (*arch_spin_unlock_slow)(void *lock); + + void (*ord_wait_read_lock_slow)(arch_rwlock_t *rw); + void (*ord_wait_write_lock_slow)(arch_rwlock_t *rw); + void (*ord_arch_read_locked_slow)(arch_rwlock_t *rw); + void (*ord_arch_write_locked_slow)(arch_rwlock_t *rw); + void (*ord_arch_read_unlock_slow)(arch_rwlock_t *rw); + void (*ord_arch_write_unlock_slow)(arch_rwlock_t *rw); +} pv_cpu_ops_t; + +extern pv_cpu_ops_t pv_cpu_ops; +extern pv_cpu_ops_t *cur_pv_cpu_ops; + +/* FIXME: this include should be deleted, paravirt_enable() should migrate */ +/* from processor.h to some other common include */ +#include + +#define PATCH_GOTO_AND_RETURN_CTPR_NO 1 /* ctpr1 */ + +#define GOTO_FUNC_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) +#define GOTO_CPU_FUNC_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) \ + GOTO_FUNC_PATCH(pv_cpu_ops.patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, POST) +#define DEF_GOTO_CPU_FUNC_PATCH(patch_addr) \ + GOTO_CPU_FUNC_PATCH(patch_addr, \ + PATCH_RETURN_ADDR_GREG, \ + PATCH_GOTO_AND_RETURN_CTPR_NO, \ + "", "") + +#define GOTO_READ_SREG_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, POST) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, "", POST) +#define GOTO_WRITE_SREG_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, "") +#define GOTO_SET_UPDATED_FLAGS_PATCH(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE) \ + E2K_GOTO_PATCH_AND_RETURN(patch_addr, return_addr_greg_no, \ + ctpr_no, PRE, "") +#define READ_SREG_PATCH_POST(reg_value_greg_no, res) \ + E2K_MOVE_GREG_TO_REG(reg_value_greg_no, res) +#define READ_DSREG_PATCH_POST(reg_value_greg_no, res) \ + E2K_MOVE_DGREG_TO_DREG(reg_value_greg_no, res) +#define DEF_READ_SREG_PATCH_POST(res) \ + READ_SREG_PATCH_POST(PATCH_RES_GREG, res) +#define DEF_READ_DSREG_PATCH_POST(res) \ + READ_DSREG_PATCH_POST(PATCH_RES_GREG, res) + +#define PV_WRITE_SREG_PATCH_PRE(reg_value_greg_no, reg_value) \ + E2K_MOVE_REG_TO_GREG(reg_value_greg_no, reg_value) +#define PV_WRITE_DSREG_PATCH_PRE(reg_value_greg_no, reg_value) \ + E2K_MOVE_DREG_TO_DGREG(reg_value_greg_no, reg_value) +#define DEF_WRITE_SREG_PATCH_PRE(reg_value) \ + PV_WRITE_SREG_PATCH_PRE(PATCH_ARG_GREG, reg_value) +#define DEF_WRITE_DSREG_PATCH_PRE(reg_value) \ + PV_WRITE_DSREG_PATCH_PRE(PATCH_ARG_GREG, reg_value) + +#define BOOT_PARAVIRT_GET_CPU_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_cpu_ops, func_name) +#define BOOT_PARAVIRT_READ_REG(reg_func) \ + (BOOT_PARAVIRT_GET_CPU_FUNC(reg_func)()) +#define BOOT_PARAVIRT_WRITE_REG(reg_func, reg_value) \ + (BOOT_PARAVIRT_GET_CPU_FUNC(reg_func)(reg_value)) +#define BOOT_GET_CPU_PATCH_LABEL(patch_addr) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_cpu_ops, patch_addr) + +#define BOOT_PARAVIRT_FLUSH(reg_func) \ + (BOOT_PARAVIRT_GET_CPU_FUNC(reg_func)()) + +typedef struct pv_apic_ops { + void 
(*apic_write)(unsigned int reg, unsigned int v); + unsigned int (*apic_read) (unsigned int reg); + void (*boot_apic_write)(unsigned int reg, unsigned int v); + unsigned int (*boot_apic_read) (unsigned int reg); +} pv_apic_ops_t; + +extern pv_apic_ops_t pv_apic_ops; +extern pv_apic_ops_t *cur_pv_apic_ops; + +#define BOOT_PARAVIRT_GET_APIC_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_apic_ops, func_name) +#define BOOT_PARAVIRT_APIC_READ(reg) \ + (BOOT_PARAVIRT_GET_APIC_FUNC(boot_apic_read)(reg)) +#define BOOT_PARAVIRT_APIC_WRITE(reg, reg_value) \ + (BOOT_PARAVIRT_GET_APIC_FUNC(boot_apic_write)(reg, reg_value)) + +typedef struct pv_epic_ops { + void (*epic_write_w)(unsigned int reg, unsigned int v); + unsigned int (*epic_read_w)(unsigned int reg); + void (*epic_write_d)(unsigned int reg, unsigned long v); + unsigned long (*epic_read_d)(unsigned int reg); + void (*boot_epic_write_w)(unsigned int reg, unsigned int v); + unsigned int (*boot_epic_read_w)(unsigned int reg); +} pv_epic_ops_t; + +extern pv_epic_ops_t pv_epic_ops; +extern pv_epic_ops_t *cur_pv_epic_ops; + +#define BOOT_PARAVIRT_GET_EPIC_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_epic_ops, func_name) +#define BOOT_PARAVIRT_EPIC_READ_W(reg) \ + (BOOT_PARAVIRT_GET_EPIC_FUNC(boot_epic_read_w)(reg)) +#define BOOT_PARAVIRT_EPIC_WRITE_W(reg, reg_value) \ + (BOOT_PARAVIRT_GET_EPIC_FUNC(boot_epic_write_w)(reg, reg_value)) + +typedef struct pv_mmu_ops { + long (*recovery_faulted_tagged_store)(e2k_addr_t address, u64 wr_data, + u32 data_tag, u64 st_rec_opc, int chan); + long (*recovery_faulted_load)(e2k_addr_t address, u64 *ld_val, + u8 *data_tag, u64 ld_rec_opc, int chan); + long (*recovery_faulted_move)(e2k_addr_t addr_from, e2k_addr_t addr_to, + int format, int vr, u64 ld_rec_opc, int chan); + long (*recovery_faulted_load_to_greg)(e2k_addr_t address, + u32 greg_num_d, int format, int vr, + u64 ld_rec_opc, int chan, void *saved_greg); + void (*move_tagged_word)(e2k_addr_t addr_from, e2k_addr_t addr_to); + void (*move_tagged_dword)(e2k_addr_t addr_from, e2k_addr_t addr_to); + void (*move_tagged_qword)(e2k_addr_t addr_from, e2k_addr_t addr_to); + void (*write_mmu_reg)(mmu_addr_t mmu_addr, mmu_reg_t mmu_reg); + mmu_reg_t (*read_mmu_reg)(mmu_addr_t mmu_addr); + void (*write_dtlb_reg)(tlb_addr_t tlb_addr, mmu_reg_t mmu_reg); + mmu_reg_t (*read_dtlb_reg)(tlb_addr_t tlb_addr); + void (*flush_tlb_entry)(flush_op_t flush_op, flush_addr_t flush_addr); + void (*flush_dcache_line)(e2k_addr_t virt_addr); + void (*clear_dcache_l1_set)(e2k_addr_t virt_addr, unsigned long set); + void (*flush_dcache_range)(void *addr, size_t len); + void (*clear_dcache_l1_range)(void *virt_addr, size_t len); + void (*flush_icache_line)(flush_op_t flush_op, flush_addr_t flush_addr); + void (*write_dcache_l2_reg)(unsigned long reg_val, + int reg_num, int bank_num); + unsigned long (*read_dcache_l2_reg)(int reg_num, int bank_num); + void (*flush_cache_all)(flush_op_t flush_op); + void (*do_flush_tlb_all)(flush_op_t flush_op); + void (*flush_icache_all)(flush_op_t flush_op); + probe_entry_t (*entry_probe_mmu_op)(e2k_addr_t virt_addr); + probe_entry_t (*address_probe_mmu_op)(e2k_addr_t virt_addr); + clw_reg_t (*read_clw_reg)(clw_addr_t clw_addr); + void (*save_DAM)(unsigned long long *dam); + void (*write_mmu_debug_reg)(int reg_no, mmu_reg_t mmu_reg); + mmu_reg_t (*read_mmu_debug_reg)(int reg_no); + void (*boot_set_pte_at)(unsigned long addr, + pte_t *ptep, pte_t pteval); + void (*write_pte_at)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, 
pte_t pteval, + bool only_validate, bool to_move); + void (*set_pte)(pte_t *ptep, pte_t pteval); + void (*write_pmd_at)(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval, bool only_validate); + void (*write_pud_at)(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval, bool only_validate); + void (*write_pgd_at)(struct mm_struct *mm, unsigned long addr, + pgd_t *pgdp, pgd_t pgdval, bool only_validate); + pte_t (*ptep_get_and_clear)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, bool to_move); + pte_t (*ptep_get_and_clear_as_valid)(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + void (*ptep_wrprotect_atomic)(struct mm_struct *mm, + e2k_addr_t addr, pte_t *ptep); + pte_t (*get_pte_for_address)(struct vm_area_struct *vma, + e2k_addr_t address); + int (*remap_area_pages)(unsigned long address, unsigned long phys_addr, + unsigned long size, + enum pte_mem_type memory_type); + int (*host_guest_vmap_area)(e2k_addr_t start, e2k_addr_t end); + int (*unhost_guest_vmap_area)(e2k_addr_t start, e2k_addr_t end); + + /* memory management - mman.h */ + void (*free_mm)(struct mm_struct *); + struct mm_struct *(*mm_init)(struct mm_struct *mm, + struct task_struct *p, + struct user_namespace *user_ns); + void (*activate_mm)(struct mm_struct *active_mm, struct mm_struct *mm); + int (*make_host_pages_valid)(struct vm_area_struct *vma, + e2k_addr_t start_addr, e2k_addr_t end_addr, + bool chprot, bool flush); + int (*set_memory_attr_on_host)(e2k_addr_t start, e2k_addr_t end, + int mode); + int (*access_process_vm)(struct task_struct *task, + unsigned long addr, void *buf, int len, + unsigned int gup_flags); + + /* memory management - mm.h */ + void (*free_pgd_range)(struct mmu_gather *tlb, unsigned long addr, + unsigned long end, unsigned long floor, unsigned long ceiling); + + /* kernel virtual memory allocation */ + struct vmap_area *(*alloc_vmap_area)(unsigned long size, + unsigned long align, + unsigned long vstart, unsigned long vend, + int node, gfp_t gfp_mask); + void (*__free_vmap_area)(struct vmap_area *va); +#ifdef CONFIG_SMP + struct vm_struct **(*pcpu_get_vm_areas)(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align); +#endif /* CONFIG_SMP */ + void (*free_unmap_vmap_area)(struct vmap_area *va); + + /* unmap __init areas */ + void (*unmap_initmem)(void *start, void *end); + +} pv_mmu_ops_t; + +extern pv_mmu_ops_t pv_mmu_ops; +extern pv_mmu_ops_t *cur_pv_mmu_ops; + +#define BOOT_PARAVIRT_GET_MMU_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_mmu_ops, func_name) +#define BOOT_PARAVIRT_READ_MMU_REG(mmu_addr) \ + BOOT_PARAVIRT_GET_MMU_FUNC(read_mmu_reg)(mmu_addr) +#define BOOT_PARAVIRT_WRITE_MMU_REG(mmu_addr, mmu_reg) \ + BOOT_PARAVIRT_GET_MMU_FUNC(write_mmu_reg)(mmu_addr, mmu_reg) + +typedef struct pv_irq_ops { +} pv_irq_ops_t; + +extern pv_irq_ops_t pv_irq_ops; + +typedef struct pv_time_ops { + void (*time_init)(void); + void (*clock_init)(void); + int (*read_current_timer)(unsigned long *timer_val); + unsigned long (*get_cpu_running_cycles)(void); + unsigned long long (*do_sched_clock)(void); + unsigned long (*steal_clock)(int cpu); +} pv_time_ops_t; + +extern pv_time_ops_t pv_time_ops; + +typedef struct pv_io_ops { + void (*boot_writeb)(u8 b, void __iomem *addr); + void (*boot_writew)(u16 w, void __iomem *addr); + void (*boot_writel)(u32 l, void __iomem *addr); + void (*boot_writell)(u64 q, void __iomem *addr); + u8 (*boot_readb)(void __iomem *addr); + u16 (*boot_readw)(void __iomem *addr); + u32 
(*boot_readl)(void __iomem *addr); + u64 (*boot_readll)(void __iomem *addr); + void (*writeb)(u8 b, void __iomem *addr); + void (*writew)(u16 w, void __iomem *addr); + void (*writel)(u32 l, void __iomem *addr); + void (*writell)(u64 q, void __iomem *addr); + u8 (*readb)(void __iomem *addr); + u16 (*readw)(void __iomem *addr); + u32 (*readl)(void __iomem *addr); + u64 (*readll)(void __iomem *addr); + u8 (*inb)(unsigned short port); + void (*outb)(unsigned char byte, unsigned short port); + void (*outw)(u16 halfword, unsigned short port); + u16 (*inw)(unsigned short port); + void (*outl)(u32 word, unsigned short port); + u32 (*inl)(unsigned short port); + void (*outsb)(unsigned short port, const void *src, unsigned long count); + void (*outsw)(unsigned short port, const void *src, unsigned long count); + void (*outsl)(unsigned short port, const void *src, unsigned long count); + void (*insb)(unsigned short port, void *dst, unsigned long count); + void (*insw)(unsigned short port, void *dst, unsigned long count); + void (*insl)(unsigned short port, void *dst, unsigned long count); + void (*conf_inb)(unsigned int domain, unsigned int bus, + unsigned long port, u8 *byte); + void (*conf_inw)(unsigned int domain, unsigned int bus, + unsigned long port, u16 *hword); + void (*conf_inl)(unsigned int domain, unsigned int bus, + unsigned long port, u32 *word); + void (*conf_outb)(unsigned int domain, unsigned int bus, + unsigned long port, u8 byte); + void (*conf_outw)(unsigned int domain, unsigned int bus, + unsigned long port, u16 hword); + void (*conf_outl)(unsigned int domain, unsigned int bus, + unsigned long port, u32 word); + + void (*scr_writew)(u16 val, volatile u16 *addr); + u16 (*scr_readw)(volatile const u16 *addr); + void (*vga_writeb)(u8 val, volatile u8 *addr); + u8 (*vga_readb)(volatile const u8 *addr); + + int (*pci_init)(void); +} pv_io_ops_t; + +extern pv_io_ops_t pv_io_ops; +extern pv_io_ops_t *cur_pv_io_ops; + +#define BOOT_PARAVIRT_GET_IO_OPS_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_io_ops, func_name) +#define BOOT_PARAVIRT_IO_READ(io_func, io_addr) \ + (BOOT_PARAVIRT_GET_IO_OPS_FUNC(io_func)(io_addr)) +#define BOOT_PARAVIRT_IO_WRITE(io_func, io_value, io_addr) \ + (BOOT_PARAVIRT_GET_IO_OPS_FUNC(io_func)(io_value, io_addr)) + +#endif /* ! 
__ASSEMBLY__ */ +#endif /* CONFIG_PARAVIRT */ + +#endif /* __ASM_E2K_PARAVIRT_PV_OPS_H */ diff --git a/arch/e2k/include/asm/paravirt/regs_state.h b/arch/e2k/include/asm/paravirt/regs_state.h new file mode 100644 index 0000000..3a0e8aa --- /dev/null +++ b/arch/e2k/include/asm/paravirt/regs_state.h @@ -0,0 +1,87 @@ +#ifndef _E2K_PARAVIRT_REGS_STATE_H +#define _E2K_PARAVIRT_REGS_STATE_H + +#include +#include +#include + +#define PV_INIT_G_REGS() \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_INIT_G_REGS(regs); \ + } else { \ + KVM_INIT_G_REGS(regs); \ + } \ +}) + +static inline void +pv_save_local_glob_regs(local_gregs_t *l_gregs) +{ + pv_cpu_ops.save_local_glob_regs(l_gregs); +} +static inline void +pv_restore_local_glob_regs(local_gregs_t *l_gregs) +{ + pv_cpu_ops.restore_local_glob_regs(l_gregs); +} +static inline void +pv_get_all_user_glob_regs(struct global_regs *gregs) +{ + pv_cpu_ops.get_all_user_glob_regs(gregs); +} +static inline void +pv_restore_kernel_gregs_in_syscall(struct thread_info *ti) +{ + pv_cpu_ops.restore_kernel_gregs_in_syscall(ti); +} + +/* Save stack registers on guest kernel mode */ + +#define PV_SAVE_STACK_REGS(regs, ti, user, trap) \ + PREFIX_SAVE_STACK_REGS(PV, regs, ti, user, trap) + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define INIT_G_REGS() PV_INIT_G_REGS() + +static inline void +save_local_glob_regs(local_gregs_t *l_gregs) +{ + pv_save_local_glob_regs(l_gregs); +} +static inline void +restore_local_glob_regs(local_gregs_t *l_gregs) +{ + pv_restore_local_glob_regs(l_gregs); +} +static inline void +get_all_user_glob_regs(struct global_regs *gregs) +{ + pv_get_all_user_glob_regs(gregs); +} + +#define RESTORE_KERNEL_GREGS_IN_SYSCALL(thread_info) \ + pv_restore_kernel_gregs_in_syscall(thread_info) + +#define SAVE_STACK_REGS(regs, ti, user, trap) \ + PV_SAVE_STACK_REGS(regs, ti, user, trap) +#define RESTORE_HS_REGS(regs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_HS_REGS(regs); \ + } else { \ + KVM_RESTORE_HS_REGS(regs); \ + } \ +}) +#define RESTORE_USER_STACK_REGS(regs, restore_hs) \ +({ \ + if (!paravirt_enabled()) { \ + NATIVE_RESTORE_USER_STACK_REGS(regs, restore_hs); \ + } else { \ + KVM_RESTORE_USER_STACK_REGS(regs, restore_hs); \ + } \ +}) +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* _E2K_PARAVIRT_REGS_STATE_H */ + diff --git a/arch/e2k/include/asm/paravirt/secondary_space.h b/arch/e2k/include/asm/paravirt/secondary_space.h new file mode 100644 index 0000000..8e91e74 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/secondary_space.h @@ -0,0 +1,35 @@ +/* + * Secondary space support for E2K binary compiler + * Paravirtualized host and guest kernel support + */ +#ifndef _ASM_PARAVIRT_SECONDARY_SPACE_H +#define _ASM_PARAVIRT_SECONDARY_SPACE_H + +/* do not include the header directly, use asm/secondary_space.h include */ +#include + +#ifdef CONFIG_KVM_GUEST +#include + +#define PV_IS_NEXT_ELBRUS_2S \ + ((!paravirt_enabled()) ? \ + NATIVE_IS_NEXT_ELBRUS_2S : KVM_IS_NEXT_ELBRUS_2S) +#define PV_SS_SIZE \ + ((!paravirt_enabled()) ? \ + NATIVE_SS_SIZE : KVM_SS_SIZE) +#define PV_SS_ADDR_START \ + ((!paravirt_enabled()) ? \ + NATIVE_SS_ADDR_START : KVM_SS_ADDR_START) +#else /* ! 
CONFIG_KVM_GUEST */
+ #error "Unknown virtualization type"
+#endif /* CONFIG_KVM_GUEST */
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+#define IS_NEXT_ELBRUS_2S PV_IS_NEXT_ELBRUS_2S
+#define SS_SIZE PV_SS_SIZE
+#define SS_ADDR_START PV_SS_ADDR_START
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _ASM_PARAVIRT_SECONDARY_SPACE_H */
diff --git a/arch/e2k/include/asm/paravirt/setup.h b/arch/e2k/include/asm/paravirt/setup.h
new file mode 100644
index 0000000..a8f72b6
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/setup.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_PARAVIRT_MACHDEP_H_
+#define _ASM_PARAVIRT_MACHDEP_H_
+
+#include
+#include
+
+static inline void
+pv_arch_setup_machine(void)
+{
+ pv_cpu_ops.arch_setup_machine();
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+static inline void arch_setup_machine(void)
+{
+ pv_arch_setup_machine();
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _ASM_PARAVIRT_MACHDEP_H_ */
diff --git a/arch/e2k/include/asm/paravirt/sge.h b/arch/e2k/include/asm/paravirt/sge.h
new file mode 100644
index 0000000..1ad3f2e
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/sge.h
@@ -0,0 +1,46 @@
+#ifndef _E2K_ASM_PARAVIRT_SGE_H
+#define _E2K_ASM_PARAVIRT_SGE_H
+
+#ifdef __KERNEL__
+
+#include
+#include
+
+static inline void
+pv_switch_to_expanded_proc_stack(long delta_size, long delta_offset,
+ bool decr_k_ps)
+{
+ if (pv_cpu_ops.switch_to_expanded_proc_stack)
+ pv_cpu_ops.switch_to_expanded_proc_stack(delta_size,
+ delta_offset, decr_k_ps);
+}
+static inline void
+pv_switch_to_expanded_chain_stack(long delta_size, long delta_offset,
+ bool decr_k_pcs)
+{
+ if (pv_cpu_ops.switch_to_expanded_chain_stack)
+ pv_cpu_ops.switch_to_expanded_chain_stack(delta_size,
+ delta_offset, decr_k_pcs);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* It is paravirtualized host and guest kernel */
+
+static inline void
+switch_to_expanded_proc_stack(long delta_size, long delta_offset,
+ bool decr_k_ps)
+{
+ pv_switch_to_expanded_proc_stack(delta_size, delta_offset,
+ decr_k_ps);
+}
+static inline void
+switch_to_expanded_chain_stack(long delta_size, long delta_offset,
+ bool decr_k_pcs)
+{
+ pv_switch_to_expanded_chain_stack(delta_size, delta_offset,
+ decr_k_pcs);
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+#endif /* _E2K_ASM_PARAVIRT_SGE_H */
diff --git a/arch/e2k/include/asm/paravirt/smp.h b/arch/e2k/include/asm/paravirt/smp.h
new file mode 100644
index 0000000..11ecedb
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/smp.h
@@ -0,0 +1,121 @@
+#ifndef __ASM_PARAVIRT_SMP_H
+#define __ASM_PARAVIRT_SMP_H
+
+#include
+#include
+
+#ifdef CONFIG_SMP
+static inline void
+pv_wait_for_cpu_booting(void)
+{
+ pv_cpu_ops.wait_for_cpu_booting();
+}
+static inline void
+pv_wait_for_cpu_wake_up(void)
+{
+ pv_cpu_ops.wait_for_cpu_wake_up();
+}
+static inline int
+pv_activate_cpu(int cpu_id)
+{
+ return pv_cpu_ops.activate_cpu(cpu_id);
+}
+static inline int
+pv_activate_all_cpus(void)
+{
+ return pv_cpu_ops.activate_all_cpus();
+}
+
+static inline void
+pv_csd_lock_wait(call_single_data_t *data)
+{
+ pv_cpu_ops.csd_lock_wait(data);
+}
+static inline void
+pv_csd_lock(call_single_data_t *data)
+{
+ pv_cpu_ops.csd_lock(data);
+}
+static inline void
+pv_arch_csd_lock_async(call_single_data_t *data)
+{
+ pv_cpu_ops.arch_csd_lock_async(data);
+}
+static inline void
+pv_csd_unlock(call_single_data_t *data)
+{
+ pv_cpu_ops.csd_unlock(data);
+}
+
+static inline void
+pv_setup_local_pic_virq(unsigned 
int cpuid) +{ + if (pv_cpu_ops.setup_local_pic_virq != NULL) + pv_cpu_ops.setup_local_pic_virq(cpuid); +} +static inline void +pv_startup_local_pic_virq(unsigned int cpuid) +{ + if (pv_cpu_ops.startup_local_pic_virq != NULL) + pv_cpu_ops.startup_local_pic_virq(cpuid); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void +wait_for_cpu_booting(void) +{ + pv_wait_for_cpu_booting(); +} +static inline void +wait_for_cpu_wake_up(void) +{ + pv_wait_for_cpu_wake_up(); +} +static inline int +activate_cpu(int cpu_id) +{ + return pv_activate_cpu(cpu_id); +} +static inline int +activate_all_cpus(void) +{ + return pv_activate_all_cpus(); +} + +static inline void +csd_lock_wait(call_single_data_t *data) +{ + pv_csd_lock_wait(data); +} +static inline void +csd_lock(call_single_data_t *data) +{ + pv_csd_lock(data); +} +static inline void +arch_csd_lock_async(call_single_data_t *data) +{ + pv_arch_csd_lock_async(data); +} +static inline void +csd_unlock(call_single_data_t *data) +{ + pv_csd_unlock(data); +} + +static inline void +setup_local_pic_virq(unsigned int cpuid) +{ + pv_setup_local_pic_virq(cpuid); +} + +static inline void +startup_local_pic_virq(unsigned int cpuid) +{ + pv_startup_local_pic_virq(cpuid); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* CONFIG_SMP */ +#endif /* __ASM_PARAVIRT_SMP_H */ diff --git a/arch/e2k/include/asm/paravirt/spinlock.h b/arch/e2k/include/asm/paravirt/spinlock.h new file mode 100644 index 0000000..777ea8e --- /dev/null +++ b/arch/e2k/include/asm/paravirt/spinlock.h @@ -0,0 +1,151 @@ +#ifndef __ASM_PARAVIRT_SPINLOCK_H +#define __ASM_PARAVIRT_SPINLOCK_H +/* + * This file implements the arch-dependent parts of kvm guest + * spin_lock()/spin_unlock() fast and slow part + * + * Copyright 2014 Salavat S. 
Guiliazov (atic@mcst.ru) + */ + +#include +#include + +static inline void +pv_arch_spin_lock_slow(arch_spinlock_t *lock) +{ + pv_cpu_ops.arch_spin_lock_slow(lock); +} +static inline void +pv_arch_spin_locked_slow(arch_spinlock_t *lock) +{ + pv_cpu_ops.arch_spin_locked_slow(lock); +} +static inline void +pv_arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + pv_cpu_ops.arch_spin_unlock_slow(lock); +} + +static inline void +pv_ord_wait_read_lock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_wait_read_lock_slow) + pv_cpu_ops.ord_wait_read_lock_slow(rw); +} +static inline void +pv_ord_wait_write_lock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_wait_write_lock_slow) + pv_cpu_ops.ord_wait_write_lock_slow(rw); +} +static inline void +pv_ord_arch_read_locked_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_read_locked_slow) + pv_cpu_ops.ord_arch_read_locked_slow(rw); +} +static inline void +pv_ord_arch_write_locked_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_write_locked_slow) + pv_cpu_ops.ord_arch_write_locked_slow(rw); +} +static inline void +pv_ord_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_read_unlock_slow) + pv_cpu_ops.ord_arch_read_unlock_slow(rw); +} +static inline void +pv_ord_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + if (pv_cpu_ops.ord_arch_write_unlock_slow) + pv_cpu_ops.ord_arch_write_unlock_slow(rw); +} + +static inline void +boot_pv_arch_spin_lock_slow(arch_spinlock_t *lock) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(arch_spin_lock_slow)(lock); +} +static inline void +boot_pv_arch_spin_locked_slow(arch_spinlock_t *lock) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(arch_spin_locked_slow)(lock); +} +static inline void +boot_pv_arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + BOOT_PARAVIRT_GET_CPU_FUNC(arch_spin_unlock_slow)(lock); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ + +#define arch_spin_relax(lock) pv_cpu_relax() +#define arch_read_relax(lock) pv_cpu_relax() +#define arch_write_relax(lock) pv_cpu_relax() + +static inline void +arch_spin_lock_slow(arch_spinlock_t *lock) +{ + pv_arch_spin_lock_slow(lock); +} +static inline void +arch_spin_locked_slow(arch_spinlock_t *lock) +{ + pv_arch_spin_locked_slow(lock); +} +static inline void +arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + pv_arch_spin_unlock_slow(lock); +} +/* boot-time functions */ +static inline void boot_arch_spin_lock_slow(arch_spinlock_t *lock) +{ + boot_pv_arch_spin_lock_slow(lock); +} +static inline void boot_arch_spin_locked_slow(arch_spinlock_t *lock) +{ + boot_pv_arch_spin_locked_slow(lock); +} +static inline void boot_arch_spin_unlock_slow(arch_spinlock_t *lock) +{ + boot_pv_arch_spin_unlock_slow(lock); +} + +static inline void +ord_wait_read_lock_slow(arch_rwlock_t *rw) +{ + pv_ord_wait_read_lock_slow(rw); +} +static inline void +ord_wait_write_lock_slow(arch_rwlock_t *rw) +{ + pv_ord_wait_write_lock_slow(rw); +} +static inline void +ord_arch_read_locked_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_read_locked_slow(rw); +} +static inline void +ord_arch_write_locked_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_write_locked_slow(rw); +} +static inline void +ord_arch_read_unlock_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_read_unlock_slow(rw); +} +static inline void +ord_arch_write_unlock_slow(arch_rwlock_t *rw) +{ + pv_ord_arch_write_unlock_slow(rw); +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_PARAVIRT_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/paravirt/string.h b/arch/e2k/include/asm/paravirt/string.h new file mode 100644 index 
0000000..6eafc53
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/string.h
@@ -0,0 +1,88 @@
+#ifndef _ASM_E2K_PARAVIRT_STRING_H
+#define _ASM_E2K_PARAVIRT_STRING_H
+
+#include
+#include
+
+#ifndef CONFIG_BOOT_E2K
+static inline unsigned long
+pv_fast_tagged_memory_copy(void *dst, const void *src, size_t len,
+ unsigned long strd_opcode, unsigned long ldrd_opcode,
+ int prefetch)
+{
+ return pv_cpu_ops.fast_tagged_memory_copy(dst, src, len,
+ strd_opcode, ldrd_opcode, prefetch);
+}
+static inline unsigned long
+boot_pv_fast_tagged_memory_copy(void *dst, const void *src, size_t len,
+ unsigned long strd_opcode, unsigned long ldrd_opcode,
+ int prefetch)
+{
+ return BOOT_PARAVIRT_GET_CPU_FUNC(fast_tagged_memory_copy)(dst, src,
+ len, strd_opcode, ldrd_opcode, prefetch);
+}
+static inline unsigned long
+pv_fast_tagged_memory_set(void *addr, u64 val, u64 tag,
+ size_t len, u64 strd_opcode)
+{
+ return pv_cpu_ops.fast_tagged_memory_set(addr, val,
+ tag, len, strd_opcode);
+}
+static inline void
+boot_pv_fast_tagged_memory_set(void *addr, u64 val, u64 tag,
+ size_t len, u64 strd_opcode)
+{
+ BOOT_PARAVIRT_GET_CPU_FUNC(fast_tagged_memory_set)(addr, val, tag, len,
+ strd_opcode);
+}
+#endif /* ! CONFIG_BOOT_E2K */
+
+static inline unsigned long
+pv_extract_tags_32(u16 *dst, const void *src)
+{
+ return pv_cpu_ops.extract_tags_32(dst, src);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+#ifndef CONFIG_BOOT_E2K
+static inline unsigned long
+fast_tagged_memory_copy(void *dst, const void *src, size_t len,
+ unsigned long strd_opcode, unsigned long ldrd_opcode,
+ int prefetch)
+{
+ return pv_fast_tagged_memory_copy(dst, src, len,
+ strd_opcode, ldrd_opcode, prefetch);
+}
+static inline unsigned long
+boot_fast_tagged_memory_copy(void *dst, const void *src, size_t len,
+ unsigned long strd_opcode, unsigned long ldrd_opcode,
+ int prefetch)
+{
+ return boot_pv_fast_tagged_memory_copy(dst, src,
+ len, strd_opcode, ldrd_opcode, prefetch);
+}
+static inline unsigned long
+fast_tagged_memory_set(void *addr, u64 val, u64 tag,
+ size_t len, u64 strd_opcode)
+{
+ return pv_fast_tagged_memory_set(addr, val, tag, len, strd_opcode);
+}
+static inline void
+boot_fast_tagged_memory_set(void *addr, u64 val, u64 tag,
+ size_t len, u64 strd_opcode)
+{
+ boot_pv_fast_tagged_memory_set(addr, val, tag, len, strd_opcode);
+}
+#endif /* ! CONFIG_BOOT_E2K */
+
+static inline unsigned long
+extract_tags_32(u16 *dst, const void *src)
+{
+ return pv_extract_tags_32(dst, src);
+}
+#endif /* ! 
CONFIG_PARAVIRT_GUEST */ + +#endif /* _ASM_E2K_PARAVIRT_STRING_H */ + diff --git a/arch/e2k/include/asm/paravirt/switch.h b/arch/e2k/include/asm/paravirt/switch.h new file mode 100644 index 0000000..d64fba8 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/switch.h @@ -0,0 +1,54 @@ +#ifndef _E2K_PARAVIRT_SWITCH_H +#define _E2K_PARAVIRT_SWITCH_H + +static inline void pv_guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + if (!paravirt_enabled()) + host_guest_enter(ti, vcpu, switch_usd, hypercall); +} + +static inline void pv_guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + if (!paravirt_enabled()) + host_guest_exit(ti, vcpu, switch_usd, hypercall); +} + +static inline void +pv_trap_guest_enter(struct thread_info *ti, struct pt_regs *regs) +{ + host_trap_guest_enter(ti, regs); +} +static inline void +pv_trap_guest_exit(struct thread_info *ti, struct pt_regs *regs) +{ + host_trap_guest_exit(ti, regs); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest */ + +static inline void __guest_enter(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + pv_guest_enter(ti, vcpu, switch_usd, hypercall); +} + +static inline void __guest_exit(struct thread_info *ti, + struct kvm_vcpu_arch *vcpu, int switch_usd, int hypercall) +{ + pv_guest_exit(ti, vcpu, switch_usd, hypercall); +} +static inline void +trap_guest_enter(struct thread_info *ti, struct pt_regs *regs) +{ + pv_trap_guest_enter(ti, regs); +} +static inline void +trap_guest_exit(struct thread_info *ti, struct pt_regs *regs) +{ + pv_trap_guest_exit(ti, regs); +} +#endif /* CONFIG_PARAVIRT_GUEST */ +#endif /* ! _E2K_PARAVIRT_SWITCH_H */ diff --git a/arch/e2k/include/asm/paravirt/switch_to.h b/arch/e2k/include/asm/paravirt/switch_to.h new file mode 100644 index 0000000..36f1099 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/switch_to.h @@ -0,0 +1,28 @@ +#ifndef _ASM_PARAVIRT_SWITCH_TO_H +#define _ASM_PARAVIRT_SWITCH_TO_H + +#ifdef __KERNEL__ + +#include +#include + +/* switch_to() should be only macros to update pointer 'prev' at */ +/* __schedule() function. 
It is important for guest kernel */
+#define pv_switch_to(prev, next, last) \
+do { \
+ if (!paravirt_enabled()) \
+ native_switch_to(prev, next, last); \
+ else \
+ kvm_switch_to(prev, next, last); \
+} while (false)
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* It is paravirtualized host and guest kernel */
+
+#define switch_to(prev, next, last) pv_switch_to(prev, next, last)
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PARAVIRT_SWITCH_TO_H */
diff --git a/arch/e2k/include/asm/paravirt/system.h b/arch/e2k/include/asm/paravirt/system.h
new file mode 100644
index 0000000..b89004d
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/system.h
@@ -0,0 +1,69 @@
+/*
+ * Paravirtualized PV_OPs support
+ *
+ * Copyright (C) 2016 MCST
+ */
+
+#ifndef _E2K_PARAVIRT_SYSTEM_H_
+#define _E2K_PARAVIRT_SYSTEM_H_
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+#include
+
+#define PV_SWITCH_IRQ_TO_UPSR(disable_sge) \
+({ \
+ if (!paravirt_enabled()) { \
+ NATIVE_SWITCH_IRQ_TO_UPSR(disable_sge); \
+ } else { \
+ KVM_SWITCH_IRQ_TO_UPSR(disable_sge); \
+ } \
+})
+#define BOOT_PV_SWITCH_IRQ_TO_UPSR(disable_sge) \
+({ \
+ if (!boot_paravirt_enabled()) { \
+ BOOT_NATIVE_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)); \
+ } else { \
+ BOOT_KVM_WRITE_PSR_REG_VALUE(AW(E2K_KERNEL_PSR_ENABLED)); \
+ } \
+})
+#define PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \
+ PREFIX_INIT_KERNEL_UPSR_REG(PV, irq_en, nmirq_dis)
+#define BOOT_PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \
+ BOOT_PREFIX_INIT_KERNEL_UPSR_REG(PV, irq_en, nmirq_dis)
+#define PV_SET_KERNEL_UPSR_WITH_DISABLED_NMI(disable_sge) \
+ PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(PV, disable_sge)
+#define BOOT_PV_SET_KERNEL_UPSR() \
+ BOOT_PREFIX_SET_KERNEL_UPSR(PV)
+
+static inline void *
+pv_nested_kernel_return_address(int n)
+{
+ return pv_cpu_ops.nested_kernel_return_address(n);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+
+#define INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \
+ PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis)
+#define BOOT_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \
+ BOOT_PV_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis)
+#define SET_KERNEL_UPSR_WITH_DISABLED_NMI(disable_sge) \
+ PV_SET_KERNEL_UPSR_WITH_DISABLED_NMI(disable_sge)
+#define BOOT_SET_KERNEL_UPSR() BOOT_PV_SET_KERNEL_UPSR()
+
+static inline void *
+nested_kernel_return_address(int n)
+{
+ return pv_nested_kernel_return_address(n);
+}
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* ! 
__ASSEMBLY__ */ +#endif /* _E2K_PARAVIRT_SYSTEM_H_ */ + + diff --git a/arch/e2k/include/asm/paravirt/time.h b/arch/e2k/include/asm/paravirt/time.h new file mode 100644 index 0000000..3772546 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/time.h @@ -0,0 +1,37 @@ +#ifndef __ASM_E2K_PARAVIRT_TIME_H +#define __ASM_E2K_PARAVIRT_TIME_H + +#ifdef __KERNEL__ +#include +#include + +extern struct static_key paravirt_steal_enabled; + +static inline void +pv_arch_clock_init(void) +{ + return pv_time_ops.clock_init(); +} +static inline unsigned long +pv_steal_clock(int cpu) +{ + return pv_time_ops.steal_clock(cpu); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline void +arch_clock_init(void) +{ + pv_arch_clock_init(); +} +static inline u64 +paravirt_steal_clock(int cpu) +{ + return pv_steal_clock(cpu); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_E2K_PARAVIRT_TIME_H */ diff --git a/arch/e2k/include/asm/paravirt/timex.h b/arch/e2k/include/asm/paravirt/timex.h new file mode 100644 index 0000000..db375c6 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/timex.h @@ -0,0 +1,36 @@ +#ifndef __ASM_E2K_PARAVIRT_TIMEX_H +#define __ASM_E2K_PARAVIRT_TIMEX_H + +#ifdef __KERNEL__ +#include +#include + +static inline void +pv_time_init(void) +{ + pv_time_ops.time_init(); +} + +static inline int +pv_read_current_timer(unsigned long *timer_val) +{ + return pv_time_ops.read_current_timer(timer_val); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized guest and host kernel */ +static inline void +time_init(void) +{ + pv_time_init(); +} +static inline int +read_current_timer(unsigned long *timer_val) +{ + return pv_read_current_timer(timer_val); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_E2K_PARAVIRT_TIMEX_H */ diff --git a/arch/e2k/include/asm/paravirt/tlbflush.h b/arch/e2k/include/asm/paravirt/tlbflush.h new file mode 100644 index 0000000..daf9c9d --- /dev/null +++ b/arch/e2k/include/asm/paravirt/tlbflush.h @@ -0,0 +1,85 @@ +/* + * Host and guest MMU caches flushing on paravirtualized kernel + * + * Copyright 2016 Salavat S. 
Gilyazov (atic@mcst.ru) + */ +#ifndef _E2K_PARAVIRT_TLBFLUSH_H +#define _E2K_PARAVIRT_TLBFLUSH_H + +#include +#include + +#ifdef CONFIG_SMP +static inline void +pv_smp_flush_tlb_all(void) +{ + pv_cpu_ops.smp_flush_tlb_all(); +} +static inline void +pv_smp_flush_tlb_mm(struct mm_struct *mm) +{ + pv_cpu_ops.smp_flush_tlb_mm(mm); +} +static inline void +pv_smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + pv_cpu_ops.smp_flush_tlb_page(vma, addr); +} +static inline void +pv_smp_flush_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_tlb_range(mm, start, end); +} +static inline void +pv_smp_flush_pmd_tlb_range(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_pmd_tlb_range(mm, start, end); +} +static inline void +pv_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_cpu_ops.smp_flush_tlb_range_and_pgtables(mm, start, end); +} +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#ifdef CONFIG_SMP +static inline void +smp_flush_tlb_all(void) +{ + pv_smp_flush_tlb_all(); +} +static inline void +smp_flush_tlb_mm(struct mm_struct *mm) +{ + pv_smp_flush_tlb_mm(mm); +} +static inline void +smp_flush_tlb_page(struct vm_area_struct *vma, e2k_addr_t addr) +{ + pv_smp_flush_tlb_page(vma, addr); +} +static inline void +smp_flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_tlb_range(mm, start, end); +} +static inline void +smp_flush_pmd_tlb_range(struct mm_struct *mm, e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_pmd_tlb_range(mm, start, end); +} +static inline void +smp_flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end) +{ + pv_smp_flush_tlb_range_and_pgtables(mm, start, end); +} +#endif /* CONFIG_SMP */ +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_PARAVIRT_TLBFLUSH_H */ diff --git a/arch/e2k/include/asm/paravirt/trap_table.h b/arch/e2k/include/asm/paravirt/trap_table.h new file mode 100644 index 0000000..a8a9be7 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/trap_table.h @@ -0,0 +1,122 @@ +#ifndef __E2K_PARAVIRT_TRAP_TABLE_H +#define __E2K_PARAVIRT_TRAP_TABLE_H + +#include +#include +#include + +#define pv_ttable_entry1 (pv_cpu_ops.trap_table_entry1) +#define pv_ttable_entry3 (pv_cpu_ops.trap_table_entry3) +#define pv_ttable_entry4 (pv_cpu_ops.trap_table_entry4) + +static inline void +pv_handle_deferred_traps_in_syscall(struct pt_regs *regs, + bool use_pt_regs, bool new_hs) +{ + if (pv_cpu_ops.handle_deferred_traps_in_syscall) + pv_cpu_ops.handle_deferred_traps_in_syscall(regs, + use_pt_regs, new_hs); +} + +static inline void +pv_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + if (!paravirt_enabled()) + native_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); + else + kvm_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +static inline void pv_stack_bounds_trap_enable(void) +{ + if (pv_cpu_ops.stack_bounds_trap_enable) + pv_cpu_ops.stack_bounds_trap_enable(); +} +static inline bool +pv_is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return pv_cpu_ops.is_proc_stack_bounds(ti, regs); +} +static inline bool +pv_is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs) +{ + return pv_cpu_ops.is_chain_stack_bounds(ti, regs); +} + +#define PV_RETURN_TO_USER_PSP_PCSP(thread_info) \ + PREFIX_RETURN_TO_USER_PSP_PCSP(PV, thread_info) + 
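+/*
+ * The wrappers below follow the pattern used throughout this file:
+ * mandatory hooks are called through pv_cpu_ops unconditionally,
+ * while optional ones (e.g. handle_deferred_traps_in_syscall() above)
+ * are NULL-checked first. A minimal illustrative caller (sketch only;
+ * real call sites decide when a correction is needed):
+ *
+ *	if (pv_is_proc_stack_bounds(ti, regs))
+ *		pv_correct_trap_psp_pcsp(regs, ti);
+ */
+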
+static inline void
+pv_correct_trap_psp_pcsp(struct pt_regs *regs, struct thread_info *thread_info)
+{
+ pv_cpu_ops.correct_trap_psp_pcsp(regs, thread_info);
+}
+static inline void
+pv_correct_scall_psp_pcsp(struct pt_regs *regs, struct thread_info *thread_info)
+{
+ pv_cpu_ops.correct_scall_psp_pcsp(regs, thread_info);
+}
+static inline void
+pv_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip)
+{
+ pv_cpu_ops.correct_trap_return_ip(regs, return_ip);
+}
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+#define RETURN_TO_USER_PSP_PCSP(thread_info) \
+ PV_RETURN_TO_USER_PSP_PCSP(thread_info)
+
+#define ttable_entry1 pv_ttable_entry1
+#define ttable_entry3 pv_ttable_entry3
+#define ttable_entry4 pv_ttable_entry4
+
+static inline void
+exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi,
+ e2k_usd_lo_t usd_lo, e2k_upsr_t upsr)
+{
+ pv_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr);
+}
+
+static inline void
+handle_deferred_traps_in_syscall(struct pt_regs *regs,
+ bool use_pt_regs, bool new_hs)
+{
+ pv_handle_deferred_traps_in_syscall(regs, use_pt_regs, new_hs);
+}
+
+static inline bool
+is_proc_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
+{
+ return pv_is_proc_stack_bounds(ti, regs);
+}
+static inline bool
+is_chain_stack_bounds(struct thread_info *ti, struct pt_regs *regs)
+{
+ return pv_is_chain_stack_bounds(ti, regs);
+}
+static inline void
+stack_bounds_trap_enable(void)
+{
+ pv_stack_bounds_trap_enable();
+}
+static inline void
+correct_trap_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info)
+{
+ pv_correct_trap_psp_pcsp(regs, thread_info);
+}
+static inline void
+correct_scall_psp_pcsp(struct pt_regs *regs, thread_info_t *thread_info)
+{
+ pv_correct_scall_psp_pcsp(regs, thread_info);
+}
+static inline void
+correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip)
+{
+ pv_correct_trap_return_ip(regs, return_ip);
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __E2K_PARAVIRT_TRAP_TABLE_H */
diff --git a/arch/e2k/include/asm/paravirt/traps.h b/arch/e2k/include/asm/paravirt/traps.h
new file mode 100644
index 0000000..ba37177
--- /dev/null
+++ b/arch/e2k/include/asm/paravirt/traps.h
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright (C) 2016 MCST
+ *
+ * Definition of paravirtualized kernel trap handling routines.
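+ *
+ * The handlers are dispatched through pv_cpu_ops, so on a
+ * paravirtualized kernel an interrupt, for example, travels roughly
+ * as follows (sketch of the chain defined below):
+ *
+ *	handle_interrupt(regs)
+ *	    -> pv_handle_interrupt(regs)
+ *	    -> pv_cpu_ops.handle_interrupt(regs)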
+ */ + +#ifndef _E2K_ASM_PARAVIRT_TRAPS_H +#define _E2K_ASM_PARAVIRT_TRAPS_H + +#include +#include + +#define pv_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs) \ +({ \ + u64 TIR; \ + \ + if (!paravirt_enabled()) { \ + TIR = native_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs); \ + } else { \ + TIR = kvm_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs); \ + } \ + TIR; \ +}) + +static inline void +pv_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + pv_cpu_ops.instr_page_fault(regs, ftype, async_instr); +} + +static inline unsigned long +pv_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return pv_cpu_ops.mmio_page_fault(regs, (struct trap_cellar *)tcellar); +} +static inline int +pv_do_hw_stack_bounds(struct pt_regs *regs, bool proc_bounds, bool chain_bounds) +{ + return pv_cpu_ops.do_hw_stack_bounds(regs, proc_bounds, chain_bounds); +} +static inline irqreturn_t +pv_handle_interrupt(struct pt_regs *regs) +{ + return pv_cpu_ops.handle_interrupt(regs); +} +static inline void +pv_init_guest_system_handlers_table(void) +{ + pv_cpu_ops.init_guest_system_handlers_table(); +} + +#ifdef CONFIG_PARAVIRT_GUEST + +#define TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs) \ + pv_TIR0_clear_false_exceptions(__TIR_hi, __nr_TIRs) + +static inline void +instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr) +{ + pv_instr_page_fault(regs, ftype, async_instr); +} +static inline int +do_hw_stack_bounds(struct pt_regs *regs, bool proc_bounds, bool chain_bounds) +{ + return pv_do_hw_stack_bounds(regs, proc_bounds, chain_bounds); +} +static inline void +handle_interrupt(struct pt_regs *regs) +{ + pv_handle_interrupt(regs); +} +static inline unsigned long +mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return pv_mmio_page_fault(regs, tcellar); +} +static inline void +init_guest_system_handlers_table(void) +{ + pv_init_guest_system_handlers_table(); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_ASM_PARAVIRT_TRAPS_H */ diff --git a/arch/e2k/include/asm/paravirt/v2p.h b/arch/e2k/include/asm/paravirt/v2p.h new file mode 100644 index 0000000..9d63662 --- /dev/null +++ b/arch/e2k/include/asm/paravirt/v2p.h @@ -0,0 +1,129 @@ +/* + * + * Heading of boot-time initialization. + * + * Copyright (C) 2011 Salavat Guiliazov + */ + +#ifndef _E2K_ASM_PARAVIRT_V2P_H +#define _E2K_ASM_PARAVIRT_V2P_H + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * boot-time virtual to physical conversions hooks. 
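+ *
+ * While the kernel may not yet run on its final virtual mappings, the
+ * ops table itself has to be reached through boot-time pointers;
+ * BOOT_PARAVIRT_GET_BOOT_OPS_FUNC() below does, in outline:
+ *
+ *	ops  = boot_native_get_vo_value(boot_ops);
+ *	func = boot_native_vp_to_pp(ops)->func_name;
+ *
+ * i.e. the table address is read as a boot-time value and converted
+ * to a directly usable pointer before the member is fetched.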
+ */ + +typedef struct pv_v2p_ops { + void *(*boot_kernel_va_to_pa)(void *virt_pnt, + unsigned long kernel_base); + void *(*boot_func_to_pa)(void *virt_pnt); + e2k_addr_t (*boot_vpa_to_pa)(e2k_addr_t vpa); + e2k_addr_t (*boot_pa_to_vpa)(e2k_addr_t pa); + e2k_addr_t (*vpa_to_pa)(e2k_addr_t vpa); + e2k_addr_t (*pa_to_vpa)(e2k_addr_t pa); +} pv_v2p_ops_t; + +extern pv_v2p_ops_t pv_v2p_ops; +extern pv_v2p_ops_t *cur_pv_v2p_ops; /* pointer to boot-time tables of OPs */ + +#define BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(boot_ops, func_name) \ +({ \ + typeof(boot_ops) ops = boot_native_get_vo_value(boot_ops); \ + typeof(boot_ops->func_name) func; \ + func = boot_native_vp_to_pp(ops)->func_name; \ + func; \ +}) + +#define BOOT_PARAVIRT_GET_V2P_FUNC(func_name) \ + BOOT_PARAVIRT_GET_BOOT_OPS_FUNC(cur_pv_v2p_ops, func_name) + +static inline void * +boot_pv_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_kernel_va_to_pa) + (virt_pnt, kernel_base); +} + +static inline void * +boot_pv_func_to_pa(void *virt_pnt) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_func_to_pa)(virt_pnt); +} + +static inline e2k_addr_t +boot_pv_vpa_to_pa(e2k_addr_t vpa) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_vpa_to_pa)(vpa); +} +static inline e2k_addr_t +boot_pv_pa_to_vpa(e2k_addr_t pa) +{ + return BOOT_PARAVIRT_GET_V2P_FUNC(boot_pa_to_vpa)(pa); +} + +static inline void * +boot_pv_va_to_pa(void *virt_pnt) +{ + return boot_pv_kernel_va_to_pa(virt_pnt, -1); +} + +static inline e2k_addr_t +pv_vpa_to_pa(e2k_addr_t vpa) +{ + return pv_v2p_ops.vpa_to_pa(vpa); +} +static inline e2k_addr_t +pv_pa_to_vpa(e2k_addr_t pa) +{ + return pv_v2p_ops.pa_to_vpa(pa); +} + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +static inline void * +boot_kernel_va_to_pa(void *virt_pnt, unsigned long kernel_base) +{ + return boot_pv_kernel_va_to_pa(virt_pnt, kernel_base); +} +static inline void * +boot_func_to_pa(void *virt_pnt) +{ + return boot_pv_func_to_pa(virt_pnt); +} + +static inline void * +boot_va_to_pa(void *virt_pnt) +{ + return boot_pv_va_to_pa(virt_pnt); +} + +static inline e2k_addr_t +boot_vpa_to_pa(e2k_addr_t vpa) +{ + return boot_pv_vpa_to_pa(vpa); +} +static inline e2k_addr_t +boot_pa_to_vpa(e2k_addr_t pa) +{ + return boot_pv_pa_to_vpa(pa); +} + +static inline e2k_addr_t +vpa_to_pa(e2k_addr_t vpa) +{ + return pv_vpa_to_pa(vpa); +} +static inline e2k_addr_t +pa_to_vpa(e2k_addr_t pa) +{ + return pv_pa_to_vpa(pa); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASSEMBLY__ */ + +#endif /* ! 
_E2K_ASM_PARAVIRT_V2P_H */ diff --git a/arch/e2k/include/asm/paravirt/vga.h b/arch/e2k/include/asm/paravirt/vga.h new file mode 100644 index 0000000..e523b7e --- /dev/null +++ b/arch/e2k/include/asm/paravirt/vga.h @@ -0,0 +1,52 @@ + +#ifndef _E2K_PARAVIRT_VGA_H_ +#define _E2K_PARAVIRT_VGA_H_ + +#include +#include +#include + +/* + * VGA screen support + * VGA Video Memory emulated as part of common guest VCPUs virtual memory + */ + +static inline void pv_scr_writew(u16 val, volatile u16 *addr) +{ + pv_io_ops.scr_writew(val, addr); +} +static inline u16 pv_scr_readw(volatile const u16 *addr) +{ + return pv_io_ops.scr_readw(addr); +} +static inline void pv_vga_writeb(u8 val, volatile u8 *addr) +{ + pv_io_ops.vga_writeb(val, addr); +} +static inline u8 pv_vga_readb(volatile const u8 *addr) +{ + return pv_io_ops.vga_readb(addr); +} + +#ifdef CONFIG_PARAVIRT_GUEST +static inline void scr_writew(u16 val, volatile u16 *addr) +{ + pv_scr_writew(val, addr); +} + +static inline u16 scr_readw(volatile const u16 *addr) +{ + return pv_scr_readw(addr); +} +static inline void vga_writeb(u8 val, volatile u8 *addr) +{ + pv_vga_writeb(val, addr); +} + +static inline u8 vga_readb(volatile const u8 *addr) +{ + return pv_vga_readb(addr); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_PARAVIRT_VGA_H_ */ diff --git a/arch/e2k/include/asm/parport.h b/arch/e2k/include/asm/parport.h new file mode 100644 index 0000000..b1d8681 --- /dev/null +++ b/arch/e2k/include/asm/parport.h @@ -0,0 +1,18 @@ +/* + * parport.h: ia32-specific parport initialisation + * + * Copyright (C) 1999, 2000 Tim Waugh + * + * This file should only be included by drivers/parport/parport_pc.c. + */ + +#ifndef _ASM_I386_PARPORT_H +#define _ASM_I386_PARPORT_H 1 + +static int parport_pc_find_isa_ports (int autoirq, int autodma); +static int parport_pc_find_nonpci_ports (int autoirq, int autodma) +{ + return parport_pc_find_isa_ports (autoirq, autodma); +} + +#endif /* !(_ASM_I386_PARPORT_H) */ diff --git a/arch/e2k/include/asm/pci.h b/arch/e2k/include/asm/pci.h new file mode 100644 index 0000000..94d29a6 --- /dev/null +++ b/arch/e2k/include/asm/pci.h @@ -0,0 +1,53 @@ +#ifndef _E2K_PCI_H +#define _E2K_PCI_H + +#ifdef __KERNEL__ + +#define HAVE_PCI_LEGACY 1 +#define HAVE_MULTIROOT_BUS_PCI_DOMAINS 1 /* each IOHUB has own */ + /* config space */ + +extern unsigned long pci_mem_start; +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM (pci_mem_start) +#define PCIBIOS_MAX_MEM_32 0xffffffffUL + +#define PCIBIOS_MIN_CARDBUS_IO 0x4000 + +#define PCI_ARCH_CACHE_LINE_SIZE 32 +/* Dynamic DMA mapping stuff. + * i386 has everything mapped statically. + */ + +#include +#include +#include +#include +#include + +/* The PCI address space does equal the physical memory + * address space. The networking and block device layers use + * this boolean for bounce buffer decisions. 
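+ *
+ * In other words, PCI_DMA_BUS_IS_PHYS == 1 tells those layers that a
+ * physical address may be handed to a device as a bus address, so no
+ * bounce buffering is needed for addressable memory (explanatory
+ * note, not part of the original header).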
+ */ +#define PCI_DMA_BUS_IS_PHYS (1) + +struct pci_raw_ops { + int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); + int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); +}; + +extern struct pci_raw_ops *raw_pci_ops; + +#define HAVE_PCI_MMAP +#define arch_can_pci_mmap_wc() 1 + +/* generic elbrus pci stuff */ +#include + +/* generic pci stuff */ +#include +#endif /* __KERNEL__ */ + +#endif /* _E2K_PCI_H */ diff --git a/arch/e2k/include/asm/percpu.h b/arch/e2k/include/asm/percpu.h new file mode 100644 index 0000000..9e99b81 --- /dev/null +++ b/arch/e2k/include/asm/percpu.h @@ -0,0 +1,74 @@ +#ifndef _E2K_PERCPU_H_ +#define _E2K_PERCPU_H_ + +#ifndef CONFIG_SMP +# define set_my_cpu_offset(off) +#else + +# include +# include + +# define __my_cpu_offset __my_cpu_offset +register unsigned long __my_cpu_offset DO_ASM_GET_GREG_MEMONIC( + MY_CPU_OFFSET_GREG); + +# define set_my_cpu_offset(off) do {__my_cpu_offset = (off); } while (0) + +# define this_cpu_read_1(pcp) __arch_this_cpu_read((pcp), "b") +# define this_cpu_read_2(pcp) __arch_this_cpu_read((pcp), "h") +# define this_cpu_read_4(pcp) __arch_this_cpu_read((pcp), "w") +# define this_cpu_read_8(pcp) __arch_this_cpu_read((pcp), "d") + +# define this_cpu_write_1(pcp, val) __arch_this_cpu_write((pcp), (val), "b") +# define this_cpu_write_2(pcp, val) __arch_this_cpu_write((pcp), (val), "h") +# define this_cpu_write_4(pcp, val) __arch_this_cpu_write((pcp), (val), "w") +# define this_cpu_write_8(pcp, val) __arch_this_cpu_write((pcp), (val), "d") + +/* Use relaxed atomics if they are available */ +# if CONFIG_CPU_ISET >= 5 +# define this_cpu_xchg_1(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "b") +# define this_cpu_xchg_2(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "h") +# define this_cpu_xchg_4(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "w") +# define this_cpu_xchg_8(pcp, nval) __arch_pcpu_atomic_xchg((nval), (pcp), "d") + +# define this_cpu_cmpxchg_1(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg((oval), (nval), (pcp), "b", 0x4) +# define this_cpu_cmpxchg_2(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg((oval), (nval), (pcp), "h", 0x5) +# define this_cpu_cmpxchg_4(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg_word((oval), (nval), (pcp)) +# define this_cpu_cmpxchg_8(pcp, oval, nval) \ + __arch_pcpu_atomic_cmpxchg_dword((oval), (nval), (pcp)) + +# define this_cpu_and_1(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "b", "ands")) +# define this_cpu_and_2(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "h", "ands")) +# define this_cpu_and_4(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "w", "ands")) +# define this_cpu_and_8(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "d", "andd")) + +# define this_cpu_or_1(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "b", "ors")) +# define this_cpu_or_2(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "h", "ors")) +# define this_cpu_or_4(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "w", "ors")) +# define this_cpu_or_8(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "d", "ord")) + +# define this_cpu_add_1(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "b", "adds")) +# define this_cpu_add_2(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "h", "adds")) +# define this_cpu_add_4(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "w", "adds")) +# define this_cpu_add_8(pcp, val) ((void) __arch_pcpu_atomic_op((val), (pcp), "d", 
"addd")) + +# define this_cpu_add_return_1(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "b", "adds") +# define this_cpu_add_return_2(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "h", "adds") +# define this_cpu_add_return_4(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "w", "adds") +# define this_cpu_add_return_8(pcp, val) __arch_pcpu_atomic_op((val), (pcp), "d", "addd") +# endif + +#endif + +#include + +/* For EARLY_PER_CPU_* definitions */ +#include + +DECLARE_PER_CPU(unsigned long, cpu_loops_per_jiffy); + +#endif /* _E2K_PERCPU_H_ */ + diff --git a/arch/e2k/include/asm/perf_event.h b/arch/e2k/include/asm/perf_event.h new file mode 100644 index 0000000..8f989f7 --- /dev/null +++ b/arch/e2k/include/asm/perf_event.h @@ -0,0 +1,55 @@ +#ifndef _ASM_E2K_PERF_EVENT_H +#define _ASM_E2K_PERF_EVENT_H + +#include +#include + +static inline void set_perf_event_pending(void) {} +static inline void clear_perf_event_pending(void) {} + +#define PERF_EVENT_INDEX_OFFSET 0 + +int perf_data_overflow_handle(struct pt_regs *); +int perf_instr_overflow_handle(struct pt_regs *); +void dimtp_overflow(struct perf_event *event); + +#define perf_arch_fetch_caller_regs perf_arch_fetch_caller_regs +static __always_inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, + unsigned long ip) +{ + SAVE_STACK_REGS(regs, current_thread_info(), false, false); + WARN_ON_ONCE(instruction_pointer(regs) != ip); +} + +#define ARCH_PERFMON_EVENT_MASK 0xffff +#define ARCH_PERFMON_OS (1 << 16) +#define ARCH_PERFMON_USR (1 << 17) +#define ARCH_PERFMON_ENABLED (1 << 18) + + +DECLARE_PER_CPU(struct perf_event * [4], cpu_events); + +/* + * Bitmask for perf_monitors_used + * + * DIM0 has all counters from DIM1 and some more. So events for + * DIM1 are marked with DIM0_DIM1, and the actual used monitor + * will be determined at runtime. 
+ */
+enum {
+	_DDM0 = 0,
+	_DDM1,
+	_DIM0,
+	_DIM1,
+	_DDM0_DDM1,
+	_DIM0_DIM1,
+	MAX_HW_MONITORS
+};
+#define DDM0		(1 << _DDM0)
+#define DDM1		(1 << _DDM1)
+#define DIM0		(1 << _DIM0)
+#define DIM1		(1 << _DIM1)
+#define DDM0_DDM1	(1 << _DDM0_DDM1)
+#define DIM0_DIM1	(1 << _DIM0_DIM1)
+
+#endif
diff --git a/arch/e2k/include/asm/perf_event_uncore.h b/arch/e2k/include/asm/perf_event_uncore.h
new file mode 100644
index 0000000..316a1a6
--- /dev/null
+++ b/arch/e2k/include/asm/perf_event_uncore.h
@@ -0,0 +1,116 @@
+#ifndef _ASM_E2K_PERF_EVENT_UNCORE_H
+#define _ASM_E2K_PERF_EVENT_UNCORE_H
+
+#include
+#include
+
+#define UNCORE_PMU_NAME_LEN 32
+
+#define E2K_UNCORE_HAS_IPCC \
+	(IS_MACHINE_E2S || IS_MACHINE_E8C)
+
+#define E2K_UNCORE_HAS_IOCC \
+	(IS_MACHINE_E2S || IS_MACHINE_ES2 || IS_MACHINE_E1CP)
+
+#define E2K_UNCORE_HAS_SIC	(HAS_MACHINE_L_SIC && \
+	(IS_MACHINE_E2S || IS_MACHINE_E8C || IS_MACHINE_E8C2))
+
+#define E2K_UNCORE_HAS_SIC_L3	(E2K_UNCORE_HAS_SIC && \
+	(IS_MACHINE_E8C || IS_MACHINE_E8C2))
+
+#define E2K_UNCORE_HAS_HMU \
+	(IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C)
+
+#define E2K_UNCORE_HAS_IOMMU \
+	(IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C)
+
+#define E2K_UNCORE_HAS_HC \
+	(IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C)
+
+#define E2K_UNCORE_HAS_MC \
+	(IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C)
+
+#define E2K_UNCORE_HAS_PREPIC \
+	(IS_MACHINE_E2C3 || IS_MACHINE_E12C || IS_MACHINE_E16C)
+
+enum {
+	E2K_UNCORE_IOCC = 1,
+	E2K_UNCORE_IPCC,
+	E2K_UNCORE_SIC,
+	E2K_UNCORE_HMU,
+	E2K_UNCORE_IOMMU,
+	E2K_UNCORE_HC,
+	E2K_UNCORE_MC,
+	E2K_UNCORE_PREPIC
+};
+
+extern int __init register_iocc_pmus(void);
+extern int __init register_ipcc_pmus(void);
+extern int __init register_sic_pmus(void);
+extern int __init register_hmu_pmus(void);
+extern int __init register_iommu_pmus(void);
+extern int __init register_hc_pmus(void);
+extern int __init register_mc_pmus(void);
+extern int __init register_prepic_pmus(void);
+
+extern int e2k_uncore_event_init(struct perf_event *event);
+
+extern int e2k_uncore_add(struct perf_event *event, int flags);
+extern void e2k_uncore_del(struct perf_event *event, int flags);
+extern void e2k_uncore_start(struct perf_event *event, int flags);
+extern void e2k_uncore_stop(struct perf_event *event, int flags);
+extern void e2k_uncore_read(struct perf_event *event);
+
+extern const struct attribute_group e2k_cpumask_attr_group;
+
+struct e2k_uncore_valid_events {
+	int first;
+	int last;
+};
+
+struct e2k_uncore {
+	char name[UNCORE_PMU_NAME_LEN];
+	int type;
+	int num_counters;
+	int node;
+	int idx_at_node;
+
+	/*
+	 * Array of valid event numbers.
+	 * Must be terminated with { -1, -1 }
+	 */
+	struct e2k_uncore_valid_events *valid_events;
+	int (*validate_event)(struct e2k_uncore *, struct hw_perf_event *);
+	u64 (*get_event)(struct hw_perf_event *);
+	int (*add_event)(struct perf_event *);
+
+	struct e2k_uncore_reg_ops *reg_ops;
+	struct pmu pmu;
+	struct perf_event *events[0];
+};
+
+/*
+ * We implement these functions to generalize access to the
+ * monitor registers ((void *) arguments are for flexibility).
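+ *
+ * A hedged usage sketch (the struct and its fields are real, the
+ * my_* helpers are hypothetical):
+ *
+ *	static struct e2k_uncore_reg_ops my_reg_ops = {
+ *		.get_cnt = my_get_cnt,
+ *		.set_cfg = my_set_cfg,
+ *		.set_cnt = my_set_cnt,
+ *	};
+ *
+ * so the generic e2k_uncore_add()/start()/stop()/read() paths go
+ * through reg_ops instead of touching monitor registers directly.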
+ */ +struct e2k_uncore_reg_ops { + u64 (*get_cnt)(struct e2k_uncore *uncore, struct hw_perf_event *); + void (*set_cfg)(struct e2k_uncore *uncore, + struct hw_perf_event *, bool); + void (*set_cnt)(struct e2k_uncore *uncore, struct hw_perf_event *, u64); +}; + +struct e2k_uncore_event_desc { + struct kobj_attribute attr; + const char *config; +}; + +extern ssize_t e2k_uncore_event_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf); +#define E2K_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, e2k_uncore_event_show, NULL), \ + .config = _config, \ +} + +#endif /* _ASM_E2K_PERF_EVENT_UNCORE_H */ diff --git a/arch/e2k/include/asm/pgalloc.h b/arch/e2k/include/asm/pgalloc.h new file mode 100644 index 0000000..0045da6 --- /dev/null +++ b/arch/e2k/include/asm/pgalloc.h @@ -0,0 +1,594 @@ +/* + * pgalloc.h: the functions and defines necessary to allocate + * page tables. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_PGALLOC_H +#define _E2K_PGALLOC_H + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG_PT_MODE +#undef DebugPT +#define DEBUG_PT_MODE 0 /* page table */ +#define DebugPT(...) DebugPrint(DEBUG_PT_MODE ,##__VA_ARGS__) + +#undef DEBUG_PA_MODE +#undef DebugPA +#define DEBUG_PA_MODE 0 /* page table allocation */ +#define DebugPA(...) DebugPrint(DEBUG_PA_MODE ,##__VA_ARGS__) + +extern struct cpuinfo_e2k cpu_data[NR_CPUS]; + +extern void __init *node_early_get_zeroed_page(int nid); + +extern int mem_init_done; + +#define check_pgt_cache() do { } while (0) + +static inline void pgd_ctor(pgd_t *pgd) +{ + int root_pt_index; + pgd_t *init_pgd; + + init_pgd = node_pgd_offset_kernel(numa_node_id(), 0UL); + + if (MMU_IS_SEPARATE_PT()) { + root_pt_index = pgd_index(USER_VPTB_BASE_ADDR); + } else { + /* + * Check for whether we use mm->pgd to store kernel's pgd. + * If (COPY_USER_PGD_TO_KERNEL_ROOT_PT && THERE_IS_DUP_KERNEL), + * then kernel's pgd are kept in per-cpu pagetables. + */ + if (!IS_ENABLED(CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT) || + !THERE_IS_DUP_KERNEL) + copy_kernel_pgd_range(pgd, init_pgd); + root_pt_index = pgd_index(MMU_UNITED_USER_VPTB); + } + /* One PGD entry is the VPTB self-map. 
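+	 * (Illustrative note, not in the original source: the
+	 * self-map points a pgd entry back at the pgd itself, so
+	 * page-table entries become reachable through the fixed
+	 * VPTB virtual window without walking physical pages.)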
*/ + vmlpt_pgd_set(&pgd[root_pt_index], pgd); +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd; + gfp_t gfp = GFP_KERNEL_ACCOUNT; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + + pgd = (pgd_t *) get_zeroed_page(gfp); + if (!pgd) + return NULL; + + pgd_ctor(pgd); + + return pgd; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + BUILD_BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); + free_page((unsigned long) pgd); +} + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + gfp_t gfp = GFP_KERNEL_ACCOUNT; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + + return (pud_t *) get_zeroed_page(gfp); +} + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + BUILD_BUG_ON(PTRS_PER_PUD * sizeof(pud_t) != PAGE_SIZE); + free_page((unsigned long) pud); +} + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + struct page *page; + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + page = alloc_page(gfp); + if (unlikely(!page)) + return NULL; + if (unlikely(!pgtable_pmd_page_ctor(page))) { + __free_page(page); + return NULL; + } + + return (pmd_t *) page_address(page); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + struct page *page = phys_to_page(__pa(pmd)); + + BUILD_BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); + pgtable_pmd_page_dtor(page); + __free_page(page); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + struct page *page; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (unlikely(!page)) + return NULL; + + return (pte_t *) page_address(page); +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + __free_page(phys_to_page(__pa(pte))); +} + +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) +{ + struct page *page; + + page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (unlikely(!page)) + return NULL; + + if (unlikely(!pgtable_pte_page_ctor(page))) { + __free_page(page); + return NULL; + } + + return page; +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page) +{ + BUILD_BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE); + pgtable_pte_page_dtor(pte_page); + __free_page(pte_page); +} + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID +static inline void +pud_page_validate(pgd_t *pgdp, pud_t *pudp) +{ + int i; + + if (pgd_val(*pgdp) != _PAGE_INIT_VALID) + return; + for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { + WARN_ON(pud_val(*pudp)); + *pudp = __pud(_PAGE_INIT_VALID); + } +} +#else /* ! 
CONFIG_MAKE_ALL_PAGES_VALID */ +static inline void +pud_page_validate(pgd_t *pgdp, pud_t *pudp) +{ + /* nothing to do */ +} +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + +static inline void +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + BUG_ON(mm != &init_mm); + pgd_set_k(pgd, pud); + virt_kernel_pgd_populate(mm, pgd); +} + +static inline void +pgd_populate_user(pgd_t *pgd, pud_t *pud) +{ + pud_page_validate(pgd, pud); + pgd_set_u(pgd, pud); +} + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline bool +pgd_populate_cpu_root_pt(struct mm_struct *mm, pgd_t *pgd) +{ + unsigned long pgd_ind; + pgd_t *cpu_pgd; + bool only_populate; + + if (MMU_IS_SEPARATE_PT()) + return false; + if (!THERE_IS_DUP_KERNEL) + return false; + if (current->active_mm != mm) + return false; + + preempt_disable(); + pgd_ind = pgd_to_index(pgd); + cpu_pgd = &cpu_kernel_root_pt[pgd_ind]; + only_populate = (pgd_none(*cpu_pgd) && !pgd_none(*pgd)); + /* + * FIXME: follow two IFs only for debug purpose to detect + * case of user PGD updating + */ + if (!pgd_none(*cpu_pgd) && + (_PAGE_CLEAR_ACCESSED(pgd_val(*pgd)) != + _PAGE_CLEAR_ACCESSED(pgd_val(*cpu_pgd)))) { + pr_err("%s(): updated CPU #%d kernel root pgd %px " + "from 0x%lx to 0x%lx\n", + __func__, raw_smp_processor_id(), + cpu_pgd, pgd_val(*cpu_pgd), pgd_val(*pgd)); + dump_stack(); + } + if (pgd_none_full(*pgd)) { + pr_err("%s(): cleared CPU #%d kernel root pgd %px " + "from 0x%lx to 0x%lx\n", + __func__, raw_smp_processor_id(), + cpu_pgd, pgd_val(*cpu_pgd), pgd_val(*pgd)); + dump_stack(); + } + if (pgd_val(*cpu_pgd) != pgd_val(*pgd)) { + *cpu_pgd = *pgd; + __flush_tlb_page(mm, (e2k_addr_t) cpu_pgd); + } + DebugPT("CPU #%d set kernel root pgd %px to 0x%lx\n", + smp_processor_id(), cpu_pgd, pgd_val(*cpu_pgd)); + preempt_enable(); + + return only_populate; +} +#else /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#define pgd_populate_cpu_root_pt(mm, pgd) false +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline void +pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + unsigned long mask; + + BUG_ON(mm == NULL); + if (unlikely(mm == &init_mm)) { + pgd_populate_kernel(mm, pgd, pud); + return; + } + + /* + * PGD should be set into two root page tables (main and + * CPU's) and in atomic style, so close interrupts to preserve + * from smp call for flush_tlb_all() between two settings, + * while the CPU restore CPU's root PGD from main. In this case + * CPU's PGD will be restored as populated when we wait for not + * yet populated state (see above pgd_populate_cpu_root_pt()) + */ + raw_local_irq_save(mask); + pgd_populate_user(pgd, pud); /* order of setting is */ + pgd_populate_cpu_root_pt(mm, pgd); /* significant, if IRQs */ + virt_kernel_pgd_populate(mm, pgd); /* do not close and flush */ + /* of TLB can restore */ + /* second PGD from first */ + raw_local_irq_restore(mask); +} + +static inline void +pgd_populate_not_present(struct mm_struct *mm, e2k_addr_t addr, pgd_t *pgd) +{ + unsigned long mask; + + /* See comment in pgd_populate() */ + raw_local_irq_save(mask); + validate_pgd_at(mm, addr, pgd); + pgd_populate_cpu_root_pt(mm, pgd); + raw_local_irq_restore(mask); +} + +#else /* ! 
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +static inline void +pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ +#ifdef CONFIG_VIRTUALIZATION + unsigned long mask; +#endif /* CONFIG_VIRTUALIZATION */ + + BUG_ON(mm == NULL); + if (unlikely(mm == &init_mm)) { + pgd_populate_kernel(mm, pgd, pud); + return; + } + +#ifdef CONFIG_VIRTUALIZATION + /* see comment above: pgd_populate() */ + /* for CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + raw_local_irq_save(mask); +#endif /* CONFIG_VIRTUALIZATION */ + + pgd_populate_user(pgd, pud); + +#ifdef CONFIG_VIRTUALIZATION + virt_kernel_pgd_populate(mm, pgd); + raw_local_irq_restore(mask); +#endif /* CONFIG_VIRTUALIZATION */ +} + +static inline void +pgd_populate_not_present(struct mm_struct *mm, e2k_addr_t addr, pgd_t *pgd) +{ + validate_pgd_at(mm, addr, pgd); +} + +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +static inline void +node_pgd_populate_kernel(int nid, struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + node_pgd_set_k(nid, pgd, pud); +} + +extern pud_t *node_pud_alloc_kernel(int nid, pgd_t *pgd, e2k_addr_t address); +extern pmd_t *node_pmd_alloc_kernel(int nid, pud_t *pud, e2k_addr_t address); +extern pte_t *node_pte_alloc_kernel(int nid, pmd_t *pmd, e2k_addr_t address); + +static inline pud_t * +node_early_pud_alloc(int nid, pgd_t *pgd, unsigned long address) +{ + pud_t *pud; + + if (!pgd_none(*pgd)) { + DebugPT("pud was allocated already " + "at addr 0x%lx\n", pgd_val(*pgd)); + return pud_offset(pgd, address); + } + pud = (pud_t *) node_early_get_zeroed_page(nid); + DebugPT("allocated pud at addr 0x%px\n", pud); + node_pgd_populate_kernel(nid, (&init_mm), pgd, pud); + return pud_offset(pgd, address); +} + +static inline pud_t * +early_pud_alloc(pgd_t *pgd, unsigned long address) +{ + return node_early_pud_alloc(numa_node_id(), pgd, address); +} + +static inline void +pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_set_k(pud, pmd); +} + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID +static inline void +pmd_page_validate(pud_t *pudp, pmd_t *pmdp) +{ + int i; + + if (pud_val(*pudp) != _PAGE_INIT_VALID) + return; + + for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { + WARN_ON(pmd_val(*pmdp)); + *pmdp = __pmd(_PAGE_INIT_VALID); + } +} +#else /* ! 
CONFIG_MAKE_ALL_PAGES_VALID */
+static inline void
+pmd_page_validate(pud_t *pudp, pmd_t *pmdp)
+{
+	/* nothing to do */
+}
+#endif	/* CONFIG_MAKE_ALL_PAGES_VALID */
+
+static inline void
+pud_populate_user(pud_t *pud, pmd_t *pmd)
+{
+	pmd_page_validate(pud, pmd);
+	pud_set_u(pud, pmd);
+}
+
+static inline void
+pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	BUG_ON(mm == NULL);
+	if (unlikely(mm == &init_mm)) {
+		pud_set_k(pud, pmd);
+		return;
+	}
+	pud_populate_user(pud, pmd);
+}
+
+static inline pmd_t *
+node_early_pmd_alloc(int nid, pud_t *pud, unsigned long address)
+{
+	pmd_t *pmd;
+
+	if (!pud_none(*pud)) {
+		DebugPT("pmd was allocated already at addr 0x%lx\n",
+			pud_val(*pud));
+		return pmd_offset(pud, address);
+	}
+	pmd = (pmd_t *) node_early_get_zeroed_page(nid);
+	DebugPT("allocated pmd at addr 0x%px\n", pmd);
+	pud_populate_kernel((&init_mm), pud, pmd);
+	return pmd_offset(pud, address);
+}
+
+static inline pmd_t *
+early_pmd_alloc(pud_t *pud, unsigned long address)
+{
+	return node_early_pmd_alloc(numa_node_id(), pud, address);
+}
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+	pmd_set_k(pmd, pte);
+}
+
+#ifdef CONFIG_MAKE_ALL_PAGES_VALID
+static inline void
+pte_page_validate(pmd_t *pmdp, pte_t *ptep)
+{
+	int i;
+
+	if (pmd_val(*pmdp) != _PAGE_INIT_VALID)
+		return;
+
+	for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
+		*ptep = pte_mkvalid(*ptep);
+}
+#else	/* ! CONFIG_MAKE_ALL_PAGES_VALID */
+static inline void
+pte_page_validate(pmd_t *pmdp, pte_t *ptep)
+{
+	/* nothing to do */
+}
+#endif	/* CONFIG_MAKE_ALL_PAGES_VALID */
+
+#define pmd_pgtable(pmd)	pmd_page(pmd)
+
+static inline void
+pmd_populate_user(pmd_t *pmd, pte_t *pte)
+{
+	pte_page_validate(pmd, pte);
+	pmd_set_u(pmd, pte);
+}
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+{
+	BUG_ON(mm == NULL);
+
+	if (unlikely(mm == &init_mm)) {
+		pmd_set_k(pmd, (pte_t *)page_address(pte));
+		return;
+	}
+	pmd_populate_user(pmd, page_address(pte));
+}
+
+static inline pte_t *
+node_early_pte_alloc(int nid, pmd_t *pmd, unsigned long address)
+{
+	pte_t *pte;
+
+	if (!pmd_none(*pmd)) {
+		DebugPT("pte was allocated already at addr 0x%lx\n",
+			pmd_val(*pmd));
+		return pte_offset_kernel(pmd, address);
+	}
+	pte = (pte_t *) node_early_get_zeroed_page(nid);
+	DebugPT("allocated pte at addr 0x%px\n", pte);
+	pmd_populate_kernel(&init_mm, pmd, pte);
+	return pte_offset_kernel(pmd, address);
+}
+
+static inline pte_t *
+early_pte_alloc(pmd_t *pmd, unsigned long address)
+{
+	return node_early_pte_alloc(numa_node_id(), pmd, address);
+}
+
+#ifdef CONFIG_NUMA
+extern int node_map_vm_area(int nid_from, nodemask_t nodes_to,
+		unsigned long address, unsigned long size);
+
+static inline int
+all_nodes_map_vm_area(int nid_from, unsigned long address, unsigned long size)
+{
+	return node_map_vm_area(nid_from, node_has_dup_kernel_map,
+					address, size);
+}
+
+static inline int
+all_other_nodes_map_vm_area(int nid_from, unsigned long address,
+		unsigned long size)
+{
+	return node_map_vm_area(nid_from, node_has_dup_kernel_map,
+					address, size);
+}
+
+extern void node_unmap_kernel_vm_area_noflush(nodemask_t nodes,
+		unsigned long address, unsigned long end);
+extern void node_unmap_vm_area_noflush(nodemask_t nodes,
+		struct vm_struct *area);
+
+static inline void
+all_nodes_unmap_kernel_vm_area_noflush(unsigned long start, unsigned long end)
+{
+	node_unmap_kernel_vm_area_noflush(node_has_dup_kernel_map,
+						start, end);
+}
+
+static inline void
+all_nodes_unmap_vm_area_noflush(struct vm_struct *area)
+{
+	node_unmap_vm_area_noflush(node_has_dup_kernel_map, area);
+}
+
+static inline nodemask_t
+get_node_has_dup_kernel_map(int nid_to_clear)
+{
+	nodemask_t nodes_map = node_has_dup_kernel_map;
+	int dup_nid = node_dup_kernel_nid(nid_to_clear);
+
+	if (nid_to_clear != dup_nid) {
+		node_clear(dup_nid, nodes_map);
+	} else {
+		node_clear(nid_to_clear, nodes_map);
+	}
+	return nodes_map;
+}
+
+static inline void
+all_other_nodes_unmap_vm_area_noflush(int the_nid, struct vm_struct *area)
+{
+	nodemask_t nodes_map = get_node_has_dup_kernel_map(the_nid);
+
+	node_unmap_vm_area_noflush(nodes_map, area);
+}
+extern void node_unmap_kmem_area(nodemask_t nodes,
+		unsigned long address, unsigned long size);
+
+static inline void
+all_nodes_unmap_kmem_area(unsigned long address, unsigned long size)
+{
+	node_unmap_kmem_area(node_has_dup_kernel_map, address, size);
+}
+
+static inline void
+all_other_nodes_unmap_kmem_area(int the_nid, unsigned long address,
+		unsigned long size)
+{
+	nodemask_t nodes_map = get_node_has_dup_kernel_map(the_nid);
+
+	node_unmap_kmem_area(nodes_map, address, size);
+}
+#else	/* ! CONFIG_NUMA */
+static inline int
+all_other_nodes_map_vm_area(int nid_from, unsigned long address,
+		unsigned long size)
+{
+	return 0;
+}
+#endif	/* CONFIG_NUMA */
+
+#endif /* _E2K_PGALLOC_H */
diff --git a/arch/e2k/include/asm/pgatomic.h b/arch/e2k/include/asm/pgatomic.h
new file mode 100644
index 0000000..e1bc921
--- /dev/null
+++ b/arch/e2k/include/asm/pgatomic.h
@@ -0,0 +1,130 @@
+/*
+ * E2K page table atomic update operations.
+ *
+ * Copyright 2001-2018 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_PGATOMIC_H
+#define _E2K_PGATOMIC_H
+
+/*
+ * This file contains the functions and defines necessary to modify and
+ * use the E2K page tables.
+ * NOTE: E2K has four levels of page tables, while Linux assumes that
+ * there are three levels of page tables.
+ */
+
+#include
+
+#include
+#include
+#include
+
+/*
+ * Atomic operations on page table items.
+ * WARNING: these values must be kept in agreement with the guest
+ * kernel, because they are used by hypercalls to support atomic
+ * modifications on the guest. So do not change or delete existing
+ * values; new operations and values may only be added.
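+ *
+ * Illustrative consequence (not stated in the sources): a hypercall
+ * passes these operations by numeric value, so reordering the enum
+ * below would silently break a host/guest pair built from different
+ * trees.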
+ */ +typedef enum pt_atomic_op { + ATOMIC_GET_AND_XCHG, + ATOMIC_GET_AND_CLEAR, + ATOMIC_SET_WRPROTECT, + ATOMIC_MODIFY_START, + ATOMIC_TEST_AND_CLEAR_YOUNG, + ATOMIC_TEST_AND_CLEAR_RELAXED, +} pt_atomic_op_t; + +static inline pgprotval_t +native_pt_set_wrprotect_atomic(pgprotval_t *pgprot) +{ + return __api_atomic_op(_PAGE_INIT_WRITEABLE, pgprot, d, + "andnd", RELAXED_MB); +} + +static inline pgprotval_t +native_pt_get_and_clear_atomic(pgprotval_t *pgprot) +{ + return __api_atomic_fetch_op(_PAGE_INIT_VALID, pgprot, + d, "andd", RELAXED_MB); +} + +static inline pgprotval_t +native_pt_get_and_xchg_atomic(pgprotval_t newval, pgprotval_t *pgprot) +{ + return __api_xchg_return(newval, pgprot, d, RELAXED_MB); +} + +static inline pgprotval_t +native_pt_clear_relaxed_atomic(pgprotval_t mask, pgprotval_t *pgprot) +{ + return __api_atomic_fetch_op(mask, pgprot, d, "andnd", RELAXED_MB); +} + +static inline pgprotval_t +native_pt_clear_young_atomic(pgprotval_t *pgprot) +{ + return __api_atomic_fetch_op(_PAGE_INIT_ACCESSED, pgprot, + d, "andnd", RELAXED_MB); +} + +static inline pgprotval_t +native_pt_modify_prot_atomic(pgprotval_t *pgprot) +{ + return __api_atomic_fetch_op(_PAGE_INIT_VALID, pgprot, + d, "andd", RELAXED_MB); +} + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization on pv_ops) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* paravirtualized host and guest kernel on pv_ops */ +#include +#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */ +/* native kernel without virtualization support */ +/* host kernel with virtualization support */ + +static inline pgprotval_t +pt_set_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_set_wrprotect_atomic(&pgprot->pgprot); +} + +static inline pgprotval_t +pt_get_and_clear_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_get_and_clear_atomic(&pgprot->pgprot); +} + +static inline pgprotval_t +pt_get_and_xchg_atomic(struct mm_struct *mm, unsigned long addr, + pgprotval_t newval, pgprot_t *pgprot) +{ + return native_pt_get_and_xchg_atomic(newval, &pgprot->pgprot); +} + +static inline pgprotval_t +pt_clear_relaxed_atomic(pgprotval_t mask, pgprot_t *pgprot) +{ + return native_pt_clear_relaxed_atomic(mask, &pgprot->pgprot); +} + +static inline pgprotval_t +pt_clear_young_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_clear_young_atomic(&pgprot->pgprot); +} + +static inline pgprotval_t +pt_modify_prot_atomic(struct mm_struct *mm, + unsigned long addr, pgprot_t *pgprot) +{ + return native_pt_modify_prot_atomic(&pgprot->pgprot); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#endif /* ! _E2K_PGATOMIC_H */ diff --git a/arch/e2k/include/asm/pgd.h b/arch/e2k/include/asm/pgd.h new file mode 100644 index 0000000..c9e6593 --- /dev/null +++ b/arch/e2k/include/asm/pgd.h @@ -0,0 +1,293 @@ +/* + * pgd.h: the functions and defines necessary to manage + * root level of page tables - pgd + * + * Copyright 2013 Salavat S. Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_PGD_H +#define _E2K_PGD_H + +#include + +#include +#include + +#undef DEBUG_PA_MODE +#undef DebugPA +#define DEBUG_PA_MODE 0 /* page table allocation */ +#define DebugPA(fmt, args...) 
\
+({ \
+	if (DEBUG_PA_MODE) \
+		pr_info(fmt, ##args); \
+})
+
+/*
+ * The pointer to the kernel root-level page table directory.
+ * The page table directory is allocated and created at boot time.
+ */
+
+#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+#else	/* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+typedef struct pg_dir {
+	pgd_t pg_dir[PTRS_PER_PGD];
+} pg_dir_t;
+extern pg_dir_t all_cpus_swapper_pg_dir[NR_CPUS];
+#define swapper_pg_dir	(all_cpus_swapper_pg_dir[0].pg_dir)
+#endif	/* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+
+#ifndef CONFIG_NUMA
+#define kernel_root_pt		((pgd_t *)swapper_pg_dir)
+#define boot_root_pt		boot_vp_to_pp(kernel_root_pt)
+#define node_pg_dir(nid)	((nid), &swapper_pg_dir)
+#define cpu_pg_dir(cpu)		kernel_root_pt
+#define the_cpu_pg_dir		cpu_pg_dir
+#define cpu_kernel_root_pt	cpu_pg_dir(dummy)
+#else	/* CONFIG_NUMA */
+#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT
+extern pgd_t __nodedata *all_nodes_pg_dir[MAX_NUMNODES];
+#else	/* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+extern pg_dir_t __nodedata *all_nodes_pg_dir[MAX_NUMNODES];
+#endif	/* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+
+#define node_pg_dir(nid)	(all_nodes_pg_dir[nid])
+
+#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT
+#define my_cpu_pg_dir \
+({ \
+	pgd_t *pgdp; \
+	int my_cpu = hard_smp_processor_id(); \
+ \
+	if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { \
+		pgdp = all_cpus_swapper_pg_dir[my_cpu].pg_dir; \
+	} else { \
+		pgdp = swapper_pg_dir; \
+	} \
+	pgdp; \
+})
+#define the_node_pg_dir(nid) \
+({ \
+	pg_dir_t *node_pgds; \
+	pg_dir_t *pg_dir; \
+ \
+	node_pgds = node_pg_dir(nid); \
+	pg_dir = &node_pgds[0]; \
+	pg_dir; \
+})
+#define the_node_kernel_root_pt(nid) \
+({ \
+	pg_dir_t *pg_dir; \
+	pgd_t *pgdp; \
+ \
+	pg_dir = the_node_pg_dir(nid); \
+	pgdp = pg_dir->pg_dir; \
+	pgdp; \
+})
+#define the_cpu_pg_dir(cpu) \
+({ \
+	int the_cpu = (cpu); \
+	int nid = cpu_to_node(the_cpu); \
+	pg_dir_t *pg_dir; \
+	pgd_t *pgdp; \
+ \
+	pg_dir = the_node_pg_dir(nid); \
+	if (!MMU_IS_SEPARATE_PT() && THERE_IS_DUP_KERNEL) { \
+		pgdp = pg_dir[cpu_to_cpuid(the_cpu)].pg_dir; \
+	} else { \
+		pgdp = pg_dir->pg_dir; \
+	} \
+	pgdp; \
+})
+#define boot_the_node_root_pg_dir(nid) \
+({ \
+	pg_dir_t *pg_dir; \
+ \
+	pg_dir = boot_the_node_vp_to_pp(nid, all_cpus_swapper_pg_dir); \
+	pg_dir; \
+})
+#define boot_the_node_root_pt(nid) \
+({ \
+	pg_dir_t *pg_dir; \
+	pgd_t *pgdp; \
+ \
+	pg_dir = boot_the_node_root_pg_dir(nid); \
+	pgdp = pg_dir->pg_dir; \
+	pgdp; \
+})
+#define boot_node_cpu_pg_dir(nid, cpuid) \
+({ \
+	pg_dir_t *pg_dir; \
+	pgd_t *pgdp; \
+ \
+	pg_dir = boot_the_node_root_pg_dir(nid); \
+	if (!MMU_IS_SEPARATE_PT() && BOOT_NODE_THERE_IS_DUP_KERNEL()) { \
+		pgdp = pg_dir[cpuid].pg_dir; \
+	} else { \
+		pgdp = pg_dir->pg_dir; \
+	} \
+	pgdp; \
+})
+#define boot_cpu_pg_dir(cpuid) \
+	boot_node_cpu_pg_dir(boot_numa_node_id(), cpuid)
+#define cpu_kernel_root_pt	my_cpu_pg_dir
+#define boot_cpu_kernel_root_pt	boot_cpu_pg_dir(boot_smp_processor_id())
+#define boot_node_root_pt	boot_the_node_root_pt(boot_numa_node_id())
+#define boot_root_pt		boot_cpu_kernel_root_pt
+#define kernel_root_pt		cpu_kernel_root_pt
+#else	/* !
CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#define boot_the_node_root_pt(nid) \ + boot_the_node_vp_to_pp(nid, swapper_pg_dir) +#define boot_node_root_pt boot_node_vp_to_pp(swapper_pg_dir) +#define boot_root_pt boot_node_root_pt +#define kernel_root_pt node_pg_dir(numa_node_id()) +#define cpu_pg_dir(cpu) kernel_root_pt +#define cpu_kernel_root_pt cpu_pg_dir(dummy) +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#endif /* ! CONFIG_NUMA */ + +static inline void +clear_pgd_range(pgd_t *dst_pgd, int start_index, int end_index) +{ + int index; + + BUG_ON(start_index > PTRS_PER_PGD); + BUG_ON(end_index > PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT() && end_index > USER_PTRS_PER_PGD); + for (index = start_index; index < end_index; index++) { + DebugPA("clear_pgd_range() clear pgd #%d 0x%px = 0x%lx\n", + index, + &dst_pgd[index], pgd_val(dst_pgd[index])); + dst_pgd[index] = __pgd(0); + } +} + +static inline void +copy_pgd_range(pgd_t *dst_pgd, pgd_t *src_pgd, int start_index, int end_index) +{ + int index; + + BUG_ON(start_index > PTRS_PER_PGD); + BUG_ON(end_index > PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT() && end_index > USER_PTRS_PER_PGD); + for (index = start_index; index < end_index; index++) { + dst_pgd[index] = src_pgd[index]; + DebugPA("copy_pgd_range() copy pgd #%d 0x%px = 0x%lx to " + "pgd 0x%px\n", + index, + &src_pgd[index], pgd_val(src_pgd[index]), + &dst_pgd[index]); + } +} + +static inline void +copy_kernel_pgd_range(pgd_t *dst_pgd, pgd_t *src_pgd) +{ + copy_pgd_range(dst_pgd, src_pgd, USER_PTRS_PER_PGD, PTRS_PER_PGD); +} + +static inline void +set_pgd_range(pgd_t *dst_pgd, pgd_t pgd_to_set, int start_index, int end_index) +{ + int index; + + BUG_ON(start_index > PTRS_PER_PGD); + BUG_ON(end_index > PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT() && end_index > USER_PTRS_PER_PGD); + for (index = start_index; index < end_index; index++) { + dst_pgd[index] = pgd_to_set; + DebugPA("set_pgd_range() set pgd #%d 0x%px to 0x%lx\n", + index, + &dst_pgd[index], pgd_val(pgd_to_set)); + } +} + +static inline void +set_kernel_pgd_range(pgd_t *dst_pgd, pgd_t pgd_to_set) +{ + set_pgd_range(dst_pgd, pgd_to_set, USER_PTRS_PER_PGD, PTRS_PER_PGD); +} + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +static inline void +copy_one_user_pgd_to_kernel_pgd(pgd_t *kernel_pgd, pgd_t *user_pgd, int index) +{ + BUG_ON(index >= USER_PTRS_PER_PGD); + BUG_ON(MMU_IS_SEPARATE_PT()); + kernel_pgd[index] = user_pgd[index]; + DebugPA("copy_one_user_pgd_to_kernel_pgd() CPU #%d copy one user pgd " + "#%d 0x%px = 0x%lx to kernel root pt 0x%px\n", + raw_smp_processor_id(), index, + &user_pgd[index], pgd_val(user_pgd[index]), + &kernel_pgd[index]); +} +static inline void +copy_user_pgd_to_kernel_pgd_addr(pgd_t *kernel_pgd, pgd_t *user_pgd, + e2k_addr_t addr) +{ + copy_one_user_pgd_to_kernel_pgd(kernel_pgd, user_pgd, + pgd_index(addr)); +} +static inline void +copy_user_pgd_to_kernel_root_pt_addr(pgd_t *user_pgd, e2k_addr_t addr) +{ + copy_user_pgd_to_kernel_pgd_addr(cpu_kernel_root_pt, user_pgd, + addr); +} + +static inline void +copy_user_pgd_to_kernel_pgd_range(pgd_t *kernel_pgd, pgd_t *user_pgd, + int start_index, int end_index) +{ +#if DEBUG_PA_MODE + int index; +#endif + BUG_ON(start_index >= USER_PTRS_PER_PGD); + BUG_ON(end_index > USER_PTRS_PER_PGD); + BUG_ON(start_index >= end_index); + BUG_ON(MMU_IS_SEPARATE_PT()); +#if DEBUG_PA_MODE + for (index = start_index; index < end_index; index++) + 
DebugPA("copy_user_pgd_to_kernel_pgd_range() CPU #%d copy " + "user pgd #%d 0x%px = 0x%lx to kernel root pt 0x%px\n", + raw_smp_processor_id(), index, + &user_pgd[index], pgd_val(user_pgd[index]), + &kernel_pgd[index]); +#endif + memcpy(&kernel_pgd[start_index], &user_pgd[start_index], + sizeof(pgd_t) * (end_index - start_index)); +} + +static inline void +copy_user_pgd_to_kernel_pgd_addr_range(pgd_t *kernel_pgd, pgd_t *user_pgd, + e2k_addr_t start_addr, e2k_addr_t end_addr) +{ + copy_user_pgd_to_kernel_pgd_range(kernel_pgd, user_pgd, + pgd_index(start_addr), + pgd_index(_PAGE_ALIGN_DOWN(end_addr, PGDIR_SIZE))); +} +static inline void +copy_user_pgd_to_kernel_root_pt_addr_range(pgd_t *user_pgd, + e2k_addr_t start_addr, e2k_addr_t end_addr) +{ + copy_user_pgd_to_kernel_pgd_addr_range(cpu_kernel_root_pt, user_pgd, + start_addr, end_addr); +} + +static inline void +copy_user_pgd_to_kernel_pgd(pgd_t *kernel_pgd, pgd_t *user_pgd) +{ + copy_user_pgd_to_kernel_pgd_range(kernel_pgd, user_pgd, + 0, USER_PTRS_PER_PGD); +} + +static inline void +copy_user_pgd_to_kernel_root_pt(pgd_t *user_pgd) +{ + copy_user_pgd_to_kernel_pgd(cpu_kernel_root_pt, user_pgd); +} +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + +#endif /* _E2K_PGD_H */ diff --git a/arch/e2k/include/asm/pgtable-v2.h b/arch/e2k/include/asm/pgtable-v2.h new file mode 100644 index 0000000..cdfc6d9 --- /dev/null +++ b/arch/e2k/include/asm/pgtable-v2.h @@ -0,0 +1,412 @@ +/* + * E2K ISET V2-V5 page table structure and common definitions. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_PGTABLE_V2_H +#define _ASM_E2K_PGTABLE_V2_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V2-V5 page tables. + * NOTE: E2K has four levels of page tables. + */ + +#include +#include + +#define E2K_MAX_PHYS_BITS_V2 40 /* max. number of physical address */ + /* bits (architected) */ + +#ifndef __ASSEMBLY__ + +/* + * PTE format + */ + +#define _PAGE_W_BIT_V2 1 /* bit # of Writable */ +#define _PAGE_CD1_BIT_V2 4 /* right bit of Cache disable */ +#define _PAGE_CD2_BIT_V2 9 /* left bit of Cache disable */ +#define _PAGE_A_HW_BIT_V2 5 /* bit # of Accessed Page */ +#define _PAGE_D_BIT_V2 6 /* bit # of Page Dirty */ +#define _PAGE_HUGE_BIT_V2 7 /* bit # of Page Size */ +#define _PAGE_AVAIL_BIT_V2 11 /* prog bit Page Available */ +#define _PAGE_PFN_SHIFT_V2 12 /* shift of PFN field */ +#define _PAGE_CU_BITS_V2 48 /* bits # of Compilation Unit */ + +#define _PAGE_P_V2 0x0000000000000001ULL /* Page Present bit */ +#define _PAGE_W_V2 0x0000000000000002ULL /* Writable (0 - only read) */ +#define _PAGE_UU2_V2 0x0000000000000004ULL /* unused bit # 2 */ +#define _PAGE_PWT_V2 0x0000000000000008ULL /* Write Through */ +#define _PAGE_CD1_V2 (1UL << _PAGE_CD1_BIT_V2) /* 0x0000000000000010 */ + /* Cache disable (right bit) */ +#define _PAGE_A_HW_V2 (1UL << _PAGE_A_HW_BIT_V2) /* Accessed Page */ +#define _PAGE_D_V2 (1UL << _PAGE_D_BIT_V2) /* Page Dirty */ +#define _PAGE_HUGE_V2 0x0000000000000080ULL /* Page Size */ +#define _PAGE_G_V2 0x0000000000000100ULL /* Global Page */ +#define _PAGE_CD2_V2 (1UL << _PAGE_CD2_BIT_V2) /* 0x0000000000000200 */ + /* Cache disable (left bit) */ +#define _PAGE_NWA_V2 0x0000000000000400ULL /* Prohibit address writing */ +/* + * The _PAGE_PROTNONE bit is set only when the _PAGE_PRESENT bit + * is cleared, so we can use almost any bits for it. Must make + * sure though that pte_modify() will work with _PAGE_PROTNONE. 
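+ *
+ * Here the chosen bit is _PAGE_NWA_V2 (see _PAGE_PROTNONE_V2 just
+ * below): a PROT_NONE pte is never present, so the hardware never
+ * sees the bit in its "prohibit address writing" meaning (explanatory
+ * note, not part of the original comment).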
+ */ +#define _PAGE_PROTNONE_V2 _PAGE_NWA_V2 +#define _PAGE_AVAIL_V2 (1UL << _PAGE_AVAIL_BIT_V2) +#define _PAGE_SPECIAL_V2 _PAGE_AVAIL_V2 +#define _PAGE_GFN_V2 _PAGE_AVAIL_V2 /* Page is mapped to guest */ + /* physical memory */ +#define _PAGE_PFN_V2 0x000000fffffff000ULL /* Physical Page Number */ +#define _PAGE_VALID_V2 0x0000010000000000ULL /* Valid Page */ +#define _PAGE_PV_V2 0x0000020000000000ULL /* PriVileged Page */ +#define _PAGE_INT_PR_V2 0x0000040000000000ULL /* Integer address access */ + /* Protection */ +#define _PAGE_NON_EX_V2 0x0000080000000000ULL /* Non Executable Page */ +#define _PAGE_RES_V2 0x0000f00000000000ULL /* Reserved bits */ +#define _PAGE_RES_44_V2 0x0000100000000000ULL /* SG bit was previously */ +#define _PAGE_SEC_MAP_V2 0x0000200000000000ULL /* Secondary space mapping */ + /* Software only bit */ +#define _PAGE_INTL_PR_V2 0x0000400000000000ULL /* used as Intel PR in TLB */ + /* should be 1 for Elbrus */ +#define _PAGE_INTL_WP_V2 0x0000800000000000ULL /* used as Intel WR in TLB */ + /* should be 0 for Elbrus */ +#define _PAGE_A_SW_V2 (1UL << _PAGE_A_SW_BIT_V2) /* Accessed Page */ + /* (software emulation) */ +#define _PAGE_C_UNIT_V2 0xffff000000000000ULL /* Compilation Unit */ +#define _PAGE_MMIO_SW_V2 0x0c00000000000000ULL /* pte is MMIO software flag */ + +/* + * #76626 - hardware access bit should always be set. So we do not + * touch it and use software bit for things like pte_mkyoung(). + */ +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) && \ + defined(CONFIG_CPU_ES2) +# define _PAGE_A_SW_BIT_V2 47 /* bit # of Accessed Page */ + /* (software emulated) */ +# define _PAGE_A_BIT_V2 (cpu_has(CPU_HWBUG_PAGE_A) ? \ + _PAGE_A_SW_BIT_V2 : _PAGE_A_HW_BIT_V2) +#else +# define _PAGE_A_SW_BIT_V2 _PAGE_A_HW_BIT_V2 +# define _PAGE_A_BIT_V2 _PAGE_A_HW_BIT_V2 +#endif +#define _PAGE_A_V2 (1UL << _PAGE_A_BIT_V2) /* Accessed Page */ + +/* Cache disable flags */ +#define _PAGE_CD_MASK_V2 (_PAGE_CD1_V2 | _PAGE_CD2_V2) +#define _PAGE_CD_VAL_V2(x) ((x & 0x1ULL) << _PAGE_CD1_BIT_V2 | \ + (x & 0x2ULL) << (_PAGE_CD2_BIT_V2 - 1)) +#define _PAGE_CD_EN_V2 _PAGE_CD_VAL_V2(0UL) /* all caches enabled */ +#define _PAGE_CD_D1_DIS_V2 _PAGE_CD_VAL_V2(1UL) /* DCACHE1 disabled */ +#define _PAGE_CD_D_DIS_V2 _PAGE_CD_VAL_V2(2UL) /* DCACHE1, DCACHE2 disabled */ +#define _PAGE_CD_DIS_V2 _PAGE_CD_VAL_V2(3UL) /* DCACHE1, DCACHE2, ECACHE */ + /* disabled */ +#define _PAGE_PWT_DIS_V2 0UL /* Page Write Through */ + /* disabled */ +#define _PAGE_PWT_EN_V2 _PAGE_PWT_V2 /* Page Write Through */ + /* enabled */ + +/* some useful PT entries protection basis values */ +#define _PAGE_KERNEL_RX_NOT_GLOB_V2 \ + (_PAGE_P_V2 | _PAGE_VALID_V2 | \ + _PAGE_PV_V2 | _PAGE_A_HW_V2) +#define _PAGE_KERNEL_RO_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V2 | _PAGE_NON_EX_V2) +#define _PAGE_KERNEL_RWX_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V2 | \ + _PAGE_W_V2 | _PAGE_D_V2) +#define _PAGE_KERNEL_RW_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RWX_NOT_GLOB_V2 | _PAGE_NON_EX_V2) +#define _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V2 \ + (_PAGE_KERNEL_RW_NOT_GLOB_V2 | _PAGE_HUGE_V2) +#define _PAGE_KERNEL_RX_GLOB_V2 \ + (_PAGE_KERNEL_RX_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_RO_GLOB_V2 \ + (_PAGE_KERNEL_RO_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_RWX_GLOB_V2 \ + (_PAGE_KERNEL_RWX_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_RW_GLOB_V2 \ + (_PAGE_KERNEL_RW_NOT_GLOB_V2 | _PAGE_G_V2) +#define _PAGE_KERNEL_HUGE_RW_GLOB_V2 \ + (_PAGE_KERNEL_HUGE_RW_NOT_GLOB_V2 | _PAGE_G_V2) +#ifdef CONFIG_GLOBAL_CONTEXT +#define _PAGE_KERNEL_RX_V2 
_PAGE_KERNEL_RX_GLOB_V2 +#define _PAGE_KERNEL_RO_V2 _PAGE_KERNEL_RO_GLOB_V2 +#define _PAGE_KERNEL_RWX_V2 _PAGE_KERNEL_RWX_GLOB_V2 +#define _PAGE_KERNEL_RW_V2 _PAGE_KERNEL_RW_GLOB_V2 +#define _PAGE_KERNEL_HUGE_RW_V2 _PAGE_KERNEL_HUGE_RW_GLOB_V2 +#else /* ! CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_RX_V2 _PAGE_KERNEL_RX_NOT_GLOB_V2 +#define _PAGE_KERNEL_RO_V2 _PAGE_KERNEL_RO_NOT_GLOB_V2 +#define _PAGE_KERNEL_RWX_V2 _PAGE_KERNEL_RWX_NOT_GLOB_V2 +#define _PAGE_KERNEL_RW_V2 _PAGE_KERNEL_RW_NOT_GLOB_V2 +#define _PAGE_KERNEL_HUGE_RW_V2 _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V2 +#endif /* CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_V2 _PAGE_KERNEL_RW_V2 +#define _PAGE_KERNEL_HUGE_V2 _PAGE_KERNEL_HUGE_RW_V2 +#define _PAGE_KERNEL_IMAGE_V2 _PAGE_KERNEL_RX_V2 +#define _PAGE_KERNEL_MODULE_V2 _PAGE_KERNEL_RWX_V2 +#define _PAGE_KERNEL_PT_V2 _PAGE_KERNEL_V2 +#define _PAGE_USER_PT_V2 _PAGE_KERNEL_RW_NOT_GLOB_V2 + +/* convert physical address to page frame number for PTE */ +#define _PAGE_PADDR_TO_PFN_V2(phys_addr) \ + (((e2k_addr_t)phys_addr) & _PAGE_PFN_V2) + +/* convert the page frame number from PTE to physical address */ +#define _PAGE_PFN_TO_PADDR_V2(pte_val) \ + (((e2k_addr_t)(pte_val) & _PAGE_PFN_V2)) + +/* get/set pte Compilation Unit Index field */ +#define _PAGE_INDEX_TO_CUNIT_V2(index) \ + (((pteval_t)(index) << _PAGE_CU_BITS_V2) & _PAGE_C_UNIT_V2) +#define _PAGE_INDEX_FROM_CUNIT_V2(prot) \ + (((prot) & _PAGE_C_UNIT_V2) >> _PAGE_CU_BITS_V2) +#define SET_PAGE_CUI_V2(pte_val, cui) \ + (((pte_val) & ~_PAGE_C_UNIT_V2) | _PAGE_INDEX_TO_CUNIT_V2(cui)) + +/* PTE flags mask to can update/reduce and restricted to update */ +#define _PAGE_CHG_MASK_V2 (_PAGE_PFN_V2 | _PAGE_A_HW_V2 | _PAGE_A_V2 | \ + _PAGE_D_V2 | _PAGE_SPECIAL_V2 | \ + _PAGE_CD1_V2 | _PAGE_CD2_V2 | _PAGE_PWT_V2) +#define _HPAGE_CHG_MASK_V2 (_PAGE_CHG_MASK_V2 | _PAGE_HUGE_V2) +#define _PROT_REDUCE_MASK_V2 (_PAGE_P_V2 | _PAGE_W_V2 | _PAGE_A_HW_V2 | \ + _PAGE_A_V2 | _PAGE_D_V2 | _PAGE_VALID_V2 | \ + _PAGE_G_V2 | \ + _PAGE_CD_MASK_V2 | _PAGE_PWT_V2) +#define _PROT_RESTRICT_MASK_V2 (_PAGE_PV_V2 | _PAGE_NON_EX_V2 | \ + _PAGE_INT_PR_V2) +static inline pteval_t +get_pte_val_v2_changeable_mask(void) +{ + return _PAGE_CHG_MASK_V2; +} +static inline pteval_t +get_huge_pte_val_v2_changeable_mask(void) +{ + return _HPAGE_CHG_MASK_V2; +} +static inline pteval_t +get_pte_val_v2_reduceable_mask(void) +{ + return _PROT_REDUCE_MASK_V2; +} +static inline pteval_t +get_pte_val_v2_restricted_mask(void) +{ + return _PROT_RESTRICT_MASK_V2; +} + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_v2(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_V2); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_V2); + if (uni_flags & UNI_PAGE_PRIV) + pte_flags |= (_PAGE_PV_V2); + if (uni_flags & UNI_PAGE_VALID) + pte_flags |= (_PAGE_VALID_V2); + if (uni_flags & UNI_PAGE_PROTECT) + pte_flags |= (_PAGE_INT_PR_V2); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_V2); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_V2); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_V2); + if (uni_flags & UNI_PAGE_GLOBAL) + pte_flags |= (_PAGE_G_V2); + if (uni_flags & UNI_PAGE_NWA) + pte_flags |= (_PAGE_NWA_V2); + if (uni_flags & UNI_PAGE_NON_EX) + pte_flags |= (_PAGE_NON_EX_V2); + if (uni_flags & UNI_PAGE_PROTNONE) + pte_flags |= (_PAGE_PROTNONE_V2); + if (uni_flags & UNI_PAGE_AVAIL) + pte_flags |= (_PAGE_AVAIL_V2); + if (uni_flags & UNI_PAGE_SW_ACCESS) + pte_flags |= 
(_PAGE_A_SW_V2); + if (uni_flags & UNI_PAGE_SPECIAL) + pte_flags |= (_PAGE_SPECIAL_V2); + if (uni_flags & UNI_PAGE_GFN) + pte_flags |= (_PAGE_GFN_V2); + if (uni_flags & UNI_PAGE_ACCESSED) + pte_flags |= (_PAGE_A_V2); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_V2); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_CD_MASK_V2 | _PAGE_PWT_V2); + + BUG_ON(pte_flags == 0); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_v2_flags(const uni_pteval_t uni_flags) +{ + return covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} +static inline pteval_t +get_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} +static inline bool +test_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_v2_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} +static inline pteval_t +clear_pte_val_v2_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_v2(uni_flags); +} + +static inline pteval_t +convert_kernel_text_pte_val_v2_to_protected(pteval_t kernel_text_pte_val, + e2k_addr_t cui) +{ + return SET_PAGE_CUI_V2(kernel_text_pte_val, cui); +} +static inline pte_mem_type_t get_pte_val_v2_memory_type(pteval_t pte_val) +{ + pteval_t caches_mask; + + caches_mask = pte_val & (_PAGE_CD_MASK_V2 | _PAGE_PWT_V2); + + /* convert old PTE style fields to new PTE memory type + * see iset 8.2.4. 2) + * + * Use the same default values as what was used in older + * kernels in pgprot_noncached()/pgprot_writecombine(). */ + if (caches_mask & _PAGE_PWT_V2) + return EXT_CONFIG_MT; + else if (caches_mask & _PAGE_CD_MASK_V2) + return GEN_NON_CACHE_MT; + else + return GEN_CACHE_MT; +} + +static inline pteval_t +set_pte_val_v2_memory_type(pteval_t pte_val, pte_mem_type_t memory_type) +{ + pteval_t caches_mask; + + /* convert new PTE style memory type to old PTE caches mask */ + /* see iset 8.2.4. 2) */ + if (memory_type == GEN_CACHE_MT || memory_type == EXT_CACHE_MT) + caches_mask = _PAGE_CD_EN_V2 | _PAGE_PWT_DIS_V2; + else if (memory_type == GEN_NON_CACHE_MT || memory_type == EXT_PREFETCH_MT) + caches_mask = _PAGE_CD_DIS_V2 | _PAGE_PWT_DIS_V2; + else if (memory_type == EXT_NON_PREFETCH_MT || memory_type == EXT_CONFIG_MT || + memory_type == GEN_NON_CACHE_ORDERED_MT) + caches_mask = _PAGE_CD_DIS_V2 | _PAGE_PWT_EN_V2; + else + BUG(); + pte_val &= ~(_PAGE_CD_MASK_V2 | _PAGE_PWT_V2); + pte_val |= caches_mask; + return pte_val; +} + +/* + * Encode and de-code a swap entry + * + * Format of swap offset: + * if ! 
(CONFIG_MAKE_ALL_PAGES_VALID): + * bits 20-63: swap offset + * else if (CONFIG_MAKE_ALL_PAGES_VALID) + * bits 20-39: low part of swap offset + * bit 40 : _PAGE_VALID (must be one) + * bits 41-63: hi part of swap offset + */ +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +static inline unsigned long +get_swap_offset_v2(swp_entry_t swap_entry) +{ + return swap_entry.val >> __SWP_OFFSET_SHIFT; +} +static inline swp_entry_t +create_swap_entry_v2(unsigned long type, unsigned long offset) +{ + swp_entry_t swap_entry; + + swap_entry.val = type << __SWP_TYPE_SHIFT; + swap_entry.val |= (offset << __SWP_OFFSET_SHIFT); + + return swap_entry; +} +static inline pte_t +convert_swap_entry_to_pte_v2(swp_entry_t swap_entry) +{ + pte_t pte; + + pte_val(pte) = swap_entry.val; + return pte; +} +#else /* CONFIG_MAKE_ALL_PAGES_VALID */ +# define INSERT_VALID(off) (((off) & (_PAGE_VALID_V2 - 1UL)) | \ + (((off) & ~(_PAGE_VALID_V2 - 1UL)) << 1)) +# define REMOVE_VALID(off) (((off) & (_PAGE_VALID_V2 - 1UL)) | \ + (((off >> 1) & ~(_PAGE_VALID_V2 - 1UL)))) +static inline unsigned long +insert_valid_bit_to_offset(unsigned long offset) +{ + return (offset & (_PAGE_VALID_V2 - 1UL)) | + ((offset & ~(_PAGE_VALID_V2 - 1UL)) << 1); +} +static inline unsigned long +remove_valid_bit_from_entry(swp_entry_t swap_entry) +{ + unsigned long entry = swap_entry.val; + + return (entry & (_PAGE_VALID_V2 - 1UL)) | + ((entry >> 1) & ~(_PAGE_VALID_V2 - 1UL)); +} +# define __swp_offset_v2(entry) (REMOVE_VALID((entry).val) >> \ + __SWP_OFFSET_SHIFT) +# define __swp_entry_v2(type, off) ((swp_entry_t) { \ + (((type) << __SWP_TYPE_SHIFT) | \ + INSERT_VALID(((off) << __SWP_OFFSET_SHIFT))) }) +# define __swp_entry_to_pte_v2(entry) ((pte_t) { (entry).val | _PAGE_VALID }) +static inline unsigned long +get_swap_offset_v2(swp_entry_t swap_entry) +{ + unsigned long entry = remove_valid_bit_from_entry(swap_entry); + + return entry >> __SWP_OFFSET_SHIFT; +} +static inline swp_entry_t +create_swap_entry_v2(unsigned long type, unsigned long offset) +{ + swp_entry_t swap_entry; + + swap_entry.val = type << __SWP_TYPE_SHIFT; + swap_entry.val |= + insert_valid_bit_to_offset(offset << __SWP_OFFSET_SHIFT); + + return swap_entry; +} +static inline pte_t +convert_swap_entry_to_pte_v2(swp_entry_t swap_entry) +{ + pte_t pte; + + pte_val(pte) = swap_entry.val | _PAGE_VALID_V2; + return pte; +} +#endif /* ! CONFIG_MAKE_ALL_PAGES_VALID */ + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_PGTABLE_V2_H */ diff --git a/arch/e2k/include/asm/pgtable-v6.h b/arch/e2k/include/asm/pgtable-v6.h new file mode 100644 index 0000000..e4174c2 --- /dev/null +++ b/arch/e2k/include/asm/pgtable-v6.h @@ -0,0 +1,339 @@ +/* + * E2K ISET V6 page table structure and common definitions. + * + * Copyright 2017 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _ASM_E2K_PGTABLE_V6_H +#define _ASM_E2K_PGTABLE_V6_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K ISET V6 page tables. + * NOTE: E2K has four levels of page tables. 
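+ * (These are the pgd, pud, pmd and pte levels used throughout
+ * arch/e2k; explanatory note, not part of the original comment.)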
+ */ + +#include +#include + +#ifndef __ASSEMBLY__ + +/* + * PTE-V6 format + */ + +/* numbers of PTE's bits */ +#define _PAGE_P_BIT_V6 0 /* Present */ +#define _PAGE_W_BIT_V6 1 /* Writable */ +#define _PAGE_PV_BIT_V6 2 /* PriVileged */ +#define _PAGE_VALID_BIT_V6 3 /* Valid */ +#define _PAGE_INT_PR_BIT_V6 4 /* PRotected */ +#define _PAGE_A_HW_BIT_V6 5 /* page Accessed */ +#define _PAGE_D_BIT_V6 6 /* page Dirty */ +#define _PAGE_HUGE_BIT_V6 7 /* huge Page Size */ +#define _PAGE_G_BIT_V6 8 /* Global page */ +#define _PAGE_NWA_BIT_V6 9 /* No Writable Address */ +#define _PAGE_SW1_BIT_V6 10 /* SoftWare bit #1 */ +#define _PAGE_SW2_BIT_V6 11 /* SoftWare bit #2 */ +#define _PAGE_PFN_SHIFT_V6 12 /* shift of Physical Page Number */ +#define _PAGE_MT_SHIFT_V6 60 /* shift of Memory Type field */ +#define _PAGE_MT_BITS_NUM_V6 3 /* occupies 3 bits */ +#define _PAGE_NON_EX_BIT_V6 63 /* NON EXecutable */ + +#define _PAGE_P_V6 (1ULL << _PAGE_P_BIT_V6) +#define _PAGE_W_V6 (1ULL << _PAGE_W_BIT_V6) +#define _PAGE_PV_V6 (1ULL << _PAGE_PV_BIT_V6) +#define _PAGE_VALID_V6 (1ULL << _PAGE_VALID_BIT_V6) +#define _PAGE_INT_PR_V6 (1ULL << _PAGE_INT_PR_BIT_V6) +#define _PAGE_A_HW_V6 (1ULL << _PAGE_A_HW_BIT_V6) +#define _PAGE_D_V6 (1ULL << _PAGE_D_BIT_V6) +#define _PAGE_HUGE_V6 (1ULL << _PAGE_HUGE_BIT_V6) +#define _PAGE_G_V6 (1ULL << _PAGE_G_BIT_V6) +#define _PAGE_NWA_V6 (1ULL << _PAGE_NWA_BIT_V6) +#define _PAGE_SW1_V6 (1ULL << _PAGE_SW1_BIT_V6) +#define _PAGE_SW2_V6 (1ULL << _PAGE_SW2_BIT_V6) +#define _PAGE_MMIO_SW_V6 0x0c00000000000000ULL /* pte is MMIO */ + /* software flag */ +#define _PAGE_PFN_V6 \ + ((((1ULL << E2K_MAX_PHYS_BITS_V6) - 1) >> \ + PAGE_SHIFT) << \ + _PAGE_PFN_SHIFT_V6) +#define _PAGE_MT_V6 \ + (((1ULL << _PAGE_MT_BITS_NUM_V6) - 1) << _PAGE_MT_SHIFT_V6) +#define _PAGE_NON_EX_V6 (1ULL << _PAGE_NON_EX_BIT_V6) + +/* + * The _PAGE_PROTNONE bit is set only when the _PAGE_PRESENT bit + * is cleared, so we can use almost any bits for it. Must make + * sure though that pte_modify() will work with _PAGE_PROTNONE. + */ +#define _PAGE_PROTNONE_V6 _PAGE_NWA_V6 +#define _PAGE_SOFTWARE_MT _PAGE_SW1_V6 +#define _PAGE_AVAIL_V6 _PAGE_SW2_V6 +#define _PAGE_SPECIAL_V6 _PAGE_SW2_V6 +#define _PAGE_GFN_V6 _PAGE_SW2_V6 /* Page is mapped to guest */ + /* physical memory */ + +/* + * Bug #76626 - hardware access bit should always be set. + * This bug is actual for e2c/e2c+ only CPUs, + * so CPUs based on iset v6 cannot have the bug all the more + * and software bit can be identical to hardware one. + * Software bit is not used in the case of V6, but is not deleted + * for compatibility. 
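+ * Concretely, _PAGE_A_SW_BIT_V6 below simply aliases
+ * _PAGE_A_HW_BIT_V6, whereas the V2 layout may route the software
+ * bit to a separate bit 47 (explanatory note, not part of the
+ * original comment).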
+ */
+#define _PAGE_A_SW_BIT_V6 _PAGE_A_HW_BIT_V6
+#define _PAGE_A_BIT_V6 _PAGE_A_HW_BIT_V6
+#define _PAGE_A_SW_V6 _PAGE_A_HW_V6
+#define _PAGE_A_V6 _PAGE_A_HW_V6
+
+#define _PAGE_MT_GET_VAL(x) (((x) & _PAGE_MT_V6) >> _PAGE_MT_SHIFT_V6)
+#define _PAGE_MT_SET_VAL(x, mt) \
+ (((x) & ~_PAGE_MT_V6) | \
+ (((pteval_t)(mt) << _PAGE_MT_SHIFT_V6) & _PAGE_MT_V6))
+
+/* some useful PT entries protection basis values */
+#define _PAGE_KERNEL_RX_NOT_GLOB_V6 \
+ (_PAGE_P_V6 | _PAGE_VALID_V6 | \
+ _PAGE_PV_V6 | _PAGE_A_HW_V6)
+#define _PAGE_KERNEL_RO_NOT_GLOB_V6 \
+ (_PAGE_KERNEL_RX_NOT_GLOB_V6 | _PAGE_NON_EX_V6)
+#define _PAGE_KERNEL_RWX_NOT_GLOB_V6 \
+ (_PAGE_KERNEL_RX_NOT_GLOB_V6 | \
+ _PAGE_W_V6 | _PAGE_D_V6)
+#define _PAGE_KERNEL_RW_NOT_GLOB_V6 \
+ (_PAGE_KERNEL_RWX_NOT_GLOB_V6 | _PAGE_NON_EX_V6)
+#define _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V6 \
+ (_PAGE_KERNEL_RW_NOT_GLOB_V6 | _PAGE_HUGE_V6)
+#define _PAGE_KERNEL_RX_GLOB_V6 \
+ (_PAGE_KERNEL_RX_NOT_GLOB_V6 | _PAGE_G_V6)
+#define _PAGE_KERNEL_RO_GLOB_V6 \
+ (_PAGE_KERNEL_RO_NOT_GLOB_V6 | _PAGE_G_V6)
+#define _PAGE_KERNEL_RWX_GLOB_V6 \
+ (_PAGE_KERNEL_RWX_NOT_GLOB_V6 | _PAGE_G_V6)
+#define _PAGE_KERNEL_RW_GLOB_V6 \
+ (_PAGE_KERNEL_RW_NOT_GLOB_V6 | _PAGE_G_V6)
+#define _PAGE_KERNEL_HUGE_RW_GLOB_V6 \
+ (_PAGE_KERNEL_HUGE_RW_NOT_GLOB_V6 | _PAGE_G_V6)
+#ifdef CONFIG_GLOBAL_CONTEXT
+#define _PAGE_KERNEL_RX_V6 _PAGE_KERNEL_RX_GLOB_V6
+#define _PAGE_KERNEL_RO_V6 _PAGE_KERNEL_RO_GLOB_V6
+#define _PAGE_KERNEL_RWX_V6 _PAGE_KERNEL_RWX_GLOB_V6
+#define _PAGE_KERNEL_RW_V6 _PAGE_KERNEL_RW_GLOB_V6
+#define _PAGE_KERNEL_HUGE_RW_V6 _PAGE_KERNEL_HUGE_RW_GLOB_V6
+#else /* ! CONFIG_GLOBAL_CONTEXT */
+#define _PAGE_KERNEL_RX_V6 _PAGE_KERNEL_RX_NOT_GLOB_V6
+#define _PAGE_KERNEL_RO_V6 _PAGE_KERNEL_RO_NOT_GLOB_V6
+#define _PAGE_KERNEL_RWX_V6 _PAGE_KERNEL_RWX_NOT_GLOB_V6
+#define _PAGE_KERNEL_RW_V6 _PAGE_KERNEL_RW_NOT_GLOB_V6
+#define _PAGE_KERNEL_HUGE_RW_V6 _PAGE_KERNEL_HUGE_RW_NOT_GLOB_V6
+#endif /* CONFIG_GLOBAL_CONTEXT */
+#define _PAGE_KERNEL_V6 _PAGE_KERNEL_RW_V6
+#define _PAGE_KERNEL_HUGE_V6 _PAGE_KERNEL_HUGE_RW_V6
+#define _PAGE_KERNEL_IMAGE_V6 _PAGE_KERNEL_RX_V6
+#define _PAGE_KERNEL_MODULE_V6 _PAGE_KERNEL_RWX_V6
+#define _PAGE_KERNEL_PT_V6 _PAGE_KERNEL_V6
+#define _PAGE_USER_PT_V6 _PAGE_KERNEL_RW_NOT_GLOB_V6
+
+/* convert physical address to page frame number for PTE */
+#define _PAGE_PADDR_TO_PFN_V6(phys_addr) \
+ (((e2k_addr_t)phys_addr) & _PAGE_PFN_V6)
+
+/* convert the page frame number from PTE to physical address */
+#define _PAGE_PFN_TO_PADDR_V6(pte_val) \
+ ((e2k_addr_t)(pte_val) & _PAGE_PFN_V6)
+
+/* get/set pte Compilation Unit Index field */
+/* PTE V6 no longer has the CUI field, so force a build error on use */
+#define _PAGE_INDEX_TO_CUNIT_V6(index) BUILD_BUG_ON(true)
+#define _PAGE_INDEX_FROM_CUNIT_V6(prot) BUILD_BUG_ON(true)
+#define SET_PAGE_CUI_V6(pte_val, cui) BUILD_BUG_ON(true)
+
+/* masks of PTE flags that may be updated/reduced and of flags */
+/* that are restricted from update */
+#define _PAGE_CHG_MASK_V6 (_PAGE_PFN_V6 | _PAGE_A_V6 | _PAGE_D_V6 | \
+ _PAGE_SW1_V6 | _PAGE_SW2_V6 | \
+ _PAGE_MT_V6)
+#define _HPAGE_CHG_MASK_V6 (_PAGE_CHG_MASK_V6 | _PAGE_HUGE_V6)
+#define _PROT_REDUCE_MASK_V6 (_PAGE_P_V6 | _PAGE_W_V6 | _PAGE_A_V6 | \
+ _PAGE_D_V6 | _PAGE_VALID_V6 | _PAGE_G_V6 | \
+ _PAGE_MT_V6)
+#define _PROT_RESTRICT_MASK_V6 (_PAGE_PV_V6 | _PAGE_NON_EX_V6 | \
+ _PAGE_INT_PR_V6)
+static inline pteval_t
+get_pte_val_v6_changeable_mask(void)
+{
+ return _PAGE_CHG_MASK_V6;
+}
+static inline pteval_t
+get_huge_pte_val_v6_changeable_mask(void)
+{
+ return _HPAGE_CHG_MASK_V6;
+}
+static inline pteval_t
+get_pte_val_v6_reduceable_mask(void) +{ + return _PROT_REDUCE_MASK_V6; +} +static inline pteval_t +get_pte_val_v6_restricted_mask(void) +{ + return _PROT_RESTRICT_MASK_V6; +} + +static inline pteval_t +covert_uni_pte_flags_to_pte_val_v6(const uni_pteval_t uni_flags) +{ + pteval_t pte_flags = 0; + + if (uni_flags & UNI_PAGE_PRESENT) + pte_flags |= (_PAGE_P_V6); + if (uni_flags & UNI_PAGE_WRITE) + pte_flags |= (_PAGE_W_V6); + if (uni_flags & UNI_PAGE_PRIV) + pte_flags |= (_PAGE_PV_V6); + if (uni_flags & UNI_PAGE_VALID) + pte_flags |= (_PAGE_VALID_V6); + if (uni_flags & UNI_PAGE_PROTECT) + pte_flags |= (_PAGE_INT_PR_V6); + if (uni_flags & UNI_PAGE_HW_ACCESS) + pte_flags |= (_PAGE_A_HW_V6); + if (uni_flags & UNI_PAGE_DIRTY) + pte_flags |= (_PAGE_D_V6); + if (uni_flags & UNI_PAGE_HUGE) + pte_flags |= (_PAGE_HUGE_V6); + if (uni_flags & UNI_PAGE_GLOBAL) + pte_flags |= (_PAGE_G_V6); + if (uni_flags & UNI_PAGE_NWA) + pte_flags |= (_PAGE_NWA_V6); + if (uni_flags & UNI_PAGE_NON_EX) + pte_flags |= (_PAGE_NON_EX_V6); + if (uni_flags & UNI_PAGE_PROTNONE) + pte_flags |= (_PAGE_PROTNONE_V6); + if (uni_flags & UNI_PAGE_AVAIL) + pte_flags |= (_PAGE_SW2_V6); + if (uni_flags & UNI_PAGE_SW_ACCESS) + pte_flags |= (_PAGE_A_SW_V6); + if (uni_flags & UNI_PAGE_SPECIAL) + pte_flags |= (_PAGE_SPECIAL_V6); + if (uni_flags & UNI_PAGE_GFN) + pte_flags |= (_PAGE_GFN_V6); + if (uni_flags & UNI_PAGE_ACCESSED) + pte_flags |= (_PAGE_A_V6); + if (uni_flags & UNI_PAGE_PFN) + pte_flags |= (_PAGE_PFN_V6); + if (uni_flags & UNI_PAGE_MEM_TYPE) + pte_flags |= (_PAGE_MT_V6); + + BUG_ON(pte_flags == 0); + + return pte_flags; +} + +static inline pteval_t +fill_pte_val_v6_flags(const uni_pteval_t uni_flags) +{ + return covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} +static inline pteval_t +get_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} +static inline bool +test_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return get_pte_val_v6_flags(pte_val, uni_flags) != 0; +} +static inline pteval_t +set_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val | covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} +static inline pteval_t +clear_pte_val_v6_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return pte_val & ~covert_uni_pte_flags_to_pte_val_v6(uni_flags); +} + +static inline pteval_t +convert_kernel_text_pte_val_v6_to_protected(pteval_t kernel_text_pte_val) +{ + return kernel_text_pte_val; +} + +static inline pte_mem_type_t get_pte_val_v6_memory_type(pteval_t pte_val) +{ + int hardware_mt = _PAGE_MT_GET_VAL(pte_val); + + if (!(pte_val & _PAGE_SOFTWARE_MT)) + return hardware_mt; + + if (hardware_mt == GEN_CACHE_MT) { + return EXT_CACHE_MT; + } else if (hardware_mt == GEN_NON_CACHE_MT) { + return GEN_NON_CACHE_ORDERED_MT; + } else { + WARN_ON_ONCE(1); + return EXT_CACHE_MT; + } +} + +static inline pteval_t +set_pte_val_v6_memory_type(pteval_t pte_val, pte_mem_type_t memory_type) +{ + BUG_ON(memory_type != GEN_CACHE_MT && + memory_type != GEN_NON_CACHE_MT && + memory_type != GEN_NON_CACHE_ORDERED_MT && + memory_type != EXT_PREFETCH_MT && + memory_type != EXT_NON_PREFETCH_MT && + memory_type != EXT_CONFIG_MT && + memory_type != EXT_CACHE_MT); + + if (memory_type == EXT_CACHE_MT) { + pte_val |= _PAGE_SOFTWARE_MT; + memory_type = GEN_CACHE_MT; + } else if (memory_type == GEN_NON_CACHE_ORDERED_MT) { + pte_val |= _PAGE_SOFTWARE_MT; + memory_type = GEN_NON_CACHE_MT; + } else { + pte_val &= 
~_PAGE_SOFTWARE_MT; + } + return _PAGE_MT_SET_VAL(pte_val, memory_type); +} + +/* + * Encode and de-code a swap entry + * + * Format of swap offset: + * bits 20-63: swap offset + */ +static inline unsigned long +get_swap_offset_v6(swp_entry_t swap_entry) +{ + return swap_entry.val >> __SWP_OFFSET_SHIFT; +} +static inline swp_entry_t +create_swap_entry_v6(unsigned long type, unsigned long offset) +{ + swp_entry_t swap_entry; + + swap_entry.val = type << __SWP_TYPE_SHIFT; + swap_entry.val |= (offset << __SWP_OFFSET_SHIFT); + + return swap_entry; +} +static inline pte_t +convert_swap_entry_to_pte_v6(swp_entry_t swap_entry) +{ + pte_t pte; + + pte_val(pte) = swap_entry.val; +#ifdef CONFIG_MAKE_ALL_PAGES_VALID + pte_val(pte) |= _PAGE_VALID_V6; +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + return pte; +} + +#endif /* ! __ASSEMBLY__ */ + +#endif /* ! _ASM_E2K_PGTABLE_V6_H */ diff --git a/arch/e2k/include/asm/pgtable.h b/arch/e2k/include/asm/pgtable.h new file mode 100644 index 0000000..092f74d --- /dev/null +++ b/arch/e2k/include/asm/pgtable.h @@ -0,0 +1,989 @@ +/* + * pgtable.h: E2K page table operations. + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PGTABLE_H +#define _E2K_PGTABLE_H + +/* + * This file contains the functions and defines necessary to modify and + * use the E2K page tables. + * NOTE: E2K has four levels of page tables, while Linux assumes that + * there are three levels of page tables. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * The defines and routines to manage and access the four-level + * page table. + */ + +#define set_pte(ptep, pteval) \ + native_set_pte(ptep, pteval, false) +#define set_pte_at(mm, addr, ptep, pteval) \ + native_set_pte(ptep, pteval, false) +#define set_pte_not_present_at(mm, addr, ptep, pteval) \ + native_set_pte(ptep, pteval, true) +#define set_pte_to_move_at(mm, addr, ptep, pteval) \ + native_set_pte_to_move(ptep, pteval) +#define validate_pte_at(mm, addr, ptep, pteval) \ + native_set_pte_noflush(ptep, pteval) +#define ptep_get_and_clear_to_move(mm, addr, ptep) \ + ptep_get_and_clear(mm, addr, ptep) +#define boot_set_pte_at(addr, ptep, pteval) \ + native_set_pte(ptep, pteval, false) +#define boot_set_pte_kernel(addr, ptep, pteval) \ + boot_set_pte_at(addr, ptep, pteval) + +#define set_pmd(pmdp, pmdval) \ + native_set_pmd(pmdp, pmdval) +#define set_pmd_at(mm, addr, pmdp, pmdval) \ +({ \ + (void)(mm); \ + (void)(addr); \ + native_set_pmd(pmdp, pmdval); \ +}) +#define validate_pmd_at(mm, addr, pmdp, pmdval) \ + native_set_pmd_noflush(pmdp, pmdval) + +#define set_pud(pudp, pudval) \ + native_set_pud(pudp, pudval) +#define set_pud_at(mm, addr, pudp, pudval) \ + native_set_pud(pudp, pudval) +#define validate_pud_at(mm, addr, pudp) \ + set_pud_at(mm, addr, pudp, __pud(_PAGE_INIT_VALID)) +#define invalidate_pud_at(mm, addr, pudp) \ + set_pud_at(mm, addr, pudp, __pud(0)) + +#define set_pgd(pgdp, pgdval) \ + native_set_pgd(pgdp, pgdval) +#define set_pgd_at(mm, addr, pgdp, pgdval) \ + native_set_pgd(pgdp, pgdval) +#define validate_pgd_at(mm, addr, pgdp) \ + set_pgd_at(mm, addr, pgdp, __pgd(_PAGE_INIT_VALID)) +#define invalidate_pgd_at(mm, addr, pgdp) \ + set_pgd_at(mm, addr, pgdp, __pgd(0)) + +#define get_pte_for_address(vma, address) \ + native_do_get_pte_for_address(vma, address) + +#define pgd_clear_kernel(pgdp) (pgd_val(*(pgdp)) = 0UL) +#define pud_clear_kernel(pudp) (pud_val(*(pudp)) = 0UL) +#define 
pmd_clear_kernel(pmdp) (pmd_val(*(pmdp)) = 0UL) +#define pte_clear_kernel(ptep) (pte_val(*(ptep)) = 0UL) + +/* pte_page() returns the 'struct page *' corresponding to the PTE: */ +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) + + +#define boot_pte_page(pte) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pte_val(pte))) + +#define pmd_set_k(pmdp, ptep) (*(pmdp) = mk_pmd_addr(ptep, \ + PAGE_KERNEL_PTE)) +#define pmd_set_u(pmdp, ptep) (*(pmdp) = mk_pmd_addr(ptep, \ + PAGE_USER_PTE)) +#define pmd_clear(pmdp) \ +do { \ + u64 __pmdval; \ + __pmdval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? \ + _PAGE_INIT_VALID : 0UL; \ + native_set_pmd(pmdp, __pmd(__pmdval)); \ +} while (0) + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)__va(_PAGE_PFN_TO_PADDR(pmd_val(pmd))); +} + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)__va(_PAGE_PFN_TO_PADDR(pud_val(pud))); +} + +static inline unsigned long pgd_page_vaddr(pgd_t pgd) +{ + return (unsigned long)__va(_PAGE_PFN_TO_PADDR(pgd_val(pgd))); +} + + +#define boot_pmd_set_k(pmdp, ptep) \ + (*(pmdp) = mk_pmd_phys(boot_vpa_to_pa((e2k_addr_t)(ptep)), \ + PAGE_KERNEL_PTE)) +#define boot_pmd_set_u(pmdp, ptep) \ + (*(pmdp) = mk_pmd_phys(boot_vpa_to_pa((e2k_addr_t)(ptep)), \ + PAGE_USER_PTE)) +#define boot_pmd_page(pmd) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pmd_val(pmd))) + +#define pud_set_k(pudp, pmdp) (*(pudp) = mk_pud_addr(pmdp, \ + PAGE_KERNEL_PMD)) +#define pud_set_u(pudp, pmdp) (*(pudp) = mk_pud_addr(pmdp, \ + PAGE_USER_PMD)) +static inline void pud_clear(pud_t *pud) +{ + pud_val(*pud) = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? + _PAGE_INIT_VALID : 0UL; +} + +#define boot_pud_set_k(pudp, pmdp) \ + (*(pudp) = mk_pud_phys(boot_vpa_to_pa((e2k_addr_t)(pmdp)), \ + PAGE_KERNEL_PMD)) +#define boot_pud_set_u(pudp, pmdp) \ + (*(pudp) = mk_pud_phys(boot_vpa_to_pa((e2k_addr_t)(pmdp)), \ + PAGE_USER_PMD)) +#define boot_pud_page(pud) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pud_val(pud))) + +#define mk_pgd_phys_k(pudp) mk_pgd_addr(pudp, PAGE_KERNEL_PUD) + +#ifndef CONFIG_NUMA +#define pgd_set_k(pgdp, pudp) (*(pgdp) = mk_pgd_phys_k(pudp)) +#define node_pgd_set_k(nid, pgdp, pudp) pgd_set_k(pgdp, pudp) +#else /* CONFIG_NUMA */ +extern void node_pgd_set_k(int nid, pgd_t *pgdp, pud_t *pudp); +static void inline pgd_set_k(pgd_t *pgdp, pud_t *pudp) +{ + node_pgd_set_k(numa_node_id(), pgdp, pudp); +} +#endif /* ! CONFIG_NUMA */ + +#define vmlpt_pgd_set(pgdp, lpt) pgd_set_u(pgdp, (pud_t *)(lpt)) +#define pgd_set_u(pgdp, pudp) (*(pgdp) = mk_pgd_addr(pudp, \ + PAGE_USER_PUD)) +static inline void pgd_clear_one(pgd_t *pgd) +{ + pgd_val(*pgd) = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? + _PAGE_INIT_VALID : 0UL; +} + + +#define boot_mk_pgd_phys_k(pudp) \ + mk_pgd_phys(boot_vpa_to_pa((e2k_addr_t)(pudp)), PAGE_KERNEL_PUD) +#define boot_mk_pgd_phys_u(pudp) \ + mk_pgd_phys(boot_vpa_to_pa((e2k_addr_t)(pudp)), PAGE_USER_PUD) +#ifndef CONFIG_NUMA +#define boot_pgd_set_k(pgdp, pudp) (*(pgdp) = boot_mk_pgd_phys_k(pudp)) +#define boot_pgd_set_u(pgdp, pudp) (*(pgdp) = boot_mk_pgd_phys_u(pudp)) + +#else /* CONFIG_NUMA */ +extern void boot_pgd_set(pgd_t *my_pgdp, pud_t *pudp, int user); +#define boot_pgd_set_k(pgdp, pudp) boot_pgd_set(pgdp, pudp, 0) +#define boot_pgd_set_u(pgdp, pudp) boot_pgd_set(pgdp, pudp, 1) +#endif /* ! 
CONFIG_NUMA */ + +#define boot_vmlpt_pgd_set(pgdp, lpt) (*(pgdp) = boot_mk_pgd_phys_k( \ + (pud_t *)(lpt))) +#define boot_pgd_page(pgd) \ + (e2k_addr_t)boot_va(_PAGE_PFN_TO_PADDR(pgd_val(pgd))) + +static inline void native_set_pte_noflush(pte_t *ptep, pte_t pteval) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); + *ptep = pteval; +} + +static inline void native_set_pmd_noflush(pmd_t *pmdp, pmd_t pmdval) +{ + *pmdp = pmdval; +} + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +#include + +/* + * When instruction page changes its physical address, we must + * flush old physical address from Instruction Cache, otherwise + * it could be accessed by its virtual address. + * + * Since we do not know whether the instruction page will change + * its address in the future, we have to be conservative here. + */ +static inline void flush_pte_from_ic(pte_t val) +{ + unsigned long address; + + address = (unsigned long) __va(_PAGE_PFN_TO_PADDR(pte_val(val))); + __flush_icache_range(address, address + PTE_SIZE); +} + +static inline void flush_pmd_from_ic(pmd_t val) +{ + unsigned long address; + + address = (unsigned long) __va(_PAGE_PFN_TO_PADDR(pmd_val(val))); + __flush_icache_range(address, address + PMD_SIZE); +} + +static inline void flush_pud_from_ic(pud_t val) +{ + /* pud is too large to step through it, so flush everything at once */ + __flush_icache_all(); +} + +static __always_inline void native_set_pte(pte_t *ptep, pte_t pteval, + bool known_not_present) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); + + BUILD_BUG_ON(!__builtin_constant_p(known_not_present)); + /* If we know that pte is not present, then this means + * that instruction buffer has been flushed already + * and we can avoid the check altogether. */ + if (known_not_present) { + *ptep = pteval; + } else { + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pte_t oldpte = *ptep; + + *ptep = pteval; + + if (have_flush_dc_ic && pte_present_and_exec(oldpte) && + (!pte_present_and_exec(pteval) || + pte_pfn(oldpte) != pte_pfn(pteval))) + flush_pte_from_ic(oldpte); + } +} + +static inline void native_set_pte_to_move(pte_t *ptep, pte_t pteval) +{ + native_set_pte(ptep, pteval, false); +} + +static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmdval) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pmd_t oldpmd = *pmdp; + + *pmdp = pmdval; + + if (have_flush_dc_ic && pmd_present_and_exec_and_huge(oldpmd) && + (!pmd_present_and_exec_and_huge(pmdval) || + pmd_pfn(oldpmd) != pmd_pfn(pmdval))) + flush_pmd_from_ic(oldpmd); +} + +static inline void native_set_pud(pud_t *pudp, pud_t pudval) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pud_t oldpud = *pudp; + + *pudp = pudval; + + if (have_flush_dc_ic && pud_present_and_exec_and_huge(oldpud) && + (!pud_present_and_exec_and_huge(pudval) || + pud_pfn(oldpud) != pud_pfn(pudval))) + flush_pud_from_ic(oldpud); +} + +static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgdval) +{ + *pgdp = pgdval; +} +#else +# define native_set_pte(ptep, pteval, known_not_present) (*(ptep) = (pteval)) +# define native_set_pte_to_move(ptep, pteval) native_set_pte(ptep, pteval, false) +# define native_set_pmd(pmdp, pmdval) (*(pmdp) = (pmdval)) +# define native_set_pud(pudp, pudval) (*(pudp) = (pudval)) +# define native_set_pgd(pgdp, pgdval) (*(pgdp) = (pgdval)) +#endif + +/* + * Remap I/O pages at `pfn' of size `size' with page protection + * `prot' into virtual address `from'. 
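+ *
+ * A typical use is in a driver's mmap() handler; an editor's sketch
+ * (foo_mmap() and FOO_PHYS_BASE are hypothetical):
+ *
+ *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
+ *	{
+ *		return io_remap_pfn_range(vma, vma->vm_start,
+ *				FOO_PHYS_BASE >> PAGE_SHIFT,
+ *				vma->vm_end - vma->vm_start,
+ *				vma->vm_page_prot);
+ *	}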
+ * + * This function is used only on device memory and track_pfn_remap() + * will explicitly set "External" memory type. + * + * And remap_pfn_range() should be used on RAM only and not on device + * memory, but in practice many drivers violate API and just use + * remap_pfn_range() everywhere. In this case track_pfn_remap() will + * determine the required type. */ +#define io_remap_pfn_range(vma, addr, pfn, size, prot) \ +({ \ + unsigned long __irp_pfn = (pfn); \ + VM_WARN_ON_ONCE(pfn_valid(__irp_pfn)); \ + remap_pfn_range((vma), (addr), (pfn), (size), (prot)); \ +}) + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, unsigned long size); + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vm_insert_pfn(). + * + * This does not cover vm_insert_page so if some bad driver decides + * to use it on I/O memory we could get into trouble. + */ +extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn); + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). + */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). + */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. + */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} +#define MK_IOSPACE_PFN(space, pfn) (pfn) +#define GET_IOSPACE(pfn) 0 +#define GET_PFN(pfn) (pfn) + +#ifndef __ASSEMBLY__ + +#define NATIVE_VMALLOC_START (NATIVE_KERNEL_IMAGE_AREA_BASE + \ + 0x020000000000UL) + /* 0x0000 e400 0000 0000 */ +#define NATIVE_VMALLOC_END (NATIVE_VMALLOC_START + 0x010000000000UL) + /* 0x0000 e500 0000 0000 */ +#define NATIVE_VMEMMAP_START (NATIVE_VMALLOC_END + 0x010000000000UL) + /* 0x0000 e600 0000 0000 */ +#define NATIVE_VMEMMAP_END (NATIVE_VMEMMAP_START + \ + (1ULL << (E2K_MAX_PHYS_BITS - \ + PAGE_SHIFT)) * \ + sizeof(struct page)) + +#ifdef CONFIG_SMP +static inline void +ptep_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pt_set_wrprotect_atomic(mm, addr, (pgprot_t *)ptep); +} +#else /* ! CONFIG_SMP */ +static inline void +ptep_wrprotect_atomic(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ +} +#endif /* CONFIG_SMP */ + +/* + * The module space starts from end of resident kernel image and + * both areas should be within 2 ** 30 bits of the virtual addresses. + */ +#define MODULES_VADDR E2K_MODULES_START /* 0x0000 e200 0xxx x000 */ +#define MODULES_END E2K_MODULES_END /* 0x0000 e200 4000 0000 */ + +/* virtualization support */ +#include + +#define pte_clear_not_present_full(mm, addr, ptep, fullmm) \ +do { \ + u64 __pteval; \ + __pteval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? \ + _PAGE_INIT_VALID : 0UL; \ + set_pte_not_present_at(mm, addr, ptep, __pte(__pteval)); \ +} while (0) + + +#define pte_clear(mm, addr, ptep) \ +do { \ + u64 __pteval; \ + __pteval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? 
\ + _PAGE_INIT_VALID : 0UL; \ + set_pte_at(mm, addr, ptep, __pte(__pteval)); \ +} while (0) + +#if !defined(CONFIG_BOOT_E2K) && !defined(E2K_P2V) +static inline pte_t +do_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + int mm_users; + pte_t oldpte; + prefetch_offset(ptep, PREFETCH_STRIDE); +# ifdef CONFIG_SMP + u64 newval; + + newval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? + _PAGE_INIT_VALID : 0UL; + + oldpte = __pte(pt_get_and_xchg_atomic(mm, addr, newval, + (pgprot_t *)ptep)); +# else + oldpte = *ptep; + pte_clear(mm, addr, ptep); +# endif + + if (likely(mm != NULL)) + mm_users = (atomic_read(&mm->mm_users) != 0); + else + /* kernel or guest process: users exist always */ + mm_users = true; + + /* mm_users check is for the fork() case: we do not + * want to spend time flushing when we are exiting. */ + if (have_flush_dc_ic && mm_users && pte_present_and_exec(oldpte)) + flush_pte_from_ic(oldpte); + + return oldpte; +} + +static inline pte_t +do_ptep_get_and_clear_as_valid(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int have_flush_dc_ic = cpu_has(CPU_FEAT_FLUSH_DC_IC); + pte_t oldpte; + + prefetch_offset(ptep, PREFETCH_STRIDE); +# ifdef CONFIG_SMP + oldpte = __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *)ptep)); +# else + oldpte = *ptep; + pte_val(*ptep) &= _PAGE_INIT_VALID; +# endif + + if (have_flush_dc_ic && pte_present_and_exec(oldpte)) + flush_pte_from_ic(oldpte); + + return oldpte; +} +#else +static inline pte_t +do_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); +# ifdef CONFIG_SMP + return __pte(pt_get_and_xchg_atomic(mm, addr, 0UL, (pgprot_t *)ptep)); +# else + pte_t pte = *ptep; + pte_clear(mm, addr, ptep); + return pte; +# endif +} + +static inline pte_t +do_ptep_get_and_clear_as_valid(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t oldpte; + + prefetch_offset(ptep, PREFETCH_STRIDE); +# ifdef CONFIG_SMP + oldpte = __pte(pt_get_and_clear_atomic(mm, addr, (pgprot_t *)ptep)); +# else + oldpte = *ptep; + pte_val(*ptep) &= _PAGE_INIT_VALID; +# endif + + return oldpte; +} +#endif + +static inline pte_t +ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + return do_ptep_get_and_clear(mm, addr, ptep); +} + +static inline pte_t +ptep_get_and_clear_as_valid(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return do_ptep_get_and_clear_as_valid(mm, addr, ptep); +} + +#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) +# define vmemmap ((struct page *)VMEMMAP_START) +#endif + +#include + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. 
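+ *
+ * On E2K the zero page may be duplicated across NUMA nodes (see the
+ * zero_page_nid_to_pfn[] / zero_page_nid_to_page[] arrays below), so
+ * the pfn check scans every node that has a duplicated kernel. A
+ * minimal sketch of a caller (editor's illustration):
+ *
+ *	pte_t pte = *ptep;
+ *	if (is_zero_pfn(pte_pfn(pte)))
+ *		return true;	// pte maps a (node-local) zero page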
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern struct page *zeroed_page;
+extern u64 zero_page_nid_to_pfn[MAX_NUMNODES];
+extern struct page *zero_page_nid_to_page[MAX_NUMNODES];
+
+#define ZERO_PAGE(vaddr) zeroed_page
+
+#define is_zero_pfn is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ int node;
+
+ for_each_node_has_dup_kernel(node)
+ if (zero_page_nid_to_pfn[node] == pfn)
+ return 1;
+
+ return 0;
+}
+
+#define my_zero_pfn my_zero_pfn
+static inline u64 my_zero_pfn(unsigned long addr)
+{
+ u64 pfn = 0;
+ int node = numa_node_id();
+
+ if (node_has_dup_kernel(node)) {
+ pfn = zero_page_nid_to_pfn[node];
+ } else {
+ for_each_node_has_dup_kernel(node) {
+ pfn = zero_page_nid_to_pfn[node];
+ break;
+ }
+ }
+
+ return pfn;
+}
+
+static inline int is_zero_page(struct page *page)
+{
+ int node;
+
+ for_each_node_has_dup_kernel(node)
+ if (zero_page_nid_to_page[node] == page)
+ return 1;
+
+ return 0;
+}
+
+extern void paging_init(void);
+
+/*
+ * The index and offset in the root-level page table directory.
+ */
+/* to find an entry in a kernel root page-table-directory */
+#define pgd_offset_k(virt_addr) ((pgd_t *)kernel_root_pt + \
+ pgd_index(virt_addr))
+#define boot_pgd_offset_k(virt_addr) ((pgd_t *)boot_root_pt + \
+ boot_pgd_index(virt_addr))
+#ifdef CONFIG_NUMA
+extern pgd_t *node_pgd_offset_kernel(int nid, e2k_addr_t virt_addr);
+#else /* ! CONFIG_NUMA */
+#define node_pgd_offset_kernel(nid, virt_addr) \
+({ \
+ (nid); \
+ pgd_offset_k(virt_addr); \
+})
+#endif /* CONFIG_NUMA */
+
+#ifndef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT
+#define pgd_clear(pgdp) pgd_clear_one(pgdp)
+#else /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+static inline int
+pgd_clear_cpu_root_pt(pgd_t *pgd)
+{
+ pgd_t *pgd_table = pgd_to_page(pgd);
+ unsigned long pgd_ind;
+ pgd_t *cpu_pgd;
+
+ if (MMU_IS_SEPARATE_PT())
+ /* the CPU pgd is not a user one, and user pgds are not */
+ /* copied to any other PGDs */
+ return 0;
+ if (!THERE_IS_DUP_KERNEL)
+ return 0;
+ if (!current->active_mm || current->active_mm->pgd != pgd_table)
+ return 0;
+ pgd_ind = pgd_to_index(pgd);
+ cpu_pgd = &cpu_kernel_root_pt[pgd_ind];
+ if (pgd_none(*cpu_pgd)) {
+ pr_err("%s(): CPU #%u kernel root pgd %px already clean 0x%lx\n",
+ __func__, raw_smp_processor_id(),
+ cpu_pgd, pgd_val(*cpu_pgd));
+ }
+ pgd_clear_one(cpu_pgd);
+ return 1;
+}
+static inline void
+pgd_clear(pgd_t *pgd)
+{
+ unsigned long mask;
+
+ /*
+ * The PGD must be cleared in both root page tables (the main
+ * one and the CPU's one) atomically, so disable interrupts to
+ * guard against an SMP call for flush_tlb_all() arriving
+ * between the two clears while the CPU restores its root PGD
+ * from the main one. Otherwise the CPU's PGD could be restored
+ * as clear while we expect the not yet cleared state (see
+ * pgd_clear_cpu_root_pt() above).
+ */
+ raw_local_irq_save(mask);
+ pgd_clear_one(pgd); /* order of clearing is significant: */
+ pgd_clear_cpu_root_pt(pgd); /* if interrupts were not disabled, */
+ /* a TLB flush could restore the */
+ /* second PGD from the first one */
+ raw_local_irq_restore(mask);
+}
+#endif /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */
+
+/*
+ * The index and offset in the upper page table directory.
+ */
+#define pud_offset(dir, address) ((pud_t *)pgd_page_vaddr(*(dir)) + \
+ pud_index(address))
+
+/*
+ * The index and offset in the middle page table directory
+ */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
+ pmd_index(address))
+
+/*
+ * The index and offset in the third-level page table.
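+ *
+ * Together with pgd_offset_k()/pud_offset()/pmd_offset() above and
+ * pte_offset_kernel() just below, a full four-level walk of a kernel
+ * virtual address looks like (editor's sketch):
+ *
+ *	pgd_t *pgd = pgd_offset_k(addr);
+ *	pud_t *pud = pud_offset(pgd, addr);
+ *	pmd_t *pmd = pmd_offset(pud, addr);
+ *	pte_t *pte = pte_offset_kernel(pmd, addr);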
+ */ +#define pte_offset_kernel(pmd, address) \ + ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) +#define pte_offset_map(pmd, address) \ +({ \ + pte_t *__pom_pte = pte_offset_kernel((pmd), (address)); \ + prefetchw(__pom_pte); \ + __pom_pte; \ +}) + +#define pte_unmap(pte) do { } while (0) + +#define boot_pte_index(virt_addr) pte_index(virt_addr) +#define boot_pte_offset(pmdp, addr) ((pte_t *)boot_pmd_page(*(pmdp)) + \ + boot_pte_index(addr)) + +/* + * Encode and de-code a swap entry + */ +static inline unsigned long +mmu_get_swap_offset(swp_entry_t swap_entry, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_swap_offset_v6(swap_entry); + else + return get_swap_offset_v2(swap_entry); +} +static inline swp_entry_t +mmu_create_swap_entry(unsigned long type, unsigned long offset, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return create_swap_entry_v6(type, offset); + else + return create_swap_entry_v2(type, offset); +} +static inline pte_t +mmu_convert_swap_entry_to_pte(swp_entry_t swap_entry, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return convert_swap_entry_to_pte_v6(swap_entry); + else + return convert_swap_entry_to_pte_v2(swap_entry); +} +static inline unsigned long __swp_offset(swp_entry_t swap_entry) +{ + return mmu_get_swap_offset(swap_entry, MMU_IS_PT_V6()); +} +static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) +{ + return mmu_create_swap_entry(type, offset, MMU_IS_PT_V6()); +} +static inline pte_t __swp_entry_to_pte(swp_entry_t swap_entry) +{ + return mmu_convert_swap_entry_to_pte(swap_entry, MMU_IS_PT_V6()); +} +static inline pmd_t __swp_entry_to_pmd(swp_entry_t swap_entry) +{ + return __pmd(pte_val(__swp_entry_to_pte(swap_entry))); +} + +/* + * atomic versions of the some PTE manipulations: + */ +static inline pte_t +native_do_get_pte_for_address(struct vm_area_struct *vma, e2k_addr_t address) +{ + probe_entry_t probe_pte; + + probe_pte = get_MMU_DTLB_ENTRY(address); + if (DTLB_ENTRY_TEST_SUCCESSFUL(probe_entry_val(probe_pte)) && + DTLB_ENTRY_TEST_VVA(probe_entry_val(probe_pte))) { + return __pte(_PAGE_SET_PRESENT(probe_entry_val(probe_pte))); + } else if (!DTLB_ENTRY_TEST_SUCCESSFUL(probe_entry_val(probe_pte))) { + return __pte(0); + } else { + return __pte(probe_entry_val(probe_pte)); + } +} + +#ifdef CONFIG_SMP +static inline int +test_and_clear_relaxed(pgprotval_t mask, pgprot_t *addr) +{ + pgprotval_t retval; + + retval = pt_clear_relaxed_atomic(mask, addr); + + return (retval & mask) != 0; +} +#endif /* CONFIG_SMP */ + +static inline int +ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte; + + prefetch_offset(ptep, PREFETCH_STRIDE); + +#ifdef CONFIG_SMP + pte_val(pte) = pt_clear_young_atomic(vma->vm_mm, addr, + (pgprot_t *)ptep); + return pte_young(pte); +#else + pte = *ptep; + if (!pte_young(pte)) + return 0; + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); + return 1; +#endif +} + +static inline void +ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + prefetch_offset(ptep, PREFETCH_STRIDE); +#ifdef CONFIG_SMP + ptep_wrprotect_atomic(mm, addr, ptep); +#else + pte_t pte = *ptep; + pte = pte_wrprotect(pte); + set_pte_at(mm, addr, ptep, pte); +#endif +} + +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); + +#ifdef CONFIG_MAKE_ALL_PAGES_VALID +# define ptep_clear_flush_as_valid(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + __pte = ptep_get_and_clear_as_valid((__vma)->vm_mm, 
__address, __ptep);\ + flush_tlb_page(__vma, __address); \ + __pte; \ +}) +#endif /* CONFIG_MAKE_ALL_PAGES_VALID */ + +#define pgd_addr_bound(addr) (((addr) + PGDIR_SIZE) & PGDIR_MASK) +#define pud_addr_bound(addr) (((addr) + PUD_SIZE) & PUD_MASK) +#define pmd_addr_bound(addr) (((addr) + PMD_SIZE) & PMD_MASK) + +#if defined CONFIG_TRANSPARENT_HUGEPAGE && defined CONFIG_MAKE_ALL_PAGES_VALID +# define pmdp_collapse_flush pmdp_collapse_flush +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ +# ifdef CONFIG_SMP + u64 newval; + + newval = (test_ts_flag(TS_KEEP_PAGES_VALID)) ? + _PAGE_INIT_VALID : 0UL; + + return __pmd(__api_xchg_return(newval, &pmdp->pmd, d, RELAXED_MB)); +# else + pmd_t pmd = *pmdp; + pmd_clear(pmdp); + return pmd; +# endif +} + +static inline pmd_t pmdp_huge_get_and_clear_as_valid(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ +# ifdef CONFIG_SMP + return __pmd(__api_xchg_return(_PAGE_INIT_VALID, &pmdp->pmd, d, + RELAXED_MB)); +# else + pmd_t pmd = *pmdp; + set_pmd_at(mm, addr, pmdp, __pmd(_PAGE_INIT_VALID)); + return pmd; +# endif +} +#endif + +/* interface functions to handle some things on the PT level */ +void split_simple_pmd_page(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]); +void split_multiple_pmd_page(pgprot_t *ptp, pte_t *ptes[MAX_NUM_HUGE_PTES]); +void map_pud_huge_page_to_simple_pmds(pgprot_t *pmd_page, e2k_addr_t phys_page, + pgprot_t pgprot); +void map_pud_huge_page_to_multiple_pmds(pgprot_t *pmd_page, + e2k_addr_t phys_page, pgprot_t pgprot); + +#endif /* !(__ASSEMBLY__) */ + +extern void memmap_init(unsigned long size, int nid, unsigned long zone, + unsigned long start_pfn); + +#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) + +#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#define __HAVE_ARCH_PMD_WRITE +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#define __HAVE_ARCH_PMDP_CLEAR_FLUSH +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +#define __HAVE_ARCH_MEMMAP_INIT +#define __HAVE_PFNMAP_TRACKING +#include + +typedef enum pte_cmp { + PTE_SAME_CMP, + PTE_CHANGE_PFN_CMP, + PTE_CHANGE_PROTECTS_CMP, + PTE_CHANGE_FLAGS_CMP, + PTE_CHANGE_FLAGS_AND_PROTECTS_CMP, +} pte_cmp_t; + +static inline pte_cmp_t pte_compare(pte_t src_pte, pte_t dst_pte) +{ + pteval_t src_pte_flags; + pteval_t dst_pte_flags; + pteval_t src_pte_protects; + pteval_t dst_pte_protects; + + if (pte_same(src_pte, dst_pte)) + return PTE_SAME_CMP; + if (pte_pfn(src_pte) != pte_pfn(dst_pte)) + return PTE_CHANGE_PFN_CMP; + src_pte_flags = pte_only_flags(src_pte); + dst_pte_flags = pte_only_flags(dst_pte); + src_pte_protects = pte_only_protects(src_pte); + dst_pte_protects = pte_only_protects(dst_pte); + if (src_pte_flags == dst_pte_flags) { + if (src_pte_protects == dst_pte_protects) + return PTE_SAME_CMP; + else + return PTE_CHANGE_PROTECTS_CMP; 
+ } else if (src_pte_protects == dst_pte_protects) {
+ return PTE_CHANGE_FLAGS_CMP;
+ } else {
+ return PTE_CHANGE_FLAGS_AND_PROTECTS_CMP;
+ }
+}
+
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pt_modify_prot_atomic(vma->vm_mm, addr, (pgprot_t *)ptep));
+}
+
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+ __ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ pmd_t pmd;
+
+#ifdef CONFIG_SMP
+ pmd_val(pmd) = pt_clear_young_atomic(vma->vm_mm, addr,
+ (pgprot_t *)pmdp);
+ return pmd_young(pmd);
+#else
+ pmd = *pmdp;
+ if (!pmd_young(pmd))
+ return 0;
+ set_pmd_at(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
+ return 1;
+#endif
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ pt_set_wrprotect_atomic(mm, addr, (pgprot_t *)pmdp);
+}
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
+}
+
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+}
+
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* !(_E2K_PGTABLE_H) */
diff --git a/arch/e2k/include/asm/pgtable_def.h b/arch/e2k/include/asm/pgtable_def.h
new file mode 100644
index 0000000..5dd62e9
--- /dev/null
+++ b/arch/e2k/include/asm/pgtable_def.h
@@ -0,0 +1,1307 @@
+/*
+ * pgtable_def.h: E2K page table common definitions.
+ *
+ * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _ASM_E2K_PGTABLE_DEF_H
+#define _ASM_E2K_PGTABLE_DEF_H
+
+/*
+ * This file contains the functions and defines necessary to modify and
+ * use the E2K page tables.
+ * NOTE: E2K has four levels of page tables.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef __ASSEMBLY__
+
+/* max. number of physical address bits (architected) */
+static inline int
+mmu_max_phys_addr_bits(bool mmu_pt_v6)
+{
+ return (mmu_pt_v6) ? E2K_MAX_PHYS_BITS_V6 : E2K_MAX_PHYS_BITS_V2;
+}
+static inline int
+e2k_max_phys_addr_bits(void)
+{
+ return mmu_max_phys_addr_bits(MMU_IS_PT_V6());
+}
+#define E2K_PHYS_BITS_NUM e2k_max_phys_addr_bits()
+#define E2K_MAX_PHYS_BITS E2K_MAX_PHYS_BITS_V6
+
+/*
+ * Hardware MMU page tables differ from one ISET to another;
+ * moreover, each MMU supports a few different page tables:
+ * native (primary)
+ * secondary page tables for several modes (VA32, VA48, PA32, PA48 ...)
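+ *
+ * Each helper in this header therefore picks the V6 or V2 variant at
+ * run time; an editor's sketch of the pattern (mirroring
+ * mmu_phys_addr_to_pte_pfn() below):
+ *
+ *	pteval_t pfn = MMU_IS_PT_V6() ?
+ *			_PAGE_PADDR_TO_PFN_V6(phys_addr) :
+ *			_PAGE_PADDR_TO_PFN_V2(phys_addr);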
+ * The following interface manages the page tables as a common object
+ */
+
+static inline const pt_level_t *
+get_pt_level_on_id(int level_id)
+{
+ /* for now the PT level ID is simply the level number */
+ return get_pt_struct_level_on_id(&pgtable_struct, level_id);
+}
+
+static inline bool
+is_huge_pmd_level(void)
+{
+ return is_huge_pt_struct_level(&pgtable_struct, E2K_PMD_LEVEL_NUM);
+}
+
+static inline bool
+is_huge_pud_level(void)
+{
+ return is_huge_pt_struct_level(&pgtable_struct, E2K_PUD_LEVEL_NUM);
+}
+
+static inline bool
+is_huge_pgd_level(void)
+{
+ return is_huge_pt_struct_level(&pgtable_struct, E2K_PGD_LEVEL_NUM);
+}
+
+static inline e2k_size_t
+get_e2k_pt_level_size(int level_id)
+{
+ return get_pt_struct_level_size(&pgtable_struct, level_id);
+}
+static inline e2k_size_t
+get_pgd_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PGD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pud_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PUD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pmd_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PMD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pte_level_size(void)
+{
+ return get_e2k_pt_level_size(E2K_PTE_LEVEL_NUM);
+}
+
+static inline e2k_size_t
+get_e2k_pt_level_page_size(int level_id)
+{
+ return get_pt_struct_level_page_size(&pgtable_struct, level_id);
+}
+static inline e2k_size_t
+get_pgd_level_page_size(void)
+{
+ return get_e2k_pt_level_page_size(E2K_PGD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pud_level_page_size(void)
+{
+ return get_e2k_pt_level_page_size(E2K_PUD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pmd_level_page_size(void)
+{
+ return get_e2k_pt_level_page_size(E2K_PMD_LEVEL_NUM);
+}
+static inline e2k_size_t
+get_pte_level_page_size(void)
+{
+ return get_e2k_pt_level_page_size(E2K_PTE_LEVEL_NUM);
+}
+
+static inline int
+get_e2k_pt_level_huge_ptes_num(int level_id)
+{
+ return get_pt_struct_level_huge_ptes_num(&pgtable_struct, level_id);
+}
+static inline int
+get_pgd_level_huge_ptes_num(void)
+{
+ return get_e2k_pt_level_huge_ptes_num(E2K_PGD_LEVEL_NUM);
+}
+static inline int
+get_pud_level_huge_ptes_num(void)
+{
+ return get_e2k_pt_level_huge_ptes_num(E2K_PUD_LEVEL_NUM);
+}
+static inline int
+get_pmd_level_huge_ptes_num(void)
+{
+ return get_e2k_pt_level_huge_ptes_num(E2K_PMD_LEVEL_NUM);
+}
+
+/*
+ * PTE format
+ */
+
+static inline pteval_t
+mmu_phys_addr_to_pte_pfn(e2k_addr_t phys_addr, bool mmu_pt_v6)
+{
+ if (mmu_pt_v6)
+ return _PAGE_PADDR_TO_PFN_V6(phys_addr);
+ else
+ return _PAGE_PADDR_TO_PFN_V2(phys_addr);
+}
+static inline e2k_addr_t
+mmu_pte_pfn_to_phys_addr(pteval_t pte_val, bool mmu_pt_v6)
+{
+ if (mmu_pt_v6)
+ return _PAGE_PFN_TO_PADDR_V6(pte_val);
+ else
+ return _PAGE_PFN_TO_PADDR_V2(pte_val);
+}
+
+static inline pteval_t
+phys_addr_to_pte_pfn(e2k_addr_t phys_addr)
+{
+ return mmu_phys_addr_to_pte_pfn(phys_addr, MMU_IS_PT_V6());
+}
+static inline e2k_addr_t
+pte_pfn_to_phys_addr(pteval_t pte_val)
+{
+ return mmu_pte_pfn_to_phys_addr(pte_val, MMU_IS_PT_V6());
+}
+#define _PAGE_PADDR_TO_PFN(phys_addr) phys_addr_to_pte_pfn(phys_addr)
+#define _PAGE_PFN_TO_PADDR(pte_val) pte_pfn_to_phys_addr(pte_val)
+
+static inline pteval_t
+mmu_kernel_protected_text_pte_val(pteval_t kernel_text_pte_val, e2k_addr_t cui,
+ bool mmu_pt_v6)
+{
+ if (mmu_pt_v6)
+ return convert_kernel_text_pte_val_v6_to_protected(
+ kernel_text_pte_val);
+ else
+ return convert_kernel_text_pte_val_v2_to_protected(
+ kernel_text_pte_val, cui);
+}
+static inline pteval_t
+kernel_protected_text_pte_val(pteval_t kernel_text_pte_val,
e2k_addr_t cui) +{ + return mmu_kernel_protected_text_pte_val(kernel_text_pte_val, cui, + MMU_IS_PT_V6()); +} +#define _PAGE_KERNEL_PROT_TEXT(kernel_text_pte_val, cui) \ + kernel_protected_text_pte_val(kernel_text_pte_val, cui) + +/* PTE Memory Type */ +static inline enum pte_mem_type get_pte_val_memory_type(pteval_t pte_val) +{ + if (MMU_IS_PT_V6()) + return get_pte_val_v6_memory_type(pte_val); + else + return get_pte_val_v2_memory_type(pte_val); +} +static inline pteval_t set_pte_val_memory_type(pteval_t pte_val, + pte_mem_type_t memory_type) +{ + if (MMU_IS_PT_V6()) + return set_pte_val_v6_memory_type(pte_val, memory_type); + else + return set_pte_val_v2_memory_type(pte_val, memory_type); +} +#define _PAGE_GET_MEM_TYPE(pte_val) \ + get_pte_val_memory_type(pte_val) +#define _PAGE_SET_MEM_TYPE(pte_val, memory_type) \ + set_pte_val_memory_type(pte_val, memory_type) + +static inline pteval_t +mmu_fill_pte_val_flags(const uni_pteval_t uni_flags, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return fill_pte_val_v6_flags(uni_flags); + else + return fill_pte_val_v2_flags(uni_flags); +} +static inline pteval_t +mmu_get_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_flags(pte_val, uni_flags); + else + return get_pte_val_v2_flags(pte_val, uni_flags); +} +static inline bool +mmu_test_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + return mmu_get_pte_val_flags(pte_val, uni_flags, mmu_pt_v6) != 0; +} +static inline pteval_t +mmu_set_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return set_pte_val_v6_flags(pte_val, uni_flags); + else + return set_pte_val_v2_flags(pte_val, uni_flags); +} +static inline pteval_t +mmu_clear_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return clear_pte_val_v6_flags(pte_val, uni_flags); + else + return clear_pte_val_v2_flags(pte_val, uni_flags); +} +static inline pteval_t +fill_pte_val_flags(const uni_pteval_t uni_flags) +{ + return mmu_fill_pte_val_flags(uni_flags, MMU_IS_PT_V6()); +} +static inline pteval_t +get_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_get_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +static inline bool +test_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_test_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +static inline pteval_t +set_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_set_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +static inline pteval_t +clear_pte_val_flags(pteval_t pte_val, const uni_pteval_t uni_flags) +{ + return mmu_clear_pte_val_flags(pte_val, uni_flags, MMU_IS_PT_V6()); +} +#define _PAGE_INIT(uni_flags) fill_pte_val_flags(uni_flags) +#define _PAGE_GET(pte_val, uni_flags) get_pte_val_flags(pte_val, uni_flags) +#define _PAGE_TEST(pte_val, uni_flags) test_pte_val_flags(pte_val, uni_flags) +#define _PAGE_SET(pte_val, uni_flags) set_pte_val_flags(pte_val, uni_flags) +#define _PAGE_CLEAR(pte_val, uni_flags) clear_pte_val_flags(pte_val, uni_flags) + +static inline pteval_t +mmu_get_pte_val_changeable_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_changeable_mask(); + else + return get_pte_val_v2_changeable_mask(); +} +static inline pteval_t +mmu_get_huge_pte_val_changeable_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_huge_pte_val_v6_changeable_mask(); + else + return 
get_huge_pte_val_v2_changeable_mask(); +} +static inline pteval_t +mmu_get_pte_val_reduceable_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_reduceable_mask(); + else + return get_pte_val_v2_reduceable_mask(); +} +static inline pteval_t +mmu_get_pte_val_restricted_mask(bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_pte_val_v6_restricted_mask(); + else + return get_pte_val_v2_restricted_mask(); +} +static inline pteval_t +get_pte_val_changeable_mask(void) +{ + return mmu_get_pte_val_changeable_mask(MMU_IS_PT_V6()); +} +static inline pteval_t +get_huge_pte_val_changeable_mask(void) +{ + return mmu_get_huge_pte_val_changeable_mask(MMU_IS_PT_V6()); +} +static inline pteval_t +get_pte_val_reduceable_mask(void) +{ + return mmu_get_pte_val_reduceable_mask(MMU_IS_PT_V6()); +} +static inline pteval_t +get_pte_val_restricted_mask(void) +{ + return mmu_get_pte_val_restricted_mask(MMU_IS_PT_V6()); +} + +#define _PAGE_CHG_MASK get_pte_val_changeable_mask() +#define _HPAGE_CHG_MASK get_huge_pte_val_changeable_mask() +#define _PROT_REDUCE_MASK get_pte_val_reduceable_mask() +#define _PROT_RESTRICT_MASK get_pte_val_restricted_mask() + +/* some the most popular PTEs */ +#define _PAGE_INIT_VALID _PAGE_INIT(UNI_PAGE_VALID) +#define _PAGE_GET_VALID(pte_val) _PAGE_GET(pte_val, UNI_PAGE_VALID) +#define _PAGE_TEST_VALID(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_VALID) +#define _PAGE_SET_VALID(pte_val) _PAGE_SET(pte_val, UNI_PAGE_VALID) +#define _PAGE_CLEAR_VALID(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_VALID) + +#define _PAGE_INIT_PRESENT _PAGE_INIT(UNI_PAGE_PRESENT) +#define _PAGE_GET_PRESENT(pte_val) _PAGE_GET(pte_val, UNI_PAGE_PRESENT) +#define _PAGE_TEST_PRESENT(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_PRESENT) +#define _PAGE_SET_PRESENT(pte_val) _PAGE_SET(pte_val, UNI_PAGE_PRESENT) +#define _PAGE_CLEAR_PRESENT(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_PRESENT) + +#define _PAGE_INIT_PROTNONE _PAGE_INIT(UNI_PAGE_PROTNONE) +#define _PAGE_GET_PROTNONE(pte_val) _PAGE_GET(pte_val, UNI_PAGE_PROTNONE) +#define _PAGE_TEST_PROTNONE(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_PROTNONE) +#define _PAGE_SET_PROTNONE(pte_val) _PAGE_SET(pte_val, UNI_PAGE_PROTNONE) +#define _PAGE_CLEAR_PROTNONE(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_PROTNONE) + +#define _PAGE_INIT_WRITEABLE _PAGE_INIT(UNI_PAGE_WRITE) +#define _PAGE_GET_WRITEABLE(pte_val) _PAGE_GET(pte_val, UNI_PAGE_WRITE) +#define _PAGE_TEST_WRITEABLE(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_WRITE) +#define _PAGE_SET_WRITEABLE(pte_val) _PAGE_SET(pte_val, UNI_PAGE_WRITE) +#define _PAGE_CLEAR_WRITEABLE(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_WRITE) + +#define _PAGE_INIT_PRIV _PAGE_INIT(UNI_PAGE_PRIV) +#define _PAGE_GET_PRIV(pte_val) _PAGE_GET(pte_val, UNI_PAGE_PRIV) +#define _PAGE_TEST_PRIV(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_PRIV) +#define _PAGE_SET_PRIV(pte_val) _PAGE_SET(pte_val, UNI_PAGE_PRIV) +#define _PAGE_CLEAR_PRIV(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_PRIV) + +#define _PAGE_INIT_ACCESSED _PAGE_INIT(UNI_PAGE_ACCESSED) +#define _PAGE_GET_ACCESSED(pte_val) _PAGE_GET(pte_val, UNI_PAGE_ACCESSED) +#define _PAGE_TEST_ACCESSED(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_ACCESSED) +#define _PAGE_SET_ACCESSED(pte_val) _PAGE_SET(pte_val, UNI_PAGE_ACCESSED) +#define _PAGE_CLEAR_ACCESSED(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_ACCESSED) + +#define _PAGE_INIT_DIRTY _PAGE_INIT(UNI_PAGE_DIRTY) +#define _PAGE_GET_DIRTY(pte_val) _PAGE_GET(pte_val, UNI_PAGE_DIRTY) +#define _PAGE_TEST_DIRTY(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_DIRTY) +#define _PAGE_SET_DIRTY(pte_val) _PAGE_SET(pte_val, 
UNI_PAGE_DIRTY) +#define _PAGE_CLEAR_DIRTY(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_DIRTY) + +#define _PAGE_INIT_HUGE _PAGE_INIT(UNI_PAGE_HUGE) +#define _PAGE_GET_HUGE(pte_val) _PAGE_GET(pte_val, UNI_PAGE_HUGE) +#define _PAGE_TEST_HUGE(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_HUGE) +#define _PAGE_SET_HUGE(pte_val) _PAGE_SET(pte_val, UNI_PAGE_HUGE) +#define _PAGE_CLEAR_HUGE(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_HUGE) + +#define _PAGE_INIT_NOT_EXEC _PAGE_INIT(UNI_PAGE_NON_EX) +#define _PAGE_GET_NOT_EXEC(pte_val) _PAGE_GET(pte_val, UNI_PAGE_NON_EX) +#define _PAGE_TEST_NOT_EXEC(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_NON_EX) +#define _PAGE_SET_NOT_EXEC(pte_val) _PAGE_SET(pte_val, UNI_PAGE_NON_EX) +#define _PAGE_CLEAR_NOT_EXEC(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_NON_EX) + +#define _PAGE_INIT_EXECUTEABLE ((pteval_t)0ULL) +#define _PAGE_TEST_EXECUTEABLE(pte_val) (!_PAGE_TEST_NOT_EXEC(pte_val)) +#define _PAGE_SET_EXECUTEABLE(pte_val) _PAGE_CLEAR_NOT_EXEC(pte_val) +#define _PAGE_CLEAR_EXECUTEABLE(pte_val) _PAGE_SET_NOT_EXEC(pte_val) + +#define _PAGE_INIT_SPECIAL _PAGE_INIT(UNI_PAGE_SPECIAL) +#define _PAGE_GET_SPECIAL(pte_val) _PAGE_GET(pte_val, UNI_PAGE_SPECIAL) +#define _PAGE_TEST_SPECIAL(pte_val) _PAGE_TEST(pte_val, UNI_PAGE_SPECIAL) +#define _PAGE_SET_SPECIAL(pte_val) _PAGE_SET(pte_val, UNI_PAGE_SPECIAL) +#define _PAGE_CLEAR_SPECIAL(pte_val) _PAGE_CLEAR(pte_val, UNI_PAGE_SPECIAL) + +#define _PAGE_PFN_MASK _PAGE_INIT(UNI_PAGE_PFN) + +#define _PAGE_KERNEL_RX_NOT_GLOB \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_PRIV | UNI_PAGE_HW_ACCESS) +#define _PAGE_KERNEL_RO_NOT_GLOB \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_PRIV | UNI_PAGE_HW_ACCESS | \ + UNI_PAGE_NON_EX) +#define _PAGE_KERNEL_RWX_NOT_GLOB \ + _PAGE_SET(_PAGE_KERNEL_RX_NOT_GLOB, \ + UNI_PAGE_WRITE | UNI_PAGE_DIRTY) +#define _PAGE_KERNEL_RW_NOT_GLOB \ + _PAGE_SET(_PAGE_KERNEL_RWX_NOT_GLOB, UNI_PAGE_NON_EX) +#define _PAGE_KERNEL_HUGE_RW_NOT_GLOB \ + _PAGE_SET(_PAGE_KERNEL_RW_NOT_GLOB, UNI_PAGE_HUGE) +#ifdef CONFIG_GLOBAL_CONTEXT +#define _PAGE_KERNEL_RX \ + _PAGE_SET(_PAGE_KERNEL_RX_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_RO \ + _PAGE_SET(_PAGE_KERNEL_RO_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_RWX \ + _PAGE_SET(_PAGE_KERNEL_RWX_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_RW \ + _PAGE_SET(_PAGE_KERNEL_RW_NOT_GLOB, UNI_PAGE_GLOBAL) +#define _PAGE_KERNEL_HUGE_RW \ + _PAGE_SET(_PAGE_KERNEL_HUGE_RW_NOT_GLOB, UNI_PAGE_GLOBAL) +#else /* ! 
CONFIG_GLOBAL_CONTEXT */ +#define _PAGE_KERNEL_RX _PAGE_KERNEL_RX_NOT_GLOB +#define _PAGE_KERNEL_RO _PAGE_KERNEL_RO_NOT_GLOB +#define _PAGE_KERNEL_RWX _PAGE_KERNEL_RWX_NOT_GLOB +#define _PAGE_KERNEL_RW _PAGE_KERNEL_RW_NOT_GLOB +#define _PAGE_KERNEL_HUGE_RW _PAGE_KERNEL_HUGE_RW_NOT_GLOB +#endif /* CONFIG_GLOBAL_CONTEXT */ + +#define _PAGE_KERNEL _PAGE_KERNEL_RW +#define _PAGE_KERNEL_HUGE _PAGE_KERNEL_HUGE_RW +#define _PAGE_KERNEL_IMAGE _PAGE_KERNEL_RX +#define _PAGE_KERNEL_PT _PAGE_KERNEL +#define _PAGE_USER_PT _PAGE_KERNEL_RW_NOT_GLOB +#define _PAGE_KERNEL_PTE _PAGE_KERNEL_PT +#define _PAGE_KERNEL_PMD _PAGE_KERNEL_PT +#define _PAGE_KERNEL_PUD _PAGE_KERNEL_PT +#define _PAGE_USER_PTE _PAGE_USER_PT +#define _PAGE_USER_PMD _PAGE_USER_PT +#define _PAGE_USER_PUD _PAGE_USER_PT + +#define _PAGE_IO_MAP_BASE _PAGE_KERNEL_RW +#define _PAGE_IO_MAP \ + _PAGE_SET_MEM_TYPE(_PAGE_IO_MAP_BASE, EXT_NON_PREFETCH_MT) +#define _PAGE_IO_PORTS \ + _PAGE_SET_MEM_TYPE(_PAGE_IO_MAP_BASE, EXT_NON_PREFETCH_MT) + +#define _PAGE_KERNEL_SWITCHING_IMAGE \ + _PAGE_SET_MEM_TYPE(_PAGE_KERNEL_RX_NOT_GLOB, EXT_CONFIG_MT) + +#define _PAGE_PROT_MASK \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_WRITE | UNI_PAGE_NON_EX) + +#define _PAGE_USER \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_WRITE | UNI_PAGE_DIRTY | UNI_PAGE_NON_EX) +#define _PAGE_USER_RO_ACCESSED \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_NON_EX) + +#define PAGE_KERNEL __pgprot(_PAGE_KERNEL) +#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_RWX) +#define PAGE_KERNEL_LARGE __pgprot(_PAGE_KERNEL_HUGE) +#define PAGE_KERNEL_PTE __pgprot(_PAGE_KERNEL_PTE) +#define PAGE_KERNEL_PMD __pgprot(_PAGE_KERNEL_PMD) +#define PAGE_KERNEL_PUD __pgprot(_PAGE_KERNEL_PUD) +#define PAGE_USER_PTE __pgprot(_PAGE_USER_PTE) +#define PAGE_USER_PMD __pgprot(_PAGE_USER_PMD) +#define PAGE_USER_PUD __pgprot(_PAGE_USER_PUD) + +#define PAGE_KERNEL_NOCACHE PAGE_IO_MAP + +#define PAGE_USER __pgprot(_PAGE_USER) +#define PAGE_USER_RO_ACCESSED __pgprot(_PAGE_USER_RO_ACCESSED) + +#define PAGE_KERNEL_TEXT __pgprot(_PAGE_KERNEL_IMAGE) + +#define PAGE_KERNEL_DATA \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, \ + UNI_PAGE_WRITE | UNI_PAGE_DIRTY | \ + UNI_PAGE_NON_EX)) +#define PAGE_KERNEL_STACK \ + __pgprot(_PAGE_SET(_PAGE_KERNEL, UNI_PAGE_NON_EX)) + +#define PAGE_USER_HWS __pgprot(_PAGE_KERNEL_RW_NOT_GLOB) +#define PAGE_USER_PS PAGE_USER_HWS +#define PAGE_USER_PCS PAGE_USER_HWS +#define PAGE_USER_STACK \ + __pgprot(_PAGE_SET(_PAGE_USER, UNI_PAGE_NON_EX)) + +#define PAGE_TAG_MEMORY __pgprot(_PAGE_KERNEL_RW_NOT_GLOB) + +#define PAGE_BOOTINFO \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_INITRD \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_MPT \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_KERNEL_NAMETAB \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_MAPPED_PHYS_MEM __pgprot(_PAGE_KERNEL) + +#define PAGE_CNTP_MAPPED_MEM \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_IMAGE, UNI_PAGE_NON_EX)) + +#define PAGE_X86_IO_PORTS __pgprot(_PAGE_IO_PORTS) + +#define PAGE_IO_MAP __pgprot(_PAGE_IO_MAP) + +#define PAGE_KERNEL_SWITCHING_TEXT __pgprot(_PAGE_KERNEL_SWITCHING_IMAGE) +#define PAGE_KERNEL_SWITCHING_DATA \ + __pgprot(_PAGE_SET(_PAGE_KERNEL_SWITCHING_IMAGE, \ + UNI_PAGE_WRITE | UNI_PAGE_NON_EX)) +#define PAGE_KERNEL_SWITCHING_US_STACK \ + 
__pgprot(_PAGE_SET_MEM_TYPE(_PAGE_KERNEL_RW_NOT_GLOB, \ + EXT_CONFIG_MT)) + +#define PAGE_SHARED \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_WRITE | UNI_PAGE_NON_EX)) +#define PAGE_SHARED_EX \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_WRITE)) +#define PAGE_COPY_NEX \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_NON_EX)) +#define PAGE_COPY_EX \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS)) + +#define PAGE_COPY PAGE_COPY_NEX + +#define PAGE_READONLY \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS | \ + UNI_PAGE_NON_EX)) +#define PAGE_EXECUTABLE \ + __pgprot(_PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_VALID | \ + UNI_PAGE_HW_ACCESS | UNI_PAGE_SW_ACCESS)) + +/* + * PAGE_NONE is used for NUMA hinting faults and should be valid. + * __P000/__S000 are used for mmap(PROT_NONE) mapping and should be not valid. + */ +#define PAGE_ENPTY __pgprot(0ULL) +#define PAGE_NONE \ + __pgprot(_PAGE_INIT(UNI_PAGE_PROTNONE | UNI_PAGE_HW_ACCESS | \ + UNI_PAGE_VALID)) +#define PAGE_NONE_INVALID \ + __pgprot(_PAGE_INIT(UNI_PAGE_PROTNONE | UNI_PAGE_HW_ACCESS)) + +#define PAGE_INT_PR \ + __pgprot(_PAGE_INIT(UNI_PAGE_INT_PR)) + +/* + * Next come the mappings that determine how mmap() protection bits + * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The + * _P version gets used for a private shared memory segment, the _S + * version gets used for a shared memory segment with MAP_SHARED on. + * In a private shared memory segment, we do a copy-on-write if a task + * attempts to write to the page. 
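+ *
+ * The map is indexed by the three xwr bits plus a "shared" bit; an
+ * editor's illustration:
+ *
+ *	idx = (shared ? 8 : 0) |
+ *	      (exec ? 4 : 0) | (write ? 2 : 0) | (read ? 1 : 0);
+ *
+ * so a private PROT_READ|PROT_WRITE mapping uses entry 3, filled at
+ * boot from PROT_MAP_P011 (PAGE_COPY_NEX) by create_protection_map()
+ * below.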
+ */ + /* initial boot-time page protections are not used, */ + /* so can be set to empty state */ + /* xwr */ +#define __P000 PAGE_ENPTY /* PAGE_NONE_INVALID */ +#define __P001 PAGE_ENPTY /* PAGE_READONLY */ +#define __P010 PAGE_ENPTY /* PAGE_COPY_NEX */ +#define __P011 PAGE_ENPTY /* PAGE_COPY_NEX */ +#define __P100 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __P101 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __P110 PAGE_ENPTY /* PAGE_COPY_EX */ +#define __P111 PAGE_ENPTY /* PAGE_COPY_EX */ + +#define __S000 PAGE_ENPTY /* PAGE_NONE_INVALID */ +#define __S001 PAGE_ENPTY /* PAGE_READONLY */ +#define __S010 PAGE_ENPTY /* PAGE_SHARED */ +#define __S011 PAGE_ENPTY /* PAGE_SHARED */ +#define __S100 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __S101 PAGE_ENPTY /* PAGE_EXECUTABLE */ +#define __S110 PAGE_ENPTY /* PAGE_SHARED_EX */ +#define __S111 PAGE_ENPTY /* PAGE_SHARED_EX */ + +/* real protection map */ + /* xwr */ +#define PROT_MAP_P000 PAGE_NONE_INVALID +#define PROT_MAP_P001 PAGE_READONLY +#define PROT_MAP_P010 PAGE_COPY_NEX +#define PROT_MAP_P011 PAGE_COPY_NEX +#define PROT_MAP_P100 PAGE_EXECUTABLE +#define PROT_MAP_P101 PAGE_EXECUTABLE +#define PROT_MAP_P110 PAGE_COPY_EX +#define PROT_MAP_P111 PAGE_COPY_EX + +#define PROT_MAP_S000 PAGE_NONE_INVALID +#define PROT_MAP_S001 PAGE_READONLY +#define PROT_MAP_S010 PAGE_SHARED +#define PROT_MAP_S011 PAGE_SHARED +#define PROT_MAP_S100 PAGE_EXECUTABLE +#define PROT_MAP_S101 PAGE_EXECUTABLE +#define PROT_MAP_S110 PAGE_SHARED_EX +#define PROT_MAP_S111 PAGE_SHARED_EX + +static inline void +create_protection_map(pgprot_t prot_map[16]) +{ + prot_map[0] = PROT_MAP_P000; + prot_map[1] = PROT_MAP_P001; + prot_map[2] = PROT_MAP_P010; + prot_map[3] = PROT_MAP_P011; + prot_map[4] = PROT_MAP_P100; + prot_map[5] = PROT_MAP_P101; + prot_map[6] = PROT_MAP_P110; + prot_map[7] = PROT_MAP_P111; + + prot_map[8 + 0] = PROT_MAP_S000; + prot_map[8 + 1] = PROT_MAP_S001; + prot_map[8 + 2] = PROT_MAP_S010; + prot_map[8 + 3] = PROT_MAP_S011; + prot_map[8 + 4] = PROT_MAP_S100; + prot_map[8 + 5] = PROT_MAP_S101; + prot_map[8 + 6] = PROT_MAP_S110; + prot_map[8 + 7] = PROT_MAP_S111; +} + +#define pgd_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pgd 0x%016lx.\n", \ + __FILE__, __LINE__, pgd_val(e)); \ + dump_stack(); \ + } while (0) +#define pud_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pud 0x%016lx.\n", \ + __FILE__, __LINE__, pud_val(e)); \ + dump_stack(); \ + } while (0) +#define pmd_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pmd 0x%016lx.\n", \ + __FILE__, __LINE__, pmd_val(e)); \ + dump_stack(); \ + } while (0) +#define pte_ERROR(e) \ + do { \ + pr_warn("%s:%d: bad pte 0x%016lx.\n", \ + __FILE__, __LINE__, pte_val(e)); \ + dump_stack(); \ + } while (0) + +/* + * This takes a physical page address and protection bits to make + * pte/pmd/pud/pgd + */ +#define mk_pte_phys(phys_addr, pgprot) \ + (__pte(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) +#define mk_pmd_phys(phys_addr, pgprot) \ + (__pmd(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) +#define mk_pud_phys(phys_addr, pgprot) \ + (__pud(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) +#define mk_pgd_phys(phys_addr, pgprot) \ + (__pgd(_PAGE_PADDR_TO_PFN(phys_addr) | pgprot_val(pgprot))) + +#define mk_pmd_addr(virt_addr, pgprot) \ + (__pmd(_PAGE_PADDR_TO_PFN(__pa(virt_addr)) | pgprot_val(pgprot))) +#define mk_pud_addr(virt_addr, pgprot) \ + (__pud(_PAGE_PADDR_TO_PFN(__pa(virt_addr)) | pgprot_val(pgprot))) +#define mk_pgd_addr(virt_addr, pgprot) \ + (__pgd(_PAGE_PADDR_TO_PFN(__pa(virt_addr)) | pgprot_val(pgprot))) + 
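+/*
+ * For instance (an editor's sketch, not part of the original header),
+ * a kernel page table entry can be built straight from a physical
+ * address, and a pmd can be pointed at a pte table through its
+ * virtual address:
+ *
+ *	pte_t pte = mk_pte_phys(paddr, PAGE_KERNEL);
+ *	pmd_t pmd = mk_pmd_addr(pte_table, PAGE_KERNEL_PTE);
+ */
+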
+/*
+ * Conversion functions: convert page frame number (pfn) and
+ * a protection value to a page table entry (pte).
+ */
+#define pfn_pte(pfn, pgprot)	mk_pte_phys((pfn) << PAGE_SHIFT, pgprot)
+#define pfn_pmd(pfn, pgprot)	mk_pmd_phys((pfn) << PAGE_SHIFT, pgprot)
+
+/*
+ * Currently all these mappings correlate to what arm64 uses
+ * and there must be a good reason to use anything else.
+ *
+ * Any changes here should take into account set_general_mt()
+ * and set_external_mt().
+ */
+#define pgprot_device(prot) \
+	(__pgprot(_PAGE_SET_MEM_TYPE(pgprot_val(prot), EXT_NON_PREFETCH_MT)))
+#define pgprot_noncached(prot) \
+	(__pgprot(_PAGE_SET_MEM_TYPE(pgprot_val(prot), GEN_NON_CACHE_ORDERED_MT)))
+/* pgprot_writecombine() can be used both for RAM and devices. While the
+ * "general" memory type can be used for devices, using the "external"
+ * type for RAM is prohibited as it disables cache snooping. So by
+ * default use the "general" memory type. */
+#define pgprot_writecombine(prot) \
+	__pgprot(_PAGE_SET_MEM_TYPE(pgprot_val(prot), GEN_NON_CACHE_MT))
+
+#define pgprot_writethrough	pgprot_writecombine
+
+/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+#define PTE_PFN_MASK		_PAGE_PFN_MASK
+
+/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
+#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
+
+/* PTE_PROTECTS_MASK extracts the protection flags from a (pte|pmd|pud|pgd)val_t */
+#define PTE_PROTECTS_MASK	_PAGE_INIT(UNI_PAGE_WRITE | UNI_PAGE_NON_EX)
+
+/* PT_USER_FLAGS_MASK extracts the flags from a user (pgd|pud|pmd)val_t */
+#define PMD_USER_FLAGS_MASK	_PAGE_SET(_PAGE_USER_PTE, UNI_PAGE_HUGE)
+#define PUD_USER_FLAGS_MASK	(_PAGE_USER_PMD)
+#define PGD_USER_FLAGS_MASK	(_PAGE_USER_PUD)
+
+static inline pteval_t pte_flags(pte_t pte)
+{
+	return pte_val(pte) & PTE_FLAGS_MASK;
+}
+static inline pteval_t pte_only_flags(pte_t pte)
+{
+	return pte_flags(pte) & ~PTE_PROTECTS_MASK;
+}
+static inline pteval_t pte_only_protects(pte_t pte)
+{
+	return pte_val(pte) & PTE_PROTECTS_MASK;
+}
+
+static inline pmdval_t pmd_user_flags(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_USER_FLAGS_MASK;
+}
+
+static inline pudval_t pud_user_flags(pud_t pud)
+{
+	return pud_val(pud) & PUD_USER_FLAGS_MASK;
+}
+
+static inline pgdval_t pgd_user_flags(pgd_t pgd)
+{
+	return pgd_val(pgd) & PGD_USER_FLAGS_MASK;
+}
+
+/*
+ * Extract pfn from pte.
+ */ +#define pte_pfn(pte) (_PAGE_PFN_TO_PADDR(pte_val(pte)) >> PAGE_SHIFT) +#define pmd_pfn(pmd) (_PAGE_PFN_TO_PADDR(pmd_val(pmd)) >> PAGE_SHIFT) +#define pud_pfn(pud) (_PAGE_PFN_TO_PADDR(pud_val(pud)) >> PAGE_SHIFT) +#define pgd_pfn(pgd) (_PAGE_PFN_TO_PADDR(pgd_val(pgd)) >> PAGE_SHIFT) + +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) + +#define mk_pfn_pte(pfn, pte) \ + pfn_pte(pfn, __pgprot(pte_val(pte) & ~_PAGE_PFN_MASK)) +#define mk_clone_pte(page, pte) \ + pfn_pte(page_to_pfn(page), \ + __pgprot(pte_val(pte) & ~_PAGE_PFN_MASK)) +#define mk_not_present_pte(pgprot) \ + __pte(_PAGE_CLEAR_PRESENT(pgprot_val(pgprot))) +#define mk_guest_pfn_prot(pgprot) \ + __pgprot(_PAGE_SET(pgprot_val(pgprot), UNI_PAGE_GFN)) +#define mk_pte_pgprot(pte, pgprot) \ + __pte(pte_val(pte) | pgprot_val(pgprot)) +#define mk_pmd_pgprot(pmd, pgprot) \ + __pmd(pmd_val(pmd) | pgprot_val(pgprot)) +#define page_pte_prot(page, prot) mk_pte(page, prot) +#define page_pte(page) page_pte_prot(page, __pgprot(0)) + +#define pgprot_modify_mask(old_prot, newprot_val, prot_mask) \ + (__pgprot(((pgprot_val(old_prot) & ~(prot_mask)) | \ + ((newprot_val) & (prot_mask))))) + +#define pgprot_large_size_set(prot) \ + __pgprot(_PAGE_SET_HUGE(pgprot_val(prot))) +#define pgprot_small_size_set(prot) \ + __pgprot(_PAGE_CLEAR_HUGE(pgprot_val(prot))) +#define pgprot_present_flag_set(prot) \ + pgprot_modify_mask(prot, _PAGE_INIT_PRESENT, _PAGE_INIT_PRESENT) +#define pgprot_present_flag_reset(prot) \ + pgprot_modify_mask(prot, 0UL, \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_PFN)) +#define _pgprot_reduce(src_prot_val, reduced_prot_val) \ + (((src_prot_val) & ~(_PROT_REDUCE_MASK)) | \ + (((src_prot_val) & (_PROT_REDUCE_MASK)) | \ + ((reduced_prot_val) & (_PROT_REDUCE_MASK)))) +#define _pgprot_restrict(src_prot_val, restricted_prot_val) \ + (((src_prot_val) & ~(_PROT_RESTRICT_MASK)) | \ + (((src_prot_val) & (_PROT_RESTRICT_MASK)) & \ + ((restricted_prot_val) & (_PROT_RESTRICT_MASK)))) +#define pgprot_reduce(src_prot, reduced_prot) \ + (__pgprot(_pgprot_reduce(pgprot_val(src_prot), \ + pgprot_val(reduced_prot)))) +#define pgprot_restrict(src_prot, restricted_prot) \ + (__pgprot(_pgprot_restrict(pgprot_val(src_prot), \ + pgprot_val(restricted_prot)))) +#define pte_reduce_prot(src_pte, reduced_prot) \ + (__pte(_pgprot_reduce(pte_val(src_pte), \ + pgprot_val(reduced_prot)))) +#define pte_restrict_prot(src_pte, restricted_prot) \ + (__pte(_pgprot_restrict(pte_val(src_pte), \ + pgprot_val(restricted_prot)))) +#define pgprot_priv(pgprot) _PAGE_TEST_PRIV(pgprot_val(pgprot)) +#define pgprot_present(pgprot) _PAGE_TEST_PRESENT(pgprot_val(pgprot)) +#define pgprot_valid(pgprot) _PAGE_TEST_VALID(pgprot_val(pgprot)) +#define pgprot_write(pgprot) _PAGE_TEST_WRITEABLE(pgprot_val(pgprot)) +#define pgprot_huge(pgprot) _PAGE_TEST_HUGE(pgprot_val(pgprot)) +#define pgprot_special(pgprot) _PAGE_TEST_SPECIAL(pgprot_val(pgprot)) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pteval_t val = pte_val(pte); + + val &= _PAGE_CHG_MASK; + val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK; + + return __pte(val); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmdval_t val = pmd_val(pmd); + + val &= _HPAGE_CHG_MASK; + val |= pgprot_val(newprot) & ~_HPAGE_CHG_MASK; + + return __pmd(val); +} + +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +# define pte_none(pte) (!pte_val(pte)) +#else +# define pte_none(pte) (_PAGE_CLEAR_VALID(pte_val(pte)) == 0) +#endif + +#define 
pte_valid(pte)	_PAGE_TEST_VALID(pte_val(pte))
+#define pte_present(pte) \
+		_PAGE_TEST(pte_val(pte), UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE)
+#define pte_secondary(pte)	(pte_val(pte) & _PAGE_SEC_MAP)
+#define pte_priv(pte)		_PAGE_TEST_PRIV(pte_val(pte))
+#define pte_clear_guest(pte) \
+		(__pte(_PAGE_CLEAR(pte_val(pte), \
+				UNI_PAGE_PRIV | UNI_PAGE_GLOBAL)))
+#define pte_set_priv(pte)	__pte(_PAGE_SET_PRIV(pte_val(pte)))
+#define pte_large_page(pte)	_PAGE_TEST_HUGE(pte_val(pte))
+#define pte_set_small_size(pte)	__pte(_PAGE_CLEAR_HUGE(pte_val(pte)))
+#define pte_set_large_size(pte)	__pte(_PAGE_SET_HUGE(pte_val(pte)))
+
+#define pte_accessible(mm, pte) \
+	(mm_tlb_flush_pending(mm) ? pte_present(pte) : \
+				_PAGE_TEST_PRESENT(pte_val(pte)))
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These return true for PAGE_NONE too but the kernel does not care.
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+	return _PAGE_GET(pte_val(pte), UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE) ==
+			_PAGE_INIT_PROTNONE;
+}
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return _PAGE_GET(pmd_val(pmd), UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE) ==
+			_PAGE_INIT_PROTNONE;
+}
+# define pte_present_and_exec(pte)	(pte_present(pte) && pte_exec(pte))
+#else	/* ! CONFIG_NUMA_BALANCING */
+# define pte_present_and_exec(pte) \
+		(_PAGE_GET(pte_val(pte), \
+				UNI_PAGE_PRESENT | UNI_PAGE_NON_EX) == \
+			_PAGE_INIT_PRESENT)
+#endif	/* CONFIG_NUMA_BALANCING */
+
+/* Since x86 uses Write Combine both for external devices
+ * (meaning optimization of CPU accesses) and for RAM
+ * (meaning avoid cache allocation) we do the same here
+ * as that is what drivers expect. */
+#define is_mt_wb(mt) \
+({ \
+	u64 __im_mt = (mt); \
+	(__im_mt == GEN_CACHE_MT || __im_mt == EXT_CACHE_MT); \
+})
+#define is_mt_wc(mt) \
+({ \
+	u64 __im_mt = (mt); \
+	(__im_mt == GEN_NON_CACHE_MT || __im_mt == EXT_PREFETCH_MT); \
+})
+#define is_mt_uc(mt) \
+({ \
+	u64 __im_mt = (mt); \
+	(__im_mt == GEN_NON_CACHE_ORDERED_MT || \
+		__im_mt == EXT_NON_PREFETCH_MT || __im_mt == EXT_CONFIG_MT); \
+})
+#define is_mt_general(mt) \
+({ \
+	u64 __im_mt = (mt); \
+	(__im_mt == GEN_NON_CACHE_MT || __im_mt == GEN_CACHE_MT); \
+})
+#define is_mt_external(mt)	(!is_mt_general(mt))
+
+static inline pgprot_t set_general_mt(pgprot_t prot)
+{
+	pte_mem_type_t mt = get_pte_val_memory_type(pgprot_val(prot));
+
+	switch (mt) {
+	case EXT_CACHE_MT:
+		prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot),
+				GEN_CACHE_MT));
+		break;
+	case EXT_NON_PREFETCH_MT:
+		/* pgprot_device() case */
+	case EXT_PREFETCH_MT:
+	case EXT_CONFIG_MT:
+		mt = GEN_NON_CACHE_MT;
+		prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot),
+				GEN_NON_CACHE_MT));
+		break;
+	case GEN_NON_CACHE_ORDERED_MT:
+		/* pgprot_noncached() case */
+	case GEN_NON_CACHE_MT:
+		/* pgprot_writecombine() and pgprot_writethrough() case */
+	case GEN_CACHE_MT:
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot),
+				GEN_NON_CACHE_MT));
+		break;
+	}
+
+	return prot;
+}
+
+static inline pgprot_t set_external_mt(pgprot_t prot)
+{
+	pte_mem_type_t mt = get_pte_val_memory_type(pgprot_val(prot));
+
+	switch (mt) {
+	case GEN_CACHE_MT:
+		prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot),
+				EXT_CACHE_MT));
+		break;
+	case GEN_NON_CACHE_MT:
+		/* pgprot_writecombine() and pgprot_writethrough() case */
+		prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot),
+				EXT_PREFETCH_MT));
+		break;
+	case GEN_NON_CACHE_ORDERED_MT:
+		/* pgprot_noncached() case */
+ prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + EXT_NON_PREFETCH_MT)); + break; + case EXT_NON_PREFETCH_MT: + /* pgprot_device() case */ + case EXT_PREFETCH_MT: + case EXT_CONFIG_MT: + case EXT_CACHE_MT: + break; + default: + WARN_ON_ONCE(1); + prot = __pgprot(set_pte_val_memory_type(pgprot_val(prot), + EXT_CONFIG_MT)); + break; + } + + return prot; +} + +/* + * See comment in pmd_present() - since _PAGE_HUGE bit stays on at all times + * (both during split_huge_page and when the _PAGE_PROTNONE bit gets set) + * we can check only the _PAGE_HUGE bit. + */ +#define pmd_present_and_exec_and_huge(pmd) \ + (_PAGE_GET(pmd_val(pmd), UNI_PAGE_NON_EX | \ + UNI_PAGE_HUGE) == \ + _PAGE_INIT_HUGE) + +#define pud_present_and_exec_and_huge(pud) \ + (_PAGE_GET(pud_val(pud), UNI_PAGE_PRESENT | \ + UNI_PAGE_NON_EX | UNI_PAGE_HUGE) == \ + _PAGE_INIT(UNI_PAGE_PRESENT | UNI_PAGE_HUGE)) + +/* + * See comment in pmd_present() - since _PAGE_HUGE bit stays on at all times + * (both during split_huge_page and when the _PAGE_PROTNONE bit gets set) we + * should not return "pmd_none() == true" when the _PAGE_HUGE bit is set. + */ +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +# define pmd_none(pmd) (pmd_val(pmd) == 0) +#else +# define pmd_none(pmd) (_PAGE_CLEAR_VALID(pmd_val(pmd)) == 0) +#endif + +#define pmd_valid(pmd) _PAGE_TEST_VALID(pmd_val(pmd)) + +/* This will return true for huge pages as expected by arch-independent part */ +static inline int pmd_bad(pmd_t pmd) +{ + return unlikely(_PAGE_CLEAR(pmd_val(pmd) & PTE_FLAGS_MASK, + UNI_PAGE_GLOBAL) != _PAGE_USER_PTE); +} + +#define user_pmd_huge(pmd) _PAGE_TEST_HUGE(pmd_val(pmd)) +#define kernel_pmd_huge(pmd) \ + (is_huge_pmd_level() && _PAGE_TEST_HUGE(pmd_val(pmd))) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define has_transparent_hugepage has_transparent_hugepage +static inline int has_transparent_hugepage(void) +{ + return cpu_has(CPU_FEAT_ISET_V3); +} + +#define pmd_trans_huge(pmd) user_pmd_huge(pmd) +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +/* + * Checking for _PAGE_HUGE is needed too because + * split_huge_page will temporarily clear the present bit (but + * the _PAGE_HUGE flag will remain set at all times while the + * _PAGE_PRESENT bit is clear). 
+ */ +#define pmd_present(pmd) \ + _PAGE_TEST(pmd_val(pmd), UNI_PAGE_PRESENT | \ + UNI_PAGE_PROTNONE | UNI_PAGE_HUGE) +#define pmd_write(pmd) _PAGE_TEST_WRITEABLE(pmd_val(pmd)) +#define pmd_exec(pmd) _PAGE_TEST_EXECUTEABLE(pmd_val(pmd)) +#define pmd_dirty(pmd) _PAGE_TEST_DIRTY(pmd_val(pmd)) +#define pmd_young(pmd) _PAGE_TEST_ACCESSED(pmd_val(pmd)) +#define pmd_wb(pmd) is_mt_wb(_PAGE_GET_MEM_TYPE(pmd_val(pmd))) +#define pmd_wc(pmd) is_mt_wc(_PAGE_GET_MEM_TYPE(pmd_val(pmd))) +#define pmd_uc(pmd) is_mt_uc(_PAGE_GET_MEM_TYPE(pmd_val(pmd))) + +#define pmd_wrprotect(pmd) (__pmd(_PAGE_CLEAR_WRITEABLE(pmd_val(pmd)))) +#define pmd_mkwrite(pmd) (__pmd(_PAGE_SET_WRITEABLE(pmd_val(pmd)))) +#define pmd_mkexec(pmd) (__pmd(_PAGE_SET_EXECUTEABLE(pmd_val(pmd)))) +#define pmd_mknotexec(pmd) (__pmd(_PAGE_CLEAR_EXECUTEABLE(pmd_val(pmd)))) +#define pmd_mkpresent(pmd) (__pmd(_PAGE_SET_PRESENT(pmd_val(pmd)))) +#define pmd_mk_present_valid(pmd) (__pmd(_PAGE_SET(pmd_val(pmd), \ + UNI_PAGE_PRESENT | UNI_PAGE_VALID))) +#define pmd_mknotpresent(pmd) \ + (__pmd(_PAGE_CLEAR(pmd_val(pmd), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE))) +#define pmd_mknot_present_valid(pmd) (__pmd(_PAGE_CLEAR(pmd_val(pmd), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE | UNI_PAGE_VALID))) +#define pmd_mkold(pmd) (__pmd(_PAGE_CLEAR_ACCESSED(pmd_val(pmd)))) +#define pmd_mkyoung(pmd) (__pmd(_PAGE_SET_ACCESSED(pmd_val(pmd)))) +#define pmd_mkclean(pmd) (__pmd(_PAGE_CLEAR_DIRTY(pmd_val(pmd)))) +#define pmd_mkdirty(pmd) (__pmd(_PAGE_SET_DIRTY(pmd_val(pmd)))) +#define pmd_mkhuge(pmd) (__pmd(_PAGE_SET_HUGE(pmd_val(pmd)))) +#define pmd_clear_guest(pmd) \ + (__pmd(_PAGE_CLEAR(pmd_val(pmd), \ + UNI_PAGE_PRIV | UNI_PAGE_GLOBAL))) +static inline pmd_t pmd_mk_wb(pmd_t pmd) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pmd_val(pmd)))) + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), EXT_CACHE_MT)); + else + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), GEN_CACHE_MT)); +} +static inline pmd_t pmd_mk_wc(pmd_t pmd) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pmd_val(pmd)))) + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), EXT_PREFETCH_MT)); + else + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), GEN_NON_CACHE_MT)); +} +static inline pmd_t pmd_mk_uc(pmd_t pmd) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pmd_val(pmd)))) + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), EXT_NON_PREFETCH_MT)); + else + return __pmd(_PAGE_SET_MEM_TYPE(pmd_val(pmd), GEN_NON_CACHE_ORDERED_MT)); +} + +#ifndef CONFIG_MAKE_ALL_PAGES_VALID +# define pud_none(pud) (pud_val(pud) == 0) +#else +# define pud_none(pud) (_PAGE_CLEAR_VALID(pud_val(pud)) == 0) +#endif + +#define pud_valid(pud) _PAGE_TEST_VALID(pud_val(pud)) + +/* This will return true for huge pages as expected by arch-independent part */ +static inline int pud_bad(pud_t pud) +{ + return unlikely(_PAGE_CLEAR(pud_val(pud) & PTE_FLAGS_MASK, + UNI_PAGE_GLOBAL) != _PAGE_USER_PMD); +} + +#define pud_present(pud) _PAGE_TEST_PRESENT(pud_val(pud)) +#define pud_write(pud) _PAGE_TEST_WRITEABLE(pud_val(pud)) +#define pud_exec(pud) _PAGE_TEST_EXECUTEABLE(pud_val(pud)) +#define user_pud_huge(pud) _PAGE_TEST_HUGE(pud_val(pud)) +#define kernel_pud_huge(pud) \ + (is_huge_pud_level() && _PAGE_TEST_HUGE(pud_val(pud))) +#define pud_wb(pud) is_mt_wb(_PAGE_GET_MEM_TYPE(pud_val(pud))) +#define pud_wc(pud) is_mt_wc(_PAGE_GET_MEM_TYPE(pud_val(pud))) +#define pud_uc(pud) is_mt_uc(_PAGE_GET_MEM_TYPE(pud_val(pud))) + +#define pud_clear_guest(pud) \ + (__pud(_PAGE_CLEAR(pud_val(pud), \ + UNI_PAGE_PRIV | UNI_PAGE_GLOBAL))) +#define pud_wrprotect(pud) 
(__pud(_PAGE_CLEAR_WRITEABLE(pud_val(pud))))
+#define pud_mkwrite(pud)	(__pud(_PAGE_SET_WRITEABLE(pud_val(pud))))
+#define pud_mkexec(pud)		(__pud(_PAGE_SET_EXECUTEABLE(pud_val(pud))))
+#define pud_mknotexec(pud)	(__pud(_PAGE_CLEAR_EXECUTEABLE(pud_val(pud))))
+#define pud_mkpresent(pud)	(__pud(_PAGE_SET_PRESENT(pud_val(pud))))
+#define pud_mk_present_valid(pud)	(__pud(_PAGE_SET(pud_val(pud), \
+					UNI_PAGE_PRESENT | UNI_PAGE_VALID)))
+#define pud_mknotpresent(pud)	(__pud(_PAGE_CLEAR_PRESENT(pud_val(pud))))
+#define pud_mknot_present_valid(pud)	(__pud(_PAGE_CLEAR(pud_val(pud), \
+					UNI_PAGE_PRESENT | UNI_PAGE_VALID)))
+static inline pud_t pud_mk_wb(pud_t pud)
+{
+	if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud))))
+		return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), EXT_CACHE_MT));
+	else
+		return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), GEN_CACHE_MT));
+}
+static inline pud_t pud_mk_wc(pud_t pud)
+{
+	if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud))))
+		return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), EXT_PREFETCH_MT));
+	else
+		return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), GEN_NON_CACHE_MT));
+}
+static inline pud_t pud_mk_uc(pud_t pud)
+{
+	if (is_mt_external(_PAGE_GET_MEM_TYPE(pud_val(pud))))
+		return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), EXT_NON_PREFETCH_MT));
+	else
+		return __pud(_PAGE_SET_MEM_TYPE(pud_val(pud), GEN_NON_CACHE_ORDERED_MT));
+}
+
+#ifndef CONFIG_MAKE_ALL_PAGES_VALID
+#define pgd_none(pgd)		(!pgd_val(pgd))
+#else
+#define pgd_none(pgd)		(_PAGE_CLEAR_VALID(pgd_val(pgd)) == 0)
+#endif
+#define pgd_none_full(pgd)	(!pgd_val(pgd))
+#define pgd_valid(pgd)		_PAGE_TEST_VALID(pgd_val(pgd))
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	return unlikely(_PAGE_CLEAR(pgd_val(pgd) & PTE_FLAGS_MASK,
+				UNI_PAGE_GLOBAL) != _PAGE_USER_PUD);
+}
+#define pgd_present(pgd)	_PAGE_TEST_PRESENT(pgd_val(pgd))
+#define user_pgd_huge(pgd)	_PAGE_TEST_HUGE(pgd_val(pgd))
+#define kernel_pgd_huge(pgd) \
+		(is_huge_pgd_level() && _PAGE_TEST_HUGE(pgd_val(pgd)))
+#define pgd_clear_guest(pgd) \
+		(__pgd(_PAGE_CLEAR(pgd_val(pgd), \
+				UNI_PAGE_PRIV | UNI_PAGE_GLOBAL)))
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */ +#define pte_read(pte) (true) +#define pte_write(pte) _PAGE_TEST_WRITEABLE(pte_val(pte)) +#define pte_exec(pte) _PAGE_TEST_EXECUTEABLE(pte_val(pte)) +#define pte_dirty(pte) _PAGE_TEST_DIRTY(pte_val(pte)) +#define pte_young(pte) _PAGE_TEST_ACCESSED(pte_val(pte)) +#define pte_huge(pte) _PAGE_TEST_HUGE(pte_val(pte)) +#define pte_special(pte) _PAGE_TEST_SPECIAL(pte_val(pte)) +#define pte_wb(pte) is_mt_wb(_PAGE_GET_MEM_TYPE(pte_val(pte))) +#define pte_wc(pte) is_mt_wc(_PAGE_GET_MEM_TYPE(pte_val(pte))) +#define pte_uc(pte) is_mt_uc(_PAGE_GET_MEM_TYPE(pte_val(pte))) + +#define pte_wrprotect(pte) (__pte(_PAGE_CLEAR_WRITEABLE(pte_val(pte)))) +#define pte_mkwrite(pte) (__pte(_PAGE_SET_WRITEABLE(pte_val(pte)))) +#define pte_mkexec(pte) (__pte(_PAGE_SET_EXECUTEABLE(pte_val(pte)))) +#define pte_mknotexec(pte) (__pte(_PAGE_CLEAR_EXECUTEABLE(pte_val(pte)))) +#define pte_mkpresent(pte) (__pte(_PAGE_SET_PRESENT(pte_val(pte)))) +#define pte_mk_present_valid(pte) (__pte(_PAGE_SET(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_VALID))) +#define pte_mknotpresent(pte) \ + (__pte(_PAGE_CLEAR(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE))) +#define pte_mknot_present_valid(pte) (__pte(_PAGE_CLEAR(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_PROTNONE | UNI_PAGE_VALID))) +#define pte_mkold(pte) (__pte(_PAGE_CLEAR_ACCESSED(pte_val(pte)))) +#define pte_mkyoung(pte) (__pte(_PAGE_SET_ACCESSED(pte_val(pte)))) +#define pte_mkclean(pte) (__pte(_PAGE_CLEAR_DIRTY(pte_val(pte)))) +#define pte_mkdirty(pte) (__pte(_PAGE_SET_DIRTY(pte_val(pte)))) +#define pte_mkvalid(pte) (__pte(_PAGE_SET_VALID(pte_val(pte)))) +#define pte_mkhuge(pte) \ + (__pte(_PAGE_SET(pte_val(pte), \ + UNI_PAGE_PRESENT | UNI_PAGE_HUGE))) +#define pte_mkspecial(pte) (__pte(_PAGE_SET_SPECIAL(pte_val(pte)))) +#define pte_mknotvalid(pte) (__pte(_PAGE_CLEAR_VALID(pte_val(pte)))) +static inline pte_t pte_mk_wb(pte_t pte) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pte_val(pte)))) + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), EXT_CACHE_MT)); + else + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), GEN_CACHE_MT)); +} +static inline pte_t pte_mk_wc(pte_t pte) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pte_val(pte)))) + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), EXT_PREFETCH_MT)); + else + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), GEN_NON_CACHE_MT)); +} +static inline pte_t pte_mk_uc(pte_t pte) +{ + if (is_mt_external(_PAGE_GET_MEM_TYPE(pte_val(pte)))) + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), EXT_NON_PREFETCH_MT)); + else + return __pte(_PAGE_SET_MEM_TYPE(pte_val(pte), GEN_NON_CACHE_ORDERED_MT)); +} + +/* + * The index and offset in the root-level page table directory. + */ +#define pgd_index(virt_addr) (((virt_addr) >> PGDIR_SHIFT) & \ + (PTRS_PER_PGD - 1)) +#define pgd_offset(mm, virt_addr) ((mm)->pgd + pgd_index(virt_addr)) +#define pgd_to_index(pgdp) ((((unsigned long)(pgdp)) / \ + (sizeof(pgd_t))) & \ + (PTRS_PER_PGD - 1)) +#define pgd_to_page(pgdp) ((pgdp) - pgd_to_index(pgdp)) +#define boot_pgd_index(virt_addr) pgd_index(virt_addr) + +#define VIRT_ADDR_VPTB_BASE(va) \ + ((MMU_IS_SEPARATE_PT()) ? \ + (((va) >= MMU_SEPARATE_KERNEL_VAB) ? \ + KERNEL_VPTB_BASE_ADDR : USER_VPTB_BASE_ADDR) \ + : \ + MMU_UNITED_KERNEL_VPTB) +/* + * The index and offset in the upper page table directory. 
+ */
+#define pud_index(virt_addr)		((virt_addr >> PUD_SHIFT) & \
+						(PTRS_PER_PUD - 1))
+#define pud_virt_offset(virt_addr)	(VIRT_ADDR_VPTB_BASE(virt_addr) | \
+						((pmd_virt_offset(virt_addr) & \
+							PTE_MASK) >> \
+						(E2K_VA_SIZE - PGDIR_SHIFT)))
+#define boot_pud_index(virt_addr)	pud_index(virt_addr)
+#define boot_pud_offset(pgdp, addr)	((pud_t *)boot_pgd_page(*(pgdp)) + \
+						boot_pud_index(addr))
+
+/*
+ * The index and offset in the middle page table directory
+ */
+#define pmd_index(virt_addr)		((virt_addr >> PMD_SHIFT) & \
+						(PTRS_PER_PMD - 1))
+#define pmd_virt_offset(virt_addr)	(VIRT_ADDR_VPTB_BASE(virt_addr) | \
+						((pte_virt_offset(virt_addr) & \
+							PTE_MASK) >> \
+						(E2K_VA_SIZE - PGDIR_SHIFT)))
+#define boot_pmd_index(virt_addr)	pmd_index(virt_addr)
+#define boot_pmd_offset(pudp, addr)	((pmd_t *)boot_pud_page(*(pudp)) + \
+						boot_pmd_index(addr))
+
+/*
+ * The index and offset in the third-level page table.
+ */
+#define pte_index(virt_addr)		((virt_addr >> PAGE_SHIFT) & \
+						(PTRS_PER_PTE - 1))
+#define pte_virt_offset(virt_addr)	(VIRT_ADDR_VPTB_BASE(virt_addr) | \
+						(((virt_addr) & PTE_MASK) >> \
+						(E2K_VA_SIZE - PGDIR_SHIFT)))
+
+#define boot_pte_index(virt_addr)	pte_index(virt_addr)
+#define boot_pte_offset(pmdp, addr)	((pte_t *)boot_pmd_page(*(pmdp)) + \
+						boot_pte_index(addr))
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* !(_ASM_E2K_PGTABLE_DEF_H) */
diff --git a/arch/e2k/include/asm/pgtable_types.h b/arch/e2k/include/asm/pgtable_types.h
new file mode 100644
index 0000000..557fd39
--- /dev/null
+++ b/arch/e2k/include/asm/pgtable_types.h
@@ -0,0 +1,194 @@
+#ifndef _E2K_PGTABLE_TYPES_H_
+#define _E2K_PGTABLE_TYPES_H_
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+
+#if CONFIG_CPU_ISET >= 6
+# ifdef CONFIG_MMU_PT_V6
+#  define MMU_IS_PT_V6()	true
+# else	/* ! CONFIG_MMU_PT_V6 */
+#  define MMU_IS_PT_V6()	false
+# endif	/* CONFIG_MMU_PT_V6 */
+# ifdef CONFIG_MMU_SEP_VIRT_SPACE
+#  ifndef CONFIG_DYNAMIC_SEP_VIRT_SPACE
+#   define MMU_IS_SEPARATE_PT()	true
+#  else	/* CONFIG_DYNAMIC_SEP_VIRT_SPACE */
+#   ifdef E2K_P2V
+#    define MMU_IS_SEPARATE_PT()	(boot_machine.mmu_separate_pt)
+#   else	/* ! E2K_P2V */
+#    define MMU_IS_SEPARATE_PT()	(machine.mmu_separate_pt)
+#   endif	/* E2K_P2V */
+#  endif	/* ! CONFIG_DYNAMIC_SEP_VIRT_SPACE */
+# else	/* ! MMU_SEP_VIRT_SPACE */
+#  define MMU_IS_SEPARATE_PT()	false
+# endif	/* MMU_SEP_VIRT_SPACE */
+#elif CONFIG_CPU_ISET >= 2
+# define MMU_IS_PT_V6()		false
+# define MMU_IS_SEPARATE_PT()	false
+#elif CONFIG_CPU_ISET == 0
+# ifdef E2K_P2V
+#  define MMU_IS_PT_V6()	(boot_machine.mmu_pt_v6)
+#  define MMU_IS_SEPARATE_PT()	(boot_machine.mmu_separate_pt)
+# else	/* ! E2K_P2V */
+#  define MMU_IS_PT_V6()	(machine.mmu_pt_v6)
+#  define MMU_IS_SEPARATE_PT()	(machine.mmu_separate_pt)
+# endif	/* E2K_P2V */
+#else	/* CONFIG_CPU_ISET undefined or negative */
+# warning "Undefined CPU ISET VERSION #, MMU pt_v6 mode is defined dynamically"
+# warning "Undefined CPU ISET VERSION #, MMU sep_pt mode is defined dynamically"
+# ifdef E2K_P2V
+#  define MMU_IS_PT_V6()	(boot_machine.mmu_pt_v6)
+#  define MMU_IS_SEPARATE_PT()	(boot_machine.mmu_separate_pt)
+# else	/* ! E2K_P2V */
+#  define MMU_IS_PT_V6()	(machine.mmu_pt_v6)
+#  define MMU_IS_SEPARATE_PT()	(machine.mmu_separate_pt)
+# endif	/* E2K_P2V */
+#endif	/* CONFIG_CPU_ISET 0-6 */
+
+/* max. number of physical address bits (architected) */
+#define E2K_MAX_PHYS_BITS_V2	40	/* on V1-V5 */
+#define E2K_MAX_PHYS_BITS_V6	48	/* from V6-... */
+
+/*
+ * Memory types, the same as PTE.MT field values,
+ * see iset 8.2.3.1)
+ */
+typedef enum pte_mem_type {
+	GEN_CACHE_MT		= 0,
+	GEN_NON_CACHE_MT	= 1,
+	EXT_PREFETCH_MT		= 4,
+	EXT_NON_PREFETCH_MT	= 6,
+	EXT_CONFIG_MT		= 7,
+	/* See comment in ioremap_cache() */
+	EXT_CACHE_MT		= 8,
+	/* This is the same as GEN_NON_CACHE_MT but with additional bit
+	 * set so that track_pfn_*() functions can understand if this
+	 * is EXT_PREFETCH_MT (i.e. came from pgprot_writecombine())
+	 * or EXT_NON_PREFETCH_MT (i.e. came from pgprot_noncached()).
+	 *
+	 * This is needed to distinguish between the following cases:
+	 * 1) pgprot_noncached() + vm_insert_page()
+	 * 2) pgprot_writecombine() + vm_insert_page()
+	 * 3) pgprot_noncached() + some other mapping function
+	 * 4) pgprot_writecombine() + some other mapping function
+	 *
+	 * If we are mapping device ("External") then track_pfn_insert()
+	 * and track_pfn_remap() functions will convert the type (cases
+	 * 3 and 4). And by default set hardware "General" type (cases 1
+	 * and 2) because vm_insert_page() does not call track_pfn_*()
+	 * functions, and "General" type has cache coherency properly
+	 * enabled unlike "External" type. */
+	GEN_NON_CACHE_ORDERED_MT = 9,
+} pte_mem_type_t;
+
+typedef enum pte_mem_type_rule {
+	MOST_STRONG_MTCR	= 0,
+	FROM_HYPERVISOR_MTCR	= 2,
+	FROM_GUEST_MTCR		= 3,
+} pte_mem_type_rule_t;
+
+/* arch-independent structure of page table entries */
+typedef enum uni_page_bits {
+	UNI_PAGE_PRESENT_BIT,		/* Present */
+	UNI_PAGE_WRITE_BIT,		/* Writable */
+	UNI_PAGE_PRIV_BIT,		/* PriVileged */
+	UNI_PAGE_VALID_BIT,		/* Valid */
+	UNI_PAGE_PROTECT_BIT,		/* PRotected */
+	UNI_PAGE_HW_ACCESS_BIT,		/* page hardware Accessed */
+	UNI_PAGE_DIRTY_BIT,		/* page Dirty */
+	UNI_PAGE_HUGE_BIT,		/* huge Page Size */
+	UNI_PAGE_GLOBAL_BIT,		/* Global page */
+	UNI_PAGE_NWA_BIT,		/* No Writable Address */
+	UNI_PAGE_NON_EX_BIT,		/* NON EXecutable */
+	UNI_PAGE_PROTNONE_BIT,		/* software PROTection NONE */
+	UNI_PAGE_AVAIL_BIT,		/* software AVAILable */
+	UNI_PAGE_SW_ACCESS_BIT,		/* page software Accessed */
+	UNI_PAGE_SPECIAL_BIT,		/* software SPECIAL */
+	UNI_PAGE_GFN_BIT,		/* software Guest page Frame Number */
+	UNI_PAGE_ACCESSED_BIT,		/* page hardware/software Accessed */
+	UNI_PAGE_PFN_BIT,		/* Physical Page Number field */
+	UNI_PAGE_MEM_TYPE_BIT,		/* Memory Type field */
+	UNI_PAGE_MEM_TYPE_RULE_BIT,	/* Memory Type Combination Rule field */
+	UNI_PAGE_MEM_TYPE_MA_BIT,	/* Memory Type to memory access */
+					/* DTLB field */
+	UNI_PAGE_WRITE_INT_BIT,		/* Write protected physical address */
+					/* DTLB field */
+	UNI_PAGE_INTL_RD_BIT,		/* Intel Read protection */
+					/* DTLB field */
+	UNI_PAGE_INTL_WR_BIT,		/* Intel Write protection */
+					/* DTLB field */
+	UNI_DTLB_EP_RES_BIT,		/* DTLB entry probe result field */
+					/* for successful probe completion */
+	UNI_DTLB_PH_ADDR_AP_RES_BIT,	/* physical address for successful */
+					/* DTLB address probe result */
+	UNI_DTLB_ERROR_MASK_BIT,	/* DTLB entry probe faults mask */
+					/* for unsuccessful probe completion */
+	UNI_DTLB_MISS_LEVEL_BIT,	/* miss level DTLB field */
+	UNI_DTLB_SUCCESSFUL_BIT,	/* successful translation flag */
+					/* for DTLB probe operation */
+	UNI_DTLB_RES_BITS_BIT,		/* reserved bits of DTLB probe */
+					/* result */
+} uni_page_bits_t;
+
+typedef const unsigned long	uni_pteval_t;
+typedef const unsigned long	uni_dtlb_t;
+
+#define UNI_PAGE_PRESENT	(uni_pteval_t)(1ULL << UNI_PAGE_PRESENT_BIT)
+#define UNI_PAGE_WRITE		(uni_pteval_t)(1ULL << UNI_PAGE_WRITE_BIT)
+#define UNI_PAGE_PRIV		(uni_pteval_t)(1ULL << UNI_PAGE_PRIV_BIT)
+#define UNI_PAGE_VALID		(uni_pteval_t)(1ULL << UNI_PAGE_VALID_BIT)
+#define UNI_PAGE_PROTECT	(uni_pteval_t)(1ULL << UNI_PAGE_PROTECT_BIT)
+#define UNI_PAGE_HW_ACCESS	(uni_pteval_t)(1ULL << UNI_PAGE_HW_ACCESS_BIT)
+#define UNI_PAGE_DIRTY		(uni_pteval_t)(1ULL << UNI_PAGE_DIRTY_BIT)
+#define UNI_PAGE_HUGE		(uni_pteval_t)(1ULL << UNI_PAGE_HUGE_BIT)
+#define UNI_PAGE_GLOBAL		(uni_pteval_t)(1ULL << UNI_PAGE_GLOBAL_BIT)
+#define UNI_PAGE_NWA		(uni_pteval_t)(1ULL << UNI_PAGE_NWA_BIT)
+#define UNI_PAGE_NON_EX		(uni_pteval_t)(1ULL << UNI_PAGE_NON_EX_BIT)
+#define UNI_PAGE_PROTNONE	(uni_pteval_t)(1ULL << UNI_PAGE_PROTNONE_BIT)
+#define UNI_PAGE_AVAIL		(uni_pteval_t)(1ULL << UNI_PAGE_AVAIL_BIT)
+#define UNI_PAGE_SW_ACCESS	(uni_pteval_t)(1ULL << UNI_PAGE_SW_ACCESS_BIT)
+#define UNI_PAGE_SPECIAL	(uni_pteval_t)(1ULL << UNI_PAGE_SPECIAL_BIT)
+#define UNI_PAGE_GFN		(uni_pteval_t)(1ULL << UNI_PAGE_GFN_BIT)
+#define UNI_PAGE_ACCESSED	(uni_pteval_t)(1ULL << UNI_PAGE_ACCESSED_BIT)
+#define UNI_PAGE_PFN		(uni_pteval_t)(1ULL << UNI_PAGE_PFN_BIT)
+#define UNI_PAGE_MEM_TYPE	(uni_pteval_t)(1ULL << UNI_PAGE_MEM_TYPE_BIT)
+#define UNI_PAGE_MEM_TYPE_RULE \
+		(uni_pteval_t)(1ULL << UNI_PAGE_MEM_TYPE_RULE_BIT)
+#define UNI_PAGE_MEM_TYPE_MA	(uni_dtlb_t)(1ULL << UNI_PAGE_MEM_TYPE_MA_BIT)
+#define UNI_PAGE_WRITE_INT	(uni_dtlb_t)(1ULL << UNI_PAGE_WRITE_INT_BIT)
+#define UNI_PAGE_INTL_RD	(uni_dtlb_t)(1ULL << UNI_PAGE_INTL_RD_BIT)
+#define UNI_PAGE_INTL_WR	(uni_dtlb_t)(1ULL << UNI_PAGE_INTL_WR_BIT)
+#define UNI_DTLB_EP_RES		(uni_dtlb_t)(1ULL << UNI_DTLB_EP_RES_BIT)
+#define UNI_DTLB_PH_ADDR_AP_RES \
+		(uni_dtlb_t)(1ULL << UNI_DTLB_PH_ADDR_AP_RES_BIT)
+#define UNI_DTLB_ERROR_MASK	(uni_dtlb_t)(1ULL << UNI_DTLB_ERROR_MASK_BIT)
+#define UNI_DTLB_MISS_LEVEL	(uni_dtlb_t)(1ULL << UNI_DTLB_MISS_LEVEL_BIT)
+#define UNI_DTLB_SUCCESSFUL	(uni_dtlb_t)(1ULL << UNI_DTLB_SUCCESSFUL_BIT)
+#define UNI_DTLB_RES_BITS	(uni_dtlb_t)(1ULL << UNI_DTLB_RES_BITS_BIT)
+
+/*
+ * Encode and de-code a swap entry
+ *
+ * Format of swap pte:
+ *	bits 0, _PAGE_PROTNONE : present bits (must be zero)
+ *	bits 13-19: swap-type
+ *	bits 20-63: swap offset (MMU PTE version dependent, see pgtable-v*.h)
+ */
+#define __SWP_TYPE_BITS		7
+#define MAX_SWAPFILES_CHECK()	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > \
+						__SWP_TYPE_BITS)
+#define __SWP_TYPE_SHIFT	(PAGE_SHIFT + 1)
+#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_SHIFT + __SWP_TYPE_BITS)
+#define __FILE_PGOFF_SHIFT	(PAGE_SHIFT + 1)
+
+#define __swp_type(entry)	(((entry).val >> __SWP_TYPE_SHIFT) & \
+					((1U << __SWP_TYPE_BITS) - 1))
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __pmd_to_swp_entry(pmd)	((swp_entry_t) { pmd_val(pmd) })
+
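For illustration, a worked sketch (editorial example, not part of the patch) of the swap-entry layout described above; it assumes PAGE_SHIFT == 12 so the type field starts at bit 13, and example_swp_round_trip is a hypothetical helper:

/* Illustrative sketch only: with PAGE_SHIFT == 12, __SWP_TYPE_SHIFT is 13
 * and __SWP_OFFSET_SHIFT is 20, so swap type 5 sits in bits 13-19 and an
 * offset of 0x1234 starts at bit 20. swp_entry_t is the kernel's generic
 * { unsigned long val; } wrapper. */
static inline void example_swp_round_trip(void)
{
	swp_entry_t entry;

	entry.val = (5UL << __SWP_TYPE_SHIFT) |
			(0x1234UL << __SWP_OFFSET_SHIFT);
	BUG_ON(__swp_type(entry) != 5);	/* type survives the round trip */
}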
+#endif	/* ! __ASSEMBLY__ */
+
+#endif	/* _E2K_PGTABLE_TYPES_H_ */
diff --git a/arch/e2k/include/asm/pic.h b/arch/e2k/include/asm/pic.h
new file mode 100644
index 0000000..2c2683d
--- /dev/null
+++ b/arch/e2k/include/asm/pic.h
@@ -0,0 +1,150 @@
+#ifndef __ASM_E2K_PIC_H
+#define __ASM_E2K_PIC_H
+
+/*
+ * Choose between APIC and EPIC basic functions
+ */
+
+#include
+#include
+#include
+
+/* P2V */
+static inline unsigned int boot_epic_is_bsp(void)
+{
+	union cepic_ctrl reg;
+
+	reg.raw = boot_epic_read_w(CEPIC_CTRL);
+	return reg.bits.bsp_core;
+}
+
+static inline unsigned int boot_apic_is_bsp(void)
+{
+	return BootStrap(boot_arch_apic_read(APIC_BSP));
+}
+
+static inline unsigned int boot_epic_read_id(void)
+{
+	return cepic_id_full_to_short(boot_epic_read_w(CEPIC_ID));
+}
+
+static inline unsigned int boot_apic_read_id(void)
+{
+	return GET_APIC_ID(boot_arch_apic_read(APIC_ID));
+}
+
+/*
+ * Reading PIC registers is mainly done in early P2V: before and slightly
+ * after the initialization of boot_machine structure. Unfortunately, the
+ * structure is initialized by BSP, and AP may proceed to read PIC in the
+ * meantime. The barrier can't be used that early either. As such, PIC reads
+ * in P2V can't rely on CPU_FEAT_EPIC. We manually read the IDR register
+ * instead.
+ */
+#ifdef CONFIG_EPIC
+static inline bool boot_early_pic_is_bsp(void)
+{
+	e2k_idr_t idr;
+	unsigned int reg;
+
+	idr = boot_read_IDR_reg();
+	if (idr.IDR_mdl >= IDR_E12C_MDL)
+		reg = boot_epic_is_bsp();
+	else
+		reg = boot_apic_is_bsp();
+
+	return !!reg;
+}
+
+static inline unsigned int boot_early_pic_read_id(void)
+{
+	e2k_idr_t idr;
+
+	idr = boot_read_IDR_reg();
+	if (idr.IDR_mdl >= IDR_E12C_MDL)
+		return boot_epic_read_id();
+	else
+		return boot_apic_read_id();
+}
+#else
+static inline bool boot_early_pic_is_bsp(void)
+{
+	return !!boot_apic_is_bsp();
+}
+
+static inline unsigned int boot_early_pic_read_id(void)
+{
+	return boot_apic_read_id();
+}
+#endif
+
+/* Kernel */
+#ifndef E2K_P2V
+#include
+
+#ifdef CONFIG_EPIC
+static inline bool read_pic_bsp(void)
+{
+	if (cpu_has_epic())
+		return read_epic_bsp();
+	else
+		return !!BootStrap(arch_apic_read(APIC_BSP));
+}
+
+extern void __init_recv e2k_setup_secondary_apic(void);
+static inline void __init_recv e2k_setup_secondary_pic(void)
+{
+	if (cpu_has_epic())
+		setup_cepic();
+	else
+		e2k_setup_secondary_apic();
+}
+
+extern void __init_recv setup_prepic(void);
+static inline void __init_recv setup_processor_pic(void)
+{
+	if (cpu_has_epic())
+		setup_prepic();
+}
+#else /* CONFIG_EPIC */
+static inline bool read_pic_bsp(void)
+{
+	return !!BootStrap(arch_apic_read(APIC_BSP));
+}
+
+extern void __init_recv e2k_setup_secondary_apic(void);
+static inline void __init_recv e2k_setup_secondary_pic(void)
+{
+	e2k_setup_secondary_apic();
+}
+
+extern void __init_recv e2k_apic_map_logical_id(int recovery);
+static inline void __init_recv e2k_pic_map_logical_id(int recovery)
+{
+	e2k_apic_map_logical_id(recovery);
+}
+
+static inline void __init_recv setup_processor_pic(void)
+{
+	/* Nothing to do */
+}
+
+extern void setup_secondary_APIC_clock(void);
+static inline void __init_recv setup_secondary_pic_clock(void)
+{
+	setup_secondary_APIC_clock();
+}
+
+static inline int pic_get_vector(void)
+{
+	return apic_get_vector();
+}
+
+struct pci_dev;
+int ioepic_pin_to_msi_ioapic_irq(unsigned int pin, struct pci_dev *dev);
+static inline int ioepic_pin_to_irq_pic(unsigned int pin, struct pci_dev *dev)
+{
+	return ioepic_pin_to_msi_ioapic_irq(pin, dev);
+}
+#endif /* CONFIG_EPIC */
+#endif /* 
E2K_P2V */ +#endif /* __ASM_E2K_PIC_H */ diff --git a/arch/e2k/include/asm/poll.h b/arch/e2k/include/asm/poll.h new file mode 100644 index 0000000..c98509d --- /dev/null +++ b/arch/e2k/include/asm/poll.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/posix_types.h b/arch/e2k/include/asm/posix_types.h new file mode 100644 index 0000000..575deb1 --- /dev/null +++ b/arch/e2k/include/asm/posix_types.h @@ -0,0 +1,11 @@ +#ifndef _E2K_POSIX_TYPES_H_ +#define _E2K_POSIX_TYPES_H_ + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. + */ + +#include + +#endif /* _E2K_POSIX_TYPES_H_ */ diff --git a/arch/e2k/include/asm/process.h b/arch/e2k/include/asm/process.h new file mode 100644 index 0000000..e1c4383 --- /dev/null +++ b/arch/e2k/include/asm/process.h @@ -0,0 +1,1728 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm-e2k/process.h + * + * Copyright 2001 Salavat S. Guiliazov (atic@mcst.ru) + */ + +#ifndef _E2K_PROCESS_H +#define _E2K_PROCESS_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* host mode support */ + +#undef DEBUG_PV_UST_MODE +#undef DebugUST +#define DEBUG_PV_UST_MODE 0 /* guest user stacks debug */ +#define DebugUST(fmt, args...) \ +({ \ + if (debug_guest_ust) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PV_SYSCALL_MODE +#define DEBUG_PV_SYSCALL_MODE 0 /* syscall injection debugging */ + +#if DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE +extern bool debug_guest_ust; +#else +#define debug_guest_ust false +#endif /* DEBUG_PV_UST_MODE || DEBUG_PV_SYSCALL_MODE */ + +#undef DEBUG_SS_MODE +#undef DebugSS +#define DEBUG_SS_MODE 0 /* stack switching */ +#define DebugSS(fmt, args...) \ +({ \ + if (DEBUG_SS_MODE) \ + pr_info(fmt, ##args); \ +}) + +#undef DEBUG_US_MODE +#undef DebugUS +#define DEBUG_US_MODE 0 /* user stacks */ +#define DebugUS(fmt, args...) \ +({ \ + if (DEBUG_US_MODE) \ + pr_info(fmt, ##args); \ +}) + +#undef DEBUG_HS_MODE +#undef DebugHS +#define DEBUG_HS_MODE 0 /* hardware stacks */ +#define DebugHS(...) DebugPrint(DEBUG_HS_MODE ,##__VA_ARGS__) + +#undef DEBUG_KS_MODE +#undef DebugKS +#define DEBUG_KS_MODE 0 /* kernel stacks */ +#define DebugKS(fmt, args...) \ +({ \ + if (DEBUG_KS_MODE) \ + pr_info(fmt, ##args); \ +}) + +#undef DEBUG_EXECVE_MODE +#undef DebugEX +#define DEBUG_EXECVE_MODE 0 /* execve and exit */ +#define DebugEX(...) DebugPrint(DEBUG_EXECVE_MODE, ##__VA_ARGS__) + +#undef DEBUG_KVM_OLD_MODE +#undef DebugOLD +#define DEBUG_KVM_OLD_MODE 0 +#define DebugOLD(fmt, args...) \ +({ \ + if (DEBUG_KVM_OLD_MODE) \ + pr_info("%s(): " fmt, __func__, ##args); \ +}) + +#undef DEBUG_PROCESS_MODE +#undef DbgP +#define DEBUG_PROCESS_MODE 0 /* processes */ +#define DbgP(fmt, args...) 
\
+({ \
+	if (DEBUG_PROCESS_MODE) \
+		pr_info(fmt, ##args); \
+})
+
+/*
+ * additional arch-dep flags for clone()
+ */
+#define CLONE_GUEST_KERNEL	0x0000010000000000	/* guest kernel */
+							/* thread creation */
+
+/*
+ * Stack frame types (determined based on chain registers state):
+ *	host kernel frame -		PSR.pm == 1
+ *	user frame (host and guest) -	PSR.pm == 0 && IP < TASK_SIZE
+ *	guest kernel frame -		PSR.pm == 0 && IP >= GUEST_TASK_SIZE
+ */
+typedef enum stack_frame {
+	kernel_frame_type = 0,		/* host kernel frame */
+	user_frame_type = 1,		/* user (host or guest) frame */
+	guest_kernel_frame_type,	/* guest kernel frame type */
+	undefined_frame_type,
+} stack_frame_t;
+
+static inline stack_frame_t
+get_kernel_stack_frame_type(void)
+{
+	if (!paravirt_enabled() || IS_HV_GM())
+		return kernel_frame_type;
+	return guest_kernel_frame_type;
+}
+
+static inline stack_frame_t
+get_user_stack_frame_type(void)
+{
+	return user_frame_type;
+}
+
+static inline stack_frame_t
+get_the_stack_frame_type(e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo,
+				bool guest, bool ignore_IP)
+{
+	if (likely(!guest)) {
+		/* host kernel: guest kernel is host user */
+		if (is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP)) {
+			return kernel_frame_type;
+		} else if (is_call_from_host_user_IP(cr0_hi, cr1_lo,
+							true)) {
+			return user_frame_type;
+		} else {
+			pr_err("%s(): unknown host stack frame type: "
+				"CR0_hi 0x%llx CR1_lo 0x%llx\n",
+				__func__, cr0_hi.CR0_hi_half,
+				cr1_lo.CR1_lo_half);
+		}
+	} else {
+		/* guest kernel: frames can be host kernel, guest kernel */
+		/* or guest user */
+		if (is_call_from_host_kernel_IP(cr0_hi, cr1_lo, ignore_IP)) {
+			return kernel_frame_type;
+		} else if (is_call_from_guest_kernel_IP(cr0_hi, cr1_lo,
+							ignore_IP)) {
+			return guest_kernel_frame_type;
+		} else if (is_call_from_guest_user_IP(cr0_hi, cr1_lo,
+							ignore_IP)) {
+			return user_frame_type;
+		} else {
+			pr_err("%s(): unknown guest stack frame type: "
+				"CR0_hi 0x%llx CR1_lo 0x%llx\n",
+				__func__, cr0_hi.CR0_hi_half,
+				cr1_lo.CR1_lo_half);
+		}
+	}
+	return undefined_frame_type;
+}
+static inline stack_frame_t
+get_stack_frame_type(e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo)
+{
+	return get_the_stack_frame_type(cr0_hi, cr1_lo,
+			paravirt_enabled() && !IS_HV_GM(), false);
+}
+
+static inline stack_frame_t
+get_stack_frame_type_IP(e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo,
+				bool ignore_IP)
+{
+	return get_the_stack_frame_type(cr0_hi, cr1_lo,
+			paravirt_enabled() && !IS_HV_GM(), ignore_IP);
+}
+
+static inline stack_frame_t
+get_task_stack_frame_type_IP(struct task_struct *task,
+		e2k_cr0_hi_t cr0_hi, e2k_cr1_lo_t cr1_lo, bool ignore_IP)
+{
+	return get_the_stack_frame_type(cr0_hi, cr1_lo,
+			paravirt_enabled() && !IS_HV_GM() ||
+				guest_task_mode(task),
+			ignore_IP);
+}
+
+static __always_inline int
+is_kernel_hardware_stacks(e2k_addr_t p_stack_base, e2k_addr_t pc_stack_base)
+{
+	if (p_stack_base >= TASK_SIZE || pc_stack_base >= TASK_SIZE) {
+		return 1;
+	}
+	return 0;
+}
+
+extern int native_copy_kernel_stacks(struct task_struct *new_task,
+				unsigned long fn, unsigned long arg);
+
+static __always_inline e2k_size_t
+get_hw_ps_area_user_size(hw_stack_area_t *ps)
+{
+	return ps->size;
+}
+static __always_inline e2k_size_t
+get_hw_pcs_area_user_size(hw_stack_area_t *pcs)
+{
+	return pcs->size;
+}
+static __always_inline void
+set_hw_ps_area_user_size(hw_stack_area_t *ps, e2k_size_t u_ps_size)
+{
+	ps->size = u_ps_size;
+}
+static __always_inline void
+set_hw_pcs_area_user_size(hw_stack_area_t *pcs, e2k_size_t u_pcs_size)
+{
+	pcs->size = u_pcs_size;
+}
+
+static __always_inline e2k_size_t
+get_hw_ps_user_size(hw_stack_t *hw_stacks)
+{
+	return get_hw_ps_area_user_size(&hw_stacks->ps);
+}
+static __always_inline e2k_size_t
+get_hw_pcs_user_size(hw_stack_t *hw_stacks)
+{
+	return get_hw_pcs_area_user_size(&hw_stacks->pcs);
+}
+static __always_inline void
+set_hw_ps_user_size(hw_stack_t *hw_stacks, e2k_size_t u_ps_size)
+{
+	set_hw_ps_area_user_size(&hw_stacks->ps, u_ps_size);
+}
+static __always_inline void
+set_hw_pcs_user_size(hw_stack_t *hw_stacks, e2k_size_t u_pcs_size)
+{
+	set_hw_pcs_area_user_size(&hw_stacks->pcs, u_pcs_size);
+}
+
+#define NATIVE_ONLY_SET_GUEST_GREGS(ti) \
+		/* it is native or host kernel, nothing to set */
+
+static __always_inline int
+native_switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks,
+	e2k_addr_t cut_base, e2k_size_t cut_size,
+	e2k_addr_t entry_point, int cui,
+	unsigned long flags, bool kernel)
+{
+	return 0;	/* to continue switching at native mode */
+}
+
+extern int native_clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks,
+		const e2k_mem_crs_t *child_crs, const struct pt_regs *regs,
+		struct sw_regs *new_sw_regs, struct thread_info *new_ti,
+		unsigned long clone_flags);
+extern void native_copy_spilled_user_stacks(e2k_stacks_t *child_stacks,
+		e2k_mem_crs_t *child_crs, struct sw_regs *new_sw_regs,
+		const struct thread_info *new_ti);
+
+/*
+ * Functions create all kernel hardware stacks (PS & PCS)
+ */
+static inline void
+native_do_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks)
+{
+	set_hw_ps_user_size(hw_stacks, KERNEL_P_STACK_SIZE);
+	set_hw_pcs_user_size(hw_stacks, KERNEL_PC_STACK_SIZE);
+}
+
+extern void native_define_kernel_hw_stacks_sizes(hw_stack_t *hw_stacks);
+extern void native_define_user_hw_stacks_sizes(hw_stack_t *hw_stacks);
+
+extern void init_sw_user_regs(struct sw_regs *sw_regs,
+		bool save_gregs, bool save_binco_regs);
+
+/*
+ * The function should be inline only and cannot call any other non-inline
+ * functions, including printk() or other debugging print
+ */
+static __always_inline void
+goto_new_user_hard_stk(e2k_stacks_t *stacks)
+{
+	/*
+	 * Optimization to avoid flushing the chain stack.
+	 *
+	 * Old stacks are not needed anymore, do not flush procedure
+	 * registers and chain registers - only strip sizes
+	 */
+	NATIVE_STRIP_PSHTP_WINDOW();
+	NATIVE_STRIP_PCSHTP_WINDOW();
+
+	/*
+	 * There might be a FILL operation still going right now.
+	 * Wait for its completion before going further - otherwise
+	 * the next FILL on the new PSP/PCSP registers will race
+	 * with the previous one.
+	 *
+	 * The first and the second FILL operations will use different
+	 * addresses because we will change PSP/PCSP registers, and
+	 * thus loads/stores from these two FILLs can race with each
+	 * other leading to bad register file (containing values from
+	 * both stacks).
+	 */
+	E2K_WAIT(_ma_c);
+
+	/*
+	 * Since we are switching to user stacks their sizes
+	 * have been stripped already, so use RAW_* writes.
+	 */
+	NATIVE_NV_WRITE_PSP_REG(stacks->psp_hi, stacks->psp_lo);
+	NATIVE_NV_WRITE_PCSP_REG(stacks->pcsp_hi, stacks->pcsp_lo);
+}
+
+typedef void (*start_fn)(u64 __start);
+
+extern void switch_to_user_func(long dummy,
+		start_fn start_func, e2k_size_t us_size, int cui);
+#ifdef CONFIG_PROTECTED_MODE
+extern void pm_exit_robust_list(struct task_struct *curr);
+extern void protected_switch_to_user_func(long r0, long r1,
+		start_fn start_func, e2k_size_t us_size, int cui);
+#endif
+
+static inline void native_flush_stacks(void)
+{
+	NATIVE_FLUSHCPU;
+}
+static inline void native_flush_regs_stack(void)
+{
+	NATIVE_FLUSHR;
+}
+static inline void native_flush_chain_stack(void)
+{
+	NATIVE_FLUSHC;
+}
+#define NATIVE_COPY_STACKS_TO_MEMORY() \
+({ \
+	NATIVE_FLUSHCPU; \
+})
+
+/*
+ * Native threads can be scheduled and migrate to other CPUs,
+ * but never can change task and thread info structures.
+ * Host threads that implement guest VCPUs cannot change
+ * task and thread info structures either. But these threads are
+ * multithreaded, which means there are many guest kernel and user
+ * processes which are running under that host thread. Each guest
+ * process has own stacks, context and switch from one guest process
+ * to another causes switching of context and stacks, including host
+ * kernel stacks allocated for every such process and used to run it.
+ * Guest process stacks and other context are kept in the guest thread
+ * info structure. Guest process switching means change of guest thread
+ * info, but host thread info and task structure stay the same.
+ * However there is complexity because of guest processes migration from
+ * one VCPU to another. In this case some local variables in the dynamic
+ * chain of host function calls (local and procedure registers stacks)
+ * can keep values of the old host VCPU thread.
+ * For example, guest process was trapped on VCPU #0 and after trap handling
+ * completion migrated to VCPU #1. Trap on VCPU #0 can set some variables to
+ * pointers to current and current_thread_info() structures:
+ *	VCPU #0	ti = current_thread_info();
+ *		task = current;
+ * Further the host trap handler starts the guest trap handler:
+ *	VCPU #0	user_trap_handler()
+ *			kvm_start_guest_trap_handler()
+ *				kvm_trap_handler()
+ *					schedule()
+ *					......
+ *						switch_to() VCPU #1
+ * VCPU #1 is another host thread and has its own current &
+ * current_thread_info, different from those on VCPU #0
+ *					schedule()
+ *				kvm_trap_handler()
+ *			kvm_start_guest_trap_handler()
+ *		user_trap_handler()
+ * At this point the variables ti and task contain pointers to the old
+ * structures of VCPU #0, so they should be updated to point to the new
+ * structures of VCPU #1.
+ * Good style is using only the direct current & current_thread_info()
+ * macros, but there are some derived values: regs, vcpu, gti ... and
+ * they should be updated too.
+ * The following macros help to solve this problem
+ */
+/* native kernel does not support virtualization and cannot be run as host */
+/* so it does not have this problem - nothing to do */
+#define	NATIVE_UPDATE_VCPU_THREAD_CONTEXT(task, ti, regs, gti, vcpu)
+#define	NATIVE_CHECK_VCPU_THREAD_CONTEXT(__ti)
+
+static inline int __is_privileged_range(struct vm_area_struct *vma,
+		e2k_addr_t start, e2k_addr_t end)
+{
+	while (vma && vma->vm_start < end) {
+		if (vma->vm_flags & VM_PRIVILEGED)
+			return 1;
+		vma = vma->vm_next;
+	}
+
+	return 0;
+}
+
+static inline int is_privileged_range(e2k_addr_t start, e2k_addr_t end)
+{
+	return start >= USER_HW_STACKS_BASE || end >= USER_HW_STACKS_BASE;
+}
+
+extern int do_update_vm_area_flags(e2k_addr_t start, e2k_size_t len,
+		vm_flags_t flags_to_set, vm_flags_t flags_to_clear);
+extern int create_cut_entry(int tcount,
+		unsigned long code_base, unsigned code_sz,
+		unsigned long glob_base, unsigned glob_sz);
+extern int free_cut_entry(unsigned long glob_base, size_t glob_sz,
+		unsigned long *code_base, size_t *code_sz);
+extern void fill_cut_entry(e2k_cute_t *cute_p,
+		unsigned long code_base, unsigned code_sz,
+		unsigned long glob_base, unsigned glob_sz);
+
+extern int alloc_user_hw_stacks(hw_stack_t *hw_stacks, size_t p_size, size_t pc_size);
+extern void free_user_hw_stacks(hw_stack_t *hw_stacks);
+extern void free_user_old_pc_stack_areas(struct list_head *old_u_pcs_list);
+extern int free_user_hardware_stacks(hw_stack_t *u_hw_stacks);
+
+#define ATOMIC_GET_HW_STACK_INDEXES(ps_ind, pcs_ind) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_hi_val; \
+	unsigned int pcshtp_val; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_HW_STACKS_SIZES(psp_hi_val, pshtp_val, \
+					pcsp_hi_val, pcshtp_val); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \
+})
+
+#define ATOMIC_GET_HW_STACKS_IND_AND_TOP(ps_ind, pshtop, pcs_ind, pcshtop) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_hi_val; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_HW_STACKS_SIZES(psp_hi_val, pshtp_val, \
+					pcsp_hi_val, pcshtop); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_ind = psp_hi.PSP_hi_ind; \
+	pshtop = GET_PSHTP_MEM_INDEX(pshtp); \
+	pcs_ind = pcsp_hi.PCSP_hi_ind; \
+})
+
+#define ATOMIC_GET_HW_PS_SIZES(ps_ind, ps_size) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long pshtp_val; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_HW_PS_SIZES(psp_hi_val, pshtp_val); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_size = psp_hi.PSP_hi_size; \
+	ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \
+})
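For illustration, a sketch (editorial example, not part of the patch) of how a caller might sample the procedure stack geometry; example_ps_nearly_full and its 4 KB threshold are hypothetical:

/* Illustrative sketch only: ATOMIC_GET_HW_PS_SIZES() writes through its
 * arguments, so plain local lvalues are passed. ps_ind already includes
 * the part of the stack still held in hardware (PSHTP). */
static inline bool example_ps_nearly_full(void)
{
	u64 ps_ind, ps_size;

	ATOMIC_GET_HW_PS_SIZES(ps_ind, ps_size);
	return ps_ind + 4096 > ps_size;	/* arbitrary 4 KB headroom check */
}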
+
+#define ATOMIC_GET_HW_PS_SIZES_AND_BASE(ps_ind, ps_size, ps_base) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long psp_lo_val; \
+	unsigned long pshtp_val; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_psp_lo_t psp_lo; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_P_STACK_REGS(psp_lo_val, psp_hi_val, pshtp_val); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	psp_lo.PSP_lo_half = psp_lo_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_size = psp_hi.PSP_hi_size; \
+	ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \
+	ps_base = psp_lo.PSP_lo_base; \
+})
+
+#define ATOMIC_GET_HW_PS_SIZES_BASE_TOP(ps_ind, ps_size, ps_base, pshtop) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long psp_lo_val; \
+	unsigned long pshtp_val; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_psp_lo_t psp_lo; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_P_STACK_REGS(psp_lo_val, psp_hi_val, pshtp_val); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	psp_lo.PSP_lo_half = psp_lo_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_size = psp_hi.PSP_hi_size; \
+	ps_ind = psp_hi.PSP_hi_ind; \
+	pshtop = GET_PSHTP_MEM_INDEX(pshtp); \
+	ps_base = psp_lo.PSP_lo_base; \
+})
+
+#define ATOMIC_GET_HW_PCS_SIZES(pcs_ind, pcs_size) \
+({ \
+	unsigned long pcsp_hi_val; \
+	unsigned int pcshtp_val; \
+	e2k_pcsp_hi_t pcsp_hi; \
+ \
+	ATOMIC_READ_HW_PCS_SIZES(pcsp_hi_val, pcshtp_val); \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pcs_size = pcsp_hi.PCSP_hi_size; \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \
+})
+
+#define ATOMIC_GET_HW_PCS_SIZES_AND_BASE(pcs_ind, pcs_size, pcs_base) \
+({ \
+	unsigned long pcsp_hi_val; \
+	unsigned long pcsp_lo_val; \
+	unsigned int pcshtp_val; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pcsp_lo_t pcsp_lo; \
+ \
+	ATOMIC_READ_PC_STACK_REGS(pcsp_lo_val, pcsp_hi_val, pcshtp_val); \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pcsp_lo.PCSP_lo_half = pcsp_lo_val; \
+	pcs_size = pcsp_hi.PCSP_hi_size; \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \
+	pcs_base = pcsp_lo.PCSP_lo_base; \
+})
+
+#define ATOMIC_GET_HW_PCS_SIZES_BASE_TOP(pcs_ind, pcs_size, pcs_base, pcshtp) \
+({ \
+	unsigned long pcsp_hi_val; \
+	unsigned long pcsp_lo_val; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pcsp_lo_t pcsp_lo; \
+	e2k_pcshtp_t pcshtp_val; \
+ \
+	ATOMIC_READ_PC_STACK_REGS(pcsp_lo_val, pcsp_hi_val, pcshtp_val); \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pcsp_lo.PCSP_lo_half = pcsp_lo_val; \
+	pcs_size = pcsp_hi.PCSP_hi_size; \
+	pcs_ind = pcsp_hi.PCSP_hi_ind; \
+	pcs_base = pcsp_lo.PCSP_lo_base; \
+	pcshtp = PCSHTP_SIGN_EXTEND(pcshtp_val); \
+})
+
+#define ATOMIC_GET_HW_PCS_IND_AND_BASE(pcs_ind, pcs_base) \
+({ \
+	unsigned long pcsp_hi_val; \
+	unsigned long pcsp_lo_val; \
+	unsigned int pcshtp_val; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pcsp_lo_t pcsp_lo; \
+ \
+	ATOMIC_READ_PC_STACK_REGS(pcsp_lo_val, pcsp_hi_val, pcshtp_val); \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pcsp_lo.PCSP_lo_half = pcsp_lo_val; \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \
+	pcs_base = pcsp_lo.PCSP_lo_base; \
+})
+
+#define ATOMIC_GET_HW_STACKS_SIZES_AND_BASE(ps_ind, ps_size, ps_base, \
+					pcs_ind, pcs_size, pcs_base) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long psp_lo_val; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_hi_val; \
+	unsigned long pcsp_lo_val; \
+	unsigned int pcshtp; \
+	e2k_pshtp_t pshtp; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_psp_lo_t psp_lo; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pcsp_lo_t pcsp_lo; \
+ \
+	ATOMIC_READ_HW_STACKS_REGS(psp_lo_val, psp_hi_val, pshtp_val, \
+					pcsp_lo_val, pcsp_hi_val, pcshtp); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	psp_lo.PSP_lo_half = psp_lo_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_size = psp_hi.PSP_hi_size; \
+	ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \
+	ps_base = psp_lo.PSP_lo_base; \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pcsp_lo.PCSP_lo_half = pcsp_lo_val; \
+	pcs_size = pcsp_hi.PCSP_hi_size; \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp); \
+	pcs_base = pcsp_lo.PCSP_lo_base; \
+})
+
+#define ATOMIC_GET_HW_STACKS_IND_AND_BASE(ps_ind, ps_base, \
+					pcs_ind, pcs_base) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long psp_lo_val; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_hi_val; \
+	unsigned long pcsp_lo_val; \
+	unsigned int pcshtp; \
+	e2k_pshtp_t pshtp; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_psp_lo_t psp_lo; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pcsp_lo_t pcsp_lo; \
+ \
+	ATOMIC_READ_HW_STACKS_REGS(psp_lo_val, psp_hi_val, pshtp_val, \
+					pcsp_lo_val, pcsp_hi_val, pcshtp); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	psp_lo.PSP_lo_half = psp_lo_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \
+	ps_base = psp_lo.PSP_lo_base; \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pcsp_lo.PCSP_lo_half = pcsp_lo_val; \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp); \
+	pcs_base = pcsp_lo.PCSP_lo_base; \
+})
+
+#define ATOMIC_GET_HW_STACK_SIZES(ps_ind, ps_size, pcs_ind, pcs_size) \
+({ \
+	unsigned long psp_hi_val; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_hi_val; \
+	unsigned int pcshtp_val; \
+	e2k_psp_hi_t psp_hi; \
+	e2k_pcsp_hi_t pcsp_hi; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_HW_STACKS_SIZES(psp_hi_val, pshtp_val, \
+					pcsp_hi_val, pcshtp_val); \
+	psp_hi.PSP_hi_half = psp_hi_val; \
+	pcsp_hi.PCSP_hi_half = pcsp_hi_val; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	ps_size = psp_hi.PSP_hi_size; \
+	pcs_size = pcsp_hi.PCSP_hi_size; \
+	ps_ind = psp_hi.PSP_hi_ind + GET_PSHTP_MEM_INDEX(pshtp); \
+	pcs_ind = pcsp_hi.PCSP_hi_ind + PCSHTP_SIGN_EXTEND(pcshtp_val); \
+})
+#define ATOMIC_GET_HW_HWS_SIZES(hws_ind, hws_size, is_pcs) \
+({ \
+	if (is_pcs) { \
+		ATOMIC_GET_HW_PCS_SIZES(hws_ind, hws_size); \
+	} else { \
+		ATOMIC_GET_HW_PS_SIZES(hws_ind, hws_size); \
+	} \
+})
+
+#define ATOMIC_DO_SAVE_HW_STACKS_REGS(st_regs) \
+({ \
+	unsigned long psp_lo; \
+	unsigned long psp_hi; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_lo; \
+	unsigned long pcsp_hi; \
+	unsigned int pcshtp; \
+	e2k_pshtp_t pshtp; \
+ \
+	ATOMIC_READ_HW_STACKS_REGS(psp_lo, psp_hi, pshtp_val, \
+					pcsp_lo, pcsp_hi, pcshtp); \
+	(st_regs)->psp_hi.PSP_hi_half = psp_hi; \
+	(st_regs)->psp_lo.PSP_lo_half = psp_lo; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	(st_regs)->psp_hi.PSP_hi_ind += GET_PSHTP_MEM_INDEX(pshtp); \
+	(st_regs)->pcsp_hi.PCSP_hi_half = pcsp_hi; \
+	(st_regs)->pcsp_lo.PCSP_lo_half = pcsp_lo; \
+	(st_regs)->pcsp_hi.PCSP_hi_ind += PCSHTP_SIGN_EXTEND(pcshtp); \
+})
+
+static inline void
+atomic_save_hw_stacks_regs(e2k_stacks_t *stacks)
+{
+	ATOMIC_DO_SAVE_HW_STACKS_REGS(stacks);
+}
+
+#define ATOMIC_DO_SAVE_ALL_STACKS_REGS(st_regs, cr1_hi_p, usd_lo, usd_hi) \
+({ \
+	unsigned long psp_lo; \
+	unsigned long psp_hi; \
+	unsigned long pshtp_val; \
+	unsigned long pcsp_lo; \
+	unsigned long pcsp_hi; \
+	unsigned int pcshtp; \
+	unsigned long cr1_hi_val; /* contains ussz field for data stack */ \
+	e2k_pshtp_t pshtp; \
+	e2k_cr1_hi_t cr1_hi; \
+ \
+	ATOMIC_READ_ALL_STACKS_REGS(psp_lo, psp_hi, pshtp_val, \
+					pcsp_lo, pcsp_hi, pcshtp, \
+					(usd_lo), (usd_hi), cr1_hi_val); \
+	(st_regs)->psp_hi.PSP_hi_half = psp_hi; \
+	(st_regs)->psp_lo.PSP_lo_half = psp_lo; \
+	pshtp.PSHTP_reg = pshtp_val; \
+	(st_regs)->psp_hi.PSP_hi_ind += GET_PSHTP_MEM_INDEX(pshtp); \
+	(st_regs)->pcsp_hi.PCSP_hi_half = pcsp_hi; \
+	(st_regs)->pcsp_lo.PCSP_lo_half = pcsp_lo; \
+	(st_regs)->pcsp_hi.PCSP_hi_ind += PCSHTP_SIGN_EXTEND(pcshtp); \
+	cr1_hi.CR1_hi_half = cr1_hi_val; \
+	*(cr1_hi_p) = cr1_hi; \
+})
+
+static inline void
+atomic_save_all_stacks_regs(e2k_stacks_t *stacks, e2k_cr1_hi_t *cr1_hi_p)
+{
+	ATOMIC_SAVE_ALL_STACKS_REGS(stacks, cr1_hi_p);
+}
+
+#define user_stack_cannot_be_expanded()	test_thread_flag(TIF_USD_NOT_EXPANDED)
+
+#define set_user_stack_cannot_be_expanded() \
+({ \
+	if (TASK_IS_PROTECTED(current)) { \
+		set_thread_flag(TIF_USD_NOT_EXPANDED);\
+	} \
+})
+
+typedef enum restore_caller {
+	FROM_SYSCALL_N_PROT = 1 << 1,
+	FROM_SYSCALL_PROT_8 = 1 << 2,
+	FROM_SYSCALL_PROT_10 = 1 << 3,
+	FROM_USER_TRAP = 1 << 4,
+	FROM_SIGRETURN = 1 << 5,
+	FROM_RET_FROM_FORK = 1 << 6,
+	FROM_MAKECONTEXT = 1 << 7,
+	FROM_RETURN_PV_VCPU_TRAP = 1 << 8,
+	FROM_PV_VCPU_SYSCALL = 1 << 10,
+	FROM_PV_VCPU_SYSFORK = 1 << 11,
+} restore_caller_t;
+
+#define FROM_PV_VCPU_MODE	(FROM_RETURN_PV_VCPU_TRAP | \
+					FROM_PV_VCPU_SYSCALL | \
+					FROM_PV_VCPU_SYSFORK)
+
+#ifndef CONFIG_VIRTUALIZATION
+/* it is a native kernel without virtualization support */
+
+/*
+ * Is the CPU at guest Hardware Virtualized mode
+ * CORE_MODE.gmi is true only at guest HV mode
+ */
+static inline bool host_is_at_HV_GM_mode(void)
+{
+	/* native kernel does not support VMs and cannot be at guest mode */
+	return false;
+}
+#define usd_cannot_be_expanded(regs)	user_stack_cannot_be_expanded()
+					/* all user stacks can be */
+					/* expanded if possible */
+#define clear_vm_thread_flags()		/* virtual machines are not */
+					/* supported, nothing to clear */
+
+#define GET_PARAVIRT_GUEST_MODE(pv_guest, regs)	/* nothing to do */
+
+#define UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs, __gti, __vcpu)	\
+		NATIVE_UPDATE_VCPU_THREAD_CONTEXT(__task, __ti, __regs,	\
+							__gti, __vcpu)
+#define CHECK_VCPU_THREAD_CONTEXT(__ti) \
+		NATIVE_CHECK_VCPU_THREAD_CONTEXT(__ti)
+
+static inline void
+clear_virt_thread_struct(thread_info_t *thread_info)
+{
+	/* virtual machines are not supported */
+}
+static __always_inline __interrupt void
+complete_switch_to_user_func(void)
+{
+	/* virtualization not supported, so nothing to do */
+	/* but the function should switch interrupt control from UPSR to */
+	/* PSR and set initial state of user UPSR */
+	NATIVE_SET_USER_INITIAL_UPSR(E2K_USER_INITIAL_UPSR);
+}
+static __always_inline __interrupt void
+complete_go2user(thread_info_t *ti, long fn)
+{
+	/* virtualization not supported, so nothing to do */
+	/* but the function should restore user UPSR state */
+	NATIVE_WRITE_UPSR_REG(ti->upsr);
+}
+static inline void free_virt_task_struct(struct task_struct *task)
+{
+	/* virtual machines are not supported */
+}
+#else	/* CONFIG_VIRTUALIZATION */
+/* It is native host kernel with virtualization support */
+/* or paravirtualized host and guest */
+/* or native guest kernel
+ #include
+ */
+#endif	/* ! CONFIG_VIRTUALIZATION */
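+
+/*
+ * Note (illustrative, not part of the original source): restore_caller_t
+ * values above are single bits, so call sites test them as masks rather
+ * than comparing for equality, e.g.:
+ *
+ *	if (from & FROM_PV_VCPU_MODE)
+ *		...;	// any of the three pv-vcpu origins
+ *	if (from & (FROM_SYSCALL_N_PROT | FROM_SYSCALL_PROT_8))
+ *		...;	// either syscall flavor
+ */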
+
+static __always_inline void
+native_kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
+{
+	void *dst_tail;
+	const void *src_tail;
+	u64 copied;
+	int i;
+
+	/*
+	 * Kernel does not use FP registers so do not copy them.
+	 * This only applies to CPUs before V5 instruction set
+	 * (since V5 FP registers become general-purpose QP registers).
+	 */
+	if (cpu_has(CPU_FEAT_QPREG)) {
+#pragma loop count (10)
+		for (i = 0; i < size / 64; i++)
+			E2K_TAGGED_MEMMOVE_64(&dst[8 * i], &src[8 * i]);
+
+		copied = round_down(size, 64);
+		dst_tail = (void *) dst + copied;
+		src_tail = (void *) src + copied;
+	} else {
+#pragma loop count (5)
+		for (i = 0; i < size / 128; i++)
+			E2K_TAGGED_MEMMOVE_128_RF_V2(&dst[16 * i],
+					&src[16 * i]);
+
+		copied = round_down(size, 128);
+		dst_tail = (void *) dst + copied;
+		src_tail = (void *) src + copied;
+
+		if (size & 64) {
+			E2K_TAGGED_MEMMOVE_64(dst_tail, src_tail);
+			dst_tail += 64;
+			src_tail += 64;
+		}
+	}
+
+	if (size & 32)
+		E2K_TAGGED_MEMMOVE_32(dst_tail, src_tail);
+}
+
+static __always_inline void
+native_collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
+{
+	e2k_pcsp_hi_t k_pcsp_hi;
+	u64 size;
+	int i;
+
+	DebugUST("current host chain stack index 0x%x, PCSHTP 0x%llx\n",
+		NATIVE_NV_READ_PCSP_HI_REG().PCSP_hi_ind,
+		NATIVE_READ_PCSHTP_REG_SVALUE());
+
+	NATIVE_FLUSHC;
+	k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+
+	size = k_pcsp_hi.PCSP_hi_ind - spilled_size;
+	BUG_ON(!IS_ALIGNED(size, ALIGN_PCSTACK_TOP_SIZE) || (s64) size < 0);
+#pragma loop count (2)
+	for (i = 0; i < size / 32; i++) {
+		u64 v0, v1, v2, v3;
+
+		v0 = src[4 * i];
+		v1 = src[4 * i + 1];
+		v2 = src[4 * i + 2];
+		v3 = src[4 * i + 3];
+		dst[4 * i] = v0;
+		dst[4 * i + 1] = v1;
+		dst[4 * i + 2] = v2;
+		dst[4 * i + 3] = v3;
+	}
+
+	k_pcsp_hi.PCSP_hi_ind -= spilled_size;
+	NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(k_pcsp_hi);
+
+	DebugUST("move spilled chain part from host top %px to "
+		"bottom %px, size 0x%llx\n",
+		src, dst, size);
+	DebugUST("host kernel chain stack index is now 0x%x, "
+		"guest user PCSHTP 0x%llx\n",
+		k_pcsp_hi.PCSP_hi_ind, spilled_size);
+}
+
+static __always_inline void
+native_collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
+{
+	e2k_psp_hi_t k_psp_hi;
+	u64 size;
+
+	DebugUST("current host procedure stack index 0x%x, PSHTP 0x%x\n",
+		NATIVE_NV_READ_PSP_HI_REG().PSP_hi_ind,
+		NATIVE_NV_READ_PSHTP_REG().PSHTP_ind);
+
+	NATIVE_FLUSHR;
+	k_psp_hi = NATIVE_NV_READ_PSP_HI_REG();
+
+	size = k_psp_hi.PSP_hi_ind - spilled_size;
+	BUG_ON(!IS_ALIGNED(size, ALIGN_PSTACK_TOP_SIZE) || (s64) size < 0);
+
+	prefetchw_range(src, size);
+	native_kernel_hw_stack_frames_copy(dst, src, size);
+
+	k_psp_hi.PSP_hi_ind -= spilled_size;
+	NATIVE_NV_NOIRQ_WRITE_PSP_HI_REG(k_psp_hi);
+
+	DebugUST("move spilled procedure part from host top %px to "
+		"bottom %px, size 0x%llx\n",
+		src, dst, size);
+	DebugUST("host kernel procedure stack index is now 0x%x, "
+		"guest user PSHTP 0x%llx\n",
+		k_psp_hi.PSP_hi_ind, spilled_size);
+}
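+
+/*
+ * Worked example (illustrative, not part of the original source): when
+ * called as in collapse_kernel_hw_stacks() below, with PCSP_hi_ind == 0x60
+ * and spilled_size == 0x40, native_collapse_kernel_pcs() computes
+ * size == 0x20, moves those 0x20 bytes of kernel frames from offset 0x40
+ * down to offset 0x0 and rewinds PCSP_hi_ind to 0x20 - the user part of
+ * the spill is simply dropped from the kernel chain stack.
+ */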
+
+/**
+ * __find_in_old_u_pcs_list - find frame offset in old_u_pcs_list
+ * @frame - frame to search
+ * @delta - chain stack offset will be returned here
+ *
+ * Returns 0 on success.
+ */
+static inline int __find_in_old_u_pcs_list(unsigned long frame,
+		unsigned long *delta, unsigned long pcs_base,
+		unsigned long pcs_top, struct list_head *old_u_pcs_list)
+{
+	struct old_pcs_area *u_pcs;
+	int ret = -ESRCH;
+
+	if (frame >= pcs_base && frame < pcs_top) {
+		*delta = 0;
+		return 0;
+	}
+
+	list_for_each_entry(u_pcs, old_u_pcs_list, list_entry) {
+		if (frame >= (unsigned long) u_pcs->base &&
+				frame < (unsigned long) u_pcs->base +
+							u_pcs->size) {
+			*delta = pcs_base - (unsigned long) u_pcs->base;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static inline int find_in_old_u_pcs_list(unsigned long frame,
+						unsigned long *delta)
+{
+	unsigned long pcs_base, pcs_top;
+
+	pcs_base = (unsigned long) CURRENT_PCS_BASE();
+	pcs_top = pcs_base +
+		get_hw_pcs_user_size(&current_thread_info()->u_hw_stack);
+
+	return __find_in_old_u_pcs_list(frame, delta, pcs_base, pcs_top,
+				&current_thread_info()->old_u_pcs_list);
+}
+
+static inline int __copy_old_u_pcs_list(struct list_head *to,
+					const struct list_head *from)
+{
+	const struct old_pcs_area *u_pcs_from;
+	struct old_pcs_area *u_pcs_to;
+
+	list_for_each_entry(u_pcs_from, from, list_entry) {
+		u_pcs_to = kmalloc(sizeof(struct old_pcs_area), GFP_KERNEL);
+		if (unlikely(!u_pcs_to))
+			return -ENOMEM;
+
+		u_pcs_to->base = u_pcs_from->base;
+		u_pcs_to->size = u_pcs_from->size;
+
+		list_add_tail(&u_pcs_to->list_entry, to);
+	}
+
+	return 0;
+}
+
+static inline int copy_old_u_pcs_list(struct thread_info *to,
+					const struct thread_info *from)
+{
+	return __copy_old_u_pcs_list(&to->old_u_pcs_list, &from->old_u_pcs_list);
+}
+
+static inline int
+update_vm_area_flags(e2k_addr_t start, e2k_size_t len,
+		vm_flags_t flags_to_set, vm_flags_t flags_to_clear)
+{
+	int error = 0;
+
+	down_write(&current->mm->mmap_sem);
+	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
+	start &= PAGE_MASK;
+
+	error = do_update_vm_area_flags(start, len, flags_to_set,
+					flags_to_clear);
+
+	up_write(&current->mm->mmap_sem);
+	return error;
+}
+
+extern unsigned long *__alloc_thread_stack_node(int node);
+extern void __free_thread_stack(void *address);
+
+extern struct task_struct *init_tasks[];
+
+extern e2k_addr_t get_nested_kernel_IP(pt_regs_t *regs, int n);
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* paravirtualized kernel (host and guest) */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* It is native guest kernel (without paravirtualization) */
+#include
+#elif defined(CONFIG_VIRTUALIZATION) || !defined(CONFIG_VIRTUALIZATION)
+/* native kernel with virtualization support */
+/* native kernel without virtualization support */
+#define E2K_FLUSHCPU	NATIVE_FLUSHCPU
+#define E2K_FLUSHR	NATIVE_FLUSHR
+#define E2K_FLUSHC	NATIVE_FLUSHC
+#define COPY_STACKS_TO_MEMORY()	NATIVE_COPY_STACKS_TO_MEMORY()
+#define GOTO_RETURN_TO_PARAVIRT_GUEST(ret_value)
+#define COND_GOTO_RETURN_TO_PARAVIRT_GUEST(cond, ret_value)
+#define GOTO_DONE_TO_PARAVIRT_GUEST()
+#define COND_GOTO_DONE_TO_PARAVIRT_GUEST(cond)
+
+#define ONLY_SET_GUEST_GREGS(ti)	NATIVE_ONLY_SET_GUEST_GREGS(ti)
+
+static __always_inline void
+kernel_hw_stack_frames_copy(u64 *dst, const u64 *src, unsigned long size)
+{
+	native_kernel_hw_stack_frames_copy(dst, src, size);
+}
+static __always_inline void
+collapse_kernel_pcs(u64 *dst, const u64 *src, u64 spilled_size)
+{
+	native_collapse_kernel_pcs(dst, src, spilled_size);
+}
+static __always_inline void
+collapse_kernel_ps(u64 *dst, const u64 *src, u64 spilled_size)
+{
+	native_collapse_kernel_ps(dst, src, spilled_size);
+}
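+
+/*
+ * Usage sketch (illustrative; 'frame' is a hypothetical chain stack
+ * address): the delta returned by find_in_old_u_pcs_list() above rebases
+ * a frame address from an old, already replaced chain stack area into the
+ * current one:
+ *
+ *	unsigned long delta;
+ *
+ *	if (!find_in_old_u_pcs_list(frame, &delta))
+ *		frame += delta;	// now points into the current chain stack
+ */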
+
+#if !defined(CONFIG_VIRTUALIZATION)
+/* native kernel without virtualization support */
+#define do_map_user_hard_stack_to_kernel(node, kstart, ubase, size) \
+		do_map_native_user_hard_stack_to_kernel(node, kstart, \
+							ubase, size)
+#define resume_vm_thread()	/* no virtual machines or threads to resume */
+#endif /* ! CONFIG_VIRTUALIZATION */
+
+static inline void
+virt_cpu_thread_init(struct task_struct *boot_task)
+{
+	/* nothing to do */
+}
+
+static inline int
+copy_kernel_stacks(struct task_struct *new_task,
+			unsigned long fn, unsigned long arg)
+{
+	return native_copy_kernel_stacks(new_task, fn, arg);
+}
+
+#define define_user_hw_stacks_sizes(hw_stacks) \
+		native_define_user_hw_stacks_sizes(hw_stacks)
+
+static __always_inline int
+switch_to_new_user(e2k_stacks_t *stacks, hw_stack_t *hw_stacks,
+	e2k_addr_t cut_base, e2k_size_t cut_size,
+	e2k_addr_t entry_point, int cui,
+	unsigned long flags, bool kernel)
+{
+	return native_switch_to_new_user(stacks, hw_stacks,
+		cut_base, cut_size, entry_point, cui, flags, kernel);
+}
+static inline int clone_prepare_spilled_user_stacks(e2k_stacks_t *child_stacks,
+		const e2k_mem_crs_t *child_crs, const struct pt_regs *regs,
+		struct sw_regs *new_sw_regs, struct thread_info *new_ti,
+		unsigned long clone_flags)
+{
+	return native_clone_prepare_spilled_user_stacks(child_stacks,
+			child_crs, regs, new_sw_regs, new_ti, clone_flags);
+}
+static inline int
+copy_spilled_user_stacks(struct e2k_stacks *child_stacks,
+		e2k_mem_crs_t *child_crs, struct sw_regs *new_sw_regs,
+		const struct thread_info *new_ti)
+{
+	native_copy_spilled_user_stacks(child_stacks, child_crs,
+					new_sw_regs, new_ti);
+	return 0;
+}
+
+#else	/* ??? */
+ #error	"Undefined virtualization mode"
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+/*
+ * Copy hardware stack from user to *current* kernel stack.
+ * One has to be careful to avoid hardware FILL of this stack.
+ */
+static inline int __copy_user_to_current_hw_stack(void *dst, void __user *src,
+			unsigned long size, const pt_regs_t *regs, bool chain)
+{
+	unsigned long min_flt, maj_flt, ts_flag;
+
+	if (likely(!host_test_intc_emul_mode(regs))) {
+		if (!__range_ok((unsigned long __force) src, size,
+				PAGE_OFFSET))
+			return -EFAULT;
+	}
+
+	ts_flag = set_ts_flag(TS_KERNEL_SYSCALL);
+
+	/*
+	 * Every page fault here has a chance of FILL'ing the frame
+	 * that is being copied, in which case we repeat the copy.
+ */ + do { + min_flt = READ_ONCE(current->min_flt); + maj_flt = READ_ONCE(current->maj_flt); + + if (chain) + E2K_FLUSHC; + else + E2K_FLUSHR; + + SET_USR_PFAULT("$.recovery_memcpy_fault"); + fast_tagged_memory_copy_from_user(dst, src, size, regs, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + true); + if (RESTORE_USR_PFAULT) { + clear_ts_flag(ts_flag); + return -EFAULT; + } + } while (unlikely(min_flt != READ_ONCE(current->min_flt) || + maj_flt != READ_ONCE(current->maj_flt))); + + clear_ts_flag(ts_flag); + return 0; +} + + +static inline int copy_user_to_current_hw_stack(void *dst, void __user *src, + unsigned long size, pt_regs_t *regs, bool chain) +{ + unsigned long flags; + int ret; + + raw_all_irq_save(flags); + ret = __copy_user_to_current_hw_stack(dst, src, size, regs, chain); + raw_all_irq_restore(flags); + + return ret; +} + +static inline int copy_e2k_stack_from_user(void *dst, void __user *src, + unsigned long size, pt_regs_t *regs) +{ + unsigned long ts_flag; + int ret; + + if (likely(!host_test_intc_emul_mode(regs))) { + if (!__range_ok((unsigned long __force) src, size, PAGE_OFFSET)) + return -EFAULT; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = host_copy_from_user_with_tags(dst, src, size, regs); + clear_ts_flag(ts_flag); + + return (ret) ? -EFAULT : 0; +} + +static inline int copy_e2k_stack_to_user(void __user *dst, void *src, + unsigned long size, pt_regs_t *regs) +{ + unsigned long ts_flag; + int ret; + + if (likely(!host_test_intc_emul_mode(regs))) { + if (!__range_ok((unsigned long __force) dst, size, PAGE_OFFSET)) + return -EFAULT; + } + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = host_copy_to_user_with_tags(dst, src, size, regs); + clear_ts_flag(ts_flag); + + return (ret) ? -EFAULT : 0; +} + +DECLARE_PER_CPU(void *, reserve_hw_stacks); +static inline int on_reserve_stacks(void) +{ + e2k_pcsp_lo_t pcsp_lo; + unsigned long res_base; + + WARN_ON_ONCE(!psr_and_upsr_irqs_disabled()); + + pcsp_lo = READ_PCSP_LO_REG(); + res_base = (unsigned long) raw_cpu_read(reserve_hw_stacks); + + return AS(pcsp_lo).base >= res_base + KERNEL_PC_STACK_OFFSET && + AS(pcsp_lo).base < res_base + KERNEL_PC_STACK_OFFSET + + KERNEL_PC_STACK_SIZE; +} + +static __always_inline int +user_hw_stack_frames_copy(void __user *dst, void *src, unsigned long copy_size, + const pt_regs_t *regs, unsigned long hw_stack_ind, bool is_pcsp) +{ + unsigned long ts_flag; + + if (unlikely(hw_stack_ind < copy_size)) { + unsigned long flags; + raw_all_irq_save(flags); + if (is_pcsp) { + E2K_FLUSHC; + } else { + E2K_FLUSHR; + } + raw_all_irq_restore(flags); + } + + SET_USR_PFAULT("$.recovery_memcpy_fault"); + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + fast_tagged_memory_copy_to_user(dst, src, copy_size, regs, + TAGGED_MEM_STORE_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, + TAGGED_MEM_LOAD_REC_OPC | + MAS_BYPASS_L1_CACHE << LDST_REC_OPC_MAS_SHIFT, true); + clear_ts_flag(ts_flag); + + if (RESTORE_USR_PFAULT) { + pr_err("process %s (%d) %s stack could not be copied " + "from %px to %px size 0x%lx (out of memory?)\n", + current->comm, current->pid, + (is_pcsp) ? "chain" : "procedure", + src, dst, copy_size); + return -EFAULT; + } + DebugUST("copying guest %s stack spilled to host from %px " + "to guest kernel stack from %px, size 0x%lx\n", + (is_pcsp) ? 
"chain" : "procedure", src, dst, copy_size); + + return 0; +} + +static __always_inline int +user_crs_frames_copy(e2k_mem_crs_t __user *u_frame, pt_regs_t *regs) +{ + e2k_mem_crs_t *crs = ®s->crs; + unsigned long ts_flag; + int ret; + + ts_flag = set_ts_flag(TS_KERNEL_SYSCALL); + ret = host_copy_to_user(u_frame, crs, sizeof(*crs), regs); + clear_ts_flag(ts_flag); + if (unlikely(ret)) + return -EFAULT; + + return 0; +} + +static __always_inline u64 get_wsz(enum restore_caller from) +{ + return NATIVE_READ_WD_REG().size >> 4; +} + +static __always_inline int user_psp_stack_copy(e2k_psp_lo_t u_psp_lo, + e2k_psp_hi_t u_psp_hi, s64 u_pshtp_size, + e2k_psp_lo_t k_psp_lo, e2k_psp_hi_t k_psp_hi, + unsigned long copy_size, const pt_regs_t *regs) +{ + void __user *dst; + void *src; + + dst = (void __user *) (AS(u_psp_lo).base + AS(u_psp_hi).ind - + u_pshtp_size); + src = (void *) AS(k_psp_lo).base; + + return user_hw_stack_frames_copy(dst, src, copy_size, + regs, k_psp_hi.PSP_hi_ind, false); +} + +static __always_inline int user_pcsp_stack_copy(e2k_pcsp_lo_t u_pcsp_lo, + e2k_pcsp_hi_t u_pcsp_hi, s64 u_pcshtp_size, + e2k_pcsp_lo_t k_pcsp_lo, e2k_pcsp_hi_t k_pcsp_hi, + unsigned long copy_size, const pt_regs_t *regs) +{ + void __user *dst; + void *src; + + dst = (void __user *)(AS(u_pcsp_lo).base + AS(u_pcsp_hi).ind - + u_pcshtp_size); + src = (void *) AS(k_pcsp_lo).base; + + return user_hw_stack_frames_copy(dst, src, copy_size, + regs, k_pcsp_hi.PCSP_hi_ind, true); +} + +static __always_inline u64 get_ps_clear_size(u64 cur_window_q, + e2k_pshtp_t pshtp) +{ + s64 u_pshtp_size_q; + + u_pshtp_size_q = GET_PSHTP_Q_INDEX(pshtp); + if (u_pshtp_size_q > E2K_MAXSR - cur_window_q) + u_pshtp_size_q = E2K_MAXSR - cur_window_q; + + return E2K_MAXSR - (cur_window_q + u_pshtp_size_q); +} + +static __always_inline s64 get_ps_copy_size(u64 cur_window_q, s64 u_pshtp_size) +{ + return u_pshtp_size - (E2K_MAXSR - cur_window_q) * EXT_4_NR_SZ; +} + +#ifdef CONFIG_CPU_HAS_FILL_INSTRUCTION +# define E2K_CF_MAX_FILL (E2K_CF_MAX_FILL_FILLC_q * 0x10) +#else +extern int cf_max_fill_return; +# define E2K_CF_MAX_FILL cf_max_fill_return +#endif + +static __always_inline s64 get_pcs_copy_size(s64 u_pcshtp_size) +{ + /* Before v6 it was possible to fill no more than 16 registers. + * Since E2K_MAXCR_q is much bigger than 16 we can be sure that + * there is enough space in CF for the FILL, so there is no + * need to take into account space taken by current window. */ + return u_pcshtp_size - E2K_CF_MAX_FILL; +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* This function is used to fixup ret_stack, so make sure it itself + * does not rely on correct values in ret_stack by using "notrace". */ +notrace +static inline void apply_graph_tracer_delta(unsigned long delta) +{ + int i, last; + + if (likely(!current->ret_stack)) + return; + + last = current->curr_ret_stack; + for (i = min(last, FTRACE_RETFUNC_DEPTH - 1); i >= 0; i--) + current->ret_stack[i].fp += delta; +} +#else +static inline void apply_graph_tracer_delta(unsigned long delta) +{ +} +#endif + +/** + * user_hw_stacks_copy - copy user hardware stacks that have been + * SPILLed to kernel back to user space + * @stacks - saved user stack registers + * @cur_window_q - size of current window in procedure stack, + * needed only if @copy_full is not set + * @copy_full - set if want to copy _all_ of SPILLed stacks + * + * This does not update stacks->pshtp and stacks->pcshtp. 
+
+/**
+ * user_hw_stacks_copy - copy user hardware stacks that have been
+ *			 SPILLed to kernel back to user space
+ * @stacks - saved user stack registers
+ * @cur_window_q - size of current window in procedure stack,
+ *		   needed only if @copy_full is not set
+ * @copy_full - set if we want to copy _all_ of SPILLed stacks
+ *
+ * This does not update stacks->pshtp and stacks->pcshtp. The main reason
+ * is signals: if a signal arrives after copying then it must see a coherent
+ * state where saved stacks->pshtp and stacks->pcshtp values show how much
+ * data from user space is spilled to kernel space.
+ */
+static __always_inline int
+user_hw_stacks_copy(struct e2k_stacks *stacks,
+		pt_regs_t *regs, u64 cur_window_q, bool copy_full)
+{
+	trap_pt_regs_t *trap = regs->trap;
+	e2k_psp_lo_t u_psp_lo = stacks->psp_lo,
+		     k_psp_lo = current_thread_info()->k_psp_lo;
+	e2k_psp_hi_t u_psp_hi = stacks->psp_hi;
+	e2k_pcsp_lo_t u_pcsp_lo = stacks->pcsp_lo,
+		      k_pcsp_lo = current_thread_info()->k_pcsp_lo;
+	e2k_pcsp_hi_t u_pcsp_hi = stacks->pcsp_hi;
+	s64 u_pshtp_size, u_pcshtp_size, ps_copy_size, pcs_copy_size;
+	int ret;
+
+	u_pshtp_size = GET_PSHTP_MEM_INDEX(stacks->pshtp);
+	u_pcshtp_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp);
+
+	/*
+	 * Copy user's part from kernel stacks into user stacks
+	 * Update user's stack registers
+	 */
+	if (copy_full) {
+		pcs_copy_size = u_pcshtp_size;
+		ps_copy_size = u_pshtp_size;
+	} else {
+		pcs_copy_size = get_pcs_copy_size(u_pcshtp_size);
+		ps_copy_size = get_ps_copy_size(cur_window_q, u_pshtp_size);
+
+		/* Make sure there is enough space in CF for the FILL */
+		BUG_ON((E2K_MAXCR_q - 4) * 16 < E2K_CF_MAX_FILL);
+	}
+
+	if (likely(pcs_copy_size <= 0 && ps_copy_size <= 0))
+		return 0;
+
+	if (unlikely(pcs_copy_size > 0)) {
+		e2k_pcsp_hi_t k_pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+
+		/* Since not all user data has been SPILL'ed it is possible
+		 * that we have already overflowed user's hardware stack. */
+		if (unlikely(AS(u_pcsp_hi).ind > AS(u_pcsp_hi).size)) {
+			ret = handle_chain_stack_bounds(stacks, trap);
+			if (unlikely(ret)) {
+				pr_warning("process %s (%d) chain stack overflow (out of memory?)\n",
+						current->comm, current->pid);
+				return ret;
+			}
+
+			u_pcsp_lo = stacks->pcsp_lo;
+			u_pcsp_hi = stacks->pcsp_hi;
+		}
+
+		ret = user_pcsp_stack_copy(u_pcsp_lo, u_pcsp_hi, u_pcshtp_size,
+				k_pcsp_lo, k_pcsp_hi, pcs_copy_size, regs);
+		if (ret)
+			return ret;
+	}
+
+	if (unlikely(ps_copy_size > 0)) {
+		e2k_psp_hi_t k_psp_hi = NATIVE_NV_READ_PSP_HI_REG();
+
+		/* Since not all user data has been SPILL'ed it is possible
+		 * that we have already overflowed user's hardware stack. */
+		if (unlikely(AS(u_psp_hi).ind > AS(u_psp_hi).size)) {
+			ret = handle_proc_stack_bounds(stacks, trap);
+			if (unlikely(ret)) {
+				pr_warning("process %s (%d) procedure stack overflow (out of memory?)\n",
+						current->comm, current->pid);
+				return ret;
+			}
+
+			u_psp_lo = stacks->psp_lo;
+			u_psp_hi = stacks->psp_hi;
+		}
+
+		ret = user_psp_stack_copy(u_psp_lo, u_psp_hi, u_pshtp_size,
+				k_psp_lo, k_psp_hi, ps_copy_size, regs);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
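+
+/*
+ * Usage sketch (illustrative, not taken from a real call site): a caller
+ * that must push everything back to user space - e.g. before handing
+ * control to a user signal handler - would request a full copy:
+ *
+ *	ret = user_hw_stacks_copy(&regs->stacks, regs, 0, true);
+ *	if (ret)
+ *		return ret;	// stack overflow was already reported
+ */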
+
+static inline void collapse_kernel_hw_stacks(struct e2k_stacks *stacks)
+{
+	e2k_pcsp_lo_t k_pcsp_lo = current_thread_info()->k_pcsp_lo;
+	e2k_psp_lo_t k_psp_lo = current_thread_info()->k_psp_lo;
+	unsigned long flags, spilled_pc_size, spilled_p_size;
+	e2k_pshtp_t pshtp = stacks->pshtp;
+	u64 *dst;
+	const u64 *src;
+
+	spilled_pc_size = PCSHTP_SIGN_EXTEND(stacks->pcshtp);
+	spilled_p_size = GET_PSHTP_MEM_INDEX(pshtp);
+	DebugUST("guest user spilled to host kernel stack part: chain 0x%lx "
+		"procedure 0x%lx\n",
+		spilled_pc_size, spilled_p_size);
+	/* When user tries to return from the last user frame
+	 * we will have pcshtp = pcsp_hi.ind = 0. But a situation
+	 * with pcsp_hi.ind != 0 and pcshtp == 0 is impossible. */
+	if (WARN_ON_ONCE(spilled_pc_size < SZ_OF_CR &&
+			AS(stacks->pcsp_hi).ind != 0))
+		do_exit(SIGKILL);
+
+	/* Keep the last user frame (see user_hw_stacks_copy_full()) */
+	if (spilled_pc_size >= SZ_OF_CR) {
+		spilled_pc_size -= SZ_OF_CR;
+		DebugUST("Keep the prev user chain frame, so spilled chain "
+			"size is now 0x%lx\n",
+			spilled_pc_size);
+	}
+
+	raw_all_irq_save(flags);
+
+	if (spilled_pc_size) {
+		dst = (u64 *) AS(k_pcsp_lo).base;
+		src = (u64 *) (AS(k_pcsp_lo).base + spilled_pc_size);
+		collapse_kernel_pcs(dst, src, spilled_pc_size);
+
+		stacks->pcshtp = SZ_OF_CR;
+
+		apply_graph_tracer_delta(-spilled_pc_size);
+	}
+
+	if (spilled_p_size) {
+		dst = (u64 *) AS(k_psp_lo).base;
+		src = (u64 *) (AS(k_psp_lo).base + spilled_p_size);
+		collapse_kernel_ps(dst, src, spilled_p_size);
+
+		AS(pshtp).ind = 0;
+		stacks->pshtp = pshtp;
+	}
+
+	raw_all_irq_restore(flags);
+}
+
+/**
+ * user_hw_stacks_copy_full - copy part of user stacks that was SPILLed
+ *	into kernel back to user stacks.
+ * @stacks - saved user stack registers
+ * @regs - pt_regs pointer
+ * @crs - last frame to copy
+ *
+ * If @crs is not NULL then the frame pointed to by it will also be copied
+ * to userspace. Note that 'stacks->pcsp_hi.ind' is _not_ updated after
+ * copying since it would leave stack in inconsistent state (with two
+ * copies of the same @crs frame), this is left to the caller.
+ *
+ * Inlining this reduces the amount of memory to copy in
+ * collapse_kernel_hw_stacks().
+ */
+static inline int user_hw_stacks_copy_full(struct e2k_stacks *stacks,
+		pt_regs_t *regs, e2k_mem_crs_t *crs)
+{
+	int ret;
+
+	/*
+	 * Copy part of user stacks that were SPILLed into kernel stacks
+	 */
+	ret = user_hw_stacks_copy(stacks, regs, 0, true);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Nothing to FILL so remove the resulting hole from kernel stacks.
+	 *
+	 * IMPORTANT: there is always at least one user frame at the top of
+	 * kernel stack - the one that issued a system call (in case of an
+	 * exception we uphold this rule manually, see user_hw_stacks_prepare())
+	 * We keep this ABI and _always_ leave space for one user frame,
+	 * this way we can later FILL using return trick (otherwise there
+	 * would be no space in chain stack for the trick).
+	 */
+	collapse_kernel_hw_stacks(stacks);
+
+	/*
+	 * Copy saved %cr registers
+	 *
+	 * Caller must take care of filling of resulting hole
+	 * (last user frame from pcshtp == SZ_OF_CR).
+	 */
+	if (crs) {
+		e2k_mem_crs_t __user *u_frame;
+		int ret;
+
+		u_frame = (void __user *) (AS(stacks->pcsp_lo).base +
+					   AS(stacks->pcsp_hi).ind);
+		ret = user_crs_frames_copy(u_frame, regs);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	return 0;
+}
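+
+/*
+ * Note (illustrative, not part of the original source): after
+ * user_hw_stacks_copy_full(stacks, regs, &crs) the kernel chain stack keeps
+ * exactly one user frame (pcshtp == SZ_OF_CR) and stacks->pcsp_hi.ind still
+ * points below the copied @crs frame, so the caller either FILLs from that
+ * frame using the return trick or advances the index itself once it is
+ * done with @crs.
+ */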
+
+/**
+ * user_hw_stacks_prepare - prepare user hardware stacks that have been
+ *			    SPILLed to kernel back to user space
+ * @stacks - saved user stack registers
+ * @cur_window_q - size of current window in procedure stack,
+ *		   needed only if @copy_full is not set
+ * @syscall - true if called upon direct system call exit (no signal handlers)
+ *
+ * This does two things:
+ *
+ * 1) It is possible that upon kernel entry pcshtp == 0 in some cases:
+ *   - user signal handler had pcshtp==0x20 before return to sigreturn()
+ *   - user context had pcshtp==0x20 before return to makecontext_trampoline()
+ *   - chain stack underflow happened
+ * So it is possible in sigreturn() and traps, but not in system calls.
+ * If we are using the trick with return to FILL user hardware stacks then
+ * we must have a frame in the chain stack to return to. So in this case
+ * kernel's chain stack is moved up by one frame (0x20 bytes).
+ * We also fill the new frame with actual user data and update stacks->pcshtp,
+ * this is needed to keep the coherent state where the saved stacks->pcshtp
+ * value shows how much data from user space has been spilled to kernel space.
+ *
+ * 2) It is not possible to always FILL all of user data that have been
+ * SPILLed to kernel stacks. So we manually copy the leftovers that can
+ * not be FILLed to user space.
+ * This copy does not update stacks->pshtp and stacks->pcshtp. The main
+ * reason is signals: if a signal arrives after copying then it must see a
+ * coherent state where saved stacks->pshtp and stacks->pcshtp values show
+ * how much data from user space has been spilled to kernel space.
+ */
+static __always_inline void native_user_hw_stacks_prepare(
+		struct e2k_stacks *stacks, pt_regs_t *regs,
+		u64 cur_window_q, enum restore_caller from, int syscall)
+{
+	e2k_pcshtp_t u_pcshtp = stacks->pcshtp;
+	int ret;
+
+	BUG_ON(from & FROM_PV_VCPU_MODE);
+
+	/*
+	 * 1) Make sure there is free space in kernel chain stack to return to
+	 */
+	if (!syscall && u_pcshtp == 0) {
+		unsigned long flags;
+		e2k_pcsp_lo_t u_pcsp_lo = stacks->pcsp_lo,
+			      k_pcsp_lo = current_thread_info()->k_pcsp_lo;
+		e2k_pcsp_hi_t u_pcsp_hi = stacks->pcsp_hi, k_pcsp_hi;
+		e2k_mem_crs_t __user *u_cframe;
+		e2k_mem_crs_t *k_crs;
+		u64 u_cbase;
+		int ret = -EINVAL;
+
+		raw_all_irq_save(flags);
+		E2K_FLUSHC;
+		k_pcsp_hi = READ_PCSP_HI_REG();
+		BUG_ON(AS(k_pcsp_hi).ind);
+		AS(k_pcsp_hi).ind += SZ_OF_CR;
+		WRITE_PCSP_HI_REG(k_pcsp_hi);
+
+		k_crs = (e2k_mem_crs_t *) AS(k_pcsp_lo).base;
+		u_cframe = (e2k_mem_crs_t __user *) (AS(u_pcsp_lo).base +
+						     AS(u_pcsp_hi).ind);
+		u_cbase = ((from & FROM_RETURN_PV_VCPU_TRAP) ||
+				host_test_intc_emul_mode(regs)) ?
+					u_pcsp_lo.PCSP_lo_base :
+						(u64) CURRENT_PCS_BASE();
+		if ((u64) u_cframe > u_cbase) {
+			ret = __copy_user_to_current_hw_stack(k_crs,
+				u_cframe - 1, sizeof(*k_crs), regs, true);
+		}
+		raw_all_irq_restore(flags);
+
+		/* Can happen if application returns until runs out of
+		 * chain stack or there is no free memory for stacks.
+		 * There is no user stack to return to - die. */
+		if (ret) {
+			E2K_LMS_HALT_OK;
+			SIGDEBUG_PRINT("SIGKILL. %s\n",
+				(ret == -EINVAL) ? "tried to return to kernel" :
+				       "ran into Out-of-Memory on user stacks");
+			force_sig(SIGKILL);
+			return;
+		}
+
+		if (AS(u_pcsp_hi).ind < SZ_OF_CR) {
+			update_pcsp_regs(AS(u_pcsp_lo).base,
+					 &u_pcsp_lo, &u_pcsp_hi);
+			stacks->pcsp_lo = u_pcsp_lo;
+			stacks->pcsp_hi = u_pcsp_hi;
+			BUG_ON(AS(u_pcsp_hi).ind < SZ_OF_CR);
+		}
+
+		u_pcshtp = SZ_OF_CR;
+		stacks->pcshtp = u_pcshtp;
+	}
+
+	/*
+	 * 2) Copy user data that cannot be FILLed
+	 */
+	ret = user_hw_stacks_copy(stacks, regs, cur_window_q, false);
+	if (unlikely(ret))
+		do_exit(SIGKILL);
+}
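+
+/*
+ * Worked example (illustrative, not part of the original source): entering
+ * a trap with pcshtp == 0 (say, after a chain stack underflow) takes
+ * branch 1) above: the kernel chain stack index grows by SZ_OF_CR, the last
+ * user frame is copied into that slot from u_cframe - 1, and stacks->pcshtp
+ * becomes SZ_OF_CR, so a signal arriving later sees the coherent
+ * "one user frame is spilled" state.
+ */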
"tried to return to kernel" : + "ran into Out-of-Memory on user stacks"); + force_sig(SIGKILL); + return; + } + + if (AS(u_pcsp_hi).ind < SZ_OF_CR) { + update_pcsp_regs(AS(u_pcsp_lo).base, + &u_pcsp_lo, &u_pcsp_hi); + stacks->pcsp_lo = u_pcsp_lo; + stacks->pcsp_hi = u_pcsp_hi; + BUG_ON(AS(u_pcsp_hi).ind < SZ_OF_CR); + } + + u_pcshtp = SZ_OF_CR; + stacks->pcshtp = u_pcshtp; + } + + /* + * 2) Copy user data that cannot be FILLed + */ + ret = user_hw_stacks_copy(stacks, regs, cur_window_q, false); + if (unlikely(ret)) + do_exit(SIGKILL); +} + +#ifndef CONFIG_VIRTUALIZATION +/* native kernel without virtualization support */ +static __always_inline void +host_user_hw_stacks_prepare(struct e2k_stacks *stacks, pt_regs_t *regs, + u64 cur_window_q, enum restore_caller from, int syscall) +{ + native_user_hw_stacks_prepare(stacks, regs, cur_window_q, + from, syscall); +} +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* It is native guest kernel (without paravirtualization) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized kernel (host and guest) */ +#include +#elif defined(CONFIG_KVM_HOST_MODE) +/* It is host kernel with virtualization support */ +#include +#else /* unknow mode */ +#error "unknown virtualization mode" +#endif /* !CONFIG_VIRTUALIZATION */ + + +extern e2k_addr_t get_nested_kernel_IP(pt_regs_t *regs, int n); +extern unsigned long remap_e2k_stack(unsigned long addr, + unsigned long old_size, unsigned long new_size, bool after); + +extern int find_cui_by_ip(unsigned long ip); +#endif /* _E2K_PROCESS_H */ + diff --git a/arch/e2k/include/asm/processor.h b/arch/e2k/include/asm/processor.h new file mode 100644 index 0000000..d611eea --- /dev/null +++ b/arch/e2k/include/asm/processor.h @@ -0,0 +1,458 @@ +/* + * include/asm-e2k/processor.h + * + * Copyright (C) 2001 MCST + */ + +#ifndef _E2K_PROCESSOR_H_ +#define _E2K_PROCESSOR_H_ +#ifndef __ASSEMBLY__ + +#include +#include + +#include +#include +#include +#include +#include +#include + + +/* We want to use OSGD for fast access to task_struct */ +#define ARCH_MIN_TASKALIGN E2K_ALIGN_GLOBALS_SZ + +/* + * CPU type, hardware bug flags, and per-CPU state. + */ +typedef struct cpuinfo_e2k { + __u8 family; + __u8 model; + __u8 revision; + char vendor[16]; + __u64 proc_freq; /* frequency of processor */ +#ifdef CONFIG_SMP + int cpu; + __u64 mmu_last_context; + __u64 ipi_count; +#endif +} cpuinfo_e2k_t; + +extern cpuinfo_e2k_t cpu_data[NR_CPUS]; + +#define my_cpu_data1(num_cpu) cpu_data[num_cpu] + +#define my_cpu_data cpu_data[smp_processor_id()] +#define raw_my_cpu_data cpu_data[raw_smp_processor_id()] + +#define STACK_TOP (current->thread.flags & E2K_FLAG_32BIT ? \ + USER32_STACK_TOP : USER64_STACK_TOP) +#define STACK_TOP_MAX USER64_STACK_TOP + +#define HAVE_ARCH_PICK_MMAP_LAYOUT +#define HAVE_ARCH_UNMAPPED_AREA + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + PAGE_ALIGN((current->thread.flags & \ + (E2K_FLAG_32BIT | E2K_FLAG_PROTECTED_MODE)) ? \ + (TASK32_SIZE / 3) : (TASK_SIZE / 3)) + +/* + * Size of io_bitmap in longwords: 32 is ports 0-0x3ff. 
+ */ +#define IO_BITMAP_SIZE 32 +#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) +#define INVALID_IO_BITMAP_OFFSET 0x8000 + +typedef struct thread_struct { +#ifndef CONFIG_CPU_HAS_FILL_INSTRUCTION + /* Used as a temporary area */ + struct { + e2k_cr0_hi_t cr0_hi; + e2k_cr1_lo_t cr1_lo; + e2k_cr1_hi_t cr1_hi; + e2k_psp_lo_t u_psp_lo; + e2k_psp_hi_t u_psp_hi; + e2k_pcsp_lo_t u_pcsp_lo; + e2k_pcsp_hi_t u_pcsp_hi; + int from; + bool return_to_user; +# if defined(CONFIG_VIRTUALIZATION) && !defined(CONFIG_KVM_GUEST_KERNEL) + bool from_paravirt_guest; +# endif + } fill; +#endif + u32 context; /* context of running process */ + struct sw_regs sw_regs; /* switch regs */ + + struct { + struct { + e2k_dibcr_t dibcr; + u64 dibar0; + u64 dibar1; + u64 dibar2; + u64 dibar3; + e2k_ddbcr_t ddbcr; + u64 ddbar0; + u64 ddbar1; + u64 ddbar2; + u64 ddbar3; + } regs; + /* user breakpoints set via ptrace */ + struct perf_event *hbp_data[HBP_NUM]; + struct perf_event *hbp_instr[HBP_NUM]; + } debug; + + /* Used by an old implementation of C3 sleep state */ + struct { + e2k_dibcr_t dibcr; + e2k_ddbcr_t ddbcr; + e2k_dimcr_t dimcr; + e2k_ddmcr_t ddmcr; + } C3; + + unsigned long flags; /* various flags (e.g. for mmap) */ +} thread_t; +#endif /* !__ASSEMBLY__ */ + +/* + * Thread flags + */ +#define E2K_FLAG_32BIT 0x01 /* task is older 32-bit binary */ +#define E2K_FLAG_PROTECTED_MODE 0x02 /* task is running in protected mode */ +#define E2K_FLAG_BIN_COMP_CODE 0x04 /* task is binary compiler code */ +#define PRINT_FUNCY_STACK_WORKS_BIT 3 /* do print_stack */ +#define PRINT_FUNCY_STACK_WORKS \ + (1UL << PRINT_FUNCY_STACK_WORKS_BIT) /* 0x08 */ +#define E2K_FLAG_PRINT_ALL_TASK 0x10 /* do print_stack */ +#define PRINT_STACK_WORKS 0x20 /* do print_stack */ +#define E2K_FLAG_64BIT_BINCO 0x40 /* 32-bit binco is running 64-bit x86 */ +#define E2K_FLAG_3P_ELF32 0x80 /* can be removed when only elf64 3P */ + /* is supported */ + +/* + * Various task info flags (is common for host and guest task) + * See last 'flags' argument of function switch_to_new_user() and + * same as field 'flags' of structure kvm_task_info_t (asm/kvm/hypervisor.h) + */ +#define BIN_32_CODE_TASK_FLAG_BIT 2 /* task is 32-bit binary */ + /* application */ +#define PROTECTED_CODE_TASK_FLAG_BIT 3 /* task is running in */ + /* protected mode */ +#define BIN_COMP_CODE_TASK_FLAG_BIT 4 /* task is binary application */ + /* compiler code */ +#define DO_PRESENT_HW_STACKS_TASK_FLAG_BIT 8 /* hardware stacks should be */ + /* made present (populated) */ +#define DO_LOCK_HW_STACKS_TASK_FLAG_BIT 9 /* hardware stacks should be */ + /* locked */ +#define PS_HAS_NOT_GUARD_PAGE_TASK_BIT 12 /* hardware procedure stack */ + /* has not extra guard page */ +#define PCS_HAS_NOT_GUARD_PAGE_TASK_BIT 13 /* hardware chain stack */ + /* has not extra guard page */ +#define SWITCH_TO_COMPLETE_TASK_BIT 14 /* switch to kernel hardware */ + /* stacks to complete task */ + /* else to change user stacks */ + /* and return to them */ +#define RETURN_TO_USER_STACKS_TASK_BIT 15 /* return to user hardware */ + /* stacks, else switch to */ + /* kernel stacks */ + +#define BIN_32_CODE_TASK_FLAG (1UL << BIN_32_CODE_TASK_FLAG_BIT) +#define BIN_COMP_CODE_TASK_FLAG (1UL << BIN_COMP_CODE_TASK_FLAG_BIT) +#define PROTECTED_CODE_TASK_FLAG (1UL << PROTECTED_CODE_TASK_FLAG_BIT) +#define DO_PRESENT_HW_STACKS_TASK_FLAG \ + (1UL << DO_PRESENT_HW_STACKS_TASK_FLAG_BIT) +#define DO_LOCK_HW_STACKS_TASK_FLAG \ + (1UL << DO_LOCK_HW_STACKS_TASK_FLAG_BIT) +#define PS_HAS_NOT_GUARD_PAGE_TASK_FLAG \ + 
(1UL << PS_HAS_NOT_GUARD_PAGE_TASK_BIT) +#define PCS_HAS_NOT_GUARD_PAGE_TASK_FLAG \ + (1UL << PCS_HAS_NOT_GUARD_PAGE_TASK_BIT) +#define SWITCH_TO_COMPLETE_TASK_FLAG (1UL << SWITCH_TO_COMPLETE_TASK_BIT) +#define RETURN_TO_USER_STACKS_TASK_FLAG (1UL << RETURN_TO_USER_STACKS_TASK_BIT) + +#ifndef __ASSEMBLY__ + +#define K_STK_BASE(thr) ((thr)->k_stk_base) +#define K_STK_TOP(thr) ((thr)->k_stk_base + KERNEL_C_STACK_SIZE) + + +#define INIT_THREAD { 0 } + +#define INIT_MMAP \ +{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } + +extern void start_thread(struct pt_regs *regs, + unsigned long entry, unsigned long sp); +extern int native_do_prepare_start_thread_frames(unsigned long entry, + unsigned long sp); +extern long do_sys_execve(unsigned long entry, unsigned long sp, int kernel); + +/* Forward declaration, a strange C thing */ +struct task_struct; +struct mm_struct; + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *); +/* + * create a kernel thread without removing it from tasklists + */ +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +extern void thread_init(void); + +/* + * Prepare to copy thread state - unlazy all lazy status + */ +#define prepare_to_copy(tsk) do { } while (0) + +/* Copy and release all segment info associated with a VM */ + +#define copy_segments(tsk, mm) do { } while (0) /* We don't have */ +#define release_segments(mm) do { } while (0) /* segments on E2K */ + +extern unsigned long boot_option_idle_override; +extern unsigned long idle_halt; +extern void native_default_idle(void); +extern bool idle_nomwait; + +unsigned long get_wchan(struct task_struct *p); +#define KSTK_EIP(tsk) \ +({ \ + struct pt_regs *pt_regs = task_thread_info(tsk)->pt_regs; \ + (pt_regs) ? \ + (unsigned long)AS_STRUCT(pt_regs->crs.cr0_hi).ip << 3 : \ + 0UL; \ +}) +#define KSTK_ESP(tsk) \ +({ \ + struct pt_regs *pt_regs = task_thread_info(tsk)->pt_regs; \ + (pt_regs) ? 
AS_STRUCT(pt_regs->stacks.usd_lo).base : \
+			task_thread_info(tsk)->u_stack.top; \
+})
+
+#ifdef CONFIG_SECONDARY_SPACE_SUPPORT
+# define TASK_IS_BINCO(tsk)	(tsk->thread.flags & E2K_FLAG_BIN_COMP_CODE)
+#else
+# define TASK_IS_BINCO(tsk)	0UL
+#endif
+
+#ifdef CONFIG_PROTECTED_MODE
+# define TASK_IS_PROTECTED(tsk)	(tsk->thread.flags & E2K_FLAG_PROTECTED_MODE)
+#else
+# define TASK_IS_PROTECTED(tsk)	0UL
+#endif
+
+#define native_cpu_relax() \
+	__asm__ __volatile__("{nop 7}" ::: "memory", PREEMPTION_CLOBBERS)
+#define cpu_relax_lowlatency() cpu_relax()
+#define native_cpu_relax_no_resched()	native_cpu_relax()
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *ptr)
+{
+	/* Use fully speculative load since ptr could be bad */
+	E2K_PREFETCH_L1_SPEC(ptr);
+}
+
+#define ARCH_HAS_PREFETCHW
+static inline void prefetchw(const void *ptr)
+{
+	/* prefetchw() is used when ptr is good, thus
+	 * we can use half-speculative load */
+	E2K_PREFETCH_L1(ptr);
+}
+
+#define prefetch_offset(ptr, offset) \
+do { \
+	/* Use fully speculative load since ptr could be bad */ \
+	E2K_PREFETCH_L1_SPEC_OFFSET((ptr), (offset)); \
+} while (0)
+
+#define prefetchw_offset(ptr, offset) \
+do { \
+	E2K_PREFETCH_L2_OFFSET((ptr), (offset)); \
+} while (0)
+
+/* Use L2 cache line size since we are prefetching to L2 */
+#define PREFETCH_STRIDE 64
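+
+/*
+ * Usage sketch (illustrative, not part of the original source): prefetch()
+ * tolerates a bad pointer thanks to the fully speculative load, while
+ * prefetchw() requires a valid one:
+ *
+ *	prefetchw(dst);			// dst is known-good, will be written
+ *	prefetch(maybe_stale_ptr);	// cannot fault even if stale
+ */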
+
+static __always_inline void prefetchw_range(const void *addr, size_t len)
+{
+#ifdef ARCH_HAS_PREFETCHW
+	s64 i, rem, prefetched;
+
+	if (__builtin_constant_p(len) && len < 24 * PREFETCH_STRIDE) {
+		if (len > 0)
+			prefetchw(addr);
+		if (len > PREFETCH_STRIDE)
+			prefetchw_offset(addr, PREFETCH_STRIDE);
+		if (len > 2 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 2 * PREFETCH_STRIDE);
+		if (len > 3 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 3 * PREFETCH_STRIDE);
+		if (len > 4 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 4 * PREFETCH_STRIDE);
+		if (len > 5 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 5 * PREFETCH_STRIDE);
+		if (len > 6 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 6 * PREFETCH_STRIDE);
+		if (len > 7 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 7 * PREFETCH_STRIDE);
+		if (len > 8 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 8 * PREFETCH_STRIDE);
+		if (len > 9 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 9 * PREFETCH_STRIDE);
+		if (len > 10 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 10 * PREFETCH_STRIDE);
+		if (len > 11 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 11 * PREFETCH_STRIDE);
+		if (len > 12 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 12 * PREFETCH_STRIDE);
+		if (len > 13 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 13 * PREFETCH_STRIDE);
+		if (len > 14 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 14 * PREFETCH_STRIDE);
+		if (len > 15 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 15 * PREFETCH_STRIDE);
+		if (len > 16 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 16 * PREFETCH_STRIDE);
+		if (len > 17 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 17 * PREFETCH_STRIDE);
+		if (len > 18 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 18 * PREFETCH_STRIDE);
+		if (len > 19 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 19 * PREFETCH_STRIDE);
+		if (len > 20 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 20 * PREFETCH_STRIDE);
+		if (len > 21 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 21 * PREFETCH_STRIDE);
+		if (len > 22 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 22 * PREFETCH_STRIDE);
+		if (len > 23 * PREFETCH_STRIDE)
+			prefetchw_offset(addr, 23 * PREFETCH_STRIDE);
+
+		return;
+	}
+
+	rem = len % (4 * PREFETCH_STRIDE);
+	prefetched = len - rem; /* bytes covered by the 256-byte loop below */
+
+	for (i = 0; i <= (s64) len - 256; i += 256)
+		E2K_PREFETCH_L2_256(addr + i);
+
+	if (rem > 0)
+		prefetchw(addr + prefetched);
+	if (rem > PREFETCH_STRIDE)
+		prefetchw_offset(addr + prefetched, PREFETCH_STRIDE);
+	if (rem > 2 * PREFETCH_STRIDE)
+		prefetchw_offset(addr + prefetched, 2 * PREFETCH_STRIDE);
+	if (rem > 3 * PREFETCH_STRIDE)
+		prefetchw_offset(addr + prefetched, 3 * PREFETCH_STRIDE);
+#endif
+}
+
+extern u64 cacheinfo_get_l1d_line_size(void);
+extern void show_cacheinfo(struct seq_file *m);
+extern int get_cpuinfo(char *buffer);
+extern void native_print_machine_type_info(void);
+
+#ifdef CONFIG_PARAVIRT_GUEST
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* pure guest kernel (not paravirtualized) */
+#include
+#else	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel with or without virtualization support */
+
+#define prepare_start_thread_frames(entry, sp) \
+		native_prepare_start_thread_frames(entry, sp)
+
+#define	default_idle()		native_default_idle()
+#define	cpu_relax()		native_cpu_relax()
+#define	cpu_relax_no_resched()	native_cpu_relax_no_resched()
+
+static inline void
+print_machine_type_info(void)
+{
+	native_print_machine_type_info();
+}
+
+#ifdef	CONFIG_VIRTUALIZATION
+/* it is host kernel with virtualization support */
+static inline void
+paravirt_banner(void)
+{
+	printk(KERN_INFO "Booting host kernel with virtualization support\n");
+}
+#else	/* ! CONFIG_VIRTUALIZATION */
+/* it is native kernel without any virtualization */
+static inline void
+paravirt_banner(void)
+{
+	printk(KERN_INFO "Booting native kernel without any virtualization "
+		"support\n");
+}
+#endif	/* CONFIG_VIRTUALIZATION */
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#endif /* !__ASSEMBLY__ */
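+
+/*
+ * Worked example (illustrative, not part of the original source) for the
+ * tail handling in prefetchw_range() above: with len == 320 the 256-byte
+ * loop covers bytes [0, 256), so rem == 64, prefetched == len - rem == 256,
+ * and the single trailing prefetchw(addr + prefetched) touches the one
+ * remaining stride.
+ */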
+
+/*
+ * If there are user pt_regs, return them.
+ * Return the first kernel pt_regs otherwise.
+ *
+ * This way it should be compatible with all other architectures
+ * which always return the first pt_regs structure.
+ */
+#define current_pt_regs() \
+({ \
+	struct pt_regs *__cpr_pt_regs = current_thread_info()->pt_regs; \
+	if (__cpr_pt_regs) \
+		__cpr_pt_regs = find_entry_regs(__cpr_pt_regs); \
+	__cpr_pt_regs; \
+})
+
+#define task_pt_regs(task) \
+({ \
+	struct pt_regs *__tpr_pt_regs = task_thread_info(task)->pt_regs; \
+	if (__tpr_pt_regs) \
+		__tpr_pt_regs = find_entry_regs(__tpr_pt_regs); \
+	__tpr_pt_regs; \
+})
+
+static inline int cpu_max_cores_num(void)
+{
+	if (IS_MACHINE_E1CP)
+		return 1;
+	else if (IS_MACHINE_ES2 || IS_MACHINE_E2C3)
+		return 2;
+	else if (IS_MACHINE_E2S)
+		return 4;
+	else if (IS_MACHINE_E8C || IS_MACHINE_E8C2)
+		return 8;
+	else if (IS_MACHINE_E12C)
+		return 12;
+	else if (IS_MACHINE_E16C)
+		return 16;
+	else
+		BUG();
+}
+
+static inline bool range_includes(unsigned long addr1, size_t size1,
+				  unsigned long addr2, size_t size2)
+{
+	return addr2 >= addr1 && addr2 + size2 <= addr1 + size1;
+}
+
+static inline bool range_intersects(unsigned long addr1, size_t size1,
+				    unsigned long addr2, size_t size2)
+{
+	return addr1 + size1 > addr2 && addr2 + size2 > addr1;
+}
+
+#endif /* _E2K_PROCESSOR_H_ */
diff --git a/arch/e2k/include/asm/prom.h b/arch/e2k/include/asm/prom.h
new file mode 100644
index 0000000..a79f45e
--- /dev/null
+++ b/arch/e2k/include/asm/prom.h
@@ -0,0 +1,17 @@
+#ifndef __E2K_PROM_H
+#define __E2K_PROM_H
+
+#ifdef CONFIG_OF
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT	2
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT	1
+
+#define of_compat_cmp(s1, s2, l)	strncmp((s1), (s2), (l))
+#define of_prop_cmp(s1, s2)		strcasecmp((s1), (s2))
+#define of_node_cmp(s1, s2)		strcmp((s1), (s2))
+
+int of_getintprop_default(struct device_node *np, const char *name, int def);
+
+#define of_node_to_nid(dp)	(NUMA_NO_NODE)
+#endif
+
+#endif /* __E2K_PROM_H */
diff --git a/arch/e2k/include/asm/prot_loader.h b/arch/e2k/include/asm/prot_loader.h
new file mode 100644
index 0000000..a9aae0e
--- /dev/null
+++ b/arch/e2k/include/asm/prot_loader.h
@@ -0,0 +1,176 @@
+#ifndef _E2K_PROT_LOADER_H_
+#define	_E2K_PROT_LOADER_H_
+
+#include
+
+#define	USE_ELF64	0
+
+#define ARGS_AS_ONE_ARRAY
+
+#define	E2k_ELF_ARGV_IND	0
+#define	E2k_ELF_ENVP_IND	1
+#define	E2k_ELF_AUX_IND		2
+#define	E2k_ELF_ARG_NUM_AP	3
+
+#define	DT_PLTGOTSZ	0x7000101b
+#define	DT_INIT_GOT	0x7000101c
+
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
+extern int __read_mostly futex_cmpxchg_enabled;
+#endif
+
+typedef struct {
+	e2k_pl_t	mdd_init_got;
+	e2k_ptr_t	mdd_got;
+} umdd_t;
+#define	MDD_PROT_SIZE	((sizeof(umdd_t) + 15) & ~15)
+
+typedef struct {
+	e2k_pl_lo_t	mdd_init_got;
+	e2k_pl_lo_t	mdd_init;
+	e2k_pl_lo_t	mdd_fini;
+	e2k_pl_lo_t	mdd_start;
+	e2k_ptr_t	mdd_got;
+	/* Descriptors of the memory areas that contain the blanks
+	 * (without external tags) used to form the tagged values
+	 * placed in sections .gott (OT), .gctt (CT) and .gompt (OMP)
+	 * of the loaded module. */
+	e2k_ptr_t	mdd_gtt[3];
+} umdd_old_t;
+#define	MDD_OLD_PROT_SIZE	((sizeof(umdd_old_t) + 15) & ~15)
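+
+/*
+ * Note (illustrative, not part of the original source): the
+ * ((sizeof(x) + 15) & ~15) idiom above rounds a structure size up to a
+ * multiple of 16 bytes; e.g. a 24-byte structure gives
+ * (24 + 15) & ~15 == 32, matching the 16-byte alignment used by the
+ * tagged pointer types.
+ */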
+
+typedef struct {
+	u64	got_addr;
+	u64	got_len;
+	u32	cui;
+	u64	init_got_point;
+	u64	entry_point;
+	u64	init_point;
+	u64	fini_point;
+} kmdd_t;
+
+	/* It's here for compatibility with old loader */
+
+typedef enum {
+	RTL_FT_NONE,		/* The type isn't defined */
+	RTL_FT_EXE,		/* Loading file */
+	RTL_FT_LIB,		/* Dynamic library */
+	RTL_FT_DRV		/* System driver */
+} rtl_FileType_t;
+
+typedef struct rtl_Unit_s rtl_Unit_t;
+
+struct rtl_Unit_s {
+	char		*u_code;	/* Pointer to the code segment */
+	char		*u_data;	/* Pointer to the data segment */
+	char		*u_name;	/* Module name */
+	char		*u_fullname;	/* Module full name */
+	char		*u_type_map;
+	char		*u_type_structs; /* Pointer to the array of structures
+					  * used to build templates */
+	char		*u_type_structs_end; /* Pointer to the end of the
+					  * array of structures */
+	rtl_Unit_t	*u_next;	/* Pointer to the next module */
+	rtl_Unit_t	*u_prev;	/* Pointer to the previous module */
+	char		*u_init;	/* Pointer to the initialization
+					 * function of the module */
+	char		*u_fini;	/* Pointer to the finalization
+					 * function of the module */
+	unsigned long long u_entry;	/* Entry point */
+	rtl_FileType_t	u_mtype;	/* Module type */
+	unsigned int	u_num;		/* Number of the module */
+	unsigned int	u_tnum;		/* Number of the first class of the
+					 * module */
+	unsigned int	u_tcount;	/* Number of classes in the module */
+
+	struct {
+		unsigned long long ub_code;	/* Base address of the code */
+		unsigned long long ub_data;	/* Base address of the data */
+		unsigned long long ub_bss;
+		unsigned long long ub_brk;	/* brk address */
+	} base;
+
+	struct {
+		unsigned long long uc_start;	/* Code start address */
+		unsigned long long uc_dataend;	/* End address of file data */
+		unsigned long long uc_allocend;	/* End address of all data */
+		unsigned long long uc_mapend;	/* End address of mapped memory */
+		unsigned long long uc_mapoff;	/* Initial offset of the code
+						 * in the file */
+		unsigned int uc_prot;	/* Protection flags of code pages */
+	} code;
+
+	struct {
+		unsigned long long ud_start;	/* Initial address of data */
+		unsigned long long ud_dataend;	/* End address of file data */
+		unsigned long long ud_allocend;	/* End address of all data */
+		unsigned long long ud_mapend;	/* End address of mapped memory */
+		unsigned long long ud_mapoff;	/* Initial offset of data in the
+						 * file */
+		unsigned int ud_prot;	/* Protection flags of data pages */
+	} data;
+
+/* ELF file */
+	char	*u_eheader;	/* Pointer to the ELF file header */
+	char	*u_pheader;	/* Pointer to the program header */
+	char	*u_symtab;	/* Pointer to the dynamic symbol table */
+	char	*u_symtab_st;	/* Pointer to the static symbol table */
+	char	*u_strtab;	/* Pointer to the dynamic string table */
+	char	*u_strtab_st;	/* Pointer to the static string table */
+	unsigned int *u_hash;	/* Pointer to the hash table of symbol
+				 * names */
+	char	*u_got;		/* Pointer to the global offset table */
+	char	*u_gtt;		/* Pointer to the global type table */
+	char	*u_type;	/* Pointer to the table of types for C++ */
+	char	*u_dynrel;	/* Pointer to the dynamic relocation
+				 * table */
+	char	*u_gttrel;	/* Pointer to the relocation table for
+				 * GTT */
+	char	*u_typerel;	/* Pointer to the relocation table for
+				 * types */
+	char	*u_dyn;		/* Pointer to the dynamic section */
+	char	*u_tobj;	/* Pointer to the class description
+				 * section */
+	char	*u_tcast;	/* Pointer to the type cast section */
+	char	*u_typed;	/* Pointer to the section of type
+				 * descriptors */
+struct {
+	unsigned long long ul_code;	/* Code segment size */
+	unsigned long long ul_data;	/* Data segment size */
+	unsigned long long ul_strtab;	/* Size of the dynamic string table */
+	unsigned long long ul_strtab_st; /* Size of the static string table */
+	unsigned long long ul_type;	/* Size of the table of types
+					 * for C++ */
+	unsigned long long ul_dynrel;	/* Size of the dynamic relocation
+					 * table */
+	unsigned long long ul_gttrel;	/* Size of the relocation table
+					 * for GTT */
+	unsigned long long ul_typerel;	/* Size of the relocation table
+					 * for types */
+	unsigned int ul_symtab;		/* Number of entries in the dynamic
+					 * symbol table */
+	unsigned int ul_symtab_st;	/* Number of entries in the static
+					 * symbol table */
+	unsigned int ul_hash;		/* Size of the hash table */
+	unsigned int ul_gtt;		/* Size of the global type table */
+	unsigned int ul_tobj;		/* Size of the class data section */
+	unsigned int ul_typed;		/* Size of the type section */
+	unsigned int ul_tcast;		/* Size of the type cast section */
+} len;
+
+};
+
+/* Global Type Table (GTT) correction. C++ stuff handling. */
+extern void rtl32_CorrectionType( rtl_Unit_t *unit_p );
+
+extern long sys_load_cu_elf32_3P(char *name, kmdd_t *mdd);
+extern long sys_load_cu_elf64_3P(char *name, kmdd_t *mdd);
+
+extern long sys_unload_cu_elf32_3P(unsigned long glob_base,
+				   size_t glob_size);
+extern long sys_unload_cu_elf64_3P(unsigned long glob_base,
+				   size_t glob_size);
+
+#endif /* _E2K_PROT_LOADER_H_ */
+
diff --git a/arch/e2k/include/asm/protected_syscalls.h b/arch/e2k/include/asm/protected_syscalls.h
new file mode 100644
index 0000000..ab547be
--- /dev/null
+++ b/arch/e2k/include/asm/protected_syscalls.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/e2k/include/asm/protected_syscalls.h, v 1.0 25/12/2019.
+ *
+ * Copyright (C) 2019 MCST
+ */
+
+/****************** PROTECTED SYSTEM CALL DEBUG DEFINES *******************/
+
+#ifdef CONFIG_PROTECTED_MODE
+
+#include
+
+#undef	DYNAMIC_DEBUG_SYSCALLP_ENABLED
+#define	DYNAMIC_DEBUG_SYSCALLP_ENABLED	1 /* Dynamic prot. syscalls control */
+
+#if (!DYNAMIC_DEBUG_SYSCALLP_ENABLED)
+
+/* Static debug defines (old style): */
+
+#undef	DEBUG_SYSCALLP
+#define	DEBUG_SYSCALLP	0	/* System Calls trace */
+#undef	DEBUG_SYSCALLP_CHECK
+#define	DEBUG_SYSCALLP_CHECK 1	/* Protected System Call args checks/warnings */
+#define PM_SYSCALL_WARN_ONLY	1
+
+#if DEBUG_SYSCALLP
+#define DbgSCP printk
+#else
+#define DbgSCP(...)
+#endif /* DEBUG_SYSCALLP */
+
+#if DEBUG_SYSCALLP_CHECK
+#define DbgSCP_ERR(fmt, ...)	pr_err(fmt, ##__VA_ARGS__)
+#define DbgSCP_WARN(fmt, ...)	pr_warn(fmt, ##__VA_ARGS__)
+#define DbgSCP_ALERT(fmt, ...)	pr_alert(fmt, ##__VA_ARGS__)
+#else
+#define DbgSCP_ERR(...)
+#define DbgSCP_WARN(...)
+#define DbgSCP_ALERT(...)
+#endif /* DEBUG_SYSCALLP_CHECK */
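+
+/*
+ * Usage sketch (illustrative, not part of the original source): with the
+ * static scheme the macros compile out completely when their switch is 0,
+ * so call sites need no #ifdef guards:
+ *
+ *	DbgSCP("syscall %d: size=%zd\n", sys_num, size);
+ *	if (bad_descriptor)
+ *		DbgSCP_ALERT("bad descriptor in arg #%d\n", arg_no);
+ */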
+
+#else /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */
+
+/* Dynamic debug defines (new style):
+ * When enabled, environment variables control syscall
+ * debug/diagnostic output.
+ * To enable a particular control:  export <control>=1
+ * To disable a particular control: export <control>=0
+ *
+ * The options are as follows:
+ *
+ * PM_SC_DBG_MODE_DEBUG - Output basic debug info on system calls to journal;
+ *
+ * PM_SC_DBG_MODE_COMPLEX_WRAPPERS - Output debug info on protected
+ *                            complex syscall wrappers to journal;
+ * PM_SC_DBG_MODE_CHECK - Report an issue to the journal if a syscall arg
+ *                            mismatches the expected format;
+ * PM_SC_DBG_MODE_WARN_ONLY - If an error in arg format is detected,
+ *                            don't block the syscall but run it anyway;
+ * PM_SC_DBG_MODE_CONV_STRUCT - Output to journal debug info on converting
+ *                            structures in syscall args;
+ * PM_SC_DBG_MODE_SIGNALS - Output to system journal debug info related
+ *                            to signal manipulation;
+ * PM_SC_DBG_MODE_NO_ERR_MESSAGES - Blocks diagnostic messages to journal
+ *            (may be useful when running latency-sensitive tests/applications);
+ *
+ * PM_MM_CHECK_4_DANGLING_POINTERS - Enable check for dangling descriptors
+ *                            allocated with 'malloc' (libc specific);
+ *
+ * PM_SC_DBG_MODE_ALL - Enable all debug/diagnostic output to system journal;
+ *
+ * PM_SC_DBG_MODE_DISABLED - Disable debug/diagnostic output to system journal.
+ */
+
+#include "asm/syscalls.h"
+
+#define DbgSCP(fmt, ...) \
+do { \
+	if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_DEBUG)) \
+		pr_info("%s: " fmt, __func__,  ##__VA_ARGS__); \
+} while (0)
+
+#define DbgSCP_ERR(fmt, ...) \
+do { \
+	if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CHECK) \
+		&& !(current->mm->context.pm_sc_debug_mode \
+			& PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \
+		pr_err("%s: " fmt, __func__,  ##__VA_ARGS__); \
+} while (0)
+#define DbgSCP_ALERT(fmt, ...) \
+do { \
+	if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CHECK) \
+		&& !(current->mm->context.pm_sc_debug_mode \
+			& PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \
+		pr_alert("%s: " fmt, __func__,  ##__VA_ARGS__); \
+} while (0)
+#define DbgSCP_WARN(fmt, ...) \
+do { \
+	if (arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_CHECK) \
+		&& !(current->mm->context.pm_sc_debug_mode \
+			& PM_SC_DBG_MODE_NO_ERR_MESSAGES)) \
+		pr_warn("%s: " fmt, __func__,  ##__VA_ARGS__); \
+} while (0)
+
+#define PM_SYSCALL_WARN_ONLY \
+	(arch_init_pm_sc_debug_mode(PM_SC_DBG_MODE_WARN_ONLY))
+	/* Backward compatibility with syscalls */
+	/* NB: Legacy software may have been written in ways incompatible
+	 * with context protection principles.
+	 * For example, tests for syscalls may be of that kind
+	 * to intentionally pass bad arguments to syscalls to check
+	 * if behavior is correct in that case.
+	 * This define, being activated, eases argument check control
+	 * when doing system calls in the protected execution mode:
+	 * - a warning still gets reported to the journal, but
+	 * - the system call is not blocked as it normally would be.
+	 */
+
+#define DEBUG_SYSCALLP_CHECK 1	/* protected syscall args checks enabled */
+
+#endif /* DYNAMIC_DEBUG_SYSCALLP_ENABLED */
+
+/**************************** END of DEBUG DEFINES ***********************/
+
+#else /* #ifndef CONFIG_PROTECTED_MODE */
+
+#define DbgSCP(...)
+#define DbgSCP_ERR(...)
+#define DbgSCP_WARN(...)
+#define DbgSCP_ALERT(...)
+ +#endif /* CONFIG_PROTECTED_MODE */ diff --git a/arch/e2k/include/asm/ptrace-abi.h b/arch/e2k/include/asm/ptrace-abi.h new file mode 100644 index 0000000..3043bd7 --- /dev/null +++ b/arch/e2k/include/asm/ptrace-abi.h @@ -0,0 +1,78 @@ +#ifndef _ASM_PTRACE_ABI_H +#define _ASM_PTRACE_ABI_H + +#define PTRACE_OLDSETOPTIONS 21 + +/* only useful for access 32bit programs / kernels */ +#define PTRACE_GET_THREAD_AREA 25 +#define PTRACE_SET_THREAD_AREA 26 + +#ifdef __x86_64__ +# define PTRACE_ARCH_PRCTL 30 +#endif + +#define PTRACE_SYSEMU 31 +#define PTRACE_SYSEMU_SINGLESTEP 32 + +#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ + +#ifndef __ASSEMBLY__ +#include + +/* configuration/status structure used in PTRACE_BTS_CONFIG and + PTRACE_BTS_STATUS commands. +*/ +struct ptrace_bts_config { + /* requested or actual size of BTS buffer in bytes */ + __u32 size; + /* bitmask of below flags */ + __u32 flags; + /* buffer overflow signal */ + __u32 signal; + /* actual size of bts_struct in bytes */ + __u32 bts_size; +}; +#endif /* __ASSEMBLY__ */ + +#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ +#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ +#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG on buffer overflow + instead of wrapping around */ +#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */ + +#define PTRACE_BTS_CONFIG 40 +/* Configure branch trace recording. + ADDR points to a struct ptrace_bts_config. + DATA gives the size of that buffer. + A new buffer is allocated, if requested in the flags. + An overflow signal may only be requested for new buffers. + Returns the number of bytes read. +*/ +#define PTRACE_BTS_STATUS 41 +/* Return the current configuration in a struct ptrace_bts_config + pointed to by ADDR; DATA gives the size of that buffer. + Returns the number of bytes written. +*/ +#define PTRACE_BTS_SIZE 42 +/* Return the number of available BTS records for draining. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_GET 43 +/* Get a single BTS record. + DATA defines the index into the BTS array, where 0 is the newest + entry, and higher indices refer to older entries. + ADDR is pointing to struct bts_struct (see asm/ds.h). +*/ +#define PTRACE_BTS_CLEAR 44 +/* Clear the BTS buffer. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_DRAIN 45 +/* Read all available BTS records and clear the buffer. + ADDR points to an array of struct bts_struct. + DATA gives the size of that buffer. + BTS records are read from oldest to newest. + Returns number of BTS records drained. +*/ + +#endif /* _ASM_PTRACE_ABI_H */ diff --git a/arch/e2k/include/asm/ptrace.h b/arch/e2k/include/asm/ptrace.h new file mode 100644 index 0000000..8e6b2a5 --- /dev/null +++ b/arch/e2k/include/asm/ptrace.h @@ -0,0 +1,801 @@ +#ifndef _E2K_PTRACE_H +#define _E2K_PTRACE_H + + +#ifndef __ASSEMBLY__ +#include +#include + +#include +#endif /* __ASSEMBLY__ */ + +#include + +#ifndef __ASSEMBLY__ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_USE_AAU +#include +#endif /* CONFIG_USE_AAU */ +#include +#include + +#endif /* __ASSEMBLY__ */ +#include +#include + +#define TASK_TOP TASK_SIZE + +/* + * User process size in MA32 mode. 
+ */ +#define TASK32_SIZE (0xf0000000UL) + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT +#include +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +#include + +#include + +struct mm_struct; + +typedef struct pt_regs ptregs_t; +typedef struct sw_regs sw_regs_t; + +struct e2k_greg { + union { + u64 xreg[2]; /* extended register */ + struct { + u64 base; /* main part of value */ + u64 ext; /* extended part of floating point */ + /* value */ + }; + }; +} __aligned(16); /* must be aligned for stgdq/stqp/ldqp to work */ + +#ifdef CONFIG_GREGS_CONTEXT + +typedef struct global_regs { + struct e2k_greg g[E2K_GLOBAL_REGS_NUM]; + e2k_bgr_t bgr; +} global_regs_t; + +/* Sometimes we only want to save %g16-%g31 (so called "local" gregs) */ +typedef struct local_gregs { + struct e2k_greg g[LOCAL_GREGS_NUM]; + e2k_bgr_t bgr; +} local_gregs_t; + +/* gN and gN+1 global registers hold pointers to current in kernel, */ +/* gN+2 and gN+3 are used for per-cpu data pointer and current cpu id. */ +/* Now N = 16 (see real numbers at asm/glob_regs.h) */ +typedef struct kernel_gregs { + struct e2k_greg g[KERNEL_GREGS_NUM]; +} kernel_gregs_t; +#endif /* CONFIG_GREGS_CONTEXT */ + +#define HW_TC_SIZE 7 + +/* trap_pt_regs->flags */ +#define TRAP_PCSP_FILL_ADJUSTED 0x0001 +#define TRAP_PSP_FILL_ADJUSTED 0x0002 +#define TRAP_SRP_FLAG 0x0004 +#define TRAP_RP_FLAG 0x0008 + +typedef struct trap_pt_regs { + u64 TIR_hi; /* Trap info registers */ + u64 TIR_lo; + int TIR_no; /* current handled TIRs # */ + s8 nr_TIRs; + bool irqs_disabled; /* IRQs are disabled while trap */ + s8 tc_count; + s8 curr_cnt; + char ignore_user_tc; + char tc_called; + char from_sigreturn; + bool is_intc; /* intercept page fault */ + u8 nr_trap; /* number of trap */ + u8 nr_page_fault_exc; /* number of page fault trap */ + int prev_state; + int flags; + e2k_addr_t srp_ip; + e2k_tir_t TIRs[TIR_NUM]; + trap_cellar_t tcellar[HW_TC_SIZE]; + u64 *sbbp; +#ifdef CONFIG_SECONDARY_SPACE_SUPPORT + e2k_mlt_t mlt_state; /* MLT state for binco */ +#endif +} trap_pt_regs_t; + +/* + * WARNING: 'usd_lo' field in the 'pt_regs' structure should have offset + * JB_USD_LO = 22 (in format of long long) as defined by e2k GLIBC header + * /usr/include/bits/setjmp.h + */ +typedef struct pt_regs { + struct pt_regs *next; /* the previous regs structure */ + struct trap_pt_regs *trap; +#ifdef CONFIG_USE_AAU + e2k_aau_t *aau_context; /* aau registers */ +#endif + e2k_stacks_t stacks; /* current state of all stacks */ + /* registers */ + e2k_mem_crs_t crs; /* current chain window regs state */ + e2k_wd_t wd; /* current window descriptor */ + int sys_num; /* to restart sys_call */ + int kernel_entry; + u32 flags; /* Trap occured on the instruction */ + /* with "Store recovery point" flag */ + bool irqs_disabled; /* IRQs are disabled while trap */ + e2k_ctpr_t ctpr1; /* CTPRj for control transfer */ + e2k_ctpr_t ctpr2; + e2k_ctpr_t ctpr3; + e2k_ctpr_hi_t ctpr1_hi; + e2k_ctpr_hi_t ctpr2_hi; + e2k_ctpr_hi_t ctpr3_hi; + u64 lsr; /* loops */ + u64 ilcr; /* initial loop value */ + u64 lsr1; + u64 ilcr1; + int interrupt_vector; +#ifdef CONFIG_EPIC + unsigned int epic_core_priority; +#endif + long sys_rval; + long args[13]; /* unused, arg1, ... 
arg12 */ + long tags; + long rval1; + long rval2; + int return_desk; + int rv1_tag; + int rv2_tag; +#ifdef CONFIG_CLW_ENABLE + int clw_cpu; + int clw_count; + int clw_first; + clw_reg_t us_cl_m[CLW_MASK_WORD_NUM]; + clw_reg_t us_cl_up; + clw_reg_t us_cl_b; +#endif /* CONFIG_CLW_ENABLE */ + /* for bin_comp */ + u64 rpr_lo; + u64 rpr_hi; +#ifdef CONFIG_VIRTUALIZATION + e2k_stacks_t g_stacks; /* current state of guest kernel */ + /* stacks registers */ + bool g_stacks_valid; /* the state of guest kernel stacks */ + /* registers is valid */ + bool g_stacks_active; /* the guest kernel stacks */ + /* registers is in active work */ + bool need_inject; /* flag for unconditional injection */ + /* trap to guest to avoid acces to */ + /* guest user space in trap context */ + bool in_hypercall; /* trap is occured in hypercall */ + bool is_guest_user; /* trap/system call on/from guest */ + /* user */ + unsigned long traps_to_guest; /* mask of traps passed to guest */ + /* and are not yet handled by guest */ + /* need only for host */ + unsigned long deferred_traps; /* mask of deffered traps, which */ + /* cannot be handled immediately */ + /* (for guest) or occured while */ + /* guest is handling previous traps */ + /* (for host, for example interrupts) */ +#endif /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST_KERNEL) + e2k_svd_gregs_t guest_vcpu_state_greg; +#endif /* CONFIG_KVM || CONFIG_KVM_GUEST_KERNEL */ + +#ifdef CONFIG_KERNEL_TIMES_ACCOUNT + scall_times_t *scall_times; +#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */ +} pt_regs_t; + +#define E_MMU_OP_FLAG_PT_REGS 0x2U /* execute_mmu_operations is */ + /* working */ +#define E_MMU_NESTED_OP_FLAG_PT_REGS 0x4U /* nested exception appeared */ + /* while */ + /* execute_mmu_operations is */ + /* working */ +#define SIG_CALL_HANDLER_FLAG_PT_REGS 0x8U +#define SIG_RESTART_SYSCALL_FLAG_PT_REGS 0x10U +#define PROT_10_FLAG_PT_REGS 0x20U /* marked 10 pm sys_call */ +#define TRAP_AS_INTC_EMUL_PT_REGS 0x0100 /* trap or system call */ + /* is on or from guest */ +#define GUEST_FLAG_PT_REGS 0x10000U /* Trap occurred on the */ + /* guest and should be */ + /* handled by guest */ +#define LIGHT_HYPERCALL_FLAG_PT_REGS 0x20000U /* Trap occurred in */ + /* hypercall */ + +static inline struct trap_pt_regs * +pt_regs_to_trap_regs(struct pt_regs *regs) +{ + return PTR_ALIGN((void *) regs + sizeof(*regs), 8); +} + +#ifdef CONFIG_USE_AAU +static inline e2k_aau_t * +pt_regs_to_aau_regs(struct pt_regs *regs) +{ + struct trap_pt_regs *trap; + + trap = pt_regs_to_trap_regs(regs); + + return PTR_ALIGN((void *) trap + sizeof(*trap), 8); +} +#else /* ! 
CONFIG_USE_AAU */ +static inline e2k_aau_t * +pt_regs_to_aau_regs(struct pt_regs *regs) +{ + return NULL; +} +#endif +static inline bool +is_sys_call_pt_regs(struct pt_regs *regs) +{ + return regs->trap == NULL && regs->kernel_entry != 0; +} +static inline bool +is_trap_pt_regs(struct pt_regs *regs) +{ + return regs->trap != NULL && regs->kernel_entry == 0; +} + +typedef struct sw_regs { + e2k_mem_crs_t crs; + e2k_addr_t top; /* top of all user data stacks */ + e2k_usd_lo_t usd_lo; + e2k_usd_hi_t usd_hi; + e2k_psp_lo_t psp_lo; /* procedure stack pointer(as empty)*/ + e2k_psp_hi_t psp_hi; + e2k_pcsp_lo_t pcsp_lo; /* procedure chaine stack pointer */ + e2k_pcsp_hi_t pcsp_hi; /* (as empty) */ + e2k_upsr_t upsr; + e2k_fpcr_t fpcr; + e2k_fpsr_t fpsr; + e2k_pfpfr_t pfpfr; + e2k_cutd_t cutd; + +#ifdef CONFIG_VIRTUALIZATION + struct task_struct *prev_task; /* task switch to current from */ +#endif /* CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_GREGS_CONTEXT + struct global_regs gregs; +#endif + + /* + * These two are shared by monitors and breakpoints. Monitors + * are accessed by userspace directly through sys_ptrace and + * breakpoints are accessed through CONFIG_HW_BREAKPOINT layer + * (i.e. ptrace does not write directly to breakpoint registers). + * + * For this reason breakpoints related registers are moved out + * from sw_regs as they are managed by arch-independent layer + * instead of arch-dependent switch_to() function. For dibsr and + * ddbsr only monitors-related fields are accessed in switch_to(). + */ + e2k_dibsr_t dibsr; + e2k_ddbsr_t ddbsr; + + u64 dimar0; + u64 dimar1; + e2k_dimcr_t dimcr; + u64 ddmar0; + u64 ddmar1; + e2k_ddmcr_t ddmcr; + e2k_dimtp_t dimtp; + + /* + * in the case we switch from/to a BINCO task, we + * need to backup/restore these registers in task switching + */ + u64 cs_lo; + u64 cs_hi; + u64 ds_lo; + u64 ds_hi; + u64 es_lo; + u64 es_hi; + u64 fs_lo; + u64 fs_hi; + u64 gs_lo; + u64 gs_hi; + u64 ss_lo; + u64 ss_hi; + + /* Additional registers for BINCO */ + u64 rpr_lo; + u64 rpr_hi; +#ifdef CONFIG_TC_STORAGE + u64 tcd; +#endif +} sw_regs_t; + +typedef struct jmp_info { + u64 sigmask; + u64 ip; + u64 cr1lo; + u64 pcsplo; + u64 pcsphi; + u32 pcshtp; + u32 br; + u64 usd_lo; + u32 reserved; + u32 wd_hi32; +} e2k_jmp_info_t; + +#define __HAVE_ARCH_KSTACK_END + +static inline int kstack_end(void *addr) +{ + return (e2k_addr_t)addr >= READ_SBR_REG_VALUE(); +} + +#define NATIVE_SAVE_DAM(__dam) \ +do { \ + int i; \ + e2k_addr_t addr = (REG_DAM_TYPE << REG_DAM_TYPE_SHIFT); \ + for (i = 0; i < DAM_ENTRIES_NUM; i++) \ + (__dam)[i] = NATIVE_READ_DAM_REG(addr | \ + (i << REG_DAM_N_SHIFT)); \ +} while (0) + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. 
*/ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + +/* e2k extentions */ +#define PTRACE_PEEKPTR 0x100 +#define PTRACE_POKEPTR 0x101 +#define PTRACE_PEEKTAG 0x120 +#define PTRACE_POKETAG 0x121 +#define PTRACE_EXPAND_STACK 0x130 + +#define from_trap(regs) ((regs)->trap != NULL) +#define from_syscall(regs) (!from_trap(regs)) + +static inline u64 user_stack_pointer(struct pt_regs *regs) +{ + e2k_usd_lo_t usd_lo = regs->stacks.usd_lo; + u64 sp; + + if (!AS(usd_lo).p) { + sp = AS(usd_lo).base; + } else { + e2k_pusd_lo_t pusd_lo; + AW(pusd_lo) = AW(usd_lo); + sp = AS(pusd_lo).base + (regs->stacks.top & ~0xffffffffULL); + } + + return sp; +} + +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return AS(regs->stacks.usd_lo).base; +} + +static inline void native_atomic_load_osgd_to_gd(void) +{ + E2K_LOAD_OSGD_TO_GD(); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n) + +{ + unsigned long addr = kernel_stack_pointer(regs); + + addr += n * sizeof(unsigned long); + + if (addr >= kernel_stack_pointer(regs) && addr < regs->stacks.top) + return *(unsigned long *) addr; + else + return 0; +} + +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); + +#define REGS_B_REGISTER_FLAG (1 << 30) +#define REGS_PRED_REGISTER_FLAG (1 << 29) +#define REGS_TIR1_REGISTER_FLAG (1 << 28) + +extern unsigned long regs_get_register(const struct pt_regs *regs, + unsigned int offset); + +#define from_trap(regs) ((regs)->trap != NULL) +#define from_syscall(regs) (!from_trap(regs)) + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + /* System call audit case: %b[0] is not ready yet */ + if (from_syscall(regs)) + return regs->sys_rval; + + /* kretprobe case - get %b[0] */ + return regs_get_register(regs, 0 | REGS_B_REGISTER_FLAG); +} + +static inline e2k_addr_t +native_check_is_user_address(struct task_struct *task, e2k_addr_t address) +{ + if (likely(address < NATIVE_TASK_SIZE)) + return 0; + pr_err("Address 0x%016lx is native kernel address\n", + address); + return -1; +} +#define NATIVE_IS_GUEST_USER_ADDRESS_TO_PVA(task, address) \ + false /* native kernel has not guests */ +#define NATIVE_IS_GUEST_ADDRESS_TO_HOST(address) \ + false /* native kernel has not guests */ + +/* guest page table is pseudo PT and only host PT is used */ +/* to translate any guest addresses */ +static inline void +native_print_host_user_address_ptes(struct mm_struct *mm, e2k_addr_t address) +{ + /* this function is actual only for guest */ + /* native kernel can not be guest kernel */ +} + +/** + * calculate_e2k_dstack_parameters - get user data stack free area parameters + * @stacks: stack registers + * @sp: stack pointer will be returned here + * @stack_size: free area size will be returned here + * @top: stack area top will be returned here + */ +static inline void calculate_e2k_dstack_parameters( + const struct e2k_stacks *stacks, + u64 *sp, u64 *stack_size, u64 *top) +{ + e2k_usd_lo_t usd_lo = stacks->usd_lo; + e2k_usd_hi_t usd_hi = stacks->usd_hi; + unsigned long sbr = stacks->top; + + 
if (top) + *top = sbr; + + if (AS(usd_lo).p) { + e2k_pusd_lo_t pusd_lo; + e2k_pusd_hi_t pusd_hi; + unsigned long usbr; + + usbr = sbr & ~E2K_PROTECTED_STACK_BASE_MASK; + AW(pusd_lo) = AW(usd_lo); + AW(pusd_hi) = AW(usd_hi); + *sp = usbr + (AS(pusd_lo).base & ~E2K_ALIGN_PUSTACK_MASK); + *stack_size = AS(pusd_hi).size & ~E2K_ALIGN_PUSTACK_MASK; + } else { + *sp = AS(usd_lo).base; + *stack_size = AS(usd_hi).size; + } +} + +/* virtualization support */ +#include + +struct signal_stack_context { + struct pt_regs regs; + struct trap_pt_regs trap; + struct k_sigaction sigact; + e2k_aau_t aau_regs; +#ifdef CONFIG_GREGS_CONTEXT + struct local_gregs l_gregs; +#endif + u64 sbbp[SBBP_ENTRIES_NUM]; + struct pv_vcpu_ctxt vcpu_ctxt; +}; + +#define __signal_pt_regs_last(ti) \ +({ \ + struct pt_regs __user *__sig_regs; \ + if (ti->signal_stack.used) { \ + __sig_regs = &((struct signal_stack_context __user *) \ + (ti->signal_stack.base))->regs; \ + } else { \ + __sig_regs = NULL; \ + } \ + __sig_regs; \ +}) +#define signal_pt_regs_last() __signal_pt_regs_last(current_thread_info()) + +#define signal_pt_regs_first() \ +({ \ + struct pt_regs __user *__sig_regs; \ + if (current_thread_info()->signal_stack.used) { \ + __sig_regs = &((struct signal_stack_context __user *) \ + (current_thread_info()->signal_stack.base + \ + current_thread_info()->signal_stack.used - \ + sizeof(struct signal_stack_context)))->regs; \ + } else { \ + __sig_regs = NULL; \ + } \ + __sig_regs; \ +}) + +#define signal_pt_regs_for_each(__regs) \ + for (__regs = signal_pt_regs_first(); \ + __regs && (u64) __regs >= \ + current_thread_info()->signal_stack.base; \ + __regs = (struct pt_regs __user *) ((void *) __regs - \ + sizeof(struct signal_stack_context))) + +/** + * signal_pt_regs_to_trap - to be used inside of signal_pt_regs_for_each(); + * will return trap_pt_regs pointer corresponding + * to the passed pt_regs structure. + * @__u_regs: pt_regs pointer returned by signal_pt_regs_for_each() + * + * EXAMPLE: + * signal_pt_regs_for_each(u_regs) { + * struct trap_pt_regs __user *u_trap = signal_pt_regs_to_trap(u_regs); + * if (IS_ERR(u_trap)) + * ;// Caught -EFAULT from get_user() + * if (IS_NULL(u_trap)) + * ;// Not interrupt pt_regs + */ +#define signal_pt_regs_to_trap(__u_regs) \ +({ \ + struct pt_regs __user *__spr_u_regs = (__u_regs); \ + struct trap_pt_regs __user *u_trap; \ + \ + if (__get_user(u_trap, &__spr_u_regs->trap)) {\ + u_trap = ERR_PTR(-EFAULT); \ + } else if (u_trap) { \ + u_trap = (struct trap_pt_regs __user *) \ + ((void __user *) __spr_u_regs - \ + offsetof(struct signal_stack_context, regs) + \ + offsetof(struct signal_stack_context, trap)); \ + } \ + u_trap; \ +}) + +#define arch_ptrace_stop_needed(...) (true) +/* current->thread_info->pt_regs may be zero if ptrace_stop() + * was called from load_elf_binary() (it happens if gdb has + * set PTRACE_O_TRACEEXEC flag). */ +#define arch_ptrace_stop(...) 
\ +do { \ + struct pt_regs *__pt_regs = current_thread_info()->pt_regs; \ + if (__pt_regs) { \ + user_hw_stacks_copy_full(&__pt_regs->stacks, \ + __pt_regs, NULL); \ + SAVE_AAU_REGS_FOR_PTRACE(__pt_regs, current_thread_info()); \ + if (!paravirt_enabled()) { \ + /* FIXME: it need implement for guest kernel */ \ + NATIVE_SAVE_BINCO_REGS_FOR_PTRACE(__pt_regs); \ + } \ + } \ +} while (0) + +static inline int syscall_from_kernel(const struct pt_regs *regs) +{ + return from_syscall(regs) && !user_mode(regs); +} + +static inline int syscall_from_user(const struct pt_regs *regs) +{ + return from_syscall(regs) && user_mode(regs); +} + +static inline int trap_from_kernel(const struct pt_regs *regs) +{ + return from_trap(regs) && !user_mode(regs); +} + +static inline int trap_from_user(const struct pt_regs *regs) +{ + return from_trap(regs) && user_mode(regs); +} + +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + AS(regs->crs.cr0_hi).ip = val >> 3; +} + +/* IMPORTANT: this only works after parse_TIR_registers() + * has set trap->TIR_lo. So this doesn't work for NMIs. */ +static inline unsigned long get_trap_ip(const struct pt_regs *regs) +{ + e2k_tir_lo_t tir_lo; + + tir_lo.TIR_lo_reg = regs->trap->TIR_lo; + return tir_lo.TIR_lo_ip; +} + +static inline unsigned long get_return_ip(const struct pt_regs *regs) +{ + return (unsigned long) (AS(regs->crs.cr0_hi).ip << 3); +} + +static inline unsigned long instruction_pointer(const struct pt_regs *regs) +{ + return get_return_ip(regs); +} + + +#ifdef CONFIG_DEBUG_PT_REGS +#define CHECK_PT_REGS_LOOP(regs) \ +({ \ + if ((regs) != NULL) { \ + if ((regs)->next == (regs)) { \ + pr_err("LOOP in regs list: regs 0x%px next 0x%px\n", \ + (regs), (regs)->next); \ + dump_stack(); \ + } \ + } \ +}) +#define CHECK_PT_REGS_CHAIN(regs, bottom, top) \ +({ \ + pt_regs_t *next_regs = (regs); \ + pt_regs_t *prev_regs = (pt_regs_t *)(bottom); \ + while ((next_regs) != NULL) { \ + if ((bottom) < TASK_SIZE) \ + break; \ + if ((e2k_addr_t)next_regs > (e2k_addr_t)((top) - sizeof(pt_regs_t))) { \ + pr_err("%s(): next regs %px above top 0x%llx\n", \ + __func__, next_regs, \ + (top) - sizeof(pt_regs_t)); \ + print_pt_regs(next_regs); \ + WARN_ON(true); \ + } else if ((e2k_addr_t)next_regs == (e2k_addr_t)prev_regs) { \ + pr_err("%s(): next regs %px is same as previous %px\n", \ + __func__, next_regs, prev_regs); \ + print_pt_regs(next_regs); \ + BUG_ON(true); \ + } else if ((e2k_addr_t)next_regs < (e2k_addr_t)prev_regs) { \ + pr_err("%s(): next regs %px below previous %px\n", \ + __func__, next_regs, prev_regs); \ + print_pt_regs(next_regs); \ + BUG_ON(true); \ + } \ + prev_regs = next_regs; \ + next_regs = next_regs->next; \ + } \ +}) + +/* + * The hook to find 'ct' command ( return to user) + * be interrapted with cloused interrupt / HARDWARE problem #59886/ + */ +#define CHECK_CT_INTERRUPTED(regs) \ +({ \ + struct pt_regs *__regs = regs; \ + do { \ + if (__call_from_user(__regs) || __trap_from_user(__regs)) \ + break; \ + __regs = __regs->next; \ + } while (__regs); \ + if (!__regs) { \ + printk(" signal delivery started on kernel instruction" \ + " top = 0x%lx TIR_lo=0x%lx " \ + " crs.cr0_hi.ip << 3 = 0x%lx\n", \ + (regs)->stacks.top, (regs)->TIR_lo, \ + instruction_pointer(regs)); \ + dump_stack(); \ + } \ +}) +#else /* ! 
CONFIG_DEBUG_PT_REGS */ +#define CHECK_PT_REGS_LOOP(regs) /* nothing */ +#define CHECK_PT_REGS_CHAIN(regs, bottom, top) +#define CHECK_CT_INTERRUPTED(regs) +#endif /* CONFIG_DEBUG_PT_REGS */ + +static inline struct pt_regs *find_user_regs(const struct pt_regs *regs) +{ + do { + CHECK_PT_REGS_LOOP(regs); + + if (user_mode(regs)) + break; + + regs = regs->next; + } while (regs); + + return (struct pt_regs *) regs; +} + +/* + * Finds the first pt_regs corresponding to the kernel entry + * (i.e. user mode pt_regs) if this is a user thread. + * + * Finds the first pt_regs structure if this is a kernel thread. + */ +static inline struct pt_regs *find_entry_regs(const struct pt_regs *regs) +{ + const struct pt_regs *prev_regs; + + do { + CHECK_PT_REGS_LOOP(regs); + + if (user_mode(regs)) + goto found; + + prev_regs = regs; + regs = regs->next; + } while (regs); + + /* Return the first pt_regs structure for kernel threads */ + regs = prev_regs; + +found: + return (struct pt_regs *) regs; +} + +static inline struct pt_regs *find_trap_regs(const struct pt_regs *regs) +{ + while (regs) { + CHECK_PT_REGS_LOOP(regs); + + if (from_trap(regs)) + break; + + regs = regs->next; + }; + + return (struct pt_regs *) regs; +} + +#define count_trap_regs(regs) \ +({ \ + struct pt_regs *__regs = regs; \ + int traps = 0; \ + while (__regs) { \ + if (from_trap(regs)) \ + traps++; \ + __regs = __regs->next; \ + } \ + traps; \ +}) +#define current_is_in_trap() \ + (count_trap_regs(current_thread_info()->pt_regs) > 0) + +#define count_user_regs(regs) \ +({ \ + struct pt_regs *__regs = regs; \ + int regs_num = 0; \ + while (__regs) { \ + CHECK_PT_REGS_LOOP(__regs); \ + if (user_mode(regs)) \ + regs_num++; \ + __regs = __regs->next; \ + } \ + regs_num; \ +}) + +#if defined(CONFIG_SMP) +extern unsigned long profile_pc(struct pt_regs *regs); +#else +#define profile_pc(regs) instruction_pointer(regs) +#endif +extern void show_regs(struct pt_regs *); +extern int syscall_trace_entry(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); + +#define arch_has_single_step() (1) + +#endif /* __ASSEMBLY__ */ +#endif /* _E2K_PTRACE_H */ diff --git a/arch/e2k/include/asm/pv_info.h b/arch/e2k/include/asm/pv_info.h new file mode 100644 index 0000000..58f634c --- /dev/null +++ b/arch/e2k/include/asm/pv_info.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2016 MCST, Salavat Gilyazov atic@mcst.ru + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_E2K_PV_INFO_H +#define __ASM_E2K_PV_INFO_H + +#include + +/* + * e2k kernel general info + */ + +/* + * Even 32-bit applications must have big TASK_SIZE since hardware + * stacks are placed behind the 4Gb boundary. + */ +/* Virtual space is splitted into two parts: user and kernel spaces. 
*/ +/* Kernel virtual space takes high area and starts from the following base */ +#define NATIVE_KERNEL_VIRTUAL_SPACE_BASE 0x0000d00000000000 + +/* direct mapping of all physical memory starts now from kernel virtual */ +/* space beginning, but cannot map all possible 2**48 bytes */ +#define NATIVE_PAGE_OFFSET NATIVE_KERNEL_VIRTUAL_SPACE_BASE + +/* Users virtual spaces take low area from 0 right up to kernel base */ +#define NATIVE_TASK_SIZE NATIVE_KERNEL_VIRTUAL_SPACE_BASE + +#ifdef CONFIG_MMU_SEP_VIRT_SPACE +/* Users Separate Page Tables virtual base at the top of user space */ +/* 0x0000 cf80 0000 0000 */ +#define USER_VPTB_BASE_SIZE PGDIR_SIZE +#define USER_VPTB_BASE_ADDR (NATIVE_TASK_SIZE - USER_VPTB_BASE_SIZE) +#else /* ! CONFIG_MMU_SEP_VIRT_SPACE */ +#define USER_VPTB_BASE_SIZE 0 +#define USER_VPTB_BASE_ADDR KERNEL_VPTB_BASE_ADDR +#endif /* CONFIG_MMU_SEP_VIRT_SPACE */ + +/* virtualization support */ +#include + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ +#define IS_HOST_KERNEL_ADDRESS(addr) ((addr) >= NATIVE_TASK_SIZE) +#define IS_HOST_USER_ADDRESS(addr) ((addr) < NATIVE_TASK_SIZE) +#define IS_GUEST_KERNEL_ADDRESS(addr) false +#define IS_GUEST_USER_ADDRESS(addr) false +#define IS_GUEST_PHYS_ADDRESS(addr) false +#else /* CONFIG_VIRTUALIZATION */ +/* it is host kernel with virtualization support */ +/* or paravirtualized host and guest kernel */ +/* or pure guest kernel (not paravirtualized based on pv_ops) */ +#define HOST_TASK_SIZE (HOST_PAGE_OFFSET) +#define GUEST_TASK_SIZE (GUEST_PAGE_OFFSET) +#define HOST_TASK_TOP HOST_TASK_SIZE +#define GUEST_TASK_TOP GUEST_TASK_SIZE +#define BOOT_HOST_TASK_SIZE HOST_TASK_SIZE +#define BOOT_GUEST_TASK_SIZE GUEST_TASK_SIZE + +#define IS_HOST_KERNEL_ADDRESS(addr) ((addr) >= HOST_TASK_SIZE) +#define IS_HOST_USER_ADDRESS(addr) ((addr) < HOST_TASK_SIZE) +#define IS_GUEST_KERNEL_ADDRESS(addr) ((addr) >= GUEST_TASK_SIZE && \ + (addr) < HOST_TASK_SIZE) +#define IS_GUEST_USER_ADDRESS(addr) ((addr) < GUEST_TASK_SIZE) +#define IS_GUEST_PHYS_ADDRESS(addr) \ + ((e2k_addr_t)(addr) >= GUEST_PAGE_OFFSET && \ + (e2k_addr_t)(addr) < GUEST_PAGE_OFFSET + MAX_PM_SIZE) +#endif /* ! CONFIG_VIRTUALIZATION */ + +#ifdef CONFIG_PARAVIRT_GUEST +/* it is paravirtualized host and guest kernel */ +#define is_paravirt_kernel() true /* it is paravirtualized */ + /* host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* pure guest kernel (not paravirtualized) */ +#define is_paravirt_kernel() false +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or host kernel with virtualization support */ +#define TASK_SIZE NATIVE_TASK_SIZE + +#define paravirt_enabled() (IS_HV_GM() || false) +#define boot_paravirt_enabled() (BOOT_IS_HV_GM() || false) +#define is_paravirt_kernel() false + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without virtualization support */ +#define KERNEL_VIRTUAL_SPACE_BASE NATIVE_KERNEL_VIRTUAL_SPACE_BASE +#define PAGE_OFFSET NATIVE_PAGE_OFFSET +#define VMALLOC_START NATIVE_VMALLOC_START +#define VMALLOC_END NATIVE_VMALLOC_END +#define VMEMMAP_START NATIVE_VMEMMAP_START +#define VMEMMAP_END NATIVE_VMEMMAP_END + +#define BOOT_KERNEL_VIRTUAL_SPACE_BASE KERNEL_VIRTUAL_SPACE_BASE +#define BOOT_PAGE_OFFSET PAGE_OFFSET +#define BOOT_TASK_SIZE TASK_SIZE +#endif /* ! 
CONFIG_VIRTUALIZATION */
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __ASM_E2K_PV_INFO_H */
diff --git a/arch/e2k/include/asm/qspinlock.h b/arch/e2k/include/asm/qspinlock.h
new file mode 100644
index 0000000..56f437f
--- /dev/null
+++ b/arch/e2k/include/asm/qspinlock.h
@@ -0,0 +1,98 @@
+#ifndef _ASM_E2K_QSPINLOCK_H
+#define _ASM_E2K_QSPINLOCK_H
+
+#include
+#include
+
+/*
+ * Ideally, the spinning time should be at least a few times
+ * the typical cacheline load time from memory (~100 cycles on e2k),
+ * and atomic_cond_read_relaxed() iteration takes ~20 cycles.
+ */
+#define _Q_PENDING_LOOPS (1 << 5)
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+
+# define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A store_release() on the least-significant byte that also
+ * acts as a hardware memory barrier on device writes (in place
+ * of dropped mmiowb()).
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	store_release(&lock->locked, 0);
+}
+
+#else
+
+#include
+#include
+
+#include
+#include
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi &&
+			READ_ONCE(*ptr) == val)
+		HYPERVISOR_pv_wait();
+}
+
+
+static __always_inline void pv_kick(int cpu)
+{
+	if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi)
+		HYPERVISOR_pv_kick(cpu);
+}
+
+extern void __pv_init_lock_hash(void);
+
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A store_release() on the least-significant byte that also
+ * acts as a hardware memory barrier on device writes (in place
+ * of dropped mmiowb()).
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	store_release(&lock->locked, 0);
+}
+
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi)
+		__pv_queued_spin_lock_slowpath(lock, val);
+	else
+		native_queued_spin_lock_slowpath(lock, val);
+}
+
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+# define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	if (cpu_has(CPU_FEAT_ISET_V6) && READ_CORE_MODE_REG().gmi)
+		__pv_queued_spin_unlock(lock);
+	else
+		native_queued_spin_unlock(lock);
+}
+
+# define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+	return false;
+}
+
+#endif /* !CONFIG_PARAVIRT_SPINLOCKS */
+
+#include
+
+#endif /* _ASM_E2K_QSPINLOCK_H */
diff --git a/arch/e2k/include/asm/qspinlock_paravirt.h b/arch/e2k/include/asm/qspinlock_paravirt.h
new file mode 100644
index 0000000..98a931c
--- /dev/null
+++ b/arch/e2k/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+#endif
diff --git a/arch/e2k/include/asm/regs_state.h b/arch/e2k/include/asm/regs_state.h
new file mode 100644
index 0000000..a23e14e
--- /dev/null
+++ b/arch/e2k/include/asm/regs_state.h
@@ -0,0 +1,1317 @@
+#ifndef _E2K_REGS_STATE_H
+#define _E2K_REGS_STATE_H
+
+/*
+ * Some macros (those starting with PREFIX_) can be used in three modes and
+ * can operate on virtualized or paravirtualized functions, resources and
+ * other macros. Such macros should not be used directly; instead use:
+ *	NATIVE_XXX macros for native, host and hypervisor kernel mode,
+ *		in all functions which can be called only in native
+ *		running mode;
+ *	KVM_XXX macros for the virtualized guest kernel, in all functions
+ *		which can be called only in guest running mode;
+ *	PV_XXX macros for the paravirtualized kernel; such macros use
+ *		pv_ops structures to call paravirtualized actions.
+ *		They can be used in all functions which can be called
+ *		only in paravirtualized running mode, where the
+ *		pv_ops_yyy structures exist.
+ *	XXX (pure macros without prefix) for host and guest virtualized
+ *		kernel mode, in all functions which can be called in both
+ *		host and guest running modes. These macros depend on the
+ *		configuration (compilation) mode and turn into one of the
+ *		above three macro types:
+ *		if the kernel is configured and compiled as native, with or
+ *		without virtualization support, then XXX turns into
+ *		NATIVE_XXX;
+ *		if the kernel is configured and compiled as pure guest, then
+ *		XXX turns into KVM_XXX;
+ *		if the kernel is configured and compiled as paravirtualized,
+ *		able to run both as host and as guest, then XXX turns
+ *		into PV_XXX.
+ * The PV_TYPE argument of these macros is the prefix and can be, as above:
+ *	NATIVE	native kernel with or without virtualization support
+ *	KVM	guest kernel (can run only as a paravirtualized
+ *		guest kernel)
+ *	PV	paravirtualized kernel (can run as host and as guest
+ *		paravirtualized kernels)
+ */
+
+#include
+#include
+#include
+#include
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_MLT_STORAGE
+#include
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#include
+
+//#define CONTROL_USD_BASE_SIZE
+
+#ifdef CONTROL_USD_BASE_SIZE
+#define CHECK_USD_BASE_SIZE(regs) \
+({ \
+	u64 base = (regs)->stacks.usd_lo.USD_lo_base; \
+	u64 size = (regs)->stacks.usd_hi.USD_hi_size; \
+	if ((base - size) & ~PAGE_MASK) { \
+		printk("Not page size aligned USD_base 0x%lx - " \
+			"USD_size 0x%lx = 0x%lx\n", \
+			base, size, base - size); \
+		dump_stack(); \
+	} \
+})
+#else
+#define CHECK_USD_BASE_SIZE(regs)
+#endif
+
+/* set/restore some kernel state registers to initial state */
+
+static inline void native_set_kernel_CUTD(void)
+{
+	e2k_cutd_t k_cutd;
+
+	k_cutd.CUTD_reg = 0;
+	k_cutd.CUTD_base = (e2k_addr_t)kernel_CUT;
+	NATIVE_NV_NOIRQ_WRITE_CUTD_REG(k_cutd);
+}
+
+#define NATIVE_CLEAR_DAM \
+({ \
+	NATIVE_SET_MMUREG(dam_inv, 0); \
+})
+
+/*
+ * Macros to save and restore registers.
+ */
+
+#define COPY_U_HW_STACKS_FROM_TI(__stacks, ti) \
+do { \
+	e2k_psp_lo_t __psp_lo = ti->tmp_user_stacks.psp_lo; \
+	e2k_psp_hi_t __psp_hi = ti->tmp_user_stacks.psp_hi; \
+	e2k_pshtp_t __pshtp = ti->tmp_user_stacks.pshtp; \
+	e2k_pcsp_lo_t __pcsp_lo = ti->tmp_user_stacks.pcsp_lo; \
+	e2k_pcsp_hi_t __pcsp_hi = ti->tmp_user_stacks.pcsp_hi; \
+	e2k_pcshtp_t __pcshtp = ti->tmp_user_stacks.pcshtp; \
+\
+	(__stacks)->psp_lo = __psp_lo; \
+	(__stacks)->psp_hi = __psp_hi; \
+	(__stacks)->psp_hi.PSP_hi_ind += GET_PSHTP_MEM_INDEX(__pshtp); \
+	(__stacks)->pcsp_lo = __pcsp_lo; \
+	(__stacks)->pcsp_hi = __pcsp_hi; \
+	(__stacks)->pcsp_hi.PCSP_hi_ind += PCSHTP_SIGN_EXTEND(__pcshtp); \
+	(__stacks)->pshtp = __pshtp; \
+	(__stacks)->pcshtp = __pcshtp; \
+} while (0)
+
+#define COPY_U_HW_STACKS_TO_STACKS(__stacks_to, __stacks_from) \
+do { \
+	e2k_stacks_t *stacks_to = (__stacks_to); \
+	e2k_stacks_t *stacks_from = (__stacks_from); \
+\
+	stacks_to->psp_lo = stacks_from->psp_lo; \
+	stacks_to->psp_hi = stacks_from->psp_hi; \
+	stacks_to->pcsp_lo = stacks_from->pcsp_lo; \
+	stacks_to->pcsp_hi = stacks_from->pcsp_hi; \
+	stacks_to->pshtp = stacks_from->pshtp; \
+	stacks_to->pcshtp = stacks_from->pcshtp; \
+} while (0)
+
+/* usd regs are saved already */
+#define PREFIX_SAVE_STACK_REGS(PV_TYPE, regs, ti, from_ti, flushc) \
+do { \
+	/* This flush reserves space for the next trap. */ \
+	if (flushc) \
+		PV_TYPE##_FLUSHC; \
+	if (from_ti) { \
+		COPY_U_HW_STACKS_FROM_TI(&(regs)->stacks, ti); \
+	} else { \
+		u64 pshtp; \
+		u32 pcshtp; \
+		u64 psp_hi; \
+		u64 pcsp_hi; \
+		pshtp = PV_TYPE##_NV_READ_PSHTP_REG_VALUE(); \
+		pcshtp = PV_TYPE##_READ_PCSHTP_REG_SVALUE(); \
+		(regs)->stacks.psp_lo.PSP_lo_half = \
+			PV_TYPE##_NV_READ_PSP_LO_REG_VALUE(); \
+		psp_hi = PV_TYPE##_NV_READ_PSP_HI_REG_VALUE(); \
+		pcsp_hi = PV_TYPE##_NV_READ_PCSP_HI_REG_VALUE(); \
+		(regs)->stacks.pcsp_lo.PCSP_lo_half = \
+			PV_TYPE##_NV_READ_PCSP_LO_REG_VALUE();\
+		if (!flushc) \
+			pcsp_hi += pcshtp; \
+		psp_hi += GET_PSHTP_MEM_INDEX((e2k_pshtp_t)pshtp); \
+		AW((regs)->stacks.pshtp) = pshtp; \
+		(regs)->stacks.pcshtp = pcshtp; \
+		AW((regs)->stacks.psp_hi) = psp_hi; \
+		AW((regs)->stacks.pcsp_hi) = pcsp_hi; \
+	} \
+	AW((regs)->crs.cr0_lo) = PV_TYPE##_NV_READ_CR0_LO_REG_VALUE(); \
+	AW((regs)->crs.cr0_hi) = PV_TYPE##_NV_READ_CR0_HI_REG_VALUE(); \
+	AW((regs)->crs.cr1_lo) = PV_TYPE##_NV_READ_CR1_LO_REG_VALUE(); \
+	AW((regs)->crs.cr1_hi) = PV_TYPE##_NV_READ_CR1_HI_REG_VALUE(); \
+	AW((regs)->wd) = PV_TYPE##_READ_WD_REG_VALUE(); \
+	CHECK_USD_BASE_SIZE(regs); \
+} while (0)
+
+/* Save stack registers on kernel native/host/hypervisor mode */
+#define NATIVE_SAVE_STACK_REGS(regs, ti, from_ti, flushc) \
+	PREFIX_SAVE_STACK_REGS(NATIVE, regs, ti, from_ti, flushc)
+
+#define STORE_USER_REGS_TO_THREAD_INFO(thread_info, \
+			stk_bottom, stk_top, stk_sz) \
+({ \
+	(thread_info)->u_stack.bottom = stk_bottom; \
+	(thread_info)->u_stack.top = stk_top; \
+	(thread_info)->u_stack.size = stk_sz; \
+})
+
+/*
+ * Remember the state of IRQs at the trap point.
+ * It is useful for deciding whether traps can be passed to the guest
+ * handler immediately or should be deferred
+ */
+#define SAVE_IRQS_STATE(regs, upsr) \
+({ \
+	unsigned long psr_val = (regs)->crs.cr1_lo.CR1_lo_psr; \
+	unsigned long upsr_val = (upsr).UPSR_reg; \
+	(regs)->irqs_disabled = \
+		psr_and_upsr_irqs_disabled_flags(psr_val, upsr_val); \
+})
+
+/*
+ * Interrupts should be disabled by the caller to read all hardware
+ * stack registers in a coordinated state.
+ * Hardware stacks are not copied or flushed to memory here.
+ */
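A hedged sketch of how the two macros above compose on a trap entry path; the entry function itself is hypothetical, only macros visible in this header (plus the AW()/UPSR accessors used further below in it) are assumed.

/* Illustrative trap-entry fragment, not the real arch/e2k entry code. */
static void trap_entry_sketch(struct pt_regs *regs, thread_info_t *ti)
{
	e2k_upsr_t upsr;

	/* flushc=true reserves chain-stack room for a possible nested trap */
	NATIVE_SAVE_STACK_REGS(regs, ti, false, true);

	/* cr1_lo (and so the PSR image at the trap point) is now valid */
	AW(upsr) = NATIVE_NV_READ_UPSR_REG_VALUE();
	SAVE_IRQS_STATE(regs, upsr);
}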
+#define ATOMIC_SAVE_CURRENT_STACK_REGS(stacks, crs) \ +({ \ + ATOMIC_SAVE_ALL_STACKS_REGS(stacks, &(crs)->cr1_hi); \ + \ + (stacks)->top = NATIVE_NV_READ_SBR_REG_VALUE(); \ + (crs)->cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); \ + (crs)->cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); \ + (crs)->cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); \ + \ + /* \ + * Do not copy copy_user_stacks()'s kernel data stack frame \ + */ \ + (stacks)->usd_lo.USD_lo_base += \ + (((crs)->cr1_hi.CR1_hi_ussz << 4) - \ + (stacks)->usd_hi.USD_hi_size); \ + (stacks)->usd_hi.USD_hi_size = \ + ((crs)->cr1_hi.CR1_hi_ussz << 4); \ +}) + +#define NATIVE_DO_SAVE_MONITOR_COUNTERS(sw_regs) \ +do { \ + sw_regs->ddmar0 = NATIVE_READ_DDMAR0_REG_VALUE(); \ + sw_regs->ddmar1 = NATIVE_READ_DDMAR1_REG_VALUE(); \ + sw_regs->dimar0 = NATIVE_READ_DIMAR0_REG_VALUE(); \ + sw_regs->dimar1 = NATIVE_READ_DIMAR1_REG_VALUE(); \ +} while (0) +#define NATIVE_SAVE_MONITOR_COUNTERS(task) \ +do { \ + struct sw_regs *sw_regs = &((task)->thread.sw_regs); \ + NATIVE_DO_SAVE_MONITOR_COUNTERS(sw_regs); \ +} while (0) + +/* + * When we use monitor registers, we count monitor events for the whole system, + * so DIMAR0, DIMAR1, DDMAR0 and DDMAR1 registers are not depend on process and + * need not be saved while process switching. DIMCR and DDMCR registers are not + * depend on process too, but they should be saved while process switching, + * because they are used to determine monitoring start moment during monitor + * events counting for a process. + */ +static inline void native_save_user_only_regs(struct sw_regs *sw_regs) +{ + if (machine.save_dimtp) + machine.save_dimtp(&sw_regs->dimtp); + + /* Skip breakpoints-related fields handled by + * ptrace_hbp_triggered() and arch-independent + * hardware breakpoints support */ + AW(sw_regs->dibsr) &= E2K_DIBSR_MASK_ALL_BP; + AW(sw_regs->dibsr) |= NATIVE_READ_DIBSR_REG_VALUE() & + ~E2K_DIBSR_MASK_ALL_BP; + AW(sw_regs->ddbsr) &= E2K_DDBSR_MASK_ALL_BP; + AW(sw_regs->ddbsr) |= NATIVE_READ_DDBSR_REG_VALUE() & + ~E2K_DDBSR_MASK_ALL_BP; + + sw_regs->ddmcr = NATIVE_READ_DDMCR_REG(); + sw_regs->dimcr = NATIVE_READ_DIMCR_REG(); + if (!MONITORING_IS_ACTIVE) + NATIVE_DO_SAVE_MONITOR_COUNTERS(sw_regs); +} + +#if (E2K_MAXGR_d == 32) + +/* Save/Restore global registers */ +#define SAVE_GREGS_PAIR(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg, iset) \ + NATIVE_SAVE_GREG(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg, \ + iset) +#define SAVE_GREGS_PAIR_V2(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_SAVE_GREG_V2(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) +#define SAVE_GREGS_PAIR_V5(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_SAVE_GREG_V5(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) + +/* + * Registers gN-g(N+3) are reserved by ABI. Now N=16. 
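+ * (So the KERNEL_GREGS_MASK used by the SAVE_/RESTORE_ variants below
+ * presumably covers the %g16/%g17 and %g18/%g19 pairs of the
+ * PAIR_MASK arguments.)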
+ * These registers hold pointers to current, so we can skip saving and + * restoring them on context switch and upon entering/exiting signal handlers + * (they are stored in thread_info) + */ +#define DO_SAVE_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_SAVE) \ +do { \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 0) | (1 << 1))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 0, 1, 0, 1, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 2) | (1 << 3))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 2, 3, 2, 3, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 4) | (1 << 5))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 4, 5, 4, 5, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 6) | (1 << 7))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 6, 7, 6, 7, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 8) | (1 << 9))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 8, 9, 8, 9, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 10) | (1 << 11))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 10, 11, 10, 11, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 12) | (1 << 13))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 12, 13, 12, 13, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 14) | (1 << 15))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 14, 15, 14, 15, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 16) | (1 << 17))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 16, 17, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 18) | (1 << 19))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 18, 19, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 20) | (1 << 21))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 20, 21, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 22) | (1 << 23))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 22, 23, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 24) | (1 << 25))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 24, 25, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 26) | (1 << 27))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 26, 27, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 28) | (1 << 29))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 28, 29, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 30) | (1 << 31))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 30, 31, 30, 31, iset); \ + } \ +} while (0) +#define DO_SAVE_LOCAL_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_SAVE) \ +do { \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 16) | (1 << 17))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 0, 1, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 18) | (1 << 19))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 2, 3, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 20) | (1 << 21))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 4, 5, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 22) | (1 << 23))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 6, 7, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 24) | (1 << 25))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 8, 9, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 26) | (1 << 27))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 10, 11, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 28) | (1 << 29))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 12, 13, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_SAVE) & ((1 << 30) | (1 << 31))) == 0) { \ + SAVE_GREGS_PAIR(gregs, 14, 15, 30, 31, iset); \ + } \ +} while (0) + +#define SAVE_ALL_GREGS(gregs, iset) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, 0UL) +#define SAVE_GREGS_EXCEPT_NO(gregs, iset, GREGS_PAIR_NO_NOT_SAVE) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, \ + (1 << GREGS_PAIR_NO_NOT_SAVE)) +#define SAVE_GREGS_EXCEPT_KERNEL(gregs, iset) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, 
KERNEL_GREGS_MASK) +#define SAVE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset) \ + DO_SAVE_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) + +# define SAVE_GREGS(gregs, save_global, iset) \ +do { \ + if (save_global) { \ + SAVE_GREGS_EXCEPT_KERNEL(gregs, iset); \ + } else { \ + SAVE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset); \ + } \ +} while (false) + +/* Same as SAVE_GREGS but saves %g16-%g31 registers only */ +# define SAVE_GREGS_SIGNAL(gregs, iset) \ +do { \ + DO_SAVE_LOCAL_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)); \ +} while (false) + +#define RESTORE_GREGS_PAIR(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg, iset) \ + NATIVE_RESTORE_GREG(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg, \ + iset) +#define RESTORE_GREGS_PAIR_V2(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_RESTORE_GREG_V2(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) +#define RESTORE_GREGS_PAIR_V5(gregs, nolo_save, nohi_save, \ + nolo_greg, nohi_greg) \ + NATIVE_RESTORE_GREG_V5(&(gregs)[nolo_save], \ + &(gregs)[nohi_save], \ + nolo_greg, \ + nohi_greg) + +#define DO_RESTORE_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_RESTORE) \ +do { \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 0) | (1 << 1))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 0, 1, 0, 1, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 2) | (1 << 3))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 2, 3, 2, 3, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 4) | (1 << 5))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 4, 5, 4, 5, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 6) | (1 << 7))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 6, 7, 6, 7, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 8) | (1 << 9))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 8, 9, 8, 9, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 10) | (1 << 11))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 10, 11, 10, 11, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 12) | (1 << 13))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 12, 13, 12, 13, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 14) | (1 << 15))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 14, 15, 14, 15, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 16) | (1 << 17))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 16, 17, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 18) | (1 << 19))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 18, 19, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 20) | (1 << 21))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 20, 21, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 22) | (1 << 23))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 22, 23, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 24) | (1 << 25))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 24, 25, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 26) | (1 << 27))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 26, 27, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 28) | (1 << 29))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 28, 29, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 30) | (1 << 31))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 30, 31, 30, 31, iset); \ + } \ +} while (0) + +#define DO_RESTORE_LOCAL_GREGS_ON_MASK(gregs, iset, PAIR_MASK_NOT_RESTORE) \ +do { \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 16) | (1 << 17))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 0, 1, 16, 17, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 18) | (1 << 19))) 
== 0) { \ + RESTORE_GREGS_PAIR(gregs, 2, 3, 18, 19, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 20) | (1 << 21))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 4, 5, 20, 21, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 22) | (1 << 23))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 6, 7, 22, 23, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 24) | (1 << 25))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 8, 9, 24, 25, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 26) | (1 << 27))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 10, 11, 26, 27, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 28) | (1 << 29))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 12, 13, 28, 29, iset); \ + } \ + if (((PAIR_MASK_NOT_RESTORE) & ((1 << 30) | (1 << 31))) == 0) { \ + RESTORE_GREGS_PAIR(gregs, 14, 15, 30, 31, iset); \ + } \ +} while (0) + +#define RESTORE_ALL_GREGS(gregs, iset) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, 0UL) +#define RESTORE_GREGS_EXCEPT_NO(gregs, iset, GREGS_PAIR_NO_NOT_RESTORE) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, \ + (1 << GREGS_PAIR_NO_NOT_RESTORE)) +#define RESTORE_GREGS_EXCEPT_KERNEL(gregs, iset) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, KERNEL_GREGS_MASK) +#define RESTORE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset) \ + DO_RESTORE_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)) + +# define RESTORE_GREGS(gregs, restore_global, iset) \ +do { \ + if (restore_global) { \ + RESTORE_GREGS_EXCEPT_KERNEL(gregs, iset); \ + } else { \ + RESTORE_GREGS_EXCEPT_GLOBAL_AND_KERNEL(gregs, iset); \ + } \ +} while (false) + +/* Same as RESTORE_GREGS but restores %g16-%g31 registers only */ +# define RESTORE_GREGS_SIGNAL(gregs, iset) \ +do { \ + DO_RESTORE_LOCAL_GREGS_ON_MASK(gregs, iset, \ + (GLOBAL_GREGS_USER_MASK | KERNEL_GREGS_MASK)); \ +} while (false) + +#ifdef CONFIG_GREGS_CONTEXT +#define NATIVE_INIT_G_REGS() \ +({ \ + init_BGR_reg(); \ + NATIVE_GREGS_SET_EMPTY(); \ + clear_memory_8(¤t_thread_info()->k_gregs, \ + sizeof(current_thread_info()->k_gregs), ETAGEWD); \ +}) +#else /* ! CONFIG_GREGS_CONTEXT */ +#define NATIVE_INIT_G_REGS() +#endif /* CONFIG_GREGS_CONTEXT */ + +#define NATIVE_BOOT_INIT_G_REGS() \ +({ \ + native_boot_init_BGR_reg(); \ + E2K_ALL_GREGS_SET_EMPTY(); \ +}) + +/* ptrace related guys: we do not use them on switching. 
*/ +# define NATIVE_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ +({ \ + void * g_u = g_user; \ + void * gt_u = gtag_user; \ + \ + E2K_GET_GREGS_FROM_THREAD(g_u, gt_u, gbase); \ +}) + +# define NATIVE_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ +({ \ + void * g_u = g_user; \ + void * gt_u = gtag_user; \ + \ + E2K_SET_GREGS_TO_THREAD(gbase, g_u, gt_u); \ +}) + +#if defined(CONFIG_PARAVIRT_GUEST) +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +#include +#else /* !CONFIG_PARAVIRT_GUEST && !CONFIG_KVM_GUEST_KERNEL */ + +#define GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) \ + NATIVE_GET_GREGS_FROM_THREAD(g_user, gtag_user, gbase) + +#define SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) \ + NATIVE_SET_GREGS_TO_THREAD(gbase, g_user, gtag_user) + +#endif /* !CONFIG_PARAVIRT_GUEST && !CONFIG_KVM_GUEST_KERNEL */ + +#else /* E2K_MAXGR_d != 32 */ + +# error "Unsupported E2K_MAXGR_d value" + +#endif /* E2K_MAXGR_d */ + +static inline void +native_save_local_glob_regs(local_gregs_t *l_gregs) +{ + void (*save_local_gregs)(struct local_gregs *); + + save_local_gregs = machine.save_local_gregs; + + copy_k_gregs_to_l_gregs(l_gregs, ¤t_thread_info()->k_gregs); + save_local_gregs(l_gregs); +} +static inline void +native_restore_local_glob_regs(local_gregs_t *l_gregs) +{ + void (*restore_local_gregs)(const struct local_gregs *); + + restore_local_gregs = machine.restore_local_gregs; + + get_k_gregs_from_l_regs(¤t_thread_info()->k_gregs, l_gregs); + restore_local_gregs(l_gregs); +} + +static inline void +native_get_all_user_glob_regs(global_regs_t *gregs) +{ + machine.save_gregs(gregs); + copy_k_gregs_to_gregs(gregs, ¤t_thread_info()->k_gregs); +} + +#define DO_SAVE_UPSR_REG_VALUE(upsr_reg, upsr_reg_value) \ + { AS_WORD(upsr_reg) = (upsr_reg_value); } + +#define NATIVE_DO_SAVE_UPSR_REG(upsr_reg) \ + DO_SAVE_UPSR_REG_VALUE((upsr_reg), \ + NATIVE_NV_READ_UPSR_REG_VALUE()) +#define DO_SAVE_UPSR_REG(upsr_reg) \ + DO_SAVE_UPSR_REG_VALUE((upsr_reg), READ_UPSR_REG_VALUE()) + +#define NATIVE_SAVE_UPSR_REG(regs) NATIVE_DO_SAVE_UPSR_REG((regs)->upsr) +#define SAVE_UPSR_REG(regs) DO_SAVE_UPSR_REG((regs)->upsr) + +#define DO_RESTORE_UPSR_REG(upsr_reg) \ + { WRITE_UPSR_REG(upsr_reg); } +#define NATIVE_DO_RESTORE_UPSR_REG(upsr_reg) \ + { NATIVE_WRITE_UPSR_REG(upsr_reg); } + +#define NATIVE_RESTORE_UPSR_REG(regs) NATIVE_DO_RESTORE_UPSR_REG((regs)->upsr) +#define RESTORE_UPSR_REG(regs) DO_RESTORE_UPSR_REG((regs)->upsr) + +#define NATIVE_SAVE_RPR_REGS(regs) \ +({ \ + regs->rpr_lo = NATIVE_READ_RPR_LO_REG_VALUE(); \ + regs->rpr_hi = NATIVE_READ_RPR_HI_REG_VALUE(); \ +}) + +#define NATIVE_SAVE_INTEL_REGS(regs) \ +do { \ + regs->cs_lo = NATIVE_READ_CS_LO_REG_VALUE(); \ + regs->cs_hi = NATIVE_READ_CS_HI_REG_VALUE(); \ + regs->ds_lo = NATIVE_READ_DS_LO_REG_VALUE(); \ + regs->ds_hi = NATIVE_READ_DS_HI_REG_VALUE(); \ + regs->es_lo = NATIVE_READ_ES_LO_REG_VALUE(); \ + regs->es_hi = NATIVE_READ_ES_HI_REG_VALUE(); \ + regs->fs_lo = NATIVE_READ_FS_LO_REG_VALUE(); \ + regs->fs_hi = NATIVE_READ_FS_HI_REG_VALUE(); \ + regs->gs_lo = NATIVE_READ_GS_LO_REG_VALUE(); \ + regs->gs_hi = NATIVE_READ_GS_HI_REG_VALUE(); \ + regs->ss_lo = NATIVE_READ_SS_LO_REG_VALUE(); \ + regs->ss_hi = NATIVE_READ_SS_HI_REG_VALUE(); \ + NATIVE_SAVE_RPR_REGS(regs); \ + if (IS_ENABLED(CONFIG_TC_STORAGE)) { \ + NATIVE_FLUSH_ALL_TC; \ + regs->tcd = NATIVE_GET_TCD(); \ + } \ +} while (0) + +#define NATIVE_RESTORE_INTEL_REGS(regs) \ +do { \ + u64 cs_lo = regs->cs_lo; \ + u64 cs_hi = regs->cs_hi; \ + u64 ds_lo = regs->ds_lo; \ + u64 ds_hi = regs->ds_hi; \ + 
u64 es_lo = regs->es_lo; \ + u64 es_hi = regs->es_hi; \ + u64 fs_lo = regs->fs_lo; \ + u64 fs_hi = regs->fs_hi; \ + u64 gs_lo = regs->gs_lo; \ + u64 gs_hi = regs->gs_hi; \ + u64 ss_lo = regs->ss_lo; \ + u64 ss_hi = regs->ss_hi; \ + u64 rpr_lo = regs->rpr_lo; \ + u64 rpr_hi = regs->rpr_hi; \ + u64 tcd = regs->tcd; \ + NATIVE_CL_WRITE_CS_LO_REG_VALUE(cs_lo); \ + NATIVE_CL_WRITE_CS_HI_REG_VALUE(cs_hi); \ + NATIVE_CL_WRITE_DS_LO_REG_VALUE(ds_lo); \ + NATIVE_CL_WRITE_DS_HI_REG_VALUE(ds_hi); \ + NATIVE_CL_WRITE_ES_LO_REG_VALUE(es_lo); \ + NATIVE_CL_WRITE_ES_HI_REG_VALUE(es_hi); \ + NATIVE_CL_WRITE_FS_LO_REG_VALUE(fs_lo); \ + NATIVE_CL_WRITE_FS_HI_REG_VALUE(fs_hi); \ + NATIVE_CL_WRITE_GS_LO_REG_VALUE(gs_lo); \ + NATIVE_CL_WRITE_GS_HI_REG_VALUE(gs_hi); \ + NATIVE_CL_WRITE_SS_LO_REG_VALUE(ss_lo); \ + NATIVE_CL_WRITE_SS_HI_REG_VALUE(ss_hi); \ + NATIVE_WRITE_RPR_LO_REG_VALUE(rpr_lo); \ + NATIVE_WRITE_RPR_HI_REG_VALUE(rpr_hi); \ + if (IS_ENABLED(CONFIG_TC_STORAGE)) \ + NATIVE_SET_TCD(tcd); \ +} while (0) + +/* + * Procedure stack (PS) and procedure chain stack (PCS) hardware filling and + * spilling is asynchronous process. Page fault traps can overlay to this + * asynchronous process and some filling and spilling requests can be not + * completed. These requests were dropped by MMU to trap cellar. + * We should save not completed filling data before starting of spilling + * current procedure chain stack to preserve from filling data loss + */ + +#define NATIVE_SAVE_TRAP_CELLAR(regs, trap) \ +({ \ + kernel_trap_cellar_t *kernel_tcellar = \ + (kernel_trap_cellar_t *)KERNEL_TRAP_CELLAR; \ + kernel_trap_cellar_ext_t *kernel_tcellar_ext = \ + (kernel_trap_cellar_ext_t *) \ + ((void *) KERNEL_TRAP_CELLAR + TC_EXT_OFFSET); \ + trap_cellar_t *tcellar = (trap)->tcellar; \ + int cnt, cs_req_num = 0, cs_a4 = 0, off, max_cnt; \ + u64 kstack_pf_addr = 0, stack = (u64) current->stack; \ + bool end_flag = false, is_qp; \ + \ + max_cnt = NATIVE_READ_MMU_TRAP_COUNT(); \ + if (max_cnt < 3) { \ + max_cnt = 3 * HW_TC_SIZE; \ + end_flag = true; \ + } \ + (trap)->curr_cnt = -1; \ + (trap)->ignore_user_tc = 0; \ + (trap)->tc_called = 0; \ + (trap)->is_intc = false; \ + (trap)->from_sigreturn = 0; \ + CLEAR_CLW_REQUEST_COUNT(regs); \ + BUG_ON(max_cnt > 3 * HW_TC_SIZE); \ + for (cnt = 0; 3 * cnt < max_cnt; cnt++) { \ + tc_opcode_t opcode; \ + tc_cond_t condition; \ + \ + if (end_flag) \ + if (AW(kernel_tcellar[cnt].condition) == -1) \ + break; \ + \ + tcellar[cnt].address = kernel_tcellar[cnt].address; \ + condition = kernel_tcellar[cnt].condition; \ + tcellar[cnt].condition = condition; \ + AW(opcode) = AS(condition).opcode; \ + is_qp = (AS(opcode).fmt == LDST_QP_FMT || \ + cpu_has(CPU_FEAT_QPREG) && AS(condition).fmtc && \ + AS(opcode).fmt == LDST_QWORD_FMT); \ + if (AS(condition).clw) { \ + if (GET_CLW_REQUEST_COUNT(regs) == 0) { \ + SET_CLW_FIRST_REQUEST(regs, cnt); \ + } \ + INC_CLW_REQUEST_COUNT(regs); \ + } \ + if (is_qp) \ + tcellar[cnt].mask = kernel_tcellar_ext[cnt].mask; \ + if (AS(condition).store) { \ + NATIVE_MOVE_TAGGED_DWORD( \ + &(kernel_tcellar[cnt].data), \ + &(tcellar[cnt].data)); \ + if (is_qp) { \ + NATIVE_MOVE_TAGGED_DWORD( \ + &(kernel_tcellar_ext[cnt].data), \ + &(tcellar[cnt].data_ext)); \ + } \ + } else if (AS(condition).s_f && AS(condition).sru) { \ + if (cs_req_num == 0) \ + cs_a4 = tcellar[cnt].address & (1 << 4); \ + cs_req_num++; \ + } \ + if (unlikely((AS(condition).s_f || IS_SPILL(tcellar[cnt])) && \ + tcellar[cnt].address >= stack && \ + tcellar[cnt].address < stack + KERNEL_STACKS_SIZE)) \ 
+ kstack_pf_addr = tcellar[cnt].address; \ + tcellar[cnt].flags = 0; \ + } \ + (trap)->tc_count = cnt * 3; \ + if (unlikely(GET_CLW_REQUEST_COUNT(regs) && \ + cpu_has(CPU_HWBUG_CLW_STALE_L1_ENTRY))) \ + (regs)->clw_cpu = raw_smp_processor_id(); \ + if (cs_req_num > 0) { \ + /* recover chain stack pointers to repeat FILL */ \ + e2k_pcshtp_t pcshtp = NATIVE_READ_PCSHTP_REG_SVALUE(); \ + s64 pcshtp_ext = PCSHTP_SIGN_EXTEND(pcshtp); \ + e2k_pcsp_hi_t PCSP_hi = NATIVE_NV_READ_PCSP_HI_REG(); \ + if (!cs_a4) { \ + off = cs_req_num * 32; \ + } else { \ + off = (cs_req_num - 1) * 32 + 16; \ + } \ + pcshtp_ext -= off; \ + PCSP_hi.PCSP_hi_ind += off; \ + NATIVE_WRITE_PCSHTP_REG_SVALUE(pcshtp_ext); \ + NATIVE_NV_NOIRQ_WRITE_PCSP_HI_REG(PCSP_hi); \ + } \ + kstack_pf_addr; \ +}) + +#ifdef CONFIG_CLW_ENABLE +/* + * If requests from CLW unit (user stack window clearing) were not + * completed, and they were droped to the kernel trap cellar, + * then we should save CLW unit state before switch to other stack + * and restore CLW state after return to the user stack + */ +# define CLEAR_CLW_REQUEST_COUNT(regs) ((regs)->clw_count = 0) +# define INC_CLW_REQUEST_COUNT(regs) ((regs)->clw_count++) +# define GET_CLW_REQUEST_COUNT(regs) ((regs)->clw_count) +# define SET_CLW_FIRST_REQUEST(regs, cnt) ((regs)->clw_first = (cnt)) +# define GET_CLW_FIRST_REQUEST(regs) ((regs)->clw_first) +#define ENABLE_US_CLW() \ +do { \ + if (!cpu_has(CPU_HWBUG_CLW)) \ + write_MMU_US_CL_D(0); \ +} while (0) +# define DISABLE_US_CLW() write_MMU_US_CL_D(1) +#else /* !CONFIG_CLW_ENABLE */ +# define CLEAR_CLW_REQUEST_COUNT(regs) +# define INC_CLW_REQUEST_COUNT(regs) +# define GET_CLW_REQUEST_COUNT(regs) (0) +# define SET_CLW_FIRST_REQUEST(regs, cnt) +# define GET_CLW_FIRST_REQUEST(regs) (0) +# define ENABLE_US_CLW() +# define DISABLE_US_CLW() +#endif /* CONFIG_CLW_ENABLE */ + +#define NATIVE_RESTORE_COMMON_REGS(regs) \ +do { \ + u64 ctpr1 = AW(regs->ctpr1), ctpr2 = AW(regs->ctpr2), \ + ctpr3 = AW(regs->ctpr3), ctpr1_hi = AW(regs->ctpr1_hi), \ + ctpr2_hi = AW(regs->ctpr2_hi), ctpr3_hi = AW(regs->ctpr3_hi), \ + lsr = regs->lsr, lsr1 = regs->lsr1, \ + ilcr = regs->ilcr, ilcr1 = regs->ilcr1; \ + \ + NATIVE_RESTORE_COMMON_REGS_VALUES(ctpr1, ctpr2, ctpr3, ctpr1_hi, \ + ctpr2_hi, ctpr3_hi, lsr, lsr1, ilcr, ilcr1); \ +} while (0) + +#define PREFIX_RESTORE_USER_CRs(PV_TYPE, regs) \ +({ \ + u64 cr0_hi = AS_WORD((regs)->crs.cr0_hi); \ + u64 cr0_lo = AS_WORD((regs)->crs.cr0_lo); \ + u64 cr1_hi = AS_WORD((regs)->crs.cr1_hi); \ + u64 cr1_lo = AS_WORD((regs)->crs.cr1_lo); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR0_HI_REG_VALUE(cr0_hi); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR0_LO_REG_VALUE(cr0_lo); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR1_HI_REG_VALUE(cr1_hi); \ + PV_TYPE##_NV_NOIRQ_WRITE_CR1_LO_REG_VALUE(cr1_lo); \ +}) + +#define PREFIX_RESTORE_USER_STACK_REGS(PV_TYPE, regs, in_syscall) \ +({ \ + thread_info_t *ti = current_thread_info(); \ + e2k_stacks_t *stacks; \ + u64 usd_lo; \ + u64 usd_hi; \ + u64 top; \ + \ + stacks = (in_syscall) ? 
+#define PREFIX_RESTORE_USER_STACK_REGS(PV_TYPE, regs, in_syscall) \
+({ \
+	thread_info_t *ti = current_thread_info(); \
+	e2k_stacks_t *stacks; \
+	u64 usd_lo; \
+	u64 usd_hi; \
+	u64 top; \
+ \
+	stacks = (in_syscall) ? \
+			syscall_guest_get_restore_stacks(ti, regs) \
+		: \
+			trap_guest_get_restore_stacks(ti, regs); \
+	usd_lo = AS_WORD(stacks->usd_lo); \
+	usd_hi = AS_WORD(stacks->usd_hi); \
+	top = stacks->top; \
+	PREFIX_RESTORE_USER_CRs(PV_TYPE, regs); \
+	CHECK_USD_BASE_SIZE(regs); \
+	PV_TYPE##_NV_WRITE_USBR_USD_REG_VALUE(top, usd_hi, usd_lo); \
+	RESTORE_USER_CUT_REGS(ti, regs, in_syscall); \
+})
+#define NATIVE_RESTORE_USER_CRs(regs) \
+		PREFIX_RESTORE_USER_CRs(NATIVE, regs)
+#define NATIVE_RESTORE_USER_STACK_REGS(regs, insyscall) \
+		PREFIX_RESTORE_USER_STACK_REGS(NATIVE, regs, insyscall)
+#define NATIVE_RESTORE_USER_TRAP_STACK_REGS(regs) \
+		NATIVE_RESTORE_USER_STACK_REGS(regs, false)
+#define NATIVE_RESTORE_USER_SYSCALL_STACK_REGS(regs) \
+		NATIVE_RESTORE_USER_STACK_REGS(regs, true)
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is a paravirtualized host and guest */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is a pure guest kernel (not paravirtualized) */
+#include
+#else	/* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* it is a native kernel without any virtualization */
+/* or a native host kernel with virtualization support */
+
+/* Save stack registers on kernel native/host/hypervisor mode */
+#define SAVE_STACK_REGS(regs, ti, user, trap) \
+		NATIVE_SAVE_STACK_REGS(regs, ti, user, trap)
+
+#define RESTORE_USER_STACK_REGS(regs, in_syscall) \
+		NATIVE_RESTORE_USER_STACK_REGS(regs, in_syscall)
+#define RESTORE_USER_TRAP_STACK_REGS(regs) \
+		RESTORE_USER_STACK_REGS(regs, false)
+#define RESTORE_USER_SYSCALL_STACK_REGS(regs) \
+		RESTORE_USER_STACK_REGS(regs, true)
+
+#define INIT_G_REGS()		NATIVE_INIT_G_REGS()
+#define BOOT_INIT_G_REGS()	NATIVE_BOOT_INIT_G_REGS()
+
+static inline void
+save_local_glob_regs(local_gregs_t *l_gregs)
+{
+	native_save_local_glob_regs(l_gregs);
+}
+static inline void
+restore_local_glob_regs(local_gregs_t *l_gregs)
+{
+	native_restore_local_glob_regs(l_gregs);
+}
+
+static inline void
+get_all_user_glob_regs(global_regs_t *gregs)
+{
+	native_get_all_user_glob_regs(gregs);
+}
+
+#endif	/* CONFIG_PARAVIRT_GUEST */
+
+#define NATIVE_DO_RESTORE_MONITOR_COUNTERS(sw_regs) \
+do { \
+	e2k_ddmcr_t ddmcr = sw_regs->ddmcr; \
+	u64 ddmar0 = sw_regs->ddmar0; \
+	u64 ddmar1 = sw_regs->ddmar1; \
+	e2k_dimcr_t dimcr = sw_regs->dimcr; \
+	u64 dimar0 = sw_regs->dimar0; \
+	u64 dimar1 = sw_regs->dimar1; \
+ \
+	if (machine.restore_dimtp) \
+		machine.restore_dimtp(&sw_regs->dimtp); \
+	NATIVE_WRITE_DDMAR0_REG_VALUE(ddmar0); \
+	NATIVE_WRITE_DDMAR1_REG_VALUE(ddmar1); \
+	NATIVE_WRITE_DIMAR0_REG_VALUE(dimar0); \
+	NATIVE_WRITE_DIMAR1_REG_VALUE(dimar1); \
+	NATIVE_WRITE_DDMCR_REG(ddmcr); \
+	NATIVE_WRITE_DIMCR_REG(dimcr); \
+} while (0)
+#define NATIVE_RESTORE_MONITOR_COUNTERS(task) \
+do { \
+	struct sw_regs *sw_regs = &task->thread.sw_regs; \
+	NATIVE_DO_RESTORE_MONITOR_COUNTERS(sw_regs); \
+} while (0)
+
+/*
+ * When monitor registers are in use, monitor events are counted for the
+ * whole system, so the DIMCR, DDMCR, DIMAR0, DIMAR1, DDMAR0, DDMAR1, DIBSR
+ * and DDBSR registers are not process-specific and should not be restored
+ * on a process switch.
+ */ +static inline void native_restore_user_only_regs(struct sw_regs *sw_regs) +{ + e2k_dibsr_t dibsr = sw_regs->dibsr; + e2k_ddbsr_t ddbsr = sw_regs->ddbsr; + + /* Skip breakpoints-related fields handled by + * ptrace_hbp_triggered() and arch-independent + * hardware breakpoints support */ + AW(dibsr) &= ~E2K_DIBSR_MASK_ALL_BP; + AW(dibsr) |= NATIVE_READ_DIBSR_REG_VALUE() & E2K_DIBSR_MASK_ALL_BP; + AW(ddbsr) &= ~E2K_DDBSR_MASK_ALL_BP; + AW(ddbsr) |= NATIVE_READ_DDBSR_REG_VALUE() & E2K_DDBSR_MASK_ALL_BP; + + if (!MONITORING_IS_ACTIVE) { + NATIVE_WRITE_DIBSR_REG(dibsr); + NATIVE_WRITE_DDBSR_REG(ddbsr); + NATIVE_DO_RESTORE_MONITOR_COUNTERS(sw_regs); + } +} + +#ifdef CONFIG_PERF_EVENTS +DECLARE_PER_CPU(u8, perf_monitors_used); +DECLARE_PER_CPU(u8, perf_bps_used); +# define is_perf_using_monitors __this_cpu_read(perf_monitors_used) +# define is_perf_using_bps __this_cpu_read(perf_bps_used) +#else /* ! CONFIG_PERF_EVENTS */ +#define is_perf_using_monitors false +#define is_perf_using_bps false +#endif /* CONFIG_PERF_EVENTS */ + +static inline void native_clear_user_only_regs(void) +{ + if (!is_perf_using_bps) { + NATIVE_WRITE_DIBCR_REG_VALUE(0); + NATIVE_WRITE_DDBCR_REG_VALUE(0); + } + if (!MONITORING_IS_ACTIVE && !is_perf_using_monitors) { + NATIVE_WRITE_DIMCR_REG_VALUE(0); + NATIVE_WRITE_DIBSR_REG_VALUE(0); + NATIVE_WRITE_DDMCR_REG_VALUE(0); + NATIVE_WRITE_DDBSR_REG_VALUE(0); + } +} + + +/* + * Set some special registers in accordance with + * E2K API specifications. + */ +#define GET_FPU_DEFAULTS(fpsr, fpcr, pfpfr) \ +({ \ + AW(fpsr) = 0; \ + AW(pfpfr) = 0; \ + AW(fpcr) = 32; \ + \ + /* masks */ \ + AS_STRUCT(pfpfr).im = 1; \ + AS_STRUCT(pfpfr).dm = 1; \ + AS_STRUCT(pfpfr).zm = 1; \ + AS_STRUCT(pfpfr).om = 1; \ + AS_STRUCT(pfpfr).um = 1; \ + AS_STRUCT(pfpfr).pm = 1; \ + \ + /* flags ! NEEDSWORK ! */ \ + AS_STRUCT(pfpfr).pe = 1; \ + AS_STRUCT(pfpfr).ue = 1; \ + AS_STRUCT(pfpfr).oe = 1; \ + AS_STRUCT(pfpfr).ze = 1; \ + AS_STRUCT(pfpfr).de = 1; \ + AS_STRUCT(pfpfr).ie = 1; \ + /* rounding */ \ + AS_STRUCT(pfpfr).rc = 0; \ + \ + AS_STRUCT(pfpfr).fz = 0; \ + AS_STRUCT(pfpfr).dpe = 0; \ + AS_STRUCT(pfpfr).due = 0; \ + AS_STRUCT(pfpfr).doe = 0; \ + AS_STRUCT(pfpfr).dze = 0; \ + AS_STRUCT(pfpfr).dde = 0; \ + AS_STRUCT(pfpfr).die = 0; \ + \ + AS_STRUCT(fpcr).im = 1; \ + AS_STRUCT(fpcr).dm = 1; \ + AS_STRUCT(fpcr).zm = 1; \ + AS_STRUCT(fpcr).om = 1; \ + AS_STRUCT(fpcr).um = 1; \ + AS_STRUCT(fpcr).pm = 1; \ + /* rounding */ \ + AS_STRUCT(fpcr).rc = 0; \ + AS_STRUCT(fpcr).pc = 3; \ + \ + /* flags ! NEEDSWORK ! */ \ + AS_STRUCT(fpsr).pe = 1; \ + AS_STRUCT(fpsr).ue = 1; \ + AS_STRUCT(fpsr).oe = 1; \ + AS_STRUCT(fpsr).ze = 1; \ + AS_STRUCT(fpsr).de = 1; \ + AS_STRUCT(fpsr).ie = 1; \ + \ + AS_STRUCT(fpsr).es = 0; \ + AS_STRUCT(fpsr).c1 = 0; \ +}) +#define INIT_SPECIAL_REGISTERS() \ +({ \ + e2k_fpsr_t fpsr; \ + e2k_pfpfr_t pfpfr; \ + e2k_fpcr_t fpcr; \ + \ + GET_FPU_DEFAULTS(fpsr, fpcr, pfpfr); \ + \ + NATIVE_NV_WRITE_PFPFR_REG(pfpfr); \ + NATIVE_NV_WRITE_FPCR_REG(fpcr); \ + NATIVE_NV_WRITE_FPSR_REG(fpsr); \ +}) + +/* Declarate here to prevent loop #include. 
*/ +#define PT_PTRACED 0x00000001 + +static inline void +NATIVE_DO_SAVE_TASK_USER_REGS_TO_SWITCH(struct sw_regs *sw_regs, + bool task_is_binco, bool task_traced) +{ + if (unlikely(task_is_binco)) + NATIVE_SAVE_INTEL_REGS((sw_regs)); + +#ifdef CONFIG_MLT_STORAGE + machine.invalidate_MLT(); +#endif + + AS_WORD(sw_regs->fpcr) = NATIVE_NV_READ_FPCR_REG_VALUE(); + AS_WORD(sw_regs->fpsr) = NATIVE_NV_READ_FPSR_REG_VALUE(); + AS_WORD(sw_regs->pfpfr) = NATIVE_NV_READ_PFPFR_REG_VALUE(); + sw_regs->cutd = NATIVE_NV_READ_CUTD_REG(); + + if (unlikely(task_traced)) + native_save_user_only_regs(sw_regs); +} + +static inline void +NATIVE_SAVE_TASK_REGS_TO_SWITCH(struct task_struct *task) +{ + const int task_is_binco = TASK_IS_BINCO(task); + struct mm_struct *mm = task->mm; + struct sw_regs *sw_regs = &task->thread.sw_regs; + + WARN_ONCE(!AS(sw_regs->upsr).nmie, + "Non-maskable interrupts are disabled\n"); + + /* Kernel does not use MLT so skip invalidation for kernel threads */ + NATIVE_DO_SAVE_TASK_USER_REGS_TO_SWITCH(sw_regs, task_is_binco, + !!(task->ptrace & PT_PTRACED)); + + if (mm) { +#ifdef CONFIG_GREGS_CONTEXT + machine.save_gregs_dirty_bgr(&task->thread.sw_regs.gregs); +#endif + + /* + * If AAU was not cleared then at a trap exit of next user + * AAU will start working, so clear it explicitly here. + */ + native_clear_apb(); + } + + NATIVE_FLUSHCPU; + + sw_regs->top = NATIVE_NV_READ_SBR_REG_VALUE(); + sw_regs->usd_hi = NATIVE_NV_READ_USD_HI_REG(); + sw_regs->usd_lo = NATIVE_NV_READ_USD_LO_REG(); + + sw_regs->crs.cr1_lo = NATIVE_NV_READ_CR1_LO_REG(); + sw_regs->crs.cr1_hi = NATIVE_NV_READ_CR1_HI_REG(); + sw_regs->crs.cr0_lo = NATIVE_NV_READ_CR0_LO_REG(); + sw_regs->crs.cr0_hi = NATIVE_NV_READ_CR0_HI_REG(); + + /* These will wait for the flush so we give + * the flush some time to finish. 
*/
+	sw_regs->psp_hi = NATIVE_NV_READ_PSP_HI_REG();
+	sw_regs->psp_lo = NATIVE_NV_READ_PSP_LO_REG();
+	sw_regs->pcsp_hi = NATIVE_NV_READ_PCSP_HI_REG();
+	sw_regs->pcsp_lo = NATIVE_NV_READ_PCSP_LO_REG();
+}
+
+/*
+ * lcc currently has a problem with passing structures in registers
+ * (it moves these structures into stack memory).
+ */
+static inline void
+NATIVE_DO_RESTORE_TASK_USER_REGS_TO_SWITCH(struct sw_regs *sw_regs,
+		bool task_is_binco, bool task_traced)
+{
+	u64 fpcr = AS_WORD(sw_regs->fpcr);
+	u64 fpsr = AS_WORD(sw_regs->fpsr);
+	u64 pfpfr = AS_WORD(sw_regs->pfpfr);
+	u64 cutd = AS_WORD(sw_regs->cutd);
+
+	NATIVE_NV_WRITE_FPCR_REG_VALUE(fpcr);
+	NATIVE_NV_WRITE_FPSR_REG_VALUE(fpsr);
+	NATIVE_NV_WRITE_PFPFR_REG_VALUE(pfpfr);
+	NATIVE_NV_NOIRQ_WRITE_CUTD_REG_VALUE(cutd);
+
+	if (unlikely(task_traced))
+		native_restore_user_only_regs(sw_regs);
+	else /* Do this always when we don't test prev_task->ptrace */
+		native_clear_user_only_regs();
+
+	NATIVE_CLEAR_DAM;
+
+	if (unlikely(task_is_binco)) {
+		if (machine.flushts)
+			machine.flushts();
+		NATIVE_RESTORE_INTEL_REGS(sw_regs);
+	}
+}
+static inline void
+NATIVE_RESTORE_TASK_REGS_TO_SWITCH(struct task_struct *task,
+		struct thread_info *ti)
+{
+	struct sw_regs *sw_regs = &task->thread.sw_regs;
+	u64 top = sw_regs->top;
+	u64 usd_lo = AS_WORD(sw_regs->usd_lo);
+	u64 usd_hi = AS_WORD(sw_regs->usd_hi);
+	u64 psp_lo = AS_WORD(sw_regs->psp_lo);
+	u64 psp_hi = AS_WORD(sw_regs->psp_hi);
+	u64 pcsp_lo = AS_WORD(sw_regs->pcsp_lo);
+	u64 pcsp_hi = AS_WORD(sw_regs->pcsp_hi);
+	e2k_mem_crs_t crs = sw_regs->crs;
+	const int task_is_binco = TASK_IS_BINCO(task);
+	struct mm_struct *mm = task->mm;
+
+	NATIVE_FLUSHCPU;
+
+	NATIVE_NV_WRITE_USBR_USD_REG_VALUE(top, usd_hi, usd_lo);
+	NATIVE_NV_WRITE_PSP_REG_VALUE(psp_hi, psp_lo);
+	NATIVE_NV_WRITE_PCSP_REG_VALUE(pcsp_hi, pcsp_lo);
+
+	NATIVE_NV_NOIRQ_WRITE_CR0_LO_REG(crs.cr0_lo);
+	NATIVE_NV_NOIRQ_WRITE_CR0_HI_REG(crs.cr0_hi);
+	NATIVE_NV_NOIRQ_WRITE_CR1_LO_REG(crs.cr1_lo);
+	NATIVE_NV_NOIRQ_WRITE_CR1_HI_REG(crs.cr1_hi);
+
+	NATIVE_DO_RESTORE_TASK_USER_REGS_TO_SWITCH(sw_regs, task_is_binco,
+			(task->ptrace & PT_PTRACED) ? true : false);
+
+#ifdef CONFIG_GREGS_CONTEXT
+	if (mm)
+		machine.restore_gregs(&task->thread.sw_regs.gregs);
+#endif
+}
+
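Together, NATIVE_SAVE_TASK_REGS_TO_SWITCH() and NATIVE_RESTORE_TASK_REGS_TO_SWITCH() form the register half of a context switch. A minimal sketch of how a caller might pair them; the wrapper name here is illustrative, and the real switch_to() glue lives outside this header:

	/* illustrative only: not part of the original header */
	static inline void example_switch_regs(struct task_struct *prev,
					struct task_struct *next,
					struct thread_info *next_ti)
	{
		/* spill prev's hardware stacks and capture its register state */
		NATIVE_SAVE_TASK_REGS_TO_SWITCH(prev);
		/* reload next's stack pointers, CRs, FPU and user-only state */
		NATIVE_RESTORE_TASK_REGS_TO_SWITCH(next, next_ti);
	}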
+static inline void
+NATIVE_SWITCH_TO_KERNEL_STACK(e2k_addr_t ps_base, e2k_size_t ps_size,
+		e2k_addr_t pcs_base, e2k_size_t pcs_size,
+		e2k_addr_t ds_base, e2k_size_t ds_size)
+{
+	e2k_rwap_lo_struct_t reg_lo;
+	e2k_rwap_hi_struct_t reg_hi;
+	e2k_rwap_lo_struct_t stack_reg_lo;
+	e2k_rwap_hi_struct_t stack_reg_hi;
+	e2k_usbr_t usbr;
+
+	/*
+	 * Set the Procedure Stack and Procedure Chain Stack registers
+	 * to the beginning of the initial PS and PCS stacks
+	 */
+	NATIVE_FLUSHCPU;
+	reg_lo.PSP_lo_half = 0;
+	reg_lo.PSP_lo_base = ps_base;
+	reg_lo._PSP_lo_rw = E2K_PSP_RW_PROTECTIONS;
+	reg_hi.PSP_hi_half = 0;
+	reg_hi.PSP_hi_size = ps_size;
+	reg_hi.PSP_hi_ind = 0;
+	NATIVE_NV_WRITE_PSP_REG(reg_hi, reg_lo);
+	reg_lo.PCSP_lo_half = 0;
+	reg_lo.PCSP_lo_base = pcs_base;
+	reg_lo._PCSP_lo_rw = E2K_PCSR_RW_PROTECTIONS;
+	reg_hi.PCSP_hi_half = 0;
+	reg_hi.PCSP_hi_size = pcs_size;
+	reg_hi.PCSP_hi_ind = 0;
+	NATIVE_NV_WRITE_PCSP_REG(reg_hi, reg_lo);
+
+
+	/*
+	 * Set the stack pointers to the beginning of the kernel initial
+	 * data stack
+	 */
+
+	usbr.USBR_base = ds_base + ds_size;
+
+	/*
+	 * Reserve an additional 64 bytes for the parameter area.
+	 * The compiler might use it to temporarily store a function's
+	 * parameters.
+	 */
+
+	stack_reg_lo.USD_lo_half = 0;
+	stack_reg_lo.USD_lo_p = 0;
+	stack_reg_lo.USD_lo_base = ds_base + ds_size - 64;
+
+	stack_reg_hi.USD_hi_half = 0;
+	stack_reg_hi.USD_hi_size = ds_size - 64;
+
+	NATIVE_NV_WRITE_USBR_USD_REG(usbr, stack_reg_hi, stack_reg_lo);
+}
+
+/*
+ * There are TIR_NUM (19) TIR registers; bits 63:56 hold the current TIR
+ * number.  After each E2K_GET_DSREG(tir.lo) the next TIR will be read.
+ * For more info see the instruction set documentation.
+ * The order in which the TIR registers are read is significant.
+ */
+#define SAVE_TIRS(TIRs, TIRs_num, from_intc) \
+({ \
+	unsigned long nr_TIRs = -1, TIR_hi, TIR_lo = 0; \
+	unsigned long all_interrupts = 0; \
+	do { \
+		TIR_hi = NATIVE_READ_TIR_HI_REG_VALUE(); \
+		if (unlikely(from_intc && GET_NR_TIRS(TIR_hi) > 18UL)) \
+			break; \
+		TIR_lo = NATIVE_READ_TIR_LO_REG_VALUE(); \
+		++nr_TIRs; \
+		TIRs[GET_NR_TIRS(TIR_hi)].TIR_lo.TIR_lo_reg = TIR_lo; \
+		TIRs[GET_NR_TIRS(TIR_hi)].TIR_hi.TIR_hi_reg = TIR_hi; \
+		all_interrupts |= TIR_hi; \
+	} while (GET_NR_TIRS(TIR_hi)); \
+	TIRs_num = nr_TIRs; \
+ \
+	/* un-freeze the TIR's LIFO */ \
+	UNFREEZE_TIRs(TIR_lo); \
+ \
+	all_interrupts & (exc_all_mask | aau_exc_mask); \
+})
+#define UNFREEZE_TIRs(TIR_lo)	NATIVE_WRITE_TIR_LO_REG_VALUE(TIR_lo)
+#define SAVE_SBBP(sbbp) \
+do { \
+	int i; \
+	for (i = 0; i < SBBP_ENTRIES_NUM; i++) \
+		(sbbp)[i] = NATIVE_READ_SBBP_REG_VALUE(); \
+} while (0)
+
+static inline void set_osgd_task_struct(struct task_struct *task)
+{
+	e2k_gd_lo_t gd_lo;
+	e2k_gd_hi_t gd_hi;
+
+	BUG_ON(!IS_ALIGNED((u64) task, E2K_ALIGN_GLOBALS_SZ));
+
+	AW(gd_lo) = 0;
+	AW(gd_hi) = 0;
+	AS(gd_lo).base = (u64) task;
+	AS(gd_lo).rw = E2K_GD_RW_PROTECTIONS;
+	AS(gd_hi).size = round_up(sizeof(struct task_struct),
+			E2K_ALIGN_GLOBALS_SZ);
+	WRITE_OSGD_LO_REG(gd_lo);
+	WRITE_OSGD_HI_REG(gd_hi);
+	atomic_load_osgd_to_gd();
+}
+
+static inline void
+native_set_current_thread_info(struct thread_info *thread,
+		struct task_struct *task)
+{
+	NATIVE_WRITE_CURRENT_REG(thread);
+	E2K_SET_DGREG_NV(CURRENT_TASK_GREG, task);
+	set_osgd_task_struct(task);
+}
+
+
+static inline void
+set_current_thread_info(struct thread_info *thread, struct task_struct *task)
+{
+	WRITE_CURRENT_REG(thread);
+	E2K_SET_DGREG_NV(CURRENT_TASK_GREG, task);
+	set_osgd_task_struct(task);
+}
+
+#endif /* _E2K_REGS_STATE_H */
+
diff --git a/arch/e2k/include/asm/rlimits.h b/arch/e2k/include/asm/rlimits.h
new file mode 100644
index 0000000..71b4efb
--- /dev/null
+++ b/arch/e2k/include/asm/rlimits.h
@@ -0,0 +1,13 @@
+#ifndef _E2K_RLIMITS_H_
+#define _E2K_RLIMITS_H_
+
+#define PS_RLIM_CUR	(128*1024*1024)
+#define PCS_RLIM_CUR	(8*1024*1024)
+
+/*
+ * Hard stack rlimit numbers
+ */
+#define RLIMIT_P_STACK_EXT	16
+#define RLIMIT_PC_STACK_EXT	17
+
+#endif /* _E2K_RLIMITS_H_ */
diff --git a/arch/e2k/include/asm/rtc.h b/arch/e2k/include/asm/rtc.h
new file mode 100644
index 0000000..cd445ac
--- /dev/null
+++ b/arch/e2k/include/asm/rtc.h
@@ -0,0 +1,6 @@
+#ifndef _E2K_RTC_H
+#define _E2K_RTC_H
+
+extern noinline int sclk_register(void *);
+
+#endif
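Looking back at SAVE_TIRS() in regs_state.h above: it walks the frozen TIR LIFO from the newest entry down to TIR0, stores each TIR_hi/TIR_lo pair into the caller's array, un-freezes the LIFO, and yields the accumulated exception mask. A hedged usage sketch; the trap-structure field names here are illustrative, not taken from the patch:

	unsigned long exceptions;

	/* fills trap->TIRs[], sets trap->nr_TIRs, un-freezes the TIR LIFO */
	exceptions = SAVE_TIRS(trap->TIRs, trap->nr_TIRs, false);
	if (exceptions)
		;	/* dispatch on the exc_all_mask / aau_exc_mask bits */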
diff --git a/arch/e2k/include/asm/rwsem.h b/arch/e2k/include/asm/rwsem.h
new file mode 100644
index 0000000..728a334
--- /dev/null
+++ b/arch/e2k/include/asm/rwsem.h
@@ -0,0 +1,212 @@
+/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ *
+ * Derived from asm-x86/semaphore.h
+ *
+ *
+ * The MSW of the count is the negated number of active writers and waiting
+ * lockers, and the LSW is the total number of active locks.
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
+ * uncontended lock.  This can be determined because XADD returns the old
+ * value.  Readers increment by 1 and see a positive value when uncontended,
+ * negative if there are writers (and maybe readers) waiting (in which case
+ * the reader goes to sleep).
+ *
+ * The value of WAITING_BIAS supports up to 32766 waiting processes.  This can
+ * be extended to 65534 by manually checking the whole MSW rather than relying
+ * on the S flag.
+ *
+ * The value of ACTIVE_BIAS supports up to 65535 active processes.
+ *
+ * This should be totally fair - if anything is waiting, a process that wants a
+ * lock will go to the back of the queue.  When the currently active lock is
+ * released, if there's a writer at the front of the queue, then that and only
+ * that will be woken up; if there's a bunch of consecutive readers at the
+ * front, then they'll all be woken up, but no other readers will be.
+ */
+
+#ifndef _E2K_RWSEM_H
+#define _E2K_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+
+#include
+#include
+
+#define RWSEM_UNLOCKED_VALUE		0L
+#define RWSEM_ACTIVE_BIAS		1L
+#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * lock for reading
+ */
+
+static inline int ___down_read(struct rw_semaphore *sem)
+{
+	long newcount;
+
+#ifndef CONFIG_SMP
+	newcount = sem->count.counter + RWSEM_ACTIVE_READ_BIAS;
+	sem->count.counter = newcount;
+#else
+	newcount = __api_atomic_op(RWSEM_ACTIVE_READ_BIAS,
+			&sem->count, d, "addd", LOCK_MB);
+#endif
+
+	return (newcount <= 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_read(sem)))
+		rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_read(sem)))
+		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	long newcount;
+
+#ifndef CONFIG_SMP
+	if (sem->count.counter >= 0)
+		sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
+	newcount = sem->count.counter;
+#else
+	newcount = __api_atomic64_add_if_not_negative(RWSEM_ACTIVE_READ_BIAS,
+			&sem->count, LOCK_MB);
+#endif
+
+	return newcount > 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline long ___down_write(struct rw_semaphore *sem)
+{
+	long newcount;
+
+#ifndef CONFIG_SMP
+	newcount = sem->count.counter + RWSEM_ACTIVE_WRITE_BIAS;
+	sem->count.counter = newcount;
+#else
+	newcount = __api_atomic_op(RWSEM_ACTIVE_WRITE_BIAS,
+			&sem->count, d, "addd", LOCK_MB);
+#endif
+	return newcount;
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_write(sem) != RWSEM_ACTIVE_WRITE_BIAS))
+		rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_write(sem) != RWSEM_ACTIVE_WRITE_BIAS))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+	return 0;
+}
+
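The bias constants make the state transitions easy to trace with concrete 64-bit values; a worked example using only the defines above:

	/*
	 * unlocked:                    count == 0x0000000000000000
	 * __down_read():   count + 1 -> 0x0000000000000001  (> 0: fast path)
	 * __down_write() on an unlocked sem:
	 *   count + RWSEM_ACTIVE_WRITE_BIAS -> 0xffffffff00000001,
	 *   which equals RWSEM_ACTIVE_WRITE_BIAS, so the writer owns the lock
	 * __down_read() while write-locked:
	 *   count + 1 -> 0xffffffff00000002  (negative: slow path, sleep)
	 * __up_write() in the uncontended case:
	 *   count - RWSEM_ACTIVE_WRITE_BIAS -> 0 again
	 */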
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	long oldcount;
+
+#ifndef CONFIG_SMP
+	oldcount = sem->count.counter;
+	if (oldcount == RWSEM_UNLOCKED_VALUE)
+		sem->count.counter = RWSEM_ACTIVE_WRITE_BIAS;
+#else
+	oldcount = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+			RWSEM_ACTIVE_WRITE_BIAS);
+#endif
+
+	return oldcount == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long newcount;
+
+#ifndef CONFIG_SMP
+	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
+	newcount = sem->count.counter;
+#else
+	newcount = __api_atomic_op(RWSEM_ACTIVE_READ_BIAS,
+			&sem->count, d, "subd", RELEASE_MB);
+#endif
+
+	if (unlikely(newcount < -1))
+		if ((newcount & RWSEM_ACTIVE_MASK) == 0)
+			rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	long newcount;
+
+#ifndef CONFIG_SMP
+	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
+	newcount = sem->count.counter;
+#else
+	newcount = __api_atomic_op(RWSEM_ACTIVE_WRITE_BIAS,
+			&sem->count, d, "subd", RELEASE_MB);
+#endif
+	if (unlikely(newcount < 0))
+		rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	long newcount;
+
+#ifndef CONFIG_SMP
+	newcount = sem->count.counter - RWSEM_WAITING_BIAS;
+	sem->count.counter = newcount;
+#else
+	newcount = __api_atomic_op(RWSEM_WAITING_BIAS,
+			&sem->count, d, "subd", RELEASE_MB);
+#endif
+
+	if (unlikely(newcount < 0))
+		rwsem_downgrade_wake(sem);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _E2K_RWSEM_H */
diff --git a/arch/e2k/include/asm/sbus.h b/arch/e2k/include/asm/sbus.h
new file mode 100644
index 0000000..8061c7a
--- /dev/null
+++ b/arch/e2k/include/asm/sbus.h
@@ -0,0 +1,59 @@
+#ifndef E2K_SBUS_H
+#define E2K_SBUS_H
+
+#include
+#include
+
+
+/**
+ * SBus accessors.
+ */
+
+#define _ALIGN_MASK (~(uint64_t)0x3)
+
+static __inline__ u8 _sbus_readb(unsigned long addr)
+{
+	/*
+	 * The PCI2SBUS bridge does not handle 1-byte reads correctly
+	 * (hardware bug), so read the aligned 32-bit word and extract
+	 * the requested byte.
+	 */
+	return (*(volatile uint32_t *)(addr & _ALIGN_MASK) >> (addr & (0x3))*0x8) & 0xFF;
+}
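A worked example of the byte-lane arithmetic in _sbus_readb(), for a 4-byte-aligned base and addr = base + 6 (so addr & 0x3 == 2):

	/*
	 *   word = *(volatile uint32_t *)(addr & _ALIGN_MASK);  aligned read at base + 4
	 *   byte = (word >> (2 * 8)) & 0xFF;                    third byte lane
	 *
	 * i.e. sbus_readb(base + 6) returns bits 23:16 of the 32-bit word
	 * at base + 4.
	 */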
+
+static __inline__ u16 _sbus_readw(unsigned long addr)
+{
+	return be16_to_cpu((readw(addr)));
+}
+
+static __inline__ u32 _sbus_readl(unsigned long addr)
+{
+	return be32_to_cpu((readl(addr)));
+}
+
+static __inline__ void _sbus_writeb(u8 b, unsigned long addr)
+{
+	writeb(b, addr);
+}
+
+static __inline__ void _sbus_writew(u16 w, unsigned long addr)
+{
+	writew(cpu_to_be16(w), addr);
+}
+
+static __inline__ void _sbus_writel(u32 l, unsigned long addr)
+{
+	writel(cpu_to_be32(l), addr);
+}
+
+#define sbus_readb(a)		_sbus_readb((unsigned long)(a))
+#define sbus_readw(a)		_sbus_readw((unsigned long)(a))
+#define sbus_readl(a)		_sbus_readl((unsigned long)(a))
+#define sbus_writeb(v, a)	_sbus_writeb(v, (unsigned long)(a))
+#define sbus_writew(v, a)	_sbus_writew(v, (unsigned long)(a))
+#define sbus_writel(v, a)	_sbus_writel(v, (unsigned long)(a))
+
+static inline int sbus_addr_is_valid(unsigned long ba)
+{
+	u8 value = sbus_readb(ba);
+	return (value == 0xFD) || (value == 0xF1);
+}
+
+#endif
diff --git a/arch/e2k/include/asm/sclkr.h b/arch/e2k/include/asm/sclkr.h
new file mode 100644
index 0000000..424724b
--- /dev/null
+++ b/arch/e2k/include/asm/sclkr.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_E2K_SCLKR_H
+#define _ASM_E2K_SCLKR_H
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+extern __interrupt u64 fast_syscall_read_sclkr(void);
+
+/* SCLKR/SCLKM1/SCLKM2 are implemented only on e2s and later machines */
+extern unsigned long native_read_SCLKR_reg_value(void);
+extern unsigned long native_read_SCLKM1_reg_value(void);
+extern unsigned long native_read_SCLKM2_reg_value(void);
+extern void native_write_SCLKR_reg_value(unsigned long reg_value);
+extern void native_write_SCLKM1_reg_value(unsigned long reg_value);
+extern void native_write_SCLKM2_reg_value(unsigned long reg_value);
+
+struct prev_sclkr {
+	atomic64_t res;
+} ____cacheline_aligned_in_smp;
+extern struct prev_sclkr prev_sclkr;
+
+#define SCLKR_NO	0
+#define SCLKR_INT	1
+#define SCLKR_RTC	2
+#define SCLKR_EXT	3
+
+extern struct clocksource clocksource_sclkr;
+extern long long sclkr_sched_offset;
+extern int sclkr_initialized;
+
+
+#define SCLKR_SRC_LEN	4
+extern char sclkr_src[SCLKR_SRC_LEN];
+extern int sclkr_mode;
+extern int sclk_register(void *);
+extern struct clocksource clocksource_sclkr;
+extern int proc_sclkr(struct ctl_table *, int,
+		void __user *, size_t *, loff_t *);
+extern void sclk_set_deviat(int dev);
+extern u64 raw_read_sclkr(void);
+DECLARE_PER_CPU(int, ema_freq);
+
+extern __interrupt u64 fast_syscall_read_sclkr(void);
+extern struct clocksource *curr_clocksource;
+extern int redpill;
+
+#define xchg_prev_sclkr_res(res) \
+	__api_atomic64_fetch_xchg_if_below(res, &prev_sclkr.res.counter, RELAXED_MB)
+
+#define SHF_ALPHA	2
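sclkr_to_ns() below smooths the measured frequency with a shift-based exponential moving average before converting the sub-second counter to nanoseconds, and then compares the result against the previously published value so the returned time never steps backwards. A quick numeric check of the EMA update rule with SHF_ALPHA = 2 (alpha = 1/4):

	/*
	 *   ema' = ema + ((freq - ema + (1 << (SHF_ALPHA - 1))) >> SHF_ALPHA)
	 *
	 * e.g. ema = 1000000000, freq = 1000000400:
	 *   (400 + 2) >> 2 = 100, so ema' = 1000000100 -- each call moves the
	 * stored frequency a (rounded) quarter of the way toward the latest
	 * measurement.
	 */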
+static __always_inline u64 sclkr_to_ns(u64 sclkr, u64 freq)
+{
+	u64 sclkr_sec, sclkr_lo, res, before;
+	e2k_sclkm1_t sclkm1 = READ_SCLKM1_REG();
+#ifdef CONFIG_SMP
+	struct thread_info *ti = READ_CURRENT_REG();
+	struct task_struct *task = (void *) ti
+			- offsetof(struct task_struct, thread_info);
+	typeof(ema_freq) *pema_freq = per_cpu_ptr(&ema_freq, task->cpu);
+#else
+	typeof(ema_freq) *pema_freq =
+			(typeof(ema_freq) *)per_cpu_ptr(&ema_freq, 0);
+#endif
+	/* we cannot use __this_cpu_read/write(ema_freq) in a fast syscall: */
+
+	sclkr_sec = sclkr >> 32;
+	sclkr_lo = (u64) (u32) sclkr;
+
+	if (sclkr_lo >= freq)
+		sclkr_lo = freq - 1;
+
+	/* Use an exponential moving average (ema) of the frequency:
+	 *	ema = alpha * cur_freq + (1 - alpha) * ema;
+	 * or, in the shorter incremental form:
+	 *	ema = ema + (cur_freq - ema) * alpha;
+	 * This makes the resulting time smoother, at the cost of using a
+	 * slightly stale frequency.  alpha = 2 / (period + 1):
+	 * for a moving average period of 3, alpha = 1/2, i.e. SHF_ALPHA = 1;
+	 * for a moving average period of 7, alpha = 1/4, i.e. SHF_ALPHA = 2.
+	 *
+	 * 1 << (SHF_ALPHA - 1) is added for rounding.
+	 */
+	*pema_freq += (freq - *pema_freq + (1 << (SHF_ALPHA - 1))) >> SHF_ALPHA;
+	res = sclkr_sec * NSEC_PER_SEC + sclkr_lo * NSEC_PER_SEC / *pema_freq;
+
+	/* sclkm3 holds the accumulated time the guest spent off the cpu */
+	if (!redpill && sclkm1.sclkm3)
+		res -= READ_SCLKM3_REG();
+	before = xchg_prev_sclkr_res(res);
+	if (before > res)
+		res = before;
+
+	return res;
+}
+
+static inline bool use_sclkr_sched_clock(void)
+{
+	return machine.native_iset_ver >= E2K_ISET_V3 && sclkr_initialized;
+}
+
+#endif /* _ASM_E2K_SCLKR_H */
diff --git a/arch/e2k/include/asm/seccomp.h b/arch/e2k/include/asm/seccomp.h
new file mode 100644
index 0000000..af4633e
--- /dev/null
+++ b/arch/e2k/include/asm/seccomp.h
@@ -0,0 +1,18 @@
+/*
+ * Copied from arch/arm64/include/asm/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include
+
+#include
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/e2k/include/asm/secondary_space.h b/arch/e2k/include/asm/secondary_space.h
new file mode 100644
index 0000000..eaaad3b
--- /dev/null
+++ b/arch/e2k/include/asm/secondary_space.h
@@ -0,0 +1,106 @@
+/*
+ * Secondary space support for the E2K binary compiler
+ * asm/secondary_space.h
+ */
+#ifndef _SECONDARY_SPACE_H
+#define _SECONDARY_SPACE_H
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#endif	/* !__ASSEMBLY__ */
+
+#define BINCO_PROTOCOL_VERSION	4
+
+#define NATIVE_IS_NEXT_ELBRUS_2S \
+		((int)machine.native_iset_ver >= ELBRUS_2S_ISET)
+#define NATIVE_SS_SIZE \
+		((NATIVE_IS_NEXT_ELBRUS_2S) ? \
+			(0x800000000000UL) : (0x100000000UL))
+#define NATIVE_SS_ADDR_START \
+		(NATIVE_IS_NEXT_ELBRUS_2S ? \
+			0x0000400000000000L : 0x0000100000000000L)
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is a native guest kernel (not paravirtualized with pv_ops) */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* it is a paravirtualized host and guest kernel */
+#include
+#else	/* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
+/* it is a native kernel without any virtualization */
+/* or a host kernel with virtualization support */
+
+#define IS_NEXT_ELBRUS_2S	NATIVE_IS_NEXT_ELBRUS_2S
+#define SS_SIZE			NATIVE_SS_SIZE
+#define SS_ADDR_START		NATIVE_SS_ADDR_START
+#endif	/* ! CONFIG_KVM_GUEST_KERNEL */
+
+/*
+ * If updating this value, do not forget to update E2K_ARG3_MASK (the
+ * mask for bits 63-45) and PAGE_OFFSET.
+ */
+#define SS_ADDR_END	(SS_ADDR_START + SS_SIZE)
+
+#ifdef CONFIG_SECONDARY_SPACE_SUPPORT
+#define ADDR_IN_SS(a)	((a >= SS_ADDR_START) && (a < SS_ADDR_END))
+#else
+#define ADDR_IN_SS(a)	0
+#endif
+
+#define DEBUG_SS_MODE	0	/* Secondary Space Debug */
+#define DebugSS(...)	DebugPrint(DEBUG_SS_MODE, ##__VA_ARGS__)
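ADDR_IN_SS() above is the bounds test for the secondary (binary-compatibility) address space. A small hedged sketch of the kind of range check a caller might build on it; the helper name is illustrative:

	/* illustrative helper, not part of the original header */
	static inline int example_range_in_ss(unsigned long addr, unsigned long len)
	{
		return len != 0 && ADDR_IN_SS(addr) && ADDR_IN_SS(addr + len - 1);
	}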
+
+#ifndef __ASSEMBLY__
+
+extern
+s64 sys_el_binary(s64 work, s64 arg2, s64 arg3, s64 arg4);
+
+/*
+ * Interface of the el_binary() syscall.
+ * Work argument (arg1) values:
+ */
+#define GET_SECONDARY_SPACE_OFFSET	0
+#define SET_SECONDARY_REMAP_BOUND	1
+#define SET_SECONDARY_DESCRIPTOR	2
+#define SET_SECONDARY_MTRR_DEPRECATED	3
+#define GET_SECONDARY_MTRR_DEPRECATED	4
+#define GET_SNXE_USAGE			5
+#define TGKILL_INFO_DEPRECATED		6
+#define SIG_EXIT_GROUP			7
+#define FLUSH_CMD_CACHES_DEPRECATED	8
+#define SET_SC_RSTRT_IGNORE_DEPRECATED	9
+#define SET_RP_BOUNDS_AND_IP		10
+#define SET_SECONDARY_64BIT_MODE	11
+#define GET_PROTOCOL_VERSION		12
+#define SET_IC_NEED_FLUSH_ON_SWITCH	13
+
+/* Selector numbers for GET_SECONDARY_SPACE_OFFSET */
+enum sel_num {
+	CS_SELECTOR	= 0,
+	DS_SELECTOR	= 1,
+	ES_SELECTOR	= 2,
+	SS_SELECTOR	= 3,
+	FS_SELECTOR	= 4,
+	GS_SELECTOR	= 5,
+};
+
+#define E2K_ARG3_MASK	(0xffffe000ffffffffLL)
+#define I32_ADDR_TO_E2K(arg) \
+({ \
+	s64 argm; \
+	argm = arg; \
+	if (machine.native_iset_ver < E2K_ISET_V3) { \
+		argm &= E2K_ARG3_MASK; \
+		argm |= SS_ADDR_START; \
+	} \
+	argm; \
+})
+
+#endif	/* !__ASSEMBLY__ */
+#endif	/* _SECONDARY_SPACE_H */
diff --git a/arch/e2k/include/asm/sections.h b/arch/e2k/include/asm/sections.h
new file mode 100644
index 0000000..05917ef
--- /dev/null
+++ b/arch/e2k/include/asm/sections.h
@@ -0,0 +1,54 @@
+#ifndef _E2K_SECTIONS_H
+#define _E2K_SECTIONS_H
+
+/* nothing to see, move along */
+#ifndef __ASSEMBLY__
+#include
+#endif /* ! __ASSEMBLY__ */
+
+#if (defined __e2k__) && (defined __LCC__)
+#define __interrupt	__attribute__((__check_stack__))
+#else
+#define __interrupt	__attribute__((__interrupt__))
+#endif
+
+#ifndef CONFIG_RECOVERY
+#define __init_recv	__init
+#define __initdata_recv	__initdata
+#else
+#define __init_recv
+#define __initdata_recv
+#endif	/* ! (CONFIG_RECOVERY) */
+
+#if !defined(CONFIG_RECOVERY) && !defined(CONFIG_SERIAL_PRINTK) && \
+	!defined(CONFIG_LMS_CONSOLE) && !defined(CONFIG_E2K_KEXEC)
+#define __init_cons	__init
+#else
+#define __init_cons
+#endif	/* boot console used after init completion */
+
+#ifndef __ASSEMBLY__
+extern char _start[];
+extern char __ttable_start[];
+extern char __ttable_end[];
+extern char __C1_wait_trap_start[], __C1_wait_trap_end[];
+extern char __C3_wait_trap_start[], __C3_wait_trap_end[];
+extern char __init_text_begin[], __init_text_end[];
+extern char __init_data_begin[], __init_data_end[];
+extern char __node_data_start[], __node_data_end[];
+extern char __common_data_begin[], __common_data_end[];
+extern char _edata_bss[];
+extern char _t_entry[], _t_entry_end[];
+extern char __entry_handlers_start[], __entry_handlers_end[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
+#endif /* ! __ASSEMBLY__ */
+
+#ifdef CONFIG_NUMA
+#define __nodedata	__section(.node.data)
+#define __NODEDATA	.section ".node.data","aw"
+#else	/* ! CONFIG_NUMA */
+#define __nodedata	__section(data)
+#define __NODEDATA	.data
+#endif	/* CONFIG_NUMA */
+
+#endif /* _E2K_SECTIONS_H */
diff --git a/arch/e2k/include/asm/sembuf.h b/arch/e2k/include/asm/sembuf.h
new file mode 100644
index 0000000..8a95f09
--- /dev/null
+++ b/arch/e2k/include/asm/sembuf.h
@@ -0,0 +1,22 @@
+#ifndef _E2K_SEMBUF_H_
+#define _E2K_SEMBUF_H_
+
+/*
+ * The semid64_ds structure for the E2K architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_SEMBUF_H_ */ diff --git a/arch/e2k/include/asm/serial.h b/arch/e2k/include/asm/serial.h new file mode 100644 index 0000000..2482000 --- /dev/null +++ b/arch/e2k/include/asm/serial.h @@ -0,0 +1,455 @@ +/* + * include/asm-i386/serial.h + */ + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. + */ + +#define BASE_BAUD ( 1843200 / 16 ) + +/* Standard COM flags (except for COM4, because of the 8514 problem) */ +#ifdef CONFIG_SERIAL_DETECT_IRQ +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) +#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) +#else +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) +#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF +#endif + +#ifdef CONFIG_SERIAL_MANY_PORTS +#define FOURPORT_FLAGS ASYNC_FOURPORT +#define ACCENT_FLAGS 0 +#define BOCA_FLAGS 0 +#define HUB6_FLAGS 0 +#define RS_TABLE_SIZE 64 +#else +#define RS_TABLE_SIZE +#endif + +#define NS16550_SERIAL_PORT_0 0x3f8 +#define NS16550_SERIAL_PORT_1 0x2f8 +#define NS16550_SERIAL_PORT_2 0x3e8 +#define NS16550_SERIAL_PORT_3 0x2e8 + +#define SERIAL_PORT_DFNS \ + /* UART CLK PORT IRQ FLAGS */ \ + { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ + { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ + { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ + { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ + +#define AM85C30_RES_Tx_P 0x28 +#define AM85C30_EXT_INT_ENAB 0x01 +#define AM85C30_TxINT_ENAB 0x02 +#define AM85C30_RxINT_MASK 0x18 + + +/* AM85C30 WRITE Registers */ + +#define AM85C30_WR0 0x00 +#define AM85C30_WR1 0x01 +#define AM85C30_WR2 0x02 +#define AM85C30_WR3 0x03 +#define AM85C30_WR4 0x04 +#define AM85C30_WR5 0x05 +#define AM85C30_WR6 0x06 +#define AM85C30_WR7 0x07 +#define AM85C30_WR8 0x08 +#define AM85C30_WR9 0x09 +#define AM85C30_WR10 0x0a +#define AM85C30_WR11 0x0b +#define AM85C30_WR12 0x0c +#define AM85C30_WR13 0x0d +#define AM85C30_WR14 0x0e +#define AM85C30_WR15 0x0f + +/* READ (Status) Registers */ + +#define AM85C30_RR0 0x00 +#define AM85C30_RR1 0x01 +#define AM85C30_RR2 0x02 +#define AM85C30_RR3 0x03 +#define AM85C30_RR8 0x08 +#define AM85C30_RR10 0x0a +#define AM85C30_RR12 0x0c +#define AM85C30_RR13 0x0d + +#define AM85C30_D0 (0x01 << 0) +#define AM85C30_D1 (0x01 << 1) +#define AM85C30_D2 (0x01 << 2) +#define AM85C30_D3 (0x01 << 3) +#define AM85C30_D4 (0x01 << 4) +#define AM85C30_D5 (0x01 << 5) +#define AM85C30_D6 (0x01 << 6) +#define AM85C30_D7 (0x01 << 7) + +/* WR0 */ +/* D2,D1,D0 +* Register Access Pointer +* +* 000 - N0, [N8]* +* 001 - N1, [N9]* +* 010 - N2, [N10]* +* 011 - N3, [N11]* +* 100 - N4, [N12]* +* 101 - N5, [N13]* +* 110 - N6, [N14]* +* 111 - N7, [N15]* +* +* if Point High Register Group = 1 +* +* D5,D4,D3 +* +* SCC Command +* +* 000 - Null Code +* 001 - Point High Register Group +* 010 - Reset Ext/Status Interrupts +* 011 - Send Abort +* 100 - Enable Int. on Next Rx Character +* 101 - Reset Tx Int. 
Pending +* 110 - Error Reset +* 111 - Reset Highest IUS +* +* D7,D6 +* SCC Command +* +* 00 - Null Code +* 01 - Reset Rx CRC Checker +* 10 - Reset Tx CRC Generator +* 11 - Reset Tx Underrun/EOM Latch +*/ + +/* WR1 */ +/* D0 +* Ext. Int. Enable +* D1 +* Tx Int. Enable +* D2 +* Parity is Special Condition +* D4,D3 +* Rx Int Mode +* +* 00 - Rx Int Disable +* 01 - Rx Int on First Char. or Special Condition +* 10 - Int on All Rx Char. or Special Condition +* 11 - Rx Int. on Special Condition Only +* D5 +* Wait/DMA Request on Receive/Transmit +* D6 +* Wait/DMA Request Function +* D7 +* Wait/DMA Request Enable +*/ + +/* WR2 */ +/* D7 - D0 +* Interrupt Vector +*/ + +/* WR3 */ +/* D0 +* Rx Enable +* D1 +* Sync Character Load Inhibit +* D2 +* Address Search Mode (SDLC) +* D3 +* Rx CRC Enable +* D4 +* Enter Hunt Mode +* D5 +* Auto Enable +* D7,D6 +* +* 00 - Rx 5 Bits / Character +* 01 - Rx 6 Bits / Character +* 10 - Rx 7 Bits / Character +* 11 - Rx 8 Bits / Character +*/ + +/* WR4 */ +/* D0 +* ParityEnable +* D1 +* Parity Even(0) / Odd(1) +* D3,D2 +* +* 00 - Sync Modes Enable +* 01 - 1 Stop Bit / Character +* 10 - 1.5 Stop Bits / Character +* 11 - 2 Stop Bits / Character +* D5,D4 +* +* 00 - 8-Bit Sync Character +* 01 - 16-Bit Sync Character +* 10 - SDLC Mode +* 11 - External Sync Mode +* D7,D6 +* +* 00 - X1 Clock Mode +* 01 - X16 Clock Mode +* 10 - X32 Clock Mode +* 11 - X64 Clock Mode +*/ + +/* WR5 */ +/* D0 +* Tx CRC Enable +* D1 +* RTS +* D2 +* SDLC-/CRC-16 +* D3 +* Tx Enable +* D4 +* Send Break +* D6,D5 +* +* 00 - Tx 5 Bits / Character +* 01 - Tx 6 Bits / Character +* 10 - Tx 7 Bits / Character +* 11 - Tx 8 Bits / Character +* D7 +* DTR +*/ + +/* WR6 */ +/* D5-D0 +* xN constant +* D7,D6 +* Reserved (not used in asynchronous mode) +*/ + +/* WR7 */ +/* D6-D0 +* Reserved (not used in asynchronous mode) +* D7 +* xN Mode Enable +*/ + +/* WR8 */ +/* D7-D0 +* Transmit Buffer +*/ + +/* WR9 */ +/* D0 +* Vector Includes Status +* D1 +* No Vector +* D2 +* Disable Lower Chain +* D3 +* Master Interrupt Enable +* D4 +* Status High/Low_ +* D5 +* Interrupt Masking Without INTACK_ +* D7-D6 +* +* 00 - No Reset +* 01 - Channel B Reset +* 10 - Channel A Reset +* 11 - Force Hardware Reset +*/ + +/* WR10 */ +/* D0 +* 6 bit / 8 bit SYNC +* D1 +* Loop Mode +* D2 +* Abort/Flag on Underrun +* D3 +* Mark/Flag Idle +* D4 +* Go Active on Poll +* D6-D5 +* +* 00 - NRZ +* 01 - NRZI +* 10 - FM1 (Transition = 1) +* 11 - FM0 (Transition = 0) +* D7 +* CRC Preset '1' or '0' +*/ + +/* WR11 */ +/* D1-D0 +* +* 00 - TRxC Out = XTAL output +* 01 - TRxC Out = Transmit Clock +* 10 - TRxC Out = BRG output +* 11 - TRxC Out = DPLL output +* D2 +* TRxC O/I +* D4-D3 +* +* 00 - Transmit Clock = RTxC pin +* 01 - Transmit Clock = TRxC pin +* 10 - Transmit Clock = BRG output +* 11 - Transmit Clock = DPLL output +* D6-D5 +* +* 00 - Receive Clock = RTxC pin +* 01 - Receive Clock = TRxC pin +* 10 - Receive Clock = BRG output +* 11 - Receive Clock = DPLL output +* D7 +* RTxC XTAL / NO XTAL +*/ + +/* WR12 */ +/* D7-D0 +* Lower Byte of Time Constant +*/ + +/* WR13 */ +/* D7-D0 +* Upper Byte of Time Constant +*/ + +/* WR14 */ +/* D0 +* BRG Enable +* D1 +* BRG Source +* D2 +* DTR / REQUESTt Function +* D3 +* Auto Echo +* D4 +* Local Loopback +* D7-D5 +* +* 000 - Null Command +* 001 - Enter Search Mode +* 010 - Reset Missing Clock +* 011 - Disable DPLL +* 100 - Set Source = BR Generator +* 101 - Set Source = RTxC_ +* 110 - Set FM Mode +* 111 - Set NRZI Mode +*/ + +/* WR15 */ +/* D0 +* SDLC/HDLC Enhancement Enable +* D1 +* Zero Count IE (Interrupt Enable) +* 
D2 +* 10 * 19-bit Frame Status FIFO Enable +* D3 +* DCD IE +* D4 +* Sync/Hunt IE +* D5 +* CTS IE +* D6 +* Tx Underrun / EOM IE +* D7 +* Break/Abort IE +*/ + + +/* RR0 */ +/* D0 +* Rx Character Availiable +* D1 +* Zero Count +* D2 +* Tx Buffer Empty +* D3 +* DCD +* D4 +* Sync/Hunt +* D5 +* CTS +* D6 +* Tx Underrun / EOM +* D7 +* Break/Abort +*/ + +/* RR1 */ +/* D0 +* All Sent +* D1 +* Residue Code 2 +* D2 +* Residue Code 1 +* D3 +* Residue Code 0 +* D4 +* Parity Error +* D5 +* Rx Overrun Error +* D6 +* CRC / Framing Error +* D7 +* End of Frame (SDLC) +*/ + +/* RR2 */ +/* D7-D0 +* Interrupt Vector +* +* Channel A RR2 = WR2 +* Channel B RR2 = Interrupt Vector Modified* +* +* * +* D3 D2 D1 Status High/Low = 0 +* D4 D5 D6 Status High/Low = 1 +* +* 0 0 0 Ch B Transmit Buffer Empty +* 0 0 1 Ch B External/Status Change +* 0 1 0 Ch B Receive Char. Availiable +* 0 1 1 Ch B Special Receive Condition +* 1 0 0 Ch A Transmit Buffer Empty +* 1 0 1 Ch A External/Status Change +* 1 1 0 Ch A Receive Char. Availiable +* 1 1 1 Ch A Special Receive Condition +*/ + +/* RR3 */ +/* D0 +* Channel B Ext/Status IP (Interrupt Pending) +* D1 +* Channel B Tx IP +* D2 +* Channel B Rx IP +* D3 +* Channel A Ext/Status IP +* D4 +* Channel A Tx IP +* D5 +* Channel A Rx IP +* D7-D6 +* Always 00 +*/ + +/* RR8 */ +/* D7-D0 +* Receive Buffer +*/ + +/* RR10 */ +/* D7-D0 +* Reserved (not used in asynchronous mode) +*/ + +/* RR12 */ +/* D7-D0 +* Lower Byte of Time Constant +*/ + +/* RR13 */ +/* D7-D0 +* Upper Byte of Time Constant +*/ diff --git a/arch/e2k/include/asm/set_memory.h b/arch/e2k/include/asm/set_memory.h new file mode 100644 index 0000000..638fb7b --- /dev/null +++ b/arch/e2k/include/asm/set_memory.h @@ -0,0 +1,24 @@ +#ifndef _ASM_E2K_SET_MEMORY_H +#define _ASM_E2K_SET_MEMORY_H + +#include +#include + +extern int set_memory_ro(unsigned long addr, int numpages); +extern int set_memory_rw(unsigned long addr, int numpages); +extern int set_memory_x(unsigned long addr, int numpages); +extern int set_memory_nx(unsigned long addr, int numpages); + +extern int set_memory_uc(unsigned long addr, int numpages); +extern int set_memory_wc(unsigned long addr, int numpages); +extern int set_memory_wb(unsigned long addr, int numpages); + +extern int set_pages_array_uc(struct page **pages, int addrinarray); +extern int set_pages_array_wc(struct page **pages, int addrinarray); +extern int set_pages_array_wb(struct page **pages, int addrinarray); + +int set_pages_uc(struct page *page, int numpages); +int set_pages_wc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); + +#endif diff --git a/arch/e2k/include/asm/setup.h b/arch/e2k/include/asm/setup.h new file mode 100644 index 0000000..b7b7f84 --- /dev/null +++ b/arch/e2k/include/asm/setup.h @@ -0,0 +1,55 @@ +#ifndef __E2K_SETUP_H +#define __E2K_SETUP_H + +#include +#include +#include +#include + +extern void __init e2k_start_kernel(void); +extern void __init native_setup_machine(void); +extern void __init e2k_start_kernel_switched_stacks(void); +extern void e2k_start_secondary_switched_stacks(int cpuid, int cpu); + +static inline void native_bsp_switch_to_init_stack(void) +{ + unsigned long stack_base = (unsigned long) &init_stack; + + NATIVE_SWITCH_TO_KERNEL_STACK( + stack_base + KERNEL_P_STACK_OFFSET, KERNEL_P_STACK_SIZE, + stack_base + KERNEL_PC_STACK_OFFSET, KERNEL_PC_STACK_SIZE, + stack_base + KERNEL_C_STACK_OFFSET, KERNEL_C_STACK_SIZE); +} + +static inline void native_setup_bsp_idle_task(int cpu) +{ + /* + * Set pointer of current task structure 
to kernel initial task + */ + set_current_thread_info(&init_task.thread_info, &init_task); +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is native guest kernel */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#else /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ +/* It is native host or host with virtualization support */ +static inline void arch_setup_machine(void) +{ + native_setup_machine(); +} +static inline void bsp_switch_to_init_stack(void) +{ + native_bsp_switch_to_init_stack(); +} +static inline void setup_bsp_idle_task(int cpu) +{ + native_setup_bsp_idle_task(cpu); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +#include +#endif /* __E2K_SETUP_H */ diff --git a/arch/e2k/include/asm/shmbuf.h b/arch/e2k/include/asm/shmbuf.h new file mode 100644 index 0000000..a310688 --- /dev/null +++ b/arch/e2k/include/asm/shmbuf.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _E2K_ASM_SHMBUF_H_ +#define _E2K_ASM_SHMBUF_H_ + +#include + +#if defined(CONFIG_PROTECTED_MODE) +/* Outputs shared segment size for the given ID: */ +unsigned long get_shm_segm_size(int shmid); +#endif + +#endif /* _E2K_ASM_SHMBUF_H_ */ diff --git a/arch/e2k/include/asm/shmparam.h b/arch/e2k/include/asm/shmparam.h new file mode 100644 index 0000000..b19db20 --- /dev/null +++ b/arch/e2k/include/asm/shmparam.h @@ -0,0 +1,11 @@ +#ifndef _E2K_SHMPARAM_H_ +#define _E2K_SHMPARAM_H_ + +/* + * SHMLBA controls minimum alignment at which shared memory segments + * get attached. + */ + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _E2K_SHMPARAM_H_ */ diff --git a/arch/e2k/include/asm/sic_regs.h b/arch/e2k/include/asm/sic_regs.h new file mode 100644 index 0000000..cd6fd3d --- /dev/null +++ b/arch/e2k/include/asm/sic_regs.h @@ -0,0 +1,1255 @@ +#ifndef _E2K_SIC_REGS_H_ +#define _E2K_SIC_REGS_H_ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#define SIC_IO_LINKS_COUNT 2 + +/* + * NBSR registers addresses (offsets in NBSR area) + */ + +#define SIC_st_p 0x00 + +#define SIC_st_core0 0x100 +#define SIC_st_core1 0x104 +#define SIC_st_core2 0x108 +#define SIC_st_core3 0x10c +#define SIC_st_core4 0x110 +#define SIC_st_core5 0x114 +#define SIC_st_core6 0x118 +#define SIC_st_core7 0x11c +#define SIC_st_core8 0x120 +#define SIC_st_core9 0x124 +#define SIC_st_core10 0x128 +#define SIC_st_core11 0x12c +#define SIC_st_core12 0x130 +#define SIC_st_core13 0x134 +#define SIC_st_core14 0x138 +#define SIC_st_core15 0x13c + +#define SIC_st_core(num) (0x100 + (num) * 4) + +#define SIC_rt_ln 0x08 +#define SIC_rt_lcfg0 0x10 +#define SIC_rt_lcfg1 0x14 +#define SIC_rt_lcfg2 0x18 +#define SIC_rt_lcfg3 0x1c + +#define SIC_rt_mhi0 0x20 +#define SIC_rt_mhi1 0x24 +#define SIC_rt_mhi2 0x28 +#define SIC_rt_mhi3 0x2c + +#define SIC_rt_mlo0 0x30 +#define SIC_rt_mlo1 0x34 +#define SIC_rt_mlo2 0x38 +#define SIC_rt_mlo3 0x3c + +#define SIC_rt_pcim0 0x40 +#define SIC_rt_pcim1 0x44 +#define SIC_rt_pcim2 0x48 +#define SIC_rt_pcim3 0x4c + +#define SIC_rt_pciio0 0x50 +#define SIC_rt_pciio1 0x54 +#define SIC_rt_pciio2 0x58 +#define SIC_rt_pciio3 0x5c + +#define SIC_rt_ioapic0 0x60 +#define SIC_rt_ioapic1 0x64 +#define SIC_rt_ioapic2 0x68 +#define SIC_rt_ioapic3 0x6c + +#define SIC_rt_pcimp_b0 0x70 +#define SIC_rt_pcimp_b1 0x74 +#define SIC_rt_pcimp_b2 0x78 +#define SIC_rt_pcimp_b3 0x7c + +#define SIC_rt_pcimp_e0 0x80 +#define SIC_rt_pcimp_e1 0x84 +#define SIC_rt_pcimp_e2 0x88 +#define 
SIC_rt_pcimp_e3 0x8c + +#define SIC_rt_ioapic10 0x1060 +#define SIC_rt_ioapic11 0x1064 +#define SIC_rt_ioapic12 0x1068 +#define SIC_rt_ioapic13 0x106c + +#define SIC_rt_ioapicintb 0x94 +#define SIC_rt_lapicintb 0xa0 + +#define SIC_rt_msi 0xb0 +#define SIC_rt_msi_h 0xb4 + +#define SIC_rt_pcicfgb 0x90 +#define SIC_rt_pcicfged 0x98 + +/* PREPIC */ +#define SIC_prepic_version 0x8000 +#define SIC_prepic_ctrl 0x8010 +#define SIC_prepic_id 0x8020 +#define SIC_prepic_ctrl2 0x8030 +#define SIC_prepic_err_stat 0x8040 +#define SIC_prepic_err_msg_lo 0x8050 +#define SIC_prepic_err_msg_hi 0x8054 +#define SIC_prepic_err_int 0x8060 +#define SIC_prepic_mcr 0x8070 +#define SIC_prepic_mid 0x8074 +#define SIC_prepic_mar0_lo 0x8080 +#define SIC_prepic_mar0_hi 0x8084 +#define SIC_prepic_mar1_lo 0x8090 +#define SIC_prepic_mar1_hi 0x8094 +#define SIC_prepic_linp0 0x8c00 +#define SIC_prepic_linp1 0x8c04 +#define SIC_prepic_linp2 0x8c08 +#define SIC_prepic_linp3 0x8c0c +#define SIC_prepic_linp4 0x8c10 +#define SIC_prepic_linp5 0x8c14 + +/* Host Controller */ +#define SIC_hc_ctrl 0x0340 + +/* IOMMU */ +#define SIC_iommu_ctrl 0x0380 +#define SIC_iommu_ba_lo 0x0390 +#define SIC_iommu_ba_hi 0x0394 +#define SIC_iommu_dtba_lo 0x0398 +#define SIC_iommu_dtba_hi 0x039c +#define SIC_iommu_flush 0x03a0 +#define SIC_iommu_flushP 0x03a4 +#define SIC_iommu_cmd_c_lo 0x03a0 +#define SIC_iommu_cmd_c_hi 0x03a4 +#define SIC_iommu_cmd_d_lo 0x03a8 +#define SIC_iommu_cmd_d_hi 0x03ac +#define SIC_iommu_err 0x03b0 +#define SIC_iommu_err1 0x03b4 +#define SIC_iommu_err_info_lo 0x03b8 +#define SIC_iommu_err_info_hi 0x03bc + +#define SIC_edbc_iommu_ctrl 0x5080 +#define SIC_edbc_iommu_ba_lo 0x5090 +#define SIC_edbc_iommu_ba_hi 0x5094 +#define SIC_edbc_iommu_dtba_lo 0x5098 +#define SIC_edbc_iommu_dtba_hi 0x509c +#define SIC_edbc_iommu_cmd_c_lo 0x50a0 +#define SIC_edbc_iommu_cmd_c_hi 0x50a4 +#define SIC_edbc_iommu_err 0x50b0 +#define SIC_edbc_iommu_err1 0x50b4 +#define SIC_edbc_iommu_err_info_lo 0x50b8 +#define SIC_edbc_iommu_err_info_hi 0x50bc + +#define SIC_iommu_reg_base SIC_iommu_ctrl +#define SIC_iommu_reg_size 0x0080 +#define SIC_e2c3_iommu_nr 0x0007 +#define SIC_embedded_iommu_base 0x5d00 +#define SIC_embedded_iommu_size SIC_iommu_reg_size + +/* IO link & RDMA */ +#define SIC_iol_csr 0x900 +#define SIC_io_vid 0x700 +#define SIC_io_csr 0x704 +#define SIC_io_str 0x70c +#define SIC_io_str_hi 0x72c +#define SIC_rdma_vid 0x880 +#define SIC_rdma_cs 0x888 + +/* Second IO link */ +#define SIC_iol_csr1 0x1900 +#define SIC_io_vid1 0x1700 +#define SIC_io_csr1 0x1704 +#define SIC_io_str1 0x170c +#define SIC_rdma_vid1 0x1880 +#define SIC_rdma_cs1 0x1888 + +/* DSP */ +#define SIC_ic_ir0 0x2004 +#define SIC_ic_ir1 0x2008 +#define SIC_ic_mr0 0x2010 +#define SIC_ic_mr1 0x2014 + +/* Monitors */ +#define SIC_sic_mcr 0xc30 +#define SIC_sic_mar0_lo 0xc40 +#define SIC_sic_mar0_hi 0xc44 +#define SIC_sic_mar1_lo 0xc48 +#define SIC_sic_mar1_hi 0xc4c + +/* Interrupt register */ +#define SIC_sic_int 0xc60 + +/* MC */ + +#define SIC_MAX_MC_COUNT 4 +#define SIC_MC_COUNT (machine.sic_mc_count) + +#define SIC_mc0_ecc 0x400 +#define SIC_mc1_ecc (machine.sic_mc1_ecc) +#define SIC_mc2_ecc 0x480 +#define SIC_mc3_ecc 0x4c0 + +#define SIC_MC_BASE SIC_mc0_ecc +#define SIC_MC_SIZE (IS_MACHINE_E2S ? 0xa4 : \ + (IS_MACHINE_E8C ? 0xe4 : 0xf4)) + +/* PHY */ +#define SIC_PHY_BASE (IS_MACHINE_E8C2 ? 0x4000 : 0x1000) +#define SIC_PHY_SIZE (IS_MACHINE_E2S ? 0x0c00 : \ + (IS_MACHINE_E8C ? 
0x1000 : 0x4000)) + +/* IPCC */ +#define SIC_IPCC_LINKS_COUNT 3 +#define SIC_ipcc_csr1 0x604 +#define SIC_ipcc_csr2 0x644 +#define SIC_ipcc_csr3 0x684 +#define SIC_ipcc_str1 0x60c +#define SIC_ipcc_str2 0x64c +#define SIC_ipcc_str3 0x68c + +/* Power management */ +#define SIC_pwr_mgr 0x280 + +/* E12C/E16C/E2C3 Power Control System (PCS) registers + * PMC base =0x1000 is added */ +#define PMC_FREQ_CFG 0x1100 +#define PMC_FREQ_STEPS 0x1104 +#define PMC_FREQ_C2 0x1108 +#define PMC_FREQ_CORE_0_MON 0x1200 +#define PMC_FREQ_CORE_0_CTRL 0x1204 +#define PMC_FREQ_CORE_0_SLEEP 0x1208 +#define PMC_FREQ_CORE_N_MON(n) (PMC_FREQ_CORE_0_MON + n * 16) +#define PMC_FREQ_CORE_N_CTRL(n) (PMC_FREQ_CORE_0_CTRL + n * 16) +#define PMC_FREQ_CORE_N_SLEEP(n) ((PMC_FREQ_CORE_0_SLEEP) + n * 16) +#define PMC_SYS_MON_1 0x1504 +/* PMC_FREQ_CORE_0_SLEEP fields: */ +typedef union { + struct { + u32 cmd : 3; + u32 pad1 : 13; + u32 status : 3; + u32 pad2 : 8; + u32 ctrl_enable : 1; + u32 alter_disable : 1; + u32 bfs_bypass : 1; + u32 pin_en : 1; + u32 pad3 : 1; + }; + u32 word; +} freq_core_sleep_t; + +/* PMC_FREQ_CORE_0_MON fields: */ +typedef union { + struct { + u32 divF_curr : 6; + u32 divF_target : 6; + u32 divF_limit_hi : 6; + u32 divF_limit_lo : 6; + u32 divF_init : 6; + u32 bfs_bypass : 1; + u32 : 1; + }; + u32 word; +} freq_core_mon_t; + +/* PMC_SYS_MON_1 fields: */ +typedef union { + struct { + u32 machine_gen_alert : 1; + u32 machine_pwr_alert : 1; + u32 cpu_pwr_alert : 1; + u32 mc47_pwr_alert : 1; + u32 mc03_pwr_alert : 1; + u32 mc47_dimm_event : 1; + u32 mc03_dimm_event : 1; + u32 reserved : 2; + u32 mc7_fault : 1; + u32 mc6_fault : 1; + u32 mc5_fault : 1; + u32 mc4_fault : 1; + u32 mc3_fault : 1; + u32 mc2_fault : 1; + u32 mc1_fault : 1; + u32 mc0_fault : 1; + u32 cpu_fault : 1; + u32 pin_sataeth_config : 1; + u32 pin_iplc_pe_pre_det : 2; + u32 pin_iplc_pe_config : 2; + u32 pin_ipla_flip_en : 1; + u32 pin_iowl_pe_pre_det : 4; + u32 pin_iowl_pe_config : 2; + u32 pin_efuse_mode : 2; + }; + u32 word; +} sys_mon_1_t; + +/* Cache L3 */ +#define SIC_l3_ctrl 0x3000 +#define SIC_l3_serv 0x3004 +#define SIC_l3_diag_ac 0x3008 +#define SIC_l3_bnda 0x300c +#define SIC_l3_bndb 0x3010 +#define SIC_l3_bndc 0x3014 +#define SIC_l3_seal 0x3018 +#define SIC_l3_l3tl 0x301c +#define SIC_l3_emrg 0x3020 +/* bank #0 */ +#define SIC_l3_b0_diag_dw 0x3100 +#define SIC_l3_b0_eccd_ld 0x3108 +#define SIC_l3_b0_eccd_dm 0x310c +#define SIC_l3_b0_eerr 0x3110 +#define SIC_l3_b0_bist0 0x3114 +#define SIC_l3_b0_bist1 0x3118 +#define SIC_l3_b0_bist2 0x311c +#define SIC_l3_b0_emrg_r0 0x3120 +#define SIC_l3_b0_emrg_r1 0x3124 +/* bank #1 */ +#define SIC_l3_b1_diag_dw 0x3140 +#define SIC_l3_b1_eccd_ld 0x3148 +#define SIC_l3_b1_eccd_dm 0x314c +#define SIC_l3_b1_eerr 0x3150 +#define SIC_l3_b1_bist0 0x3154 +#define SIC_l3_b1_bist1 0x3158 +#define SIC_l3_b1_bist2 0x315c +#define SIC_l3_b1_emrg_r0 0x3160 +#define SIC_l3_b1_emrg_r1 0x3164 +/* bank #2 */ +#define SIC_l3_b2_diag_dw 0x3180 +#define SIC_l3_b2_eccd_ld 0x3188 +#define SIC_l3_b2_eccd_dm 0x318c +#define SIC_l3_b2_eerr 0x3190 +#define SIC_l3_b2_bist0 0x3194 +#define SIC_l3_b2_bist1 0x3198 +#define SIC_l3_b2_bist2 0x319c +#define SIC_l3_b2_emrg_r0 0x31a0 +#define SIC_l3_b2_emrg_r1 0x31a4 +/* bank #3 */ +#define SIC_l3_b3_diag_dw 0x31c0 +#define SIC_l3_b3_eccd_ld 0x31c8 +#define SIC_l3_b3_eccd_dm 0x31cc +#define SIC_l3_b3_eerr 0x31d0 +#define SIC_l3_b3_bist0 0x31d4 +#define SIC_l3_b3_bist1 0x31d8 +#define SIC_l3_b3_bist2 0x31dc +#define SIC_l3_b3_emrg_r0 0x31e0 +#define SIC_l3_b3_emrg_r1 0x31e4 +/* 
bank #4 */ +#define SIC_l3_b4_diag_dw 0x3200 +#define SIC_l3_b4_eccd_ld 0x3208 +#define SIC_l3_b4_eccd_dm 0x320c +#define SIC_l3_b4_eerr 0x3210 +#define SIC_l3_b4_bist0 0x3214 +#define SIC_l3_b4_bist1 0x3218 +#define SIC_l3_b4_bist2 0x321c +#define SIC_l3_b4_emrg_r0 0x3220 +#define SIC_l3_b4_emrg_r1 0x3224 +/* bank #5 */ +#define SIC_l3_b5_diag_dw 0x3240 +#define SIC_l3_b5_eccd_ld 0x3248 +#define SIC_l3_b5_eccd_dm 0x324c +#define SIC_l3_b5_eerr 0x3250 +#define SIC_l3_b5_bist0 0x3254 +#define SIC_l3_b5_bist1 0x3258 +#define SIC_l3_b5_bist2 0x325c +#define SIC_l3_b5_emrg_r0 0x3260 +#define SIC_l3_b5_emrg_r1 0x3264 +/* bank #6 */ +#define SIC_l3_b6_diag_dw 0x3280 +#define SIC_l3_b6_eccd_ld 0x3288 +#define SIC_l3_b6_eccd_dm 0x328c +#define SIC_l3_b6_eerr 0x3290 +#define SIC_l3_b6_bist0 0x3294 +#define SIC_l3_b6_bist1 0x3298 +#define SIC_l3_b6_bist2 0x329c +#define SIC_l3_b6_emrg_r0 0x32a0 +#define SIC_l3_b6_emrg_r1 0x32a4 +/* bank #7 */ +#define SIC_l3_b7_diag_dw 0x32c0 +#define SIC_l3_b7_eccd_ld 0x32c8 +#define SIC_l3_b7_eccd_dm 0x32cc +#define SIC_l3_b7_eerr 0x32d0 +#define SIC_l3_b7_bist0 0x32d4 +#define SIC_l3_b7_bist1 0x32d8 +#define SIC_l3_b7_bist2 0x32dc +#define SIC_l3_b7_emrg_r0 0x32e0 +#define SIC_l3_b7_emrg_r1 0x32e4 + +/* Host Controller */ +#define SIC_hc_mcr 0x360 +#define SIC_hc_mid 0x364 +#define SIC_hc_mar0_lo 0x368 +#define SIC_hc_mar0_hi 0x36c +#define SIC_hc_mar1_lo 0x370 +#define SIC_hc_mar1_hi 0x374 +#define SIC_hc_ioapic_eoi 0x37c + +/* Binary compiler Memory protection registers */ +#define BC_MM_CTRL 0x0800 +#define BC_MM_MLO_LB 0x0808 +#define BC_MM_MLO_HB 0x080c +#define BC_MM_MHI_BASE 0x0810 +#define BC_MM_MHI_BASE_H 0x0814 +#define BC_MM_MHI_MASK 0x0818 +#define BC_MM_MHI_MASK_H 0x081c +#define BC_MM_MHI_LB 0x0820 +#define BC_MM_MHI_LB_H 0x0824 +#define BC_MM_MHI_HB 0x0828 +#define BC_MM_MHI_HB_H 0x082c +#define BC_MP_CTRL 0x0830 +#define BC_MP_STAT 0x0834 +#define BC_MP_T_BASE 0x0838 +#define BC_MP_T_BASE_H 0x083c +#define BC_MP_T_H_BASE 0x0840 +#define BC_MP_T_H_BASE_H 0x0844 +#define BC_MP_T_HB 0x0848 +#define BC_MP_T_H_LB 0x0850 +#define BC_MP_T_H_LB_H 0x0854 +#define BC_MP_T_H_HB 0x0858 +#define BC_MP_T_H_HB_H 0x085c +#define BC_MP_T_CORR 0x0860 +#define BC_MP_T_CORR_H 0x0864 +#define BC_MP_B_BASE 0x0868 +#define BC_MP_B_BASE_H 0x086c +#define BC_MP_B_HB 0x0870 +#define BC_MP_B_PUT 0x0874 +#define BC_MP_B_GET 0x0878 +#define BC_MM_REG_END (BC_MP_B_GET + 4) + +#define BC_MM_REG_BASE BC_MM_CTRL +#define BC_MM_REG_SIZE (BC_MM_REG_END - BC_MM_REG_BASE) +#define BC_MM_REG_NUM (BC_MM_REG_SIZE / 4) + +#ifndef __ASSEMBLY__ +/* + * Read/Write RT_LCFGj Regs + */ +#define ES2_CLN_BITS 4 /* 4 bits - cluster # */ +#define E8C_CLN_BITS 2 /* 2 bits - cluster # */ +#if defined(CONFIG_ES2) || defined(CONFIG_E2S) +#define E2K_MAX_CL_NUM ((1 << ES2_CLN_BITS) - 1) +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) +#define E2K_MAX_CL_NUM ((1 << E8C_CLN_BITS) - 1) +#elif defined(CONFIG_E12C) || defined(CONFIG_E16C) || defined(CONFIG_E2C3) +#define E2K_MAX_CL_NUM 0 /* Cluster number field was deleted */ +#endif /* CONFIG_ES2 || CONFIG_E2S */ + +/* SCCFG */ +#define SIC_sccfg 0xc00 + +typedef unsigned int e2k_rt_lcfg_t; /* Read/write pointer (32 bits) */ +typedef struct es2_rt_lcfg_fields { + e2k_rt_lcfg_t vp : 1; /* [0] */ + e2k_rt_lcfg_t vb : 1; /* [1] */ + e2k_rt_lcfg_t vics : 1; /* [2] */ + e2k_rt_lcfg_t vio : 1; /* [3] */ + e2k_rt_lcfg_t pln : 2; /* [5:4] */ + e2k_rt_lcfg_t cln : 4; /* [9:6] */ + e2k_rt_lcfg_t unused : 22; /* [31:10] */ +} es2_rt_lcfg_fields_t; +typedef struct 
e8c_rt_lcfg_fields { + e2k_rt_lcfg_t vp : 1; /* [0] */ + e2k_rt_lcfg_t vb : 1; /* [1] */ + e2k_rt_lcfg_t vics : 1; /* [2] */ + e2k_rt_lcfg_t vio : 1; /* [3] */ + e2k_rt_lcfg_t pln : 2; /* [5:4] */ + e2k_rt_lcfg_t cln : 2; /* [7:6] */ + e2k_rt_lcfg_t unused : 24; /* [31:8] */ +} e8c_rt_lcfg_fields_t; +typedef es2_rt_lcfg_fields_t e2s_rt_lcfg_fields_t; +typedef union e2k_rt_lcfg_struct { /* Structure of lower word */ + es2_rt_lcfg_fields_t es2_fields; /* as fields */ + e8c_rt_lcfg_fields_t e8c_fields; /* as fields */ + e2k_rt_lcfg_t word; /* as entire register */ +} e2k_rt_lcfg_struct_t; + +#define ES2_RT_LCFG_vp(__reg) ((__reg).es2_fields.vp) +#define ES2_RT_LCFG_vb(__reg) ((__reg).es2_fields.vb) +#define ES2_RT_LCFG_vics(__reg) ((__reg).es2_fields.vics) +#define ES2_RT_LCFG_vio(__reg) ((__reg).es2_fields.vio) +#define ES2_RT_LCFG_pln(__reg) ((__reg).es2_fields.pln) +#define ES2_RT_LCFG_cln(__reg) ((__reg).es2_fields.cln) +#define ES2_RT_LCFG_reg(__reg) ((__reg).word) + +#define E2S_RT_LCFG_vp ES2_RT_LCFG_vp +#define E2S_RT_LCFG_vb ES2_RT_LCFG_vb +#define E2S_RT_LCFG_vics ES2_RT_LCFG_vics +#define E2S_RT_LCFG_vio ES2_RT_LCFG_vio +#define E2S_RT_LCFG_pln ES2_RT_LCFG_pln +#define E2S_RT_LCFG_cln ES2_RT_LCFG_cln +#define E2S_RT_LCFG_reg ES2_RT_LCFG_reg + +#define E8C_RT_LCFG_vp(__reg) ((__reg).e8c_fields.vp) +#define E8C_RT_LCFG_vb(__reg) ((__reg).e8c_fields.vb) +#define E8C_RT_LCFG_vics(__reg) ((__reg).e8c_fields.vics) +#define E8C_RT_LCFG_vio(__reg) ((__reg).e8c_fields.vio) +#define E8C_RT_LCFG_pln(__reg) ((__reg).e8c_fields.pln) +#define E8C_RT_LCFG_cln(__reg) ((__reg).e8c_fields.cln) +#define E8C_RT_LCFG_reg(__reg) ((__reg).word) + +/* FIXME: now as on e8c, but can be changed need DOCs */ +#define E12C_RT_LCFG_vp(__reg) E8C_RT_LCFG_vp(__reg) +#define E12C_RT_LCFG_vb(__reg) E8C_RT_LCFG_vb(__reg) +#define E12C_RT_LCFG_vics(__reg) E8C_RT_LCFG_vics(__reg) +#define E12C_RT_LCFG_vio(__reg) E8C_RT_LCFG_vio(__reg) +#define E12C_RT_LCFG_pln(__reg) E8C_RT_LCFG_pln(__reg) +#define E12C_RT_LCFG_cln(__reg) E8C_RT_LCFG_cln(__reg) +#define E12C_RT_LCFG_reg(__reg) E8C_RT_LCFG_reg(__reg) + +#define E2K_RT_LCFG_vp ES2_RT_LCFG_vp +#define E2K_RT_LCFG_vb ES2_RT_LCFG_vb +#define E2K_RT_LCFG_vics ES2_RT_LCFG_vics +#define E2K_RT_LCFG_vio ES2_RT_LCFG_vio +#if defined(CONFIG_ES2) || defined(CONFIG_E2S) +#define E2K_RT_LCFG_pln ES2_RT_LCFG_pln +#define E2K_RT_LCFG_cln ES2_RT_LCFG_cln +#elif defined(CONFIG_E8C) || defined(CONFIG_E8C2) +#define E2K_RT_LCFG_pln E8C_RT_LCFG_pln +#define E2K_RT_LCFG_cln E8C_RT_LCFG_cln +#elif defined(CONFIG_E12C) || defined(CONFIG_E16C) || defined(CONFIG_E2C3) +#define E2K_RT_LCFG_pln E12C_RT_LCFG_pln +#define E2K_RT_LCFG_cln E12C_RT_LCFG_cln +#endif /* CONFIG_ES2 || CONFIG_E2S */ +#define E2K_RT_LCFG_reg ES2_RT_LCFG_reg + +/* + * Read/Write RT_PCIIOj Regs + */ +typedef unsigned int e2k_rt_pciio_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pciio_fields { + e2k_rt_pciio_t unused1 : 12; /* [11:0] */ + e2k_rt_pciio_t bgn : 4; /* [15:12] */ + e2k_rt_pciio_t unused2 : 12; /* [27:16] */ + e2k_rt_pciio_t end : 4; /* [31:28] */ +} e2k_rt_pciio_fields_t; +typedef union e2k_rt_pciio_struct { /* Structure of lower word */ + e2k_rt_pciio_fields_t fields; /* as fields */ + e2k_rt_pciio_t word; /* as entire register */ +} e2k_rt_pciio_struct_t; + +#define E2K_SIC_ALIGN_RT_PCIIO 12 /* 4 Kb */ +#define E2K_SIC_SIZE_RT_PCIIO (1 << E2K_SIC_ALIGN_RT_PCIIO) +#define E2K_RT_PCIIO_bgn fields.bgn +#define E2K_RT_PCIIO_end fields.end +#define E2K_RT_PCIIO_reg word + +/* + * Read/Write RT_PCIMj 
Regs + */ +typedef unsigned int e2k_rt_pcim_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pcim_fields { + e2k_rt_pcim_t unused1 : 11; /* [10:0] */ + e2k_rt_pcim_t bgn : 5; /* [15:11] */ + e2k_rt_pcim_t unused2 : 11; /* [26:16] */ + e2k_rt_pcim_t end : 5; /* [31:27] */ +} e2k_rt_pcim_fields_t; +typedef union e2k_rt_pcim_struct { /* Structure of lower word */ + e2k_rt_pcim_fields_t fields; /* as fields */ + e2k_rt_pcim_t word; /* as entire register */ +} e2k_rt_pcim_struct_t; + +#define E2K_SIC_ALIGN_RT_PCIM 27 /* 128 Mb */ +#define E2K_SIC_SIZE_RT_PCIM (1 << E2K_SIC_ALIGN_RT_PCIM) +#define E2K_RT_PCIM_bgn fields.bgn +#define E2K_RT_PCIM_end fields.end +#define E2K_RT_PCIM_reg word + +/* + * Read/Write RT_PCIMPj Regs + */ +typedef unsigned int e2k_rt_pcimp_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pcimp_struct { + e2k_rt_pcimp_t addr; /* [PA_MSB: 0] */ +} e2k_rt_pcimp_struct_t; + +#define E2K_SIC_ALIGN_RT_PCIMP 27 /* 128 Mb */ +#define E2K_SIC_SIZE_RT_PCIMP (1 << E2K_SIC_ALIGN_RT_PCIMP) +#define E2K_RT_PCIMP_bgn addr +#define E2K_RT_PCIMP_end addr +#define E2K_RT_PCIMP_reg addr + +/* + * Read/Write RT_PCICFGB Reg + */ +typedef unsigned int e2k_rt_pcicfgb_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_pcicfgb_fields { + e2k_rt_pcicfgb_t unused1 : 3; /* [2:0] */ + e2k_rt_pcicfgb_t bgn : 18; /* [20:3] */ + e2k_rt_pcicfgb_t unused2 : 11; /* [31:21] */ +} e2k_rt_pcicfgb_fields_t; +typedef union e2k_rt_pcicfgb_struct { /* Structure of lower word */ + e2k_rt_pcicfgb_fields_t fields; /* as fields */ + e2k_rt_pcicfgb_t word; /* as entire register */ +} e2k_rt_pcicfgb_struct_t; + +#define E2K_SIC_ALIGN_RT_PCICFGB 28 /* 256 Mb */ +#define E2K_SIC_SIZE_RT_PCICFGB (1 << E2K_SIC_ALIGN_RT_PCICFGB) +#define E2K_RT_PCICFGB_bgn fields.bgn +#define E2K_RT_PCICFGB_reg word + +/* + * Read/Write RT_MLOj Regs + */ +typedef unsigned int e2k_rt_mlo_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_mlo_fields { + e2k_rt_mlo_t unused1 : 11; /* [10:0] */ + e2k_rt_mlo_t bgn : 5; /* [15:11] */ + e2k_rt_mlo_t unused2 : 11; /* [26:16] */ + e2k_rt_mlo_t end : 5; /* [31:27] */ +} e2k_rt_mlo_fields_t; +typedef union e2k_rt_mlo_struct { /* Structure of lower word */ + e2k_rt_mlo_fields_t fields; /* as fields */ + e2k_rt_mlo_t word; /* as entire register */ +} e2k_rt_mlo_struct_t; + +#define E2K_SIC_ALIGN_RT_MLO 27 /* 128 Mb */ +#define E2K_SIC_SIZE_RT_MLO (1 << E2K_SIC_ALIGN_RT_MLO) +#define E2K_RT_MLO_bgn fields.bgn +#define E2K_RT_MLO_end fields.end +#define E2K_RT_MLO_reg word + +/* memory *bank minimum size, so base address of bank align */ +#define E2K_SIC_MIN_MEMORY_BANK (256 * 1024 * 1024) /* 256 Mb */ + +/* + * Read/Write RT_MHIj Regs + */ +typedef unsigned int e2k_rt_mhi_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_mhi_fields { + e2k_rt_mhi_t bgn : 16; /* [15: 0] */ + e2k_rt_mhi_t end : 16; /* [31:16] */ +} e2k_rt_mhi_fields_t; +typedef union e2k_rt_mhi_struct { /* Structure of lower word */ + e2k_rt_mhi_fields_t fields; /* as fields */ + e2k_rt_mhi_t word; /* as entire register */ +} e2k_rt_mhi_struct_t; + +#define E2K_SIC_ALIGN_RT_MHI 32 /* 4 Gb */ +#define E2K_SIC_SIZE_RT_MHI (1UL << E2K_SIC_ALIGN_RT_MHI) +#define E2K_RT_MHI_bgn fields.bgn +#define E2K_RT_MHI_end fields.end +#define E2K_RT_MHI_reg word + +/* + * Read/Write RT_IOAPICj Regs + */ +typedef unsigned int e2k_rt_ioapic_t; /* Read/write pointer (32 bits) */ +typedef struct e2k_rt_ioapic_fields { + e2k_rt_ioapic_t unused1 : 12; /* [11:0] */ + e2k_rt_ioapic_t bgn : 9; /* [20:12] */ + 
e2k_rt_ioapic_t unused2 : 11; /* [31:21] */ +} e2k_rt_ioapic_fields_t; +typedef union e2k_rt_ioapic_struct { /* Structure of lower word */ + e2k_rt_ioapic_fields_t fields; /* as fields */ + e2k_rt_ioapic_t word; /* as entire register */ +} e2k_rt_ioapic_struct_t; + +#define E2K_SIC_ALIGN_RT_IOAPIC 12 /* 4 Kb */ +#define E2K_SIC_SIZE_RT_IOAPIC (1 << E2K_SIC_ALIGN_RT_IOAPIC) +#define E2K_SIC_IOAPIC_SIZE E2K_SIC_SIZE_RT_IOAPIC +#define E2K_SIC_IOAPIC_FIX_ADDR_SHIFT 21 +#define E2K_SIC_IOAPIC_FIX_ADDR_MASK \ + ~((1UL << E2K_SIC_IOAPIC_FIX_ADDR_SHIFT) - 1) +#define E2K_RT_IOAPIC_bgn fields.bgn +#define E2K_RT_IOAPIC_reg word + +/* + * Read/Write RT_MSI Regs + */ +#define E2K_SIC_ALIGN_RT_MSI 20 /* 1 Mb */ + +typedef unsigned int e2k_rt_msi_t; /* Read/write pointer (low 32 bits) */ +typedef unsigned int e2k_rt_msi_h_t; /* Read/write pointer (high 32 bits) */ +typedef struct e2k_rt_msi_fields { + e2k_rt_msi_t unused : E2K_SIC_ALIGN_RT_MSI; /* [19:0] */ + e2k_rt_msi_t bgn : (32 - E2K_SIC_ALIGN_RT_MSI); /* [31:20] */ +} e2k_rt_msi_fields_t; +typedef struct e2k_rt_msi_h_fields { + e2k_rt_msi_h_t bgn : 32; /* [63:32] */ +} e2k_rt_msi_h_fields_t; +typedef union e2k_rt_msi_struct { /* Structure of lower word */ + e2k_rt_msi_fields_t fields; /* as fields */ + e2k_rt_msi_t word; /* as entire register */ +} e2k_rt_msi_struct_t; +typedef union e2k_rt_msi_h_struct { /* Structure of higher word */ + e2k_rt_msi_h_fields_t fields; /* as fields */ + e2k_rt_msi_h_t word; /* as entire register */ +} e2k_rt_msi_h_struct_t; + +#define E2K_SIC_SIZE_RT_MSI (1 << E2K_SIC_ALIGN_RT_MSI) +#define E2K_RT_MSI_bgn fields.bgn +#define E2K_RT_MSI_end E2K_RT_MSI_bgn +#define E2K_RT_MSI_reg word +#define E2K_RT_MSI_H_bgn fields.bgn +#define E2K_RT_MSI_H_end E2K_RT_MSI_H_bgn +#define E2K_RT_MSI_H_reg word +#define E2K_RT_MSI_DEFAULT_BASE 0x120000000UL + +/* + * Read/Write ST_P Regs + */ +typedef unsigned int e2k_st_p_t; /* Read/write pointer (32 bits) */ +typedef struct es2_st_p_fields { + e2k_st_p_t type : 4; /* [3:0] */ + e2k_st_p_t id : 8; /* [11:4] */ + e2k_st_p_t pn : 8; /* [19:12] */ + e2k_st_p_t coh_on : 1; /* [20] */ + e2k_st_p_t pl_val : 3; /* [23:21] */ + e2k_st_p_t mlc : 1; /* [24] */ + e2k_st_p_t unused : 7; /* [31:25] */ +} es2_st_p_fields_t; +typedef es2_st_p_fields_t e2s_st_p_fields_t; +typedef es2_st_p_fields_t e8c_st_p_fields_t; +typedef union e2k_st_p_struct { /* Structure of lower word */ + es2_st_p_fields_t es2_fields; /* as fields for es2 */ + e2k_st_p_t word; /* as entire register */ +} e2k_st_p_struct_t; + +#define ES2_ST_P_type es2_fields.type +#define ES2_ST_P_id es2_fields.id +#define ES2_ST_P_coh_on es2_fields.coh_on +#define ES2_ST_P_pl_val es2_fields.pl_val +#define ES2_ST_P_mlc es2_fields.mlc +#define ES2_ST_P_pn es2_fields.pn +#define ES2_ST_P_reg word + +#define E2S_ST_P_type ES2_ST_P_type +#define E2S_ST_P_id ES2_ST_P_id +#define E2S_ST_P_coh_on ES2_ST_P_coh_on +#define E2S_ST_P_pl_val ES2_ST_P_pl_val +#define E2S_ST_P_mlc ES2_ST_P_mlc +#define E2S_ST_P_pn ES2_ST_P_pn +#define E2S_ST_P_reg ES2_ST_P_reg + +#define E8C_ST_P_type ES2_ST_P_type +#define E8C_ST_P_id ES2_ST_P_id +#define E8C_ST_P_coh_on ES2_ST_P_coh_on +#define E8C_ST_P_pl_val ES2_ST_P_pl_val +#define E8C_ST_P_mlc ES2_ST_P_mlc +#define E8C_ST_P_pn ES2_ST_P_pn +#define E8C_ST_P_reg ES2_ST_P_reg + +#define E2K_ST_P_type ES2_ST_P_type +#define E2K_ST_P_reg ES2_ST_P_reg + +#define E2K_ST_P_pl_val ES2_ST_P_pl_val +#define E2K_ST_P_mlc ES2_ST_P_mlc +#define E2K_ST_P_pn ES2_ST_P_pn + +/* + * ST_CORE core state register + */ +struct 
e2k_st_core_fields { + u32 val : 1; /* [0] */ + u32 wait_init : 1; /* [1] */ + u32 wait_trap : 1; /* [2] */ + u32 stop_dbg : 1; /* [3] */ + u32 clk_off : 1; /* [4] */ + u32 unused : 27; /* [31:5] */ +}; +typedef union { + struct { + u32 : 5; + u32 pmc_rst : 1; /* [5] */ + u32 : 26; + } e1cp; + struct { + u32 val : 1; /* [0] */ + u32 wait_init : 1; /* [1] */ + u32 wait_trap : 1; /* [2] */ + u32 stop_dbg : 1; /* [3] */ + u32 clk_off : 1; /* [4] */ + u32 unused : 27; /* [31:5] */ + }; + struct e2k_st_core_fields fields; /* as fields for e2k */ + u32 word; /* as entire register */ +} e2k_st_core_t; + +#define E2K_ST_CORE_val(__reg) ((__reg).fields.val) +#define E2K_ST_CORE_wait_init(__reg) ((__reg).fields.wait_init) +#define E2K_ST_CORE_wait_trap(__reg) ((__reg).fields.wait_trap) +#define E2K_ST_CORE_stop_dbg(__reg) ((__reg).fields.stop_dbg) +#define E2K_ST_CORE_clk_off(__reg) ((__reg).fields.clk_off) +#define E2K_ST_CORE_reg(__reg) ((__reg).word) + +#define ES2_ST_CORE_val E2K_ST_CORE_val +#define ES2_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define ES2_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define ES2_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define ES2_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define ES2_ST_CORE_reg E2K_ST_CORE_reg + +#define E2S_ST_CORE_val E2K_ST_CORE_val +#define E2S_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define E2S_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define E2S_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define E2S_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define E2S_ST_CORE_reg E2K_ST_CORE_reg + +#define E8C_ST_CORE_val E2K_ST_CORE_val +#define E8C_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define E8C_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define E8C_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define E8C_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define E8C_ST_CORE_reg E2K_ST_CORE_reg + +#define E1CP_ST_CORE_val E2K_ST_CORE_val +#define E1CP_ST_CORE_wait_init E2K_ST_CORE_wait_init +#define E1CP_ST_CORE_wait_trap E2K_ST_CORE_wait_trap +#define E1CP_ST_CORE_stop_dbg E2K_ST_CORE_stop_dbg +#define E1CP_ST_CORE_clk_off E2K_ST_CORE_clk_off +#define E1CP_ST_CORE_pmc_rst(__reg) ((__reg).fields.pmc_rst) +#define E1CP_ST_CORE_reg E2K_ST_CORE_reg + +/* + * IO Link control state register + */ +typedef unsigned int e2k_iol_csr_t; /* single word (32 bits) */ +typedef struct e2k_iol_csr_fields { + e2k_iol_csr_t mode : 1; /* [0] */ + e2k_iol_csr_t abtype : 7; /* [7:1] */ + e2k_iol_csr_t unused : 24; /* [31:8] */ +} e2k_iol_csr_fields_t; +typedef union e2k_iol_csr_struct { /* Structure of word */ + e2k_iol_csr_fields_t fields; /* as fields */ + e2k_iol_csr_t word; /* as entire register */ +} e2k_iol_csr_struct_t; + +#define E2K_IOL_CSR_mode fields.mode /* type of controller */ + /* on the link */ +#define E2K_IOL_CSR_abtype fields.abtype /* type of abonent */ + /* on the link */ +#define E2K_IOL_CSR_reg word +#define IOHUB_IOL_MODE 1 /* controller is IO HUB */ +#define RDMA_IOL_MODE 0 /* controller is RDMA */ +#define IOHUB_ONLY_IOL_ABTYPE 1 /* abonent has only IO HUB */ + /* controller */ +#define RDMA_ONLY_IOL_ABTYPE 2 /* abonent has only RDMA */ + /* controller */ +#define RDMA_IOHUB_IOL_ABTYPE 3 /* abonent has RDMA and */ + /* IO HUB controller */ + +/* + * IO channel control/status register + */ +typedef unsigned int e2k_io_csr_t; /* single word (32 bits) */ +typedef struct e2k_io_csr_fields { + e2k_io_csr_t srst : 1; /* [0] */ + e2k_io_csr_t unused1 : 3; /* [3:1] */ + e2k_io_csr_t bsy_ie : 1; /* [4] */ + e2k_io_csr_t err_ie : 1; /* [5] */ + e2k_io_csr_t to_ie : 1; /* [6] */ + e2k_io_csr_t 
lsc_ie : 1; /* [7] */
+ e2k_io_csr_t unused2 : 4; /* [11:8] */
+ e2k_io_csr_t bsy_ev : 1; /* [12] */
+ e2k_io_csr_t err_ev : 1; /* [13] */
+ e2k_io_csr_t to_ev : 1; /* [14] */
+ e2k_io_csr_t lsc_ev : 1; /* [15] */
+ e2k_io_csr_t unused3 : 14; /* [29:16] */
+ e2k_io_csr_t link_tu : 1; /* [30] */
+ e2k_io_csr_t ch_on : 1; /* [31] */
+} e2k_io_csr_fields_t;
+typedef union e2k_io_csr_struct { /* Structure of word */
+ e2k_io_csr_fields_t fields; /* as fields */
+ e2k_io_csr_t word; /* as entire register */
+} e2k_io_csr_struct_t;
+
+#define E2K_IO_CSR_srst fields.srst /* software reset flag */
+#define E2K_IO_CSR_bsy_ie fields.bsy_ie /* flag of interrupt enable */
+ /* on receiver busy */
+#define E2K_IO_CSR_err_ie fields.err_ie /* flag of interrupt enable */
+ /* on CRC-error */
+#define E2K_IO_CSR_to_ie fields.to_ie /* flag of interrupt enable */
+ /* on timeout */
+#define E2K_IO_CSR_lsc_ie fields.lsc_ie /* flag of interrupt enable */
+ /* on link state changed */
+#define E2K_IO_CSR_bsy_ev fields.bsy_ev /* flag of interrupt */
+ /* on receiver busy */
+#define E2K_IO_CSR_err_ev fields.err_ev /* flag of interrupt */
+ /* on CRC-error */
+#define E2K_IO_CSR_to_ev fields.to_ev /* flag of interrupt */
+ /* on timeout */
+#define E2K_IO_CSR_lsc_ev fields.lsc_ev /* flag of interrupt */
+ /* on link state changed */
+#define E2K_IO_CSR_link_tu fields.link_tu /* flag of training */
+ /* in progress */
+#define E2K_IO_CSR_ch_on fields.ch_on /* flag of channel */
+ /* is ready and online */
+#define E2K_IO_CSR_reg word
+#define IO_IS_ON_IO_CSR 1 /* IO controller is ready */
+ /* and online */
+/*
+ * IO channel statistics register
+ */
+typedef unsigned int e2k_io_str_t; /* single word (32 bits) */
+typedef struct e2k_io_str_fields {
+ e2k_io_str_t rc : 24; /* [23:0] */
+ e2k_io_str_t rcol : 1; /* [24] */
+ e2k_io_str_t reserved : 4; /* [28:25] */
+ e2k_io_str_t bsy_rce : 1; /* [29] */
+ e2k_io_str_t err_rce : 1; /* [30] */
+ e2k_io_str_t to_rce : 1; /* [31] */
+} e2k_io_str_fields_t;
+typedef union e2k_io_str_struct { /* Structure of word */
+ e2k_io_str_fields_t fields; /* as fields */
+ e2k_io_str_t word; /* as entire register */
+} e2k_io_str_struct_t;
+
+#define E2K_IO_STR_rc fields.rc /* repeat counter */
+#define E2K_IO_STR_rcol fields.rcol /* repeat counter overload */
+#define E2K_IO_STR_bsy_rce fields.bsy_rce /* busy repeat count enable */
+#define E2K_IO_STR_err_rce fields.err_rce /* CRC-error repeat count */
+ /* enable */
+#define E2K_IO_STR_to_rce fields.to_rce /* TO repeat count enable */
+#define E2K_IO_STR_reg word
+
+/*
+ * RDMA controller state register
+ */
+typedef unsigned int e2k_rdma_cs_t; /* single word (32 bits) */
+typedef struct e2k_rdma_cs_fields {
+ e2k_rdma_cs_t ptocl : 16; /* [15:0] */
+ e2k_rdma_cs_t unused1 : 10; /* [25:16] */
+ e2k_rdma_cs_t srst : 1; /* [26] */
+ e2k_rdma_cs_t mor : 1; /* [27] */
+ e2k_rdma_cs_t mow : 1; /* [28] */
+ e2k_rdma_cs_t fch_on : 1; /* [29] */
+ e2k_rdma_cs_t link_tu : 1; /* [30] */
+ e2k_rdma_cs_t ch_on : 1; /* [31] */
+} e2k_rdma_cs_fields_t;
+typedef union e2k_rdma_cs_struct { /* Structure of word */
+ e2k_rdma_cs_fields_t fields; /* as fields */
+ e2k_rdma_cs_t word; /* as entire register */
+} e2k_rdma_cs_struct_t;
+
+#define E2K_RDMA_CS_ptocl fields.ptocl /* timeout clock */
+#define E2K_RDMA_CS_srst fields.srst /* software reset flag */
+#define E2K_RDMA_CS_mor fields.mor /* flag of not completed */
+ /* readings */
+#define E2K_RDMA_CS_mow fields.mow /* flag of not completed */
+ /* writings */
+#define E2K_RDMA_CS_fch_on fields.fch_on /* flag of channel */
+ /* forced set on */
+#define E2K_RDMA_CS_link_tu fields.link_tu /* flag of training */
+ /* in progress */
+#define E2K_RDMA_CS_ch_on fields.ch_on /* flag of channel */
+ /* is ready and online */
+#define E2K_RDMA_CS_reg word
+
+/*
+ * Read/Write PWR_MGR0 register
+ */
+struct e2k_pwr_mgr_fields {
+ u32 core0_clk : 1; /* [0] */
+ u32 core1_clk : 1; /* [1] */
+ u32 ic_clk : 1; /* [2] */
+ u32 unused1 : 13; /* [15:3] */
+ u32 snoop_wait : 2; /* [17:16] */
+ u32 unused2 : 14; /* [31:18] */
+};
+typedef union {
+ struct {
+ u32 core0_clk : 1; /* [0] */
+ u32 core1_clk : 1; /* [1] */
+ u32 ic_clk : 1; /* [2] */
+ u32 unused1 : 13; /* [15:3] */
+ u32 snoop_wait : 2; /* [17:16] */
+ u32 unused2 : 14; /* [31:18] */
+ };
+ struct e2k_pwr_mgr_fields fields; /* as fields */
+ u32 word; /* as entire register */
+} e2k_pwr_mgr_t;
+
+#define E2K_PWR_MGR0_core0_clk fields.core0_clk /* core #0 clock on/off */
+#define E2K_PWR_MGR0_core1_clk fields.core1_clk /* core #1 clock on/off */
+#define E2K_PWR_MGR0_ic_clk fields.ic_clk /* dsp clock on/off */
+#define E2K_PWR_MGR0_snoop_wait fields.snoop_wait /* delay before off */
+ /* for snoop-requests */
+ /* handling */
+#define E2K_PWR_MGR0_reg word
+
+/*
+ * Monitor control register (SIC_MCR)
+ */
+typedef unsigned int e2k_sic_mcr_t; /* single word (32 bits) */
+typedef struct e2k_sic_mcr_fields {
+ e2k_sic_mcr_t v0 : 1; /* [0] */
+ e2k_sic_mcr_t unused1 : 1; /* [1] */
+ e2k_sic_mcr_t es0 : 6; /* [7:2] */
+ e2k_sic_mcr_t v1 : 1; /* [8] */
+ e2k_sic_mcr_t unused2 : 1; /* [9] */
+ e2k_sic_mcr_t es1 : 6; /* [15:10] */
+ e2k_sic_mcr_t mcn : 5; /* [20:16] */
+ e2k_sic_mcr_t mcnmo : 1; /* [21] */
+ e2k_sic_mcr_t unused3 : 10; /* [31:22] */
+} e2k_sic_mcr_fields_t;
+typedef union e2k_sic_mcr_struct { /* Structure of word */
+ e2k_sic_mcr_fields_t fields; /* as fields */
+ e2k_sic_mcr_t word; /* as entire register */
+} e2k_sic_mcr_struct_t;
+
+#define E2K_SIC_MCR_v0 fields.v0 /* monitor #0 valid */
+#define E2K_SIC_MCR_es0 fields.es0 /* monitor #0 event */
+ /* specifier */
+#define E2K_SIC_MCR_v1 fields.v1 /* monitor #1 valid */
+#define E2K_SIC_MCR_es1 fields.es1 /* monitor #1 event */
+ /* specifier */
+#define E2K_SIC_MCR_reg word
+
+/*
+ * Monitor accumulator register hi part (SIC_MAR0_hi, SIC_MAR1_hi)
+ */
+typedef unsigned int e2k_sic_mar_hi_t; /* single word (32 bits) */
+typedef struct e2k_sic_mar_hi_fields {
+ e2k_sic_mar_hi_t val : 31; /* [30:0] */
+ e2k_sic_mar_hi_t of : 1; /* [31] */
+} e2k_sic_mar_hi_fields_t;
+typedef union e2k_sic_mar_hi_struct { /* Structure of word */
+ e2k_sic_mar_hi_fields_t fields; /* as fields */
+ e2k_sic_mar_hi_t word; /* as entire register */
+} e2k_sic_mar_hi_struct_t;
+
+#define E2K_SIC_MAR_HI_val fields.val /* high part of events */
+ /* counter */
+#define E2K_SIC_MAR_HI_of fields.of /* overflow flag */
+#define E2K_SIC_MAR_HI_reg word
+
+/*
+ * Monitor accumulator register lo part (SIC_MAR0_lo, SIC_MAR1_lo)
+ */
+typedef unsigned int e2k_sic_mar_lo_t; /* single word (32 bits) */
+
+#define E2K_SIC_MAR_LO_val fields.val /* low part of events */
+ /* counter */
+#define E2K_SIC_MAR_LO_reg word
+
+/*
+ * Read/Write MCX_ECC (X={0, 1, 2, 3}) registers
+ */
+typedef unsigned int e2k_mc_ecc_t; /* single word (32 bits) */
+typedef struct e2k_mc_ecc_fields {
+ e2k_mc_ecc_t ee : 1; /* [0] */
+ e2k_mc_ecc_t dmode : 1; /* [1] */
+ e2k_mc_ecc_t of : 1; /* [2] */
+ e2k_mc_ecc_t ue : 1; /* [3] */
+ e2k_mc_ecc_t reserved : 12; /* [15:4] */
+ e2k_mc_ecc_t secnt : 16; /* [31:16] */
+}
e2k_mc_ecc_fields_t; +typedef union e2k_mc_ecc_struct { /* Structure word */ + e2k_mc_ecc_fields_t fields; /* as fields */ + e2k_mc_ecc_t word; /* as entire register */ +} e2k_mc_ecc_struct_t; + +#define E2K_MC_ECC_ee fields.ee /* ECC mode on/off */ +#define E2K_MC_ECC_dmode fields.dmode /* diagnostic mode on/off */ +#define E2K_MC_ECC_of fields.of /* single error counter */ + /* overflow flag */ +#define E2K_MC_ECC_ue fields.ue /* multiple-error flag */ +#define E2K_MC_ECC_secnt fields.secnt /* single error counter */ +#define E2K_MC_ECC_reg word + +/* + * Read/Write IPCC_CSRX (X={1, 2, 3}) registers + */ +typedef unsigned int e2k_ipcc_csr_t; /* single word (32 bits) */ +typedef struct e2k_ipcc_csr_fields { + e2k_ipcc_csr_t link_scale : 4; /* [3:0] */ + e2k_ipcc_csr_t cmd_code : 3; /* [6:4] */ + e2k_ipcc_csr_t cmd_active : 1; /* [7] */ + e2k_ipcc_csr_t reserved : 1; /* [8] */ + e2k_ipcc_csr_t terr_vc_num : 3; /* [11:9] */ + e2k_ipcc_csr_t rx_oflw_uflw : 1; /* [12] */ + e2k_ipcc_csr_t event_imsk : 3; /* [15:13] */ + e2k_ipcc_csr_t ltssm_state : 5; /* [20:16] */ + e2k_ipcc_csr_t cmd_cmpl_sts : 3; /* [23:21] */ + e2k_ipcc_csr_t link_width : 4; /* [27:24] */ + e2k_ipcc_csr_t event_sts : 3; /* [30:28] */ + e2k_ipcc_csr_t link_state : 1; /* [31] */ +} e2k_ipcc_csr_fields_t; +typedef union e2k_ipcc_csr_struct { /* Structure word */ + e2k_ipcc_csr_fields_t fields; /* as fields */ + e2k_ipcc_csr_t word; /* as entire register */ +} e2k_ipcc_csr_struct_t; + +#define E2K_IPCC_CSR_link_scale fields.link_scale +#define E2K_IPCC_CSR_cmd_code fields.cmd_code +#define E2K_IPCC_CSR_cmd_active fields.cmd_active +#define E2K_IPCC_CSR_terr_vc_num fields.terr_vc_num +#define E2K_IPCC_CSR_rx_oflw_uflw fields.rx_oflw_uflw +#define E2K_IPCC_CSR_event_imsk fields.event_imsk +#define E2K_IPCC_CSR_ltssm_state fields.ltssm_state +#define E2K_IPCC_CSR_cmd_cmpl_sts fields.cmd_cmpl_sts +#define E2K_IPCC_CSR_link_width fields.link_width +#define E2K_IPCC_CSR_event_sts fields.event_sts +#define E2K_IPCC_CSR_link_state fields.link_state +#define E2K_IPCC_CSR_reg word + +/* + * Read/Write IPCC_STRX (X={1, 2, 3}) registers + */ +typedef unsigned int e2k_ipcc_str_t; /* single word (32 bits) */ +typedef struct e2k_ipcc_str_fields { + e2k_ipcc_str_t ecnt : 29; /* [28:0] */ + e2k_ipcc_str_t eco : 1; /* [29] */ + e2k_ipcc_str_t ecf : 2; /* [31:30] */ +} e2k_ipcc_str_fields_t; +typedef union e2k_ipcc_str_struct { /* Structure word */ + e2k_ipcc_str_fields_t fields; /* as fields */ + e2k_ipcc_str_t word; /* as entire register */ +} e2k_ipcc_str_struct_t; + +#define E2K_IPCC_STR_ecnt fields.ecnt /* event counter */ +#define E2K_IPCC_STR_eco fields.eco /* event counter overflow */ +#define E2K_IPCC_STR_ecf fields.ecf /* event counter filter */ +#define E2K_IPCC_STR_reg word + +/* + * Read/Write SIC_SCCFG register + */ +typedef unsigned int e2k_sic_sccfg_t; /* single word (32 bits) */ +typedef struct e2k_sic_sccfg_fields { + e2k_sic_sccfg_t diren : 1; /* [0] */ + e2k_sic_sccfg_t dircacheen : 1; /* [1] */ + e2k_sic_sccfg_t unused : 30; /* [31:2] */ +} e2k_sic_sccfg_fields_t; +typedef union e2k_sic_sccfg_struct { /* Structure word */ + e2k_sic_sccfg_fields_t fields; /* as fields */ + e2k_sic_sccfg_t word; /* as entire register */ +} e2k_sic_sccfg_struct_t; + +#define E2K_SIC_SCCFG_diren fields.diren /* directory enabled */ +#define E2K_SIC_SCCFG_dircacheen fields.dircacheen /* directory cache enabled */ +#define E2K_SIC_SCCFG_reg word + +/* + * Cache L3 registers structures + */ +/* Control register */ +typedef unsigned int l3_reg_t; /* 
Read/write register (32 bits) */ +typedef struct l3_ctrl_fields { + l3_reg_t fl : 1; /* [0] flush L3 */ + l3_reg_t cl : 1; /* [1] clear L3 */ + l3_reg_t rdque : 1; /* [2] read queues */ + l3_reg_t rnc_rrel : 1; /* [3] wait RREL for Rnc */ + l3_reg_t lru_separate : 1; /* [4] LRU separate */ + l3_reg_t pipe_ablk_s1 : 1; /* [5] pipe address block S1 */ + l3_reg_t pipe_ablk_s2 : 1; /* [6] pipe address block S2 */ + l3_reg_t sleep_blk : 1; /* [7] sleep block */ + l3_reg_t wbb_forced : 1; /* [8] WBB forced */ + l3_reg_t wbb_refill : 1; /* [9] WBB refill */ + l3_reg_t wbb_timeron : 1; /* [10] WBB release on timer */ + l3_reg_t wbb_timer : 7; /* [17:11] WBB timer set */ + l3_reg_t wbb_tfullon : 1; /* [18] WBB release on timer */ + /* at full state */ + l3_reg_t wbb_tfull : 7; /* [25:19] WBB timer at full */ + /* state set */ + l3_reg_t seal_gblk : 1; /* [26] sealed global block */ + l3_reg_t seal_lblk : 1; /* [27] sealed local block */ + l3_reg_t reserved : 4; /* [31:28] reserved bits */ +} l3_ctrl_fields_t; +typedef union l3_ctrl { /* Structure word */ + l3_ctrl_fields_t fields; /* as fields */ + l3_reg_t word; /* as entire register */ +} l3_ctrl_t; + +#define E2K_L3_CTRL_fl fields.fl +#define E2K_L3_CTRL_cl fields.cl +#define E2K_L3_CTRL_rdque fields.rdque +#define E2K_L3_CTRL_rnc_rrel fields.rnc_rrel +#define E2K_L3_CTRL_lru_separate fields.lru_separate +#define E2K_L3_CTRL_pipe_ablk_s1 fields.pipe_ablk_s1 +#define E2K_L3_CTRL_pipe_ablk_s2 fields.pipe_ablk_s2 +#define E2K_L3_CTRL_sleep_blk fields.sleep_blk +#define E2K_L3_CTRL_wbb_forced fields.wbb_forced +#define E2K_L3_CTRL_wbb_refill fields.wbb_refill +#define E2K_L3_CTRL_wbb_timeron fields.wbb_timeron +#define E2K_L3_CTRL_wbb_timer fields.wbb_timer +#define E2K_L3_CTRL_wbb_tfullon fields.wbb_tfullon +#define E2K_L3_CTRL_wbb_tfull fields.wbb_tfull +#define E2K_L3_CTRL_seal_gblk fields.seal_gblk +#define E2K_L3_CTRL_seal_lblk fields.seal_lblk +#define E2K_L3_CTRL_reg word + +/* + * Read/Write BC_MP_T_CORR register + */ +typedef unsigned int bc_mp_t_corr_t; /* single word (32 bits) */ +typedef struct bc_mp_t_corr_fields { + bc_mp_t_corr_t corr : 1; /* [0] */ + bc_mp_t_corr_t value : 1; /* [1] */ + bc_mp_t_corr_t unused : 10; /* [11:2] */ + bc_mp_t_corr_t addr : 20; /* [31:12] */ +} bc_mp_t_corr_fields_t; +typedef union bc_mp_t_corr_struct { /* Structure word */ + bc_mp_t_corr_fields_t fields; /* as fields */ + bc_mp_t_corr_t word; /* as entire register */ +} bc_mp_t_corr_struct_t; + +#define E2K_MP_T_CORR_corr fields.corr +#define E2K_MP_T_CORR_value fields.value +#define E2K_MP_T_CORR_addr fields.addr +#define E2K_MP_T_CORR_reg word + +/* + * Read/Write BC_MP_T_CORR_H + */ +typedef unsigned int bc_mp_t_corr_h_t; /* single word (32 bits) */ +typedef struct bc_mp_t_corr_h_fields { + bc_mp_t_corr_h_t addr; /* [31:0]*/ +} bc_mp_t_corr_h_fields_t; +typedef union bc_mp_t_corr_h_struct { /* Structure word */ + bc_mp_t_corr_h_fields_t fields; /* as fields */ + bc_mp_t_corr_h_t word; /* as entire register */ +} bc_mp_t_corr_h_struct_t; + +#define E2K_MP_T_CORR_H_addr fields.addr +#define E2K_MP_T_CORR_H_reg word + +/* + * Read/Write BC_MP_CTRL register + */ +typedef unsigned int bc_mp_ctrl_t; /* single word (32 bits) */ +typedef struct bc_mp_ctrl_fields { + bc_mp_ctrl_t unused0 : 12; /* [11:0] */ + bc_mp_ctrl_t mp_en : 1; /* [12] */ + bc_mp_ctrl_t b_en : 1; /* [13] */ + bc_mp_ctrl_t unused1 : 18; /* [31:14] */ +} bc_mp_ctrl_fields_t; +typedef union bc_mp_ctrl_struct { /* Structure word */ + bc_mp_ctrl_fields_t fields; /* as fields */ + bc_mp_ctrl_t 
word; /* as entire register */ +} bc_mp_ctrl_struct_t; + +#define E2K_MP_CTRL_mp_en fields.mp_en +#define E2K_MP_CTRL_b_en fields.b_en +#define E2K_MP_CTRL_reg word + +/* + * Read/Write BC_MP_STAT register + */ +typedef unsigned int bc_mp_stat_t; /* single word (32 bits) */ +typedef struct bc_mp_stat_fields { + bc_mp_stat_t unused0 : 12; /* [11:0] */ + bc_mp_stat_t b_ne : 1; /* [12] */ + bc_mp_stat_t b_of : 1; /* [13] */ + bc_mp_stat_t unused1 : 18; /* [31:14] */ +} bc_mp_stat_fields_t; +typedef union bc_mp_stat_struct { /* Structure word */ + bc_mp_stat_fields_t fields; /* as fields */ + bc_mp_stat_t word; /* as entire register */ +} bc_mp_stat_struct_t; + +#define E2K_MP_STAT_b_ne fields.b_ne +#define E2K_MP_STAT_b_of fields.b_of +#define E2K_MP_STAT_reg word + +#endif /* ! __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _E2K_SIC_REGS_H_ */ diff --git a/arch/e2k/include/asm/sic_regs_access.h b/arch/e2k/include/asm/sic_regs_access.h new file mode 100644 index 0000000..7d2ae6b --- /dev/null +++ b/arch/e2k/include/asm/sic_regs_access.h @@ -0,0 +1,92 @@ +#ifndef _E2K_SIC_REGS_ACCESS_H_ +#define _E2K_SIC_REGS_ACCESS_H_ + +#ifdef __KERNEL__ + +#include +#include + +#undef DEBUG_BOOT_NBSR_MODE +#undef DebugBNBSR +#define DEBUG_BOOT_NBSR_MODE 0 /* early NBSR access */ +#define DebugBNBSR(fmt, args...) \ + ({ if (DEBUG_BOOT_NBSR_MODE) \ + do_boot_printk(fmt, ##args); }) + +#define SIC_io_reg_offset(io_link, reg) ((reg) + 0x1000 * (io_link)) + +#ifndef CONFIG_BOOT_E2K +#define nbsr_early_read(addr) boot_readl((addr)) +#define nbsr_early_write(value, addr) boot_writel((value), (addr)) +#else /* CONFIG_BOOT_E2K */ +#define nbsr_early_read(addr) boot_native_readl((addr)) +#define nbsr_early_write(value, addr) boot_native_writel((value), (addr)) +#endif /* ! 
CONFIG_BOOT_E2K */ + +static inline unsigned int +boot_do_sic_read_node_nbsr_reg(unsigned char *node_nbsr, int reg_offset) +{ + unsigned char *addr; + unsigned int reg_value; + + addr = node_nbsr + reg_offset; + reg_value = nbsr_early_read(addr); + DebugBNBSR("boot_sic_read_node_nbsr_reg() the node reg 0x%x read 0x%x " + "from 0x%lx\n", + reg_offset, reg_value, addr); + return reg_value; +} + +static inline void +boot_do_sic_write_node_nbsr_reg(unsigned char *node_nbsr, int reg_offset, + unsigned int reg_val) +{ + unsigned char *addr; + + addr = node_nbsr + reg_offset; + nbsr_early_write(reg_val, addr); + DebugBNBSR("boot_sic_write_node_nbsr_reg() the node reg 0x%x write " + "0x%x to 0x%lx\n", + reg_offset, reg_val, addr); +} +static inline unsigned int +boot_sic_read_node_nbsr_reg(int node_id, int reg_offset) +{ + unsigned char *node_nbsr; + + node_nbsr = BOOT_THE_NODE_NBSR_PHYS_BASE(node_id); + return boot_do_sic_read_node_nbsr_reg(node_nbsr, reg_offset); +} +static inline void +boot_sic_write_node_nbsr_reg(int node_id, int reg_offset, unsigned int reg_val) +{ + unsigned char *node_nbsr; + + node_nbsr = BOOT_THE_NODE_NBSR_PHYS_BASE(node_id); + boot_do_sic_write_node_nbsr_reg(node_nbsr, reg_offset, reg_val); +} + +#define nbsr_read(addr) readl((addr)) +#define nbsr_readll(addr) readq((addr)) +#define nbsr_readw(addr) readw((addr)) +#define nbsr_write(value, addr) writel((value), (addr)) +#define nbsr_writell(value, addr) writeq((value), (addr)) +#define nbsr_writew(value, addr) writew((value), (addr)) +#define nbsr_write_relaxed(value, addr) writel_relaxed((value), (addr)) + +unsigned int sic_get_mc_ecc(int node, int num); +void sic_set_mc_ecc(int node, int num, unsigned int reg_value); + +unsigned int sic_get_ipcc_csr(int node, int num); +void sic_set_ipcc_csr(int node, int num, unsigned int val); + +unsigned int sic_get_ipcc_str(int node, int num); +void sic_set_ipcc_str(int node, int num, unsigned int val); + +unsigned int sic_get_io_str(int node, int num); +void sic_set_io_str(int node, int num, unsigned int val); +#endif /* __KERNEL__ */ + +#include + +#endif /* _E2K_SIC_REGS_ACCESS_H_ */ diff --git a/arch/e2k/include/asm/sigcontext.h b/arch/e2k/include/asm/sigcontext.h new file mode 100644 index 0000000..faf1647 --- /dev/null +++ b/arch/e2k/include/asm/sigcontext.h @@ -0,0 +1,22 @@ +#ifndef _E2K_SIGCONTEXT_H_ +#define _E2K_SIGCONTEXT_H_ + +#include + +#ifdef CONFIG_PROTECTED_MODE +struct sigcontext_prot { + unsigned long long cr0_lo; + unsigned long long cr0_hi; + unsigned long long cr1_lo; + unsigned long long cr1_hi; + unsigned long long sbr; /* 21 Stack base register: top of */ + /* local data (user) stack */ + unsigned long long usd_lo; /* 22 Local data (user) stack */ + unsigned long long usd_hi; /* 23 descriptor: base & size */ + unsigned long long psp_lo; /* 24 Procedure stack pointer: */ + unsigned long long psp_hi; /* 25 base & index & size */ + unsigned long long pcsp_lo; /* 26 Procedure chain stack */ + unsigned long long pcsp_hi; /* 27 pointer: base & index & size */ +}; +#endif +#endif /* _E2K_SIGCONTEXT_H_ */ diff --git a/arch/e2k/include/asm/signal.h b/arch/e2k/include/asm/signal.h new file mode 100644 index 0000000..af8a43f --- /dev/null +++ b/arch/e2k/include/asm/signal.h @@ -0,0 +1,211 @@ +#ifndef _E2K_SIGNAL_H_ +#define _E2K_SIGNAL_H_ + +#include + + +#undef DEBUG_SIG_MODE +#undef DebugSig +#define DEBUG_SIG_MODE 0 /* Signal handling */ +#if DEBUG_SIG_MODE +# define DebugSig printk +#else +# define DebugSig(...) 
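+/*
+ * Usage sketch (editor's illustration, not a call site in this patch;
+ * 'signr' is a hypothetical local): callers need no guard of their own,
+ * since with DEBUG_SIG_MODE == 0 the macro expands to nothing and the
+ * call compiles away:
+ *
+ * DebugSig("deliver sig %d to %s (pid %d)\n",
+ * signr, current->comm, current->pid);
+ */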
+#endif
+
+#undef DEBUG_SLJ_MODE
+#undef DebugSLJ
+#define DEBUG_SLJ_MODE 0 /* Signal long jump handling */
+#define DebugSLJ(...) DebugPrint(DEBUG_SLJ_MODE ,##__VA_ARGS__)
+
+
+#define __ARCH_HAS_SA_RESTORER
+
+/*
+ * exc_mem_lock_as can arrive inside a critical section since
+ * it uses non-maskable interrupts.
+ *
+ * But in PREEMPT_RT force_sig_info() must be called with
+ * preemption enabled because spinlocks are mutexes.
+ *
+ * Fix this by delaying signal sending.
+ */
+#ifdef CONFIG_PREEMPT_RT
+# define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#define _NSIG_BPW32 32
+#define _NSIG_WORDS32 (_NSIG / _NSIG_BPW32)
+
+#include
+#include
+
+#include
+#include
+
+# ifndef __ASSEMBLY__
+
+typedef struct {
+ e2k_ptr_t ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_prot_t;
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+typedef unsigned long old_sigset_t;
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+typedef struct prot_sigaction_old {
+ e2k_pl_lo_t sa_handler;
+ u64 sa_flags;
+ e2k_pl_lo_t sa_restorer;
+ sigset_t sa_mask;
+} prot_sigaction_old_t;
+
+typedef struct prot_sigaction {
+ e2k_pl_t sa_handler;
+ u64 sa_flags;
+ u64 _unused;
+ e2k_pl_t sa_restorer;
+ sigset_t sa_mask;
+} prot_sigaction_t;
+
+#include
+
+struct pt_regs;
+struct siginfo;
+struct ucontext;
+struct as_sa_handler_arg;
+
+#define ptrace_signal_deliver() do { } while (0)
+
+
+#define DO_SDBGPRINT(message) \
+do { \
+ e2k_tir_lo_t tir_lo; \
+ void *cr_ip, *tir_ip; \
+ \
+ tir_lo.TIR_lo_reg = (regs)->trap->TIR_lo; \
+ \
+ tir_ip = (void *)tir_lo.TIR_lo_ip; \
+ cr_ip = (void *)GET_IP_CR0_HI((regs)->crs.cr0_hi); \
+ \
+ if (tir_ip == cr_ip) \
+ pr_info("%s: IP=%px %s(pid=%d)\n", \
+ message, tir_ip, current->comm, \
+ current->pid); \
+ else \
+ pr_info("%s: IP=%px(interrupt IP=%px) %s(pid=%d)\n", \
+ message, tir_ip, cr_ip, current->comm, \
+ current->pid); \
+} while (false)
+
+#define SDBGPRINT(message) \
+do { \
+ if (debug_signal) \
+ DO_SDBGPRINT(message); \
+} while (0)
+
+#define SDBGPRINT_WITH_STACK(message) \
+do { \
+ if (debug_signal) { \
+ DO_SDBGPRINT(message); \
+ dump_stack(); \
+ } \
+} while (0)
+
+struct signal_stack;
+extern void free_signal_stack(struct signal_stack *signal_stack);
+extern struct signal_stack_context __user *pop_signal_stack(void);
+extern struct signal_stack_context __user *get_signal_stack(void);
+extern int setup_signal_stack(struct pt_regs *regs);
+
+#define GET_SIG_RESTORE_STACK(ti, sbr, usd_lo, usd_hi) \
+do { \
+ /* Reserve 64 bytes for kernel per C calling convention */ \
+ u64 used_dstack_size = round_up(64, E2K_ALIGN_STACK); \
+ \
+ sbr = (u64)thread_info_task(ti)->stack + KERNEL_C_STACK_SIZE; \
+ AW(usd_lo) = AW((ti)->k_usd_lo); \
+ AW(usd_hi) = AW((ti)->k_usd_hi); \
+ AS(usd_lo).base -= used_dstack_size; \
+ AS(usd_hi).size -= used_dstack_size; \
+} while (false)
+
+/* The topmost dispatcher for any signals. */
+/* Implemented in arch/e2k/kernel/signal.c */
+extern void do_signal(struct pt_regs *);
+extern int signal_rt_frame_setup(struct pt_regs *regs);
+extern int prepare_sighandler_frame(struct e2k_stacks *stacks,
+ u64 pframe[32], e2k_mem_crs_t *crs);
+
+extern int native_signal_setup(struct pt_regs *regs);
+
+static inline int native_complete_long_jump(struct pt_regs *regs)
+{
+ /* nothing to do for native kernel & host */
+ return 0;
+}
+
+extern long do_sigreturn(void);
+extern void sighandler_trampoline(void);
+extern void sighandler_trampoline_continue(void);
+
+#ifdef CONFIG_SECONDARY_SPACE_SUPPORT
+extern long sys_tgkill_info(int pid, int tgid, struct siginfo __user *uinfo);
+
+#define set_delayed_signal_handling(ti) \
+do { \
+ set_ti_status_flag(ti, TS_DELAYED_SIG_HANDLING); \
+} while (0)
+
+#define clear_delayed_signal_handling(ti) \
+do { \
+ clear_ti_status_flag(ti, TS_DELAYED_SIG_HANDLING); \
+} while (0)
+
+#define test_delayed_signal_handling(p, ti) \
+ (unlikely(test_ti_status_flag(ti, TS_DELAYED_SIG_HANDLING)) && \
+ !__fatal_signal_pending(p))
+#else
+#define set_delayed_signal_handling(ti)
+#define clear_delayed_signal_handling(ti)
+#define test_delayed_signal_handling(p, ti) (false)
+#endif
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* It is native paravirtualized guest kernel */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* It is paravirtualized kernel (host and guest) */
+#include
+#else /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */
+/* native kernel with virtualization support */
+/* native kernel without virtualization support */
+
+static inline int signal_setup(struct pt_regs *regs)
+{
+ return native_signal_setup(regs);
+}
+
+static inline int complete_long_jump(struct pt_regs *regs)
+{
+ return native_complete_long_jump(regs);
+}
+
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+
+# endif /* __ASSEMBLY__ */
+
+#endif /* _E2K_SIGNAL_H_ */
diff --git a/arch/e2k/include/asm/simul.h b/arch/e2k/include/asm/simul.h
new file mode 100644
index 0000000..56082f5
--- /dev/null
+++ b/arch/e2k/include/asm/simul.h
@@ -0,0 +1,60 @@
+#ifndef _E2K_SIMUL_H_
+#define _E2K_SIMUL_H_
+
+#include
+
+
+#ifdef CONFIG_E2K_MACHINE
+# ifdef CONFIG_E2K_SIMULATOR
+# define E2K_HALT_OK() E2K_LMS_HALT_OK
+# define E2K_HALT_ERROR(err_no) \
+({ \
+ dump_stack(); \
+ console_flush_on_panic(CONSOLE_REPLAY_ALL); \
+ E2K_LMS_HALT_ERROR(err_no); \
+})
+# define BOOT_E2K_HALT_OK() E2K_LMS_HALT_OK
+# define BOOT_E2K_HALT_ERROR(err_no) E2K_LMS_HALT_ERROR(err_no)
+# else
+# define E2K_HALT_OK() {while(1);}
+# define E2K_HALT_ERROR(err_no) panic("HALT_ERROR(%d)\n", err_no)
+# define BOOT_E2K_HALT_OK() {while(1);}
+# define BOOT_E2K_HALT_ERROR(err_no) boot_panic("HALT_ERROR(%d)\n", err_no)
+# endif
+#else /* !
CONFIG_E2K_MACHINE */ +# define E2K_HALT_OK() \ +({ \ + if (NATIVE_IS_MACHINE_SIM) { \ + E2K_LMS_HALT_OK; \ + } \ + while (1) { \ + } \ +}) +# define E2K_HALT_ERROR(err_no) \ +({ \ + if (NATIVE_IS_MACHINE_SIM) { \ + dump_stack(); \ + console_flush_on_panic(CONSOLE_REPLAY_ALL); \ + E2K_LMS_HALT_ERROR(err_no); \ + } \ + panic("HALT_ERROR(%d)\n", err_no); \ +}) +# define BOOT_E2K_HALT_OK() \ +({ \ + if (BOOT_NATIVE_IS_MACHINE_SIM) { \ + E2K_LMS_HALT_OK; \ + } \ + while (1) { \ + } \ +}) +# define BOOT_E2K_HALT_ERROR(err_no) \ +({ \ + if (BOOT_NATIVE_IS_MACHINE_SIM) { \ + E2K_LMS_HALT_ERROR(err_no); \ + } else { \ + boot_panic("HALT_ERROR(%d)\n", err_no); \ + } \ +}) +#endif /* CONFIG_E2K_MACHINE */ + +#endif /* _E2K_SIMUL_H_ */ diff --git a/arch/e2k/include/asm/smp-boot.h b/arch/e2k/include/asm/smp-boot.h new file mode 100644 index 0000000..155b936 --- /dev/null +++ b/arch/e2k/include/asm/smp-boot.h @@ -0,0 +1,52 @@ +#ifndef __ASM_SMP_BOOT_H +#define __ASM_SMP_BOOT_H + +#include + +#include + +#ifndef ASSEMBLY + +#ifdef CONFIG_SMP + +extern struct task_struct *idle_tasks[NR_CPUS]; + +extern void e2k_start_secondary_switched_stacks(int cpuid, int cpu); +extern void native_setup_secondary_task(int cpu); + +static inline void +native_ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + NATIVE_SWITCH_TO_KERNEL_STACK( + stack_base + KERNEL_P_STACK_OFFSET, KERNEL_P_STACK_SIZE, + stack_base + KERNEL_PC_STACK_OFFSET, KERNEL_PC_STACK_SIZE, + stack_base + KERNEL_C_STACK_OFFSET, KERNEL_C_STACK_SIZE); + + E2K_JUMP_WITH_ARGUMENTS(e2k_start_secondary_switched_stacks, 2, + cpuid, cpu); +} + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ +static inline void +ap_switch_to_init_stack(e2k_addr_t stack_base, int cpuid, int cpu) +{ + native_ap_switch_to_init_stack(stack_base, cpuid, cpu); +} + +static inline void setup_secondary_task(int cpu) +{ + native_setup_secondary_task(cpu); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* CONFIG_SMP */ +#endif /* !ASSEMBLY */ +#endif /* __ASM_SMP_BOOT_H */ diff --git a/arch/e2k/include/asm/smp.h b/arch/e2k/include/asm/smp.h new file mode 100644 index 0000000..104b905 --- /dev/null +++ b/arch/e2k/include/asm/smp.h @@ -0,0 +1,240 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +/* + * We need the APIC definitions automatically as part of 'smp.h' + */ +#ifndef ASSEMBLY +#include +#include +#include +#include +#endif + +#ifdef CONFIG_L_LOCAL_APIC +#ifndef ASSEMBLY +#include +#include +#include +#include +#include +#include +#include +#endif /* !ASSEMBLY */ +#endif /* CONFIG_L_LOCAL_APIC */ + +#ifdef CONFIG_SMP +#ifndef ASSEMBLY + +typedef struct tlb_page { + struct vm_area_struct *vma; + e2k_addr_t addr; +} tlb_page_t; + +typedef struct tlb_range { + struct mm_struct *mm; + e2k_addr_t start; + e2k_addr_t end; +} tlb_range_t; + +typedef struct icache_page { + struct vm_area_struct *vma; + struct page *page; +} icache_page_t; + +struct call_data_struct { + void (*func) (void *info); + void *info; + atomic_t started; + atomic_t finished; + int wait; +}; + +/* + * Private routines/data + */ + +extern atomic_t cpu_present_num; +extern unsigned long smp_invalidate_needed; +extern int pic_mode; +extern cpumask_t callin_go; + +extern void smp_alloc_memory(void); +extern void e2k_start_secondary(int cpuid); +extern void start_secondary_resume(int cpuid, int cpu); +extern void wait_for_startup(int cpuid, int hotplug); +extern void smp_flush_tlb(void); +extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); +extern void smp_send_reschedule(int cpu); +extern void smp_invalidate_rcv(void); /* Process an NMI */ +extern void (*mtrr_hook) (void); +extern void zap_low_mappings (void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +extern void smp_send_refresh(void); + +#ifdef CONFIG_DATA_BREAKPOINT +typedef struct hw_data_bp { + void *address; + int size; + bool write; + bool read; + bool stop; + int cp_num; +} hw_data_bp_t; +extern atomic_t hw_data_breakpoint_num; +#define DATA_BREAKPOINT_ON (atomic_read(&hw_data_breakpoint_num) >= 0) + +extern void smp_set_data_breakpoint(void *address, u64 size, + bool write, bool read, bool stop, const int cp_num); +extern int smp_reset_data_breakpoint(void *address); +#else /* ! CONFIG_DATA_BREAKPOINT */ +#define DATA_BREAKPOINT_ON false +#endif /* CONFIG_DATA_BREAKPOINT */ + +extern void native_wait_for_cpu_booting(void); +extern void native_wait_for_cpu_wake_up(void); +extern int native_activate_cpu(int vcpu_id); +extern int native_activate_all_cpus(void); + +#ifdef CONFIG_RECOVERY +extern int cpu_recover(unsigned int cpu); +extern void smp_prepare_boot_cpu_to_recover(void); +extern void smp_prepare_cpus_to_recover(unsigned int max_cpus); +extern void smp_cpus_recovery_done(unsigned int max_cpus); +#endif /* CONFIG_RECOVERY */ + +/* + * General functions that each host system must provide. + */ + +/* + * This function is needed by all SMP systems. It must _always_ be valid + * from the initial startup. 
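+ *
+ * For illustration (editor's sketch, not code from this patch): since
+ * the id is kept in a reserved global register, the read below costs a
+ * single register move with no memory access, and is valid even before
+ * per-cpu areas are set up:
+ *
+ * int cpu = raw_smp_processor_id();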
+ */
+register unsigned long long __cpu_reg DO_ASM_GET_GREG_MEMONIC(
+ SMP_CPU_ID_GREG);
+#define raw_smp_processor_id() ((unsigned int) __cpu_reg)
+
+#endif /* !ASSEMBLY */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+/*
+ * This magic constant controls our willingness to transfer
+ * a process across CPUs. Such a transfer incurs misses on the L1
+ * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
+ * gut feeling is this will vary by board in value. For a board
+ * with separate L2 cache it probably depends also on the RSS, and
+ * for a board with shared L2 cache it ought to decay fast as other
+ * processes are run.
+ */
+
+#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
+
+#else /* ! CONFIG_SMP */
+static inline void e2k_start_secondary(int cpuid) { }
+
+#define native_wait_for_cpu_booting()
+#define native_wait_for_cpu_wake_up()
+#define native_activate_cpu(vcpu_id) 0
+#define native_activate_all_cpus(void) 0
+
+#define DATA_BREAKPOINT_ON false
+
+#endif /* CONFIG_SMP */
+
+#ifndef ASSEMBLY
+
+extern int hard_smp_processor_id(void);
+
+#endif /* ! ASSEMBLY */
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Upping and downing of CPUs */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/* this description is from include/linux/smp.h */
+/* do not forget to update it here when it is updated there */
+enum {
+ CSD_FLAG_LOCK = 0x01,
+ CSD_FLAG_SYNCHRONOUS = 0x02,
+ CSD_FLAG_LOCK_ASYNC = 0x10,
+};
+
+#if defined(CONFIG_VIRTUALIZATION)
+#include
+
+extern void native_csd_lock_wait(call_single_data_t *csd);
+extern void native_csd_lock(call_single_data_t *csd);
+extern void native_arch_csd_lock_async(call_single_data_t *csd);
+extern void native_csd_unlock(call_single_data_t *csd);
+#endif /* CONFIG_VIRTUALIZATION */
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#else /* !
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ +static inline void +wait_for_cpu_booting(void) +{ + native_wait_for_cpu_booting(); +} +static inline void +wait_for_cpu_wake_up(void) +{ + native_wait_for_cpu_wake_up(); +} +static inline int +activate_cpu(int cpu_id) +{ + return native_activate_cpu(cpu_id); +} +static inline int +activate_all_cpus(void) +{ + return native_activate_all_cpus(); +} + +#if defined(CONFIG_VIRTUALIZATION) +static inline void csd_lock_wait(call_single_data_t *data) +{ + native_csd_lock_wait(data); +} +static inline void csd_lock(call_single_data_t *data) +{ + native_csd_lock(data); +} +static inline void arch_csd_lock_async(call_single_data_t *csd) +{ + native_arch_csd_lock_async(csd); +} +static inline void csd_unlock(call_single_data_t *data) +{ + native_csd_unlock(data); +} +#endif /* CONFIG_VIRTUALIZATION */ + +static inline void +setup_local_pic_virq(unsigned int cpu) +{ + /* native and host kernel does not use virtual IRQs */ + /* and its handlers */ +} +static inline void +startup_local_pic_virq(unsigned int cpuid) +{ + /* native and host kernel does not use virtual IRQs */ + /* and its handlers */ +} + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_SMP_H */ diff --git a/arch/e2k/include/asm/socket.h b/arch/e2k/include/asm/socket.h new file mode 100644 index 0000000..6b71384 --- /dev/null +++ b/arch/e2k/include/asm/socket.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/sockios.h b/arch/e2k/include/asm/sockios.h new file mode 100644 index 0000000..def6d47 --- /dev/null +++ b/arch/e2k/include/asm/sockios.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/asm/sparsemem.h b/arch/e2k/include/asm/sparsemem.h new file mode 100644 index 0000000..83e05dc --- /dev/null +++ b/arch/e2k/include/asm/sparsemem.h @@ -0,0 +1,10 @@ +#ifndef _ASM_E2K_SPARSEMEM_H +#define _ASM_E2K_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM + +# define SECTION_SIZE_BITS 28 +# define MAX_PHYSMEM_BITS 40 + +#endif /* CONFIG_SPARSEMEM */ +#endif /* _ASM_E2K_SPARSEMEM_H */ diff --git a/arch/e2k/include/asm/spinlock.h b/arch/e2k/include/asm/spinlock.h new file mode 100644 index 0000000..b9900f6 --- /dev/null +++ b/arch/e2k/include/asm/spinlock.h @@ -0,0 +1,22 @@ +#ifndef _ASM_E2K_SPINLOCK_H +#define _ASM_E2K_SPINLOCK_H + +/* How long a lock should spin before we consider blocking */ +#define SPIN_THRESHOLD (1 << 11) + +#include + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * NOTE! it is quite common to have readers in interrupts + * but no interrupt writers. For those circumstances we + * can "mix" irq-safe locks - any writer needs to get a + * irq-safe write-lock, but readers can get non-irqsafe + * read-locks. + */ + +#include + +#endif /* _ASM_E2K_SPINLOCK_H */ diff --git a/arch/e2k/include/asm/spinlock_types.h b/arch/e2k/include/asm/spinlock_types.h new file mode 100644 index 0000000..e4100ac --- /dev/null +++ b/arch/e2k/include/asm/spinlock_types.h @@ -0,0 +1,10 @@ +#ifndef _ASM_E2K_SPINLOCK_TYPES_H +#define _ASM_E2K_SPINLOCK_TYPES_H + +#include + +#include + +#include + +#endif /* _ASM_E2K_SPINLOCK_TYPES_H */ diff --git a/arch/e2k/include/asm/stacks.h b/arch/e2k/include/asm/stacks.h new file mode 100644 index 0000000..6a5f49e --- /dev/null +++ b/arch/e2k/include/asm/stacks.h @@ -0,0 +1,190 @@ +/* + * include/asm-e2k/stack.h + * + * Copyright 2004 Salavat S. 
Guiliazov (atic@mcst.ru)
+ */
+
+#ifndef _E2K_STACKS_H
+#define _E2K_STACKS_H
+
+#include
+#include /* virtualization support */
+
+/*
+ * User's high address space is reserved for tag memory mapping.
+ * Tags of all user virtual pages are mapped to user virtual space.
+ * To each quad-word of data (16 bytes) corresponds 1 byte of tag.
+ * Virtual pages of tags live at the end of virtual user space.
+ *
+ * 0x0000 0000 0000 1000 - 0x0000 0100 0000 0000 All user virtual space from
+ * 'text' start to 'TASK_SIZE'
+ * 0x0000 00f0 0000 0000 - 0x0000 00ff ffff ffff Tags memory virtual space
+ */
+#define USER_TAG_MEM_SIZE (TASK_SIZE / 16) /* 1/16 of */
+ /* total user */
+ /* memory */
+ /* size */
+#define USER_TAG_MEM_BASE \
+ (TASK_SIZE - USER_VPTB_BASE_SIZE - USER_TAG_MEM_SIZE)
+
+/*
+ * User's high address below tags memory space is reserved for CUT.
+ */
+
+#define USER_CUT_AREA_SIZE (PAGE_SIZE)
+#define USER_CUT_AREA_BASE (USER_TAG_MEM_BASE - USER_CUT_AREA_SIZE)
+
+#ifndef __ASSEMBLY__
+/*
+ * The structure defines the state of all e2k stacks:
+ * hardware pointers and registers
+ */
+
+typedef struct e2k_stacks {
+#ifdef CONFIG_KVM_HOST_MODE
+ /* gthread_info uses these fields */
+ e2k_addr_t u_top;
+ e2k_usd_lo_t u_usd_lo;
+ e2k_usd_hi_t u_usd_hi;
+#endif
+ e2k_addr_t top; /* top address (same as SBR pointer) */
+ e2k_usd_lo_t usd_lo; /* current state of stack pointer */
+ e2k_usd_hi_t usd_hi; /* register: base & size */
+ e2k_psp_lo_t psp_lo; /* Procedure stack pointer: */
+ e2k_psp_hi_t psp_hi; /* base & index & size */
+ e2k_pcsp_lo_t pcsp_lo; /* Procedure chain stack */
+ e2k_pcsp_hi_t pcsp_hi; /* pointer: base & index & size */
+ /* %px[c]sp.ind in this structure includes the %px[c]shtp part,
+ * and saved %px[c]shtp values show how much of user stack has
+ * been SPILLed to kernel. This is done for convenience - add
+ * %px[c]shtp just once instead of pretty much always. */
+ e2k_pshtp_t pshtp;
+ e2k_pcshtp_t pcshtp;
+} e2k_stacks_t;
+
+typedef struct data_stack {
+ e2k_addr_t bottom; /* data stack bottom */
+ e2k_size_t size; /* data stack size */
+ e2k_addr_t top; /* Top of the stack (as SBR) */
+} data_stack_t;
+
+/*
+ * Hardware stacks description: procedure and chain stacks.
+ * Both stacks have a resident part at the current top of the stack to
+ * ensure kernel function execution while handling traps and system calls.
+ */
+typedef struct hw_stack_area {
+ void *base; /* Hardware stack base pointer */
+ e2k_size_t size; /* Hardware stack total size */
+} hw_stack_area_t;
+
+typedef struct hw_stack {
+ hw_stack_area_t ps; /* Current procedure stack area */
+ hw_stack_area_t pcs; /* Current chain stack area */
+} hw_stack_t;
+
+typedef struct old_pcs_area {
+ void *base; /* Hardware stack base pointer */
+ long size; /* Hardware stack total size */
+ struct list_head list_entry;
+} old_pcs_area_t;
+
+#define GET_PS_BASE(hw_stacks) ((hw_stacks)->ps.base)
+#define GET_PCS_BASE(hw_stacks) ((hw_stacks)->pcs.base)
+
+#define CURRENT_PS_BASE() (current_thread_info()->u_hw_stack.ps.base)
+#define CURRENT_PCS_BASE() (current_thread_info()->u_hw_stack.pcs.base)
+
+#define SET_PS_BASE(hw_stacks, val) (GET_PS_BASE(hw_stacks) = (val))
+#define SET_PCS_BASE(hw_stacks, val) (GET_PCS_BASE(hw_stacks) = (val))
+
+#endif /* ! __ASSEMBLY__ */
+
+/*
+ * Data and hardware user stacks descriptions.
+ */
+#define USER_P_STACKS_MAX_SIZE E2K_ALL_STACKS_MAX_SIZE /* 128 Gbytes */
+#define USER_PC_STACKS_MAX_SIZE USER_P_STACKS_MAX_SIZE
+
+#define _min_(a, b) ((a) < (b) ? (a) : (b))
+#define USER_P_STACKS_BASE (USER_CUT_AREA_BASE - USER_P_STACKS_MAX_SIZE)
+#define USER_PC_STACKS_BASE USER_P_STACKS_BASE
+#define USER_HW_STACKS_BASE _min_(USER_P_STACKS_BASE, USER_PC_STACKS_BASE)
+
+#define USER_P_STACK_INIT_SIZE (4 * PAGE_SIZE)
+#define USER_PC_STACK_INIT_SIZE PAGE_SIZE
+
+/*
+ * Software user stack for 64-bit mode.
+ */
+#define USER64_C_STACK_BYTE_INCR (4 * PAGE_SIZE) /* 4 pages */
+#define USER64_STACK_TOP (USER_PC_STACKS_BASE)
+
+/*
+ * Software user stack for 32-bit mode.
+ */
+#define USER32_C_STACK_BYTE_INCR (4 * PAGE_SIZE) /* 4 pages */
+#define USER32_STACK_TOP (TASK32_SIZE)
+
+/*
+ * These macro definitions are to unify 32- and 64-bit user stack
+ * handling procedures.
+ */
+#define USER_C_STACK_BYTE_INCR (current->thread.flags & E2K_FLAG_32BIT ? \
+ USER32_C_STACK_BYTE_INCR : USER64_C_STACK_BYTE_INCR)
+
+/*
+ * This macro definition is to limit the default user stack size
+ * (see asm/resource.h)
+ */
+#define E2K_STK_LIM USER64_MAIN_C_STACK_SIZE
+
+/*
+ * Kernel stacks (software & hardware) descriptions
+ */
+#define K_DATA_GAP_SIZE E2K_ALIGN_STACK
+#define KERNEL_C_STACK_SIZE (5 * PAGE_SIZE - K_DATA_GAP_SIZE)
+
+/* Maybe implement do_softirq_own_stack() and reduce this to 7 pages.
+ * Having a separate stack for hardware IRQ handling would allow reducing
+ * this further - probably to ~4 pages. */
+#define KERNEL_P_STACK_SIZE (9 * PAGE_SIZE)
+#define NATIVE_KERNEL_P_STACK_PAGES (KERNEL_P_STACK_SIZE / PAGE_SIZE)
+
+#define KERNEL_PC_STACK_SIZE \
+ (2 * PAGE_SIZE) /* 8 Kbytes (256 function calls) */
+#define NATIVE_KERNEL_PC_STACK_PAGES (KERNEL_PC_STACK_SIZE / PAGE_SIZE)
+
+#define MAX_KERNEL_P_STACK_PAGES \
+ (NATIVE_KERNEL_P_STACK_PAGES + VIRT_KERNEL_P_STACK_PAGES)
+#define MAX_KERNEL_PC_STACK_PAGES \
+ (NATIVE_KERNEL_PC_STACK_PAGES + VIRT_KERNEL_PC_STACK_PAGES)
+#define MAX_KERNEL_HW_STACK_PAGES \
+ (MAX_KERNEL_P_STACK_PAGES + MAX_KERNEL_PC_STACK_PAGES)
+
+/*
+ * 3 kernel stacks are allocated together and lie in memory
+ * in the following order:
+ *
+ * ------------------------------------------------------------------> higher
+ * K_DATA_GAP_SIZE | DATA | PROCEDURE | PAGE_SIZE | CHAIN | PAGE_SIZE
+ *
+ * Two unused pages are needed to properly handle hardware
+ * stack overflow: on overflow PSR.sge checking is disabled
+ * and the stack will be spilled past its own boundary, and then
+ * kernel_hw_stack_fatal_error() will print the full stack.
+ *
+ * The arch-independent part expects the data stack to be the first
+ * one (see end_of_stack()), that's also the reason to skip
+ * the first E2K_ALIGN_STACK bytes to keep the magic value
+ * intact.
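+ *
+ * A worked example (editor's sketch, assuming 4 Kb pages and the
+ * macro values below): KERNEL_C_STACK_SIZE is 5 pages minus
+ * K_DATA_GAP_SIZE, so the data stack ends exactly on the 5-page
+ * boundary; the procedure stack takes pages 5-13, page 14 is the
+ * first guard page, the chain stack takes pages 15-16, and page 17
+ * is the second guard page - 18 pages (72 Kb) in total, which is
+ * what KERNEL_STACKS_SIZE below works out to.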
+ */ +#define KERNEL_STACKS_SIZE (K_DATA_GAP_SIZE + KERNEL_C_STACK_SIZE + \ + KERNEL_P_STACK_SIZE + KERNEL_PC_STACK_SIZE + 2 * PAGE_SIZE) +#define KERNEL_C_STACK_OFFSET K_DATA_GAP_SIZE +#define KERNEL_P_STACK_OFFSET (KERNEL_C_STACK_OFFSET + KERNEL_C_STACK_SIZE) +#define KERNEL_PC_STACK_OFFSET (KERNEL_P_STACK_OFFSET + \ + KERNEL_P_STACK_SIZE + PAGE_SIZE) + +#endif /* _E2K_STACKS_H */ + diff --git a/arch/e2k/include/asm/stacktrace.h b/arch/e2k/include/asm/stacktrace.h new file mode 100644 index 0000000..7aa5cb5 --- /dev/null +++ b/arch/e2k/include/asm/stacktrace.h @@ -0,0 +1,4 @@ +#ifndef _ASM_E2K_STACKTRACE_H +#define _ASM_E2K_STACKTRACE_H + +#endif /* _ASM_E2K_STACKTRACE_H */ diff --git a/arch/e2k/include/asm/stat.h b/arch/e2k/include/asm/stat.h new file mode 100644 index 0000000..36e5dac --- /dev/null +++ b/arch/e2k/include/asm/stat.h @@ -0,0 +1,30 @@ +#ifndef _E2K_STAT_H_ +#define _E2K_STAT_H_ + +#include + + +/* + * "struct stat64" should be the same as glibc "struct stat64" + */ +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned long st_rdev; + unsigned long st_size; + unsigned int st_blksize; + unsigned int __unused1; + unsigned long st_blocks; + int st_atime; + unsigned int st_atime_nsec; + int st_mtime; + unsigned int st_mtime_nsec; + int st_ctime; + unsigned int st_ctime_nsec; +}; + +#endif /* _E2K_STAT_H_ */ diff --git a/arch/e2k/include/asm/statfs.h b/arch/e2k/include/asm/statfs.h new file mode 100644 index 0000000..8f2a792 --- /dev/null +++ b/arch/e2k/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _E2K_STATFS_H_ +#define _E2K_STATFS_H_ + +#include + +#endif /* _E2K_STATFS_H_ */ diff --git a/arch/e2k/include/asm/string.h b/arch/e2k/include/asm/string.h new file mode 100644 index 0000000..fdb12c2 --- /dev/null +++ b/arch/e2k/include/asm/string.h @@ -0,0 +1,538 @@ +#ifndef _E2K_STRING_H_ +#define _E2K_STRING_H_ + +#include + +#include + +#define __HAVE_ARCH_STRNLEN +extern size_t strnlen(const char *s, size_t count) __pure; + +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *s) __pure; + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dst, const void *src, size_t count); + +#define __HAVE_ARCH_MEMCMP +extern int __memcmp(const void *cs, const void *ct, size_t count) __pure; +#define memcmp(dst, src, n) _memcmp((dst), (src), (n)) +static inline int _memcmp(const void *s1, const void *s2, size_t n) +{ + if (__builtin_constant_p(n)) { + u64 v1, v2; + if (n == 16) { + v1 = *(u64 *) s1; + v2 = *(u64 *) s2; + u64 v21 = *(u64 *) (s1 + 8); + u64 v22 = *(u64 *) (s2 + 8); + if (v1 != v2) + return (__swab64(v1) > __swab64(v2)) ? 1 : -1; + if (v21 == v22) + return 0; + return (__swab64(v21) > __swab64(v22)) ? 1 : -1; + } + if (n == 8) { + v1 = *(u64 *) s1; + v2 = *(u64 *) s2; + if (v1 == v2) + return 0; + return (__swab64(v1) > __swab64(v2)) ? 1 : -1; + } + if (n == 4) { + v1 = *(u32 *) s1; + v2 = *(u32 *) s2; + if (v1 == v2) + return 0; + return (__swab32(v1) > __swab32(v2)) ? 
1 : -1; + } + if (n == 2) { + v1 = *(u16 *) s1; + v2 = *(u16 *) s2; + return (u32) __swab16(v1) - (u32) __swab16(v2); + } + if (n == 1) { + v1 = *(u8 *) s1; + v2 = *(u8 *) s2; + return v1 - v2; + } + } + + E2K_PREFETCH_L2(s1); + E2K_PREFETCH_L2(s2); + return __memcmp(s1, s2, n); +} + +#define __HAVE_ARCH_MEMSET +#ifdef __HAVE_ARCH_MEMSET +extern void __memset(void *, long, size_t); +#if defined E2K_P2V && !defined CONFIG_BOOT_E2K +extern void *boot_memset(void *s_va, int c, size_t count); +# define memset boot_memset +#else +# define memset(dst, c, n) _memset(dst, c, n, __alignof(*(dst))) +#endif +static inline void *_memset(void *dst, int c, size_t n, + const unsigned long dst_align) +{ + u64 cc; + + if (__builtin_constant_p(c)) { + cc = (u8) c; + cc |= cc << 8; + cc |= cc << 16; + cc |= cc << 32; + } else { + cc = __builtin_e2k_pshufb(c, c, 0); + } + + if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) { + /* Inline small aligned memset's */ + u64 *l_dst = dst; + + if (n >= 8) + l_dst[0] = cc; + if (n >= 16) + l_dst[1] = cc; + if (n >= 24) + l_dst[2] = cc; + if (n >= 32) + l_dst[3] = cc; + if (n >= 40) + l_dst[4] = cc; + if (n >= 48) + l_dst[5] = cc; + if (n >= 56) + l_dst[6] = cc; + if (n >= 64) + l_dst[7] = cc; + if (n >= 72) + l_dst[8] = cc; + if (n >= 80) + l_dst[9] = cc; + if (n >= 88) + l_dst[10] = cc; + if (n >= 96) + l_dst[11] = cc; + if (n >= 104) + l_dst[12] = cc; + if (n >= 112) + l_dst[13] = cc; + if (n >= 120) + l_dst[14] = cc; + if (n >= 128) + l_dst[15] = cc; + + /* Set the tail */ + if (n & 4) + *(u32 *) (dst + (n & ~0x7UL)) = cc; + if (n & 2) + *(u16 *) (dst + (n & ~0x3UL)) = cc; + if (n & 1) + *(u8 *) (dst + (n & ~0x1UL)) = cc; + } else if (__builtin_constant_p(n) && n <= 24) { + int i; + /* Inline small memset's */ + char *c_dst = dst; + for (i = 0; i < n; i++) + c_dst[i] = c; + } else { + __memset(dst, cc, n); + } + + return dst; +} +#endif /* __HAVE_ARCH_MEMSET */ + +#define __HAVE_ARCH_MEMCPY +#ifdef __HAVE_ARCH_MEMCPY +#define memcpy_nocache memcpy_nocache +extern void memcpy_nocache(void *dst, const void *src, size_t n); +extern void *__memcpy(void *dst, const void *src, size_t n); +#if defined E2K_P2V && !defined CONFIG_BOOT_E2K +extern void *boot_memcpy(void *dest_va, const void *src_va, size_t count); +# define memcpy boot_memcpy +#else +# define memcpy(dst, src, n) _memcpy(dst, src, n, __alignof(*(dst))) +#endif +static inline void *_memcpy(void *__restrict dst, + const void *__restrict src, + size_t n, const unsigned long dst_align) +{ + /* + * As measurements show, an unaligned dst causes a 20x slowdown, + * but unaligned src causes only a 2x slowdown. + * + * We can manually assure dst's alignment, but what about src? + * + * Consider the following situations: + * 1) src is 8 bytes aligned. Just do the copy. + * 2) src is 4 bytes aligned. Copying with unaligned loads will cause + * a 100% slowdown, the same as copying with 4-bytes words. So we can + * treat this case the same way as the previous one. + * 3) src is 2-bytes aligned or unaligned. Copying with 2-bytes + * (1-byte for unaligned) will cause a 4x slowdown (8x slowdown for + * unaligned), so copying with unaligned doublewords is preferred + * as it causes only 2x slowdown. + * + * To sum it up: the best way to copy is to assure dst's 8-bytes + * alignment and do the copy with 8-bytes words. 
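+ *
+ * A concrete illustration (editor's sketch): for a constant n == 32
+ * and an 8-byte aligned dst, the inline path below reduces to four
+ * 8-byte stores:
+ *
+ * l_dst[0] = l_src[0]; l_dst[1] = l_src[1];
+ * l_dst[2] = l_src[2]; l_dst[3] = l_src[3];
+ *
+ * anything that misses the fast-path test falls through to the
+ * library __memcpy() after a prefetch.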
+ */ + + if (__builtin_constant_p(n) && dst_align >= 8 && n < 136) { + /* Inline small aligned memcpy's */ + const u64 *__restrict l_src = src; + u64 *__restrict l_dst = dst; + + if (n >= 8) + l_dst[0] = l_src[0]; + if (n >= 16) + l_dst[1] = l_src[1]; + if (n >= 24) + l_dst[2] = l_src[2]; + if (n >= 32) + l_dst[3] = l_src[3]; + if (n >= 40) + l_dst[4] = l_src[4]; + if (n >= 48) + l_dst[5] = l_src[5]; + if (n >= 56) + l_dst[6] = l_src[6]; + if (n >= 64) + l_dst[7] = l_src[7]; + if (n >= 72) + l_dst[8] = l_src[8]; + if (n >= 80) + l_dst[9] = l_src[9]; + if (n >= 88) + l_dst[10] = l_src[10]; + if (n >= 96) + l_dst[11] = l_src[11]; + if (n >= 104) + l_dst[12] = l_src[12]; + if (n >= 112) + l_dst[13] = l_src[13]; + if (n >= 120) + l_dst[14] = l_src[14]; + if (n >= 128) + l_dst[15] = l_src[15]; + + /* Copy the tail */ + if (n & 4) + *(u32 *) (dst + (n & ~0x7UL)) = + *(u32 *) (src + (n & ~0x7UL)); + if (n & 2) + *(u16 *) (dst + (n & ~0x3UL)) = + *(u16 *) (src + (n & ~0x3UL)); + if (n & 1) + *(u8 *) (dst + (n & ~0x1UL)) = + *(u8 *) (src + (n & ~0x1UL)); + } else { + E2K_PREFETCH_L2(src); + __memcpy(dst, src, n); + } + + return dst; +} +#endif /* __HAVE_ARCH_MEMCPY */ + +extern unsigned long __recovery_memset_8(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode); +/* Since iset v5 we can use this with 16-bytes aligned addr and len */ +extern unsigned long __recovery_memset_16(void *addr, u64 val, u64 tag, + size_t len, u64 strqp_opcode); +#define recovery_memset_8(addr, val, tag, len, strd_opcode) \ +({ \ + u64 ___strd_opcode = (strd_opcode); \ + unsigned long __ret; \ + __ret = __recovery_memset_8((addr), (val), (tag), (len), \ + ___strd_opcode); \ + if (HAS_HWBUG_WC_DAM && \ + (((___strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & \ + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES)) \ + __E2K_WAIT(_st_c); \ + __ret; \ +}) + +extern void __tagged_memcpy_8(void *dst, const void *src, size_t len); + +/* + * recovery_memcpy_8() - optimized memory copy using strd/ldrd instructions + * + * Maximum allowed size is 8 Kb (it can copy bigger blocks, but performance + * will hurt because of bad prefetching policy). + * + * All parameters must be 8-bytes aligned (but if tags are not copied + * then dst and src can be unaligned). + * + * For the best performance it is recommended to copy memory with 8192 + * bytes blocks. + * + * 'strd_opcode' can be used to specify cache policy: usually L1 cache + * is disabled to avoid its pollution (disabling L2 cache slows copying + * of blocks larger than the size of the memory buffers). + * + * When copying from/to physical/IO memory, disable prefetch through the + * last argument. + * + * On success returns len. On error returns the number of bytes actually + * copied, which can be a little less than the actual copied size. + * (For error returns to work the page fault handler should be set up + * with SET_USR_PFAULT("recovery_memcpy_fault")). 
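+ *
+ * (Editor's note, appended: a minimal call sketch, not from the original
+ * patch -- the opcodes are placeholders that must be built from the real
+ * LDST_REC_OPC_* / MAS definitions:
+ *
+ *	SET_USR_PFAULT("recovery_memcpy_fault");
+ *	copied = recovery_memcpy_8(dst, src, 8192,
+ *			strd_opcode, ldrd_opcode, 1);
+ *	if (copied != 8192)
+ *		... handle the partially copied block ...
+ *
+ * with 8192-byte blocks being the recommended unit, as noted above.)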
+ */ +extern unsigned long __recovery_memcpy_8(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch); +/* Since iset v5 we can use this with 16-bytes aligned src, dst and len */ +extern unsigned long __recovery_memcpy_16(void *dst, const void *src, size_t len, + unsigned long strqp_opcode, unsigned long ldrqp_opcode, + int prefetch); +#ifdef E2K_P2V +# define HAS_HWBUG_WC_DAM (IS_ENABLED(CONFIG_CPU_E2S) || \ + IS_ENABLED(CONFIG_CPU_E8C) || IS_ENABLED(CONFIG_CPU_E8C2)) +#else +# define HAS_HWBUG_WC_DAM cpu_has(CPU_HWBUG_WC_DAM) +#endif +#define recovery_memcpy_8(dst, src, len, strd_opcode, ldrd_opcode, prefetch) \ +({ \ + unsigned long __ret; \ + u64 ___strd_opcode = (strd_opcode); \ + __ret = __recovery_memcpy_8((dst), (src), (len), ___strd_opcode, \ + (ldrd_opcode), (prefetch)); \ + if (HAS_HWBUG_WC_DAM && \ + (((___strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & \ + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES)) \ + __E2K_WAIT(_st_c); \ + __ret; \ +}) + +/** + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ +static inline unsigned long native_fast_tagged_memory_copy( + void *dst, const void *src, size_t len, + unsigned long strd_opcode, + unsigned long ldrd_opcode, int prefetch) +{ + unsigned long ret; + ldst_rec_op_t st_op, ld_op; + + AW(st_op) = strd_opcode; + AW(ld_op) = ldrd_opcode; + + if (CONFIG_CPU_ISET >= 5 && !st_op.fmt_h && !ld_op.fmt_h && + st_op.fmt == LDST_QWORD_FMT && ld_op.fmt == LDST_QWORD_FMT && + !((u64) dst & 0xf) && !((u64) src & 0xf) && !(len & 0xf)) { + ret = __recovery_memcpy_16(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } else { + ret = __recovery_memcpy_8(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); + } + + if (HAS_HWBUG_WC_DAM && + ((strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES) + __E2K_WAIT(_st_c); + + return ret; +} + +static inline unsigned long native_fast_tagged_memory_set( + void *addr, u64 val, u64 tag, size_t len, u64 strd_opcode) +{ + unsigned long ret; + ldst_rec_op_t st_op; + + AW(st_op) = strd_opcode; + + if (CONFIG_CPU_ISET >= 5 && !((u64) addr & 0xf) && !(len & 0xf) && + !st_op.fmt_h && st_op.fmt == LDST_QWORD_FMT) { + ret = __recovery_memset_16(addr, val, tag, len, strd_opcode); + } else { + ret = __recovery_memset_8(addr, val, tag, len, strd_opcode); + } + + if (HAS_HWBUG_WC_DAM && + ((strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & + MAS_BYPASS_ALL_CACHES) == MAS_BYPASS_ALL_CACHES) + __E2K_WAIT(_st_c); + + return ret; +} + +#define boot_native_fast_tagged_memory_copy(...) recovery_memcpy_8(__VA_ARGS__) + +#define boot_native_fast_tagged_memory_set(...) 
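+/*
+ * Editor's note (annotation, not part of the original patch): the
+ * HAS_HWBUG_WC_DAM workaround above re-checks the MAS field of the store
+ * opcode: (strd_opcode >> LDST_REC_OPC_MAS_SHIFT) & MAS_BYPASS_ALL_CACHES
+ * isolates the MAS bits, and comparing the result against
+ * MAS_BYPASS_ALL_CACHES confirms that *every* bypass bit is set, i.e. the
+ * store went around all caches in write-combine fashion; only then is the
+ * __E2K_WAIT(_st_c) store barrier issued, to flush write-combined data on
+ * the affected CPUs (E2S/E8C/E8C2 when built for boot, or
+ * cpu_has(CPU_HWBUG_WC_DAM) at run time).
+ */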
recovery_memset_8(__VA_ARGS__) + +static inline int +native_fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* native kernel does not support any guests */ + return native_fast_tagged_memory_copy((void *)dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +native_fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + /* native kernel does not support any guests */ + return native_fast_tagged_memory_copy(dst, (const void *)src, len, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline unsigned long +native_extract_tags_32(u16 *dst, const void *src) +{ + NATIVE_EXTRACT_TAGS_32(dst, src); + return 0; +} + +static inline void native_tagged_memcpy_8(void *__restrict dst, + const void *__restrict src, size_t n, + const unsigned long dst_align, + const unsigned long src_align) +{ + if (__builtin_constant_p(n) && src_align >= 8 && dst_align >= 8 && + (n == 64 || n == 56 || n == 48 || n == 40 || + n == 32 || n == 24 || n == 16 || n == 8)) { + /* Inline small aligned memcpy's */ + if (n == 64) + E2K_TAGGED_MEMMOVE_64(dst, src); + else if (n == 56) + E2K_TAGGED_MEMMOVE_56(dst, src); + else if (n == 48) + E2K_TAGGED_MEMMOVE_48(dst, src); + else if (n == 40) + E2K_TAGGED_MEMMOVE_40(dst, src); + else if (n == 32) + E2K_TAGGED_MEMMOVE_32(dst, src); + else if (n == 24) + E2K_TAGGED_MEMMOVE_24(dst, src); + else if (n == 16) + E2K_TAGGED_MEMMOVE_16(dst, src); + else + E2K_TAGGED_MEMMOVE_8(dst, src); + } else { + E2K_PREFETCH_L2(src); + + __tagged_memcpy_8(dst, src, n); + } +} + +/** + * tagged_memcpy_8() - copy memory along with tags + * + * All parameters must be 8-bytes aligned. + */ +#if defined(CONFIG_PARAVIRT_GUEST) +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +#include +#else /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ +#define tagged_memcpy_8(dst, src, n) \ +({ \ + native_tagged_memcpy_8(dst, src, n, \ + __alignof(*(dst)), __alignof(*(src))); \ +}) +#endif /* !CONFIG_KVM_GUEST_KERNEL && !CONFIG_PARAVIRT_GUEST */ + +extern void boot_fast_memcpy(void *, const void *, size_t); +extern notrace void boot_fast_memset(void *s_va, long c, size_t count); + +#ifdef CONFIG_BOOT_E2K +/* own small bios (boot loader) for kernel */ +static inline unsigned long +fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} +#elif defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host/guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel with or without virtualization support */ +/** + * optimized copy memory along with tags + * using privileged LD/ST recovery operations + */ +static inline unsigned long +fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy(dst, src, len, + strd_opcode, ldrd_opcode, prefetch); +} +static inline unsigned long +fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + return native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} + +static inline unsigned long +boot_fast_tagged_memory_copy(void *dst, const void *src, size_t len, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return boot_native_fast_tagged_memory_copy(dst, src, len, strd_opcode, + ldrd_opcode, prefetch); +} +static inline void +boot_fast_tagged_memory_set(void *addr, u64 val, u64 tag, + size_t len, u64 strd_opcode) +{ + boot_native_fast_tagged_memory_set(addr, val, tag, len, strd_opcode); +} +static inline unsigned long +extract_tags_32(u16 *dst, const void *src) +{ + return native_extract_tags_32(dst, src); +} + +#ifndef CONFIG_KVM_HOST_MODE +/* it is native kernel without virtualization support */ +static inline int +fast_tagged_memory_copy_to_user(void __user *dst, const void *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy_to_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} + +static inline int +fast_tagged_memory_copy_from_user(void *dst, const void __user *src, + size_t len, const struct pt_regs *regs, + unsigned long strd_opcode, unsigned long ldrd_opcode, + int prefetch) +{ + return native_fast_tagged_memory_copy_from_user(dst, src, len, regs, + strd_opcode, ldrd_opcode, prefetch); +} +#endif /* !CONFIG_KVM_HOST_MODE */ + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_STRING_H_ */ diff --git a/arch/e2k/include/asm/swap_info.h b/arch/e2k/include/asm/swap_info.h new file mode 100644 index 0000000..c7f3662 --- /dev/null +++ b/arch/e2k/include/asm/swap_info.h @@ -0,0 +1,16 @@ +#ifndef _E2K_SWAP_INFO_H +#define _E2K_SWAP_INFO_H + +#include + +typedef struct swap_page_info { + struct swap_page_info *next; + struct mm_struct* mm; + e2k_addr_t addr; +} swap_page_info_t; + +#define PageSwapInfo(page) (page)->swap_info +#define PageWithSwapInfo(page) (PageSwapInfo(page) != NULL) +#define ClearPageSwapInfo(page) PageSwapInfo(page) = NULL + +#endif diff --git a/arch/e2k/include/asm/switch_to.h b/arch/e2k/include/asm/switch_to.h new file mode 100644 index 0000000..989395f --- /dev/null +++ b/arch/e2k/include/asm/switch_to.h @@ -0,0 +1,106 @@ +#ifndef _ASM_L_SWITCH_TO_H +#define _ASM_L_SWITCH_TO_H + +#ifdef __KERNEL__ + +#include +#include +#include + +extern void preempt_schedule_irq(void); + +extern long __ret_from_fork(struct task_struct *prev); + +static inline struct task_struct * +native_ret_from_fork_get_prev_task(struct task_struct *prev) +{ + return prev; +} + +static inline int +native_ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return 0; /* nothing to do */ +} + +#ifdef CONFIG_KVM_GUEST_KERNEL +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else +/* it is native kernel without any virtualization */ +/* or it is host kernel with virtualization support */ +/* or paravirtualized host and guest 
kernel */ + +static inline struct task_struct * +ret_from_fork_get_prev_task(struct task_struct *prev) +{ + return native_ret_from_fork_get_prev_task(prev); +} + +static inline int +ret_from_fork_prepare_hv_stacks(struct pt_regs *regs) +{ + return native_ret_from_fork_prepare_hv_stacks(regs); +} +#endif /* CONFIG_KVM_GUEST_KERNEL */ + +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); + +#define native_switch_to(prev, next, last) \ +do { \ + last = __switch_to(prev, next); \ + e2k_finish_switch(last); \ +} while (0) + +#define prepare_arch_switch(next) \ +do { \ + prefetchw_range(&next->thread.sw_regs, \ + offsetof(struct sw_regs, cs_lo)); \ + /* It works under CONFIG_MCST_RT */ \ + SAVE_CURR_TIME_SWITCH_TO; \ + prepare_monitor_regs(next); \ +} while (0) + +#define e2k_finish_switch(prev) \ +do { \ + CALCULATE_TIME_SWITCH_TO; \ + finish_monitor_regs(prev); \ +} while (0) + +#ifdef CONFIG_MONITORS +#define prepare_monitor_regs(next) \ +do { \ + if (MONITORING_IS_ACTIVE) \ + store_monitors_delta(current); \ +} while (0) +#define finish_monitor_regs(prev) \ +do { \ + if (MONITORING_IS_ACTIVE) { \ + prev->thread.sw_regs.ddmcr = NATIVE_READ_DDMCR_REG(); \ + prev->thread.sw_regs.dimcr = NATIVE_READ_DIMCR_REG(); \ + process_monitors(current); \ + } \ +} while (0) +#else /* !CONFIG_MONITORS */ +#define prepare_monitor_regs(next) +#define finish_monitor_regs(next) +#endif /* CONFIG_MONITORS */ + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is pure guest kernel (not paravirtualized based on pv_ops) */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without virtualization support */ +/* or native kernel with virtualization support */ + +#define switch_to(prev, next, last) native_switch_to(prev, next, last) + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_L_SWITCH_TO_H */ diff --git a/arch/e2k/include/asm/syscall.h b/arch/e2k/include/asm/syscall.h new file mode 100644 index 0000000..1126378 --- /dev/null +++ b/arch/e2k/include/asm/syscall.h @@ -0,0 +1,62 @@ +#ifndef _E2K_SYSCALLS_H +#define _E2K_SYSCALLS_H + +#include + +/* The system call number is given by the user in 1 */ +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + + return (regs && from_syscall(regs)) ? 
regs->sys_num : -1;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					struct pt_regs *regs)
+{
+	return regs->sys_rval;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					struct pt_regs *regs,
+					int error, long val)
+{
+	regs->sys_rval = val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					struct pt_regs *regs,
+					unsigned long *args)
+{
+	unsigned int n = 6, j;
+	unsigned long *p = &regs->args[1];
+
+	for (j = 0; j < n; j++) {
+		args[j] = p[j];
+	}
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					struct pt_regs *regs,
+					const unsigned long *args)
+{
+	unsigned int n = 6, j;
+	unsigned long *p = &regs->args[1];
+
+	for (j = 0; j < n; j++) {
+		p[j] = args[j];
+	}
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+	return AUDIT_ARCH_E2K;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				struct pt_regs *regs)
+{
+	/* Do nothing */
+}
+
+#endif /* _E2K_SYSCALLS_H */
diff --git a/arch/e2k/include/asm/syscalls.h b/arch/e2k/include/asm/syscalls.h
new file mode 100644
index 0000000..fc75f09
--- /dev/null
+++ b/arch/e2k/include/asm/syscalls.h
@@ -0,0 +1,455 @@
+/*
+ * syscalls.h - Linux syscall interfaces (arch-specific)
+ *
+ * Copyright (c) 2008 Jaswinder Singh Rajput
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _ASM_E2K_SYSCALLS_H
+#define _ASM_E2K_SYSCALLS_H
+
+#include
+#include
+#include
+#include
+
+extern unsigned long sys_mmap(unsigned long addr, unsigned long len,
+		unsigned long prot, unsigned long flags,
+		unsigned long fd, unsigned long off);
+extern unsigned long sys_mmap2(unsigned long addr, unsigned long len,
+		int prot, int flags, int fd, long pgoff);
+extern pid_t sys_clone_thread(unsigned long flags, unsigned long arg2,
+		unsigned long long arg3, int __user *parent_tidptr,
+		int __user *child_tidptr, unsigned long tls);
+extern long sys_e2k_longjmp2(struct jmp_info *regs, u64 retval);
+extern long sys_e2k_syswork(long syswork, long arg2,
+		long arg3, long arg4, long arg5);
+extern long e2k_sys_execve(const char __user *filename,
+		const char __user *const __user *argv,
+		const char __user *const __user *envp);
+extern long e2k_sys_execveat(int fd, const char __user *filename,
+		const char __user *const __user *argv,
+		const char __user *const __user *envp,
+		int flags);
+
+extern long sys_stat64(const char __user *filename,
+		struct stat64 __user *statbuf);
+extern long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
+extern long sys_lstat64(const char __user *filename,
+		struct stat64 __user *statbuf);
+#ifdef CONFIG_MAC_
+extern int sys_macctl(register int request, register void *data,
+		register int size);
+#endif
+
+extern asmlinkage long sys_set_backtrace(unsigned long *__user buf,
+		size_t count, size_t skip, unsigned long flags);
+extern asmlinkage long sys_get_backtrace(unsigned long *__user buf,
+		size_t count, size_t skip, unsigned long flags);
+extern long sys_access_hw_stacks(unsigned long mode,
+		unsigned long long __user *frame_ptr, char __user *buf,
+		unsigned long buf_size, void __user *real_size);
+
+extern long e2k_sys_prlimit64(pid_t pid, unsigned int resource,
+		const struct rlimit64 __user *new_rlim,
+		struct rlimit64 __user *old_rlim);
+extern long e2k_sys_getrlimit(unsigned int resource,
+		struct rlimit __user *rlim);
+#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
+extern long e2k_sys_old_getrlimit(unsigned int resource,
+		struct rlimit __user *rlim);
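+/*
+ * Editor's note (annotation, not part of the original patch): in the
+ * asm/syscall.h helpers above, the six syscall arguments are read from
+ * &regs->args[1], so args[] appears to be 1-based, matching the a1..a6
+ * argument numbering used by the protected_sys_*() handlers below.
+ * A seccomp/audit-style consumer would fetch them schematically as:
+ *
+ *	unsigned long a[6];
+ *	syscall_get_arguments(current, regs, a);  // a[0] == first argument
+ */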
+#endif +extern long e2k_sys_setrlimit(unsigned int resource, + struct rlimit __user *rlim); + +#ifdef CONFIG_PROTECTED_MODE +extern long protected_sys_clean_descriptors(void __user *addr, + unsigned long size, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs); +/* Flags for the function above see in arch/include/uapi/asm/protected_mode.h */ +/* 0 - clean freed descriptor list */ + +extern long protected_sys_rt_sigaction(int sig, + const void __user *ptr, void __user *ptr2, + const size_t sigsetsize); +extern long protected_sys_rt_sigaction_ex(int sig, + const void __user *ptr, void __user *ptr2, + const size_t sigsetsize); +extern long protected_sys_mq_notify(const long a1, + const unsigned long __user a2); +extern long protected_sys_timer_create(const long a1, + const unsigned long __user a2, const unsigned long __user a3); +extern long protected_sys_rt_sigtimedwait(const unsigned long __user a1, + const unsigned long __user a2, const unsigned long __user a3, + const unsigned long a4); +extern long protected_sys_sysctl(const unsigned long __user a1); +extern long protected_sys_clone(const unsigned long a1, /* flags */ + const unsigned long a2, /* new_stackptr */ + const unsigned long __user a3,/* parent_tidptr */ + const unsigned long __user a4,/* child_tidptr */ + const unsigned long __user a5,/* tls */ + const unsigned long unused6, + struct pt_regs *regs); +extern long protected_sys_execve(const unsigned long __user a1,/* filename*/ + const unsigned long __user a2,/* argv[] */ + const unsigned long __user a3,/* envp[] */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_execveat(const unsigned long dirfd, /*a1 */ + const unsigned long __user pathname,/* a2 */ + const unsigned long __user argv, /* a3 */ + const unsigned long __user envp, /* a4 */ + const unsigned long flags, /* a5 */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_futex(const unsigned long __user uaddr, + const unsigned long futex_op, + const unsigned long val, + const unsigned long a4, /* timeout/val2 */ + const unsigned long __user uaddr2, + const unsigned long val3, + const struct pt_regs *regs); +extern long protected_sys_getgroups(const unsigned long a1, /* size */ + const unsigned long __user a2, /* list[] */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_ipc(const unsigned long call, /* a1 */ + const long first, /* a2 */ + const unsigned long second, /* a3 */ + const unsigned long third, /* a4 */ + const unsigned long __user ptr, /* a5 */ + const long fifth, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_mmap(const unsigned long a1, /* start */ + const unsigned long a2, /* length */ + const unsigned long a3, /* prot */ + const unsigned long a4, /* flags */ + const unsigned long a5, /* fd */ + const unsigned long a6, /* offset/bytes */ + struct pt_regs *regs); +extern long protected_sys_mmap2(const unsigned long a1, /* start */ + const unsigned long a2, /* length */ + const unsigned long a3, /* prot */ + const unsigned long a4, /* flags */ + const unsigned long a5, /* fd */ + const unsigned long a6, /* offset/pages */ + struct pt_regs *regs); +extern long protected_sys_munmap(const unsigned long __user a1, /* addr */ + const 
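+/*
+ * Editor's note (annotation, not part of the original patch): nearly all
+ * protected_sys_*() handlers in this block share one shape -- six
+ * register-sized slots (a1..a6, with unusedN for slots a given call
+ * ignores) plus a struct pt_regs pointer.  A uniform signature like this
+ * lets the protected-mode entry code dispatch through a single table; a
+ * hypothetical sketch, not code from this header:
+ *
+ *	typedef long (*pm_handler_t)(unsigned long, unsigned long,
+ *			unsigned long, unsigned long, unsigned long,
+ *			unsigned long, struct pt_regs *);
+ *	long rv = pm_table[sys_num](a1, a2, a3, a4, a5, a6, regs);
+ */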
unsigned long a2, /* length */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs); +extern long protected_sys_mremap(const unsigned long __user old_address, + const unsigned long old_size, + const unsigned long new_size, + const unsigned long flags, + const unsigned long new_address, + const unsigned long a6, /* unused */ + struct pt_regs *regs); +extern long protected_sys_readv(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_semctl(const long semid, /* a1 */ + const long semnum, /* a2 */ + const long cmd, /* a3 */ + const unsigned long __user ptr, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_shmat(const long shmid, /* a1 */ + const unsigned long __user shmaddr, /* a2 */ + const long shmflg, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + struct pt_regs *regs); +extern long protected_sys_writev(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_preadv(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_pwritev(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h */ + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_preadv2(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h*/ + const unsigned long a6, /* flags */ + const struct pt_regs *regs); +extern long protected_sys_pwritev2(const unsigned long a1, /* fd */ + const unsigned long __user a2, /* iov */ + const unsigned long a3, /* iovcnt */ + const unsigned long a4, /* offset_l */ + const unsigned long a5, /* offset_h*/ + const unsigned long a6, /* flags */ + const struct pt_regs *regs); +extern long protected_sys_socketcall(const unsigned long a1, /* call */ + const unsigned long __user a2, /* args */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_sendmsg(const unsigned long sockfd, + const unsigned long __user msg, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_recvmsg(const unsigned long socket, + const unsigned long __user message, + const unsigned long flags, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_olduselib(const unsigned long __user a1, /* library */ + const 
unsigned long __user a2); /* umdd */ + /* NB> 'olduselib' is obsolete syscall; unsupported in CPU ISET V6 */ +extern long protected_sys_uselib(const unsigned long __user a1, /* library */ + const unsigned long __user a2); /* umdd */ +extern long protected_sys_sigaltstack(const unsigned long __user a1, /* ss */ + const unsigned long __user a2, /* oss */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_unuselib(const unsigned long __user a1, /* addr */ + const unsigned long a2, + const unsigned long a3, + const unsigned long a4, + const unsigned long a5, + const unsigned long a6, + struct pt_regs *regs); +extern long protected_sys_get_backtrace(const unsigned long __user buf, + size_t count, size_t skip, + unsigned long flags, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_set_backtrace(const unsigned long __user buf, + size_t count, size_t skip, + unsigned long flags, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_set_robust_list( + const unsigned long __user listhead, /* a1 */ + const size_t len, /* a2 */ + const unsigned long unused3, + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_get_robust_list(const unsigned long pid, + unsigned long __user head_ptr, + unsigned long __user len_ptr); +extern long protected_sys_process_vm_readv(const unsigned long pid, /*a1*/ + const struct iovec __user *lvec, /* a2 */ + unsigned long liovcnt, /* a3 */ + const struct iovec __user *rvec, /* a4 */ + unsigned long riovcnt, /* a5 */ + unsigned long flags, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_process_vm_writev(const unsigned long pid, /*a1*/ + const struct iovec __user *lvec, /* a2 */ + unsigned long liovcnt, /* a3 */ + const struct iovec __user *rvec, /* a4 */ + unsigned long riovcnt, /* a5 */ + unsigned long flags, /* a6 */ + const struct pt_regs *regs); +extern long protected_sys_vmsplice(int fd, /* a1 */ + const struct iovec __user *iov, /* a2 */ + unsigned long nr_segs, /*a3 */ + unsigned int flags, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_keyctl(const int operation, /* a1 */ + const unsigned long arg2, + const unsigned long arg3, + const unsigned long arg4, + const unsigned long arg5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_prctl(const int option, /* a1 */ + const unsigned long arg2, + const unsigned long arg3, + const unsigned long arg4, + const unsigned long arg5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_ioctl(const int fd, /* a1 */ + const unsigned long request,/* a2 */ + void *argp, /* a3 */ + const unsigned long unused4, + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_epoll_ctl(const unsigned long epfd, /* a1 */ + const unsigned long op, /* a2 */ + const unsigned long fd, /* a3 */ + void __user *event, /* a4 */ + const unsigned long unused5, + const unsigned long unused6, + const struct pt_regs *regs); +extern long protected_sys_epoll_wait(const unsigned long epfd, /* a1 */ + void __user *event, /* a2 */ + const long maxevents, /* a3 */ + 
const long timeout,	/* a4 */
+			const unsigned long	unused5,
+			const unsigned long	unused6,
+			const struct pt_regs	*regs);
+extern long protected_sys_epoll_pwait(const unsigned long epfd, /* a1 */
+			void __user		*event,	/* a2 */
+			const long	maxevents,	/* a3 */
+			const long	timeout,	/* a4 */
+			const unsigned long	sigmask, /* a5 */
+			const unsigned long	sigsetsize, /* a6 */
+			const struct pt_regs	*regs);
+extern long protected_sys_pselect6(const long	nfds,	/* a1 */
+			const unsigned long	readfds,	/* a2 */
+			const unsigned long	writefds,	/* a3 */
+			const unsigned long	exceptfds,	/* a4 */
+			const unsigned long	timeout,	/* a5 */
+			const unsigned long	sigmask,	/* a6 */
+			const struct pt_regs	*regs);
+
+extern int arch_init_pm_sc_debug_mode(const int debug_mask);
+
+/*
+ * Stores descriptor attributes in the sival_ptr_list list.
+ * The descriptor may be restored with the function that follows.
+ */
+static inline
+void store_descriptor_attrs(void *kernel_ptr,
+		const long user_ptr_lo, const long user_ptr_hi,
+		const int ptr_tags, const int signum)
+{
+	struct sival_ptr_list *new;
+
+	/* Saving kernel_ptr in sival_ptr_list: */
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	new->kernel_ptr = kernel_ptr;
+	new->user_ptr_lo = user_ptr_lo;
+	new->user_ptr_hi = user_ptr_hi;
+	new->user_tags = ptr_tags;
+	new->signum = signum;
+
+	/* Saving sival_ptr in sival_ptr_list: */
+	down_write(&(current->mm->context.sival_ptr_list_sem));
+	/* Add new element as head of the list */
+	list_add(&new->link, &(current->mm->context.sival_ptr_list_head));
+	up_write(&(current->mm->context.sival_ptr_list_sem));
+}
+
+/*
+ * Returns the record with saved descriptor attributes (in sival_ptr_list)
+ * for the given pointer/signal number pair, or NULL.
+ */
+static inline
+struct sival_ptr_list *get_descriptor_attrs(const void *kernel_ptr,
+					const int signum)
+{
+	struct sival_ptr_list *ret = NULL, *siptrl_ln;
+
+	/*
+	 * We look thru sival_ptr_list to find a record with the same
+	 * kernel pointer and signum if specified:
+	 */
+	down_read(&current->mm->context.sival_ptr_list_sem);
+	list_for_each_entry(siptrl_ln,
+			&current->mm->context.sival_ptr_list_head, link) {
+		if (siptrl_ln->kernel_ptr == kernel_ptr) {
+			if (!signum || (siptrl_ln->signum == signum)) {
+				ret = siptrl_ln;
+				break;
+			}
+		}
+	}
+	up_read(&current->mm->context.sival_ptr_list_sem);
+	return ret;
+}
+#endif /* CONFIG_PROTECTED_MODE */
+
+#ifdef CONFIG_COMPAT
+extern long compat_sys_lseek(unsigned int fd, int offset, unsigned int whence);
+extern long compat_sys_sigpending(u32 *);
+extern long compat_sys_sigprocmask(int, u32 *, u32 *);
+extern long sys32_pread64(unsigned int fd, char __user *ubuf,
+		compat_size_t count, unsigned long poslo, unsigned long poshi);
+extern long sys32_pwrite64(unsigned int fd, char __user *ubuf,
+		compat_size_t count, unsigned long poslo, unsigned long poshi);
+extern long sys32_readahead(int fd, unsigned long offlo,
+		unsigned long offhi, compat_size_t count);
+extern long sys32_fadvise64(int fd, unsigned long offlo,
+		unsigned long offhi, compat_size_t len, int advice);
+extern long sys32_fadvise64_64(int fd,
+		unsigned long offlo, unsigned long offhi,
+		unsigned long lenlo, unsigned long lenhi, int advice);
+extern long sys32_sync_file_range(int fd,
+		unsigned long off_low, unsigned long off_high,
+		unsigned long nb_low, unsigned long nb_high, int flags);
+extern long sys32_fallocate(int fd, int mode,
+		unsigned long offlo, unsigned long offhi,
+		unsigned long lenlo, unsigned long lenhi);
+extern long sys32_truncate64(const char __user *path, +
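+/*
+ * Editor's note (annotation, not part of the original patch): these
+ * sys32_* wrappers exist because a 32-bit compat ABI passes every 64-bit
+ * value in two register-sized halves; the kernel side glues them back
+ * together, hypothetically along the lines of:
+ *
+ *	loff_t pos = ((loff_t)poshi << 32) | poslo;
+ *
+ * (assuming the "lo"/"low" argument carries the low 32 bits, as the
+ * names suggest).
+ */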
unsigned long low, unsigned long high); +extern long sys32_ftruncate64(unsigned int fd, + unsigned long low, unsigned long high); +extern long compat_e2k_sys_execve(const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); +extern long compat_e2k_sys_execveat(int fd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, + int flags); +extern asmlinkage long compat_sys_set_backtrace(unsigned int *__user buf, + size_t count, size_t skip, unsigned long flags); +extern asmlinkage long compat_sys_get_backtrace(unsigned int *__user buf, + size_t count, size_t skip, unsigned long flags); +extern long compat_sys_access_hw_stacks(unsigned long mode, + unsigned long long __user *frame_ptr, char __user *buf, + unsigned long buf_size, void __user *real_size); +extern long compat_e2k_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +extern long compat_e2k_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +#endif + + +#endif /* _ASM_E2K_SYSCALLS_H */ diff --git a/arch/e2k/include/asm/system.h b/arch/e2k/include/asm/system.h new file mode 100644 index 0000000..d5e0b3a --- /dev/null +++ b/arch/e2k/include/asm/system.h @@ -0,0 +1,671 @@ +/* + * asm-e2k/system.h + */ +#ifndef _E2K_SYSTEM_H_ +#define _E2K_SYSTEM_H_ + +#ifndef __ASSEMBLY__ +#include +#endif /* !(__ASSEMBLY__) */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0) + +#define NATIVE_PSR_SET_LAST_WISH() \ +do { \ + unsigned long __psr = NATIVE_NV_READ_PSR_REG_VALUE(); \ + __psr |= PSR_LW; \ + NATIVE_WRITE_PSR_REG_VALUE(__psr); \ +} while (0) + +#define PSR_SET_LAST_WISH() \ +do { \ + unsigned long __psr = READ_PSR_REG_VALUE(); \ + __psr |= PSR_LW; \ + WRITE_PSR_REG_VALUE(__psr); \ +} while (0) + +#define boot_native_set_sge() \ +({ \ + NATIVE_WRITE_PSR_REG_VALUE( \ + NATIVE_NV_READ_PSR_REG_VALUE() | PSR_SGE); \ +}) + +static inline bool native_sge_is_set(void) +{ + e2k_psr_t psr = NATIVE_NV_READ_PSR_REG(); + + return psr.PSR_sge; +} + +#ifdef CONFIG_E2K_PROFILING + +#define boot_smp_processor_id_() \ + (((e2k_addr_t)current_thread_info() >= TASK_SIZE) ? 
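+/*
+ * Editor's note (annotation, not part of the original patch): the
+ * TASK_SIZE test below distinguishes a fully set up CPU, where
+ * current_thread_info() is a real kernel-space pointer and the ordinary
+ * smp_processor_id() works, from the early boot case, where the current
+ * register presumably still holds the raw CPU number, which is then
+ * returned directly.
+ */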
\ + smp_processor_id() \ + : \ + ((long)READ_CURRENT_REG_VALUE())) + +typedef struct { + // FIRST ELEMENT + long max_disable_interrupt; // max #ticks of disable_interrupt + long sum_disable_interrupt; // all #ticks of disable_interrupt + long number_interrupt; // number of interrupts + long number_irqs; // number of closed irq + long number_system_call; // number of system_call + + long max_disable_interrupt_after_dump; // max #ticks of disable_interrupt + // after last read or write profile file + long interrupts[exc_max_num]; // interrupt table + long interrupts_time[exc_max_num]; // interrupt time (in ticks) + long max_interrupts_time[exc_max_num]; // max interrupt time (in ticks) + long syscall[NR_syscalls]; // syscall table + long syscall_time[NR_syscalls]; // syscall time (in ticks) + long max_syscall_time[NR_syscalls]; // max syscall time + long clk; // time of interrupt's begining + // NR_VECTORS 256 + long max_do_irq_time[256]; // max DO_IRQ's time + long do_irq[256]; // number of DO_IRQ + long do_irq_time[256]; // time of DO_IRQ + long clk_of_do_irq; // time of DO_IRQ's begining + long last_element; +} disable_interrupt_t ; + +extern unsigned long get_cmos_time(void); +extern disable_interrupt_t disable_interrupt[NR_CPUS]; + +#define read_ticks(n) (n = NATIVE_READ_CLKR_REG_VALUE()) + +#define add_info_interrupt(n, ticks) \ +({ long t; int cpu; \ + t = NATIVE_READ_CLKR_REG_VALUE() - ticks; \ + cpu = boot_smp_processor_id_(); \ + disable_interrupt[cpu].interrupts[n]++; \ + disable_interrupt[cpu].interrupts_time[n] += t; \ + if (t > disable_interrupt[cpu].max_interrupts_time[n]) { \ + disable_interrupt[cpu].max_interrupts_time[n] = t; \ + } \ +}) + +#define add_info_syscall(n, ticks) \ +({ long t; int cpu; \ + cpu = boot_smp_processor_id(); \ + t = NATIVE_READ_CLKR_REG_VALUE() - ticks; \ + disable_interrupt[cpu].syscall[n]++; \ + disable_interrupt[cpu].syscall_time[n] += t; \ + if (t > disable_interrupt[cpu].max_syscall_time[n]) { \ + disable_interrupt[cpu].max_syscall_time[n] = t; \ + } \ +}) + +typedef struct { + long max_time; + long full_time; + long begin_time; + long number; + long beg_ip; + long beg_parent_ip; + long end_ip; + long end_parent_ip; + long max_beg_ip; + long max_beg_parent_ip; + long max_end_ip; + long max_begin_time; + long max_end_parent_ip; +} time_info_t; + +/* + * For adding new element you need do following things: + * - add new "time_info_t" element after last one + * - add name of your elemen in system_info_name (file e2k_sysworks.c) + * - create two new define similar below: + * #define info_save_mmu_reg(tick) \ + * store_max_time_in_system_info(tick,max_mmu_reg) + * #define info_save_mmu_reg(tick) - null for not CONFIG_E2K_PROFILING + * - used your new define for merging what you want + */ +typedef struct { + time_info_t max_disabled_interrupt; // max time of disabled inerrupts + time_info_t max_stack_reg; // max time of saving of stack_registers + time_info_t max_tir_reg; // max time for storing TIR + time_info_t max_mmu_reg; // max time for storing mmu registers + time_info_t max_restore_stack_reg; // max time for restoring of stack_registers + time_info_t max_restoring_reg; // max time for restoring all registers + time_info_t max_restore_mmu_reg; // max time for restoring mmu registers + time_info_t max_cpu_idle; // max time for cpu_idle +} system_info_t ; + +extern char* system_info_name[]; +extern system_info_t system_info[NR_CPUS]; +extern int enable_collect_interrupt_ticks; +#define collect_disable_interrupt_ticks() \ +({ int cpu; \ + cpu = 
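+/*
+ * Editor's note (annotation, not part of the original patch): the
+ * intended usage pattern for the profiling counters above, as a sketch:
+ *
+ *	long ticks;
+ *	read_ticks(ticks);              // stamp entry with the CLKR value
+ *	... service interrupt n ...
+ *	add_info_interrupt(n, ticks);   // accumulate and track the maximum
+ *
+ * so interrupts_time[n] collects the total tick count while
+ * max_interrupts_time[n] only ever grows.
+ */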
boot_smp_processor_id_(); \ + if (system_info[cpu].max_disabled_interrupt.begin_time >0){ \ + store_max_time_in_system_info( \ + system_info[cpu].max_disabled_interrupt.begin_time, \ + max_disabled_interrupt); \ + system_info[cpu].max_disabled_interrupt.begin_time = 0; \ + } \ +}) + +#define mark_disable_interrupt_ticks() \ + store_begin_ip_in_system_info(max_disabled_interrupt) + +#define store_do_irq_ticks() \ +({ int cpu = boot_smp_processor_id_(); \ + disable_interrupt[cpu].clk_of_do_irq = NATIVE_READ_CLKR_REG_VALUE(); \ +}) + +#define define_time_of_do_irq(N) \ +({ long t; int cpu; \ + cpu = boot_smp_processor_id_(); \ + t = NATIVE_READ_CLKR_REG_VALUE() - \ + disable_interrupt[cpu].clk_of_do_irq; \ + disable_interrupt[cpu].do_irq_time[N] += t; \ + disable_interrupt[cpu].do_irq[N]++; \ + if (disable_interrupt[cpu].max_do_irq_time[N] < t) { \ + disable_interrupt[cpu].max_do_irq_time[N] = t; \ + } \ +}) +#define info_save_stack_reg(tick) \ + store_max_time_in_system_info(tick,max_stack_reg) +#define info_restore_stack_reg(tick) \ + store_max_time_in_system_info(tick,max_restore_stack_reg) + +#define info_save_mmu_reg(tick) \ + store_max_time_in_system_info(tick,max_mmu_reg) + +#define info_restore_mmu_reg(tick) \ + store_max_time_in_system_info(tick,max_restore_mmu_reg) + +#define info_save_tir_reg(tick) \ + store_max_time_in_system_info(tick,max_tir_reg) + +#define info_restore_all_reg(tick) \ + store_max_time_in_system_info(tick,max_restoring_reg); \ + +#define cpu_idle_time() \ + store_begin_time_in_system_info(max_cpu_idle) +#define calculate_cpu_idle_time() \ + calculate_max_time_in_system_info(max_cpu_idle) + +#define store_begin_time_in_system_info(FIELD) \ +({ long t; int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + if (enable_collect_interrupt_ticks) { \ + cpu = boot_smp_processor_id_(); \ + t = NATIVE_READ_CLKR_REG_VALUE(); \ + AS_WORD(cr0_hi) = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.begin_time = tick; \ + system_info[cpu].FIELD.beg_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.beg_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + } \ +}) +#define store_begin_ip_in_system_info(FIELD) \ +({ \ + int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id_(); \ + disable_interrupt[cpu].clk = NATIVE_READ_CLKR_REG_VALUE(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.beg_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.beg_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ +}) + +#define store_begin_time_ip_in_system_info(cpu, tick, FIELD) \ +({ \ + register e2k_cr0_hi_t cr0_hi; \ + if (enable_collect_interrupt_ticks) { \ + system_info[cpu].FIELD.begin_time = tick; \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.beg_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.beg_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + } \ +}) + +#define store_end_ip_in_system_info(mutex, FIELD) \ +({ \ + int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id_(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.beg_ip = mutex->ip; \ + system_info[cpu].FIELD.beg_parent_ip = mutex->caller; \ + system_info[cpu].FIELD.end_ip = NATIVE_READ_IP_REG_VALUE(); \ + system_info[cpu].FIELD.end_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ +}) + +#define calculate_max_time_in_system_info(FIELD) \ +({ \ + long t; int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id_(); \ + if (enable_collect_interrupt_ticks) { \ + t = 
NATIVE_READ_CLKR_REG_VALUE()-system_info[cpu]. \ + FIELD.begin_time; \ + system_info[cpu].FIELD.number++; \ + system_info[cpu].FIELD.full_time += t; \ + if (system_info[cpu].FIELD.max_time < t) { \ + system_info[cpu].FIELD.max_time = t; \ + system_info[cpu].FIELD.max_beg_ip = \ + system_info[cpu].FIELD.beg_ip; \ + system_info[cpu].FIELD.max_beg_parent_ip = \ + system_info[cpu].FIELD.beg_parent_ip; \ + system_info[cpu].FIELD.max_end_ip = \ + NATIVE_READ_IP_REG_VALUE(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.max_end_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + system_info[cpu].FIELD.max_begin_time = \ + system_info[cpu].FIELD.begin_time; \ + } \ + system_info[cpu].FIELD.begin_time = 0; \ + } \ +}) + +extern long TIME; +#define store_max_time_in_system_info(tick, FIELD) \ +({ \ + long t; int cpu; \ + register e2k_cr0_hi_t cr0_hi; \ + cpu = boot_smp_processor_id_(); \ + t = NATIVE_READ_CLKR_REG_VALUE()-tick; \ + if (enable_collect_interrupt_ticks) { \ + system_info[cpu].FIELD.number++; \ + system_info[cpu].FIELD.full_time += t; \ + if (system_info[cpu].FIELD.max_time < t) { \ + system_info[cpu].FIELD.max_time = t; \ + system_info[cpu].FIELD.max_beg_ip = \ + system_info[cpu].FIELD.beg_ip; \ + system_info[cpu].FIELD.max_beg_parent_ip = \ + system_info[cpu].FIELD.beg_parent_ip; \ + system_info[cpu].FIELD.max_end_ip = \ + NATIVE_READ_IP_REG_VALUE(); \ + cr0_hi = NATIVE_NV_READ_CR0_HI_REG_VALUE(); \ + system_info[cpu].FIELD.max_end_parent_ip = \ + (AS_STRUCT(cr0_hi)).ip<<3; \ + system_info[cpu].FIELD.max_begin_time = \ + system_info[cpu].FIELD.begin_time; \ + } \ + system_info[cpu].FIELD.begin_time = 0; \ + } \ +}) + +#define UPSR_RESTORE(__src_upsr) \ +({ \ + unsigned long upsr1 = READ_UPSR_REG_VALUE(); \ + int _cond_ = (upsr1 & UPSR_IE) != ((__src_upsr) & UPSR_IE); \ + if (enable_collect_interrupt_ticks && _cond_) { \ + if (__src_upsr & UPSR_IE) { \ + collect_disable_interrupt_ticks(); \ + } else { \ + mark_disable_interrupt_ticks(); \ + } \ + } \ + WRITE_UPSR_IRQ_BARRIER(__src_upsr); \ +}) + +#define condition_mark_disable_interrupt_ticks(_cond_) \ +({ \ + if (enable_collect_interrupt_ticks) { \ + mark_disable_interrupt_ticks(); \ + } \ +}) + +#define condition_collect_disable_interrupt_ticks(_cond_) \ +({ \ + if (enable_collect_interrupt_ticks && _cond_) { \ + collect_disable_interrupt_ticks(); \ + } \ +}) + +# else /* !CONFIG_E2K_PROFILING */ + +#define store_max_time_in_system_info(tick,FIELD) +#define calculate_max_time_in_system_info(FIELD) +#define store_begin_time_in_system_info(FIELD) +#define store_begin_ip_in_system_info(FIELD) +#define info_save_tir_reg(tick) +#define info_restore_all_reg(tick) +#define info_save_stack_reg(tick) +#define info_restore_stack_reg(tick) +#define info_save_mmu_reg(tick) +#define info_restore_mmu_reg(tick) +#define cpu_idle_time() +#define calculate_cpu_idle_time() +#define store_do_irq_ticks() +#define define_time_of_do_irq(N) +#define condition_collect_disable_interrupt_ticks(_cond_) +#define condition_mark_disable_interrupt_ticks(_cond_) +#define collect_disable_interrupt_ticks() +#define mark_disable_interrupt_ticks() +#define add_info_syscall(n, ticks) +#define add_info_interrupt(n, ticks) +#define read_ticks(n) +#define UPSR_RESTORE(__src_upsr) (WRITE_UPSR_IRQ_BARRIER(__src_upsr)) +#endif /* CONFIG_E2K_PROFILING */ + +#define E2K_KERNEL_PSR_ENABLED ((e2k_psr_t) { { \ + pm : 1, \ + ie : 1, \ + sge : 1, \ + lw : 0, \ + uie : 1, \ + nmie : 1, \ + unmie : 1, \ +} }) + +#define E2K_KERNEL_PSR_DISABLED ((e2k_psr_t) { { 
\ + pm : 1, \ + ie : 0, \ + sge : 1, \ + lw : 0, \ + uie : 0, \ + nmie : 0, \ + unmie : 0, \ +} }) + +#define E2K_KERNEL_PSR_DIS_LWISH_EN ((e2k_psr_t) { { \ + pm : 1, \ + ie : 0, \ + sge : 1, \ + lw : 1, \ + uie : 0, \ + nmie : 0, \ + unmie : 0, \ +} }) + +#define E2K_KERNEL_PSR_LWISH_DIS ((e2k_psr_t) { { \ + pm : 1, \ + ie : 0, \ + sge : 1, \ + lw : 0, \ + uie : 0, \ + nmie : 0, \ + unmie : 0, \ +} }) + +#ifndef CONFIG_ACCESS_CONTROL +#define E2K_KERNEL_UPSR_ENABLED_ASM 0xa1 +#define E2K_KERNEL_UPSR_DISABLED_ALL_ASM 0x01 +#define E2K_KERNEL_UPSR_DISABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + a20 : 0, \ + ie : 0, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_ENABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + a20 : 0, \ + ie : 1, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_DISABLED_ALL ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + a20 : 0, \ + ie : 0, \ + nmie : 0 \ +} }) +#else +#define E2K_KERNEL_UPSR_ENABLED_ASM 0xa5 +#define E2K_KERNEL_UPSR_DISABLED_ALL_ASM 0x05 +#define E2K_KERNEL_UPSR_DISABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 1, \ + a20 : 0, \ + ie : 0, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_ENABLED ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 1, \ + a20 : 0, \ + ie : 1, \ + nmie : 1 \ +} }) +#define E2K_KERNEL_UPSR_DISABLED_ALL ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 1, \ + a20 : 0, \ + ie : 0, \ + nmie : 0 \ +} }) +#endif /* ! (CONFIG_ACCESS_CONTROL) */ + +#define E2K_KERNEL_INITIAL_UPSR E2K_KERNEL_UPSR_DISABLED +#define E2K_KERNEL_INITIAL_UPSR_WITH_DISABLED_NMI \ + E2K_KERNEL_UPSR_DISABLED_ALL + +#define E2K_USER_INITIAL_UPSR ((e2k_upsr_t) { { \ + fe : 1, \ + se : 0, \ + ac : 0, \ + di : 0, \ + wp : 0, \ + ie : 0, \ + a20 : 0, \ + nmie : 0 \ +} }) + +#define E2K_USER_INITIAL_PSR ((e2k_psr_t) { { \ + pm : 0, \ + ie : 1, \ + sge : 1, \ + lw : 0, \ + uie : 0, \ + nmie : 1, \ + unmie : 0 \ +} }) + +#define PREFIX_INIT_KERNEL_UPSR_REG(PV_TYPE, irq_en, nmirq_dis) \ +do { \ + e2k_upsr_t upsr = E2K_KERNEL_UPSR_DISABLED; \ + if (irq_en) \ + AS(upsr).ie = 1; \ + if (nmirq_dis) \ + AS(upsr).nmie = 0; \ + PV_TYPE##_WRITE_UPSR_REG(upsr); \ +} while (0) +#define BOOT_PREFIX_INIT_KERNEL_UPSR_REG(PV_TYPE, irq_en, nmirq_dis) \ +do { \ + e2k_upsr_t upsr = E2K_KERNEL_UPSR_DISABLED; \ + if (irq_en) \ + AS(upsr).ie = 1; \ + if (nmirq_dis) \ + AS(upsr).nmie = 0; \ + BOOT_##PV_TYPE##_WRITE_UPSR_REG(upsr); \ +} while (0) + +#define NATIVE_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + PREFIX_INIT_KERNEL_UPSR_REG(NATIVE, irq_en, nmirq_dis) + +#define BOOT_NATIVE_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis) \ + BOOT_PREFIX_INIT_KERNEL_UPSR_REG(NATIVE, irq_en, nmirq_dis) + +#define NATIVE_INIT_USER_UPSR_REG() \ + NATIVE_WRITE_UPSR_REG(E2K_USER_INITIAL_UPSR) +#define INIT_USER_UPSR_REG() WRITE_UPSR_REG(E2K_USER_INITIAL_UPSR) + +#define PREFIX_SET_KERNEL_UPSR(PV_TYPE) \ +({ \ + PV_TYPE##_INIT_KERNEL_UPSR_REG(false, false); \ + PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ +}) + +#define PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(PV_TYPE) \ +({ \ + PV_TYPE##_INIT_KERNEL_UPSR_REG(false, true); \ + PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ +}) + +#define BOOT_PREFIX_SET_KERNEL_UPSR(PV_TYPE) \ +({ \ + BOOT_##PV_TYPE##_INIT_KERNEL_UPSR_REG(false, false); \ + BOOT_##PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \ +}) + +#define NATIVE_SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + PREFIX_SET_KERNEL_UPSR_WITH_DISABLED_NMI(NATIVE) +#define BOOT_NATIVE_SET_KERNEL_UPSR() \ + BOOT_PREFIX_SET_KERNEL_UPSR(NATIVE) + +/* + * UPSR should be saved and set to kernel initial state (where 
interrupts
+ * are disabled), regardless of whether the trap or interrupt occurred
+ * in a user process or in a kernel process.
+ * The user process case is as described above.
+ * In the kernel process case:
+ *	a kernel process can be interrupted, so its UPSR enables interrupts;
+ *	a hardware trap or a system call operation clears the interrupt
+ *	enable bits in PSR, and PSR becomes the main register controlling
+ *	interrupts. The trap handler should switch interrupt control from
+ *	PSR to UPSR, first setting UPSR to the initial kernel state with
+ *	interrupts disabled. If the trap handler returned to the trap point
+ *	without restoring UPSR, the interrupted kernel process would be left
+ *	with interrupts disabled in UPSR. So UPSR should be saved and
+ *	restored in any case.
+ *
+ * A trap can also occur in a light hypercall, where the switch from the
+ * user data stack to the kernel stack is not performed, so such traps are
+ * handled as user traps (SBR < TASK_SIZE). A light hypercall has already
+ * switched control from PSR to UPSR, so the current UPSR state
+ * (interrupts enabled or disabled) must be saved in order to restore it
+ * before returning to the trapped light hypercall.
+ * Interrupt control by UPSR will be restored from the PSR value saved
+ * in CR1_LO.
+ */
+
+#define PREFIX_DO_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type, \
+					irq_en, nmirq_dis) \
+({ \
+	PV_TYPE##_INIT_KERNEL_UPSR_REG(irq_en, nmirq_dis); \
+	PV_TYPE##_SWITCH_IRQ_TO_UPSR(); \
+	if (!irq_en) \
+		trace_hardirqs_off(); \
+})
+#define PREFIX_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type, \
+			upsr_reg, irq_en, nmirq_dis) \
+({ \
+	PV_TYPE##_DO_SAVE_UPSR_REG((upsr_reg)); \
+	PREFIX_DO_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type, \
+			irq_en, nmirq_dis); \
+})
+#define BOOT_PREFIX_SWITCH_TO_KERNEL_UPSR(PV_TYPE, pv_type) \
+({ \
+	unsigned long cur_upsr; \
+	BOOT_##PV_TYPE##_DO_SAVE_UPSR_REG((cur_upsr)); \
+	BOOT_##PV_TYPE##_INIT_KERNEL_UPSR_REG(false, false); \
+	BOOT_##PV_TYPE##_SWITCH_IRQ_TO_UPSR(false); \
+})
+/* Native version of the macros (all read/write from/to real registers) */
+#define NATIVE_DO_SWITCH_TO_KERNEL_UPSR(irq_en, nmirq_dis) \
+		PREFIX_DO_SWITCH_TO_KERNEL_UPSR(NATIVE, native, \
+			irq_en, nmirq_dis)
+#define NATIVE_SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) \
+		PREFIX_SWITCH_TO_KERNEL_UPSR(NATIVE, native, \
+			upsr_reg, irq_en, nmirq_dis)
+
+#define NATIVE_RETURN_TO_KERNEL_UPSR(upsr_reg) \
+do { \
+	NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); \
+	NATIVE_DO_RESTORE_UPSR_REG(upsr_reg); \
+} while (false)
+
+#define PREFIX_RETURN_TO_USER_UPSR(PV_TYPE, pv_type, upsr_reg, under_upsr) \
+do { \
+	PV_TYPE##_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DISABLED)); \
+	PV_TYPE##_DO_RESTORE_UPSR_REG(upsr_reg); \
+} while (false)
+#define NATIVE_RETURN_TO_USER_UPSR(upsr_reg) \
+		PREFIX_RETURN_TO_USER_UPSR(NATIVE, native, upsr_reg, true)
+#define NATIVE_RETURN_PSR_IRQ_TO_USER_UPSR(upsr_reg, lwish_en) \
+do { \
+	if (lwish_en) { \
+		NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_DIS_LWISH_EN)); \
+	} else { \
+		NATIVE_WRITE_PSR_IRQ_BARRIER(AW(E2K_KERNEL_PSR_LWISH_DIS)); \
+	} \
+	NATIVE_DO_RESTORE_UPSR_REG(upsr_reg); \
+} while (false)
+
+#ifdef CONFIG_ACCESS_CONTROL
+#define ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save) \
+({ \
+	e2k_upsr_t upsr; \
+	upsr_to_save = read_UPSR_reg(); \
+	upsr = upsr_to_save; \
+	AS_STRUCT(upsr).ac = 0; \
+	write_UPSR_reg(upsr); \
+})
+
+#define ACCESS_CONTROL_RESTORE(upsr_to_restore) \
+({ \
+	write_UPSR_reg(upsr_to_restore); \
+})
+#else /* !CONFIG_ACCESS_CONTROL */
+#define ACCESS_CONTROL_DISABLE_AND_SAVE(upsr_to_save)	do { } while (0)
+#define
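+/*
+ * Editor's note (annotation, not part of the original patch): a schematic
+ * of how the macros above are meant to bracket a handler, following the
+ * UPSR comment earlier in this file -- a sketch only, with the local
+ * variable purely illustrative:
+ *
+ *	unsigned long saved_upsr;
+ *	NATIVE_SWITCH_TO_KERNEL_UPSR(saved_upsr, false, false);
+ *	... service the trap with IRQ control now under kernel UPSR ...
+ *	NATIVE_RETURN_TO_USER_UPSR(saved_upsr);
+ *	// or NATIVE_RETURN_TO_KERNEL_UPSR(saved_upsr) for kernel traps
+ */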
ACCESS_CONTROL_RESTORE(upsr_to_restore) do { } while (0) +#endif /* CONFIG_ACCESS_CONTROL */ + +extern void * __e2k_read_kernel_return_address(int n); +/* If n == 0 we can read return address directly from cr0.hi */ +#define __e2k_kernel_return_address(n) \ + ({ (n == 0) ? \ + ((void *) (NATIVE_NV_READ_CR0_HI_REG_VALUE() & ~7UL)) \ + : \ + __e2k_read_kernel_return_address(n); }) + +#if CONFIG_CPU_ISET < 5 +typedef void (*clear_rf_t)(void); +extern const clear_rf_t clear_rf_fn[]; + +static __always_inline void clear_rf_kernel_except_current(u64 num_q) +{ + clear_rf_fn[num_q](); +} +#endif /* CONFIG_CPU_ISET < 5 */ + +#define SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) \ + NATIVE_SWITCH_TO_KERNEL_UPSR(upsr_reg, irq_en, nmirq_dis) +#define RETURN_TO_USER_UPSR(upsr_reg) \ + NATIVE_RETURN_TO_USER_UPSR(upsr_reg) + +#if defined(CONFIG_PARAVIRT_GUEST) +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +#include +#else /* native kernel */ +#define INIT_KERNEL_UPSR_REG NATIVE_INIT_KERNEL_UPSR_REG +#define BOOT_SET_KERNEL_UPSR() BOOT_NATIVE_SET_KERNEL_UPSR() +#define RETURN_TO_KERNEL_UPSR(upsr_reg) \ + NATIVE_RETURN_TO_KERNEL_UPSR(upsr_reg) +#define SET_KERNEL_UPSR_WITH_DISABLED_NMI() \ + NATIVE_SET_KERNEL_UPSR_WITH_DISABLED_NMI() + +static inline void *nested_kernel_return_address(int n) +{ + return __e2k_read_kernel_return_address(n); +} +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* _E2K_SYSTEM_H_ */ diff --git a/arch/e2k/include/asm/tag_mem.h b/arch/e2k/include/asm/tag_mem.h new file mode 100644 index 0000000..f955039 --- /dev/null +++ b/arch/e2k/include/asm/tag_mem.h @@ -0,0 +1,388 @@ +/* $Id: tag_mem.h,v 1.6 2008/09/18 14:57:23 atic Exp $ + * + * Heading of tag's memory management. + * + * Copyright (C) 2003 Salavat Guiliazov + */ + +#ifndef _E2K_TAG_MEM_H +#define _E2K_TAG_MEM_H + +#include +#include +#include + +#include +#include +#include +#include +#include + +#undef DEBUG_TAG_MODE +#undef DebugTM +#define DEBUG_TAG_MODE 0 /* Tag memory */ +#define DebugTM(...) DebugPrint(DEBUG_TAG_MODE ,##__VA_ARGS__) + +#ifndef __ASSEMBLY__ + + +/* + * Definition to convert data virtual address to appropriate virtual addresses + * of tags and on the contrary: + * + * to each quad-word of data (16 bytes) corresponds 1 byte of tag + * + * 0x0000 00f0 0000 0000 - 0x0000 00ff ffff ffff USER_TAG_MEM_BASE - + * USER_TAG_MEM_SIZE + * + * Data page 0x0000 0000 0000 0000 <-> Tag page 0x0000 00f0 0000 0000 + * 0x0000 0000 0000 1000 <-> 0x0000 00f0 0000 0100 + * ..................... ..................... + * 0x0000 0000 0001 0000 <-> 0x0000 00f0 0000 1000 + * ..................... ..................... + * 0x0000 00e0 0000 0000 <-> 0x0000 00fe 0000 0000 + * ..................... ..................... 
+ * 0x0000 00ef ffff f000 <-> 0x0000 00fe ffff ff00 + */ + +#define DATA_PAGES_PER_TAGS_PAGE(block_size) 16 +#define ONE_PAGE_TAGS_AREA_SIZE (PAGE_SIZE / DATA_PAGES_PER_TAGS_PAGE(0)) +#define virt_to_tag(data_addr) (USER_TAG_MEM_BASE + ((data_addr) >> 4)) +#define tag_to_virt(tag_addr) (((tag_addr) - USER_TAG_MEM_BASE) << 4) + +/* + * Is the specified address from tags virtual space + */ +#define is_tags_area_addr(addr) ((addr) >= USER_TAG_MEM_BASE && \ + (addr) < (USER_TAG_MEM_BASE+USER_TAG_MEM_SIZE)) + +/* + * vma->vm_flags for tags area + */ + +#define TAG_VM_FLAGS (VM_READ | VM_WRITE) + +/* + * Structure of tags into the memory + * One byte contains tag of one quad-word data, which consists of + * two double-word (low and high) and/or + * four single-word (0, 1, 2 and 3) + */ + +typedef struct mem_tag_s { + u8 w0 : 2; /* [1:0] tag of single word # 0 */ + u8 w1 : 2; /* [3:2] tag of single word # 1 */ + u8 w2 : 2; /* [5:4] tag of single word # 2 */ + u8 w3 : 2; /* [7:6] tag of single word # 3 */ +} mem_tag_s_t; + +typedef struct mem_tag_d { + u8 lo : 4; /* [3:0] tag of low double word */ + u8 hi : 4; /* [7:4] tag of high double word */ +} mem_tag_d_t; + +typedef struct mem_tag_q { + u8 qw : 8; /* [7:0] tag of quad-word */ +} mem_tag_q_t; + +typedef union mem_tag { + mem_tag_s_t sing; /* as fields of single word tags */ + mem_tag_d_t doub; /* as fields of double word tags */ + mem_tag_q_t quad; /* as fields of double word tags */ + u8 tags; /* as entire value */ +} mem_tag_t; +#define dw_lo_mem_tag doub.lo +#define dw_hi_mem_tag doub.hi +#define qw_mem_tag quad.qw + +extern void __init swap_info_cache_init(void); + +extern int add_swap_info_to_page(struct mm_struct* mm, struct page* page, e2k_addr_t addr); + +extern int add_swap_info_to_page_next(struct mm_struct* mm, struct page* page, e2k_addr_t addr); + +extern swap_page_info_t* get_swap_info_from_page(struct page* page); + +extern void free_swap_info_struct(swap_page_info_t* info); + +#ifdef CONFIG_SOFTWARE_SWAP_TAGS +extern inline void +remove_swap_info_from_page(struct page* page) { + swap_page_info_t *info; + do { + info = get_swap_info_from_page(page); + free_swap_info_struct(info); + } while (PageWithSwapInfo(page)); +} +#endif /* CONFIG_SOFTWARE_SWAP_TAGS */ + +/* + * Forwards of tags memory management functions + */ + +extern int load_page_tags(struct mm_struct* mm, e2k_addr_t page_addr); + +extern struct vm_area_struct *create_tags_vma(struct mm_struct *mm, + e2k_addr_t data_addr); +extern e2k_addr_t get_tags_address(struct mm_struct *mm, e2k_addr_t data_addr, + int write); +extern struct page *get_tags_page(struct mm_struct *mm, e2k_addr_t data_addr, + int write_access); + +extern int do_tag_munmap(struct mm_struct *mm, e2k_addr_t data_addr, + e2k_size_t data_len); + +/* + * Save tags of data memory area started from address 'data_addr' + * The starting address of data area and area size should be quad-word * 8 + * aligned: + * 1 quad-word (16 bytes) occupies 1 byte of tags + * 8 tags packed into 1 double-word + * Argument 'len' specifies saved area size in bytes, so it should be + * quad-word * 8 multiple + */ +extern inline int +do_save_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr, e2k_size_t len, + int copy_data, e2k_addr_t copy_addr) +{ + int tags_in_dw = sizeof (u64) / sizeof (mem_tag_t); + int dw_data_in_dw_tags = tags_in_dw * (sizeof (u64) * 2); + u64 *data_area = (u64 *)(_PAGE_ALIGN_DOWN(data_addr, + dw_data_in_dw_tags)); + u64 *tags_area = (u64 *)(_PAGE_ALIGN_DOWN(tags_addr, tags_in_dw)); + u64 *copy_area 
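+/*
+ * Editor's note (annotation, not part of the original patch): the
+ * constants above encode the 16-to-1 data-to-tag scaling.  Worked through
+ * once: tags_in_dw = 8/1 = 8 tag bytes per u64 of packed tags, and each
+ * tag byte covers one 16-byte quad-word, so dw_data_in_dw_tags = 8 * 16 =
+ * 128 bytes of data are summarized by every u64 written to tags_area.
+ * That matches virt_to_tag() earlier in this header: a 4096-byte data
+ * page maps to 4096 >> 4 = 256 bytes of tag space
+ * (ONE_PAGE_TAGS_AREA_SIZE).
+ */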
+/*
+ * Save tags of the data memory area starting from address 'data_addr'
+ * The starting address of the data area and the area size should be
+ * quad-word * 8 aligned:
+ *	1 quad-word (16 bytes) occupies 1 byte of tags
+ *	8 tags packed into 1 double-word
+ * Argument 'len' specifies the saved area size in bytes, so it should be
+ * a multiple of quad-word * 8
+ */
+extern inline int
+do_save_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr, e2k_size_t len,
+	int copy_data, e2k_addr_t copy_addr)
+{
+	int tags_in_dw = sizeof (u64) / sizeof (mem_tag_t);
+	int dw_data_in_dw_tags = tags_in_dw * (sizeof (u64) * 2);
+	u64 *data_area = (u64 *)(_PAGE_ALIGN_DOWN(data_addr,
+						dw_data_in_dw_tags));
+	u64 *tags_area = (u64 *)(_PAGE_ALIGN_DOWN(tags_addr, tags_in_dw));
+	u64 *copy_area = (u64 *)(_PAGE_ALIGN_DOWN(copy_addr,
+						dw_data_in_dw_tags));
+	register u64 q0_dw_lo;
+	register u64 q0_dw_hi;
+	register u64 q1_dw_lo;
+	register u64 q1_dw_hi;
+	register mem_tag_t q0_tag = {tags : 0};
+	register mem_tag_t q1_tag = {tags : 0};
+	int all_tags_is_numeric = 1;
+	u64 tags_dw = 0;
+	int tag_num, dw_num;
+	int tag_shift;
+	int i = 0;
+
+	DebugTM("started for data addr 0x%lx tag addr 0x%lx and size 0x%lx bytes\n",
+		data_addr, tags_addr, len);
+	len -= (((e2k_addr_t)data_area) - data_addr);
+	len = _PAGE_ALIGN_UP(len, tags_in_dw);
+
+	for (tag_num = 0; tag_num < len / dw_data_in_dw_tags; tag_num ++) {
+		tags_dw = 0;
+		tag_shift = 0;
+		for (dw_num = 0; dw_num < dw_data_in_dw_tags / sizeof (u64);
+			dw_num += 4) {
+			E2K_LOAD_TAGGED_QWORD_AND_TAGS(&data_area[0], q0_dw_lo,
+				q0_dw_hi, q0_tag.dw_lo_mem_tag,
+				q0_tag.dw_hi_mem_tag);
+			E2K_LOAD_TAGGED_QWORD_AND_TAGS(&data_area[2], q1_dw_lo,
+				q1_dw_hi, q1_tag.dw_lo_mem_tag,
+				q1_tag.dw_hi_mem_tag);
+			data_area += 4;
+			tags_dw |= (((u64)(q0_tag.qw_mem_tag)) << tag_shift);
+			tag_shift += (sizeof (mem_tag_t) * 8);
+			tags_dw |= (((u64)(q1_tag.qw_mem_tag)) << tag_shift);
+			tag_shift += (sizeof (mem_tag_t) * 8);
+			if (DEBUG_TAG_MODE && i < 16) {
+				DebugTM("   data[0x%03x] = 0x%016lx\n",
+					(u32) (tag_num * dw_data_in_dw_tags +
+					sizeof (*data_area) * (dw_num + 0)),
+					q0_dw_lo);
+				DebugTM("   data[0x%03x] = 0x%016lx\n",
+					(u32) (tag_num * dw_data_in_dw_tags +
+					sizeof (*data_area) * (dw_num + 1)),
+					q0_dw_hi);
+				DebugTM("   data[0x%03x] = 0x%016lx\n",
+					(u32) (tag_num * dw_data_in_dw_tags +
+					sizeof (*data_area) * (dw_num + 2)),
+					q1_dw_lo);
+				DebugTM("   data[0x%03x] = 0x%016lx\n",
+					(u32) (tag_num * dw_data_in_dw_tags +
+					sizeof (*data_area) * (dw_num + 3)),
+					q1_dw_hi);
+			}
+			if (copy_data) {
+				E2K_STORE_VALUE_WITH_TAG(&copy_area[0],
+					q0_dw_lo, ETAGNVD);
+				E2K_STORE_VALUE_WITH_TAG(&copy_area[1],
+					q0_dw_hi, ETAGNVD);
+				E2K_STORE_VALUE_WITH_TAG(&copy_area[2],
+					q1_dw_lo, ETAGNVD);
+				E2K_STORE_VALUE_WITH_TAG(&copy_area[3],
+					q1_dw_hi, ETAGNVD);
+
+				copy_area += 4;
+			}
+		}
+		tags_area[0] = tags_dw;
+		if (tags_dw != ETAGNVQ)
+			all_tags_is_numeric = 0;
+		if (DEBUG_TAG_MODE && i < 16) {
+			DebugTM("   tags[0x%03x] = 0x%016lx\n",
+				(u32)(sizeof (*tags_area) * tag_num), tags_dw);
+			i += dw_num;
+		}
+		tags_area ++;
+	}
+	DebugTM("finished with data addr 0x%px tag addr 0x%px\n",
+		data_area, tags_area);
+	return all_tags_is_numeric;
+}
+
+extern inline int
+save_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr,
+	e2k_size_t len)
+{
+	return do_save_mem_area_tags(data_addr, tags_addr, len, 0, 0);
+}
+
+extern inline int
+save_mem_page_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr)
+{
+	return save_mem_area_tags(data_addr, tags_addr, PAGE_SIZE);
+}
+
+extern inline int
+save_mem_data_page_tags(struct mm_struct *mm, e2k_addr_t data_addr)
+{
+	e2k_addr_t tags_addr;
+
+	tags_addr = get_tags_address(mm, data_addr, 1);
+	if (tags_addr == (e2k_addr_t)0)
+		return -1;
+	save_mem_page_tags(data_addr, tags_addr);
+	return 0;
+}
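A minimal sketch of how the save side above is meant to be driven when a user data page leaves memory (not part of the patch; the hook name is hypothetical, only save_mem_data_page_tags() and PAGE_MASK are assumed from this header):

static inline int example_swap_out_tags(struct mm_struct *mm,
		e2k_addr_t data_addr)
{
	/* packs PAGE_SIZE/16 tag bytes into the tags area;
	 * -1 means no tags address could be resolved/created */
	return save_mem_data_page_tags(mm, data_addr & PAGE_MASK);
}

The restore side below (do_restore_mem_area_tags() and its wrappers) mirrors this layout byte for byte.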
+/*
+ * Restore tags of the data memory area starting from address 'data_addr'
+ * The starting address of the data area and the area size should be
+ * quad-word * 8 aligned:
+ *	1 quad-word (16 bytes) occupies 1 byte of tags
+ *	8 tags packed into 1 double-word
+ * Argument 'len' specifies the restored area size in bytes, so it should be
+ * a multiple of quad-word * 8
+ */
+extern inline void
+do_restore_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr,
+	e2k_size_t len, int copy_data, e2k_addr_t copy_addr)
+{
+	int tags_in_dw = sizeof (u64) / sizeof (mem_tag_t);
+	int dw_data_in_dw_tags = tags_in_dw * (sizeof (u64) * 2);
+	u64 *data_area = (u64 *)(_PAGE_ALIGN_DOWN(data_addr,
+						dw_data_in_dw_tags));
+	u64 *tags_area = (u64 *)(_PAGE_ALIGN_DOWN(tags_addr, tags_in_dw));
+	u64 *copy_area = (u64 *)(_PAGE_ALIGN_DOWN(copy_addr,
+						dw_data_in_dw_tags));
+	register u64 q_dw_lo;
+	register u64 q_dw_hi;
+	register mem_tag_t q_tag = {tags : 0};
+	register u64 tags_dw = 0;
+	int tag_num, dw_num;
+	int i = 0;
+
+	DebugTM("started for data addr 0x%lx tag "
+		"addr 0x%lx and size 0x%lx bytes\n",
+		data_addr, tags_addr, len);
+	len -= (((e2k_addr_t)data_area) - data_addr);
+	len = _PAGE_ALIGN_UP(len, tags_in_dw);
+
+	for (tag_num = 0; tag_num < len / dw_data_in_dw_tags; tag_num ++) {
+		tags_dw = tags_area[0];
+		if (DEBUG_TAG_MODE && i < 16) {
+			DebugTM("   tags[0x%03x] = 0x%016lx\n",
+				(u32)(sizeof (*tags_area) * tag_num), tags_dw);
+		}
+		for (dw_num = 0; dw_num < dw_data_in_dw_tags / sizeof (u64);
+			dw_num += 2) {
+			E2K_LOAD_TAGGED_QWORD(&data_area[0], q_dw_lo, q_dw_hi);
+			q_tag.qw_mem_tag = tags_dw;
+			tags_dw >>= (sizeof (mem_tag_t) * 8);
+			if (!copy_data) {
+				/* The STORE_TAG asm must STRICTLY follow
+				 * E2K_PUTTAGD to avoid problems with the
+				 * compiler */
+				E2K_STORE_TAGGED_QWORD(&data_area[0],
+					q_dw_lo, q_dw_hi,
+					q_tag.dw_lo_mem_tag, q_tag.dw_hi_mem_tag);
+
+			} else {
+				E2K_STORE_TAGGED_QWORD(&copy_area[0],
+					q_dw_lo, q_dw_hi,
+					q_tag.dw_lo_mem_tag, q_tag.dw_hi_mem_tag);
+				copy_area += 2;
+			}
+			data_area += 2;
+			if (DEBUG_TAG_MODE && i < 16) {
+				DebugTM("   data[0x%03x] = 0x%016lx\n",
+					(u32) (tag_num * dw_data_in_dw_tags +
+					sizeof (*data_area) * (dw_num + 0)),
+					q_dw_lo);
+				DebugTM("   data[0x%03x] = 0x%016lx\n",
+					(u32) (tag_num * dw_data_in_dw_tags +
+					sizeof (*data_area) * (dw_num + 1)),
+					q_dw_hi);
+				i += 2;
+			}
+		}
+		tags_area ++;
+	}
+	DebugTM("finished with data addr 0x%px tag "
+		"addr 0x%px\n", data_area, tags_area);
+}
+extern inline void
+restore_mem_area_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr,
+	e2k_size_t len)
+{
+	do_restore_mem_area_tags(data_addr, tags_addr, len, 0, 0);
+}
+
+extern inline void
+restore_mem_page_tags(e2k_addr_t data_addr, e2k_addr_t tags_addr)
+{
+	restore_mem_area_tags(data_addr, tags_addr, PAGE_SIZE);
+}
+
+extern inline int
+restore_mem_data_page_tags(struct mm_struct *mm, e2k_addr_t data_addr)
+{
+	e2k_addr_t tags_addr;
+
+	data_addr &= PAGE_MASK;
+	tags_addr = get_tags_address(mm, data_addr, 0);
+	if (tags_addr == (e2k_addr_t)0)
+		return -1;
+	restore_mem_page_tags(data_addr, tags_addr);
+	return 0;
+}
+
+extern int save_swapped_page_tags(struct mm_struct *mm,
+	struct page *swapped_page, e2k_addr_t data_addr);
+
+extern int restore_swapped_page_tags(struct mm_struct *mm,
+	struct page *swapped_page, e2k_addr_t data_addr);
+
+extern inline int
+save_swapped_page_tags2(swap_page_info_t* info, struct page* page) {
+	return save_swapped_page_tags(info->mm, page, info->addr);
+}
+
+#ifdef CONFIG_SOFTWARE_SWAP_TAGS
+extern inline int
+save_swapped_page_tags_from_page(struct page* page) {
+	int ret = 0;
+	swap_page_info_t* info;
+	do {
+		info = get_swap_info_from_page(page);
+		if (!is_tags_area_addr(info->addr))
+			ret = save_swapped_page_tags2(info, page);
+//		free_swap_info_struct(info);
+	} while (PageWithSwapInfo(page));
+	return ret;
+}
+#endif /* CONFIG_SOFTWARE_SWAP_TAGS */
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* !(_E2K_TAG_MEM_H) */
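A small decoding sketch for the mem_tag union declared earlier in this header (not part of the patch; ETAGAPQ from tags.h below is used purely as a sample value):

static inline void split_quad_tag(u8 packed, u8 *lo, u8 *hi)
{
	mem_tag_t t;

	t.tags = packed;	/* e.g. 0xCF, an array-pointer quad-word */
	*lo = t.dw_lo_mem_tag;	/* low double-word tag:  0xF */
	*hi = t.dw_hi_mem_tag;	/* high double-word tag: 0xC */
}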
diff --git a/arch/e2k/include/asm/tags.h b/arch/e2k/include/asm/tags.h
new file mode 100644
index 0000000..960cca9
--- /dev/null
+++ b/arch/e2k/include/asm/tags.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * Definitions of the most common hardware tags.
+ *
+ */
+
+#ifndef _E2K_TAGS_H_
+#define _E2K_TAGS_H_
+
+#ifndef __ASSEMBLY__
+
+/* Internal tags list. Old style */
+#define E2K_AP_ITAG	0x0
+#define E2K_PL_ITAG	0x1
+#define E2K_PL_V2_ITAG	0x0
+#define E2K_SAP_ITAG	0x4
+
+#define ITAG_MASK		ULL(0x4000000040000000)
+#define ITAGDWD_DEBUG		(ITAG_MASK | ULL(0xdead0000dead))
+#define ITAGDWD_IO_DEBUG	(ITAG_MASK | ULL(0x10dead0010dead))
+
+/*
+ * Definition of basic external tags. Old style.
+ */
+#define E2K_NUMERIC_ETAG	0x0
+#define E2K_NULLPTR_ETAG	0x0
+#define E2K_PL_ETAG		0xA
+#define E2K_PLLO_ETAG		0xF
+#define E2K_PLHI_ETAG		0xF
+#define E2K_AP_HI_ETAG		0xC
+#define E2K_AP_LO_ETAG		0xF
+#define E2K_SAP_HI_ETAG		0xC
+#define E2K_SAP_LO_ETAG		0xF
+
+/* External tags. New style */
+
+#define ETAGNUM		0x00	/* Numeric value, generic */
+
+#define ETAGNVS		0x00	/* Numeric value, single */
+#define ETAGEWS		0x01	/* Empty value, single */
+#define ETAGDWS		0x01	/* Diagnostic value, single */
+
+#define ETAGNVD		0x00	/* Numeric value, double */
+#define ETAGNPD		0x00	/* Null pointer */
+#define ETAGEWD		0x05	/* Empty value, double */
+#define ETAGDWD		0x05	/* Diagnostic value, double */
+#define ETAGPLD		0x0A	/* Procedure label (v1-v5) */
+
+#define ETAGNVQ		0x00	/* Numeric value, quadro */
+#define ETAGNPQ		0x00	/* Null pointer */
+#define ETAGDWQ		0x55	/* Empty quadro */
+#define ETAGAPQ		0xCF	/* Array pointer */
+#define ETAGSAP		0xCF	/* Stack array pointer */
+#define ETAGPLQ		0xFF	/* Procedure label (v6-...) */
+
+#define ETAGBADQ	0xee	/* Invalid tag for quadro object */
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* !(_E2K_TAGS_H_) */
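The quad tags above are simply the two double-word tags packed nibble-wise, low dword in bits [3:0] and high dword in bits [7:4], which matches the old-style per-half values. A hypothetical helper (not part of the patch) makes the relation explicit:

#define MK_ETAG_QUAD(lo, hi)	(((hi) << 4) | (lo))

/* ETAGAPQ == MK_ETAG_QUAD(E2K_AP_LO_ETAG, E2K_AP_HI_ETAG) == 0xCF,
 * ETAGSAP == MK_ETAG_QUAD(E2K_SAP_LO_ETAG, E2K_SAP_HI_ETAG) == 0xCF,
 * ETAGDWQ == MK_ETAG_QUAD(ETAGEWD, ETAGEWD) == 0x55 */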
diff --git a/arch/e2k/include/asm/termbits.h b/arch/e2k/include/asm/termbits.h
new file mode 100644
index 0000000..8484205
--- /dev/null
+++ b/arch/e2k/include/asm/termbits.h
@@ -0,0 +1,6 @@
+#ifndef _E2K_TERMBITS_H_
+#define _E2K_TERMBITS_H_
+
+#include
+
+#endif /* _E2K_TERMBITS_H_ */
diff --git a/arch/e2k/include/asm/termios.h b/arch/e2k/include/asm/termios.h
new file mode 100644
index 0000000..8b3d2b0
--- /dev/null
+++ b/arch/e2k/include/asm/termios.h
@@ -0,0 +1,6 @@
+#ifndef _E2K_TERMIOS_H_
+#define _E2K_TERMIOS_H_
+
+#include
+
+#endif /* _E2K_TERMIOS_H_ */
diff --git a/arch/e2k/include/asm/thread_info.h b/arch/e2k/include/asm/thread_info.h
new file mode 100644
index 0000000..3a734fc
--- /dev/null
+++ b/arch/e2k/include/asm/thread_info.h
@@ -0,0 +1,436 @@
+/*
+ * $Id: thread_info.h,v 1.29 2009/08/19 07:47:20 panteleev_p Exp $
+ * thread_info.h: E2K low-level thread information
+ *
+ */
+#ifndef _E2K_THREAD_INFO_H
+#define _E2K_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_KERNEL_TIMES_ACCOUNT
+#include
+#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */
+#ifdef CONFIG_MONITORS
+#include
+#endif /* CONFIG_MONITORS */
+#endif /* __ASSEMBLY__ */
+
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+struct signal_stack {
+	unsigned long base;
+	unsigned long size;
+	unsigned long used;
+};
+
+#ifdef CONFIG_VIRTUALIZATION
+struct gthread_info;
+#endif /* CONFIG_VIRTUALIZATION */
+
+typedef struct thread_info {
+	unsigned long	flags;		/* low level flags */
+
+	unsigned long	status;		/* thread synchronous flags */
+	long long	irq_enter_clk;	/* CPU clock when irq enter */
+					/* occurred */
+	int		preempt_count;	/* 0 => preemptable, <0 */
+					/* => BUG */
+	mm_segment_t	addr_limit;	/* thread address space */
+	struct pt_regs	*pt_regs;	/* head of pt_regs */
+					/* structure queue: */
+					/* pointer to current */
+					/* pt_regs */
+	e2k_usd_hi_t	k_usd_hi;	/* Kernel current data */
+					/* stack size */
+	e2k_usd_lo_t	k_usd_lo;	/* Kernel current data */
+					/* stack base */
+
+	/* Kernel's hardware stacks */
+	e2k_psp_lo_t	k_psp_lo;
+	e2k_psp_hi_t	k_psp_hi;
+	e2k_pcsp_lo_t	k_pcsp_lo;
+	e2k_pcsp_hi_t	k_pcsp_hi;
+
+	/* Because we don't have pt_regs ready upon kernel entry we
+	 * temporarily save stack registers here, then copy to pt_regs */
+	struct hw_stacks	tmp_user_stacks;
+	/* Because we have to use the same kernel entry for both user
+	 * and kernel interrupts, we have to save user's global registers
+	 * to a temporary area; only afterwards are they copied to pt_regs
+	 * if this was a user interrupt. */
+	struct kernel_gregs	tmp_k_gregs;
+
+	struct kernel_gregs	k_gregs;
+
+	struct kernel_gregs	k_gregs_light;
+
+	struct restart_block	restart_block;
+	e2k_upsr_t	upsr;		/* kernel upsr */
+
+	data_stack_t	u_stack;	/* User data stack info */
+	hw_stack_t	u_hw_stack;	/* User hardware stacks info */
+
+	/* These fields are needed only for uhws_mode = UHWS_MODE_PSEUDO */
+	struct list_head old_u_pcs_list; /* chain stack old areas list */
+
+	struct list_head getsp_adj;
+
+	long	usr_pfault_jump;	/* where to jump if */
+					/* copy_*_user has bad addr */
+	e2k_cutd_t	u_cutd;		/* Compilation Unit Table */
+					/* base (register) */
+#ifdef CONFIG_KERNEL_TIMES_ACCOUNT
+	int		times_index;
+	long		times_num;
+	kernel_times_t	times[MAX_KERNEL_TIMES_NUM];
+	scall_times_t	*fork_scall_times;
+#endif /* CONFIG_KERNEL_TIMES_ACCOUNT */
+#ifdef CONFIG_SECONDARY_SPACE_SUPPORT
+	u64	ss_rmp_bottom;		/* lower mremap addr for
+					 * secondary space area */
+	u64	rp_start;		/* recovery point range start */
+	u64	rp_end;			/* recovery point range end */
+	u64	rp_ret_ip;		/* recovery point return IP */
+	int	last_ic_flush_cpu;	/* last cpu, where IC was */
+					/* flushed on migration */
+#endif /* CONFIG_SECONDARY_SPACE_SUPPORT */
+
+#ifdef CONFIG_PROTECTED_MODE
+	global_store_t	*g_list;
+	e2k_addr_t	multithread_address;	/* needed to interpret globals
+						 * pointing to stack */
+	struct rw_semaphore *lock;	/* can't include linux/rt_lock.h */
+	long __user	*pm_robust_list;
+#endif /* CONFIG_PROTECTED_MODE */
+#ifdef CONFIG_MONITORS
+	monitor_registers_delta_t monitors_delta;
+	atomic64_t monitors_count[NR_CPUS][MONITORS_COUNT];
+#endif /* CONFIG_MONITORS */
+
+	/* the following fields save guest kernel hardware stacks info to */
+	/* free them after sys_execve() and switch to the new process */
+	void		*old_ps_base;	/* old kernel procedure stack base */
+	e2k_size_t	old_ps_size;	/* and size */
+	void		*old_pcs_base;	/* old kernel chain stack base */
+	e2k_size_t	old_pcs_size;	/* and size */
+
+	/* Support {make/get/set}context: current context goes here.
+	 * On thread creation this is NULL and will be allocated as
+	 * needed.
+	 */
+	struct hw_context *this_hw_context;
+
+	struct {
+		unsigned long entry;
+		unsigned long sp;
+	} execve;
+
+	/* registers for ptrace */
+	unsigned long long	dam[DAM_ENTRIES_NUM];
+	e2k_aalda_t		aalda[AALDAS_REGS_NUM];
+
+	bool		last_wish;
+	struct ksignal	ksig;
+
+	/* signal stack area is used to store interrupted context */
+	struct signal_stack signal_stack;
+
+#ifdef CONFIG_VIRTUALIZATION
+	pgd_t	*kernel_image_pgd_p;	/* pointer to host kernel image pgd */
+	pgd_t	kernel_image_pgd;	/* host kernel image pgd value */
+	pgd_t	shadow_image_pgd;	/* paravirtualized guest image shadow */
+					/* pgd value */
+	pgd_t	*vcpu_pgd;		/* root PGD for the VCPU */
+					/* (can be NULL if not needed) */
+	pgd_t	*host_pgd;		/* root PGD for the host thread */
+					/* (VCPU host thread mm->pgd) */
+	void	*virt_machine;		/* pointer to main structure of */
+					/* virtual machine for */
+					/* paravirtualized guest */
+	struct kvm_vcpu	*vcpu;		/* KVM VCPU state for host */
+	unsigned long vcpu_state_base;	/* base of VCPU state for guest */
+	int (*paravirt_page_prefault)	/* paravirtualized guest page */
+					/* prefault handler */
+			(pt_regs_t *regs, trap_cellar_t *tcellar);
+	struct gthread_info *gthread_info; /* only on host: current */
+					/* guest kernel thread info */
+	int	gpid_nr;		/* only on guest: the guest */
+					/* kernel thread ID number */
+					/* on host kernel */
+	int	gmmid_nr;		/* only on guest: the guest */
+					/* thread mm ID number on */
+					/* the host */
+	struct list_head tasks_to_spin;	/* only on host: list of tasks */
+					/* to support spin lock/unlock */
+	struct gthread_info *gti_to_spin; /* guest thread waiting for the */
+					/* spin lock/unlock */
+	int	should_stop;		/* on host: guest kernel thread */
+					/* should be stopped */
+	/* structure to save state of user global registers, which are */
+	/* used to support virtualization and PV OPs by kernel */
+	host_gregs_t	h_gregs;	/* state of user global registers */
+					/* used by host to support guest */
+					/* kernel */
+#endif /* CONFIG_VIRTUALIZATION */
+} __aligned(SMP_CACHE_BYTES) thread_info_t;
+
+#endif /* !__ASSEMBLY__ */
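A hypothetical accessor (not part of the patch) showing how k_usd_lo/k_usd_hi describe the kernel data-stack window; the field layout (.word for the base, .fields.size for the size) is the one INIT_THREAD_INFO uses further down, and e2k stacks grow downward:

static inline unsigned long k_data_stack_bottom(const struct thread_info *ti)
{
	/* the base points at the top of the window, so the lowest
	 * usable address is base - size */
	return ti->k_usd_lo.word - ti->k_usd_hi.fields.size;
}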
+
+/*
+ * Thread information flags:
+ *
+ * TIF_SYSCALL_TRACE is known to be 0.
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_POLLING_NRFLAG	4	/* poll_idle is polling NEED_RESCHED */
+#define TIF_32BIT		5	/* 32-bit binary */
+#define TIF_MEMDIE		6
+#define TIF_KERNEL_TRACE	7	/* kernel trace active */
+#define TIF_NOHZ		8
+#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
+#define TIF_SECCOMP		10	/* secure computing */
+#define TIF_USD_NOT_EXPANDED	14	/* local data stack cannot be */
+					/* expanded (fixed size) */
+					/* not used yet */
+#define TIF_BAD_USD_SIZE	15	/* checker detected kernel USD size */
+					/* is wrong */
+#define TIF_USR_CONTROL_INTERRUPTS 16	/* user can control interrupts */
+#define TIF_WILL_RESCHED	17	/* task will be rescheduled soon */
+/* The following flags are only for virtualization support */
+#define TIF_VM_CREATED		19	/* task is running as virtual kernel */
+					/* and created virtual machine */
+#define TIF_MULTITHREADING	20	/* task is running as multithreading, */
+					/* for example host/guest kernel main */
+					/* threads */
+#define TIF_VIRTUALIZED_HOST	21	/* thread is host part of VCPU to run */
+					/* virtualized kernel */
+#define TIF_VIRTUALIZED_GUEST	22	/* thread is guest part of VCPU */
+					/* to run virtualized kernel */
+#define TIF_PARAVIRT_GUEST	23	/* user is paravirtualized guest */
+					/* kernel */
+#define TIF_PSEUDOTHREAD	24	/* the thread is pseudo, only to run */
+					/* on VIRQ VCPU as starter of VIRQ */
+					/* handler */
+#define TIF_HOST_AT_VCPU_MODE	25	/* the host thread is switching to */
+					/* VCPU running mode and waits for */
+					/* interception (trap on PV mode) */
+#define TIF_VIRQS_ACTIVE	26	/* the thread is ready to inject */
+					/* VIRQS interrupt */
+#define TIF_VIRQ_HANDLER	27	/* the thread is VIRQ handler and */
+					/* should run with max priority */
+#define TIF_LIGHT_HYPERCALL	28	/* hypervisor is executing light */
+					/* hypercall */
+#define TIF_GENERIC_HYPERCALL	29	/* hypervisor is executing generic */
+					/* hypercall */
+/* End of flags only for virtualization support */
+#define TIF_SYSCALL_TRACEPOINT	30	/* syscall tracepoint instrumentation */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_32BIT		(1 << TIF_32BIT)
+#define _TIF_KERNEL_TRACE	(1 << TIF_KERNEL_TRACE)
+#define _TIF_NOHZ		(1 << TIF_NOHZ)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_USD_NOT_EXPANDED	(1 << TIF_USD_NOT_EXPANDED)
+#define _TIF_BAD_USD_SIZE	(1 << TIF_BAD_USD_SIZE)
+#define _TIF_USR_CONTROL_INTERRUPTS	(1 << TIF_USR_CONTROL_INTERRUPTS)
+#define _TIF_WILL_RESCHED	(1 << TIF_WILL_RESCHED)
+#define _TIF_VM_CREATED		(1 << TIF_VM_CREATED)
+#define _TIF_MULTITHREADING	(1 << TIF_MULTITHREADING)
+#define _TIF_VIRTUALIZED_HOST	(1 << TIF_VIRTUALIZED_HOST)
+#define _TIF_VIRTUALIZED_GUEST	(1 << TIF_VIRTUALIZED_GUEST)
+#define _TIF_PARAVIRT_GUEST	(1 << TIF_PARAVIRT_GUEST)
+#define _TIF_PSEUDOTHREAD	(1 << TIF_PSEUDOTHREAD)
+#define _TIF_HOST_AT_VCPU_MODE	(1 << TIF_HOST_AT_VCPU_MODE)
+#define _TIF_VIRQS_ACTIVE	(1 << TIF_VIRQS_ACTIVE)
+#define _TIF_VIRQ_HANDLER	(1 << TIF_VIRQ_HANDLER)
+#define _TIF_LIGHT_HYPERCALL	(1 << TIF_LIGHT_HYPERCALL)
+#define _TIF_GENERIC_HYPERCALL	(1 << TIF_GENERIC_HYPERCALL)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) + +#define _TIF_WORK_SYSCALL_TRACE (_TIF_SYSCALL_TRACE | \ + _TIF_KERNEL_TRACE | \ + _TIF_SYSCALL_TRACEPOINT | \ + _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP | \ + _TIF_NOHZ) + +/* Work to do on return to userspace. */ +#define _TIF_WORK_MASK (_TIF_NOTIFY_RESUME | \ + _TIF_SIGPENDING | \ + _TIF_NEED_RESCHED) + +/* Work to do on return to userspace with exception of signals. + * This is used when it is not enough to check _TIF_SIGPENDING. */ +#define _TIF_WORK_MASK_NOSIG (_TIF_NOTIFY_RESUME | \ + _TIF_NEED_RESCHED) + +/* + * Thread-synchronous status. + * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_DELAYED_SIG_HANDLING 0x00000001 +#define TS_KEEP_PAGES_VALID 0x00000004 +#define TS_MMAP_PRIVILEGED 0x00000010 +#define TS_MMAP_DONTEXPAND 0x00000020 +#define TS_MMAP_DONTCOPY 0x00000040 +#define TS_KERNEL_SYSCALL 0x00000100 +#define TS_USER_EXECVE 0x00001000 +#define TS_MMAP_PS 0x00010000 +#define TS_MMAP_PCS 0x00020000 +#define TS_MMAP_NOHUGEPAGE 0x00040000 +#define TS_MMAP_SIGNAL_STACK 0x00080000 +#define TS_SINGLESTEP_KERNEL 0x00100000 +#define TS_SINGLESTEP_USER 0x00200000 + +#define THREAD_SIZE KERNEL_STACKS_SIZE + +#ifndef __ASSEMBLY__ + +/* + * flag set/clear/test wrappers + * - pass TS_xxxx constants to these functions + */ + +static inline unsigned long set_ti_status_flag(struct thread_info *ti, + unsigned long flag) +{ + unsigned long old_flags; + + old_flags = ti->status; + ti->status = old_flags | flag; + + return ~old_flags & flag; +} + +static inline void clear_ti_status_flag(struct thread_info *ti, + unsigned long flag) +{ + ti->status &= ~flag; +} + +static inline unsigned long test_ti_status_flag(struct thread_info *ti, + unsigned long flag) +{ + return ti->status & flag; +} + +#define set_ts_flag(flag) \ + set_ti_status_flag(current_thread_info(), flag) +#define clear_ts_flag(flag) \ + clear_ti_status_flag(current_thread_info(), flag) +#define test_ts_flag(flag) \ + test_ti_status_flag(current_thread_info(), flag) + +#define native_current_thread_info() current_thread_info() +#define boot_current_thread_info() BOOT_READ_CURRENT_REG() + +/* + * Registers (%osr0 & %gdN) usually hold pointer to current thread info + * structure. 
But these registers are used to hold the CPU # during the boot-time
+ * initialization process
+ */
+#define	boot_set_current_thread_info(cpu_id)	\
+({	\
+	BOOT_WRITE_CURRENT_REG_VALUE(cpu_id);	\
+	E2K_SET_DGREG_NV(CURRENT_TASK_GREG, NULL);	\
+})
+
+	/* support multithreading for protected mode */
+#define NUM_THREAD(x)	((x)->orig_psr_lw) /* number of threads (type = TYPE_INIT) */
+#define WAS_MULTITHREADING	(current_thread_info()->g_list	\
+			&& NUM_THREAD(current_thread_info()->g_list) >= 1)
+
+#ifdef CONFIG_PROTECTED_MODE
+static inline void clear_g_list(struct thread_info *thread_info)
+{
+	/* These are initialized from interrupt handler when a thread
+	 * writes SAP to a global variable or when creating a new thread
+	 * (for details see comment in arch/e2k/3p/global_sp.c) */
+	thread_info->g_list = NULL;
+	thread_info->multithread_address = 0;
+	thread_info->lock = NULL;
+}
+#else /* CONFIG_PROTECTED_MODE */
+static inline void clear_g_list(struct thread_info *thread_info) { }
+#endif
+
+#define thread_info_task(ti)	\
+		container_of(ti, struct task_struct, thread_info)
+
+#define INIT_OLD_U_HW_STACKS	\
+	.old_u_pcs_list = LIST_HEAD_INIT(init_task.thread_info.old_u_pcs_list),
+
+#ifdef CONFIG_SECONDARY_SPACE_SUPPORT
+# define INIT_LAST_IC_FLUSH_CPU	.last_ic_flush_cpu = -1,
+#else
+# define INIT_LAST_IC_FLUSH_CPU
+#endif
+
+/*
+ * Macros/functions for gaining access to the thread information structure.
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk)	\
+{	\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+	.addr_limit	= KERNEL_DS,	\
+	.k_usd_lo = (e2k_usd_lo_t) {	\
+		.word = (unsigned long) init_stack +	\
+			KERNEL_C_STACK_OFFSET + KERNEL_C_STACK_SIZE,	\
+	},	\
+	.k_usd_hi = (e2k_usd_hi_t) {	\
+		.fields.size = KERNEL_C_STACK_SIZE	\
+	},	\
+	INIT_OLD_U_HW_STACKS	\
+	INIT_LAST_IC_FLUSH_CPU	\
+}
+
+extern void arch_task_cache_init(void);
+
+/* Hardware stacks must be aligned on page boundary */
+#define THREAD_ALIGN	PAGE_SIZE
+
+#ifndef ASM_OFFSETS_C
+extern void clear_thread_info(struct task_struct *task);
+#endif /* ASM_OFFSETS_C */
+
+/*
+ * Thread information allocation.
+ */
+
+extern unsigned long *alloc_thread_stack_node(struct task_struct *, int);
+extern void free_thread_stack(struct task_struct *tsk);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _E2K_THREAD_INFO_H */
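A minimal usage sketch of the thread-synchronous TS_* status bits (not part of the patch; it relies only on the set_ts_flag()/clear_ts_flag() wrappers defined above, whose set operation returns nonzero when the flag was newly set). Because only the owning thread touches ->status, no atomics are needed:

static inline void example_privileged_mmap_guard(void)
{
	unsigned long newly_set = set_ts_flag(TS_MMAP_PRIVILEGED);

	/* ... perform the mmap that must observe TS_MMAP_PRIVILEGED ... */

	if (newly_set)
		clear_ts_flag(TS_MMAP_PRIVILEGED);
}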
diff --git a/arch/e2k/include/asm/time.h b/arch/e2k/include/asm/time.h
new file mode 100644
index 0000000..8ba575d
--- /dev/null
+++ b/arch/e2k/include/asm/time.h
@@ -0,0 +1,42 @@
+/*
+ * based on include/asm-i386/mach-default/mach_time.h
+ *
+ * Machine specific set RTC function for generic.
+ * Split out from time.c by Osamu Tomita
+ */
+#ifndef _E2K_TIME_H
+#define _E2K_TIME_H
+
+#include
+#include
+
+#define mach_set_wallclock(nowtime)	(machine.set_wallclock(nowtime))
+#define mach_get_wallclock()		(machine.get_wallclock())
+
+extern void native_clock_init(void);
+
+#ifdef CONFIG_PARAVIRT
+/* Needed only to account for time stolen by the guest */
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern unsigned long native_steal_clock(int cpu);
+#endif /* CONFIG_PARAVIRT */
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized guest and host kernel */
+#include
+#else /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+/* native kernel with or without virtualization support */
+static inline void arch_clock_init(void)
+{
+	native_clock_init();
+}
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+extern void arch_clock_setup(void);
+
+#endif /* !_E2K_TIME_H */
diff --git a/arch/e2k/include/asm/timer.h b/arch/e2k/include/asm/timer.h
new file mode 100644
index 0000000..7e5cfd6
--- /dev/null
+++ b/arch/e2k/include/asm/timer.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_TIMER_H
+#define _ASM_TIMER_H
+
+#include
+#include
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+extern bool disable_apic_timer;
+extern bool disable_epic_timer;
+
+#endif
diff --git a/arch/e2k/include/asm/timex.h b/arch/e2k/include/asm/timex.h
new file mode 100644
index 0000000..24fa302
--- /dev/null
+++ b/arch/e2k/include/asm/timex.h
@@ -0,0 +1,80 @@
+/*
+ * linux/include/asm-e2k/timex.h
+ *
+ * E2K architecture timex specifications
+ */
+#ifndef _E2K_TIMEX_H_
+#define _E2K_TIMEX_H_
+
+#include
+
+/* Note that this is lt_timer tick rate. */
+#define CLOCK_TICK_RATE	(machine.clock_tick_rate)
+
+typedef unsigned long cycles_t;
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+static inline cycles_t get_cycles(void)
+{
+	return NATIVE_READ_CLKR_REG_VALUE();
+}
+#define UNSET_CPU_FREQ	((u32)(-1))
+extern u32 cpu_freq_hz;
+extern u64 cpu_clock_psec;	/* number of picoseconds in one CPU clock */
+
+static inline long long cycles_2_psec(cycles_t cycles)
+{
+	return cycles * cpu_clock_psec;
+}
+
+static inline long long cycles_2nsec(cycles_t cycles)
+{
+	return cycles_2_psec(cycles) / 1000;
+}
+
+static inline long long cycles_2usec(cycles_t cycles)
+{
+	return cycles_2_psec(cycles) / 1000000;
+}
+
+static inline cycles_t psecs_2_cycles(long long psecs)
+{
+	return psecs / cpu_clock_psec;
+}
+
+static inline cycles_t nsecs_2cycles(long long nsecs)
+{
+	return psecs_2_cycles(nsecs * 1000);
+}
+
+static inline cycles_t usecs_2cycles(long long usecs)
+{
+	return psecs_2_cycles(usecs * 1000000);
+}
+
+static inline cycles_t get_cycles_rate(void)
+{
+	return (cycles_t)cpu_freq_hz;
+}
+
+extern void __init native_time_init(void);
+extern int native_read_current_timer(unsigned long *timer_val);
+
+#if defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized guest and host kernel */
+#include
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#else /* native kernel or native kernel with virtualization support */
+static inline void time_init(void)
+{
+	native_time_init();
+}
+static inline int read_current_timer(unsigned long *timer_val)
+{
+	return native_read_current_timer(timer_val);
+}
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* _E2K_TIMEX_H_ */
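The conversions above are plain products and quotients of cpu_clock_psec: at cpu_freq_hz = 1 GHz, cpu_clock_psec is 1000, so 2500 cycles become 2,500,000 ps, which cycles_2nsec() reports as 2500 ns. A minimal measurement helper built only on the functions above (not part of the patch; the name is illustrative):

static inline long long measure_ns_example(void (*fn)(void))
{
	cycles_t start = get_cycles();

	fn();	/* measured section */
	return cycles_2nsec(get_cycles() - start);
}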
diff --git a/arch/e2k/include/asm/tlb.h b/arch/e2k/include/asm/tlb.h
new file mode 100644
index 0000000..94194c4
--- /dev/null
+++ b/arch/e2k/include/asm/tlb.h
@@ -0,0 +1,37 @@
+#ifndef _E2K_TLB_H
+#define _E2K_TLB_H
+
+#include
+
+
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+
+#define tlb_flush(tlb) \
+{ \
+	if (!(tlb)->fullmm && !(tlb)->need_flush_all) \
+		flush_tlb_mm_range((tlb)->mm, (tlb)->start, (tlb)->end); \
+	else \
+		flush_tlb_mm((tlb)->mm); \
+}
+
+#define tlb_start_vma(tlb, vma) \
+do { \
+} while (0)
+
+#define tlb_end_vma(tlb, vma) \
+do { \
+} while (0)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address) \
+	do { } while (0)
+
+#include
+
+#define __pud_free_tlb(tlb, pudp, start)	pud_free((tlb)->mm, pudp)
+#define __pmd_free_tlb(tlb, pmdp, start)	pmd_free((tlb)->mm, pmdp)
+#define __pte_free_tlb(tlb, ptep, addr)		pte_free((tlb)->mm, ptep)
+
+#endif /* _E2K_TLB_H */
diff --git a/arch/e2k/include/asm/tlb_regs_access.h b/arch/e2k/include/asm/tlb_regs_access.h
new file mode 100644
index 0000000..b178466
--- /dev/null
+++ b/arch/e2k/include/asm/tlb_regs_access.h
@@ -0,0 +1,107 @@
+#ifndef _E2K_TLB_REGS_ACCESS_H_
+#define _E2K_TLB_REGS_ACCESS_H_
+
+#include
+#include
+
+/*
+ * DTLB/ITLB registers operations
+ */
+
+/*
+ * Write Data TLB tag register
+ */
+static inline void
+write_DTLB_tag_reg(tlb_addr_t tlb_addr, tlb_tag_t tlb_tag)
+{
+	DebugMR("Write DTLB addr 0x%lx tag 0x%lx\n",
+		tlb_addr_val(tlb_addr), tlb_tag_val(tlb_tag));
+	WRITE_DTLB_REG(tlb_addr_val(tlb_addr), tlb_tag_val(tlb_tag));
+}
+
+/*
+ * Write Data TLB entry register
+ */
+static inline void
+write_DTLB_entry_reg(tlb_addr_t tlb_addr, mmu_reg_t pte)
+{
+	DebugMR("Write DTLB addr 0x%lx entry 0x%llx\n",
+		tlb_addr_val(tlb_addr), pte);
+	WRITE_DTLB_REG(tlb_addr_val(tlb_addr), pte);
+}
+
+/*
+ * Read Data TLB tag register
+ */
+static inline tlb_tag_t
+read_DTLB_tag_reg(tlb_addr_t tlb_addr)
+{
+	tlb_tag_t tlb_tag;
+
+	tlb_tag_val(tlb_tag) = READ_DTLB_REG(tlb_addr_val(tlb_addr));
+	DebugTLB("Read DTLB tag 0x%lx for addr 0x%lx\n",
+		tlb_tag_val(tlb_tag), tlb_addr_val(tlb_addr));
+	return tlb_tag;
+}
+static inline tlb_tag_t
+read_DTLB_va_tag_reg(e2k_addr_t virt_addr, int set_num, int large_page)
+{
+	tlb_addr_t tlb_addr;
+
+	tlb_addr = tlb_addr_tag_access;
+	tlb_addr = tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr,
+						large_page);
+	tlb_addr = tlb_addr_set_set_num(tlb_addr, set_num);
+	return read_DTLB_tag_reg(tlb_addr);
+}
+
+/*
+ * Read Data TLB entry register
+ */
+static inline mmu_reg_t
+read_DTLB_entry_reg(tlb_addr_t tlb_addr)
+{
+	mmu_reg_t pte;
+
+	pte = READ_DTLB_REG(tlb_addr_val(tlb_addr));
+	DebugTLB("Read DTLB entry 0x%llx for addr 0x%lx\n",
+		pte, tlb_addr_val(tlb_addr));
+	return pte;
+}
+static inline mmu_reg_t
+read_DTLB_va_entry_reg(e2k_addr_t virt_addr, int set_num, int large_page)
+{
+	tlb_addr_t tlb_addr;
+
+	tlb_addr = tlb_addr_entry_access;
+	tlb_addr = tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr,
+						large_page);
+	tlb_addr = tlb_addr_set_set_num(tlb_addr, set_num);
+	return read_DTLB_entry_reg(tlb_addr);
+}
+
+/*
+ * Get Entry probe for virtual address
+ */
+
+#define	GET_MMU_DTLB_ENTRY(virt_addr)	\
+		(unsigned long)ENTRY_PROBE_MMU_OP(probe_addr_val(virt_addr))
+static inline probe_entry_t
+get_MMU_DTLB_ENTRY(e2k_addr_t virt_addr)
+{
+	DebugMR("Get DTLB entry probe for virtual address 0x%lx\n",
+		virt_addr);
+	return __probe_entry(GET_MMU_DTLB_ENTRY(virt_addr));
+}
+
+/*
+ * Get physical address for virtual address
+ */
+
+#define	GET_MMU_PHYS_ADDR(virt_addr)	\
+		((unsigned long)ADDRESS_PROBE_MMU_OP(probe_addr_val(virt_addr)))
+static inline probe_entry_t
+get_MMU_phys_addr(e2k_addr_t virt_addr)
+{
+	DebugMR("Get physical address for virtual address 0x%lx\n",
+		virt_addr);
+	return __probe_entry(GET_MMU_PHYS_ADDR(virt_addr));
+}
+
+#endif
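A debugging sketch built on the accessors above (not part of the patch): probe all four ways of the DTLB line that would cache a given small page. The set count of 4 matches NATIVE_TLB_SETS_NUM in tlb_regs_types.h below; the printk formats follow the Debug helpers above:

static inline void dump_dtlb_line_example(e2k_addr_t vaddr)
{
	int set;

	for (set = 0; set < 4; set++) {
		tlb_tag_t tag = read_DTLB_va_tag_reg(vaddr, set, 0);
		mmu_reg_t pte = read_DTLB_va_entry_reg(vaddr, set, 0);

		printk("set %d: tag 0x%lx pte 0x%llx\n",
			set, tlb_tag_val(tag), pte);
	}
}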
diff --git a/arch/e2k/include/asm/tlb_regs_types.h b/arch/e2k/include/asm/tlb_regs_types.h
new file mode 100644
index 0000000..da9f27e
--- /dev/null
+++ b/arch/e2k/include/asm/tlb_regs_types.h
@@ -0,0 +1,428 @@
+#ifndef _E2K_TLB_REGS_TYPES_H_
+#define _E2K_TLB_REGS_TYPES_H_
+
+#include
+#include
+
+
+/* now DTLB entry format is different on iset V6 vs V2-V5 */
+#if CONFIG_CPU_ISET >= 6
+# ifdef CONFIG_MMU_PT_V6
+#  define MMU_IS_DTLB_V6()	true
+# else /* ! CONFIG_MMU_PT_V6 */
+#  define MMU_IS_DTLB_V6()	false
+# endif /* CONFIG_MMU_PT_V6 */
+#elif CONFIG_CPU_ISET >= 2
+# define MMU_IS_DTLB_V6()	false
+#elif CONFIG_CPU_ISET == 0
+# ifdef E2K_P2V
+#  define MMU_IS_DTLB_V6()	\
+		(boot_machine.mmu_pt_v6)
+# else /* ! E2K_P2V */
+#  define MMU_IS_DTLB_V6()	\
+		(machine.mmu_pt_v6)
+# endif /* E2K_P2V */
+#else /* CONFIG_CPU_ISET undefined or negative */
+# warning "Undefined CPU ISET VERSION #, MMU pt_v6 mode is defined dynamically"
+# ifdef E2K_P2V
+#  define MMU_IS_DTLB_V6()	\
+		(boot_machine.mmu_pt_v6)
+# else /* ! E2K_P2V */
+#  define MMU_IS_DTLB_V6()	\
+		(machine.mmu_pt_v6)
+# endif /* E2K_P2V */
+#endif /* CONFIG_CPU_ISET 0-6 */
+
+
+/*
+ * TLB (DTLB & ITLB) structure
+ */
+
+#define NATIVE_TLB_LINES_BITS_NUM	(machine.tlb_lines_bits_num)
+#define BOOT_NATIVE_TLB_LINES_BITS_NUM	(boot_machine.tlb_lines_bits_num)
+
+#define NATIVE_TLB_LINES_NUM		(1 << NATIVE_TLB_LINES_BITS_NUM)
+#define BOOT_NATIVE_TLB_LINES_NUM	(1 << BOOT_NATIVE_TLB_LINES_BITS_NUM)
+
+#define NATIVE_MAX_TLB_LINES_NUM	(1 << ES2_TLB_LINES_BITS_NUM)
+
+#define NATIVE_TLB_SETS_NUM		4
+#define BOOT_NATIVE_TLB_SETS_NUM	NATIVE_TLB_SETS_NUM
+#define NATIVE_TLB_LARGE_PAGE_SET_NO	3	/* large page entries */
+						/* occupy this set in each */
+						/* line */
+
+/*
+ * DTLB/ITLB registers operations
+ */
+
+/* DTLB/ITLB registers access operations address */
+
+#ifndef __ASSEMBLY__
+typedef	e2k_addr_t	tlb_addr_t;
+typedef	tlb_addr_t	dtlb_addr_t;
+typedef	tlb_addr_t	itlb_addr_t;
+#endif /* ! __ASSEMBLY__ */
+
+#define tlb_addr_val(tlb_addr)		(tlb_addr)
+#define dtlb_addr_val(dtlb_addr)	tlb_addr_val(dtlb_addr)
+#define itlb_addr_val(itlb_addr)	tlb_addr_val(itlb_addr)
+
+#define __tlb_addr(tlb_addr_val)	(tlb_addr_val)
+#define __dtlb_addr(dtlb_addr_val)	__tlb_addr(dtlb_addr_val)
+#define __itlb_addr(itlb_addr_val)	__tlb_addr(itlb_addr_val)
+
+
+/* Virtual page address translation to TLB line & set */
+
+#define _TLB_ADDR_LINE_NUM_SHIFT	12	/* [19:12] */
+
+#define E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr)	\
+		(((virt_addr) & (machine.tlb_addr_line_num)) >>	\
+			_TLB_ADDR_LINE_NUM_SHIFT)
+#define BOOT_E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr)	\
+		(((virt_addr) & (boot_machine.tlb_addr_line_num)) >>	\
+			_TLB_ADDR_LINE_NUM_SHIFT)
+
+#define E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr)	\
+		(((virt_addr) & (machine.tlb_addr_line_num2)) >>	\
+			(machine.tlb_addr_line_num_shift2))
+#define BOOT_E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr)	\
+		(((virt_addr) & (boot_machine.tlb_addr_line_num2)) >>	\
+			(boot_machine.tlb_addr_line_num_shift2))
+
+#define VADDR_TO_TLB_LINE_NUM(virt_addr, large_page)	\
+		((large_page) ? E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr) : \
+				E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr))
+#define BOOT_VADDR_TO_TLB_LINE_NUM(virt_addr, large_page)	\
+		((large_page) ? BOOT_E2K_PG_LARGE_TO_TLB_LINE_NUM(virt_addr) : \
+				BOOT_E2K_PG_4K_TO_TLB_LINE_NUM(virt_addr))
+
+#define _TLB_ADDR_TYPE		0x0000000000000007	/* type of operation */
+#define _TLB_ADDR_TAG_ACCESS	0x0000000000000000	/* tag access oper. */
+							/* type */
+#define _TLB_ADDR_ENTRY_ACCESS	0x0000000000000001	/* entry access oper.
*/ + /* type */ + +#define tlb_addr_set_type(tlb_addr, type) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & ~_TLB_ADDR_TYPE) | \ + ((type) & _TLB_ADDR_TYPE))) +#define tlb_addr_set_tag_access(tlb_addr) \ + tlb_addr_set_type(tlb_addr, _TLB_ADDR_TAG_ACCESS) +#define tlb_addr_set_entry_access(tlb_addr) \ + tlb_addr_set_type(tlb_addr, _TLB_ADDR_ENTRY_ACCESS) +#define tlb_addr_tag_access _TLB_ADDR_TAG_ACCESS +#define tlb_addr_entry_access _TLB_ADDR_ENTRY_ACCESS + +#define tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, large_page) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~((machine.tlb_addr_line_num) | \ + (machine.tlb_addr_line_num2))) | \ + (VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + _TLB_ADDR_LINE_NUM_SHIFT) | \ + (VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + (machine.tlb_addr_line_num_shift2)))) +#define boot_tlb_addr_set_vaddr_line_num(tlb_addr, virt_addr, large_page) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~((boot_machine.tlb_addr_line_num) | \ + (boot_machine.tlb_addr_line_num2))) | \ + (BOOT_VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + _TLB_ADDR_LINE_NUM_SHIFT) | \ + (BOOT_VADDR_TO_TLB_LINE_NUM(virt_addr, large_page) << \ + (boot_machine.tlb_addr_line_num_shift2)))) + +#define tlb_addr_set_set_num(tlb_addr, set_num) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~(machine.tlb_addr_set_num)) | \ + (((set_num) << (machine.tlb_addr_set_num_shift)) & \ + (machine.tlb_addr_set_num)))) +#define boot_tlb_addr_set_set_num(tlb_addr, set_num) \ + (__tlb_addr((tlb_addr_val(tlb_addr) & \ + ~(boot_machine.tlb_addr_set_num)) | \ + (((set_num) << (boot_machine.tlb_addr_set_num_shift)) & \ + (boot_machine.tlb_addr_set_num)))) + +/* DTLB/ITLB tag structure */ + +#ifndef __ASSEMBLY__ +typedef e2k_addr_t tlb_tag_t; +typedef tlb_tag_t dtlb_tag_t; +typedef tlb_tag_t itlb_tag_t; +#endif /* ! 
__ASSEMBLY__ */
+
+#define tlb_tag_val(tlb_tag)	(tlb_tag)
+#define dtlb_tag_val(dtlb_tag)	tlb_tag_val(dtlb_tag)
+#define itlb_tag_val(itlb_tag)	tlb_tag_val(itlb_tag)
+
+#define __tlb_tag(tlb_tag_val)		(tlb_tag_val)
+#define __dtlb_tag(dtlb_tag_val)	__tlb_tag(dtlb_tag_val)
+#define __itlb_tag(itlb_tag_val)	__tlb_tag(itlb_tag_val)
+
+#define _TLB_TAG_VA_TAG_SHIFT	 7	/* [35: 7] */
+#define _TLB_TAG_CONTEXT_SHIFT	36	/* [47:36] */
+
+#define _TLB_TAG_VA_TAG		0x0000000fffffff80	/* tag of virtual */
+							/* address [47:19] */
+							/* [18:12] - line # */
+#define _TLB_TAG_CONTEXT	0x0000fff000000000	/* context # */
+#define _TLB_TAG_ROOT		0x0001000000000000	/* should be 0 */
+#define _TLB_TAG_PHYS		0x0002000000000000	/* should be 0 */
+#define _TLB_TAG_G		0x0004000000000000	/* global page */
+#define _TLB_TAG_USED		0x0008000000000000	/* used flag */
+#define _TLB_TAG_VALID		0x0010000000000000	/* valid bit */
+
+#define TLB_VADDR_TO_VA_TAG(virt_addr)	\
+		((((virt_addr) >> PAGE_SHIFT) & _TLB_TAG_VA_TAG) <<	\
+			_TLB_TAG_VA_TAG_SHIFT)
+
+#define _TLB_TAG_KERNEL_IMAGE		(_TLB_TAG_VALID | _TLB_TAG_USED | \
+		((long)E2K_KERNEL_CONTEXT << _TLB_TAG_CONTEXT_SHIFT))
+#define _TLB_KERNEL_SWITCHING_IMAGE	_TLB_TAG_KERNEL_IMAGE
+#define _TLB_KERNEL_US_STACK		(_TLB_TAG_VALID | _TLB_TAG_USED | \
+		((long)E2K_KERNEL_CONTEXT << _TLB_TAG_CONTEXT_SHIFT))
+
+#define TLB_KERNEL_SWITCHING_TEXT	__tlb_tag(_TLB_KERNEL_SWITCHING_IMAGE)
+#define TLB_KERNEL_SWITCHING_DATA	__tlb_tag(_TLB_KERNEL_SWITCHING_IMAGE)
+#define TLB_KERNEL_SWITCHING_US_STACK	__tlb_tag(_TLB_KERNEL_US_STACK)
+
+#define tlb_tag_get_va_tag(tlb_tag)	\
+		(tlb_tag_val(tlb_tag) & _TLB_TAG_VA_TAG)
+#define tlb_tag_set_va_tag(tlb_tag, va_page)	\
+		(__tlb_tag((tlb_tag_val(tlb_tag) & ~_TLB_TAG_VA_TAG) |	\
+		((va_page) & _TLB_TAG_VA_TAG)))
+#define tlb_tag_set_vaddr_va_tag(tlb_tag, virt_addr)	\
+		(__tlb_tag((tlb_tag_val(tlb_tag) & ~_TLB_TAG_VA_TAG) |	\
+		TLB_VADDR_TO_VA_TAG(virt_addr)))
+
+#define tlb_tag_get_context(tlb_tag)	\
+		(tlb_tag_val(tlb_tag) & _TLB_TAG_CONTEXT)
+#define tlb_tag_set_context(tlb_tag, context)	\
+		(__tlb_tag((tlb_tag_val(tlb_tag) & ~_TLB_TAG_CONTEXT) |	\
+		((context) << _TLB_TAG_CONTEXT_SHIFT) & _TLB_TAG_CONTEXT))
+
+/*
+ * This takes a virtual page address and protection bits to make
+ * TLB tag: tlb_tag_t
+ */
+#define mk_tlb_tag_vaddr(virt_addr, tag_pgprot)	\
+		(__tlb_tag(TLB_VADDR_TO_VA_TAG(virt_addr) | tlb_tag_val(tag_pgprot)))
+
+/* DTLB/ITLB entry structure is the same as PTE structure of page tables */
+
+/*
+ * TLB address probe operations, TLB Entry_probe operations
+ */
+
+/* Virtual address for TLB address probe & Entry probe operations */
+#ifndef __ASSEMBLY__
+typedef	e2k_addr_t	probe_addr_t;
+
+#define probe_addr_val(probe_addr)	(probe_addr)
+
+#define __probe_addr(probe_addr_val)	(probe_addr_val)
+#endif /* __ASSEMBLY__ */
+
+#define _PROBE_ADDR_VA	0x0000ffffffffffff	/* virtual address */
+						/* [47: 0] */
+
+/* Result of TLB Entry probe operation */
+#ifndef __ASSEMBLY__
+typedef	unsigned long	probe_entry_t;
+
+#define probe_entry_val(probe_entry)	(probe_entry)
+
+#define __probe_entry(probe_entry_val)	(probe_entry_val)
+
+#include
+#include
+#include
+
+#if DTLB_ENTRY_PH_BOUND_V2 == DTLB_ENTRY_PH_BOUND_V6
+# define DTLB_ENTRY_PH_BOUND	DTLB_ENTRY_PH_BOUND_V6
+#else
+# error "Page table PH_BOUND bit is different for V2 vs V6"
+#endif
+#if DTLB_ENTRY_ILLEGAL_PAGE_V2 == DTLB_ENTRY_ILLEGAL_PAGE_V6
+# define DTLB_ENTRY_ILLEGAL_PAGE	DTLB_ENTRY_ILLEGAL_PAGE_V6
+#else
+# error "Page table ILLEGAL_PAGE bit is different for V2 vs V6"
+#endif
+#if
DTLB_ENTRY_PAGE_MISS_V2 == DTLB_ENTRY_PAGE_MISS_V6 +# define DTLB_ENTRY_PAGE_MISS DTLB_ENTRY_PAGE_MISS_V6 +#else +# error "Page table PAGE_MISS bit is different for V2 vs V6" +#endif + +static inline probe_entry_t +mmu_fill_dtlb_val_flags(const uni_dtlb_t uni_flags, bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return fill_dtlb_val_v6_flags(uni_flags); + else + return fill_dtlb_val_v2_flags(uni_flags); +} +static inline probe_entry_t +mmu_get_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return get_dtlb_val_v6_flags(dtlb_val, uni_flags); + else + return get_dtlb_val_v2_flags(dtlb_val, uni_flags); +} +static inline bool +mmu_test_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + return mmu_get_dtlb_val_flags(dtlb_val, uni_flags, mmu_pt_v6) != 0; +} +static inline probe_entry_t +mmu_set_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return set_dtlb_val_v6_flags(dtlb_val, uni_flags); + else + return set_dtlb_val_v2_flags(dtlb_val, uni_flags); +} +static inline probe_entry_t +mmu_clear_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags, + bool mmu_pt_v6) +{ + if (mmu_pt_v6) + return clear_dtlb_val_v6_flags(dtlb_val, uni_flags); + else + return clear_dtlb_val_v2_flags(dtlb_val, uni_flags); +} +static inline probe_entry_t +fill_dtlb_val_flags(const uni_dtlb_t uni_flags) +{ + return mmu_fill_dtlb_val_flags(uni_flags, MMU_IS_DTLB_V6()); +} +static inline probe_entry_t +get_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_get_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +static inline bool +test_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_test_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +static inline probe_entry_t +set_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_set_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +static inline probe_entry_t +clear_dtlb_val_flags(probe_entry_t dtlb_val, const uni_dtlb_t uni_flags) +{ + return mmu_clear_dtlb_val_flags(dtlb_val, uni_flags, MMU_IS_DTLB_V6()); +} +#define DTLB_ENTRY_INIT(uni_flags) fill_dtlb_val_flags(uni_flags) +#define DTLB_ENTRY_GET(dtlb_val, uni_flags) \ + get_dtlb_val_flags(dtlb_val, uni_flags) +#define DTLB_ENTRY_TEST(dtlb_val, uni_flags) \ + test_dtlb_val_flags(dtlb_val, uni_flags) +#define DTLB_ENTRY_SET(dtlb_val, uni_flags) \ + set_dtlb_val_flags(dtlb_val, uni_flags) +#define DTLB_ENTRY_CLEAR(dtlb_val, uni_flags) \ + clear_dtlb_val_flags(dtlb_val, uni_flags) + +#define DTLB_ENTRY_ERROR_MASK DTLB_ENTRY_INIT(UNI_DTLB_ERROR_MASK) +#define DTLB_ENTRY_MISS_LEVEL_MASK \ + DTLB_ENTRY_INIT(UNI_DTLB_MISS_LEVEL) +#define DTLB_ENTRY_PROBE_SUCCESSFUL \ + DTLB_ENTRY_INIT(UNI_DTLB_SUCCESSFUL) +#define DTLB_ENTRY_RES_BITS DTLB_ENTRY_INIT(UNI_DTLB_RES_BITS) +#define DTLB_ENTRY_WR DTLB_ENTRY_INIT(UNI_PAGE_WRITE) +#define DTLB_ENTRY_PV DTLB_ENTRY_INIT(UNI_PAGE_PRIV) +#define DTLB_ENTRY_VVA DTLB_ENTRY_INIT(UNI_PAGE_VALID) +#define DTLB_EP_RES DTLB_ENTRY_INIT(UNI_DTLB_EP_RES) +#define DTLB_EP_FAULT_RES (~DTLB_EP_RES) +#define DTLB_ENTRY_TEST_WRITEABLE(dtlb_val) \ + DTLB_ENTRY_TEST(dtlb_val, UNI_PAGE_WRITE) +#define DTLB_ENTRY_TEST_VVA(dtlb_val) \ + DTLB_ENTRY_TEST(dtlb_val, UNI_PAGE_VALID) +#define DTLB_ENTRY_TEST_SUCCESSFUL(dtlb_val) \ + ((MMU_IS_DTLB_V6()) ? 
\
+		DTLB_ENTRY_TEST(dtlb_val, UNI_DTLB_SUCCESSFUL)	\
+	:							\
+		!DTLB_ENTRY_TEST(dtlb_val, UNI_DTLB_SUCCESSFUL))
+
+static inline probe_entry_t
+mmu_phys_addr_to_dtlb_pha(e2k_addr_t phys_addr, bool mmu_pt_v6)
+{
+	if (mmu_pt_v6)
+		return PA_TO_DTLB_ENTRY_PHA_V6(phys_addr);
+	else
+		return PA_TO_DTLB_ENTRY_PHA_V2(phys_addr);
+}
+static inline e2k_addr_t
+mmu_dtlb_pha_to_phys_addr(probe_entry_t dtlb_val, bool mmu_pt_v6)
+{
+	if (mmu_pt_v6)
+		return DTLB_ENTRY_PHA_TO_PA_V6(dtlb_val);
+	else
+		return DTLB_ENTRY_PHA_TO_PA_V2(dtlb_val);
+}
+
+static inline probe_entry_t
+phys_addr_to_dtlb_pha(e2k_addr_t phys_addr)
+{
+	return mmu_phys_addr_to_dtlb_pha(phys_addr, MMU_IS_DTLB_V6());
+}
+static inline e2k_addr_t
+dtlb_pha_to_phys_addr(probe_entry_t dtlb_val)
+{
+	return mmu_dtlb_pha_to_phys_addr(dtlb_val, MMU_IS_DTLB_V6());
+}
+#define PA_TO_DTLB_ENTRY_PHA(phys_addr)	phys_addr_to_dtlb_pha(phys_addr)
+#define DTLB_ENTRY_PHA_TO_PA(dtlb_val)	dtlb_pha_to_phys_addr(dtlb_val)
+
+/* physical memory bound (x86) [63] */
+#define PH_BOUND_EP_RES		DTLB_ENTRY_PH_BOUND
+/* illegal page [62] */
+#define ILLEGAL_PAGE_EP_RES	DTLB_ENTRY_ILLEGAL_PAGE
+/* page miss [61] */
+#define PAGE_MISS_EP_RES	DTLB_ENTRY_PAGE_MISS
+/* miss level [60:59] */
+#define MISS_LEVEL_EP_RES	DTLB_ENTRY_MISS_LEVEL_MASK
+/* reserved bits [57] */
+#define RES_BITS_EP_RES		DTLB_ENTRY_RES_BITS
+
+/*
+ * DTLB address probe result format
+ */
+/* Physical address of successful DTLB address probe [39: 0]/[47:0] */
+#define PH_ADDR_AP_RES		DTLB_ENTRY_INIT(UNI_DTLB_PH_ADDR_AP_RES)
+/* AP disable result [62] */
+#define DISABLE_AP_RES		ILLEGAL_PAGE_EP_RES
+/* page miss [61] */
+#define PAGE_MISS_AP_RES	PAGE_MISS_EP_RES
+/* illegal page [58] */
+#define ILLEGAL_PAGE_AP_RES	ILLEGAL_PAGE_EP_RES
+
+#define PH_ADDR_IS_PRESENT(ap_res)	(((ap_res) & ~PH_ADDR_AP_RES) == 0)
+#define PH_ADDR_IS_MISS(ap_res)		((ap_res) & PAGE_MISS_AP_RES)
+#define PH_ADDR_IS_VALID(ap_res)	((PH_ADDR_IS_PRESENT(ap_res) ||	\
+						PH_ADDR_IS_MISS(ap_res)))
+#define PH_ADDR_IS_INVALID(ap_res)	((ap_res) & ILLEGAL_PAGE_AP_RES)
+#define GET_PROBE_PH_ADDR(ap_res)	((ap_res) & PH_ADDR_AP_RES)
+
+#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is native kernel without any virtualization */
+/* or it is native host kernel with virtualization support */
+#elif defined(CONFIG_KVM_GUEST_KERNEL)
+/* it is pure guest kernel (not paravirtualized based on pv_ops) */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* it is paravirtualized host and guest kernel */
+#include
+#else
+ #error	"Unknown virtualization type"
+#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/e2k/include/asm/tlbflush.h b/arch/e2k/include/asm/tlbflush.h
new file mode 100644
index 0000000..fabcd44
--- /dev/null
+++ b/arch/e2k/include/asm/tlbflush.h
@@ -0,0 +1,131 @@
+/* $Id: tlbflush.h,v 1.3 2006/09/12 13:12:54 tokot Exp $
+ * tlbflush.h: the functions and defines necessary to flush
+ * the TLB.
+ *
+ * Copyright 2001 Salavat S.
Guiliazov (atic@mcst.ru) + */ +#ifndef _E2K_TLBFLUSH_H +#define _E2K_TLBFLUSH_H + +#include + + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + */ + +extern void __flush_tlb_all(void); +extern void __flush_tlb_mm(struct mm_struct *mm); +extern void __flush_tlb_page(struct mm_struct *mm, e2k_addr_t addr); +extern void __flush_tlb_range(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void __flush_pmd_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __flush_tlb_pgtables(struct mm_struct *mm, e2k_addr_t start, + e2k_addr_t end); +extern void __flush_tlb_page_and_pgtables(struct mm_struct *mm, + unsigned long addr); + +extern void __flush_tlb_range_and_pgtables(struct mm_struct *mm, + e2k_addr_t start, e2k_addr_t end); +extern void __flush_tlb_address(e2k_addr_t addr); +extern void __flush_tlb_address_pgtables(e2k_addr_t addr); + +#ifdef CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT +extern void __flush_cpu_root_pt_mm(struct mm_struct *mm); +extern void __flush_cpu_root_pt(void); +#else /* ! CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ +#define __flush_cpu_root_pt_mm(mm) +#define __flush_cpu_root_pt() +#endif /* CONFIG_COPY_USER_PGD_TO_KERNEL_ROOT_PT */ + + +#if defined(CONFIG_PARAVIRT_GUEST) +/* it is paravirtualized host and guest kernel */ +#include +#elif defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native guest kernel */ +#include +#define flush_tlb_all kvm_flush_tlb_all +#define flush_tlb_mm kvm_flush_tlb_mm +#define flush_tlb_page(vma, addr) kvm_flush_tlb_page(vma, addr) +#define flush_tlb_range(vma, start, end) \ + kvm_flush_tlb_range(vma->vm_mm, start, end) +#define flush_tlb_kernel_range(start, end) \ + kvm_flush_tlb_kernel_range(start, end) +#define flush_pmd_tlb_range(vma, start, end) \ + kvm_flush_pmd_tlb_range(vma->vm_mm, start, end) +#define flush_tlb_mm_range(mm, start, end) \ + kvm_flush_tlb_range(mm, start, end) +#define flush_tlb_range_and_pgtables(mm, start, end) \ + kvm_flush_tlb_range_and_pgtables(mm, start, end) +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */
+/* it is native kernel without virtualization support */
+/* or native kernel with virtualization support */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb_all			__flush_tlb_all
+#define flush_tlb_mm			__flush_tlb_mm
+#define flush_tlb_page(vma, addr)	__flush_tlb_page(vma->vm_mm, addr)
+#define flush_tlb_range(vma, start, end)	\
+		__flush_tlb_range(vma->vm_mm, start, end)
+#define flush_pmd_tlb_range(vma, start, end)	\
+		__flush_pmd_tlb_range(vma->vm_mm, start, end)
+#define flush_tlb_mm_range(mm, start, end)	\
+		__flush_tlb_range(mm, start, end)
+#define flush_tlb_range_and_pgtables(mm, start, end)	\
+		__flush_tlb_range_and_pgtables(mm, start, end)
+
+#else /* CONFIG_SMP */
+
+#include
+
+extern void native_smp_flush_tlb_all(void);
+extern void native_smp_flush_tlb_mm(struct mm_struct *mm);
+extern void native_smp_flush_tlb_page(struct vm_area_struct *vma,
+		e2k_addr_t addr);
+extern void native_smp_flush_tlb_range(struct mm_struct *mm,
+		e2k_addr_t start, e2k_addr_t end);
+extern void native_smp_flush_pmd_tlb_range(struct mm_struct *mm,
+		e2k_addr_t start, e2k_addr_t end);
+extern void native_smp_flush_tlb_range_and_pgtables(struct mm_struct *mm,
+		e2k_addr_t start, e2k_addr_t end);
+
+#define flush_tlb_all			native_smp_flush_tlb_all
+#define flush_tlb_mm			native_smp_flush_tlb_mm
+#define flush_tlb_page(vma, addr)	native_smp_flush_tlb_page(vma, addr)
+#define flush_tlb_range(vma, start, end)	\
+		native_smp_flush_tlb_range(vma->vm_mm, start, end)
+#define flush_pmd_tlb_range(vma, start, end)	\
+		native_smp_flush_pmd_tlb_range(vma->vm_mm, start, end)
+#define flush_tlb_mm_range(mm, start, end)	\
+		native_smp_flush_tlb_range(mm, start, end)
+#define flush_tlb_range_and_pgtables(mm, start, end)	\
+		native_smp_flush_tlb_range_and_pgtables(mm, start, end)
+
+#endif /* CONFIG_SMP */
+
+#define flush_tlb_kernel_range(start, end)	flush_tlb_all()
+
+#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */
+
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+		unsigned long address, pte_t *pte)
+{
+}
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd)
+{
+}
+
+#include
+
+#endif /* _E2K_TLBFLUSH_H */
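Whichever branch above is compiled in (UP, SMP, or KVM guest), callers see the same interface. A minimal sketch of the typical call after rewriting PTEs in a range (not part of the patch; parameter names are illustrative):

static inline void example_flush_after_pte_rewrite(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	/* flush the data translations and the cached page-table
	 * levels for the range, on all CPUs running this mm */
	flush_tlb_range_and_pgtables(mm, start, end);
}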
diff --git a/arch/e2k/include/asm/topology.h b/arch/e2k/include/asm/topology.h
new file mode 100644
index 0000000..2353ab2
--- /dev/null
+++ b/arch/e2k/include/asm/topology.h
@@ -0,0 +1,173 @@
+#ifndef _E2K_TOPOLOGY_H_
+#define _E2K_TOPOLOGY_H_
+
+#include
+#ifdef CONFIG_NUMA
+#include
+#endif /* CONFIG_NUMA */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Max CPUs; needed to allocate static arrays of structures */
+#define MAX_NR_CPUS	CONFIG_NR_CPUS
+
+/*
+ * IO links/controllers/buses topology:
+ * each node of e2k machines can have from 1 to MAX_NODE_IOLINKS IO links,
+ * each of which can be connected to an IOHUB or RDMA.
+ * The actual number of IO links on a node is described by the following
+ * macros for every machine type
+ */
+
+#define MAX_NODE_IOLINKS	E2K_MAX_NODE_IOLINKS
+#define E2K_NODE_IOLINKS	(machine.node_iolinks)
+#define MACH_NODE_NUMIOLINKS	E2K_NODE_IOLINKS
+
+/*
+ * An IOLINK can be represented by its global domain number (unique in the
+ * system and corresponding to a bit number in the iolinkmask_t bit map)
+ * or as a pair: node # and local link number on the node.
+ * It is necessary to convert between the two representations
+ */
+
+#define node_iolink_to_domain(node, link)	\
+		((node) * (E2K_NODE_IOLINKS) + (link))
+#define node_iohub_to_domain(node, link)	\
+		node_iolink_to_domain((node), (link))
+#define node_rdma_to_domain(node, link)	\
+		node_iolink_to_domain((node), (link))
+#define iolink_domain_to_node(domain)	\
+		((domain) / (E2K_NODE_IOLINKS))
+#define iolink_domain_to_link(domain)	\
+		((domain) % (E2K_NODE_IOLINKS))
+#define iohub_domain_to_node(domain)	iolink_domain_to_node(domain)
+#define iohub_domain_to_link(domain)	iolink_domain_to_link(domain)
+
+#define for_each_iolink_of_node(link)	\
+		for ((link) = 0; (link) < E2K_NODE_IOLINKS; (link) ++)
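A worked example of the domain arithmetic above (not part of the patch): with E2K_NODE_IOLINKS == 2, node_iolink_to_domain(2, 1) gives domain 2*2+1 = 5, and 5 decodes back to node 5/2 = 2, link 5%2 = 1. A hypothetical round-trip check:

static inline int iolink_roundtrip_ok(int node, int link)
{
	int domain = node_iolink_to_domain(node, link);

	return iolink_domain_to_node(domain) == node &&
			iolink_domain_to_link(domain) == link;
}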
+#define pcibus_to_node(bus)	__pcibus_to_node(bus)
+#define pcibus_to_link(bus)	__pcibus_to_link(bus)
+
+#define mach_early_iohub_online(node, link)	\
+		e2k_early_iohub_online((node), (link))
+#define mach_early_sic_init()
+
+extern int __init_recv cpuid_to_cpu(int cpuid);
+
+#ifdef CONFIG_L_LOCAL_APIC
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+#define cpu_to_cpuid(cpu)	early_per_cpu(x86_cpu_to_apicid, cpu)
+#else
+/*
+ * That case wouldn't work, we should delete CONFIG_L_LOCAL_APIC in future
+ */
+#define cpu_to_cpuid(cpu)	BUILD_BUG()
+#endif
+
+#ifdef CONFIG_NUMA
+extern void __init numa_init(void);
+
+extern s16 __apicid_to_node[NR_CPUS];
+
+extern int __nodedata __cpu_to_node[NR_CPUS];
+#define cpu_to_node(cpu)	__cpu_to_node[cpu]
+
+extern cpumask_t __nodedata __node_to_cpu_mask[MAX_NUMNODES];
+#define node_to_cpu_mask(node)	__node_to_cpu_mask[node]
+
+#define numa_node_id()	(cpu_to_node(raw_smp_processor_id()))
+
+#define __node_to_cpumask_and(node, cpu_mask)	\
+({	\
+	cpumask_t cpumask = node_to_cpu_mask(node);	\
+	cpumask_and(&cpumask, &cpumask, &cpu_mask);	\
+	cpumask;	\
+})
+
+#define node_to_cpumask(node)	\
+		__node_to_cpumask_and(node, *cpu_online_mask)
+#define node_to_present_cpumask(node)	\
+		__node_to_cpumask_and(node, *cpu_present_mask)
+
+#define __node_to_first_cpu(node, cpu_mask)	\
+({	\
+	cpumask_t node_cpumask;	\
+	node_cpumask = __node_to_cpumask_and(node, cpu_mask);	\
+	cpumask_first((const struct cpumask *)&node_cpumask);	\
+})
+
+#define node_to_first_cpu(node)	\
+		__node_to_first_cpu(node, *cpu_online_mask)
+#define node_to_first_present_cpu(node)	\
+		__node_to_first_cpu(node, *cpu_present_mask)
+
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == NUMA_NO_NODE ?	\
+					cpu_online_mask :		\
+					cpumask_of_node(pcibus_to_node(bus)))
+
+/* Mappings between node number and cpus on that node. */
+extern struct cpumask node_to_cpumask_map[MAX_NUMNODES];
+
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return &node_to_cpumask_map[node];
+}
+
+extern void setup_node_to_cpumask_map(void);
+
+extern nodemask_t __nodedata node_has_dup_kernel_map;
+extern atomic_t __nodedata node_has_dup_kernel_num;
+extern int __nodedata all_nodes_dup_kernel_nid[/*MAX_NUMNODES*/];
+
+#define node_dup_kernel_nid(nid)	(all_nodes_dup_kernel_nid[nid])
+#define THERE_IS_DUP_KERNEL	atomic_read(&node_has_dup_kernel_num)
+#define DUP_KERNEL_NUM	\
+		(atomic_read(&node_has_dup_kernel_num) + 1)
+
+#define topology_physical_package_id(cpu)	cpu_to_node(cpu)
+#else /* ! CONFIG_NUMA */
+
+#define numa_node_id()	0
+
+static inline void numa_init(void) { }
+
+#define node_has_dup_kernel_map		nodemask_of_node(0)
+#define node_has_dup_kernel_num		0
+#define node_dup_kernel_nid(nid)	0
+#define THERE_IS_DUP_KERNEL		0
+
+#define node_to_first_cpu(node)		0
+#define node_to_first_present_cpu(node)	0
+#define node_to_present_cpumask(node)	(*cpu_present_mask)
+#define node_to_possible_cpumask(node)	cpumask_of_cpu(0)
+
+#define topology_physical_package_id(cpu)	0
+#endif /* CONFIG_NUMA */
+
+#define node_has_online_mem(nid) (nodes_phys_mem[nid].pfns_num != 0)
+
+#define topology_core_id(cpu)		(cpu)
+#define topology_core_cpumask(cpu)	cpumask_of_node(cpu_to_node(cpu))
+
+#include
+
+static inline void arch_fix_phys_package_id(int num, u32 slot)
+{
+}
+
+static inline int is_duplicated_code(unsigned long ip)
+{
+	return ip >= (unsigned long) _stext && ip < (unsigned long) _etext;
+}
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
+#endif /* _E2K_TOPOLOGY_H_ */
diff --git a/arch/e2k/include/asm/trace-clock.h b/arch/e2k/include/asm/trace-clock.h
new file mode 100644
index 0000000..b5576ca
--- /dev/null
+++ b/arch/e2k/include/asm/trace-clock.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2008, Mathieu Desnoyers
+ *
+ * Trace clock definitions for E2K.
+ */
+
+#ifndef _ASM_E2K_TRACE_CLOCK_H
+#define _ASM_E2K_TRACE_CLOCK_H
+
+#include
+
+static inline u32 trace_clock_read32(void)
+{
+	return get_cycles();
+}
+
+static inline u64 trace_clock_read64(void)
+{
+	return get_cycles();
+}
+
+static inline unsigned int trace_clock_frequency(void)
+{
+	return get_cycles_rate();
+}
+
+static inline u32 trace_clock_freq_scale(void)
+{
+	return 1;
+}
+
+static inline void get_trace_clock(void)
+{
+}
+
+static inline void put_trace_clock(void)
+{
+}
+
+static inline void set_trace_clock_is_sync(int state)
+{
+}
+#endif /* _ASM_E2K_TRACE_CLOCK_H */
diff --git a/arch/e2k/include/asm/trace-mmu-dtlb-v2.h b/arch/e2k/include/asm/trace-mmu-dtlb-v2.h
new file mode 100644
index 0000000..bcee44a
--- /dev/null
+++ b/arch/e2k/include/asm/trace-mmu-dtlb-v2.h
@@ -0,0 +1,37 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM e2k
+
+#if !defined(_TRACE_E2K_MMU_DTLB_V2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_E2K_MMU_DTLB_V2_H
+
+#include
+
+#define E2K_TRACE_PRINT_DTLB_ENTRY_V2(entry) \
+	((entry & DTLB_ENTRY_ERROR_MASK_V2) ?
\ + __print_flags(entry & DTLB_ENTRY_ERROR_MASK_V2, "|", \ + { DTLB_ENTRY_PH_BOUND_V2, "ph_bound" }, \ + { DTLB_ENTRY_PAGE_MISS_V2, "page_miss" }, \ + { DTLB_ENTRY_PROBE_DISABLED_V2, \ + "DTLB probe disabled" }, \ + { DTLB_ENTRY_RES_BITS_V2, "res_bits" } \ + ) : \ + __print_flags(entry & ~DTLB_ENTRY_PHA_V2, "|", \ + { DTLB_ENTRY_WR_V2, "writable" }, \ + { DTLB_ENTRY_NON_EX_U_S_V2, "Non_ex-U_S" }, \ + { DTLB_ENTRY_PWT_V2, "PWT" }, \ + { DTLB_ENTRY_PCD1_V2, "CD1" }, \ + { DTLB_ENTRY_PCD2_V2, "CD2" }, \ + { DTLB_ENTRY_D_V2, "dirty" }, \ + { DTLB_ENTRY_G_V2, "global" }, \ + { DTLB_ENTRY_NWA_V2, "NWA" }, \ + { DTLB_ENTRY_VVA_V2, "valid" }, \ + { DTLB_ENTRY_PV_V2, "privileged" }, \ + { DTLB_ENTRY_INT_PR_NON_EX_V2, \ + "int_pr-non_ex" }, \ + { DTLB_ENTRY_INTL_RD_V2, "intl_rd" }, \ + { DTLB_ENTRY_INTL_WR_V2, "intl_wr" }, \ + { DTLB_ENTRY_WP_V2, "WP" }, \ + { DTLB_ENTRY_UC_V2, "UC" } \ + )) + +#endif /* _TRACE_E2K_MMU_DTLB_V2_H */ diff --git a/arch/e2k/include/asm/trace-mmu-dtlb-v6.h b/arch/e2k/include/asm/trace-mmu-dtlb-v6.h new file mode 100644 index 0000000..8dc7f19 --- /dev/null +++ b/arch/e2k/include/asm/trace-mmu-dtlb-v6.h @@ -0,0 +1,42 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_MMU_DTLB_V6_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_MMU_DTLB_V6_H + +#include + +#define E2K_TRACE_PRINT_DTLB_ENTRY_V6(entry) \ + ((entry & DTLB_ENTRY_ERROR_MASK_V6) ? \ + __print_flags(entry & DTLB_ENTRY_ERROR_MASK_V6, "|", \ + { DTLB_ENTRY_PH_BOUND_V6, "ph_bound" }, \ + { DTLB_ENTRY_PAGE_MISS_V6, "page_miss" }, \ + { DTLB_ENTRY_PROBE_SUCCESSFUL_V6, \ + "DTLB probe successful" }, \ + { DTLB_ENTRY_RES_BITS_V6, "res_bits" } \ + ) : \ + ({ __print_flags(entry & ~DTLB_ENTRY_PHA_V6, "|", \ + { DTLB_ENTRY_WR_exc_V6, "writable" }, \ + { DTLB_ENTRY_PV_or_U_S_V6, "Priv/U_S" }, \ + { DTLB_ENTRY_D_V6, "dirty" }, \ + { DTLB_ENTRY_G_V6, "global" }, \ + { DTLB_ENTRY_NWA_V6, "NWA" }, \ + { DTLB_ENTRY_VVA_V6, "valid" }, \ + { DTLB_ENTRY_NON_EX_V6, "non_ex" }, \ + { DTLB_ENTRY_INT_PR_V6, "int_pr" }, \ + { DTLB_ENTRY_INTL_RD_V6, "intl_rd" }, \ + { DTLB_ENTRY_INTL_WR_V6, "intl_wr" }, \ + { DTLB_ENTRY_WR_int_V6, "WR_int" } \ + ); \ + __print_symbolic(DTLB_ENTRY_MT_exc_GET_VAL(entry), \ + { GEN_CACHE_MT, "General Cacheable" }, \ + { GEN_NON_CACHE_MT, "General nonCacheable" }, \ + { EXT_PREFETCH_MT, "External Prefetchable" }, \ + { EXT_NON_PREFETCH_MT, "External nonPrefetchable" }, \ + { EXT_CONFIG_MT, "External Configuration" }, \ + { 2, "Reserved-2" }, \ + { 3, "Reserved-3" }, \ + { 5, "Reserved-5" }); \ + })) + +#endif /* _TRACE_E2K_MMU_DTLB_V6_H */ diff --git a/arch/e2k/include/asm/trace.h b/arch/e2k/include/asm/trace.h new file mode 100644 index 0000000..b26d62c --- /dev/null +++ b/arch/e2k/include/asm/trace.h @@ -0,0 +1,401 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define E2K_TC_TYPE_STORE (1ULL << 17) +#define E2K_TC_TYPE_S_F (1ULL << 19) +#define E2K_TC_TYPE_ROOT (1ULL << 27) +#define E2K_TC_TYPE_SCAL (1ULL << 28) +#define E2K_TC_TYPE_SRU (1ULL << 29) +#define E2K_TC_TYPE_SPEC (1ULL << 30) +#define E2K_TC_TYPE_PM (1ULL << 31) +#define E2K_TC_TYPE_NUM_ALIGN (1ULL << 50) +#define E2K_TC_TYPE_EMPT (1ULL << 51) +#define E2K_TC_TYPE_CLW (1ULL << 52) + +#define E2K_TC_TYPE (E2K_TC_TYPE_STORE | E2K_TC_TYPE_S_F | E2K_TC_TYPE_ROOT | \ + E2K_TC_TYPE_SCAL | E2K_TC_TYPE_SRU | 
E2K_TC_TYPE_SPEC | \ + E2K_TC_TYPE_PM | E2K_TC_TYPE_NUM_ALIGN | \ + E2K_TC_TYPE_EMPT | E2K_TC_TYPE_CLW) + +#define E2K_FAULT_TYPE_GLOBAL_SP (1ULL << 0) +#define E2K_FAULT_TYPE_PAGE_BOUND (1ULL << 1) +#define E2K_FAULT_TYPE_EXC_MEM_LOCK (1ULL << 2) +#define E2K_FAULT_TYPE_PH_PR_PAGE (1ULL << 3) +#define E2K_FAULT_TYPE_IO_PAGE (1ULL << 4) +#define E2K_FAULT_TYPE_ISYS_PAGE (1ULL << 5) +#define E2K_FAULT_TYPE_PROT_PAGE (1ULL << 6) +#define E2K_FAULT_TYPE_PRIV_PAGE (1ULL << 7) +#define E2K_FAULT_TYPE_ILLEGAL_PAGE (1ULL << 8) +#define E2K_FAULT_TYPE_NWRITE_PAGE (1ULL << 9) +#define E2K_FAULT_TYPE_PAGE_MISS (1ULL << 10) +#define E2K_FAULT_TYPE_PH_BOUND (1ULL << 11) +#define E2K_FAULT_TYPE_INTL_RES_BITS (1ULL << 12) + +TRACE_EVENT( + trap_cellar, + + TP_PROTO(const trap_cellar_t *tc, int nr), + + TP_ARGS(tc, nr), + + TP_STRUCT__entry( + __field( int, nr ) + __field( u64, address ) + __field( u64, data_val ) + __field( u64, data_ext_val ) + __field( u8, data_tag ) + __field( u8, data_ext_tag ) + __field( u64, condition ) + __field( u64, mask ) + ), + + TP_fast_assign( + __entry->nr = nr; + __entry->address = tc->address; + load_value_and_tagd(&tc->data, + &__entry->data_val, &__entry->data_tag); + load_value_and_tagd(&tc->data_ext, + &__entry->data_ext_val, &__entry->data_ext_tag); + __entry->condition = AW(tc->condition); + __entry->mask = AW(tc->mask); + ), + + TP_printk("\n" + "Entry %d: address 0x%llx data %hhx 0x%llx data_ext %hhx 0x%llx\n" + "Register: address=0x%02hhx, vl=%d, vr=%d\n" + "Opcode: fmt=%d, n_prot=%d, fmtc=%d\n" + "Info1: chan=%d, mas=0x%02hhx, miss_lvl=%d, rcv=%d, dst_rcv=0x%03x\n" + "Info2: %s\n" + "Ftype: %s" + , + __entry->nr, __entry->address, __entry->data_tag, + __entry->data_val, __entry->data_ext_tag, __entry->data_ext_val, + AS((tc_cond_t) __entry->condition).address, + AS((tc_cond_t) __entry->condition).vl, + AS((tc_cond_t) __entry->condition).vr, + AS((tc_cond_t) __entry->condition).fmt, + AS((tc_cond_t) __entry->condition).npsp, + AS((tc_cond_t) __entry->condition).fmtc, + AS((tc_cond_t) __entry->condition).chan, + AS((tc_cond_t) __entry->condition).mas, + AS((tc_cond_t) __entry->condition).miss_lvl, + AS((tc_cond_t) __entry->condition).rcv, + AS((tc_cond_t) __entry->condition).dst_rcv, + __print_flags(__entry->condition & E2K_TC_TYPE, "|", + { E2K_TC_TYPE_STORE, "store" }, + { E2K_TC_TYPE_S_F, "s_f" }, + { E2K_TC_TYPE_ROOT, "root" }, + { E2K_TC_TYPE_SCAL, "scal" }, + { E2K_TC_TYPE_SRU, "sru" }, + { E2K_TC_TYPE_SPEC, "spec" }, + { E2K_TC_TYPE_PM, "pm" }, + { E2K_TC_TYPE_NUM_ALIGN, "num_align" }, + { E2K_TC_TYPE_EMPT, "empt" }, + { E2K_TC_TYPE_CLW, "clw" } + ), + __print_flags(AS((tc_cond_t) __entry->condition).fault_type, "|", + { E2K_FAULT_TYPE_GLOBAL_SP, "global_sp" }, + { E2K_FAULT_TYPE_PAGE_BOUND, "page_bound" }, + { E2K_FAULT_TYPE_EXC_MEM_LOCK, "exc_mem_lock" }, + { E2K_FAULT_TYPE_PH_PR_PAGE, "ph_pr_page" }, + { E2K_FAULT_TYPE_IO_PAGE, "io_page" }, + { E2K_FAULT_TYPE_ISYS_PAGE, "isys_page" }, + { E2K_FAULT_TYPE_PROT_PAGE, "prot_page" }, + { E2K_FAULT_TYPE_PRIV_PAGE, "priv_page" }, + { E2K_FAULT_TYPE_ILLEGAL_PAGE, "illegal_page" }, + { E2K_FAULT_TYPE_NWRITE_PAGE, "nwrite_page" }, + { E2K_FAULT_TYPE_PAGE_MISS, "page_miss" }, + { E2K_FAULT_TYPE_PH_BOUND, "ph_bound" }, + { E2K_FAULT_TYPE_INTL_RES_BITS, "intl_res_bits" } + )) +); + +#define mmu_print_pt_flags(entry, print, mmu_pt_v6) \ + (mmu_pt_v6) ? 
E2K_TRACE_PRINT_PT_V6_FLAGS(entry, print) \ + : \ + E2K_TRACE_PRINT_PT_V2_FLAGS(entry, print) +#define print_pt_flags(entry, print) \ + mmu_print_pt_flags(entry, print, MMU_IS_PT_V6()) + +#define E2K_TRACE_PRINT_PT_FLAGS(entry, print) print_pt_flags(entry, print) + + +#define mmu_print_dtlb_entry(entry, mmu_dtlb_v6) \ + ((mmu_dtlb_v6) ? E2K_TRACE_PRINT_DTLB_ENTRY_V6(entry) \ + : \ + E2K_TRACE_PRINT_DTLB_ENTRY_V2(entry)) +#define print_dtlb_entry(entry) \ + mmu_print_dtlb_entry(entry, MMU_IS_DTLB_V6()) + +#define E2K_TRACE_PRINT_DTLB(entry) print_dtlb_entry(entry) + +TRACE_EVENT( + unhandled_page_fault, + + TP_PROTO(unsigned long address), + + TP_ARGS(address), + + TP_STRUCT__entry( + __field( unsigned long, address ) + __field( u64, dtlb_entry ) + __field( u64, dtlb_pud ) + __field( u64, dtlb_pmd ) + __field( u64, dtlb_pte ) + __field( u64, pgd ) + __field( u64, pud ) + __field( u64, pmd ) + __field( u64, pte ) + __field( int, pt_level ) + ), + + TP_fast_assign( + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + __entry->address = address; + + /* + * Save page table entries + */ + __entry->pt_level = 0; + + if (address < TASK_SIZE) { + struct mm_struct *mm = current->mm; + + pgdp = pgd_offset(mm, address); + + __entry->pgd = pgd_val(*pgdp); + __entry->pt_level = 1; + + if (!pgd_huge(*pgdp) && !pgd_none(*pgdp) && + !pgd_bad(*pgdp)) { + pudp = pud_offset(pgdp, address); + + __entry->pud = pud_val(*pudp); + __entry->pt_level = 2; + + if (!pud_huge(*pudp) && !pud_none(*pudp) && + !pud_bad(*pudp)) { + pmdp = pmd_offset(pudp, address); + + __entry->pmd = pmd_val(*pmdp); + __entry->pt_level = 3; + + if (!pmd_huge(*pmdp) && + !pmd_none(*pmdp) && + !pmd_bad(*pmdp)) { + ptep = pte_offset_map(pmdp, + address); + + __entry->pte = pte_val(*ptep); + __entry->pt_level = 4; + } + } + } + } else { + pgdp = pgd_offset_k(address); + + __entry->pgd = pgd_val(*pgdp); + __entry->pt_level = 1; + + if (!kernel_pgd_huge(*pgdp) && + !pgd_none(*pgdp) && !pgd_bad(*pgdp)) { + pudp = pud_offset(pgdp, address); + + __entry->pud = pud_val(*pudp); + __entry->pt_level = 2; + + if (!kernel_pud_huge(*pudp) && + !pud_none(*pudp) && !pud_bad(*pudp)) { + pmdp = pmd_offset(pudp, address); + + __entry->pmd = pmd_val(*pmdp); + __entry->pt_level = 3; + + if (!kernel_pmd_huge(*pmdp) && + !pmd_none(*pmdp) && + !pmd_bad(*pmdp)) { + ptep = pte_offset_kernel(pmdp, + address); + + __entry->pte = pte_val(*ptep); + __entry->pt_level = 4; + } + } + } + } + + /* + * Save DTLB entries. + * + * Do not access non-existent entries to avoid + * creating "empty" records in DTLB for no reason. + */ + __entry->dtlb_entry = get_MMU_DTLB_ENTRY(address); + + if (__entry->pt_level >= 2) + __entry->dtlb_pud = get_MMU_DTLB_ENTRY( + pud_virt_offset(address)); + + if (__entry->pt_level >= 3) + __entry->dtlb_pmd = get_MMU_DTLB_ENTRY( + pmd_virt_offset(address)); + + if (__entry->pt_level >= 4) + __entry->dtlb_pte = get_MMU_DTLB_ENTRY( + pte_virt_offset(address)); + ), + + TP_printk("\n" + "Page table for address 0x%lx (all f's are returned if the entry has not been read)\n" + " pgd 0x%llx: %s\n" + " Access mode: %s%s\n" + " pud 0x%llx: %s\n" + " Access mode: %s%s\n" + " pmd 0x%llx: %s\n" + " Access mode: %s%s\n" + " pte 0x%llx: %s\n" + " Access mode: %s%s\n" + "Probed DTLB entries:\n" + " pud address entry 0x%llx: %s\n" + " pmd address entry 0x%llx: %s\n" + " pte address entry 0x%llx: %s\n" + " address entry 0x%llx: %s" + , + __entry->address, + (__entry->pt_level >= 1) ?
__entry->pgd : -1ULL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pgd, __entry->pt_level >= 1), + (__entry->pt_level >= 2) ? __entry->pud : -1ULL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pud, __entry->pt_level >= 2), + (__entry->pt_level >= 3) ? __entry->pmd : -1ULL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pmd, __entry->pt_level >= 3), + (__entry->pt_level >= 4) ? __entry->pte : -1ULL, + E2K_TRACE_PRINT_PT_FLAGS(__entry->pte, __entry->pt_level >= 4), + (__entry->pt_level >= 2) ? __entry->dtlb_pud : -1ULL, + (__entry->pt_level >= 2) ? + E2K_TRACE_PRINT_DTLB(__entry->dtlb_pud) : "(not read)", + (__entry->pt_level >= 3) ? __entry->dtlb_pmd : -1ULL, + (__entry->pt_level >= 3) ? + E2K_TRACE_PRINT_DTLB(__entry->dtlb_pmd) : "(not read)", + (__entry->pt_level >= 4) ? __entry->dtlb_pte : -1ULL, + (__entry->pt_level >= 4) ? + E2K_TRACE_PRINT_DTLB(__entry->dtlb_pte) : "(not read)", + __entry->dtlb_entry, + E2K_TRACE_PRINT_DTLB(__entry->dtlb_entry)) +); + +#define TIRHI_EXC_MASK 0x00000fffffffffffULL +#define TIRHI_ALS_MASK 0x0003f00000000000ULL +#define TIRHI_ALS_SHIFT 44ULL +#define TIRHI_MOVA_MASK 0x00f0000000000000ULL +#define TIRHI_MOVA0_MASK 0x0010000000000000ULL +#define TIRHI_MOVA1_MASK 0x0020000000000000ULL +#define TIRHI_MOVA2_MASK 0x0040000000000000ULL +#define TIRHI_MOVA3_MASK 0x0080000000000000ULL + +#define E2K_TRACE_PRINT_TIR_HI(entry) \ + (entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK)) ? \ + __print_flags(entry & (TIRHI_EXC_MASK | TIRHI_MOVA_MASK), "|", \ + { TIRHI_MOVA0_MASK, "mova0" }, \ + { TIRHI_MOVA1_MASK, "mova1" }, \ + { TIRHI_MOVA2_MASK, "mova2" }, \ + { TIRHI_MOVA3_MASK, "mova3" }, \ + { exc_illegal_opcode_mask, "illegal_opcode" }, \ + { exc_priv_action_mask, "priv_action" }, \ + { exc_fp_disabled_mask, "fp_disabled" }, \ + { exc_fp_stack_u_mask, "fp_stack_u" }, \ + { exc_d_interrupt_mask, "d_interrupt" }, \ + { exc_diag_ct_cond_mask, "diag_ct_cond" }, \ + { exc_diag_instr_addr_mask, "diag_instr_addr" }, \ + { exc_illegal_instr_addr_mask, "illegal_instr_addr" }, \ + { exc_instr_debug_mask, "instr_debug" }, \ + { exc_window_bounds_mask, "window_bounds" }, \ + { exc_user_stack_bounds_mask, "user_stack_bounds" }, \ + { exc_proc_stack_bounds_mask, "proc_stack_bounds" }, \ + { exc_chain_stack_bounds_mask, "chain_stack_bounds" }, \ + { exc_fp_stack_o_mask, "fp_stack_o" }, \ + { exc_diag_cond_mask, "diag_cond" }, \ + { exc_diag_operand_mask, "diag_operand" }, \ + { exc_illegal_operand_mask, "illegal_operand" }, \ + { exc_array_bounds_mask, "array_bounds" }, \ + { exc_access_rights_mask, "access_rights" }, \ + { exc_addr_not_aligned_mask, "addr_not_aligned" }, \ + { exc_instr_page_miss_mask, "instr_page_miss" }, \ + { exc_instr_page_prot_mask, "instr_page_prot" }, \ + { exc_ainstr_page_miss_mask, "ainstr_page_miss" }, \ + { exc_ainstr_page_prot_mask, "ainstr_page_prot" }, \ + { exc_last_wish_mask, "last_wish" }, \ + { exc_base_not_aligned_mask, "base_not_aligned" }, \ + { exc_software_trap_mask, "software_trap" }, \ + { exc_data_debug_mask, "data_debug" }, \ + { exc_data_page_mask, "data_page" }, \ + { exc_recovery_point_mask, "recovery_point" }, \ + { exc_interrupt_mask, "interrupt" }, \ + { exc_nm_interrupt_mask, "nm_interrupt" }, \ + { exc_div_mask, "div" }, \ + { exc_fp_mask, "fp" }, \ + { exc_mem_lock_mask, "mem_lock" }, \ + { exc_mem_lock_as_mask, "mem_lock_as" }, \ + { exc_mem_error_out_cpu_mask, "mem_error_out_cpu" }, \ + { exc_mem_error_MAU_mask, "mem_error_MAU" }, \ + { exc_mem_error_L2_mask, "mem_error_L2" }, \ + { exc_mem_error_L1_35_mask, "mem_error_L1_35" }, \ + { exc_mem_error_L1_02_mask, 
"mem_error_L1_02" }, \ + { exc_mem_error_ICACHE_mask, "mem_error_ICACHE" } \ + ) : "(none)" + +TRACE_EVENT( + tir, + + TP_PROTO(u64 tir_lo, u64 tir_hi), + + TP_ARGS(tir_lo, tir_hi), + + TP_STRUCT__entry( + __field( u64, tir_lo ) + __field( u64, tir_hi ) + ), + + TP_fast_assign( + __entry->tir_lo = tir_lo; + __entry->tir_hi = tir_hi; + ), + + TP_printk("\n" + "TIR%lld: ip 0x%llx, als 0x%llx\n" + " exceptions: %s" + , + __entry->tir_hi >> 56, + __entry->tir_lo & E2K_VA_MASK, + (__entry->tir_hi & TIRHI_ALS_MASK) >> TIRHI_ALS_SHIFT, + E2K_TRACE_PRINT_TIR_HI(__entry->tir_hi) + ) +); + + +#endif /* _TRACE_E2K_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/e2k/include/asm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include diff --git a/arch/e2k/include/asm/trace_clock.h b/arch/e2k/include/asm/trace_clock.h new file mode 100644 index 0000000..4bdc71b --- /dev/null +++ b/arch/e2k/include/asm/trace_clock.h @@ -0,0 +1,12 @@ +#ifndef _ASM_E2K_TRACE_CLOCK_H +#define _ASM_E2K_TRACE_CLOCK_H + +#include +#include + +extern notrace u64 trace_clock_e2k_clkr(void); + +#define ARCH_TRACE_CLOCKS \ + { trace_clock_e2k_clkr, "e2k-clkr", .in_ns = 0 }, + +#endif /* _ASM_E2K_TRACE_CLOCK_H */ diff --git a/arch/e2k/include/asm/trace_pgtable-v2.h b/arch/e2k/include/asm/trace_pgtable-v2.h new file mode 100644 index 0000000..530b2e0 --- /dev/null +++ b/arch/e2k/include/asm/trace_pgtable-v2.h @@ -0,0 +1,38 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_PGTABLE_V2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_PGTABLE_V2_H + +#include + +#define E2K_TRACE_PRINT_PT_V2_FLAGS(entry, print) \ + (print) ? (__print_flags(entry & (_PAGE_P_V2 | _PAGE_VALID_V2 | \ + _PAGE_HUGE_V2 | _PAGE_G_V2 | \ + _PAGE_NWA_V2 | _PAGE_AVAIL_V2 | \ + _PAGE_INT_PR_V2), "|", \ + { _PAGE_P_V2 , "present" }, \ + { _PAGE_VALID_V2 , "valid" }, \ + { _PAGE_HUGE_V2, "large" }, \ + { _PAGE_G_V2, "global" }, \ + { _PAGE_NWA_V2, "not_write_address" }, \ + { _PAGE_AVAIL_V2, "OS" }, \ + { _PAGE_INT_PR_V2, "integer_protect" } \ + )) : "(none)", \ + (print) ? (__print_flags(entry & (_PAGE_PV_V2 | _PAGE_NON_EX_V2 | \ + _PAGE_W_V2 | _PAGE_D_V2 | \ + _PAGE_A_HW_V2), "|", \ + { _PAGE_PV_V2, "privileged" }, \ + { _PAGE_NON_EX_V2, "non_executable" }, \ + { _PAGE_W_V2, "writable" }, \ + { _PAGE_D_V2, "dirty" }, \ + { _PAGE_A_HW_V2, "accessed" } \ + )) : "(none)", \ + (print && entry != -1ULL && (entry & ~_PAGE_VALID_V2)) ? \ + (((entry & _PAGE_CD_MASK_V2) != _PAGE_CD_MASK_V2) ? \ + "|cacheable" \ + : ((entry & _PAGE_PWT_V2) ? \ + "|uncacheable" : "|write_combine")) \ + : "" \ + +#endif /* _TRACE_E2K_PGTABLE_V2_H */ diff --git a/arch/e2k/include/asm/trace_pgtable-v6.h b/arch/e2k/include/asm/trace_pgtable-v6.h new file mode 100644 index 0000000..a21938c --- /dev/null +++ b/arch/e2k/include/asm/trace_pgtable-v6.h @@ -0,0 +1,48 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM e2k + +#if !defined(_TRACE_E2K_PGTABLE_V6_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_E2K_PGTABLE_V6_H + +#include + +#define E2K_TRACE_PRINT_PT_V6_FLAGS(entry, print) \ + (print) ? 
(__print_flags(entry & (_PAGE_P_V6 | _PAGE_VALID_V6 | \ + _PAGE_HUGE_V6 | _PAGE_G_V6 | \ + _PAGE_NWA_V6 | \ + _PAGE_SW1_V6 | _PAGE_SW2_V6 | \ + _PAGE_INT_PR_V6), "|", \ + { _PAGE_P_V6 , "present" }, \ + { _PAGE_VALID_V6 , "valid" }, \ + { _PAGE_HUGE_V6, "large" }, \ + { _PAGE_G_V6, "global" }, \ + { _PAGE_NWA_V6, "not_write_address" }, \ + { _PAGE_SW1_V6, "OS-1" }, \ + { _PAGE_SW2_V6, "OS-2" }, \ + { _PAGE_INT_PR_V6, "integer_protect" } \ + )) : "(none)", \ + (print) ? (__print_flags(entry & (_PAGE_PV_V6 | _PAGE_NON_EX_V6 | \ + _PAGE_W_V6 | _PAGE_D_V6 | \ + _PAGE_A_HW_V6), "|", \ + { _PAGE_PV_V6, "privileged" }, \ + { _PAGE_NON_EX_V6, "non_executable" }, \ + { _PAGE_W_V6, "writable" }, \ + { _PAGE_D_V6, "dirty" }, \ + { _PAGE_A_HW_V6, "accessed" } \ + )) : "(none)", \ + (print && entry != -1ULL && (entry & ~_PAGE_VALID_V6)) ? \ + (__print_symbolic(_PAGE_MT_GET_VAL(entry), \ + { GEN_CACHE_MT, "General Cacheable" }, \ + { GEN_NON_CACHE_MT, "General nonCacheable" }, \ + { GEN_NON_CACHE_ORDERED_MT, \ + "General nonCacheable Ordered (same as GnC in hardware)" }, \ + { EXT_PREFETCH_MT, "External Prefetchable" }, \ + { EXT_NON_PREFETCH_MT, "External nonPrefetchable" }, \ + { EXT_CONFIG_MT, "External Configuration" }, \ + { EXT_CACHE_MT, "External Cached (same as GC in hardware)" }, \ + { 2, "Reserved-2" }, \ + { 3, "Reserved-3" }, \ + { 5, "Reserved-5" })) \ + : "" \ + +#endif /* _TRACE_E2K_PGTABLE_V6_H */ diff --git a/arch/e2k/include/asm/trap_def.h b/arch/e2k/include/asm/trap_def.h new file mode 100644 index 0000000..9b00bd7 --- /dev/null +++ b/arch/e2k/include/asm/trap_def.h @@ -0,0 +1,275 @@ +/* + * + * Copyright (C) 2001 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_TRAP_DEF_H +#define _E2K_TRAP_DEF_H + +#include + +#define GET_NR_TIRS(tir_hi) ((tir_hi >> 56) & 0xff) +#define GET_CLEAR_TIR_HI(tir_no) (((tir_no) & 0xffUL) << 56) +#define GET_CLEAR_TIR_LO(tir_no) 0UL + +/* get aa field of tir_hi register */ +#define GET_AA_TIRS(tir_hi) ((tir_hi >> 52) & 0x0f) +#define SET_AA_TIRS(tir_hi, aa_field) ((tir_hi) | (((aa_field) & 0x0f) << 52)) +/* get IP field of tir_lo register */ +#define GET_IP_TIRS(tir_lo) ((tir_lo) & E2K_VA_MASK) +/* get IP field of cr0_hi register */ +#define GET_IP_CR0_HI(cr0_hi) ((cr0_hi).CR0_hi_ip << E2K_ALIGN_INS) + +/* + * Trap Info Register: the numbers of exceptions + */ + +#define exc_illegal_opcode_num 0 /* 00 */ +#define exc_priv_action_num 1 /* 01 */ +#define exc_fp_disabled_num 2 /* 02 */ +#define exc_fp_stack_u_num 3 /* 03 */ +#define exc_d_interrupt_num 4 /* 04 */ +#define exc_diag_ct_cond_num 5 /* 05 */ +#define exc_diag_instr_addr_num 6 /* 06 */ +#define exc_illegal_instr_addr_num 7 /* 07 */ +#define exc_instr_debug_num 8 /* 08 */ +#define exc_window_bounds_num 9 /* 09 */ +#define exc_user_stack_bounds_num 10 /* 10 */ +#define exc_proc_stack_bounds_num 11 /* 11 */ +#define exc_chain_stack_bounds_num 12 /* 12 */ +#define exc_fp_stack_o_num 13 /* 13 */ +#define exc_diag_cond_num 14 /* 14 */ +#define exc_diag_operand_num 15 /* 15 */ +#define exc_illegal_operand_num 16 /* 16 */ +#define exc_array_bounds_num 17 /* 17 */ +#define exc_access_rights_num 18 /* 18 */ +#define exc_addr_not_aligned_num 19 /* 19 */ +#define exc_instr_page_miss_num 20 /* 20 */ +#define exc_instr_page_prot_num 21 /* 21 */ +#define exc_ainstr_page_miss_num 22 /* 22 */ +#define exc_ainstr_page_prot_num 23 /* 23 */ +#define exc_last_wish_num 24 /* 24 */ +#define exc_base_not_aligned_num 25 /* 25 */ +#define exc_software_trap_num 26 /* 26 */ + +#define 
exc_data_debug_num 28 /* 28 */ +#define exc_data_page_num 29 /* 29 */ + +#define exc_recovery_point_num 31 /* 31 */ +#define exc_interrupt_num 32 /* 32 */ +#define exc_nm_interrupt_num 33 /* 33 */ +#define exc_div_num 34 /* 34 */ +#define exc_fp_num 35 /* 35 */ +#define exc_mem_lock_num 36 /* 36 */ +#define exc_mem_lock_as_num 37 /* 37 */ +#define exc_data_error_num 38 /* 38 */ +#define exc_mem_error_out_cpu_num 38 /* 38 */ +#define exc_mem_error_MAU_num 39 /* 39 */ +#define exc_mem_error_L2_num 40 /* 40 */ +#define exc_mem_error_L1_35_num 41 /* 41 */ +#define exc_mem_error_L1_02_num 42 /* 42 */ +#define exc_mem_error_ICACHE_num 43 /* 43 */ + +#define exc_max_num 43 + +#define exc_mova_ch_0_num 52 /* [52] TIR.aa.[0] */ +#define exc_mova_ch_1_num 53 /* [53] TIR.aa.[1] */ +#define exc_mova_ch_2_num 54 /* [54] TIR.aa.[2] */ +#define exc_mova_ch_3_num 55 /* [55] TIR.aa.[3] */ + +#define core_dump_num 38 /* 38 */ +#define masked_hw_stack_bounds_num 60 /* hardware stacks bounds */ + /* trap is occured but masked */ + +/* + * Trap Info Register: the bit mask of exceptions + */ + +#define exc_illegal_opcode_mask (1UL << exc_illegal_opcode_num) +#define exc_priv_action_mask (1UL << exc_priv_action_num) +#define exc_fp_disabled_mask (1UL << exc_fp_disabled_num) +#define exc_fp_stack_u_mask (1UL << exc_fp_stack_u_num) +#define exc_d_interrupt_mask (1UL << exc_d_interrupt_num) +#define exc_diag_ct_cond_mask (1UL << exc_diag_ct_cond_num) +#define exc_diag_instr_addr_mask (1UL << exc_diag_instr_addr_num) +#define exc_illegal_instr_addr_mask (1UL << exc_illegal_instr_addr_num) +#define exc_instr_debug_mask (1UL << exc_instr_debug_num) +#define exc_window_bounds_mask (1UL << exc_window_bounds_num) +#define exc_user_stack_bounds_mask (1UL << exc_user_stack_bounds_num) +#define exc_proc_stack_bounds_mask (1UL << exc_proc_stack_bounds_num) +#define exc_chain_stack_bounds_mask (1UL << exc_chain_stack_bounds_num) +#define exc_fp_stack_o_mask (1UL << exc_fp_stack_o_num) +#define exc_diag_cond_mask (1UL << exc_diag_cond_num) +#define exc_diag_operand_mask (1UL << exc_diag_operand_num) +#define exc_illegal_operand_mask (1UL << exc_illegal_operand_num) +#define exc_array_bounds_mask (1UL << exc_array_bounds_num) +#define exc_access_rights_mask (1UL << exc_access_rights_num) +#define exc_addr_not_aligned_mask (1UL << exc_addr_not_aligned_num) +#define exc_instr_page_miss_mask (1UL << exc_instr_page_miss_num) +#define exc_instr_page_prot_mask (1UL << exc_instr_page_prot_num) +#define exc_ainstr_page_miss_mask (1UL << exc_ainstr_page_miss_num) +#define exc_ainstr_page_prot_mask (1UL << exc_ainstr_page_prot_num) +#define exc_last_wish_mask (1UL << exc_last_wish_num) +#define exc_base_not_aligned_mask (1UL << exc_base_not_aligned_num) +#define exc_software_trap_mask (1UL << exc_software_trap_num) + +#define exc_data_debug_mask (1UL << exc_data_debug_num) +#define exc_data_page_mask (1UL << exc_data_page_num) + +#define exc_recovery_point_mask (1UL << exc_recovery_point_num) +#define exc_interrupt_mask (1UL << exc_interrupt_num) +#define exc_nm_interrupt_mask (1UL << exc_nm_interrupt_num) +#define exc_div_mask (1UL << exc_div_num) +#define exc_fp_mask (1UL << exc_fp_num) +#define exc_mem_lock_mask (1UL << exc_mem_lock_num) +#define exc_mem_lock_as_mask (1UL << exc_mem_lock_as_num) +#define exc_data_error_mask (1UL << exc_data_error_num) +#define exc_mem_error_out_cpu_mask (1UL << exc_mem_error_out_cpu_num) +#define exc_mem_error_MAU_mask (1UL << exc_mem_error_MAU_num) +#define exc_mem_error_L2_mask (1UL << 
exc_mem_error_L2_num) +#define exc_mem_error_L1_35_mask (1UL << exc_mem_error_L1_35_num) +#define exc_mem_error_L1_02_mask (1UL << exc_mem_error_L1_02_num) +#define exc_mem_error_ICACHE_mask (1UL << exc_mem_error_ICACHE_num) +#define exc_mem_error_mask (exc_mem_error_out_cpu_mask | \ + exc_mem_error_MAU_mask | \ + exc_mem_error_L2_mask | \ + exc_mem_error_L1_35_mask | \ + exc_mem_error_L1_02_mask | \ + exc_mem_error_ICACHE_mask) + +#define exc_mova_ch_0_mask (1UL << exc_mova_ch_0_num) +#define exc_mova_ch_1_mask (1UL << exc_mova_ch_1_num) +#define exc_mova_ch_2_mask (1UL << exc_mova_ch_2_num) +#define exc_mova_ch_3_mask (1UL << exc_mova_ch_3_num) + +#define exc_all_mask ((1UL << (exc_max_num + 1)) - 1UL) +#define aau_exc_mask (exc_mova_ch_0_mask | \ + exc_mova_ch_1_mask | \ + exc_mova_ch_2_mask | \ + exc_mova_ch_3_mask) + +#define core_dump_mask (1UL << core_dump_num) +#define masked_hw_stack_bounds_mask (1UL << masked_hw_stack_bounds_num) + +#define sync_exc_mask (exc_illegal_opcode_mask | \ + exc_priv_action_mask | \ + exc_fp_disabled_mask | \ + exc_fp_stack_u_mask | \ + exc_diag_ct_cond_mask | \ + exc_diag_instr_addr_mask | \ + exc_illegal_instr_addr_mask | \ + exc_window_bounds_mask | \ + exc_user_stack_bounds_mask | \ + exc_fp_stack_o_mask | \ + exc_diag_cond_mask | \ + exc_diag_operand_mask | \ + exc_illegal_operand_mask | \ + exc_array_bounds_mask | \ + exc_access_rights_mask | \ + exc_addr_not_aligned_mask | \ + exc_instr_page_miss_mask | \ + exc_instr_page_prot_mask | \ + exc_base_not_aligned_mask | \ + exc_software_trap_mask) + +#define async_exc_mask (exc_proc_stack_bounds_mask | \ + exc_chain_stack_bounds_mask | \ + exc_instr_debug_mask | \ + exc_ainstr_page_miss_mask | \ + exc_ainstr_page_prot_mask | \ + exc_interrupt_mask | \ + exc_nm_interrupt_mask | \ + exc_mem_lock_as_mask | \ + exc_data_error_mask | \ + exc_mem_error_mask) + +#define defer_exc_mask (exc_data_page_mask | \ + exc_mem_lock_mask | \ + exc_d_interrupt_mask | \ + exc_last_wish_mask) + +/* Mask of non-maskable interrupts. "exc_data_debug" and "exc_instr_debug" + * actually can be either maskable or non-maskable depending on the watched + * event, but we assume the worst case (non-maskable). 
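+ *
+ * An (illustrative) consumer would test the TIR exception word against
+ * this mask before assuming interrupts may safely be re-enabled, e.g.:
+ *
+ *	if (exceptions & non_maskable_exc_mask)
+ *		handle_nm_exceptions(regs, exceptions);
+ *
+ * where handle_nm_exceptions() is a hypothetical helper, not part of
+ * this header.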
*/ +#define non_maskable_exc_mask (exc_nm_interrupt_mask | \ + exc_data_debug_mask | \ + exc_instr_debug_mask | \ + exc_mem_lock_as_mask) + +#define have_tc_exc_mask (exc_data_page_mask | \ + exc_mem_lock_mask) + +#define fp_es (1UL << 7) /* - error summary status; es set if anyone */ + /* of unmasked exception flags is set; */ + +#define fp_pe (1UL << 5) /* - precision exception flag; */ +#define fp_ue (1UL << 4) /* - underflow exception flag; */ +#define fp_oe (1UL << 3) /* - overflow exception flag; */ +#define fp_ze (1UL << 2) /* - divide by zero exception flag; */ +#define fp_de (1UL << 1) /* - denormalized operand exception flag; */ +#define fp_ie (1UL << 0) /* - invalid operation exception flag; */ + +#ifndef __ASSEMBLY__ +/* + * do_page_fault() return values + */ +enum pf_ret { + /* Could not handle fault, must return to handle signals */ + PFR_SIGPENDING = 1, + /* The page fault was handled */ + PFR_SUCCESS, + /* In some cases kernel addresses can be in Trap Cellar if VLIW command + * consisted of a several load/store operations and one of them caused + * page fault trap */ + PFR_KERNEL_ADDRESS, + /* Do not handle speculative access */ + PFR_IGNORE, + /* Controlled access from kernel to user memory */ + PFR_CONTROLLED_ACCESS, + /* needs to change SAP to AP for multi_threading of protected mode */ + PFR_AP_THREAD_READ, + /* trap on paravirtualized guest kernel and is handled by host: */ + /* such result means the trap was handled by hypervisor and it need */ + /* recover faulted operation */ + PFR_KVM_KERNEL_ADDRESS, +}; +#endif /* ! __ASSEMBLY__ */ + +/* + * Common system calls (trap table entries numbers) + */ +#define LINUX_SYSCALL32_TRAPNUM 1 /* Use E2K trap entry #1 */ +#define LINUX_SYSCALL64_TRAPNUM 3 /* Use E2K trap entry #3 */ +#define LINUX_SYSCALL_TRAPNUM_OLD 4 /* Deprecated */ +#define LINUX_FAST_SYSCALL32_TRAPNUM 5 +#define LINUX_FAST_SYSCALL64_TRAPNUM 6 +#define LINUX_FAST_SYSCALL128_TRAPNUM 7 + +/* + * Hypercalls + */ +#define LINUX_HCALL_GENERIC_TRAPNUM 0 /* guest hardware hypercalls */ +#define LINUX_HCALL_LIGHT_TRAPNUM 1 /* guest light hardware hypercalls */ + +/* + * Definition of ttable entry number used for protected system calls. + * This is under agreement with protected mode compiler/plib team. + */ +#define PMODE_SYSCALL_TRAPNUM 8 +#define PMODE_NEW_SYSCALL_TRAPNUM 10 + +#define GENERIC_HYPERCALL_TRAPNUM 16 /* guest software hypercalls */ +#define LIGHT_HYPERCALL_TRAPNUM 17 /* guest light software hypercalls */ + +#define HYPERCALLS_TRAPS_MASK ((1U << GENERIC_HYPERCALL_TRAPNUM) | \ + (1U << LIGHT_HYPERCALL_TRAPNUM)) + +/* + * One trap table entry byte size + */ +#define LINUX_SYSCALL_ENTRY_SIZE 0x800 /* one entry max size is */ + /* PAGE_SIZE / 2 */ + +#endif /* _E2K_TRAP_DEF_H */ diff --git a/arch/e2k/include/asm/trap_table.S.h b/arch/e2k/include/asm/trap_table.S.h new file mode 100644 index 0000000..2281f7a --- /dev/null +++ b/arch/e2k/include/asm/trap_table.S.h @@ -0,0 +1,150 @@ +/* + * + * Copyright (C) 2020 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_TRAP_TABLE_ASM_H +#define _E2K_TRAP_TABLE_ASM_H + +#ifdef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#include + +#if defined CONFIG_SMP +# define SMP_ONLY(...) __VA_ARGS__ +#else +# define SMP_ONLY(...) +#endif + +/* + * Important: the first memory access in kernel is store, not load. + * This is needed to flush SLT before trying to load anything. + */ +#define SWITCH_HW_STACKS_FROM_USER(...) 
\ + { \ + /* Disable load/store generations */ \ + crp; \ + } \ + { \ + /* Wait for FPU exceptions before switching stacks */ \ + wait all_e = 1; \ + rrd %osr0, GVCPUSTATE; \ + stgdq,sm %qg16, 0, TSK_TI_G_VCPU_STATE; \ + } \ + { \ + rrd %psp.hi, GCURTASK; \ + stgdq,sm %qg18, 0, TSK_TI_G_MY_CPU_OFFSET; \ + cmpesb,1 0, 0, %pred0; \ + } \ + SWITCH_HW_STACKS(TSK_TI_, ##__VA_ARGS__) + +/* + * This assumes that GVCPUSTATE points to current_thread_info() + * and %psp.hi has been read into GCURTASK + * + * Does the following: + * + * 1) Saves global registers either to 'thread_info.tmp_k_gregs' or to + * 'thread_info.k_gregs'. The first area is used for trap handler since + * we do not know whether it is from user or from kernel and whether + * global registers have been saved already to 'thread_info.k_gregs'. + * + * 2) Saves stack registers to 'thread_info.tmp_user_stacks'. If this is + * not a kernel trap then these values will be copied to pt_regs later. + * + * 3) Updates global and stack registers with kernel values (if not in + * kernel trap where it's been done already) + */ +#define SWITCH_HW_STACKS(PREFIX, ...) \ + { \ + ldd,0 GVCPUSTATE, TI_K_PSP_LO, GCPUOFFSET; \ + ldd,2 GVCPUSTATE, TI_K_PCSP_LO, GCPUID; \ + __VA_ARGS__ \ + } \ + { \ + rrd %psp.lo, GCURTASK ? %pred0; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSP_HI ? %pred0; \ + SMP_ONLY(ldw,5 GVCPUSTATE, TSK_TI_CPU_DELTA, GCPUID ? ~ %pred0;) \ + } \ + { \ + rrd %pcsp.hi, GCURTASK ? %pred0; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSP_LO ? %pred0; \ + /* Nothing to do if this is kernel (%pred0 == false) */ \ + subd,1 GVCPUSTATE, TSK_TI, GCURTASK ? ~ %pred0; \ + ibranch trap_handler_switched_stacks ? ~ %pred0; \ + } \ + { \ + nop 1; /* ldd -> use */ \ + rrd %pcsp.lo, GCURTASK; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSP_HI; \ + } \ + { \ + rwd GCPUOFFSET, %psp.lo; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSP_LO; \ + ldrd,5 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_MY_CPU_OFFSET_EXT, GCPUOFFSET; \ + } \ + { \ + rwd GCPUID, %pcsp.lo; \ + ldd,2 GVCPUSTATE, TI_K_PSP_HI, GCURTASK; \ + ldrd,5 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_VCPU_STATE_EXT, GCPUID; \ + } \ + { \ + rwd GCURTASK, %psp.hi; \ + ldd,2 GVCPUSTATE, TI_K_PCSP_HI, GCURTASK; \ + } \ + { \ + rwd GCURTASK, %pcsp.hi; \ + } \ + { \ + rrd %pshtp, GCURTASK; \ + } \ + { \ + ldrd,3 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_CPU_ID, GCPUOFFSET; \ + strd,5 GCPUOFFSET, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_CPU_ID; \ + } \ + { \ + ldrd,3 GVCPUSTATE, TAGGED_MEM_LOAD_REC_OPC | PREFIX##G_TASK, GCPUID; \ + strd,5 GCPUID, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_TASK; \ + } \ + { \ + nop 2; /* ld -> use */ \ + rrd %pcshtp, GCURTASK; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PSHTP; \ + } \ + { \ + subd,1 GVCPUSTATE, TSK_TI, GCURTASK; \ + stgdd,2 GCURTASK, 0, TSK_TI_TMP_U_PCSHTP; \ + strd,5 GCPUOFFSET, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_MY_CPU_OFFSET_EXT; \ + } \ + { \ + SMP_ONLY(ldw,3 GVCPUSTATE, TSK_TI_CPU_DELTA, GCPUID;) \ + strd,5 GCPUID, GVCPUSTATE, TAGGED_MEM_STORE_REC_OPC | PREFIX##G_VCPU_STATE_EXT; \ + } + +.macro HANDLER_TRAMPOLINE ctprN, scallN, fn, wbsL +/* Force load OSGD->GD. 
An alternative is to use a non-zero CUI for the kernel */ { + nop + sdisp \ctprN, \scallN +} + call \ctprN, wbs=\wbsL + disp \ctprN, \fn + SWITCH_HW_STACKS_FROM_USER() + SMP_ONLY(shld,1 GCPUID, 3, GCPUOFFSET) +{ + SMP_ONLY(ldd,2 [ __per_cpu_offset + GCPUOFFSET ], GCPUOFFSET) + ct \ctprN +} +.endm /* HANDLER_TRAMPOLINE */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _E2K_TRAP_TABLE_ASM_H */ diff --git a/arch/e2k/include/asm/trap_table.h b/arch/e2k/include/asm/trap_table.h new file mode 100644 index 0000000..78cdea0 --- /dev/null +++ b/arch/e2k/include/asm/trap_table.h @@ -0,0 +1,258 @@ +/* + * + * Copyright (C) 2001 MCST + * + * Definition of trap handling routines. + */ + +#ifndef _E2K_TRAP_TABLE_H +#define _E2K_TRAP_TABLE_H + +#include +#include +#include +#include +#include +#include +#include + +#ifndef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#ifdef __ASSEMBLY__ +#include +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +#define GDB_BREAKPOINT_STUB_MASK 0xffffffffffffff8fUL +#define GDB_BREAKPOINT_STUB 0x0dc0c08004000001UL + +typedef long (*ttable_entry_args_t)(int sys_num, ...); + +static inline bool +is_gdb_breakpoint_trap(struct pt_regs *regs) +{ + u64 *instr = (u64 *)GET_IP_CR0_HI(regs->crs.cr0_hi); + + return (*instr & GDB_BREAKPOINT_STUB_MASK) == GDB_BREAKPOINT_STUB; +} + +extern void kernel_stack_overflow(unsigned int overflows); + +static inline unsigned int +native_is_kernel_data_stack_bounds(bool trap_on_kernel, e2k_usd_lo_t usd_lo) +{ + /* In native case this check is done in assembler in ttable_entry0 */ + return false; +} + +static inline void +native_stack_bounds_trap_enable(void) +{ + /* the 'sge' flag is unused while handling traps/system calls, */ + /* so there is nothing to do */ +} +static inline void +native_correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + if (regs == NULL) { + regs = current_thread_info()->pt_regs; + BUG_ON(regs == NULL); + } + regs->crs.cr0_hi.CR0_hi_IP = return_ip; +} +static inline void +native_handle_deferred_traps_in_syscall(struct pt_regs *regs) +{ + /* there are no deferred traps in system calls */ +} +static inline bool +native_have_deferred_traps(struct pt_regs *regs) +{ + return false; /* there are no deferred traps in system calls */ +} + +static inline void +native_do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + (void)do_page_fault(regs, address, condition, mask, 0); +} + +extern long native_ttable_entry1(int sys_num, ...); +extern long native_ttable_entry3(int sys_num, ...); +extern long native_ttable_entry4(int sys_num, ...); + +#define do_ttable_entry_name(entry) # entry +#define ttable_entry_name(entry) do_ttable_entry_name(entry) + +#define ttable_entry1_name ttable_entry_name(ttable_entry1) +#define ttable_entry3_name ttable_entry_name(ttable_entry3) +#define ttable_entry4_name ttable_entry_name(ttable_entry4) + +#define ttable_entry1_func(sys_num, ...) \ +({ \ + long rval; \ + rval = ttable_entry1(sys_num, ##__VA_ARGS__); \ + rval; \ +}) +#define ttable_entry3_func(sys_num, ...) \ +({ \ + long rval; \ + rval = ttable_entry3(sys_num, ##__VA_ARGS__); \ + rval; \ +}) +#define ttable_entry4_func(sys_num, ...) \ +({ \ + long rval; \ + rval = ttable_entry4(sys_num, ##__VA_ARGS__); \ + rval; \ +}) + +#ifndef CONFIG_VIRTUALIZATION +#if CONFIG_CPU_ISET >= 5 +# define SYS_RET_TYPE long +#else /* !
CONFIG_CPU_ISET < 5 */ +# define SYS_RET_TYPE void +#endif /* CONFIG_CPU_ISET >= 5 */ +#else /* CONFIG_VIRTUALIZATION */ +# define SYS_RET_TYPE long +#endif /* ! CONFIG_VIRTUALIZATION */ + +typedef unsigned long (*system_call_func)(unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6); + +typedef unsigned long (*protected_system_call_func)(unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6, + struct pt_regs *regs); +static inline void +native_exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + NATIVE_EXIT_HANDLE_SYSCALL(sbr, usd_hi.USD_hi_half, usd_lo.USD_lo_half, + upsr.UPSR_reg); +} + +extern SYS_RET_TYPE notrace handle_sys_call(system_call_func sys_call, + long arg1, long arg2, long arg3, long arg4, + long arg5, long arg6, struct pt_regs *regs); + +extern const system_call_func sys_call_table[NR_syscalls]; +extern const system_call_func sys_call_table_32[NR_syscalls]; +extern const protected_system_call_func sys_call_table_entry8[NR_syscalls]; +extern const system_call_func sys_protcall_table[NR_syscalls]; +extern const system_call_func sys_call_table_deprecated[NR_syscalls]; + +#if !defined(CONFIG_PARAVIRT_GUEST) && !defined(CONFIG_KVM_GUEST_KERNEL) +/* it is native kernel without any virtualization */ +/* or it is host kernel with virtualization support */ + +#define FILL_HARDWARE_STACKS() NATIVE_FILL_HARDWARE_STACKS() + +static inline void +correct_trap_return_ip(struct pt_regs *regs, unsigned long return_ip) +{ + native_correct_trap_return_ip(regs, return_ip); +} +static inline void +handle_deferred_traps_in_syscall(struct pt_regs *regs) +{ + native_handle_deferred_traps_in_syscall(regs); +} +static inline void +stack_bounds_trap_enable(void) +{ + native_stack_bounds_trap_enable(); +} + +#define ttable_entry1 native_ttable_entry1 +#define ttable_entry3 native_ttable_entry3 +#define ttable_entry4 native_ttable_entry4 + +#define get_ttable_entry1 ((ttable_entry_args_t)native_ttable_entry1) +#define get_ttable_entry3 ((ttable_entry_args_t)native_ttable_entry3) +#define get_ttable_entry4 ((ttable_entry_args_t)native_ttable_entry4) + +static inline void +exit_handle_syscall(e2k_addr_t sbr, e2k_usd_hi_t usd_hi, + e2k_usd_lo_t usd_lo, e2k_upsr_t upsr) +{ + native_exit_handle_syscall(sbr, usd_hi, usd_lo, upsr); +} + +static inline unsigned long +kvm_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} + +#ifndef CONFIG_VIRTUALIZATION +/* it is native kernel without any virtualization */ + +#define instr_page_fault(__regs, __ftype, __async) \ + native_do_instr_page_fault(__regs, __ftype, __async) + +static inline void +do_aau_page_fault(struct pt_regs *const regs, e2k_addr_t address, + const tc_cond_t condition, const tc_mask_t mask, + const unsigned int aa_no) +{ + native_do_aau_page_fault(regs, address, condition, mask, aa_no); +} + +static inline unsigned int +is_kernel_data_stack_bounds(bool on_kernel, e2k_usd_lo_t usd_lo) +{ + return native_is_kernel_data_stack_bounds(on_kernel, usd_lo); +} +#endif /* ! CONFIG_VIRTUALIZATION */ + +#endif /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ + +#else /* __ASSEMBLY__ */ + +/* + * Global registers map used by kernel + * Numbers of used global registers see at arch/e2k/include/asm/glob_regs.h + */ + +#define GET_GREG_MEMONIC(greg_no) %dg ## greg_no +#define DO_GET_GREG_MEMONIC(greg_no) GET_GREG_MEMONIC(greg_no) + +#define GCURTASK DO_GET_GREG_MEMONIC(CURRENT_TASK_GREG) +#define GCPUOFFSET DO_GET_GREG_MEMONIC(MY_CPU_OFFSET_GREG) +#define GCPUID DO_GET_GREG_MEMONIC(SMP_CPU_ID_GREG) +/* Macroses for virtualization support on assembler */ +#define GVCPUSTATE DO_GET_GREG_MEMONIC(GUEST_VCPU_STATE_GREG) + +#endif /* ! __ASSEMBLY__ */ + +#include + +#ifndef __ASSEMBLY__ +static inline void init_pt_regs_for_syscall(struct pt_regs *regs) +{ + regs->next = NULL; + regs->trap = NULL; + +#ifdef CONFIG_USE_AAU + regs->aau_context = NULL; +#endif + + regs->flags = 0; + init_guest_syscalls_handling(regs); +} +#endif + +#endif /* _E2K_TRAP_TABLE_H */ diff --git a/arch/e2k/include/asm/traps.h b/arch/e2k/include/asm/traps.h new file mode 100644 index 0000000..e8ef0fd --- /dev/null +++ b/arch/e2k/include/asm/traps.h @@ -0,0 +1,239 @@ +/* linux/include/asm-e2k/traps.h, v 1.0 03/07/2001. + * + * Copyright (C) 2001 MCST + * + * Defenition of traps handling routines. + */ + +#ifndef _E2K_TRAPS_H +#define _E2K_TRAPS_H + +#include +#include +#include + +typedef void (*exc_function)(struct pt_regs *regs); +extern const exc_function exc_tbl[]; +extern const char *exc_tbl_name[]; + +#define S_SIG(regs, signo, trapno, code) \ +do { \ + int nr_TIRs; \ + e2k_tir_hi_t tir_hi; \ + struct trap_pt_regs *trap = (regs)->trap; \ + void __user *addr; \ + \ + if (trap) { \ + AW(tir_hi) = trap->TIR_hi; \ + nr_TIRs = GET_NR_TIRS(AW(tir_hi)); \ + addr = (void __user *) (trap->TIRs[nr_TIRs].TIR_lo.TIR_lo_ip); \ + } else { \ + addr = 0; \ + } \ + \ + force_sig_fault(signo, code, addr, trapno); \ +} while (0) + +extern void parse_TIR_registers(struct pt_regs *regs, u64 exceptions); +extern void do_aau_fault(int aa_field, struct pt_regs *regs); +extern int handle_proc_stack_bounds(struct e2k_stacks *stacks, + struct trap_pt_regs *trap); +extern int handle_chain_stack_bounds(struct e2k_stacks *stacks, + struct trap_pt_regs *trap); +extern int do_page_fault(struct pt_regs *const regs, e2k_addr_t address, + tc_cond_t condition, tc_mask_t mask, const int instr_page); +#ifdef CONFIG_KVM_ASYNC_PF +extern void do_pv_apf_wake(struct pt_regs *regs); +#endif /* */ +extern void do_trap_cellar(struct pt_regs *regs, int only_system_tc); + +extern irqreturn_t native_do_interrupt(struct pt_regs *regs); +extern void do_nm_interrupt(struct pt_regs *regs); +extern void native_instr_page_fault(struct pt_regs *regs, tc_fault_type_t ftype, + const int async_instr); + +extern void do_trap_cellar(struct pt_regs *regs, int only_system_tc); + +extern int constrict_user_data_stack(struct pt_regs *regs, unsigned long incr); +extern int expand_user_data_stack(struct pt_regs *regs, unsigned long incr); +extern void do_notify_resume(struct pt_regs *regs); + +extern int parse_getsp_operation(struct trap_pt_regs *regs, int *incr); + +extern void coredump_in_future(void); + +enum { + GETSP_OP_IGNORE, + GETSP_OP_INCREMENT, + GETSP_OP_DECREMENT +}; + +static inline unsigned int user_trap_init(void) +{ + /* Enable system calls for user's processes. */ + unsigned int linux_osem = 0; + + /* Enable deprecated generic ttable2 syscall entry. 
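+ * (trap table entry #4, LINUX_SYSCALL_TRAPNUM_OLD in asm/trap_def.h).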
*/ + linux_osem = 1 << LINUX_SYSCALL_TRAPNUM_OLD; + + /* Enable ttable1 syscall entry - 32-bit syscalls only */ + linux_osem |= 1 << LINUX_SYSCALL32_TRAPNUM; + /* Enable ttable3 syscall entry - 64-bit syscalls only */ + linux_osem |= 1 << LINUX_SYSCALL64_TRAPNUM; + + /* Enable fast syscalls entries. */ + linux_osem |= 1 << LINUX_FAST_SYSCALL32_TRAPNUM; + linux_osem |= 1 << LINUX_FAST_SYSCALL64_TRAPNUM; + linux_osem |= 1 << LINUX_FAST_SYSCALL128_TRAPNUM; + +#ifdef CONFIG_PROTECTED_MODE + linux_osem |= (1 << PMODE_SYSCALL_TRAPNUM); + linux_osem |= (1 << PMODE_NEW_SYSCALL_TRAPNUM); +#endif /* CONFIG_PROTECTED_MODE */ + + return linux_osem; +} + +static inline unsigned int guest_trap_init(void) +{ + /* Enable system calls for user's processes. */ + unsigned int linux_osem = user_trap_init(); + +#ifdef CONFIG_KVM_HOST_MODE + linux_osem |= HYPERCALLS_TRAPS_MASK; +#endif + + return linux_osem; +} + +static inline unsigned int user_hcall_init(void) +{ + unsigned int linux_hcem = 0; + + linux_hcem = 1 << LINUX_HCALL_GENERIC_TRAPNUM; + linux_hcem |= 1 << LINUX_HCALL_LIGHT_TRAPNUM; + + return linux_hcem; +} + +extern char __hypercalls_begin[]; +static inline void kernel_trap_mask_init(void) +{ + WRITE_OSEM_REG(user_trap_init()); +#ifdef CONFIG_KVM_HOST_MODE + machine.rwd(E2K_REG_HCEM, user_hcall_init()); + machine.rwd(E2K_REG_HCEB, (unsigned long) __hypercalls_begin); +#endif +} + +static inline int +native_host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + /* native & host kernel cannot be paravirtualized guest */ + return 0; +} + +static inline int +native_host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + /* native & host kernel cannot be paravirtualized guest */ + return 0; +} + +/* + * MMIO page fault cannot occur on native or host mode, + * so ignore such traps + */ +static inline unsigned long +native_mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return 0; +} + +#ifdef CONFIG_VIRTUALIZATION +extern void native_sysrq_showstate_interrupt(struct pt_regs *regs); +static inline void +native_init_guest_system_handlers_table(void) +{ + if (paravirt_enabled()) { + /* It is native guest */ + setup_APIC_vector_handler(SYSRQ_SHOWSTATE_EPIC_VECTOR, + native_sysrq_showstate_interrupt, 1, + "native_sysrq_showstate_interrupt"); + } +} +# ifndef CONFIG_KVM +# error "Undefined guest virtualization type" +# endif /* CONFIG_KVM */ +#else /* ! CONFIG_VIRTUALIZATION */ +/* it is native host kernel without virtualization support */ +static inline void +native_init_guest_system_handlers_table(void) +{ + /* Nothing to do */ +} +# define SET_RUNSTATE_IN_USER_TRAP() +# define SET_RUNSTATE_OUT_USER_TRAP() +# define SET_RUNSTATE_IN_KERNEL_TRAP(cur_runstate) +# define SET_RUNSTATE_OUT_KERNEL_TRAP(cur_runstate) +#endif /* CONFIG_VIRTUALIZATION */ + +#if defined(CONFIG_KVM_GUEST_KERNEL) +/* It is pure guest kernel (not paravirtualized) */ +#include +#elif defined(CONFIG_PARAVIRT_GUEST) +/* It is paravirtualized host and guest kernel */ +#include +#else /* ! CONFIG_PARAVIRT_GUEST && ! 
CONFIG_KVM_GUEST_KERNEL */ +/* it is native kernel without any virtualization */ +/* or it is native host kernel with virtualization support */ + +#define TIR0_clear_false_exceptions(TIR_hi, nr_TIRs) \ + native_TIR0_clear_false_exceptions(TIR_hi, nr_TIRs) + +static inline void +handle_interrupt(struct pt_regs *regs) +{ + native_do_interrupt(regs); +} + +extern int apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, unsigned long end, + unsigned long delta); +extern int apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, unsigned long end, + unsigned long delta); + +static inline int host_apply_psp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + return native_host_apply_psp_delta_to_signal_stack(base, size, + start, end, delta); +} + +static inline int host_apply_pcsp_delta_to_signal_stack(unsigned long base, + unsigned long size, unsigned long start, + unsigned long end, unsigned long delta) +{ + return native_host_apply_pcsp_delta_to_signal_stack(base, size, + start, end, delta); +} + +static inline unsigned long +mmio_page_fault(struct pt_regs *regs, trap_cellar_t *tcellar) +{ + return native_mmio_page_fault(regs, tcellar); +} +static inline void +init_guest_system_handlers_table(void) +{ + native_init_guest_system_handlers_table(); +} +#endif /* ! CONFIG_PARAVIRT_GUEST && ! CONFIG_KVM_GUEST_KERNEL */ + +#endif /* _E2K_TRAPS_H */ diff --git a/arch/e2k/include/asm/types.h b/arch/e2k/include/asm/types.h new file mode 100644 index 0000000..c574c9f --- /dev/null +++ b/arch/e2k/include/asm/types.h @@ -0,0 +1,11 @@ +#ifndef _E2K_TYPES_H_ +#define _E2K_TYPES_H_ + +#include + +#ifndef __ASSEMBLY__ + +typedef struct linux_binprm linux_binprm_t; + +#endif /* !(__ASSEMBLY__) */ +#endif /* _E2K_TYPES_H_ */ diff --git a/arch/e2k/include/asm/uaccess.h b/arch/e2k/include/asm/uaccess.h new file mode 100644 index 0000000..4383af2 --- /dev/null +++ b/arch/e2k/include/asm/uaccess.h @@ -0,0 +1,441 @@ +#ifndef _E2K_UACCESS_H_ +#define _E2K_UACCESS_H_ + +/* + * User space memory access functions + * asm/uaccess.h + */ +#include + +#include +#include +#include +#include +#include +#ifdef CONFIG_PROTECTED_MODE +#include +#endif + +#undef DEBUG_UACCESS_MODE +#undef DEBUG_UACCESS_FAULT +#undef DebugUA +#undef DebugUAF +#define DEBUG_UACCESS_MODE 0 +#define DEBUG_UACCESS_FAULT 0 +#define DebugUA \ + if (DEBUG_UACCESS_MODE) printk +#if DEBUG_UACCESS_MODE || DEBUG_UACCESS_FAULT +# define DebugUAF printk +#else +# define DebugUAF(...) +#endif + + + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +/* Even kernel should not access page tables with get_user()/put_user() */ +#define KERNEL_DS MAKE_MM_SEG(KERNEL_VPTB_BASE_ADDR) + +#define USER_ADDR_MAX USER_HW_STACKS_BASE +#define USER_DS MAKE_MM_SEG(USER_ADDR_MAX) + +/* + * Sometimes kernel wants to access hardware stacks, + * in which case we can use this limit. + * + * IMPORTANT: in this case kernel must check that it accesses + * only the stacks of the current thread. Writing another + * thread's hardware stacks shall not be possible. 
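+ *
+ * A minimal illustrative pattern, using the get_fs()/set_fs() helpers
+ * defined below:
+ *
+ *	mm_segment_t old_fs = get_fs();
+ *	set_fs(K_USER_DS);
+ *	...access the current thread's own hardware stacks...
+ *	set_fs(old_fs);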
+ */ +#define K_USER_DS MAKE_MM_SEG(PAGE_OFFSET) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#define user_addr_max() (current_thread_info()->addr_limit.seg) + +extern int __verify_write(const void *addr, unsigned long size); +extern int __verify_read(const void *addr, unsigned long size); + +static inline bool __range_ok(unsigned long addr, unsigned long size, + unsigned long limit) +{ + BUILD_BUG_ON(!__builtin_constant_p(TASK32_SIZE)); + + if (__builtin_constant_p(size) && size <= TASK32_SIZE) + return likely(addr <= limit - size); + + /* Arbitrary sizes? Be careful about overflow */ + return likely(addr + size >= size && addr + size <= limit); +} + +#define access_ok(addr, size) \ +({ \ + __chk_user_ptr(addr); \ + likely(__range_ok((unsigned long __force) (addr), (size), \ + user_addr_max())); \ +}) + +struct exception_table_entry +{ + unsigned long insn; + unsigned long fixup; +}; + + +/* + * The macros to work safely in kernel with user memory: + * + * TRY_USR_PFAULT { + * ... // "try" code which accesses user memory + * } CATCH_USR_PFAULT { + * ... // "catch" code after page fault (bad user's address) + * } END_USR_PFAULT + * + * NOTE1: these macros can be used inside of "catch" code, + * but can *not* be used inside of "try" code + * NOTE2: the compiler believes that after global label we CAN'T use + * local context so __result__ must be initialized after the label. + * NOTE3: any variable that is used in both "if" and "else" blocks must + * be marked with "volatile" keyword. Caveat: mark the variable + * itself and not the memory it is pointing to, i.e: + * int *volatile ptr <=== RIGHT + * volatile int *ptr <=== WRONG + */ + +#define TRY_USR_PFAULT \ + might_fault(); \ + __TRY_USR_PFAULT + +#define __TRY_USR_PFAULT \ + unsigned long _usr_pfault_jmp = current_thread_info()->usr_pfault_jump;\ + SAVE_CURRENT_ADDR(¤t_thread_info()->usr_pfault_jump); \ + if (likely(current_thread_info()->usr_pfault_jump)) { + +#define CATCH_USR_PFAULT \ + E2K_CMD_SEPARATOR; \ + current_thread_info()->usr_pfault_jump = _usr_pfault_jmp; \ + } else { \ + current_thread_info()->usr_pfault_jump = _usr_pfault_jmp; + +#define END_USR_PFAULT \ + E2K_CMD_SEPARATOR; \ + } + +#define SET_USR_PFAULT(name) \ + unsigned long _usr_pfault_jmp = \ + current_thread_info()->usr_pfault_jump; \ + GET_LBL_ADDR(name, current_thread_info()->usr_pfault_jump) + +#define RESTORE_USR_PFAULT \ +({ \ + unsigned long __pfault_result = current_thread_info()->usr_pfault_jump;\ + current_thread_info()->usr_pfault_jump = _usr_pfault_jmp; \ + unlikely(!__pfault_result); \ +}) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the uglyness from the user. + * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). 
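+ *
+ * Illustrative sketch of the intended pattern (one access_ok() check,
+ * then unchecked __get_user() calls for each access):
+ *
+ *	u32 a, b;
+ *	if (!access_ok(uptr, 2 * sizeof(u32)))
+ *		return -EFAULT;
+ *	if (__get_user(a, &uptr[0]) || __get_user(b, &uptr[1]))
+ *		return -EFAULT;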
+ */ + + /** + * get user + */ + +extern int __get_user_bad(void) __attribute__((noreturn)); + +#define __get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *___gu_ptr = (ptr); \ + int __ret_gu; \ + switch (sizeof(*___gu_ptr)) { \ + case 1: \ + GET_USER_ASM(x, ___gu_ptr, b, __ret_gu); break; \ + case 2: \ + GET_USER_ASM(x, ___gu_ptr, h, __ret_gu); break; \ + case 4: \ + GET_USER_ASM(x, ___gu_ptr, w, __ret_gu); break; \ + case 8: \ + GET_USER_ASM(x, ___gu_ptr, d, __ret_gu); break; \ + default: \ + __ret_gu = -EFAULT; __get_user_bad(); break; \ + } \ + (int) builtin_expect_wrapper(__ret_gu, 0); \ +}) + +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + might_fault(); \ + access_ok(__gu_ptr, sizeof(*__gu_ptr)) ? \ + __get_user((x), __gu_ptr) : -EFAULT; \ +}) + + + /** + * put user + */ + +extern int __put_user_bad(void) __attribute__((noreturn)); + +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *___pu_ptr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __ret_pu; \ + switch (sizeof(*___pu_ptr)) { \ + case 1: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, b, __ret_pu); break; \ + case 2: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, h, __ret_pu); break; \ + case 4: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, w, __ret_pu); break; \ + case 8: \ + PUT_USER_ASM(__pu_val, ___pu_ptr, d, __ret_pu); break; \ + default: \ + __ret_pu = -EFAULT; __put_user_bad(); break; \ + } \ + (int) builtin_expect_wrapper(__ret_pu, 0); \ +}) + +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) *__pu_ptr = (ptr); \ + might_fault(); \ + (access_ok(__pu_ptr, sizeof(*__pu_ptr))) ? \ + __put_user((x), __pu_ptr) : -EFAULT; \ +}) + +#define raw_copy_to_user raw_copy_in_user + +extern unsigned long raw_copy_from_user(void *to, const void *from, + unsigned long n); +extern unsigned long raw_copy_in_user(void *to, const void *from, + unsigned long n); + +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern unsigned long __copy_user_with_tags(void *to, const void *from, + unsigned long n); + +#define __copy_in_user_with_tags __copy_user_with_tags +#define __copy_to_user_with_tags __copy_user_with_tags +#define __copy_from_user_with_tags __copy_user_with_tags + +static inline +unsigned long copy_in_user_with_tags(void __user *to, const void __user *from, + unsigned long n) +{ + if (likely(access_ok(from, n) && access_ok(to, n))) + n = __copy_in_user_with_tags(to, from, n); + + return n; +} + +static inline +unsigned long copy_to_user_with_tags(void __user *to, const void *from, + unsigned long n) +{ + if (access_ok(to, n)) + n = __copy_to_user_with_tags(to, from, n); + + return n; +} + +static inline +unsigned long copy_from_user_with_tags(void *to, const void __user *from, + unsigned long n) +{ + if (access_ok(from, n)) + n = __copy_from_user_with_tags(to, from, n); + + return n; +} + +#define strlen_user(str) strnlen_user(str, ~0UL >> 1) +long strnlen_user(const char __user *str, long count) __pure; + +long __strncpy_from_user(char *dst, const char *src, long count); + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(src, 1)) + return -EFAULT; + return __strncpy_from_user(dst, src, count); +} + + +unsigned long __fill_user(void __user *mem, unsigned long len, const u8 b); + +static inline __must_check unsigned long +fill_user(void __user *to, unsigned long n, const u8 b) +{ + if (!access_ok(to, n)) + return n; + + return __fill_user(to, n, b); +} + +#define __clear_user(mem, len) __fill_user(mem, len, 0) 
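+/* Both return the number of bytes left uncleared (0 means success);
+ * clear_user() below additionally validates the range with access_ok(). */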
+#define clear_user(to, n) fill_user(to, n, 0) + + +unsigned long __fill_user_with_tags(void *, unsigned long, unsigned long, unsigned long); + +/* Filling aligned user pointer 'to' with 'n' bytes of 'dw' double words: */ +static inline __must_check unsigned long +fill_user_with_tags(void __user *to, unsigned long n, unsigned long tag, unsigned long dw) +{ + if (!access_ok(to, n)) + return n; + + return __fill_user_with_tags(to, n, tag, dw); +} + +static inline __must_check unsigned long +clear_user_with_tags(void __user *ptr, unsigned long length, unsigned long tag) +{ + return fill_user_with_tags(ptr, length, tag, 0); +} + +#ifdef CONFIG_PROTECTED_MODE + +static inline int PUT_USER_AP(e2k_ptr_t *ptr, u64 base, + u64 len, u64 off, u64 rw) +{ + if ((long)ptr & 0xf) { + /* not aligned */ + return -EFAULT; + } + + TRY_USR_PFAULT { + if (base == 0) { + E2K_STORE_NULLPTR_QWORD(&AWP(ptr).lo); + } else { + u64 tmp; + + tmp = MAKE_AP_HI(base, len, off, rw); + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_VALUE_WITH_TAG(&AWP(ptr).hi, tmp, + E2K_AP_HI_ETAG); + tmp = MAKE_AP_LO(base, len, off, rw); + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_VALUE_WITH_TAG(&AWP(ptr).lo, tmp, + E2K_AP_LO_ETAG); + } + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +static inline int PUT_USER_PL_V2(e2k_pl_lo_t *plp, u64 entry) +{ + e2k_pl_lo_t tmp = MAKE_PL_V2(entry).lo; + + if ((long)plp & (sizeof(e2k_pl_lo_t) - 1)) { + /* not aligned */ + return -EFAULT; + } + + TRY_USR_PFAULT { + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_VALUE_WITH_TAG(plp, tmp.PL_lo_value, E2K_PL_ETAG); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +static inline int PUT_USER_PL_V6(e2k_pl_t *plp, u64 entry, u32 cui) +{ + e2k_pl_t tmp = MAKE_PL_V6(entry, cui); + + if ((long)plp & (sizeof(e2k_pl_t) - 1)) { + /* not aligned */ + return -EFAULT; + } + + TRY_USR_PFAULT { + /* FIXME: need implement for guest kernel + * to support virtualization */ + NATIVE_STORE_TAGGED_QWORD(plp, tmp.PLLO_value, tmp.PLHI_value, + E2K_PLLO_ETAG, E2K_PLHI_ETAG); + } CATCH_USR_PFAULT { + return -EFAULT; + } END_USR_PFAULT + + return 0; +} + +static inline int CPU_PUT_USER_PL(e2k_pl_t *plp, u64 entry, u32 cui, + bool is_cpu_iset_v6) +{ + if (is_cpu_iset_v6) { + return PUT_USER_PL_V6(plp, entry, cui); + } else { + int ret = put_user(0UL, &plp->PLHI_value); + if (ret) + return ret; + return PUT_USER_PL_V2(&plp->PLLO_item, entry); + } +} + +static inline int PUT_USER_PL(e2k_pl_t *plp, u64 entry, u32 cui) +{ + return CPU_PUT_USER_PL(plp, entry, cui, IS_CPU_ISET_V6()); +} + +#define __GET_USER_VAL_TAGD(val, tag, ptr) \ +({ \ + int res; \ + TRY_USR_PFAULT { \ + /* FIXME: should be paravirtualized */ \ + NATIVE_LOAD_VAL_AND_TAGD((ptr), (val), (tag)); \ + res = 0; \ + } CATCH_USR_PFAULT { \ + res = -EFAULT; \ + } END_USR_PFAULT \ + res; \ +}) + +#define GET_USER_VAL_TAGD(val, tag, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__guvt_ptr = (ptr); \ + access_ok(__guvt_ptr, sizeof(*__guvt_ptr)) ? 
\ + __GET_USER_VAL_TAGD((val), (tag), __guvt_ptr) : -EFAULT; \ +}) + +#endif /* CONFIG_PROTECTED_MODE */ + +#endif /* _E2K_UACCESS_H_ */ diff --git a/arch/e2k/include/asm/ucontext.h b/arch/e2k/include/asm/ucontext.h new file mode 100644 index 0000000..46144b2 --- /dev/null +++ b/arch/e2k/include/asm/ucontext.h @@ -0,0 +1,48 @@ +#ifndef _E2K_UCONTEXT_H +#define _E2K_UCONTEXT_H + +#include +#include + +struct ucontext_32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + struct sigcontext uc_mcontext; + union { + compat_sigset_t uc_sigmask;/* mask last for extensibility */ + unsigned long long pad[16]; + }; + struct extra_ucontext uc_extra; /* for compatibility */ +}; + +#ifdef CONFIG_PROTECTED_MODE +struct ucontext_prot { + unsigned long uc_flags; + unsigned long __align; + e2k_ptr_t uc_link; + stack_prot_t uc_stack; + struct sigcontext_prot uc_mcontext; + union { + sigset_t uc_sigmask; + unsigned long long pad[16]; + }; + struct extra_ucontext uc_extra; /* for compatibility */ +}; +#endif /* CONFIG_PROTECTED_MODE */ + +typedef struct rt_sigframe { + u64 __pad_args[8]; /* Reserve space in data stack for the handler */ + union { + siginfo_t info; + compat_siginfo_t compat_info; + }; + union { + struct ucontext uc; + struct ucontext_32 uc_32; + struct ucontext_prot uc_prot; + }; +} rt_sigframe_t; + +extern int restore_rt_frame(rt_sigframe_t *, struct k_sigaction *); +#endif /* ! _E2K_UCONTEXT_H */ diff --git a/arch/e2k/include/asm/umalloc.h b/arch/e2k/include/asm/umalloc.h new file mode 100644 index 0000000..ae7335f --- /dev/null +++ b/arch/e2k/include/asm/umalloc.h @@ -0,0 +1,70 @@ + +#ifndef _E2K_UMALLOC_H_ +#define _E2K_UMALLOC_H_ + + +#include + + + +typedef struct { + u32 m_size; + u32 m_alloced; + u32 m_used; + u32 m_real; +} mallocstat_t; + + + // Small chunk pools + +struct subpoolhdr; +typedef struct subpoolhdr subpoolhdr_t; + +struct mem_moved_poolhdr; +typedef struct mem_moved_poolhdr mem_moved_poolhdr_t; + +// we can't use include/list.h !!! +struct list_head_um{ + struct list_head_um *next, *prev; +}; + +// we can't use linux/rt_lock.h !!! 
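[Editor's note - for orientation, not part of the original patch] The *_um stand-ins here re-declare only the layout of the kernel objects that umalloc needs, because (as the comments say) the real headers cannot be included from this header. For comparison, the canonical kernel list head, which list_head_um above mirrors field for field, is:

	/* as declared in the kernel's linux/types.h */
	struct list_head {
		struct list_head *next, *prev;
	};

struct rt_mutex_um below takes the cruder route: it just reserves opaque storage (30 u64 words, an upper bound chosen by the authors) large enough to overlay the real rt_mutex.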
+struct rt_mutex_um{
+	u64 tmp[30];
+};
+typedef struct {
+	struct list_head_um head;
+	struct rt_mutex_um lock;
+	u32 mainp;
+	u32 size;		// size of chunk
+} umlc_pool_t;
+
+
+#define MAX_CHUNKS 10
+// index of big chunk
+#define BIG_CHUNK_IND (MAX_CHUNKS-1)
+
+
+// heap descriptor
+typedef struct {
+	umlc_pool_t pools[MAX_CHUNKS];
+	atomic_t gc_lock;		// lock for garbage collection
+	mem_moved_poolhdr_t *mem_moved;	// reference to mem_moved hdr
+	u32 allused;	// total size of valid arrays at the moment
+	u32 allreal;	// sum of real sizes of mallocs
+	u32 allsize;	// total size occupied by the heap
+} allpools_t;
+
+
+extern void dump_malloc_cart(void);
+extern e2k_addr_t sys_malloc(size_t size);
+extern void sys_free(e2k_addr_t addr, size_t size);
+extern void init_sem_malloc(allpools_t *allpools);
+extern int mem_set_empty_tagged_dw(void __user *ptr, s64 size, u64 dw);
+struct task_struct;
+extern void init_pool_malloc(struct task_struct *, struct task_struct *);
+
+extern int clean_descriptors(void __user *list, unsigned long list_size);
+extern int clean_single_descriptor(e2k_ptr_t descriptor);
+#endif /* _E2K_UMALLOC_H_ */
+
diff --git a/arch/e2k/include/asm/unaligned.h b/arch/e2k/include/asm/unaligned.h
new file mode 100644
index 0000000..e203c29
--- /dev/null
+++ b/arch/e2k/include/asm/unaligned.h
@@ -0,0 +1,25 @@
+#ifndef _E2K_UNALIGNED_H_
+#define _E2K_UNALIGNED_H_
+
+/*
+ * The e2k arch can do unaligned accesses itself, just as i386 can.
+ *
+ * The strange macros are there to make sure these can't
+ * be misused in a way that makes them not work on other
+ * architectures where unaligned accesses aren't as simple.
+ *
+ * BUT there is a hardware bug which forbids usage of
+ * unaligned accesses and DAM together.
+ */
+
+#ifdef CONFIG_ACCESS_CONTROL
+# include
+#else
+# include
+# include
+
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#endif
+
+#endif /* _E2K_UNALIGNED_H_ */
diff --git a/arch/e2k/include/asm/unistd.h b/arch/e2k/include/asm/unistd.h
new file mode 100644
index 0000000..b0fed80
--- /dev/null
+++ b/arch/e2k/include/asm/unistd.h
@@ -0,0 +1,59 @@
+#ifndef _E2K_UNISTD_H_
+#define _E2K_UNISTD_H_
+
+#include
+#ifndef __ASSEMBLY__
+#include
+#endif /* __ASSEMBLY__ */
+#include
+
+#define NR_fast_syscalls_mask 0x7
+/* Must be a power of 2 (for simpler checks in assembler entry) */
+#define NR_fast_syscalls 8
+
+
+#define __NR__brk __NR_brk
+#define __NR_newstat __NR_stat
+#define __NR_newlstat __NR_lstat
+#define __NR_newfstat __NR_fstat
+#define __NR_olduselib __NR_uselib
+
+/* On e2k these are called "pread" and "pwrite" */
+#define __IGNORE_pread64
+#define __IGNORE_pwrite64
+
+#define __IGNORE_semget
+#define __IGNORE_semctl
+
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
+
+#endif /* _E2K_UNISTD_H_ */
diff --git a/arch/e2k/include/asm/user.h b/arch/e2k/include/asm/user.h
new file mode 100644
index 0000000..4cebe8e
--- /dev/null
+++ b/arch/e2k/include/asm/user.h
@@ -0,0 +1,225 @@
+#ifndef _E2K_USER_H_
+#define _E2K_USER_H_
+
+#include
+#include
+
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ * this will be used by gdb to figure out where the data and stack segments
+ * are within the file, and what virtual addresses to use.
+ */
+
+#define __CHECK_USER 0x1034567887654301ULL
+#define __CHECK_KERNEL 0x1234567887654321ULL
+#define MLT_NUM (16 * 3)
+
+/* FIXME comments
+ * ATTENTION!!! Any change should be submitted to the debugger
+ */
+
+struct user_regs_struct {
+	unsigned long long sizeof_struct;	// interface with the debugger
+	unsigned long long g[32];
+
+	unsigned long long psr;
+	unsigned long long upsr;
+
+	unsigned long long oscud_lo;
+	unsigned long long oscud_hi;
+	unsigned long long osgd_lo;
+	unsigned long long osgd_hi;
+	unsigned long long osem;
+	unsigned long long osr0;
+
+	unsigned long long pfpfr;
+	unsigned long long fpcr;
+	unsigned long long fpsr;
+
+	unsigned long long usbr;
+	unsigned long long usd_lo;
+	unsigned long long usd_hi;
+
+	unsigned long long psp_lo;
+	unsigned long long psp_hi;
+	unsigned long long pshtp;
+
+	unsigned long long cr0_lo;
+	unsigned long long cr0_hi;
+	unsigned long long cr1_lo;
+	unsigned long long cr1_hi;
+
+	unsigned long long cwd;
+
+	unsigned long long pcsp_lo;
+	unsigned long long pcsp_hi;
+	unsigned long long pcshtp;
+
+	unsigned long long cud_lo;
+	unsigned long long cud_hi;
+	unsigned long long gd_lo;
+	unsigned long long gd_hi;
+
+	unsigned long long cs_lo;
+	unsigned long long cs_hi;
+	unsigned long long ds_lo;
+	unsigned long long ds_hi;
+	unsigned long long es_lo;
+	unsigned long long es_hi;
+	unsigned long long fs_lo;
+	unsigned long long fs_hi;
+	unsigned long long gs_lo;
+	unsigned long long gs_hi;
+	unsigned long long ss_lo;
+	unsigned long long ss_hi;
+
+	unsigned long long aad[32*2];	/* %aad0.lo, %aad0.hi, %aad1.lo ...
 */
+	unsigned long long aaind[16];
+	unsigned long long aaincr[8];
+	unsigned long long aaldi[64];
+	unsigned long long aaldv;
+	unsigned long long aalda[64];
+	unsigned long long aaldm;
+	unsigned long long aasr;
+	unsigned long long aafstr;
+	unsigned long long aasti[16];
+
+	unsigned long long clkr;
+	unsigned long long dibcr;
+	unsigned long long ddbcr;
+	unsigned long long dibar[4];
+	unsigned long long ddbar[4];
+	unsigned long long dimcr;
+	unsigned long long ddmcr;
+	unsigned long long dimar[2];
+	unsigned long long ddmar[2];
+	unsigned long long dibsr;
+	unsigned long long ddbsr;
+	unsigned long long dtcr;
+	unsigned long long dtarf;
+	unsigned long long dtart;
+
+	unsigned long long wd;
+
+	unsigned long long br;
+	unsigned long long bgr;
+
+	unsigned long long ip;
+	unsigned long long nip;
+	unsigned long long ctpr1;
+	unsigned long long ctpr2;
+	unsigned long long ctpr3;
+
+	unsigned long long eir;
+
+	unsigned long long tr;	/* unused */
+
+	unsigned long long cutd;
+	unsigned long long cuir;
+	unsigned long long tsd;	/* unused */
+
+	unsigned long long lsr;
+	unsigned long long ilcr;
+
+	long long sys_rval;
+	long long sys_num;
+	long long arg1;
+	long long arg2;
+	long long arg3;
+	long long arg4;
+	long long arg5;
+	long long arg6;
+
+/*
+ * Some space for backup/restore of extensions and tags of global registers,
+ * now placed at the end of the structure
+ */
+	unsigned char gtag[32];
+	unsigned short gext[32];
+/*
+ * additional part (for binary compiler)
+ */
+	unsigned long long rpr_hi;
+	unsigned long long rpr_lo;
+
+	unsigned long long tir_lo [TIR_NUM];
+	unsigned long long tir_hi [TIR_NUM];
+
+	unsigned long long trap_cell_addr [MAX_TC_SIZE];
+	unsigned long long trap_cell_val [MAX_TC_SIZE];
+	unsigned char trap_cell_tag [MAX_TC_SIZE];
+	unsigned long long trap_cell_info [MAX_TC_SIZE];
+
+	unsigned long long dam [DAM_ENTRIES_NUM];
+
+	unsigned long long sbbp [SBBP_ENTRIES_NUM];
+
+	unsigned long long mlt [MLT_NUM];
+
+/*
+ * CPU info
+ */
+	unsigned long long idr;
+	unsigned long long core_mode;
+
+/*
+ * iset v5 additions
+ */
+	unsigned long long lsr1;
+	unsigned long long ilcr1;
+
+	unsigned long long gext_v5[32];
+	unsigned char gext_tag_v5[32];
+/*
+ * Not actual registers, but still useful information
+ */
+	unsigned long long chain_stack_base;
+	unsigned long long proc_stack_base;
+
+/*
+ * iset v6 additions
+ */
+	unsigned long long dimtp_lo;
+	unsigned long long dimtp_hi;
+	unsigned long long ctpr1_hi;
+	unsigned long long ctpr2_hi;
+	unsigned long long ctpr3_hi;
+/*
+ * Please include new fields below
+ */
+};
+
+struct user_pt_regs {
+	/* empty */
+};
+
+struct user{
+/*
+ * We start with the registers, to mimic the way that "memory" is returned
+ * from the ptrace(3,...) function.
+ */
+	struct user_regs_struct regs;	/* Where the registers */
+					/* are actually stored */
+
+/* The rest of this junk is to help gdb figure out what goes where */
+
+	unsigned long int u_tsize;	/* Text segment size (pages). */
+	unsigned long int u_dsize;	/* Data segment size (pages). */
+	unsigned long int u_ssize;	/* Stack segment size (pages). */
+	unsigned long start_code;	/* text starting address */
+	unsigned long start_data;	/* data starting address */
+	unsigned long start_stack;	/* stack starting address */
+	long int signal;		/* Signal that caused the core dump. */
+	int reserved;			/* No longer used */
+	struct user_pt_regs * u_ar0;	/* Used by gdb to help find the */
+					/* values for the registers. */
+	unsigned long magic;		/* To uniquely identify a core file */
+	char u_comm[32];		/* User command that was responsible */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _E2K_USER_H_ */
diff --git a/arch/e2k/include/asm/vga.h b/arch/e2k/include/asm/vga.h
new file mode 100644
index 0000000..7c0324f
--- /dev/null
+++ b/arch/e2k/include/asm/vga.h
@@ -0,0 +1,86 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+#include
+#include
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define E2K_VGA_DIRECT_IOMEM
+
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+
+#define VGA_VRAM_PHYS_BASE 0x00000a0000UL	/* VGA video RAM low memory */
+#define VGA_VRAM_SIZE 0x0000020000UL		/* a0000 - c0000 */
+
+#ifdef E2K_VGA_DIRECT_IOMEM
+
+#define native_scr_writew(val, addr) (*(addr) = (val))
+#define native_scr_readw(addr) (*(addr))
+
+#define native_vga_readb(addr) (*(addr))
+#define native_vga_writeb(val, addr) (*(addr) = (val))
+
+#else
+
+#define VT_BUF_HAVE_RW
+
+static inline void native_scr_writew(u16 val, volatile u16 *addr)
+{
+	native_writew(val, addr);
+}
+static inline u16 native_scr_readw(volatile const u16 *addr)
+{
+	return native_readw(addr);
+}
+static inline void native_vga_writeb(u8 val, volatile u8 *addr)
+{
+	native_writeb(val, addr);
+}
+
+static inline u8 native_vga_readb(volatile const u8 *addr)
+{
+	return native_readb(addr);
+}
+
+#endif /* E2K_VGA_DIRECT_IOMEM */
+
+#ifdef CONFIG_KVM_GUEST_KERNEL
+/* native guest kernel */
+#include
+#elif defined(CONFIG_PARAVIRT_GUEST)
+/* paravirtualized host and guest kernel */
+#include
+#else /* ! CONFIG_KVM_GUEST_KERNEL && ! CONFIG_PARAVIRT_GUEST */
+/* native host kernel with or without virtualization */
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+	native_scr_writew(val, addr);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+	return native_scr_readw(addr);
+}
+static inline void vga_writeb(u8 val, volatile u8 *addr)
+{
+	native_vga_writeb(val, addr);
+}
+
+static inline u8 vga_readb(volatile const u8 *addr)
+{
+	return native_vga_readb(addr);
+}
+#endif /* CONFIG_KVM_GUEST_KERNEL */
+
+#endif
diff --git a/arch/e2k/include/asm/vmlinux.lds.h b/arch/e2k/include/asm/vmlinux.lds.h
new file mode 100644
index 0000000..b6b1d96
--- /dev/null
+++ b/arch/e2k/include/asm/vmlinux.lds.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_E2K_VMLINUX_LDS_H
+#define __ASM_E2K_VMLINUX_LDS_H
+
+#include
+
+#define E2K_BOOT_SETUP(bootsetup_align) \
+	.boot.data : AT(ADDR(.boot.data) - LOAD_OFFSET) { \
+		.
= ALIGN(bootsetup_align); \ + __boot_setup_start = .; \ + *(.boot.setup) \ + __boot_setup_end = .; \ + } + +#endif /* __ASM_E2K_VMLINUX_LDS_H */ diff --git a/arch/e2k/include/asm/word-at-a-time.h b/arch/e2k/include/asm/word-at-a-time.h new file mode 100644 index 0000000..be22d42 --- /dev/null +++ b/arch/e2k/include/asm/word-at-a-time.h @@ -0,0 +1,50 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include + +/* Unused */ +struct word_at_a_time { }; +#define WORD_AT_A_TIME_CONSTANTS { } + +/* This will give us 0xff for a zero char and 0x00 elsewhere */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, + const struct word_at_a_time *c) +{ + *bits = __builtin_e2k_pcmpeqb(a, 0); + return *bits; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, + const struct word_at_a_time *c) +{ + return bits; +} + +/* This will give us 0xff until the first zero char (excluding it) */ +static inline unsigned long create_zero_mask(unsigned long bits) +{ + return (bits - 1) & ~bits; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return __builtin_e2k_popcntd(mask) >> 3; +} + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. + */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + return LOAD_UNALIGNED_ZEROPAD(addr); +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/e2k/include/uapi/asm/Kbuild b/arch/e2k/include/uapi/asm/Kbuild new file mode 100644 index 0000000..f66554c --- /dev/null +++ b/arch/e2k/include/uapi/asm/Kbuild @@ -0,0 +1 @@ +# SPDX-License-Identifier: GPL-2.0 diff --git a/arch/e2k/include/uapi/asm/a.out.h b/arch/e2k/include/uapi/asm/a.out.h new file mode 100644 index 0000000..15284b3 --- /dev/null +++ b/arch/e2k/include/uapi/asm/a.out.h @@ -0,0 +1,35 @@ +#ifndef __E2K_A_OUT_H__ +#define __E2K_A_OUT_H__ + +#ifndef __ASSEMBLY__ + +struct exec { + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned int a_text; /* length of text, in bytes */ + unsigned int a_data; /* length of data, in bytes */ + + /* length of uninitialized data area for file, in bytes */ + unsigned int a_bss; + + /* length of symbol table data in file, in bytes */ + unsigned int a_syms; + unsigned int a_entry; /* start address */ + + /* length of relocation info for text, in bytes */ + unsigned int a_trsize; + + /* length of relocation info for data, in bytes */ + unsigned int a_drsize; +}; + +#endif /* __ASSEMBLY__ */ + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#endif + +#endif /* __E2K_A_OUT_H__ */ diff --git a/arch/e2k/include/uapi/asm/auxvec.h b/arch/e2k/include/uapi/asm/auxvec.h new file mode 100644 index 0000000..777896c --- /dev/null +++ b/arch/e2k/include/uapi/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef _E2K_AUXVEC_H +#define _E2K_AUXVEC_H + +#endif /* _E2K_AUXVEC_H */ diff --git a/arch/e2k/include/uapi/asm/bitsperlong.h b/arch/e2k/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000..0697e90 --- /dev/null +++ b/arch/e2k/include/uapi/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_E2K_BITSPERLONG_H +#define __ASM_E2K_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_E2K_BITSPERLONG_H 
*/
diff --git a/arch/e2k/include/uapi/asm/bootinfo.h b/arch/e2k/include/uapi/asm/bootinfo.h
new file mode 100644
index 0000000..cddc2ee
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/bootinfo.h
@@ -0,0 +1,211 @@
+#ifndef _UAPI_E2K_BOOTINFO_H_
+#define _UAPI_E2K_BOOTINFO_H_
+
+/*
+ * The motherboard types
+ */
+
+#define MB_TYPE_MIN 0
+#define MB_TYPE_E2K_BASE 0x00
+#define MB_TYPE_ES2_BASE (MB_TYPE_E2K_BASE + 20)
+#define MB_TYPE_E1CP_BASE (MB_TYPE_E2K_BASE + 50)
+#define MB_TYPE_ES4_BASE (MB_TYPE_E2K_BASE + 70)
+#define MB_TYPE_E8C_BASE (MB_TYPE_E2K_BASE + 80)
+#define MB_TYPE_MAX 0xa0
+
+#define MB_TYPE_ES2_PLATO1 (MB_TYPE_ES2_BASE + 0)
+#define MB_TYPE_ES2_BUTTERFLY (MB_TYPE_ES2_BASE + 1)
+#define MB_TYPE_ES2_RTC_FM33256 (MB_TYPE_ES2_BASE + 2)		/* FM33256 rtc */
+#define MB_TYPE_ES2_RTC_CY14B101P (MB_TYPE_ES2_BASE + 3)	/* CY14B101P rtc */
+#define MB_TYPE_ES2_APORIA (MB_TYPE_ES2_BASE + 5)		/* APORIA */
+#define MB_TYPE_ES2_NT (MB_TYPE_ES2_BASE + 6)			/* wearable terminal */
+/* Use this when CLKRs are not synchronized across the system */
+#define MB_TYPE_ES2_RTC_CY14B101P_MULTICLOCK (MB_TYPE_ES2_BASE + 7)
+#define MB_TYPE_ES2_CUB_COM (MB_TYPE_ES2_BASE + 8)
+#define MB_TYPE_ES2_MBCUB_C (MB_TYPE_ES2_BASE + 11)
+#define MB_TYPE_ES2_MB3S1_C (MB_TYPE_ES2_BUTTERFLY)
+#define MB_TYPE_ES2_MB3S_C_K (MB_TYPE_ES2_BASE + 14)
+#define MB_TYPE_ES2_MGA3D (MB_TYPE_ES2_BASE + 15)
+#define MB_TYPE_ES2_BC_M4211 (MB_TYPE_ES2_BASE + 16)
+#define MB_TYPE_ES2_EL2S4 (MB_TYPE_ES2_BASE + 17)
+/* By default all mb_versions > MB_TYPE_ES2_EL2S4
+ * have a cy14b101p rt clock. If not, fix is_cy14b101p_exist()
+ * in arch/l/kernel/i2c-spi/core.c
+ */
+
+#define MB_TYPE_E1CP_PMC (MB_TYPE_E1CP_BASE + 0)	/* E1CP with PMC */
+#define MB_TYPE_E1CP_IOHUB2_RAZBRAKOVSCHIK /* IOHUB2 razbrakovschik (screening rig) */ \
+		(MB_TYPE_E1CP_BASE + 1)
+#define MB_TYPE_MBE1C_PC (MB_TYPE_E1CP_BASE + 2)	/* E1CP with PMC */
+
+#define MB_TYPE_ES4_MBE2S_PC (MB_TYPE_ES4_BASE + 0)
+#define MB_TYPE_ES4_PC401 (MB_TYPE_ES4_BASE + 1)
+
+#define MB_TYPE_E8C (MB_TYPE_E8C_BASE + 0)
+
+
+/*
+ * The CPU types
+ */
+
+#define CPU_TYPE_E2S 0x03		/* E2S */
+#define CPU_TYPE_ES2_DSP 0x04		/* E2C+ */
+#define CPU_TYPE_ES2_RU 0x06		/* E2C Micron */
+#define CPU_TYPE_E8C 0x07		/* E8C */
+#define CPU_TYPE_E1CP 0x08		/* E1C+ */
+#define CPU_TYPE_E8C2 0x09		/* E8C2 */
+#define CPU_TYPE_E12C 0xa		/* E12C */
+#define CPU_TYPE_E16C 0xb		/* E16C */
+#define CPU_TYPE_E2C3 0xc		/* E2C3 */
+
+#define CPU_TYPE_SIMUL 0x3e		/* simulator */
+
+#define CPU_TYPE_MASK 0x3f		/* mask of CPU type */
+#define PROC_TYPE_MASK 0xc0		/* mask of MicroProcessor type */
+
+#define GET_CPU_TYPE(type) (((type) & CPU_TYPE_MASK) >> 0)
+
+/*
+ * The CPU type names
+ */
+
+#define GET_CPU_TYPE_NAME(type_field) \
+({ \
+	unsigned char type = GET_CPU_TYPE(type_field); \
+	char *name; \
+ \
+	switch (type) { \
+	case CPU_TYPE_E2S: \
+		name = "E2S"; \
+		break; \
+	case CPU_TYPE_ES2_DSP: \
+		name = "E2C+DSP"; \
+		break; \
+	case CPU_TYPE_ES2_RU: \
+		name = "E1C"; \
+		break; \
+	case CPU_TYPE_E8C: \
+		name = "E8C"; \
+		break; \
+	case CPU_TYPE_E1CP: \
+		name = "E1C+"; \
+		break; \
+	case CPU_TYPE_E8C2: \
+		name = "E8C2"; \
+		break; \
+	case CPU_TYPE_E12C: \
+		name = "E12C"; \
+		break; \
+	case CPU_TYPE_E16C: \
+		name = "E16C"; \
+		break; \
+	case CPU_TYPE_E2C3: \
+		name = "E2C3"; \
+		break; \
+	case CPU_TYPE_SIMUL: \
+		name = "SIMUL"; \
+		break; \
+	default: \
+		name = "unknown"; \
+	} \
+ \
+	name; \
+})
+
+/*
+ * The motherboard type names
+ */
+
+#define GET_MB_TYPE_NAME(type) \
+({ \
+	char *name; \
+ \
+	switch (type) { \
+ case MB_TYPE_ES2_MB3S1_C: \ + name = "MB3S1/C"; \ + break; \ + case MB_TYPE_ES2_MBCUB_C: \ + case MB_TYPE_ES2_PLATO1: \ + name = "MBKUB/C"; \ + break; \ + case MB_TYPE_ES2_MB3S_C_K: \ + name = "MB3S/C-K"; \ + break; \ + case MB_TYPE_ES2_NT: \ + name = "NT-ELBRUS-S"; \ + break; \ + case MB_TYPE_ES2_CUB_COM: \ + name = "CUB-COM"; \ + break; \ + case MB_TYPE_ES2_RTC_FM33256: \ + name = "MONOCUB+FM33256"; \ + break; \ + case MB_TYPE_ES2_RTC_CY14B101P: \ + name = "MONOCUB"; \ + break; \ + case MB_TYPE_ES2_RTC_CY14B101P_MULTICLOCK: \ + name = "MP1C1/V"; \ + break; \ + case MB_TYPE_ES2_EL2S4: \ + name = "EL2S4"; \ + break; \ + case MB_TYPE_ES2_MGA3D: \ + name = "MGA3D"; \ + break; \ + case MB_TYPE_ES2_BC_M4211: \ + name = "BC-M4211"; \ + break; \ + case MB_TYPE_E1CP_PMC: \ + name = "E1C+ PMC"; \ + break; \ + case MB_TYPE_E1CP_IOHUB2_RAZBRAKOVSCHIK: \ + name = "IOHUB2 razbrakovschik"; \ + break; \ + case MB_TYPE_MBE1C_PC: \ + name = "MBE1C-PC"; \ + break; \ + case MB_TYPE_ES4_MBE2S_PC: \ + name = "MBE2S-PC"; \ + break; \ + case MB_TYPE_ES4_PC401: \ + name = "PC-401"; \ + break; \ + case MB_TYPE_E8C: \ + name = "E8C"; \ + break; \ + default: \ + name = "unknown"; \ + } \ + \ + name; \ +}) + +#define GET_MB_USED_IN(type) \ +({ \ + char *name; \ + \ + switch (type) { \ + case MB_TYPE_ES2_PLATO1: \ + name = "Plato with softreset error"; \ + break; \ + case MB_TYPE_ES2_MBCUB_C: \ + name = "APM VK-2, APM VK-120, BV632, BV631"; \ + break; \ + case MB_TYPE_ES2_MB3S1_C: \ + name = "ELBRUS-3C-CVS, ELBRUS-3C"; \ + break; \ + case MB_TYPE_ES2_RTC_FM33256: \ + name = "MONOCUB+FM33256"; \ + break; \ + case MB_TYPE_ES2_RTC_CY14B101P: \ + name = "MONOCUB-M, MONOCUB-PC"; \ + break; \ + default: \ + name = NULL; \ + } \ + \ + name; \ +}) + + +#endif /* _UAPI_E2K_BOOTINFO_H_ */ diff --git a/arch/e2k/include/uapi/asm/byteorder.h b/arch/e2k/include/uapi/asm/byteorder.h new file mode 100644 index 0000000..77bdb79 --- /dev/null +++ b/arch/e2k/include/uapi/asm/byteorder.h @@ -0,0 +1,10 @@ +#ifndef _E2K_BYTEORDER_H_ +#define _E2K_BYTEORDER_H_ + +#include + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* _E2K_BYTEORDER_H_ */ diff --git a/arch/e2k/include/uapi/asm/e2k_api.h b/arch/e2k/include/uapi/asm/e2k_api.h new file mode 100644 index 0000000..8348659 --- /dev/null +++ b/arch/e2k/include/uapi/asm/e2k_api.h @@ -0,0 +1,353 @@ +#ifndef _UAPI_E2K_API_H_ +#define _UAPI_E2K_API_H_ + + +#ifndef __ASSEMBLY__ +typedef unsigned char __e2k_u8_t; +typedef unsigned short int __e2k_u16_t; +typedef unsigned int __e2k_u32_t; +typedef unsigned long long __e2k_u64_t; +typedef void *__e2k_ptr_t; +#endif /* __ASSEMBLY__ */ + +#ifndef __KERNEL__ + +#define E2K_SET_REG(reg_no, val) \ +({ \ + asm volatile ("adds \t0x0, %0, %%r" #reg_no \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define E2K_SET_DREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%dr" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#define E2K_SET_DGREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%dg" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) +#define E2K_SET_DGREG_NV(reg_no, val) \ +({ \ + asm ("addd \t%0, 0, %%dg" #reg_no \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + + +#define E2K_GET_BREG(reg_no) \ +({ \ + register __e2k_u32_t res; \ + asm volatile ("adds \t0x0, %%b[" #reg_no "], %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_GET_DBREG(reg_no) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("addd \t0x0, %%db[" #reg_no "], %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_BREG(reg_no, val) \ +({ 
\ + asm volatile ("adds \t0x0, %0, %%b[" #reg_no "]" \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define E2K_SET_DBREG(reg_no, val) \ +({ \ + asm volatile ("addd \t0x0, %0, %%db[" #reg_no "]" \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#define E2K_GET_SREG(reg_mnemonic) \ +({ \ + register __e2k_u32_t res; \ + asm volatile ("rrs \t%%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_GET_DSREG(reg_mnemonic) \ +({ \ + register __e2k_u64_t res; \ + asm volatile ("rrd \t%%" #reg_mnemonic ", %0" \ + : "=r" (res)); \ + res; \ +}) + +#define E2K_SET_SREG(reg_mnemonic, val) \ +({ \ + asm volatile ("rws \t%0, %%" #reg_mnemonic \ + : \ + : "ri" ((__e2k_u32_t) (val))); \ +}) + +#define E2K_SET_DSREG(reg_mnemonic, val) \ +({ \ + asm volatile ("rwd \t%0, %%" #reg_mnemonic \ + : \ + : "ri" ((__e2k_u64_t) (val))); \ +}) + +#endif /* __KERNEL__ */ + +#ifndef __ASSEMBLY__ + +typedef unsigned long long __e2k_syscall_arg_t; + +#define E2K_SYSCALL_CLOBBERS \ + "ctpr1", "ctpr2", "ctpr3", \ + "b[0]", "b[1]", "b[2]", "b[3]", \ + "b[4]", "b[5]", "b[6]", "b[7]" + +/* Transaction operation transaction of argument type + * __e2k_syscall_arg_t */ +#ifdef __ptr64__ +#define __E2K_SYSCAL_ARG_ADD "addd,s" +#else +#define __E2K_SYSCAL_ARG_ADD "adds,s" +#endif + +#define __E2K_SYSCALL_0(_trap, _sys_num, _arg1) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_1(_trap, _sys_num, _arg1) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_2(_trap, _sys_num, _arg1, _arg2) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_3(_trap, _sys_num, _arg1, _arg2, _arg3) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) 
(_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_4(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_5(_trap, _sys_num, _arg1, _arg2, _arg3, _arg4, _arg5) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + "}\n" \ + "call %%ctpr1, wbs = %#\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_6(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define __E2K_SYSCALL_7(_trap, _sys_num, _arg1, \ + _arg2, _arg3, _arg4, _arg5, _arg6, _arg7) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ("{\n" \ + "sdisp %%ctpr1, %[trap]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[sys_num], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + 
__E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + "}\n" \ + "{\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg7], %%b[7]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]" \ + : [res] "=r" (__res) \ + : [trap] "i" ((int) (_trap)), \ + [sys_num] "ri" ((__e2k_syscall_arg_t) (_sys_num)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)), \ + [arg7] "ri" ((__e2k_syscall_arg_t) (_arg7)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#define E2K_SYSCALL(trap, sys_num, num_args, args...) \ + __E2K_SYSCALL_##num_args(trap, sys_num, args) + +#define ASM_CALL_8_ARGS(func_name_to_call, _arg0, _arg1, _arg2, _arg3, \ + _arg4, _arg5, _arg6, _arg7) \ +({ \ + register __e2k_syscall_arg_t __res; \ + asm volatile ( \ + "{\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg0], %%b[0]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg1], %%b[1]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg2], %%b[2]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg3], %%b[3]\n\t" \ + "disp %%ctpr1, " #func_name_to_call "\n\t" \ + "}\n\t" \ + "{\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg4], %%b[4]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg5], %%b[5]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg6], %%b[6]\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %[arg7], %%b[7]\n\t" \ + "call %%ctpr1, wbs = %#\n\t" \ + "}\n\t" \ + __E2K_SYSCAL_ARG_ADD " 0x0, %%b[0], %[res]\n\t" \ + : \ + [res] "=r" (__res) \ + : \ + [arg0] "ri" ((__e2k_syscall_arg_t) (_arg0)), \ + [arg1] "ri" ((__e2k_syscall_arg_t) (_arg1)), \ + [arg2] "ri" ((__e2k_syscall_arg_t) (_arg2)), \ + [arg3] "ri" ((__e2k_syscall_arg_t) (_arg3)), \ + [arg4] "ri" ((__e2k_syscall_arg_t) (_arg4)), \ + [arg5] "ri" ((__e2k_syscall_arg_t) (_arg5)), \ + [arg6] "ri" ((__e2k_syscall_arg_t) (_arg6)), \ + [arg7] "ri" ((__e2k_syscall_arg_t) (_arg7)) \ + : E2K_SYSCALL_CLOBBERS); \ + __res; \ +}) + +#endif /* !__ASSEMBLY__ */ + + +#endif /* _UAPI_E2K_API_H_ */ diff --git a/arch/e2k/include/uapi/asm/e2k_syswork.h b/arch/e2k/include/uapi/asm/e2k_syswork.h new file mode 100644 index 0000000..00bb50a --- /dev/null +++ b/arch/e2k/include/uapi/asm/e2k_syswork.h @@ -0,0 +1,96 @@ +#ifndef _UAPI_E2K_SYSWORK_H_ +#define _UAPI_E2K_SYSWORK_H_ + +#include +#include + +/* + * IDE info + */ +#define ALL_IDE 1 +#define USING_DMA 2 + +/* + * works for e2k_syswork + */ +#define PRINT_MMAP 1 +#define PRINT_STACK 2 +#define PRINT_TASKS 3 +#define GET_ADDR_PROT 4 +#define PRINT_REGS 6 +#define PRINT_ALL_MMAP 7 +#define FLUSH_CMD_CACHES 8 +#define SET_TAGS 17 +#define CHECK_TAGS 18 +#define IDE_INFO 20 +#define INSTR_EXEC 21 +#define IREQ_SET_TO_CPU 22 +#define PRINT_T_TRUSS 23 +#define START_CLI_INFO 24 +#define PRINT_CLI_INFO 25 +#define SYS_MKNOD 26 +#define SET_DBG_MODE 27 +#define E2K_SC_RESTART 28 +#define PRINT_PIDS 29 +#define ADD_END_OF_WORK 30 +#define GET_END_OF_WORK 31 +#define START_OF_WORK 32 +#define DO_E2K_HALT 33 +#ifdef CONFIG_PROC_FS +#define PRINT_STATM 35 +#endif +#define READ_ECMOS 36 +#define WRITE_ECMOS 37 +#define KERNEL_TRACE_BEGIN 38 +#define KERNEL_TRACE_END 39 +#define PRINT_INTERRUPT_INFO 40 +#define CLEAR_INTERRUPT_INFO 41 +#define 
STOP_INTERRUPT_INFO 42
+#define START_TRACE_EVENT 45
+#define STOP_TRACE_EVENT 46
+#define START_TRACE_LAST_EVENT 48
+#define STOP_TRACE_LAST_EVENT 49
+#define READ_BOOT 53
+#define WRITE_BOOT 54
+#define READ_PTRACE_REGS 55
+#define WRITE_PTRACE_REGS 56
+#define GET_CONTEXT 57
+#define FAST_RETURN 58		/* Used to estimate the time needed */
+				/* for entering the OS */
+#define TEST_OVERFLOW 59	/* To test kernel procedure/chain */
+				/* stack overflow */
+#define E2K_ACCESS_VM 60	/* Read/write current procedure */
+				/* stack */
+#define KERNEL_FTRACE 61
+#define USER_CONTROL_INTERRUPT 62	/* user can control all interrupts */
+					/* (for debugging hardware) */
+
+
+/* modes for sys_access_hw_stacks */
+enum {
+	E2K_READ_CHAIN_STACK,
+	E2K_READ_PROCEDURE_STACK,
+	E2K_WRITE_PROCEDURE_STACK,
+	E2K_GET_CHAIN_STACK_OFFSET,
+	E2K_GET_CHAIN_STACK_SIZE,
+	E2K_GET_PROCEDURE_STACK_SIZE,
+	E2K_READ_CHAIN_STACK_EX,
+	E2K_READ_PROCEDURE_STACK_EX,
+	E2K_WRITE_PROCEDURE_STACK_EX,
+	E2K_WRITE_CHAIN_STACK_EX,
+};
+
+typedef struct icache_range {
+	unsigned long long start;
+	unsigned long long end;
+} icache_range_t;
+
+#define e2k_syswork(arg1, arg2, arg3) \
+({ \
+	long __res; \
+	__res = E2K_SYSCALL(LINUX_SYSCALL_TRAPNUM, __NR_e2k_syswork, 3, \
+			arg1, arg2, arg3); \
+	(int)__res; \
+})
+
+#endif /* _UAPI_E2K_SYSWORK_H_ */
diff --git a/arch/e2k/include/uapi/asm/errno.h b/arch/e2k/include/uapi/asm/errno.h
new file mode 100644
index 0000000..969b343
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/errno.h
@@ -0,0 +1,6 @@
+#ifndef _I386_ERRNO_H
+#define _I386_ERRNO_H
+
+#include
+
+#endif
diff --git a/arch/e2k/include/uapi/asm/fcntl.h b/arch/e2k/include/uapi/asm/fcntl.h
new file mode 100644
index 0000000..27fa498
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/fcntl.h
@@ -0,0 +1,2 @@
+#include
+#include
diff --git a/arch/e2k/include/uapi/asm/ioctl.h b/arch/e2k/include/uapi/asm/ioctl.h
new file mode 100644
index 0000000..b279fe0
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/ioctl.h
@@ -0,0 +1 @@
+#include
diff --git a/arch/e2k/include/uapi/asm/ioctls.h b/arch/e2k/include/uapi/asm/ioctls.h
new file mode 100644
index 0000000..dd12291
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/ioctls.h
@@ -0,0 +1,17 @@
+#ifndef _E2K_IOCTLS_H_
+#define _E2K_IOCTLS_H_
+
+/*
+ * We are too far from real ioctl handling and it is difficult to predict
+ * any errors now. So I accept the i386 (ia64) ioctl stuff as the basis.
+ */
+
+
+#include
+#include
+
+#define TIOCGHAYESESP 0x545E	/* Get Hayes ESP configuration */
+#define TIOCSHAYESESP 0x545F	/* Set Hayes ESP configuration */
+#define TIOCGDB 0x547F		/* enable GDB stub mode on this tty */
+
+#endif /* _E2K_IOCTLS_H_ */
diff --git a/arch/e2k/include/uapi/asm/ipcbuf.h b/arch/e2k/include/uapi/asm/ipcbuf.h
new file mode 100644
index 0000000..61689ce
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/ipcbuf.h
@@ -0,0 +1,27 @@
+#ifndef _E2K_IPCBUF_H_
+#define _E2K_IPCBUF_H_
+
+/*
+ * The ipc64_perm structure for E2K architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_IPCBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/iset_ver.h b/arch/e2k/include/uapi/asm/iset_ver.h new file mode 100644 index 0000000..e288fcd --- /dev/null +++ b/arch/e2k/include/uapi/asm/iset_ver.h @@ -0,0 +1,55 @@ +#ifndef _E2K_UAPI_ISET_VER_H_ +#define _E2K_UAPI_ISET_VER_H_ + +#ifndef __ASSEMBLY__ + +/* + * IMPORTANT: instruction sets are numbered in increasing order, + * each next iset being backwards compatible with all the + * previous ones. + */ +typedef enum e2k_iset_ver { + E2K_ISET_GENERIC, + E2K_ISET_V2 = 2, + E2K_ISET_V3 = 3, + E2K_ISET_V4 = 4, + E2K_ISET_V5 = 5, + E2K_ISET_V6 = 6, +} e2k_iset_ver_t; + +#define E2K_ISET_V2_MASK (1 << E2K_ISET_V2) +#define E2K_ISET_V3_MASK (1 << E2K_ISET_V3) +#define E2K_ISET_V4_MASK (1 << E2K_ISET_V4) +#define E2K_ISET_V5_MASK (1 << E2K_ISET_V5) +#define E2K_ISET_V6_MASK (1 << E2K_ISET_V6) + +#define E2K_ISET_SINCE_V2_MASK (-1) +#define E2K_ISET_SINCE_V3_MASK (E2K_ISET_SINCE_V2_MASK & ~E2K_ISET_V2_MASK) +#define E2K_ISET_SINCE_V4_MASK (E2K_ISET_SINCE_V3_MASK & ~E2K_ISET_V3_MASK) +#define E2K_ISET_SINCE_V5_MASK (E2K_ISET_SINCE_V4_MASK & ~E2K_ISET_V4_MASK) +#define E2K_ISET_SINCE_V6_MASK (E2K_ISET_SINCE_V5_MASK & ~E2K_ISET_V5_MASK) + +enum { + /* generic e2k iset */ + ELBRUS_GENERIC_ISET = E2K_ISET_GENERIC, + /* Cubic, Turmalin */ + ELBRUS_S_ISET = E2K_ISET_V2, + /* E2S (E4C) */ + ELBRUS_2S_ISET = E2K_ISET_V3, + /* E8C */ + ELBRUS_8C_ISET = E2K_ISET_V4, + /* E1C+ */ + ELBRUS_1CP_ISET = E2K_ISET_V4, + /* E8C2 */ + ELBRUS_8C2_ISET = E2K_ISET_V5, + /* E12C */ + ELBRUS_12C_ISET = E2K_ISET_V6, + /* E16C */ + ELBRUS_16C_ISET = E2K_ISET_V6, + /* E2C3 */ + ELBRUS_2C3_ISET = E2K_ISET_V6, +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* !_E2K_UAPI_ISET_VER_H_ */ diff --git a/arch/e2k/include/uapi/asm/kexec.h b/arch/e2k/include/uapi/asm/kexec.h new file mode 100644 index 0000000..9f80db8 --- /dev/null +++ b/arch/e2k/include/uapi/asm/kexec.h @@ -0,0 +1,26 @@ +#ifndef _UAPI_E2K_KEXEC_H_ +#define _UAPI_E2K_KEXEC_H_ + +#include +#include + +#define E2K_KEXEC_IOCTL_BASE 'E' + +struct kexec_reboot_param { + char *cmdline; + int cmdline_size; + void *image; + u64 image_size; + void *initrd; + u64 initrd_size; +}; + +struct lintel_reboot_param { + void *image; + u64 image_size; +}; + +#define KEXEC_REBOOT _IOR(E2K_KEXEC_IOCTL_BASE, 0, struct kexec_reboot_param) +#define LINTEL_REBOOT _IOR(E2K_KEXEC_IOCTL_BASE, 0, struct lintel_reboot_param) + +#endif diff --git a/arch/e2k/include/uapi/asm/kvm.h b/arch/e2k/include/uapi/asm/kvm.h new file mode 100644 index 0000000..2147e9a --- /dev/null +++ b/arch/e2k/include/uapi/asm/kvm.h @@ -0,0 +1,488 @@ +#ifndef _UAPI_ASM_E2K_KVM_H +#define _UAPI_ASM_E2K_KVM_H + +/* + * KVM e2k specific structures and definitions + * + */ + +#ifndef __ASSEMBLY__ + +#include +#include + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +/* Select e2k specific features in */ +#define __KVM_HAVE_IOAPIC +#define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_PIT +#define __KVM_HAVE_DEVICE_ASSIGNMENT +#define __KVM_HAVE_USER_NMI +#define __KVM_HAVE_GUEST_DEBUG +#define __KVM_HAVE_MSIX +#define __KVM_HAVE_MCE +#define __KVM_HAVE_VCPU_EVENTS + +/* KVM (for /dev/kvm fds) capabilities (especially for 
e2k arch) */
+/* the CAP numbers are deliberately large so as not to intersect with other arches */
+/* Other arches' CAPs can be extended and appended in the future */
+/* A better place for these defines would be the arch-independent header */
+/* include/uapi/linux/kvm.h, as for all other arches */
+#define KVM_CAP_E2K_SV_VM 300		/* paravirtualized guest without any */
+					/* hardware support */
+#define KVM_CAP_E2K_SW_PV_VM 301	/* paravirtualized kernel without any */
+					/* hardware support that can be run */
+					/* as host (hypervisor) and as guest; */
+					/* a special case for debug purposes */
+#define KVM_CAP_E2K_HV_VM 302		/* fully virtualized guest machines */
+					/* using hardware extensions */
+#define KVM_CAP_E2K_HW_PV_VM 303	/* paravirtualized guest machines */
+					/* using hardware extensions */
+
+/* Flags are bits 63:32 of KVM_CREATE_VM argument */
+#define KVM_E2K_VM_TYPE_MASK 0x00000000ffffffffULL
+#define KVM_E2K_VM_FLAG_MASK 0xffffffff00000000ULL
+/* VM types, to be used as argument to KVM_CREATE_VM */
+#define KVM_E2K_SV_VM_TYPE 0	/* software virtualized guest without */
+				/* any hardware support; */
+				/* it is now the default VM type because */
+				/* it is available on all CPUs */
+#define KVM_E2K_SW_PV_VM_TYPE 1	/* paravirtualized kernel without any */
+				/* hardware support that can be run */
+				/* as host (hypervisor) and as guest; */
+				/* a special case for debug purposes */
+#define KVM_E2K_HV_VM_TYPE 2	/* fully virtualized guest machines */
+				/* using hardware extensions */
+#define KVM_E2K_HW_PV_VM_TYPE 3	/* paravirtualized guest machines */
+				/* using hardware extensions */
+
+#define KVM_E2K_EPIC_VM_FLAG 0x100000000ULL	/* choose between paravirt */
+						/* APIC and EPIC models */
+
+/* KVM MMU capabilities */
+#define KVM_CAP_E2K_SHADOW_PT_MMU 310	/* is shadow PT enabled */
+#define KVM_CAP_E2K_TDP_MMU 311		/* is Two-Dimensional Paging */
+					/* mode enabled */
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+typedef struct kvm_memory_alias {
+	__u32 slot;	/* this has a different namespace than memory slots */
+	__u32 flags;
+	__u64 guest_alias_addr;
+	__u64 memory_size;
+	__u64 target_addr;
+} kvm_memory_alias_t;
+
+/* arch e2k additional flags for kvm_memory_region::flags */
+#define KVM_MEM_ADD_TYPE 0x0010		/* region should be added with */
+					/* type of memory */
+#define KVM_MEM_VCPU_RAM 0x0020		/* memory region is common RAM */
+#define KVM_MEM_VCPU_VRAM 0x0040	/* memory region is virtual */
+					/* registers emulation memory */
+#define KVM_MEM_IO_VRAM 0x0080		/* memory region is virtual IO memory */
+					/* to emulate ISA, VGA VRAM (low) */
+#define KVM_MEM_USER_RAM 0x0100		/* memory is mapped to user space of */
+					/* host application */
+					/* (in our case QEMU) */
+#define KVM_MEM_TYPE_MASK (KVM_MEM_VCPU_RAM | KVM_MEM_VCPU_VRAM | \
+				KVM_MEM_IO_VRAM | KVM_MEM_USER_RAM)
+
+typedef enum {
+	guest_ram_mem_type = 0x01,	/* memory is common RAM (low & high) */
+	guest_vram_mem_type = 0x02,	/* memory is virtual registers */
+					/* memory (VCPU, VMMU ...
emulation) */ + guest_io_vram_mem_type = 0x03, /* memory is virtual IO memory */ + /* to emulate ISA, VGA-VRAM (low) */ + guest_user_ram_mem_type = 0x04, /* memory is mapped to user space of */ + /* host application (QEMU) */ +} kvm_guest_mem_type_t; + +#define KVM_MAX_VRAM_AREAS 4 /* max number of VRAM areas */ + +/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ +struct kvm_pic_state { + __u8 last_irr; /* edge detection */ + __u8 irr; /* interrupt request register */ + __u8 imr; /* interrupt mask register */ + __u8 isr; /* interrupt service register */ + __u8 priority_add; /* highest irq priority */ + __u8 irq_base; + __u8 read_reg_select; + __u8 poll; + __u8 special_mask; + __u8 init_state; + __u8 auto_eoi; + __u8 rotate_on_auto_eoi; + __u8 special_fully_nested_mode; + __u8 init4; /* true if 4 byte init */ + __u8 elcr; /* PIIX edge/trigger selection */ + __u8 elcr_mask; +}; + +typedef struct kvm_kernel_area_shadow { + __u32 slot; /* this has a different namespace than memory and */ + /* alias slots */ + __u32 flags; + __u64 kernel_addr; /* host kernel area base address */ + __u64 area_size; + __u64 guest_shadow_addr; /* guest kernel base address */ + /* should be alias of memory region */ +} kvm_kernel_area_shadow_t; + + +#define KVM_IOAPIC_NUM_PINS 24 + +struct kvm_ioapic_state { + __u64 base_address; + __u32 ioregsel; + __u32 id; + __u32 irr; + __u32 pad; + union { + __u64 bits; + struct { + __u8 vector; + __u8 delivery_mode:3; + __u8 dest_mode:1; + __u8 delivery_status:1; + __u8 polarity:1; + __u8 remote_irr:1; + __u8 trig_mode:1; + __u8 mask:1; + __u8 reserve:7; + __u8 reserved[4]; + __u8 dest_id; + } fields; + } redirtbl[KVM_IOAPIC_NUM_PINS]; + __u32 node_id; +}; + +#define KVM_IOEPIC_NUM_PINS 64 + +#define KVM_IRQCHIP_PIC_MASTER 0 +#define KVM_IRQCHIP_PIC_SLAVE 1 +#define KVM_IRQCHIP_IOAPIC 2 +#define KVM_IRQCHIP_IOEPIC_NODE0 3 +#define KVM_IRQCHIP_IOEPIC_NODE1 4 +#define KVM_IRQCHIP_IOEPIC_NODE2 5 +#define KVM_IRQCHIP_IOEPIC_NODE3 6 +#define KVM_NR_IRQCHIPS 7 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 upsr; +}; + +/* for KVM_GET_LAPIC and KVM_SET_LAPIC */ +#define KVM_APIC_REG_SIZE 0x400 +struct kvm_lapic_state { + char regs[KVM_APIC_REG_SIZE]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { + /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */ + __u64 USD_lo, USD_hi; + __u64 PSP_lo, PSP_hi; + __u64 PCSP_lo, PCSP_hi; + __u64 apic_base; + __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +struct kvm_debug_exit_arch { + __u32 exception; + __u32 pad; + __u64 pc; + __u64 dr6; + __u64 dr7; +}; + +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 +#define KVM_GUESTDBG_USE_HW_BP 0x00020000 +#define KVM_GUESTDBG_INJECT_DB 0x00040000 +#define KVM_GUESTDBG_INJECT_BP 0x00080000 + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { + __u64 debugreg[8]; +}; + +/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */ +#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001 +#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 + +/* for KVM_GET/SET_VCPU_EVENTS */ +struct kvm_vcpu_events { + struct { + __u8 injected; + __u8 nr; + __u8 has_error_code; + __u8 pad; + __u32 error_code; + } exception; + struct { + __u8 injected; + __u8 nr; + __u8 soft; + __u8 pad; + } interrupt; + struct { + __u8 injected; + __u8 pending; + __u8 masked; + __u8 pad; + } nmi; + __u32 sipi_vector; + __u32 flags; + __u32 reserved[10]; +}; + +/* for 
KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+	__u32 count;	/* can be 65536 */
+	__u16 latched_count;
+	__u8 count_latched;
+	__u8 status_latched;
+	__u8 status;
+	__u8 read_state;
+	__u8 write_state;
+	__u8 write_latch;
+	__u8 rw_mode;
+	__u8 mode;
+	__u8 bcd;
+	__u8 gate;
+	__s64 count_load_time;
+};
+
+struct kvm_pit_state {
+	struct kvm_pit_channel_state channels[3];
+};
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+struct kvm_pit_state2 {
+	struct kvm_pit_channel_state channels[3];
+	__u32 flags;
+	__u32 reserved[9];
+};
+
+struct kvm_reinject_control {
+	__u8 pit_reinject;
+	__u8 reserved[31];
+};
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/*
+ * e2k arch specific kvm dev/vm/vcpu ioctl's
+ */
+
+/*
+ * Guest machine info
+ */
+typedef struct kvm_guest_info {
+	int cpu_mdl;			/* guest CPU model (as at IDR) */
+	int cpu_rev;			/* guest CPU revision (as at IDR) */
+	int cpu_iset;			/* guest CPU instruction set version */
+	_Bool is_stranger;		/* guest is a stranger (foreign) type */
+					/* of CPU/machine */
+	_Bool mmu_support_pt_v6;	/* guest MMU supports new MMU Page */
+					/* Tables structures V6 */
+	_Bool is_pv;			/* guest is paravirtualized */
+					/* and should not be run as bare */
+	unsigned long features;		/* guest features */
+					/* see details */
+} kvm_guest_info_t;
+
+typedef struct kvm_guest_area_alloc {
+	void *region;		/* guest memory region to allocate area */
+				/* or NULL if any region */
+	void *area;		/* allocated area - result of ioctl() */
+	unsigned long start;	/* start address to allocate */
+				/* or 0 if any address */
+	unsigned long size;	/* area size (bytes) */
+	unsigned long align;	/* the area beginning align */
+	unsigned long flags;	/* allocation flags and modes */
+	kvm_guest_mem_type_t type;	/* type of memory: RAM, VRAM */
+} kvm_guest_area_alloc_t;
+
+typedef struct kvm_guest_area_reserve {
+	unsigned long start;	/* start address to reserve */
+	unsigned long size;	/* area size (bytes) */
+	kvm_guest_mem_type_t type;	/* type of memory: RAM, VRAM */
+} kvm_guest_area_reserve_t;
+
+/* guest area allocation flags */
+#define KVM_ALLOC_AREA_PRESENT 0x00000001ULL
+#define KVM_ALLOC_AREA_LOCKED 0x00000002ULL
+#define KVM_ALLOC_AREA_ZEROED 0x00000004ULL
+/* protections as part of flags */
+#define KVM_ALLOC_AREA_PROT_READ 0x00010000ULL
+#define KVM_ALLOC_AREA_PROT_WRITE 0x00020000ULL
+#define KVM_ALLOC_AREA_PROT_EXEC 0x00040000ULL
+/* some additional features */
+#define KVM_ALLOC_AREA_HUGE 0x00100000ULL	/* preferred mapping */
+						/* to huge pages */
+#define KVM_ALLOC_AREA_MAP_FLAGS \
+		(KVM_ALLOC_AREA_PROT_READ | KVM_ALLOC_AREA_PROT_WRITE | \
+			KVM_ALLOC_AREA_PROT_EXEC | \
+			KVM_ALLOC_AREA_HUGE)
+
+/* guest addresses map */
+#define KVM_GUEST_PAGE_OFFSET 0x00000010
+#define KVM_GUEST_KERNEL_IMAGE_BASE 0x00000020
+#define KVM_GUEST_VCPU_VRAM_PHYS_BASE 0x00000040
+#define KVM_GUEST_VCPU_VRAM_VIRT_BASE 0x00000080
+#define KVM_GUEST_VCPU_VRAM_SIZE 0x00000100
+#define KVM_GUEST_IO_VRAM_PHYS_BASE 0x00000200
+#define KVM_GUEST_IO_VRAM_VIRT_BASE 0x00000400
+#define KVM_GUEST_IO_VRAM_SIZE 0x00000800
+#define KVM_HOST_PAGE_OFFSET 0x00001000
+#define KVM_HOST_KERNEL_IMAGE_BASE 0x00002000
+#define KVM_KERNEL_AREAS_SIZE 0x00004000
+#define KVM_SHADOW_KERNEL_IMAGE_BASE 0x00008000
+#define KVM_GUEST_IO_PORTS_BASE 0x00010000
+#define KVM_GUEST_NBSR_BASE_NODE_0 0x00020000
+#define KVM_GUEST_NBSR_BASE_NODE_1 0x00040000
+#define KVM_GUEST_NBSR_BASE_NODE_2 0x00080000
+#define KVM_GUEST_NBSR_BASE_NODE_3 0x00100000
+
+/* flags of IO ports area mapping for guest */
+#define KVM_IO_PORTS_MMAP 0x1ff00000000 /* > max physical memory */ + +#define KVM_VCPU_MAX_GUEST_ARGS 4 + +typedef struct kvm_vcpu_guest_startup { + char *kernel_base; /* base address of guest kernel image */ + long kernel_size; /* guest kernel image size */ + char *entry_point; /* entry point to startup guest image */ + int args_num; /* number of additional arguments to pass */ + /* to guest image */ + unsigned long args[KVM_VCPU_MAX_GUEST_ARGS]; /* arguments */ + unsigned long flags; /* flags specifying guest properties */ + /* see details below */ + unsigned long trap_off; /* trap entry (ttable #0) offset from guest */ + /* kernel image base */ +} kvm_vcpu_guest_startup_t; + +typedef enum kvm_pci_region_type { + kvm_pci_undef_region_type, + kvm_pci_io_type, /* PCI IO ports region */ + kvm_pci_mem_type, /* PCI memory region */ + kvm_pci_pref_mem_type, /* PCI prefetchable memory region */ +} kvm_pci_region_type_t; + +typedef struct kvm_pci_region { + int node_id; /* the node # */ + kvm_pci_region_type_t type; /* the region type: IO/MEM/PMEM */ + unsigned long base; /* the base address of the region */ + unsigned long size; /* the region size */ +} kvm_pci_region_t; + +typedef struct kvm_base_addr_node { + int node_id; /* the node # */ + unsigned long base; /* the base address */ +} kvm_base_addr_node_t; + +typedef struct kvm_guest_nbsr_state { + int node_id; + unsigned int rt_pcim0; + unsigned int rt_pcim1; + unsigned int rt_pcim2; + unsigned int rt_pcim3; + unsigned int rt_pciio0; + unsigned int rt_pciio1; + unsigned int rt_pciio2; + unsigned int rt_pciio3; + unsigned int rt_pcimp_b0; + unsigned int rt_pcimp_b1; + unsigned int rt_pcimp_b2; + unsigned int rt_pcimp_b3; + unsigned int rt_pcimp_e0; + unsigned int rt_pcimp_e1; + unsigned int rt_pcimp_e2; + unsigned int rt_pcimp_e3; + unsigned int rt_pcicfgb; + unsigned long rt_msi; + unsigned int iommu_ctrl; + unsigned long iommu_ptbar; + unsigned long iommu_dtbar; + unsigned long iommu_err; + unsigned long iommu_err_info; + unsigned int prepic_ctrl2; + unsigned int prepic_err_stat; + unsigned int prepic_err_int; + unsigned int prepic_linp0; + unsigned int prepic_linp1; + unsigned int prepic_linp2; + unsigned int prepic_linp3; + unsigned int prepic_linp4; + unsigned int prepic_linp5; +} kvm_guest_nbsr_state_t; + +#endif /* __ASSEMBLY__ */ + +/* + * Flags specifying guest properties (see field flags above) + */ +#define NATIVE_KERNEL_IMAGE_GUEST_FLAG 0x0000000000000001UL +#define PARAVIRT_KERNEL_IMAGE_GUEST_FLAG 0x0000000000000002UL +#define LINTEL_IMAGE_GUEST_FLAG 0x0000000000000100UL + +#define E2K_SYSCALL_TRAP_ENTRY_SIZE (2 * 1024) /* 2Kb */ +#define KVM_GUEST_STARTUP_SYSCALL_NUM 12 /* # of system call to launch */ + /* guest using such method */ +#define KVM_GUEST_STARTUP_ENTRY_NUM 32 /* # of trap table entry to */ + /* launch guest using direct */ + /* control transfer (call or */ + /* return) */ +#define KVM_PV_VCPU_TRAP_ENTRY_NUM 36 /* # of trap table entry to */ + /* launch trap handler of */ + /* paravirtualized guest */ + /* (same as ttable #0) */ + +#ifndef __ASSEMBLY__ + +#define KVM_GET_GUEST_ADDRESS _IOWR(KVMIO, 0xe2, unsigned long *) +#define KVM_SETUP_VCPU _IO(KVMIO, 0xe3) +#define KVM_ALLOC_GUEST_AREA _IOWR(KVMIO, 0xe4, \ + kvm_guest_area_alloc_t) +#define KVM_VCPU_GUEST_STARTUP _IOW(KVMIO, 0xe5, \ + kvm_vcpu_guest_startup_t) +#define KVM_SET_KERNEL_IMAGE_SHADOW _IOW(KVMIO, 0xe6, \ + kvm_kernel_area_shadow_t) +#define KVM_SET_IRQCHIP_BASE _IOW(KVMIO, 0xe7, unsigned long) +#define KVM_SET_SYS_TIMER_BASE _IOW(KVMIO, 0xe8, 
unsigned long) +#define KVM_SET_SPMC_CONF_BASE _IOW(KVMIO, 0xe9, unsigned long) +#define KVM_RESERVE_GUEST_AREA _IOWR(KVMIO, 0xea, \ + kvm_guest_area_reserve_t) +#define KVM_SET_SPMC_CONF_BASE_SPMC_IN_QEMU \ + _IOW(KVMIO, 0xeb, unsigned long) +#define KVM_SET_I2C_SPI_CONF_BASE _IOW(KVMIO, 0xec, unsigned long) +#define KVM_SET_GUEST_INFO _IOW(KVMIO, 0xed, unsigned long) +#define KVM_GET_NBSR_STATE _IOR(KVMIO, 0xee, \ + kvm_guest_nbsr_state_t) +#define KVM_CREATE_SIC_NBSR _IO(KVMIO, 0xef) +#define KVM_SET_PCI_REGION _IOW(KVMIO, 0xf0, kvm_pci_region_t) +#define KVM_SET_COUNT_NUMA_NODES _IOW(KVMIO, 0xf1, unsigned long) +#define KVM_SET_MAX_NR_NODE_CPU _IOW(KVMIO, 0xf2, unsigned long) +#define KVM_SET_CEPIC_FREQUENCY _IOW(KVMIO, 0xf3, unsigned long) +#define KVM_SET_WD_PRESCALER_MULT _IOW(KVMIO, 0xf4, unsigned long) +/* IOCTL 0xf5 reserved for Imagination GPU passthrough */ +#define KVM_SET_LEGACY_VGA_PASSTHROUGH _IOW(KVMIO, 0xf6, unsigned long) + +/* e2k-specific exit reasons from KVM to userspace assistance */ +#define KVM_EXIT_E2K_NOTIFY_IO 33 +#define KVM_EXIT_E2K_RESTART 37 +#define KVM_EXIT_E2K_PANIC 38 + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_ASM_E2K_KVM_H */ diff --git a/arch/e2k/include/uapi/asm/kvm_para.h b/arch/e2k/include/uapi/asm/kvm_para.h new file mode 100644 index 0000000..daae6cd --- /dev/null +++ b/arch/e2k/include/uapi/asm/kvm_para.h @@ -0,0 +1,57 @@ +#ifndef _ASM_E2K_KVM_PARA_H +#define _ASM_E2K_KVM_PARA_H + +#include + +#define KVM_FEATURE_CLOCKSOURCE 0 +#define KVM_FEATURE_NOP_IO_DELAY 1 +#define KVM_FEATURE_MMU_OP 2 + +#define MSR_KVM_WALL_CLOCK 0x11 +#define MSR_KVM_SYSTEM_TIME 0x12 + +#define KVM_MAX_MMU_OP_BATCH 32 + +/* Operations for KVM_HC_MMU_OP */ +#define KVM_MMU_OP_WRITE_PTE 1 +#define KVM_MMU_OP_FLUSH_TLB 2 +#define KVM_MMU_OP_RELEASE_PT 3 + +/* Payload for KVM_HC_MMU_OP */ +struct kvm_mmu_op_header { + __u32 op; + __u32 pad; +}; + +struct kvm_mmu_op_write_pte { + struct kvm_mmu_op_header header; + __u64 pte_phys; + __u64 pte_val; +}; + +struct kvm_mmu_op_flush_tlb { + struct kvm_mmu_op_header header; +}; + +struct kvm_mmu_op_release_pt { + struct kvm_mmu_op_header header; + __u64 pt_phys; +}; + +#ifdef __KERNEL__ + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +extern void kvmclock_init(void); + +#endif + +#endif /* _ASM_E2K_KVM_PARA_H */ diff --git a/arch/e2k/include/uapi/asm/mas.h b/arch/e2k/include/uapi/asm/mas.h new file mode 100644 index 0000000..93bc317 --- /dev/null +++ b/arch/e2k/include/uapi/asm/mas.h @@ -0,0 +1,411 @@ +#ifndef _E2K_UAPI_MAS_H_ +#define _E2K_UAPI_MAS_H_ + +/* + * Memory Address Specifier. 
+ * + * +-----------------------+-------+ + * | opc | | + * +-------+-------+-------+ mod + + * | dc | na | be | | + * +-------+-------+-------+-------+ + * 6-----5 4 3 2-----0 + * + * be - big endian flag + * na - non-aligned access flag + * dc - DCACHEs disable flag + * opc - special MMU or AAU operation opcode + * mod - operation modifier + * + */ + + + +/* MAS masks */ + +#define MAS_MOD_MASK 0x07 +#define MAS_OPC_MASK 0x78 +#define MAS_ENDIAN_MASK 0x08 +#define MAS_NONALIGNED_MASK 0x10 +#define MAS_DCACHE_MASK 0x60 + +/* MAS bits */ + +#define MAS_MOD_BITS 3 +#define MAS_OPC_BITS 4 +#define MAS_ENDIAN_BITS 1 +#define MAS_NONALIGNED_BITS 1 +#define MAS_DCACHE_BITS 2 + +/* MAS shifts */ + +#define MAS_MOD_SHIFT 0x00 +#define MAS_OPC_SHIFT 0x03 +#define MAS_ENDIAN_SHIFT 0x03 +#define MAS_NONALIGNED_SHIFT 0x04 +#define MAS_DCACHE_SHIFT 0x05 + +#define MAS_MOD_DEFAULT 0x0UL +#define MAS_ENDIAN_DEFAULT 0x0UL +#define MAS_NONALIGNED_DEFAULT 0x0UL +#define MAS_DCACHE_DEFAULT 0x0UL + +/* LOAD (non-speculative) MAS modes for channels 0 and 3 */ + +#define _MAS_MODE_LOAD_OPERATION 0UL /* an operation */ +#define _MAS_MODE_LOAD_PA 1UL /* reading by physical */ + /* address */ +#define _MAS_MODE_LOAD_OP_CHECK 2UL /* conditional operation */ + /* depending on the "check" */ + /* lock state */ +#define _MAS_MODE_LOAD_OP_UNLOCK 3UL /* an operation with memory */ + /* location depending on the */ + /* "check" lock state and */ + /* memory location unlocking */ +#define _MAS_MODE_LOAD_OP_SPEC 3UL /* speculative load */ +#define _MAS_MODE_FILL_OP 4UL /* fill operation */ +#define _MAS_MODE_LOAD_OP_LOCK_CHECK 4UL /* semi-speculative operation */ + /* with memory lock check */ +#define _MAS_MODE_LOAD_OP_TRAP_ON_STORE 5UL /* an operation with locking */ + /* of the memory location by */ + /* the "trap on store" lock - */ + /* until v2 */ +#define _MAS_MODE_LOAD_OP_TRAP_ON_LD 6UL /* an operation with locking */ + /* of the memory location by */ + /* the "trap on load/store" */ +#define _MAS_MODE_LOAD_OP_WAIT 7UL /* an operation with locking */ + /* of the memory location by */ + /* the "wait" lock */ +#define _MAS_MODE_LOAD_OP_SPEC_LOCK_CHECK 7UL /* speculative operation */ + /* with memory lock check */ +#define _MAS_MODE_DAM_LOAD 7UL /* an operation with looking */ + /* of the disambiguation*/ + /* memory table */ + +/* LOAD (non-speculative) MAS modes for channels 0 and 2 */ +#define _MAS_MODE_LOAD_OP_WAIT_1 5UL /* an operation with locking */ + /* of the memory location by */ + /* the "wait" lock - since V5 */ + +/* LOAD (non-speculative) MAS modes for channels 2 and 5. 
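+ * As with the channel 0/3 variants above, several encodings share one
+ * numeric value (e.g. MAS_MODE_LOAD_OP_UNLOCK and MAS_MODE_LOAD_OP_SPEC
+ * are both 3UL); which meaning applies is presumably selected by the
+ * instruction and channel carrying the MAS, not by the value alone.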
*/ + +#define MAS_MODE_LOAD_OPERATION 0UL /* an operation */ +#define MAS_MODE_LOAD_PA 1UL /* reading by physical */ + /* address */ +#define MAS_MODE_LOAD_OP_CHECK 2UL /* conditional operation */ + /* depending on the "check" */ + /* lock state */ +#define MAS_MODE_LOAD_OP_UNLOCK 3UL /* an operation with memory */ + /* location depending on the */ + /* "check" lock state and */ + /* memory location unlocking */ +#define MAS_MODE_LOAD_OP_SPEC 3UL /* speculative load */ +#define MAS_MODE_FILL_OP 4UL /* fill operation */ +#define MAS_MODE_LOAD_OP_LOCK_CHECK 4UL /* semi-speculative operation */ + /* with memory lock check */ +#define MAS_MODE_LOAD_RESERVED3 5UL /* reserved */ +#define MAS_MODE_LOAD_IOPAGE 6UL /* I/O page access operation */ +#define MAS_MODE_LOAD_MMU_AAU_SPEC 7UL /* special MMU or AAU */ + /* operation */ +#define MAS_MODE_LOAD_OP_SPEC_LOCK_CHECK 7UL /* speculative operation */ + /* with memory lock check */ + + +/* STORE MAS modes for channels 2 and 5. */ + +#ifndef __ASSEMBLY__ +#define MAS_MODE_STORE_OPERATION 0UL /* an operation */ +#define MAS_MODE_STORE_PA 1UL /* writing by physical */ + /* address */ +#define MAS_MODE_STORE_OP_WAIT 2UL /* conditional operation */ + /* depending on the "wait" */ + /* lock state with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_OP_UNLOCK 3UL /* an operation with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_RESERVED2 4UL +#define MAS_MODE_STORE_NOP_UNLOCK 5UL /* same as */ + /* MAS_MODE_STORE_OP_UNLOCK */ + /* but no operation performed */ +#define MAS_MODE_STORE_IOPAGE 6UL /* I/O page access operation */ +#define MAS_MODE_STORE_MMU_AAU_SPEC 7UL /* special MMU or AAU */ + /* operation */ +#else /* __ASSEMBLY__ */ +#define MAS_MODE_STORE_OPERATION 0 /* an operation */ +#define MAS_MODE_STORE_PA 1 /* writing by physical */ + /* address */ +#define MAS_MODE_STORE_OP_WAIT 2 /* conditional operation */ + /* depending on the "wait" */ + /* lock state with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_OP_UNLOCK 3 /* an operation with memory */ + /* location unlocking */ +#define MAS_MODE_STORE_RESERVED2 4 +#define MAS_MODE_STORE_NOP_UNLOCK 5 /* same as */ + /* MAS_MODE_STORE_OP_UNLOCK */ + /* but no operation performed */ +#define MAS_MODE_STORE_IOPAGE 6 /* I/O page access operation */ +#define MAS_MODE_STORE_MMU_AAU_SPEC 7 /* special MMU or AAU */ + /* operation */ +#endif /* ! 
__ASSEMBLY__ */ + + +/* "Secondary" MAS'es (appeared in e2s) */ + +/* LOAD */ + +#define MAS_TRAP_ON_STORE_MASK 0x3 +#define MAS_LOAD_SEC_TRAP_ON_STORE 1UL /* secondary trap on store */ + +#define MAS_TRAP_ON_LD_ST_MASK 0x3 /* secondary trap */ +#define MAS_LOAD_SEC_TRAP_ON_LD_ST 2UL /* on load/store */ + +/* STORE */ + +#define MAS_SEC_NOP_UNLOCK_MASK 0x3 +#define MAS_MODE_STORE_SEC_NOP_UNLOCK 2UL /* secondary unlock */ + +/* BOTH */ + +/* Secondary SLT operation - both ld and st + * ld: channels 0 and 2 + * st: channels 2 and 5 */ +#define MAS_SEC_SLT 0x78UL + + +/* MAS "endian"-ness */ + +#define MAS_ENDIAN_LITTLE 0UL +#define MAS_ENDIAN_BIG 1UL + +/* MAS non-aligned access switch */ + +#define MAS_ALIGNED_ADDR 0UL +#define MAS_NONALIGNED_ADDR 1UL + + +/* MAS cache enablers */ + +#define MAS_CACHE_12E_V_E 0UL /* virtual, all caches enabled */ +#define MAS_CACHE_2E_P_E 0UL /* physical, DCACHE2,ECACHE */ + /* enabled only */ +#define MAS_CACHE_2E_E 1UL /* DCACHE2 and ECACHE enabled only*/ +#define MAS_CACHE_E_E 2UL /* ECACHE enabled only */ +#define MAS_CACHE_N_E 3UL /* nothing enabled */ + +/* MAS cache disablers */ + +#define MAS_CACHE_N_V_D 0UL /* virtual, nothing disabled */ +#define MAS_CACHE_1_P_D 0UL /* physical, DCACHE1 disabled only */ +#define MAS_CACHE_1_D 1UL /* DCACHE1 disabled only */ +#define MAS_CACHE_12_D 2UL /* DCACHE1 and DCACHE2 disabled */ +#define MAS_CACHE_ALL_D 3UL /* all caches disabled */ + +/* + * MAS OPCs + */ + +#ifndef __ASSEMBLY__ +/* mandatory group */ +#define MAS_OPC_CACHE_FLUSH 0UL /* Cache(s) flush operations */ +#define MAS_OPC_DCACHE_LINE_FLUSH 1UL /* Data cache(s) line flush */ + /* operations */ + +#define MAS_OPC_ICACHE_LINE_FLUSH 2UL /* Instruction cache(s) line */ + /* flush */ + /* operations */ +#define MAS_OPC_TLB_PAGE_FLUSH 2UL /* TLB page flush operations */ + +#define MAS_OPC_RESERVED1 3UL + +#define MAS_OPC_ICACHE_FLUSH 4UL /* Instruction cache(s) flush */ + /* operations */ +#define MAS_OPC_TLB_FLUSH 4UL /* TLB flush operations */ + +#define MAS_OPC_TLB_ADDR_PROBE 5UL /* TLB address probe */ + /* operations */ +#define MAS_OPC_TLB_ENTRY_PROBE 6UL /* TLB entry probe operations */ +#define MAS_OPC_AAU_REG 7UL /* AAU registers access */ + +/* optional group */ +#define MAS_OPC_MMU_REG 8UL /* MMU registers access */ +#define MAS_OPC_DTLB_REG 9UL /* DTLB registers access */ +#define MAS_OPC_L1_REG 10UL /* L1 cache registers access */ +#define MAS_OPC_L2_REG 11UL /* L2 cache registers access */ + +#define MAS_OPC_ICACHE_REG 12UL /* ICACHE registers access */ +#define MAS_OPC_ITLB_REG 12UL /* ITLB registers access */ + +#define MAS_OPC_DAM_REG 13UL /* DAM register(s) access */ +#define MAS_OPC_MLT_REG 13UL /* MLT register(s) access */ +#define MAS_OPC_CLW_REG 13UL /* CLW register(s) access */ +#define MAS_OPC_SNOOP_REG 13UL /* SNOOP register(s) access */ +#define MAS_OPC_MMU_DEBUG_REG 13UL /* MMU DEBUG registers access */ + +#define MAS_OPC_PCS_REG 14UL /* PCS (Procedure Chain Stack)*/ + /* registers */ + /* operations */ +#define MAS_OPC_RESERVED2 15UL +#else /* __ASSEMBLY__ */ +/* mandatory group */ +#define MAS_OPC_CACHE_FLUSH 0 /* Cache(s) flush operations */ +#define MAS_OPC_DCACHE_LINE_FLUSH 1 /* Data cache(s) line flush */ + /* operations */ + +#define MAS_OPC_ICACHE_LINE_FLUSH 2 /* Instruction cache(s) line */ + /* flush */ + /* operations */ +#define MAS_OPC_TLB_PAGE_FLUSH 2 /* TLB page flush operations */ + +#define MAS_OPC_RESERVED1 3 + +#define MAS_OPC_ICACHE_FLUSH 4 /* Instruction cache(s) flush */ + /* operations */ +#define 
MAS_OPC_TLB_FLUSH 4 /* TLB flush operations */ + +#define MAS_OPC_TLB_ADDR_PROBE 5 /* TLB address probe */ + /* operations */ +#define MAS_OPC_TLB_ENTRY_PROBE 6 /* TLB entry probe operations */ +#define MAS_OPC_AAU_REG 7 /* AAU registers access */ + +/* optional group */ +#define MAS_OPC_MMU_REG 8 /* MMU registers access */ +#define MAS_OPC_DTLB_REG 9 /* DTLB registers access */ +#define MAS_OPC_L1_REG 10 /* L1 cache registers access */ +#define MAS_OPC_L2_REG 11 /* L2 cache registers access */ + +#define MAS_OPC_ICACHE_REG 12 /* ICACHE registers access */ +#define MAS_OPC_ITLB_REG 12 /* ITLB registers access */ + +#define MAS_OPC_DAM_REG 13 /* DAM register(s) access */ +#define MAS_OPC_MLT_REG 13 /* MLT register(s) access */ +#define MAS_OPC_CLW_REG 13 /* CLW register(s) access */ +#define MAS_OPC_SNOOP_REG 13 /* SNOOP register(s) access */ +#define MAS_OPC_MMU_DEBUG_REG 13 /* MMU DEBUG registers access */ + +#define MAS_OPC_PCS_REG 14 /* PCS (Procedure Chain Stack)*/ + /* registers */ + /* operations */ +#define MAS_OPC_RESERVED2 15 +#endif /* ! __ASSEMBLY__ */ + +/* Popular complex MASes for some special Linux/E2K situations */ + +#define MAS_LOAD_OPERATION (MAS_MODE_LOAD_OPERATION << MAS_MOD_SHIFT) +#define MAS_STORE_OPERATION (MAS_MODE_STORE_OPERATION << MAS_MOD_SHIFT) +#define MAS_BYPASS_L1_CACHE (MAS_CACHE_1_D << MAS_DCACHE_SHIFT) +#define MAS_BYPASS_L12_CACHES (MAS_CACHE_12_D << MAS_DCACHE_SHIFT) +#define MAS_BYPASS_ALL_CACHES (MAS_CACHE_ALL_D << MAS_DCACHE_SHIFT) +#define MAS_NONALIGNED (MAS_NONALIGNED_ADDR << MAS_NONALIGNED_SHIFT) +#define MAS_IOADDR (MAS_MODE_STORE_IOPAGE << MAS_MOD_SHIFT) +#define MAS_BIGENDIAN (MAS_ENDIAN_BIG << MAS_ENDIAN_SHIFT) +#define MAS_FILL_OPERATION (MAS_MODE_FILL_OP << MAS_MOD_SHIFT) +#define MAS_LOAD_PA (MAS_MODE_LOAD_PA << MAS_MOD_SHIFT) +#define MAS_LOAD_SPEC (MAS_MODE_LOAD_OP_SPEC << MAS_MOD_SHIFT) +#define MAS_STORE_PA (MAS_MODE_STORE_PA << MAS_MOD_SHIFT) + +/* CACHE(s) flush */ +#define MAS_CACHE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_CACHE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* DCACHE line flush */ +#define MAS_DCACHE_LINE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_DCACHE_LINE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* DCACHE L1 registers access */ +#define MAS_DCACHE_L1_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_L1_REG << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* DCACHE L2 registers access */ +#define MAS_DCACHE_L2_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_L2_REG << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* ICACHE line and DTLB page flush */ +#define MAS_ICACHE_LINE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_ICACHE_LINE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) +#define MAS_TLB_PAGE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_TLB_PAGE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* ICACHE and TLB flush */ +#define MAS_ICACHE_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_ICACHE_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) +#define MAS_TLB_FLUSH ((MAS_MODE_STORE_MMU_AAU_SPEC << \ + MAS_MOD_SHIFT) | \ + ((MAS_OPC_TLB_FLUSH << \ + MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* MMU registers access */ +#define MAS_MMU_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \ + ((MAS_OPC_MMU_REG << MAS_OPC_SHIFT) & MAS_OPC_MASK)) + +/* Data TLB registers access */ +#define MAS_DTLB_REG ((MAS_MODE_STORE_MMU_AAU_SPEC << 
MAS_MOD_SHIFT) | \
+			((MAS_OPC_DTLB_REG << MAS_OPC_SHIFT) & MAS_OPC_MASK))
+
+/* CLW registers access */
+#define MAS_CLW_REG	((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \
+			((MAS_OPC_CLW_REG << MAS_OPC_SHIFT) & MAS_OPC_MASK))
+
+/* MMU DEBUG registers access */
+#define MAS_MMU_DEBUG_REG \
+		((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \
+		((MAS_OPC_MMU_DEBUG_REG << MAS_OPC_SHIFT) & \
+		MAS_OPC_MASK))
+
+/* VA probe */
+#define MAS_VA_PROBE	((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \
+			((MAS_OPC_TLB_ADDR_PROBE << MAS_OPC_SHIFT) & \
+			MAS_OPC_MASK))
+
+/* DTLB entry probe */
+#define MAS_ENTRY_PROBE	((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \
+			((MAS_OPC_TLB_ENTRY_PROBE << MAS_OPC_SHIFT) & \
+			MAS_OPC_MASK))
+
+/* Locking "wait" */
+#define MAS_WAIT_LOCK	(_MAS_MODE_LOAD_OP_WAIT << MAS_MOD_SHIFT)
+#define MAS_WAIT_LOCK_Q	(_MAS_MODE_LOAD_OP_WAIT_1 << MAS_MOD_SHIFT)
+
+/* Unlocking "wait" */
+#define MAS_WAIT_UNLOCK	(MAS_MODE_STORE_OP_WAIT << MAS_MOD_SHIFT)
+
+/* Locking trap on store */
+#define MAS_MLT_STORE_LOCK	(_MAS_MODE_LOAD_OP_TRAP_ON_STORE << \
+				MAS_MOD_SHIFT)
+#define MAS_MLT_SEC_STORE_LOCK	(MAS_LOAD_SEC_TRAP_ON_STORE)
+
+/* Locking trap on load/store */
+#define MAS_MLT_LOAD_LOCK	(_MAS_MODE_LOAD_OP_TRAP_ON_LD << MAS_MOD_SHIFT)
+#define MAS_MLT_SEC_LD_ST_LOCK	(MAS_LOAD_SEC_TRAP_ON_LD_ST)
+
+#define MAS_MLT_STORE_UNLOCK	(MAS_MODE_STORE_OP_UNLOCK << MAS_MOD_SHIFT)
+
+#define MAS_MLT_NOP_UNLOCK	(MAS_MODE_STORE_NOP_UNLOCK << MAS_MOD_SHIFT)
+
+#define MAS_MLT_SEC_NOP_UNLOCK	(MAS_MODE_STORE_SEC_NOP_UNLOCK << MAS_MOD_SHIFT)
+
+#define MAS_MLT_REG	((MAS_MODE_STORE_MMU_AAU_SPEC << MAS_MOD_SHIFT) | \
+			(((MAS_OPC_MMU_REG | MAS_OPC_ICACHE_FLUSH | \
+			MAS_OPC_DCACHE_LINE_FLUSH) << MAS_OPC_SHIFT) & \
+			MAS_OPC_MASK))
+
+/* DAM table */
+#define MAS_DAM_REG	((_MAS_MODE_DAM_LOAD << MAS_MOD_SHIFT) | \
+			(((MAS_OPC_DAM_REG) << MAS_OPC_SHIFT) & \
+			MAS_OPC_MASK))
+
+#endif /* _E2K_UAPI_MAS_H_ */
diff --git a/arch/e2k/include/uapi/asm/mman.h b/arch/e2k/include/uapi/asm/mman.h
new file mode 100644
index 0000000..4a0c90e
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/mman.h
@@ -0,0 +1,101 @@
+#ifndef _UAPI_E2K_MMAN_H_
+#define _UAPI_E2K_MMAN_H_
+
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang
+ *
+ * Adopted for Linux/E2K. To be extended for proper E2K mem. management.
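+ *
+ * E2K additionally packs a CUI field (presumably the compilation unit
+ * index used by protected mode) into bits 8-23 of the mmap()/mprotect()
+ * protection word; see PROT_CUI below. A minimal round-trip sketch,
+ * with 'cui' a caller-supplied 16-bit value:
+ *
+ *	int prot = PUT_CUI_TO_INT_PROT(PROT_READ | PROT_EXEC, cui);
+ *	unsigned int cui_back = GET_CUI_FROM_INT_PROT(prot); // == cui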
+ */
+
+#define PROT_NONE	0x0		/* page can not be accessed */
+#define PROT_READ	0x1		/* page can be read */
+#define PROT_WRITE	0x2		/* page can be written */
+#define PROT_EXEC	0x4		/* page can be executed */
+#define PROT_SEM	0x8		/* page may be used for atomic ops */
+#define PROT_GROWSDOWN	0x20		/* mprotect flag: extend change */
+					/* to start of growsdown vma */
+#define PROT_GROWSUP	0x40		/* mprotect flag: extend change */
+					/* to end of growsup vma */
+#define PROT_CUI	0xffff00
+#define PROT_CUI_SHIFT	8
+#define PROT_CUI_MASK	0xFFFF
+
+#define GET_CUI_FROM_INT_PROT(prot)	(((prot) >> PROT_CUI_SHIFT) & \
+					PROT_CUI_MASK)
+#define PUT_CUI_TO_INT_PROT(prot, cui)	((((cui) & PROT_CUI_MASK) << \
+					PROT_CUI_SHIFT) | prot)
+
+/* 0x01 - 0x03 are defined in linux/mman.h */
+#define MAP_TYPE	0x00000f	/* Mask for type of mapping */
+#define MAP_ANONYMOUS	0x000010	/* don't use a file */
+#define MAP_FIXED	0x000100	/* Interpret addr exactly */
+#define MAP_DENYWRITE	0x000800	/* ETXTBSY */
+#define MAP_GROWSDOWN	0x001000	/* stack-like segment */
+#define MAP_GROWSUP	0x002000	/* register stack-like segment */
+#define MAP_EXECUTABLE	0x004000	/* mark it as an executable */
+#define MAP_LOCKED	0x008000	/* pages are locked */
+#define MAP_NORESERVE	0x010000	/* don't check for reservations */
+#define MAP_POPULATE	0x020000	/* populate (prefault) pagetables */
+#define MAP_NONBLOCK	0x040000	/* do not block on IO */
+#define MAP_FIRST32	0x080000	/* in protected mode map in */
+					/* first 2 ** 32 area */
+#define MAP_WRITECOMBINED 0x100000	/* Write combine */
+#define MAP_HUGETLB	0x200000	/* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x400000	/* MAP_FIXED which doesn't unmap */
+					/* underlying mapping */
+
+#define MAP_STACK	MAP_GROWSDOWN
+
+#define MLOCK_ONFAULT	0x01	/* Lock pages in range after they are faulted in, do not prefault */
+
+#define MS_ASYNC	1		/* sync memory asynchronously */
+#define MS_INVALIDATE	2		/* invalidate the caches */
+#define MS_SYNC		4		/* synchronous memory sync */
+
+#define MCL_CURRENT	1		/* lock all current mappings */
+#define MCL_FUTURE	2		/* lock all future mappings */
+#define MCL_ONFAULT	4		/* lock all pages that are faulted in */
+
+
+#define MADV_NORMAL	0		/* no further special treatment */
+#define MADV_RANDOM	1		/* expect random page references */
+#define MADV_SEQUENTIAL	2		/* expect sequential page references */
+#define MADV_WILLNEED	3		/* will need these pages */
+#define MADV_DONTNEED	4		/* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_FREE	8		/* free pages only if memory pressure */
+#define MADV_REMOVE	9		/* remove these pages & resources */
+#define MADV_DONTFORK	10		/* don't inherit across fork */
+#define MADV_DOFORK	11		/* do inherit across fork */
+#define MADV_HWPOISON	100		/* poison a page for testing */
+#define MADV_SOFT_OFFLINE 101		/* soft offline page for testing */
+
+#define MADV_MERGEABLE	12		/* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP	16		/* Explicitly exclude from the core dump,
+					   overrides the coredump filter bits */
+#define MADV_DODUMP	17		/* Clear the MADV_DONTDUMP flag */
+
+#define MADV_WIPEONFORK	18		/* Zero memory on fork, child only */
+#define MADV_KEEPONFORK	19		/* Undo MADV_WIPEONFORK */
+
+#define MADV_COLD	20		/* deactivate these pages */
+#define MADV_PAGEOUT	21
/* reclaim these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#define PKEY_DISABLE_ACCESS 0x1 +#define PKEY_DISABLE_WRITE 0x2 +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ + PKEY_DISABLE_WRITE) + +#endif /* _UAPI_E2K_MMAN_H_ */ diff --git a/arch/e2k/include/uapi/asm/msgbuf.h b/arch/e2k/include/uapi/asm/msgbuf.h new file mode 100644 index 0000000..d8d912d --- /dev/null +++ b/arch/e2k/include/uapi/asm/msgbuf.h @@ -0,0 +1,27 @@ +#ifndef _E2K_MSGBUF_H_ +#define _E2K_MSGBUF_H_ + +/* + * The msqid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_MSGBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/mtrr.h b/arch/e2k/include/uapi/asm/mtrr.h new file mode 100644 index 0000000..4d97f11 --- /dev/null +++ b/arch/e2k/include/uapi/asm/mtrr.h @@ -0,0 +1,78 @@ +/* Generic MTRR (Memory Type Range Register) ioctls. + + Copyright (C) 1997-1999 Richard Gooch + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + Richard Gooch may be reached by email at rgooch@atnf.csiro.au + The postal address is: + Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 
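+
+    A hedged usage sketch (base, size and fd are illustrative only;
+    fd would be an open descriptor on the MTRR control file, e.g.
+    /proc/mtrr on x86):
+
+	struct mtrr_sentry sentry = {
+		.base = 0xd0000000,	// hypothetical frame buffer base
+		.size = 0x1000000,
+		.type = MTRR_TYPE_WRCOMB,
+	};
+	if (ioctl(fd, MTRRIOC_ADD_ENTRY, &sentry) < 0)
+		perror("MTRRIOC_ADD_ENTRY");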
+*/ +#ifndef _UAPI_LINUX_MTRR_H +#define _UAPI_LINUX_MTRR_H + +#include + +#define MTRR_IOCTL_BASE 'M' + +struct mtrr_sentry { + unsigned long base; /* Base address */ + unsigned long size; /* Size of region */ + unsigned int type; /* Type of region */ +}; + +struct mtrr_gentry { + unsigned int regnum; /* Register number */ + unsigned long base; /* Base address */ + unsigned long size; /* Size of region */ + unsigned int type; /* Type of region */ +}; + +/* These are the various ioctls */ +#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) +#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) +#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) +#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) +#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) +#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) +#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) +#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) +#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) +#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) + +/* These are the region types */ +#define MTRR_TYPE_UNCACHABLE 0 +#define MTRR_TYPE_WRCOMB 1 +/*#define MTRR_TYPE_ 2*/ +/*#define MTRR_TYPE_ 3*/ +#define MTRR_TYPE_WRTHROUGH 4 +#define MTRR_TYPE_WRPROT 5 +#define MTRR_TYPE_WRBACK 6 +#define MTRR_NUM_TYPES 7 + +#ifdef MTRR_NEED_STRINGS +static char *mtrr_strings[MTRR_NUM_TYPES] = { + "uncachable", /* 0 */ + "write-combining", /* 1 */ + "?", /* 2 */ + "?", /* 3 */ + "write-through", /* 4 */ + "write-protect", /* 5 */ + "write-back", /* 6 */ +}; +#endif + + +#endif /* _UAPI_LINUX_MTRR_H */ diff --git a/arch/e2k/include/uapi/asm/param.h b/arch/e2k/include/uapi/asm/param.h new file mode 100644 index 0000000..d1c59aa --- /dev/null +++ b/arch/e2k/include/uapi/asm/param.h @@ -0,0 +1,20 @@ +/* $Id: param.h,v 1.4 2008/12/19 12:44:14 atic Exp $ */ + +#ifndef _UAPI_E2K_PARAM_H_ +#define _UAPI_E2K_PARAM_H_ + + +#ifndef __KERNEL__ +#define HZ 100 +#endif + +#define EXEC_PAGESIZE 4096 /* Intel size. */ + /* Check for 64K native pages if used. */ + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _UAPI_E2K_PARAM_H_ */ diff --git a/arch/e2k/include/uapi/asm/poll.h b/arch/e2k/include/uapi/asm/poll.h new file mode 100644 index 0000000..c98509d --- /dev/null +++ b/arch/e2k/include/uapi/asm/poll.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/posix_types.h b/arch/e2k/include/uapi/asm/posix_types.h new file mode 100644 index 0000000..575deb1 --- /dev/null +++ b/arch/e2k/include/uapi/asm/posix_types.h @@ -0,0 +1,11 @@ +#ifndef _E2K_POSIX_TYPES_H_ +#define _E2K_POSIX_TYPES_H_ + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. + */ + +#include + +#endif /* _E2K_POSIX_TYPES_H_ */ diff --git a/arch/e2k/include/uapi/asm/protected_mode.h b/arch/e2k/include/uapi/asm/protected_mode.h new file mode 100644 index 0000000..bde0b9d --- /dev/null +++ b/arch/e2k/include/uapi/asm/protected_mode.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * arch/e2k/include/asm/protected_mode.h, v 1.0 07/04/2020. 
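+ *
+ * The debug-mode bits defined here are installed via the arch_prctl()
+ * syscall (see the PR_PM_DBG_MODE_* options below). A minimal sketch,
+ * assuming the raw syscall number is used directly:
+ *
+ *	syscall(__NR_arch_prctl, PR_PM_DBG_MODE_ADD, PM_SC_DBG_MODE_DEBUG);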
+ *
+ * Copyright (C) 2020 MCST
+ */
+
+/****************** E2K PROTECTED MODE SPECIFIC STUFF *******************/
+
+#ifndef _E2K_PROTECTED_MODE_H_
+#define _E2K_PROTECTED_MODE_H_
+
+/*
+ * PROTECTED MODE DEBUG CONTROLS:
+ * When a control below is set, the kernel reports extra info and the
+ * issues identified to the system journal. Use the command 'dmesg' to
+ * display these messages.
+ * Set the corresponding env vars to 0/1 to control particular checks,
+ * or use the arch_prctl() syscall to set up the debug mode.
+ */
+/* Protected syscall debug mode initialized: */
+#define PM_SC_DBG_MODE_INIT		1
+/* Output debug info on system calls to system journal: */
+#define PM_SC_DBG_MODE_DEBUG		2
+/* Output debug info on protected complex syscall wrappers to system journal: */
+#define PM_SC_DBG_MODE_COMPLEX_WRAPPERS	4
+/* Report issue to journal if syscall arg doesn't match expected format: */
+#define PM_SC_DBG_MODE_CHECK		8
+/* If error in arg format detected, don't block syscall but run it anyway: */
+#define PM_SC_DBG_MODE_WARN_ONLY	16
+/* Output to journal debug info on converting structures in syscall args: */
+#define PM_SC_DBG_MODE_CONV_STRUCT	32
+/* Output to journal debug info related to signal manipulation: */
+#define PM_SC_DBG_MODE_SIGNALS		64
+/* Don't output to journal warnings/alerts/errors (for better performance): */
+#define PM_SC_DBG_MODE_NO_ERR_MESSAGES	128
+
+/* libc specific mmu control stuff: */
+
+/* Enable check for dangling descriptors: */
+#define PM_MM_CHECK_4_DANGLING_POINTERS	256
+/* Zeroing freed descriptor contents: */
+#define PM_MM_ZEROING_FREED_POINTERS	512
+/* Emptying freed descriptor contents / light check for dangling descriptors: */
+#define PM_MM_EMPTYING_FREED_POINTERS	1024
+
+/* Enable all debug/diagnostic output to system journal: */
+#define PM_SC_DBG_MODE_ALL		0xff7f
+/* Disable debug/diagnostic output to system journal: */
+#define PM_SC_DBG_MODE_DISABLED		PM_SC_DBG_MODE_INIT
+
+#define IF_PM_DBG_MODE(mask)	\
+		(current->mm->context.pm_sc_debug_mode & (mask))
+
+#define PM_SC_DBG_MODE_DEFAULT	(PM_SC_DBG_MODE_CHECK \
+				| PM_SC_DBG_MODE_WARN_ONLY \
+				| PM_MM_EMPTYING_FREED_POINTERS)
+
+
+/*
+ * Arch-specific options for arch_prctl() syscall:
+ */
+
+/* PM debug mode controls */
+# define PR_PM_DBG_MODE_SET	8192
+# define PR_PM_DBG_MODE_GET	8193
+# define PR_PM_DBG_MODE_RESET	8194
+# define PR_PM_DBG_MODE_ADD	8195	/* adds to existing debug mode */
+# define PR_PM_DBG_MODE_DEL	8196	/* removes from existing mode */
+
+
+/*
+ * Flags for the protected_sys_clean_descriptors() function:
+ */
+/* 0 - clean freed descriptor list */
+#define CLEAN_DESCRIPTORS_SINGLE	1	/* clean single descriptor 'addr' */
+#define CLEAN_DESCRIPTORS_NO_GARB_COLL	2	/* No garbage collection */
+
+#endif /* _E2K_PROTECTED_MODE_H_ */
diff --git a/arch/e2k/include/uapi/asm/ptrace-abi.h b/arch/e2k/include/uapi/asm/ptrace-abi.h
new file mode 100644
index 0000000..3043bd7
--- /dev/null
+++ b/arch/e2k/include/uapi/asm/ptrace-abi.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_PTRACE_ABI_H
+#define _ASM_PTRACE_ABI_H
+
+#define PTRACE_OLDSETOPTIONS	21
+
+/* only useful for accessing 32bit programs / kernels */
+#define PTRACE_GET_THREAD_AREA	25
+#define PTRACE_SET_THREAD_AREA	26
+
+#ifdef __x86_64__
+# define PTRACE_ARCH_PRCTL	30
+#endif
+
+#define PTRACE_SYSEMU		31
+#define PTRACE_SYSEMU_SINGLESTEP 32
+
+#define PTRACE_SINGLEBLOCK	33	/* resume execution until next branch */
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/* configuration/status structure used in PTRACE_BTS_CONFIG and
+   PTRACE_BTS_STATUS commands.
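+
+   A hedged configuration sketch (the buffer size is illustrative;
+   ADDR points at the struct and DATA carries its size):
+
+	struct ptrace_bts_config cfg = {
+		.size	= 4096,
+		.flags	= PTRACE_BTS_O_TRACE | PTRACE_BTS_O_ALLOC,
+	};
+	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));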
+*/ +struct ptrace_bts_config { + /* requested or actual size of BTS buffer in bytes */ + __u32 size; + /* bitmask of below flags */ + __u32 flags; + /* buffer overflow signal */ + __u32 signal; + /* actual size of bts_struct in bytes */ + __u32 bts_size; +}; +#endif /* __ASSEMBLY__ */ + +#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ +#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ +#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG on buffer overflow + instead of wrapping around */ +#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */ + +#define PTRACE_BTS_CONFIG 40 +/* Configure branch trace recording. + ADDR points to a struct ptrace_bts_config. + DATA gives the size of that buffer. + A new buffer is allocated, if requested in the flags. + An overflow signal may only be requested for new buffers. + Returns the number of bytes read. +*/ +#define PTRACE_BTS_STATUS 41 +/* Return the current configuration in a struct ptrace_bts_config + pointed to by ADDR; DATA gives the size of that buffer. + Returns the number of bytes written. +*/ +#define PTRACE_BTS_SIZE 42 +/* Return the number of available BTS records for draining. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_GET 43 +/* Get a single BTS record. + DATA defines the index into the BTS array, where 0 is the newest + entry, and higher indices refer to older entries. + ADDR is pointing to struct bts_struct (see asm/ds.h). +*/ +#define PTRACE_BTS_CLEAR 44 +/* Clear the BTS buffer. + DATA and ADDR are ignored. +*/ +#define PTRACE_BTS_DRAIN 45 +/* Read all available BTS records and clear the buffer. + ADDR points to an array of struct bts_struct. + DATA gives the size of that buffer. + BTS records are read from oldest to newest. + Returns number of BTS records drained. +*/ + +#endif /* _ASM_PTRACE_ABI_H */ diff --git a/arch/e2k/include/uapi/asm/ptrace.h b/arch/e2k/include/uapi/asm/ptrace.h new file mode 100644 index 0000000..7b46542 --- /dev/null +++ b/arch/e2k/include/uapi/asm/ptrace.h @@ -0,0 +1,11 @@ +#ifndef _UAPI_E2K_PTRACE_H +#define _UAPI_E2K_PTRACE_H + + +#ifndef __ASSEMBLY__ + +/* 0x4200-0x4300 are reserved for architecture-independent additions. */ +#define PTRACE_SETOPTIONS 0x4200 + +#endif /* __ASSEMBLY__ */ +#endif /* _UAPI_E2K_PTRACE_H */ diff --git a/arch/e2k/include/uapi/asm/resource.h b/arch/e2k/include/uapi/asm/resource.h new file mode 100644 index 0000000..5b56568 --- /dev/null +++ b/arch/e2k/include/uapi/asm/resource.h @@ -0,0 +1,12 @@ +#ifndef _E2K_RESOURCE_H_ +#define _E2K_RESOURCE_H_ + +#include + +/* + * Redefine resource limits for e2k + */ +#undef _STK_LIM +#define _STK_LIM (16*1024*1024) + +#endif /* _E2K_RESOURCE_H_ */ diff --git a/arch/e2k/include/uapi/asm/sembuf.h b/arch/e2k/include/uapi/asm/sembuf.h new file mode 100644 index 0000000..816081c --- /dev/null +++ b/arch/e2k/include/uapi/asm/sembuf.h @@ -0,0 +1,22 @@ +#ifndef _E2K_SEMBUF_H_ +#define _E2K_SEMBUF_H_ + +/* + * The semid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. 
of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _E2K_SEMBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/setup.h b/arch/e2k/include/uapi/asm/setup.h new file mode 100644 index 0000000..7b4ae51 --- /dev/null +++ b/arch/e2k/include/uapi/asm/setup.h @@ -0,0 +1,6 @@ +#ifndef _UAPI_ASM_E2K_SETUP_H +#define _UAPI_ASM_E2K_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif /* _UAPI_ASM_E2K_SETUP_H */ diff --git a/arch/e2k/include/uapi/asm/shmbuf.h b/arch/e2k/include/uapi/asm/shmbuf.h new file mode 100644 index 0000000..c4bbaf6 --- /dev/null +++ b/arch/e2k/include/uapi/asm/shmbuf.h @@ -0,0 +1,38 @@ +#ifndef _E2K_SHMBUF_H_ +#define _E2K_SHMBUF_H_ + +/* + * The shmid64_ds structure for E2K architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. of current attaches */ + unsigned long __unused1; + unsigned long __unused2; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _E2K_SHMBUF_H_ */ diff --git a/arch/e2k/include/uapi/asm/sigcontext.h b/arch/e2k/include/uapi/asm/sigcontext.h new file mode 100644 index 0000000..b129843 --- /dev/null +++ b/arch/e2k/include/uapi/asm/sigcontext.h @@ -0,0 +1,72 @@ +#ifndef _UAPI_E2K_SIGCONTEXT_H_ +#define _UAPI_E2K_SIGCONTEXT_H_ + +#define MAX_TC_SIZE 10 + +#define TIR_NUM 19 +#define DAM_ENTRIES_NUM 32 +#define SBBP_ENTRIES_NUM 32 + +/* from user.h !!! 
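+ * (MLT_NUM sizes the mlt[] dump in struct sigcontext below -
+ * presumably 16 MLT entries of three 64-bit words each, hence 16 * 3.)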
*/ +#define MLT_NUM (16 * 3) + +struct sigcontext { + unsigned long long cr0_lo; + unsigned long long cr0_hi; + unsigned long long cr1_lo; + unsigned long long cr1_hi; + unsigned long long sbr; /* 21 Stack base register: top of */ + /* local data (user) stack */ + unsigned long long usd_lo; /* 22 Local data (user) stack */ + unsigned long long usd_hi; /* 23 descriptor: base & size */ + unsigned long long psp_lo; /* 24 Procedure stack pointer: */ + unsigned long long psp_hi; /* 25 base & index & size */ + unsigned long long pcsp_lo; /* 26 Procedure chain stack */ + unsigned long long pcsp_hi; /* 27 pointer: base & index & size */ +/* + * additional part (for binary compiler) + */ + unsigned long long rpr_hi; + unsigned long long rpr_lo; + + unsigned long long nr_TIRs; + unsigned long long tir_lo[TIR_NUM]; + unsigned long long tir_hi[TIR_NUM]; + unsigned long long trap_cell_addr[MAX_TC_SIZE]; + unsigned long long trap_cell_val[MAX_TC_SIZE]; + unsigned char trap_cell_tag[MAX_TC_SIZE]; + unsigned long long trap_cell_info[MAX_TC_SIZE]; + + unsigned long long dam[DAM_ENTRIES_NUM]; + + unsigned long long sbbp[SBBP_ENTRIES_NUM]; + + unsigned long long mlt[MLT_NUM]; + + unsigned long long upsr; +}; +/* + * This structure is used for compatibility + * All new fields must be added in this structure + */ +struct extra_ucontext { + int sizeof_extra_uc; /* size of used fields(in bytes) */ + int curr_cnt; /* current index into trap_celler */ + int tc_count; /* trap_celler records count */ + + /* + * For getcontext() + */ + int fpcr; + int fpsr; + int pfpfr; + + unsigned long long ctpr1; + unsigned long long ctpr2; + unsigned long long ctpr3; + + int sc_need_rstrt; +}; + + +#endif /* _UAPI_E2K_SIGCONTEXT_H_ */ diff --git a/arch/e2k/include/uapi/asm/siginfo.h b/arch/e2k/include/uapi/asm/siginfo.h new file mode 100644 index 0000000..ddc8b3f --- /dev/null +++ b/arch/e2k/include/uapi/asm/siginfo.h @@ -0,0 +1,34 @@ +#ifndef _E2K_SIGINFO_H_ +#define _E2K_SIGINFO_H_ + +#include +#include + +#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) +#define __ARCH_SI_TRAPNO +#define __ARCH_SI_BAND_T int + +#include + +#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) +#define SIGEV_PAD_SIZE32 ((SIGEV_MAX_SIZE/sizeof(int)) - 3) + +/* + * SIGSEGV si_codes + */ +#define SEGV_BOUNDS 3 /* Bounds overflow */ +#undef NSIGSEGV +#define NSIGSEGV 3 + +/* + * SIGTRAP si_codes + */ +#define DIAG_CT 3 /* Diagnostic CT condition */ +#define DIAG_ADDR 4 /* Diagnostic address */ +#define DIAG_PRED 5 /* Diagnostic predicate */ +#define DIAG_OP 6 /* Diagnostic operand */ +#define MEM_LOCK 7 /* Memory lock */ +#undef NSIGTRAP +#define NSIGTRAP 6 + +#endif /* _E2K_SIGINFO_H_ */ diff --git a/arch/e2k/include/uapi/asm/signal.h b/arch/e2k/include/uapi/asm/signal.h new file mode 100644 index 0000000..c8c9ce7 --- /dev/null +++ b/arch/e2k/include/uapi/asm/signal.h @@ -0,0 +1,97 @@ +#ifndef _UAPI_E2K_SIGNAL_H_ +#define _UAPI_E2K_SIGNAL_H_ + +#include + +/* Avoid too many header ordering problems. 
*/ +struct siginfo; + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for + * backwards compatibility */ +#define SIGUNUSED 31 + +#define SIGRESTART 33 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND +#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ + +#define SA_RESTORER 0x04000000 + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 8192 + + +# ifndef __ASSEMBLY__ +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + + +# endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_E2K_SIGNAL_H_ */ diff --git a/arch/e2k/include/uapi/asm/socket.h b/arch/e2k/include/uapi/asm/socket.h new file mode 100644 index 0000000..6b71384 --- /dev/null +++ b/arch/e2k/include/uapi/asm/socket.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/sockios.h b/arch/e2k/include/uapi/asm/sockios.h new file mode 100644 index 0000000..def6d47 --- /dev/null +++ b/arch/e2k/include/uapi/asm/sockios.h @@ -0,0 +1 @@ +#include diff --git a/arch/e2k/include/uapi/asm/stat.h b/arch/e2k/include/uapi/asm/stat.h new file mode 100644 index 0000000..2d430b5 --- /dev/null +++ b/arch/e2k/include/uapi/asm/stat.h @@ -0,0 +1,46 @@ +#ifndef _UAPI_E2K_STAT_H_ +#define _UAPI_E2K_STAT_H_ + +/* + * Tuned up to match GNU libc defaults. 
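+ *
+ * STAT_HAVE_NSEC below advertises that struct stat carries nanosecond
+ * timestamp fields (st_atime_nsec and friends) alongside the classic
+ * seconds fields.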
+ */ + +#include + +#define STAT_HAVE_NSEC 1 + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + dev_t st_dev; + ino_t st_ino; + mode_t st_mode; + nlink_t st_nlink; + uid_t st_uid; + gid_t st_gid; + dev_t st_rdev; + off_t st_size; + off_t st_blksize; + off_t st_blocks; + time_t st_atime; + unsigned long st_atime_nsec; + time_t st_mtime; + unsigned long st_mtime_nsec; + time_t st_ctime; + unsigned long st_ctime_nsec; +}; + + +#endif /* _UAPI_E2K_STAT_H_ */ diff --git a/arch/e2k/include/uapi/asm/statfs.h b/arch/e2k/include/uapi/asm/statfs.h new file mode 100644 index 0000000..8f2a792 --- /dev/null +++ b/arch/e2k/include/uapi/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _E2K_STATFS_H_ +#define _E2K_STATFS_H_ + +#include + +#endif /* _E2K_STATFS_H_ */ diff --git a/arch/e2k/include/uapi/asm/termbits.h b/arch/e2k/include/uapi/asm/termbits.h new file mode 100644 index 0000000..8484205 --- /dev/null +++ b/arch/e2k/include/uapi/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef _E2K_TERMBITS_H_ +#define _E2K_TERMBITS_H_ + +#include + +#endif /* _E2K_TERMBITS_H_ */ diff --git a/arch/e2k/include/uapi/asm/termios.h b/arch/e2k/include/uapi/asm/termios.h new file mode 100644 index 0000000..8b3d2b0 --- /dev/null +++ b/arch/e2k/include/uapi/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef _E2K_TERMIOS_H_ +#define _E2K_TERMIOS_H_ + +#include + +#endif /* _E2K_TERMIOS_H_ */ diff --git a/arch/e2k/include/uapi/asm/types.h b/arch/e2k/include/uapi/asm/types.h new file mode 100644 index 0000000..4b9667b --- /dev/null +++ b/arch/e2k/include/uapi/asm/types.h @@ -0,0 +1,28 @@ +#ifndef _UAPI_E2K_TYPES_H_ +#define _UAPI_E2K_TYPES_H_ + +#include + +/* + * This file is never included by application software unless + * explicitly requested (e.g., via linux/types.h) in which case the + * application is Linux specific so (user-) name space pollution is + * not a major issue. However, for interoperability, libraries still + * need to be careful to avoid a name clashes. + */ + +#ifndef __ASSEMBLY__ + +typedef unsigned long e2k_addr_t; /* phys & virt address (64 bits) */ +typedef unsigned long e2k_size_t; /* size of objects (64 bits) */ + /* what should it be ????? */ + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + + +#endif /* !(__ASSEMBLY__) */ + +#endif /* _UAPI_E2K_TYPES_H_ */ diff --git a/arch/e2k/include/uapi/asm/ucontext.h b/arch/e2k/include/uapi/asm/ucontext.h new file mode 100644 index 0000000..51f3af1 --- /dev/null +++ b/arch/e2k/include/uapi/asm/ucontext.h @@ -0,0 +1,17 @@ +#ifndef _UAPI_E2K_UCONTEXT_H +#define _UAPI_E2K_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + union { + sigset_t uc_sigmask;/* mask last for extensibility */ + unsigned long long pad[16]; + }; + struct extra_ucontext uc_extra; /* for compatibility */ +}; + + +#endif /* _UAPI_E2K_UCONTEXT_H */ diff --git a/arch/e2k/include/uapi/asm/unistd.h b/arch/e2k/include/uapi/asm/unistd.h new file mode 100644 index 0000000..0b0ea39 --- /dev/null +++ b/arch/e2k/include/uapi/asm/unistd.h @@ -0,0 +1,487 @@ +#ifndef _UAPI_E2K_UNISTD_H_ +#define _UAPI_E2K_UNISTD_H_ + +/* + * Taken from i386 sub-tree. + * Migration to E2K is still in progress. 
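+ * The numbering below largely follows the i386 table at the low end,
+ * with E2K-specific entries (e.g. __NR_e2k_longjmp2) and x86_64
+ * compatibility blocks added further up.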
Please, be patient. + */ + + +#ifdef __ptr64__ +#define LINUX_SYSCALL_TRAPNUM LINUX_SYSCALL64_TRAPNUM +#else /* !__ptr64__ */ +#define LINUX_SYSCALL_TRAPNUM LINUX_SYSCALL32_TRAPNUM +#endif /* __ptr64__ */ + +#define LINUX_SYSCALL_TRAPNUM_OLD 4 /* Deprecated */ +#define LINUX_SYSCALL32_TRAPNUM 1 /* Use E2K trap entry #1 */ +#define LINUX_SYSCALL64_TRAPNUM 3 /* Use E2K trap entry #3 */ +#define LINUX_FAST_SYSCALL32_TRAPNUM 5 +#define LINUX_FAST_SYSCALL64_TRAPNUM 6 +#define LINUX_FAST_SYSCALL128_TRAPNUM 7 + +/* + * This file contains the system call numbers. + */ + +#define __NR_restart_syscall 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl 110 +#define __NR_vhangup 111 +#define __NR_idle 112 +#define __NR_vm86old 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define 
__NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define __NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_vm86 166 +#define __NR_query_module 167 +#define __NR_poll 168 +#define __NR_nfsservctl 169 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread 180 +#define __NR_pwrite 181 +#define __NR_chown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 +#define __NR_getpmsg 188 /* some people actually want streams */ +#define __NR_putpmsg 189 /* some people actually want streams */ +#define __NR_vfork 190 +#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_lchown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_pidfd_send_signal 205 +#define __NR_pidfd_open 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_chown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define __NR_setfsgid32 216 +#define __NR_pivot_root 217 +#define __NR_mincore 218 +#define __NR_madvise 219 +#define __NR_madvise1 219 /* delete when C lib stub is removed */ +#define __NR_getdents64 220 +#define __NR_fcntl64 221 +#define __NR_core 222 /* for analys kernel core */ +#define __NR_macctl 223 /* MCST trust linux */ +#define __NR_newfstatat 224 +#define __NR_emergency 225 +#define __NR_e2k_sigsetjmp 226 /* setjmp e2k specific */ +#define __NR_e2k_longjmp 227 /* longjmp e2k specific */ +#define __NR_e2k_syswork 228 /* e2k_syswork */ +#define __NR_clone_thread 229 +#define __NR_clone2 __NR_clone_thread /* don't delete old name */ +#define __NR_e2k_longjmp2 230 /* 
Second Edition */ +#define __NR_soft_debug 231 +#define __NR_setxattr 232 +#define __NR_lsetxattr 233 +#define __NR_fsetxattr 234 +#define __NR_getxattr 235 +#define __NR_lgetxattr 236 +#define __NR_fgetxattr 237 +#define __NR_listxattr 238 +#define __NR_llistxattr 239 +#define __NR_flistxattr 240 +#define __NR_removexattr 241 +#define __NR_lremovexattr 242 +#define __NR_fremovexattr 243 +#define __NR_gettid 244 +#define __NR_readahead 245 +#define __NR_tkill 246 +#define __NR_sendfile64 247 +#define __NR_futex 248 +#define __NR_sched_setaffinity 249 +#define __NR_sched_getaffinity 250 +#define __NR_pipe2 251 +#define __NR_set_backtrace 252 +#define __NR_get_backtrace 253 +#define __NR_access_hw_stacks 254 +#define __NR_el_posix 255 +#define __NR_io_uring_setup 256 +#define __NR_io_uring_enter 257 +#define __NR_io_uring_register 258 +#define __NR_set_tid_address 259 +#define __NR_el_binary 260 +#define __NR_timer_create 261 +#define __NR_timer_settime 262 +#define __NR_timer_gettime 263 +#define __NR_timer_getoverrun 264 +#define __NR_timer_delete 265 +#define __NR_clock_settime 266 +#define __NR_clock_gettime 267 +#define __NR_clock_getres 268 +#define __NR_clock_nanosleep 269 + +/* added for compatibility with x86_64 */ +#define __NR_msgget 270 +#define __NR_msgctl 271 +#define __NR_msgrcv 272 +#define __NR_msgsnd 273 +#define __NR_semget 274 +#define __NR_semctl 275 +#define __NR_semtimedop 276 +#define __NR_semop 277 +#define __NR_shmget 278 +#define __NR_shmctl 279 +#define __NR_shmat 280 +#define __NR_shmdt 281 + +#define __NR_open_tree 282 +#define __NR_move_mount 283 + +#define __NR_rseq 284 +#define __NR_io_pgetevents 285 +#define __NR_accept4 286 + +#define __NR_sched_setattr 287 +#define __NR_sched_getattr 288 + +#define __NR_ioprio_set 289 +#define __NR_ioprio_get 290 +#define __NR_inotify_init 291 +#define __NR_inotify_add_watch 292 +#define __NR_inotify_rm_watch 293 + +#define __NR_io_setup 294 +#define __NR_io_destroy 295 +#define __NR_io_getevents 296 +#define __NR_io_submit 297 +#define __NR_io_cancel 298 +#define __NR_fadvise64 299 + +#define __NR_exit_group 300 +#define __NR_lookup_dcookie 301 +#define __NR_epoll_create 302 +#define __NR_epoll_ctl 303 +#define __NR_epoll_wait 304 +#define __NR_remap_file_pages 305 +#define __NR_statfs64 306 +#define __NR_fstatfs64 307 +#define __NR_tgkill 308 +#define __NR_utimes 309 +#define __NR_fadvise64_64 310 +#define __NR_vserver 311 +#define __NR_mbind 312 +#define __NR_get_mempolicy 313 +#define __NR_set_mempolicy 314 +#define __NR_mq_open 315 +#define __NR_mq_unlink 316 +#define __NR_mq_timedsend 317 +#define __NR_mq_timedreceive 318 +#define __NR_mq_notify 319 +#define __NR_mq_getsetattr 320 +#define __NR_kexec_load 321 +#define __NR_waitid 322 +#define __NR_add_key 323 +#define __NR_request_key 324 +#define __NR_keyctl 325 +#define __NR_mcst_rt 326 +#define __NR_getcpu 327 +#define __NR_move_pages 328 +#define __NR_splice 329 +#define __NR_vmsplice 330 +#define __NR_tee 331 +#define __NR_migrate_pages 332 +#define __NR_utimensat 333 +#define __NR_rt_tgsigqueueinfo 334 +#define __NR_openat 335 +#define __NR_mkdirat 336 +#define __NR_mknodat 337 +#define __NR_fchownat 338 +#define __NR_unlinkat 339 +#define __NR_renameat 340 +#define __NR_linkat 341 +#define __NR_symlinkat 342 +#define __NR_readlinkat 343 +#define __NR_fchmodat 344 +#define __NR_faccessat 345 +#define __NR_epoll_pwait 346 +#define __NR_signalfd4 347 +#define __NR_eventfd2 348 +#define __NR_recvmmsg 349 +#define __NR_cnt_point 350 +#define __NR_timerfd_create 351 
+#define __NR_timerfd_settime 352 +#define __NR_timerfd_gettime 353 +#define __NR_preadv 354 +#define __NR_pwritev 355 +#define __NR_fallocate 356 +#define __NR_sync_file_range 357 +#define __NR_dup3 358 +#define __NR_inotify_init1 359 +#define __NR_epoll_create1 360 +#define __NR_fstatat64 361 +#define __NR_futimesat 362 +#define __NR_perf_event_open 363 +#define __NR_unshare 364 +#define __NR_get_robust_list 365 +#define __NR_set_robust_list 366 +#define __NR_pselect6 367 +#define __NR_ppoll 368 +#define __NR_setcontext 369 +#define __NR_makecontext 370 +#define __NR_swapcontext 371 +#define __NR_freecontext 372 +#define __NR_fanotify_init 373 +#define __NR_fanotify_mark 374 +#define __NR_prlimit64 375 +#define __NR_clock_adjtime 376 +#define __NR_syncfs 377 +#define __NR_sendmmsg 378 +#define __NR_setns 379 +#define __NR_process_vm_readv 380 +#define __NR_process_vm_writev 381 +#define __NR_kcmp 382 +#define __NR_finit_module 383 +#define __NR_renameat2 384 +#define __NR_getrandom 385 +#define __NR_memfd_create 386 +#define __NR_bpf 387 +#define __NR_execveat 388 +#define __NR_userfaultfd 389 +#define __NR_membarrier 390 +#define __NR_mlock2 391 +#define __NR_seccomp 392 +#define __NR_shutdown 393 +#define __NR_copy_file_range 394 +#define __NR_preadv2 395 +#define __NR_pwritev2 396 +#define __NR_pkey_mprotect 397 +#define __NR_pkey_alloc 398 +#define __NR_pkey_free 399 +#define __NR_name_to_handle_at 400 +#define __NR_open_by_handle_at 401 +#define __NR_statx 402 +/* added for compatibility with x86_64 */ +#define __NR_socket 403 +#define __NR_connect 404 +#define __NR_accept 405 +#define __NR_sendto 406 +#define __NR_recvfrom 407 +#define __NR_sendmsg 408 +#define __NR_recvmsg 409 +#define __NR_bind 410 +#define __NR_listen 411 +#define __NR_getsockname 412 +#define __NR_getpeername 413 +#define __NR_socketpair 414 +#define __NR_setsockopt 415 +#define __NR_getsockopt 416 +/* free (unused) entries - reserve 417 - 418 */ +#define __NR_arch_prctl 419 +/* added for combability of protected system calls v1-v5 & v6 */ +#define __NR_newuselib 420 +#define __NR_rt_sigaction_ex 421 +/* protected Mode specific memory allocation syscall number */ +#define __NR_get_mem 422 +#define __NR_free_mem 423 +/* protected mode specific clean memory from old invalid descriptors */ +#define __NR_clean_descriptors 424 +/* protected mode specific unloading module from memory */ +#define __NR_unuselib 425 +#define __NR_clone3 426 +#define __NR_fsopen 427 +#define __NR_fsconfig 428 +#define __NR_fsmount 429 +#define __NR_fspick 430 + +#define NR_syscalls 431 + +/* compatibility with x86_64 */ +#define __NR_pread64 __NR_pread +#define __NR_pwrite64 __NR_pwrite + +/* Fast system calls */ +#define __NR_fast_sys_gettimeofday 0 +#define __NR_fast_sys_clock_gettime 1 +#define __NR_fast_sys_getcpu 2 +#define __NR_fast_sys_siggetmask 3 +#define __NR_fast_sys_getcontext 4 +#define __NR_fast_sys_set_return 5 + +#endif /* _UAPI_E2K_UNISTD_H_ */ diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h new file mode 100644 index 0000000..c2acd29 --- /dev/null +++ b/include/acpi/acbuffer.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acbuffer.h - Support for buffers returned by ACPI predefined names + * + * Copyright (C) 2000 - 2019, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACBUFFER_H__ +#define __ACBUFFER_H__ + +/* + * Contains buffer structures for these predefined names: + * _FDE, _GRT, _GTM, _PLD, _SRT + */ + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/* _FDE return value */ + +struct acpi_fde_info { + u32 floppy0; + u32 floppy1; + u32 floppy2; + u32 floppy3; + u32 tape; +}; + +/* + * _GRT return value + * _SRT input value + */ +struct acpi_grt_info { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 valid; + u16 milliseconds; + u16 timezone; + u8 daylight; + u8 reserved[3]; +}; + +/* _GTM return value */ + +struct acpi_gtm_info { + u32 pio_speed0; + u32 dma_speed0; + u32 pio_speed1; + u32 dma_speed1; + u32 flags; +}; + +/* + * Formatted _PLD return value. The minimum size is a package containing + * one buffer. + * Revision 1: Buffer is 16 bytes (128 bits) + * Revision 2: Buffer is 20 bytes (160 bits) + * + * Note: This structure is returned from the acpi_decode_pld_buffer + * interface. + */ +struct acpi_pld_info { + u8 revision; + u8 ignore_color; + u8 red; + u8 green; + u8 blue; + u16 width; + u16 height; + u8 user_visible; + u8 dock; + u8 lid; + u8 panel; + u8 vertical_position; + u8 horizontal_position; + u8 shape; + u8 group_orientation; + u8 group_token; + u8 group_position; + u8 bay; + u8 ejectable; + u8 ospm_eject_required; + u8 cabinet_number; + u8 card_cage_number; + u8 reference; + u8 rotation; + u8 order; + u8 reserved; + u16 vertical_offset; + u16 horizontal_offset; +}; + +/* + * Macros to: + * 1) Convert a _PLD buffer to internal struct acpi_pld_info format - ACPI_PLD_GET* + * (Used by acpi_decode_pld_buffer) + * 2) Construct a _PLD buffer - ACPI_PLD_SET* + * (Intended for BIOS use only) + */ +#define ACPI_PLD_REV1_BUFFER_SIZE 16 /* For Revision 1 of the buffer (From ACPI spec) */ +#define ACPI_PLD_REV2_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */ +#define ACPI_PLD_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */ + +/* First 32-bit dword, bits 0:32 */ + +#define ACPI_PLD_GET_REVISION(dword) ACPI_GET_BITS (dword, 0, ACPI_7BIT_MASK) +#define ACPI_PLD_SET_REVISION(dword,value) ACPI_SET_BITS (dword, 0, ACPI_7BIT_MASK, value) /* Offset 0, Len 7 */ + +#define ACPI_PLD_GET_IGNORE_COLOR(dword) ACPI_GET_BITS (dword, 7, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_IGNORE_COLOR(dword,value) ACPI_SET_BITS (dword, 7, ACPI_1BIT_MASK, value) /* Offset 7, Len 1 */ + +#define ACPI_PLD_GET_RED(dword) ACPI_GET_BITS (dword, 8, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_RED(dword,value) ACPI_SET_BITS (dword, 8, ACPI_8BIT_MASK, value) /* Offset 8, Len 8 */ + +#define ACPI_PLD_GET_GREEN(dword) ACPI_GET_BITS (dword, 16, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_GREEN(dword,value) ACPI_SET_BITS (dword, 16, ACPI_8BIT_MASK, value) /* Offset 16, Len 8 */ + +#define ACPI_PLD_GET_BLUE(dword) ACPI_GET_BITS (dword, 24, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_BLUE(dword,value) ACPI_SET_BITS (dword, 24, ACPI_8BIT_MASK, value) /* Offset 24, Len 8 
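+
+   As a hedged decode sketch (with dword0 holding the first 32 bits of
+   a raw _PLD buffer): u8 red = ACPI_PLD_GET_RED(dword0);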
*/ + +/* Second 32-bit dword, bits 33:63 */ + +#define ACPI_PLD_GET_WIDTH(dword) ACPI_GET_BITS (dword, 0, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_WIDTH(dword,value) ACPI_SET_BITS (dword, 0, ACPI_16BIT_MASK, value) /* Offset 32+0=32, Len 16 */ + +#define ACPI_PLD_GET_HEIGHT(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_HEIGHT(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 32+16=48, Len 16 */ + +/* Third 32-bit dword, bits 64:95 */ + +#define ACPI_PLD_GET_USER_VISIBLE(dword) ACPI_GET_BITS (dword, 0, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_USER_VISIBLE(dword,value) ACPI_SET_BITS (dword, 0, ACPI_1BIT_MASK, value) /* Offset 64+0=64, Len 1 */ + +#define ACPI_PLD_GET_DOCK(dword) ACPI_GET_BITS (dword, 1, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_DOCK(dword,value) ACPI_SET_BITS (dword, 1, ACPI_1BIT_MASK, value) /* Offset 64+1=65, Len 1 */ + +#define ACPI_PLD_GET_LID(dword) ACPI_GET_BITS (dword, 2, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_LID(dword,value) ACPI_SET_BITS (dword, 2, ACPI_1BIT_MASK, value) /* Offset 64+2=66, Len 1 */ + +#define ACPI_PLD_GET_PANEL(dword) ACPI_GET_BITS (dword, 3, ACPI_3BIT_MASK) +#define ACPI_PLD_SET_PANEL(dword,value) ACPI_SET_BITS (dword, 3, ACPI_3BIT_MASK, value) /* Offset 64+3=67, Len 3 */ + +#define ACPI_PLD_GET_VERTICAL(dword) ACPI_GET_BITS (dword, 6, ACPI_2BIT_MASK) +#define ACPI_PLD_SET_VERTICAL(dword,value) ACPI_SET_BITS (dword, 6, ACPI_2BIT_MASK, value) /* Offset 64+6=70, Len 2 */ + +#define ACPI_PLD_GET_HORIZONTAL(dword) ACPI_GET_BITS (dword, 8, ACPI_2BIT_MASK) +#define ACPI_PLD_SET_HORIZONTAL(dword,value) ACPI_SET_BITS (dword, 8, ACPI_2BIT_MASK, value) /* Offset 64+8=72, Len 2 */ + +#define ACPI_PLD_GET_SHAPE(dword) ACPI_GET_BITS (dword, 10, ACPI_4BIT_MASK) +#define ACPI_PLD_SET_SHAPE(dword,value) ACPI_SET_BITS (dword, 10, ACPI_4BIT_MASK, value) /* Offset 64+10=74, Len 4 */ + +#define ACPI_PLD_GET_ORIENTATION(dword) ACPI_GET_BITS (dword, 14, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_ORIENTATION(dword,value) ACPI_SET_BITS (dword, 14, ACPI_1BIT_MASK, value) /* Offset 64+14=78, Len 1 */ + +#define ACPI_PLD_GET_TOKEN(dword) ACPI_GET_BITS (dword, 15, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_TOKEN(dword,value) ACPI_SET_BITS (dword, 15, ACPI_8BIT_MASK, value) /* Offset 64+15=79, Len 8 */ + +#define ACPI_PLD_GET_POSITION(dword) ACPI_GET_BITS (dword, 23, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_POSITION(dword,value) ACPI_SET_BITS (dword, 23, ACPI_8BIT_MASK, value) /* Offset 64+23=87, Len 8 */ + +#define ACPI_PLD_GET_BAY(dword) ACPI_GET_BITS (dword, 31, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_BAY(dword,value) ACPI_SET_BITS (dword, 31, ACPI_1BIT_MASK, value) /* Offset 64+31=95, Len 1 */ + +/* Fourth 32-bit dword, bits 96:127 */ + +#define ACPI_PLD_GET_EJECTABLE(dword) ACPI_GET_BITS (dword, 0, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_EJECTABLE(dword,value) ACPI_SET_BITS (dword, 0, ACPI_1BIT_MASK, value) /* Offset 96+0=96, Len 1 */ + +#define ACPI_PLD_GET_OSPM_EJECT(dword) ACPI_GET_BITS (dword, 1, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_OSPM_EJECT(dword,value) ACPI_SET_BITS (dword, 1, ACPI_1BIT_MASK, value) /* Offset 96+1=97, Len 1 */ + +#define ACPI_PLD_GET_CABINET(dword) ACPI_GET_BITS (dword, 2, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_CABINET(dword,value) ACPI_SET_BITS (dword, 2, ACPI_8BIT_MASK, value) /* Offset 96+2=98, Len 8 */ + +#define ACPI_PLD_GET_CARD_CAGE(dword) ACPI_GET_BITS (dword, 10, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_CARD_CAGE(dword,value) ACPI_SET_BITS (dword, 10, ACPI_8BIT_MASK, value) /* Offset 96+10=106, Len 8 */ + +#define 
ACPI_PLD_GET_REFERENCE(dword) ACPI_GET_BITS (dword, 18, ACPI_1BIT_MASK) +#define ACPI_PLD_SET_REFERENCE(dword,value) ACPI_SET_BITS (dword, 18, ACPI_1BIT_MASK, value) /* Offset 96+18=114, Len 1 */ + +#define ACPI_PLD_GET_ROTATION(dword) ACPI_GET_BITS (dword, 19, ACPI_4BIT_MASK) +#define ACPI_PLD_SET_ROTATION(dword,value) ACPI_SET_BITS (dword, 19, ACPI_4BIT_MASK, value) /* Offset 96+19=115, Len 4 */ + +#define ACPI_PLD_GET_ORDER(dword) ACPI_GET_BITS (dword, 23, ACPI_5BIT_MASK) +#define ACPI_PLD_SET_ORDER(dword,value) ACPI_SET_BITS (dword, 23, ACPI_5BIT_MASK, value) /* Offset 96+23=119, Len 5 */ + +/* Fifth 32-bit dword, bits 128:159 (Revision 2 of _PLD only) */ + +#define ACPI_PLD_GET_VERT_OFFSET(dword) ACPI_GET_BITS (dword, 0, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_VERT_OFFSET(dword,value) ACPI_SET_BITS (dword, 0, ACPI_16BIT_MASK, value) /* Offset 128+0=128, Len 16 */ + +#define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK) +#define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */ + +#endif /* ACBUFFER_H */ diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h new file mode 100644 index 0000000..42d5731 --- /dev/null +++ b/include/acpi/acconfig.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acconfig.h - Global configuration constants + * + * Copyright (C) 2000 - 2019, Intel Corp. + * + *****************************************************************************/ + +#ifndef _ACCONFIG_H +#define _ACCONFIG_H + +/****************************************************************************** + * + * Configuration options + * + *****************************************************************************/ + +/* + * ACPI_DEBUG_OUTPUT - This switch enables all the debug facilities of the + * ACPI subsystem. This includes the DEBUG_PRINT output + * statements. When disabled, all DEBUG_PRINT + * statements are compiled out. + * + * ACPI_APPLICATION - Use this switch if the subsystem is going to be run + * at the application level. + * + */ + +/* + * OS name, used for the _OS object. The _OS object is essentially obsolete, + * but there is a large base of ASL/AML code in existing machines that check + * for the string below. The use of this string usually guarantees that + * the ASL will execute down the most tested code path. Also, there is some + * code that will not execute the _OSI method unless _OS matches the string + * below. Therefore, change this string at your own risk. + */ +#define ACPI_OS_NAME "Microsoft Windows NT" + +/* Maximum objects in the various object caches */ + +#define ACPI_MAX_STATE_CACHE_DEPTH 96 /* State objects */ +#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */ +#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */ +#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */ +#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */ +#define ACPI_MAX_COMMENT_CACHE_DEPTH 96 /* Comments for the -ca option */ + +/* + * Should the subsystem abort the loading of an ACPI table if the + * table checksum is incorrect? + */ +#ifndef ACPI_CHECKSUM_ABORT +#define ACPI_CHECKSUM_ABORT FALSE +#endif + +/* + * Generate a version of ACPICA that only supports "reduced hardware" + * platforms (as defined in ACPI 5.0). 
Set to TRUE to generate a specialized
+ * version of ACPICA that ONLY supports the ACPI 5.0 "reduced hardware"
+ * model. In other words, no ACPI hardware is supported.
+ *
+ * If TRUE, this means no support for the following:
+ * PM Event and Control registers
+ * SCI interrupt (and handler)
+ * Fixed Events
+ * General Purpose Events (GPEs)
+ * Global Lock
+ * ACPI PM timer
+ * FACS table (Waking vectors and Global Lock)
+ */
+#ifndef ACPI_REDUCED_HARDWARE
+#define ACPI_REDUCED_HARDWARE FALSE
+#endif
+
+/******************************************************************************
+ *
+ * Subsystem Constants
+ *
+ *****************************************************************************/
+
+/* Version of ACPI supported */
+
+#define ACPI_CA_SUPPORT_LEVEL 5
+
+/* Maximum count for a semaphore object */
+
+#define ACPI_MAX_SEMAPHORE_COUNT 256
+
+/* Maximum object reference count (detects object deletion issues) */
+
+#define ACPI_MAX_REFERENCE_COUNT 0x4000
+
+/* Default page size for use in mapping memory for operation regions */
+
+#define ACPI_DEFAULT_PAGE_SIZE 4096 /* Must be power of 2 */
+
+/* owner_id tracking. 128 entries allows for 4095 owner_ids */
+
+#define ACPI_NUM_OWNERID_MASKS 128
+
+/* Size of the root table array is increased by this increment */
+
+#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
+
+/* Maximum sleep allowed via Sleep() operator */
+
+#define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */
+
+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX 2
+
+/* Maximum time (default 30s) of While() loops before abort */
+
+#define ACPI_MAX_LOOP_TIMEOUT 30
+
+/******************************************************************************
+ *
+ * ACPI Specification constants (Do not change unless the specification changes)
+ *
+ *****************************************************************************/
+
+/* Method info (in WALK_STATE), containing local variables and arguments */
+
+#define ACPI_METHOD_NUM_LOCALS 8
+#define ACPI_METHOD_MAX_LOCAL 7
+
+#define ACPI_METHOD_NUM_ARGS 7
+#define ACPI_METHOD_MAX_ARG 6
+
+/*
+ * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG
+ */
+#define ACPI_OBJ_NUM_OPERANDS 8
+#define ACPI_OBJ_MAX_OPERAND 7
+
+/* Number of elements in the Result Stack frame, can be an arbitrary value */
+
+#define ACPI_RESULTS_FRAME_OBJ_NUM 8
+
+/*
+ * Maximal number of elements the Result Stack can contain,
+ * it may be an arbitrary value not exceeding the types of
+ * result_size and result_count (now u8).
+ */
+#define ACPI_RESULTS_OBJ_NUM_MAX 255
+
+/* Constants used in searching for the RSDP in low memory */
+
+#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */
+#define ACPI_EBDA_PTR_LENGTH 2
+#define ACPI_EBDA_WINDOW_SIZE 1024
+#define ACPI_HI_RSDP_WINDOW_BASE 0x000E0000 /* Physical Address */
+#define ACPI_HI_RSDP_WINDOW_SIZE 0x00020000
+#define ACPI_RSDP_SCAN_STEP 16
+
+/* Operation regions */
+
+#define ACPI_USER_REGION_BEGIN 0x80
+
+/* Maximum space_ids for Operation Regions */
+
+#define ACPI_MAX_ADDRESS_SPACE 255
+#define ACPI_NUM_DEFAULT_SPACES 4
+
+/* Array sizes. Used for range checking also */
+
+#define ACPI_MAX_MATCH_OPCODE 5
+
+/* RSDP checksums */
+
+#define ACPI_RSDP_CHECKSUM_LENGTH 20
+#define ACPI_RSDP_XCHECKSUM_LENGTH 36
+
+/*
+ * SMBus, GSBus and IPMI buffer sizes. All have a 2-byte header,
+ * containing both Status and Length.
+ */
+#define ACPI_SERIAL_HEADER_SIZE 2 /* Common for below.
Status and Length fields */ + +#define ACPI_SMBUS_DATA_SIZE 32 +#define ACPI_SMBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_SMBUS_DATA_SIZE + +#define ACPI_IPMI_DATA_SIZE 64 +#define ACPI_IPMI_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_IPMI_DATA_SIZE + +#define ACPI_MAX_GSBUS_DATA_SIZE 255 +#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE + +/* _sx_d and _sx_w control methods */ + +#define ACPI_NUM_sx_d_METHODS 4 +#define ACPI_NUM_sx_w_METHODS 5 + +/****************************************************************************** + * + * Miscellaneous constants + * + *****************************************************************************/ + +/* UUID constants */ + +#define UUID_BUFFER_LENGTH 16 /* Length of UUID in memory */ +#define UUID_STRING_LENGTH 36 /* Total length of a UUID string */ + +/* Positions for required hyphens (dashes) in UUID strings */ + +#define UUID_HYPHEN1_OFFSET 8 +#define UUID_HYPHEN2_OFFSET 13 +#define UUID_HYPHEN3_OFFSET 18 +#define UUID_HYPHEN4_OFFSET 23 + +/****************************************************************************** + * + * ACPI AML Debugger + * + *****************************************************************************/ + +#define ACPI_DEBUGGER_MAX_ARGS ACPI_METHOD_NUM_ARGS + 4 /* Max command line arguments */ +#define ACPI_DB_LINE_BUFFER_SIZE 512 + +#define ACPI_DEBUGGER_COMMAND_PROMPT '-' +#define ACPI_DEBUGGER_EXECUTE_PROMPT '%' + +#endif /* _ACCONFIG_H */ diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h new file mode 100644 index 0000000..233a72f --- /dev/null +++ b/include/acpi/acexcep.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acexcep.h - Exception codes returned by the ACPI subsystem + * + * Copyright (C) 2000 - 2019, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACEXCEP_H__ +#define __ACEXCEP_H__ + +/* This module contains all possible exception codes for acpi_status */ + +/* + * Exception code classes + */ +#define AE_CODE_ENVIRONMENTAL 0x0000 /* General ACPICA environment */ +#define AE_CODE_PROGRAMMER 0x1000 /* External ACPICA interface caller */ +#define AE_CODE_ACPI_TABLES 0x2000 /* ACPI tables */ +#define AE_CODE_AML 0x3000 /* From executing AML code */ +#define AE_CODE_CONTROL 0x4000 /* Internal control codes */ + +#define AE_CODE_MAX 0x4000 +#define AE_CODE_MASK 0xF000 + +/* + * Macros to insert the exception code classes + */ +#define EXCEP_ENV(code) ((acpi_status) (code | AE_CODE_ENVIRONMENTAL)) +#define EXCEP_PGM(code) ((acpi_status) (code | AE_CODE_PROGRAMMER)) +#define EXCEP_TBL(code) ((acpi_status) (code | AE_CODE_ACPI_TABLES)) +#define EXCEP_AML(code) ((acpi_status) (code | AE_CODE_AML)) +#define EXCEP_CTL(code) ((acpi_status) (code | AE_CODE_CONTROL)) + +/* + * Exception info table. The "Description" field is used only by the + * ACPICA help application (acpihelp). 
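+ *
+ * For example, with ACPI_HELP_APP defined, the EXCEP_TXT entries below
+ * expand to both fields: EXCEP_TXT("AE_OK", "No error") becomes
+ * {"AE_OK", "No error"}; without it, the description is compiled out and
+ * only {"AE_OK"} remains.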
+ */ +struct acpi_exception_info { + char *name; + +#ifdef ACPI_HELP_APP + char *description; +#endif +}; + +#ifdef ACPI_HELP_APP +#define EXCEP_TXT(name,description) {name, description} +#else +#define EXCEP_TXT(name,description) {name} +#endif + +/* + * Success is always zero, failure is non-zero + */ +#define ACPI_SUCCESS(a) (!(a)) +#define ACPI_FAILURE(a) (a) + +#define AE_OK (acpi_status) 0x0000 + +#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL) +#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML) +#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER) +#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES) +#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL) + +/* + * Environmental exceptions + */ +#define AE_ERROR EXCEP_ENV (0x0001) +#define AE_NO_ACPI_TABLES EXCEP_ENV (0x0002) +#define AE_NO_NAMESPACE EXCEP_ENV (0x0003) +#define AE_NO_MEMORY EXCEP_ENV (0x0004) +#define AE_NOT_FOUND EXCEP_ENV (0x0005) +#define AE_NOT_EXIST EXCEP_ENV (0x0006) +#define AE_ALREADY_EXISTS EXCEP_ENV (0x0007) +#define AE_TYPE EXCEP_ENV (0x0008) +#define AE_NULL_OBJECT EXCEP_ENV (0x0009) +#define AE_NULL_ENTRY EXCEP_ENV (0x000A) +#define AE_BUFFER_OVERFLOW EXCEP_ENV (0x000B) +#define AE_STACK_OVERFLOW EXCEP_ENV (0x000C) +#define AE_STACK_UNDERFLOW EXCEP_ENV (0x000D) +#define AE_NOT_IMPLEMENTED EXCEP_ENV (0x000E) +#define AE_SUPPORT EXCEP_ENV (0x000F) +#define AE_LIMIT EXCEP_ENV (0x0010) +#define AE_TIME EXCEP_ENV (0x0011) +#define AE_ACQUIRE_DEADLOCK EXCEP_ENV (0x0012) +#define AE_RELEASE_DEADLOCK EXCEP_ENV (0x0013) +#define AE_NOT_ACQUIRED EXCEP_ENV (0x0014) +#define AE_ALREADY_ACQUIRED EXCEP_ENV (0x0015) +#define AE_NO_HARDWARE_RESPONSE EXCEP_ENV (0x0016) +#define AE_NO_GLOBAL_LOCK EXCEP_ENV (0x0017) +#define AE_ABORT_METHOD EXCEP_ENV (0x0018) +#define AE_SAME_HANDLER EXCEP_ENV (0x0019) +#define AE_NO_HANDLER EXCEP_ENV (0x001A) +#define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B) +#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C) +#define AE_ACCESS EXCEP_ENV (0x001D) +#define AE_IO_ERROR EXCEP_ENV (0x001E) +#define AE_NUMERIC_OVERFLOW EXCEP_ENV (0x001F) +#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020) +#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021) +#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022) +#define AE_END_OF_TABLE EXCEP_ENV (0x0023) + +#define AE_CODE_ENV_MAX 0x0023 + +/* + * Programmer exceptions + */ +#define AE_BAD_PARAMETER EXCEP_PGM (0x0001) +#define AE_BAD_CHARACTER EXCEP_PGM (0x0002) +#define AE_BAD_PATHNAME EXCEP_PGM (0x0003) +#define AE_BAD_DATA EXCEP_PGM (0x0004) +#define AE_BAD_HEX_CONSTANT EXCEP_PGM (0x0005) +#define AE_BAD_OCTAL_CONSTANT EXCEP_PGM (0x0006) +#define AE_BAD_DECIMAL_CONSTANT EXCEP_PGM (0x0007) +#define AE_MISSING_ARGUMENTS EXCEP_PGM (0x0008) +#define AE_BAD_ADDRESS EXCEP_PGM (0x0009) + +#define AE_CODE_PGM_MAX 0x0009 + +/* + * Acpi table exceptions + */ +#define AE_BAD_SIGNATURE EXCEP_TBL (0x0001) +#define AE_BAD_HEADER EXCEP_TBL (0x0002) +#define AE_BAD_CHECKSUM EXCEP_TBL (0x0003) +#define AE_BAD_VALUE EXCEP_TBL (0x0004) +#define AE_INVALID_TABLE_LENGTH EXCEP_TBL (0x0005) + +#define AE_CODE_TBL_MAX 0x0005 + +/* + * AML exceptions. 
These are caused by problems with + * the actual AML byte stream + */ +#define AE_AML_BAD_OPCODE EXCEP_AML (0x0001) +#define AE_AML_NO_OPERAND EXCEP_AML (0x0002) +#define AE_AML_OPERAND_TYPE EXCEP_AML (0x0003) +#define AE_AML_OPERAND_VALUE EXCEP_AML (0x0004) +#define AE_AML_UNINITIALIZED_LOCAL EXCEP_AML (0x0005) +#define AE_AML_UNINITIALIZED_ARG EXCEP_AML (0x0006) +#define AE_AML_UNINITIALIZED_ELEMENT EXCEP_AML (0x0007) +#define AE_AML_NUMERIC_OVERFLOW EXCEP_AML (0x0008) +#define AE_AML_REGION_LIMIT EXCEP_AML (0x0009) +#define AE_AML_BUFFER_LIMIT EXCEP_AML (0x000A) +#define AE_AML_PACKAGE_LIMIT EXCEP_AML (0x000B) +#define AE_AML_DIVIDE_BY_ZERO EXCEP_AML (0x000C) +#define AE_AML_BAD_NAME EXCEP_AML (0x000D) +#define AE_AML_NAME_NOT_FOUND EXCEP_AML (0x000E) +#define AE_AML_INTERNAL EXCEP_AML (0x000F) +#define AE_AML_INVALID_SPACE_ID EXCEP_AML (0x0010) +#define AE_AML_STRING_LIMIT EXCEP_AML (0x0011) +#define AE_AML_NO_RETURN_VALUE EXCEP_AML (0x0012) +#define AE_AML_METHOD_LIMIT EXCEP_AML (0x0013) +#define AE_AML_NOT_OWNER EXCEP_AML (0x0014) +#define AE_AML_MUTEX_ORDER EXCEP_AML (0x0015) +#define AE_AML_MUTEX_NOT_ACQUIRED EXCEP_AML (0x0016) +#define AE_AML_INVALID_RESOURCE_TYPE EXCEP_AML (0x0017) +#define AE_AML_INVALID_INDEX EXCEP_AML (0x0018) +#define AE_AML_REGISTER_LIMIT EXCEP_AML (0x0019) +#define AE_AML_NO_WHILE EXCEP_AML (0x001A) +#define AE_AML_ALIGNMENT EXCEP_AML (0x001B) +#define AE_AML_NO_RESOURCE_END_TAG EXCEP_AML (0x001C) +#define AE_AML_BAD_RESOURCE_VALUE EXCEP_AML (0x001D) +#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E) +#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F) +#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) +#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021) +#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) +#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) +#define AE_AML_PROTOCOL EXCEP_AML (0x0024) +#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025) + +#define AE_CODE_AML_MAX 0x0025 + +/* + * Internal exceptions used for control + */ +#define AE_CTRL_RETURN_VALUE EXCEP_CTL (0x0001) +#define AE_CTRL_PENDING EXCEP_CTL (0x0002) +#define AE_CTRL_TERMINATE EXCEP_CTL (0x0003) +#define AE_CTRL_TRUE EXCEP_CTL (0x0004) +#define AE_CTRL_FALSE EXCEP_CTL (0x0005) +#define AE_CTRL_DEPTH EXCEP_CTL (0x0006) +#define AE_CTRL_END EXCEP_CTL (0x0007) +#define AE_CTRL_TRANSFER EXCEP_CTL (0x0008) +#define AE_CTRL_BREAK EXCEP_CTL (0x0009) +#define AE_CTRL_CONTINUE EXCEP_CTL (0x000A) +#define AE_CTRL_PARSE_CONTINUE EXCEP_CTL (0x000B) +#define AE_CTRL_PARSE_PENDING EXCEP_CTL (0x000C) + +#define AE_CODE_CTRL_MAX 0x000C + +/* Exception strings for acpi_format_exception */ + +#ifdef ACPI_DEFINE_EXCEPTION_TABLE + +/* + * String versions of the exception codes above + * These strings must match the corresponding defines exactly + */ +static const struct acpi_exception_info acpi_gbl_exception_names_env[] = { + EXCEP_TXT("AE_OK", "No error"), + EXCEP_TXT("AE_ERROR", "Unspecified error"), + EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"), + EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"), + EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"), + EXCEP_TXT("AE_NOT_FOUND", "A requested entity is not found"), + EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"), + EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"), + EXCEP_TXT("AE_TYPE", "The object type is incorrect"), + EXCEP_TXT("AE_NULL_OBJECT", "A required object was missing"), + EXCEP_TXT("AE_NULL_ENTRY", "The requested object does not exist"), + EXCEP_TXT("AE_BUFFER_OVERFLOW", "The 
buffer provided is too small"), + EXCEP_TXT("AE_STACK_OVERFLOW", "An internal stack overflowed"), + EXCEP_TXT("AE_STACK_UNDERFLOW", "An internal stack underflowed"), + EXCEP_TXT("AE_NOT_IMPLEMENTED", "The feature is not implemented"), + EXCEP_TXT("AE_SUPPORT", "The feature is not supported"), + EXCEP_TXT("AE_LIMIT", "A predefined limit was exceeded"), + EXCEP_TXT("AE_TIME", "A time limit or timeout expired"), + EXCEP_TXT("AE_ACQUIRE_DEADLOCK", + "Internal error, attempt was made to acquire a mutex in improper order"), + EXCEP_TXT("AE_RELEASE_DEADLOCK", + "Internal error, attempt was made to release a mutex in improper order"), + EXCEP_TXT("AE_NOT_ACQUIRED", + "An attempt to release a mutex or Global Lock without a previous acquire"), + EXCEP_TXT("AE_ALREADY_ACQUIRED", + "Internal error, attempt was made to acquire a mutex twice"), + EXCEP_TXT("AE_NO_HARDWARE_RESPONSE", + "Hardware did not respond after an I/O operation"), + EXCEP_TXT("AE_NO_GLOBAL_LOCK", "There is no FACS Global Lock"), + EXCEP_TXT("AE_ABORT_METHOD", "A control method was aborted"), + EXCEP_TXT("AE_SAME_HANDLER", + "Attempt was made to install the same handler that is already installed"), + EXCEP_TXT("AE_NO_HANDLER", + "A handler for the operation is not installed"), + EXCEP_TXT("AE_OWNER_ID_LIMIT", + "There are no more Owner IDs available for ACPI tables or control methods"), + EXCEP_TXT("AE_NOT_CONFIGURED", + "The interface is not part of the current subsystem configuration"), + EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"), + EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred"), + EXCEP_TXT("AE_NUMERIC_OVERFLOW", + "Overflow during string-to-integer conversion"), + EXCEP_TXT("AE_HEX_OVERFLOW", + "Overflow during ASCII hex-to-binary conversion"), + EXCEP_TXT("AE_DECIMAL_OVERFLOW", + "Overflow during ASCII decimal-to-binary conversion"), + EXCEP_TXT("AE_OCTAL_OVERFLOW", + "Overflow during ASCII octal-to-binary conversion"), + EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_BAD_PARAMETER", "A parameter is out of range or invalid"), + EXCEP_TXT("AE_BAD_CHARACTER", + "An invalid character was found in a name"), + EXCEP_TXT("AE_BAD_PATHNAME", + "An invalid character was found in a pathname"), + EXCEP_TXT("AE_BAD_DATA", + "A package or buffer contained incorrect data"), + EXCEP_TXT("AE_BAD_HEX_CONSTANT", "Invalid character in a Hex constant"), + EXCEP_TXT("AE_BAD_OCTAL_CONSTANT", + "Invalid character in an Octal constant"), + EXCEP_TXT("AE_BAD_DECIMAL_CONSTANT", + "Invalid character in a Decimal constant"), + EXCEP_TXT("AE_MISSING_ARGUMENTS", + "Too few arguments were passed to a control method"), + EXCEP_TXT("AE_BAD_ADDRESS", "An illegal null I/O address") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_tbl[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_BAD_SIGNATURE", "An ACPI table has an invalid signature"), + EXCEP_TXT("AE_BAD_HEADER", "Invalid field in an ACPI table header"), + EXCEP_TXT("AE_BAD_CHECKSUM", "An ACPI table checksum is not correct"), + EXCEP_TXT("AE_BAD_VALUE", "An invalid value was found in a table"), + EXCEP_TXT("AE_INVALID_TABLE_LENGTH", + "The FADT or FACS has improper length") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_AML_BAD_OPCODE", "Invalid AML opcode encountered"), + EXCEP_TXT("AE_AML_NO_OPERAND", "A required operand is missing"), + 
EXCEP_TXT("AE_AML_OPERAND_TYPE", + "An operand of an incorrect type was encountered"), + EXCEP_TXT("AE_AML_OPERAND_VALUE", + "The operand had an inappropriate or invalid value"), + EXCEP_TXT("AE_AML_UNINITIALIZED_LOCAL", + "Method tried to use an uninitialized local variable"), + EXCEP_TXT("AE_AML_UNINITIALIZED_ARG", + "Method tried to use an uninitialized argument"), + EXCEP_TXT("AE_AML_UNINITIALIZED_ELEMENT", + "Method tried to use an empty package element"), + EXCEP_TXT("AE_AML_NUMERIC_OVERFLOW", + "Overflow during BCD conversion or other"), + EXCEP_TXT("AE_AML_REGION_LIMIT", + "Tried to access beyond the end of an Operation Region"), + EXCEP_TXT("AE_AML_BUFFER_LIMIT", + "Tried to access beyond the end of a buffer"), + EXCEP_TXT("AE_AML_PACKAGE_LIMIT", + "Tried to access beyond the end of a package"), + EXCEP_TXT("AE_AML_DIVIDE_BY_ZERO", + "During execution of AML Divide operator"), + EXCEP_TXT("AE_AML_BAD_NAME", + "An ACPI name contains invalid character(s)"), + EXCEP_TXT("AE_AML_NAME_NOT_FOUND", + "Could not resolve a named reference"), + EXCEP_TXT("AE_AML_INTERNAL", + "An internal error within the interpreter"), + EXCEP_TXT("AE_AML_INVALID_SPACE_ID", + "An Operation Region SpaceID is invalid"), + EXCEP_TXT("AE_AML_STRING_LIMIT", + "String is longer than 200 characters"), + EXCEP_TXT("AE_AML_NO_RETURN_VALUE", + "A method did not return a required value"), + EXCEP_TXT("AE_AML_METHOD_LIMIT", + "A control method reached the maximum reentrancy limit of 255"), + EXCEP_TXT("AE_AML_NOT_OWNER", + "A thread tried to release a mutex that it does not own"), + EXCEP_TXT("AE_AML_MUTEX_ORDER", "Mutex SyncLevel release mismatch"), + EXCEP_TXT("AE_AML_MUTEX_NOT_ACQUIRED", + "Attempt to release a mutex that was not previously acquired"), + EXCEP_TXT("AE_AML_INVALID_RESOURCE_TYPE", + "Invalid resource type in resource list"), + EXCEP_TXT("AE_AML_INVALID_INDEX", + "Invalid Argx or Localx (x too large)"), + EXCEP_TXT("AE_AML_REGISTER_LIMIT", + "Bank value or Index value beyond range of register"), + EXCEP_TXT("AE_AML_NO_WHILE", "Break or Continue without a While"), + EXCEP_TXT("AE_AML_ALIGNMENT", + "Non-aligned memory transfer on platform that does not support this"), + EXCEP_TXT("AE_AML_NO_RESOURCE_END_TAG", + "No End Tag in a resource list"), + EXCEP_TXT("AE_AML_BAD_RESOURCE_VALUE", + "Invalid value of a resource element"), + EXCEP_TXT("AE_AML_CIRCULAR_REFERENCE", + "Two references refer to each other"), + EXCEP_TXT("AE_AML_BAD_RESOURCE_LENGTH", + "The length of a Resource Descriptor in the AML is incorrect"), + EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS", + "A memory, I/O, or PCI configuration address is invalid"), + EXCEP_TXT("AE_AML_LOOP_TIMEOUT", + "An AML While loop exceeded the maximum execution time"), + EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", + "A namespace node is uninitialized or unresolved"), + EXCEP_TXT("AE_AML_TARGET_TYPE", + "A target operand of an incorrect type was encountered"), + EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"), + EXCEP_TXT("AE_AML_BUFFER_LENGTH", + "The length of the buffer is invalid/incorrect") +}; + +static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { + EXCEP_TXT(NULL, NULL), + EXCEP_TXT("AE_CTRL_RETURN_VALUE", "A Method returned a value"), + EXCEP_TXT("AE_CTRL_PENDING", "Method is calling another method"), + EXCEP_TXT("AE_CTRL_TERMINATE", "Terminate the executing method"), + EXCEP_TXT("AE_CTRL_TRUE", "An If or While predicate result"), + EXCEP_TXT("AE_CTRL_FALSE", "An If or While predicate result"), + EXCEP_TXT("AE_CTRL_DEPTH", 
"Maximum search depth has been reached"), + EXCEP_TXT("AE_CTRL_END", "An If or While predicate is false"), + EXCEP_TXT("AE_CTRL_TRANSFER", "Transfer control to called method"), + EXCEP_TXT("AE_CTRL_BREAK", "A Break has been executed"), + EXCEP_TXT("AE_CTRL_CONTINUE", "A Continue has been executed"), + EXCEP_TXT("AE_CTRL_PARSE_CONTINUE", "Used to skip over bad opcodes"), + EXCEP_TXT("AE_CTRL_PARSE_PENDING", "Used to implement AML While loops") +}; + +#endif /* EXCEPTION_TABLE */ + +#endif /* __ACEXCEP_H__ */ diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h new file mode 100644 index 0000000..8b3eae9 --- /dev/null +++ b/include/acpi/acnames.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acnames.h - Global names and strings + * + * Copyright (C) 2000 - 2019, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACNAMES_H__ +#define __ACNAMES_H__ + +/* Method names - these methods can appear anywhere in the namespace */ + +#define METHOD_NAME__ADR "_ADR" +#define METHOD_NAME__AEI "_AEI" +#define METHOD_NAME__BBN "_BBN" +#define METHOD_NAME__CBA "_CBA" +#define METHOD_NAME__CID "_CID" +#define METHOD_NAME__CLS "_CLS" +#define METHOD_NAME__CRS "_CRS" +#define METHOD_NAME__DDN "_DDN" +#define METHOD_NAME__DMA "_DMA" +#define METHOD_NAME__HID "_HID" +#define METHOD_NAME__INI "_INI" +#define METHOD_NAME__PLD "_PLD" +#define METHOD_NAME__DSD "_DSD" +#define METHOD_NAME__PRS "_PRS" +#define METHOD_NAME__PRT "_PRT" +#define METHOD_NAME__PRW "_PRW" +#define METHOD_NAME__PS0 "_PS0" +#define METHOD_NAME__PS1 "_PS1" +#define METHOD_NAME__PS2 "_PS2" +#define METHOD_NAME__PS3 "_PS3" +#define METHOD_NAME__REG "_REG" +#define METHOD_NAME__SB_ "_SB_" +#define METHOD_NAME__SEG "_SEG" +#define METHOD_NAME__SRS "_SRS" +#define METHOD_NAME__STA "_STA" +#define METHOD_NAME__SUB "_SUB" +#define METHOD_NAME__UID "_UID" + +/* Method names - these methods must appear at the namespace root */ + +#define METHOD_PATHNAME__PTS "\\_PTS" +#define METHOD_PATHNAME__SST "\\_SI._SST" +#define METHOD_PATHNAME__WAK "\\_WAK" + +/* Definitions of the predefined namespace names */ + +#define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */ +#define ACPI_PREFIX_MIXED (u32) 0x69706341 /* "Acpi" */ +#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */ + +/* Root name stuff */ + +#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */ +#define ACPI_ROOT_PATHNAME "\\___" +#define ACPI_NAMESPACE_ROOT "Namespace Root" +#define ACPI_NS_ROOT_PATH "\\" + +#endif /* __ACNAMES_H__ */ diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h new file mode 100644 index 0000000..c50542d --- /dev/null +++ b/include/acpi/acoutput.h @@ -0,0 +1,466 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acoutput.h -- debug output + * + * Copyright (C) 2000 - 2019, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACOUTPUT_H__ +#define __ACOUTPUT_H__ + +/* + * Debug levels and component IDs. These are used to control the + * granularity of the output of the ACPI_DEBUG_PRINT macro -- on a + * per-component basis and a per-exception-type basis. 
+ */ + +/* Component IDs are used in the global "DebugLayer" */ + +#define ACPI_UTILITIES 0x00000001 +#define ACPI_HARDWARE 0x00000002 +#define ACPI_EVENTS 0x00000004 +#define ACPI_TABLES 0x00000008 +#define ACPI_NAMESPACE 0x00000010 +#define ACPI_PARSER 0x00000020 +#define ACPI_DISPATCHER 0x00000040 +#define ACPI_EXECUTER 0x00000080 +#define ACPI_RESOURCES 0x00000100 +#define ACPI_CA_DEBUGGER 0x00000200 +#define ACPI_OS_SERVICES 0x00000400 +#define ACPI_CA_DISASSEMBLER 0x00000800 + +/* Component IDs for ACPI tools and utilities */ + +#define ACPI_COMPILER 0x00001000 +#define ACPI_TOOLS 0x00002000 +#define ACPI_EXAMPLE 0x00004000 +#define ACPI_DRIVER 0x00008000 +#define DT_COMPILER 0x00010000 +#define ASL_PREPROCESSOR 0x00020000 + +#define ACPI_ALL_COMPONENTS 0x0001FFFF +#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS) + +/* Component IDs reserved for ACPI drivers */ + +#define ACPI_ALL_DRIVERS 0xFFFF0000 + +/* + * Raw debug output levels, do not use these in the ACPI_DEBUG_PRINT macros + */ +#define ACPI_LV_INIT 0x00000001 +#define ACPI_LV_DEBUG_OBJECT 0x00000002 +#define ACPI_LV_INFO 0x00000004 +#define ACPI_LV_REPAIR 0x00000008 +#define ACPI_LV_TRACE_POINT 0x00000010 +#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F + +/* Trace verbosity level 1 [Standard Trace Level] */ + +#define ACPI_LV_INIT_NAMES 0x00000020 +#define ACPI_LV_PARSE 0x00000040 +#define ACPI_LV_LOAD 0x00000080 +#define ACPI_LV_DISPATCH 0x00000100 +#define ACPI_LV_EXEC 0x00000200 +#define ACPI_LV_NAMES 0x00000400 +#define ACPI_LV_OPREGION 0x00000800 +#define ACPI_LV_BFIELD 0x00001000 +#define ACPI_LV_TABLES 0x00002000 +#define ACPI_LV_VALUES 0x00004000 +#define ACPI_LV_OBJECTS 0x00008000 +#define ACPI_LV_RESOURCES 0x00010000 +#define ACPI_LV_USER_REQUESTS 0x00020000 +#define ACPI_LV_PACKAGE 0x00040000 +#define ACPI_LV_EVALUATION 0x00080000 +#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS + +/* Trace verbosity level 2 [Function tracing and memory allocation] */ + +#define ACPI_LV_ALLOCATIONS 0x00100000 +#define ACPI_LV_FUNCTIONS 0x00200000 +#define ACPI_LV_OPTIMIZATIONS 0x00400000 +#define ACPI_LV_PARSE_TREES 0x00800000 +#define ACPI_LV_VERBOSITY2 0x00F00000 | ACPI_LV_VERBOSITY1 +#define ACPI_LV_ALL ACPI_LV_VERBOSITY2 + +/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ + +#define ACPI_LV_MUTEX 0x01000000 +#define ACPI_LV_THREADS 0x02000000 +#define ACPI_LV_IO 0x04000000 +#define ACPI_LV_INTERRUPTS 0x08000000 +#define ACPI_LV_VERBOSITY3 0x0F000000 | ACPI_LV_VERBOSITY2 + +/* Exceptionally verbose output -- also used in the global "DebugLevel" */ + +#define ACPI_LV_AML_DISASSEMBLE 0x10000000 +#define ACPI_LV_VERBOSE_INFO 0x20000000 +#define ACPI_LV_FULL_TABLES 0x40000000 +#define ACPI_LV_EVENTS 0x80000000 +#define ACPI_LV_VERBOSE 0xF0000000 + +/* + * Debug level macros that are used in the DEBUG_PRINT macros + */ +#define ACPI_DEBUG_LEVEL(dl) (u32) dl,ACPI_DEBUG_PARAMETERS + +/* + * Exception level -- used in the global "DebugLevel" + * + * Note: For errors, use the ACPI_ERROR or ACPI_EXCEPTION interfaces. + * For warnings, use ACPI_WARNING. 
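+ *
+ * Usage sketch (the format arguments are hypothetical). The double set of
+ * parentheses satisfies the plist convention of ACPI_DEBUG_PRINT, defined
+ * later in this header:
+ *
+ *     ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Table [%4.4s] loaded\n", name));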
+ */ +#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT) +#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT) +#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO) +#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR) +#define ACPI_DB_TRACE_POINT ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT) +#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS) + +/* Trace level -- also used in the global "DebugLevel" */ + +#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES) +#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS) +#define ACPI_DB_PARSE ACPI_DEBUG_LEVEL (ACPI_LV_PARSE) +#define ACPI_DB_DISPATCH ACPI_DEBUG_LEVEL (ACPI_LV_DISPATCH) +#define ACPI_DB_LOAD ACPI_DEBUG_LEVEL (ACPI_LV_LOAD) +#define ACPI_DB_EXEC ACPI_DEBUG_LEVEL (ACPI_LV_EXEC) +#define ACPI_DB_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_NAMES) +#define ACPI_DB_OPREGION ACPI_DEBUG_LEVEL (ACPI_LV_OPREGION) +#define ACPI_DB_BFIELD ACPI_DEBUG_LEVEL (ACPI_LV_BFIELD) +#define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES) +#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS) +#define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS) +#define ACPI_DB_PARSE_TREES ACPI_DEBUG_LEVEL (ACPI_LV_PARSE_TREES) +#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES) +#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS) +#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS) +#define ACPI_DB_RESOURCES ACPI_DEBUG_LEVEL (ACPI_LV_RESOURCES) +#define ACPI_DB_IO ACPI_DEBUG_LEVEL (ACPI_LV_IO) +#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS) +#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS) +#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE) +#define ACPI_DB_EVALUATION ACPI_DEBUG_LEVEL (ACPI_LV_EVALUATION) +#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX) +#define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS) + +#define ACPI_DB_ALL ACPI_DEBUG_LEVEL (ACPI_LV_ALL) + +/* Defaults for debug_level, debug and normal */ + +#ifndef ACPI_DEBUG_DEFAULT +#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) +#endif + +#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) +#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) + +/* + * Global trace flags + */ +#define ACPI_TRACE_ENABLED ((u32) 4) +#define ACPI_TRACE_ONESHOT ((u32) 2) +#define ACPI_TRACE_OPCODE ((u32) 1) + +/* Defaults for trace debugging level/layer */ + +#define ACPI_TRACE_LEVEL_ALL ACPI_LV_ALL +#define ACPI_TRACE_LAYER_ALL 0x000001FF +#define ACPI_TRACE_LEVEL_DEFAULT ACPI_LV_TRACE_POINT +#define ACPI_TRACE_LAYER_DEFAULT ACPI_EXECUTER + +#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES) +/* + * The module name is used primarily for error and debug messages. + * The __FILE__ macro is not very useful for this, because it + * usually includes the entire pathname to the module making the + * debug output difficult to read. + */ +#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name; +#else +/* + * For the no-debug and no-error-msg cases, we must at least define + * a null module name. + */ +#define ACPI_MODULE_NAME(name) +#define _acpi_module_name "" +#endif + +/* + * Ascii error messages can be configured out + */ +#ifndef ACPI_NO_ERROR_MESSAGES +#define AE_INFO _acpi_module_name, __LINE__ + +/* + * Error reporting. 
Caller's module and line number are inserted by AE_INFO,
+ * the plist contains a set of parens to allow variable-length lists.
+ * These macros are used for both the debug and non-debug versions of the code.
+ */
+#define ACPI_INFO(plist) acpi_info plist
+#define ACPI_WARNING(plist) acpi_warning plist
+#define ACPI_EXCEPTION(plist) acpi_exception plist
+#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
+#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist
+#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
+#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i)
+
+#else
+
+/* No error messages */
+
+#define ACPI_INFO(plist)
+#define ACPI_WARNING(plist)
+#define ACPI_EXCEPTION(plist)
+#define ACPI_ERROR(plist)
+#define ACPI_BIOS_WARNING(plist)
+#define ACPI_BIOS_EXCEPTION(plist)
+#define ACPI_BIOS_ERROR(plist)
+#define ACPI_DEBUG_OBJECT(obj,l,i)
+
+#endif /* ACPI_NO_ERROR_MESSAGES */
+
+/*
+ * Debug macros that are conditionally compiled
+ */
+#ifdef ACPI_DEBUG_OUTPUT
+
+/*
+ * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
+ * define it now. This is the case where the compiler does not support
+ * a __func__ macro or equivalent.
+ */
+#ifndef ACPI_GET_FUNCTION_NAME
+#define ACPI_GET_FUNCTION_NAME _acpi_function_name
+
+/*
+ * The Name parameter should be the procedure name as a non-quoted string.
+ * The function name is also used by the function exit macros below.
+ * Note: (const char) is used to be compatible with the debug interfaces
+ * and macros such as __func__.
+ */
+#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name;
+
+#else
+/* Compiler supports __func__ (or equivalent) -- Ignore this macro */
+
+#define ACPI_FUNCTION_NAME(name)
+#endif /* ACPI_GET_FUNCTION_NAME */
+
+/*
+ * Common parameters used for debug output functions:
+ * line number, function name, module(file) name, component ID
+ */
+#define ACPI_DEBUG_PARAMETERS \
+ __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
+
+/* Check if debug output is currently dynamically enabled */
+
+#define ACPI_IS_DEBUG_ENABLED(level, component) \
+ ((level & acpi_dbg_level) && (component & acpi_dbg_layer))
+
+/*
+ * Master debug print macros
+ * Print message if and only if:
+ * 1) Debug print for the current component is enabled
+ * 2) Debug error level or trace level for the print statement is enabled
+ *
+ * November 2012: Moved the runtime check for whether to actually emit the
+ * debug message outside of the print function itself. This improves overall
+ * performance at a relatively small code cost. Implementation involves the
+ * use of variadic macros supported by C99.
+ *
+ * Note: the ACPI_DO_WHILE0 macro is used to prevent some compilers from
+ * complaining about these constructs. On other compilers the do...while
+ * adds some extra code, so this feature is optional.
+ */
+#ifdef ACPI_USE_DO_WHILE_0
+#define ACPI_DO_WHILE0(a) do a while(0)
+#else
+#define ACPI_DO_WHILE0(a) a
+#endif
+
+/* DEBUG_PRINT functions */
+
+#ifndef COMPILER_VA_MACRO
+
+#define ACPI_DEBUG_PRINT(plist) acpi_debug_print plist
+#define ACPI_DEBUG_PRINT_RAW(plist) acpi_debug_print_raw plist
+
+#else
+
+/* Helper macros for DEBUG_PRINT */
+
+#define ACPI_DO_DEBUG_PRINT(function, level, line, filename, modulename, component, ...)
\
+ ACPI_DO_WHILE0 ({ \
+ if (ACPI_IS_DEBUG_ENABLED (level, component)) \
+ { \
+ function (level, line, filename, modulename, component, __VA_ARGS__); \
+ } \
+ })
+
+#define ACPI_ACTUAL_DEBUG(level, line, filename, modulename, component, ...) \
+ ACPI_DO_DEBUG_PRINT (acpi_debug_print, level, line, \
+ filename, modulename, component, __VA_ARGS__)
+
+#define ACPI_ACTUAL_DEBUG_RAW(level, line, filename, modulename, component, ...) \
+ ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \
+ filename, modulename, component, __VA_ARGS__)
+
+#define ACPI_DEBUG_PRINT(plist) ACPI_ACTUAL_DEBUG plist
+#define ACPI_DEBUG_PRINT_RAW(plist) ACPI_ACTUAL_DEBUG_RAW plist
+
+#endif
+
+/*
+ * Function entry tracing
+ *
+ * The name of the function is emitted as a local variable that is
+ * intended to be used by both the entry trace and the exit trace.
+ */
+
+/* Helper macro */
+
+#define ACPI_TRACE_ENTRY(name, function, type, param) \
+ ACPI_FUNCTION_NAME (name) \
+ function (ACPI_DEBUG_PARAMETERS, (type) (param))
+
+/* The actual entry trace macros */
+
+#define ACPI_FUNCTION_TRACE(name) \
+ ACPI_FUNCTION_NAME(name) \
+ acpi_ut_trace (ACPI_DEBUG_PARAMETERS)
+
+#define ACPI_FUNCTION_TRACE_PTR(name, pointer) \
+ ACPI_TRACE_ENTRY (name, acpi_ut_trace_ptr, void *, pointer)
+
+#define ACPI_FUNCTION_TRACE_U32(name, value) \
+ ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value)
+
+#define ACPI_FUNCTION_TRACE_STR(name, string) \
+ ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, const char *, string)
+
+#define ACPI_FUNCTION_ENTRY() \
+ acpi_ut_track_stack_ptr()
+
+/*
+ * Function exit tracing
+ *
+ * These macros include a return statement. This is usually considered
+ * bad form, but having a separate exit macro before the actual return
+ * is very ugly and difficult to maintain.
+ *
+ * One of the FUNCTION_TRACE macros above must be used in conjunction
+ * with these macros so that "_AcpiFunctionName" is defined.
+ *
+ * There are two versions of most of the return macros. The default version is
+ * safer, since it avoids side-effects by guaranteeing that the argument will
+ * not be evaluated twice.
+ *
+ * A less-safe version of the macros is provided for optional use if the
+ * compiler uses excessive CPU stack (for example, this may happen in the
+ * debug case if code optimization is disabled.)
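+ *
+ * A minimal pairing sketch (function name hypothetical):
+ *
+ *     acpi_status acpi_xx_example(void)
+ *     {
+ *         ACPI_FUNCTION_TRACE(acpi_xx_example);
+ *
+ *         return_ACPI_STATUS(AE_OK);
+ *     }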
+ */ + +/* Exit trace helper macro */ + +#ifndef ACPI_SIMPLE_RETURN_MACROS + +#define ACPI_TRACE_EXIT(function, type, param) \ + ACPI_DO_WHILE0 ({ \ + register type _param = (type) (param); \ + function (ACPI_DEBUG_PARAMETERS, _param); \ + return (_param); \ + }) + +#else /* Use original less-safe macros */ + +#define ACPI_TRACE_EXIT(function, type, param) \ + ACPI_DO_WHILE0 ({ \ + function (ACPI_DEBUG_PARAMETERS, (type) (param)); \ + return (param); \ + }) + +#endif /* ACPI_SIMPLE_RETURN_MACROS */ + +/* The actual exit macros */ + +#define return_VOID \ + ACPI_DO_WHILE0 ({ \ + acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \ + return; \ + }) + +#define return_ACPI_STATUS(status) \ + ACPI_TRACE_EXIT (acpi_ut_status_exit, acpi_status, status) + +#define return_PTR(pointer) \ + ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer) + +#define return_STR(string) \ + ACPI_TRACE_EXIT (acpi_ut_str_exit, const char *, string) + +#define return_VALUE(value) \ + ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value) + +#define return_UINT32(value) \ + ACPI_TRACE_EXIT (acpi_ut_value_exit, u32, value) + +#define return_UINT8(value) \ + ACPI_TRACE_EXIT (acpi_ut_value_exit, u8, value) + +/* Conditional execution */ + +#define ACPI_DEBUG_EXEC(a) a +#define ACPI_DEBUG_ONLY_MEMBERS(a) a; +#define _VERBOSE_STRUCTURES + +/* Various object display routines for debug */ + +#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) +#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c) +#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) +#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) +#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) + +#define ACPI_TRACE_POINT(a, b, c, d) acpi_trace_point (a, b, c, d) + +#else /* ACPI_DEBUG_OUTPUT */ +/* + * This is the non-debug case -- make everything go away, + * leaving no executable debug code! + */ +#define ACPI_DEBUG_PRINT(pl) +#define ACPI_DEBUG_PRINT_RAW(pl) +#define ACPI_DEBUG_EXEC(a) +#define ACPI_DEBUG_ONLY_MEMBERS(a) +#define ACPI_FUNCTION_NAME(a) +#define ACPI_FUNCTION_TRACE(a) +#define ACPI_FUNCTION_TRACE_PTR(a, b) +#define ACPI_FUNCTION_TRACE_U32(a, b) +#define ACPI_FUNCTION_TRACE_STR(a, b) +#define ACPI_FUNCTION_ENTRY() +#define ACPI_DUMP_STACK_ENTRY(a) +#define ACPI_DUMP_OPERANDS(a, b, c) +#define ACPI_DUMP_ENTRY(a, b) +#define ACPI_DUMP_PATHNAME(a, b, c, d) +#define ACPI_DUMP_BUFFER(a, b) +#define ACPI_IS_DEBUG_ENABLED(level, component) 0 +#define ACPI_TRACE_POINT(a, b, c, d) + +/* Return macros must have a return statement at the minimum */ + +#define return_VOID return +#define return_ACPI_STATUS(s) return(s) +#define return_PTR(s) return(s) +#define return_STR(s) return(s) +#define return_VALUE(s) return(s) +#define return_UINT8(s) return(s) +#define return_UINT32(s) return(s) + +#endif /* ACPI_DEBUG_OUTPUT */ + +#endif /* __ACOUTPUT_H__ */ diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h new file mode 100644 index 0000000..bc7d39e --- /dev/null +++ b/include/acpi/acpi.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acpi.h - Master public include file used to interface to ACPICA + * + * Copyright (C) 2000 - 2019, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACPI_H__ +#define __ACPI_H__ + +/* + * Public include files for use by code that will interface to ACPICA. 
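+ *
+ * A consumer typically pulls all of this in with the single directive
+ * #include <acpi/acpi.h> rather than including the files below directly.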
+ *
+ * Information includes the ACPICA data types, names, exceptions, and
+ * external interface prototypes. Also included are the definitions for
+ * all ACPI tables (FADT, MADT, etc.)
+ *
+ * Note: The order of these include files is important.
+ */
+#include <acpi/platform/acenv.h> /* Environment-specific items */
+#include <acpi/acnames.h> /* Common ACPI names and strings */
+#include <acpi/actypes.h> /* ACPICA data types and structures */
+#include <acpi/acexcep.h> /* ACPICA exceptions */
+#include <acpi/actbl.h> /* ACPI table definitions */
+#include <acpi/acrestyp.h> /* Resource Descriptor structs */
+#include <acpi/platform/acenvex.h> /* Extra environment-specific items */
+#include <acpi/acoutput.h> /* Error output and Debug macros */
+#include <acpi/acpiosxf.h> /* OSL interfaces (ACPICA-to-OS) */
+#include <acpi/acpixf.h> /* ACPI core subsystem external interfaces */
+
+#endif /* __ACPI_H__ */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
new file mode 100644
index 0000000..3f6fdde
--- /dev/null
+++ b/include/acpi/acpi_bus.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ */
+
+#ifndef __ACPI_BUS_H__
+#define __ACPI_BUS_H__
+
+#include <linux/device.h>
+#include <linux/property.h>
+
+/* TBD: Make dynamic */
+#define ACPI_MAX_HANDLES 10
+struct acpi_handle_list {
+ u32 count;
+ acpi_handle handles[ACPI_MAX_HANDLES];
+};
+
+/* acpi_utils.h */
+acpi_status
+acpi_extract_package(union acpi_object *package,
+ struct acpi_buffer *format, struct acpi_buffer *buffer);
+acpi_status
+acpi_evaluate_integer(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments, unsigned long long *data);
+acpi_status
+acpi_evaluate_reference(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
+acpi_status
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+ struct acpi_buffer *status_buf);
+
+acpi_status
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
+
+bool acpi_has_method(acpi_handle handle, char *name);
+acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
+ u64 arg);
+acpi_status acpi_evaluate_ej0(acpi_handle handle);
+acpi_status acpi_evaluate_lck(acpi_handle handle, int lock);
+bool acpi_ata_match(acpi_handle handle);
+bool acpi_bay_match(acpi_handle handle);
+bool acpi_dock_match(acpi_handle handle);
+
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
+union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 func, union acpi_object *argv4);
+
+static inline union acpi_object *
+acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
+ u64 func, union acpi_object *argv4,
+ acpi_object_type type)
+{
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4);
+ if (obj && obj->type != type) {
+ ACPI_FREE(obj);
+ obj = NULL;
+ }
+
+ return obj;
+}
+
+#define ACPI_INIT_DSM_ARGV4(cnt, eles) \
+ { \
+ .package.type = ACPI_TYPE_PACKAGE, \
+ .package.count = (cnt), \
+ .package.elements = (eles) \
+ }
+
+bool acpi_dev_found(const char *hid);
+bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+
+#ifdef CONFIG_ACPI
+
+#include <linux/proc_fs.h>
+
+#define ACPI_BUS_FILE_ROOT "acpi"
+extern struct proc_dir_entry *acpi_root_dir;
+
+enum acpi_bus_device_type {
+ ACPI_BUS_TYPE_DEVICE = 0,
+ ACPI_BUS_TYPE_POWER,
+ ACPI_BUS_TYPE_PROCESSOR,
+ ACPI_BUS_TYPE_THERMAL,
+ ACPI_BUS_TYPE_POWER_BUTTON,
+ ACPI_BUS_TYPE_SLEEP_BUTTON,
+ ACPI_BUS_TYPE_ECDT_EC,
+ ACPI_BUS_DEVICE_TYPE_COUNT
+};
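+
+/*
+ * Example sketch for the _DSM helpers above (the GUID variable, revision,
+ * function index, and consume() are hypothetical):
+ *
+ *     union acpi_object *obj;
+ *
+ *     obj = acpi_evaluate_dsm_typed(handle, &my_dsm_guid, 1, 4, NULL,
+ *                                   ACPI_TYPE_BUFFER);
+ *     if (obj) {
+ *             consume(obj->buffer.pointer, obj->buffer.length);
+ *             ACPI_FREE(obj);
+ *     }
+ *
+ * A NULL argv4 evaluates the _DSM with an empty package argument; use
+ * ACPI_INIT_DSM_ARGV4() to build a non-empty one.
+ */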
+ +struct acpi_driver; +struct acpi_device; + +/* + * ACPI Scan Handler + * ----------------- + */ + +struct acpi_hotplug_profile { + struct kobject kobj; + int (*scan_dependent)(struct acpi_device *adev); + void (*notify_online)(struct acpi_device *adev); + bool enabled:1; + bool demand_offline:1; +}; + +static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile( + struct kobject *kobj) +{ + return container_of(kobj, struct acpi_hotplug_profile, kobj); +} + +struct acpi_scan_handler { + const struct acpi_device_id *ids; + struct list_head list_node; + bool (*match)(const char *idstr, const struct acpi_device_id **matchid); + int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id); + void (*detach)(struct acpi_device *dev); + void (*bind)(struct device *phys_dev); + void (*unbind)(struct device *phys_dev); + struct acpi_hotplug_profile hotplug; +}; + +/* + * ACPI Hotplug Context + * -------------------- + */ + +struct acpi_hotplug_context { + struct acpi_device *self; + int (*notify)(struct acpi_device *, u32); + void (*uevent)(struct acpi_device *, u32); + void (*fixup)(struct acpi_device *); +}; + +/* + * ACPI Driver + * ----------- + */ + +typedef int (*acpi_op_add) (struct acpi_device * device); +typedef int (*acpi_op_remove) (struct acpi_device * device); +typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); + +struct acpi_device_ops { + acpi_op_add add; + acpi_op_remove remove; + acpi_op_notify notify; +}; + +#define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ + +struct acpi_driver { + char name[80]; + char class[80]; + const struct acpi_device_id *ids; /* Supported Hardware IDs */ + unsigned int flags; + struct acpi_device_ops ops; + struct device_driver drv; + struct module *owner; +}; + +/* + * ACPI Device + * ----------- + */ + +/* Status (_STA) */ + +struct acpi_device_status { + u32 present:1; + u32 enabled:1; + u32 show_in_ui:1; + u32 functional:1; + u32 battery_present:1; + u32 reserved:27; +}; + +/* Flags */ + +struct acpi_device_flags { + u32 dynamic_status:1; + u32 removable:1; + u32 ejectable:1; + u32 power_manageable:1; + u32 match_driver:1; + u32 initialized:1; + u32 visited:1; + u32 hotplug_notify:1; + u32 is_dock_station:1; + u32 of_compatible_ok:1; + u32 coherent_dma:1; + u32 cca_seen:1; + u32 enumeration_by_parent:1; + u32 reserved:19; +}; + +/* File System */ + +struct acpi_device_dir { + struct proc_dir_entry *entry; +}; + +#define acpi_device_dir(d) ((d)->dir.entry) + +/* Plug and Play */ + +typedef char acpi_bus_id[8]; +typedef u64 acpi_bus_address; +typedef char acpi_device_name[40]; +typedef char acpi_device_class[20]; + +struct acpi_hardware_id { + struct list_head list; + const char *id; +}; + +struct acpi_pnp_type { + u32 hardware_id:1; + u32 bus_address:1; + u32 platform_id:1; + u32 reserved:29; +}; + +struct acpi_device_pnp { + acpi_bus_id bus_id; /* Object name */ + struct acpi_pnp_type type; /* ID type */ + acpi_bus_address bus_address; /* _ADR */ + char *unique_id; /* _UID */ + struct list_head ids; /* _HID and _CIDs */ + acpi_device_name device_name; /* Driver-determined */ + acpi_device_class device_class; /* " */ + union acpi_object *str_obj; /* unicode string for _STR method */ +}; + +#define acpi_device_bid(d) ((d)->pnp.bus_id) +#define acpi_device_adr(d) ((d)->pnp.bus_address) +const char *acpi_device_hid(struct acpi_device *device); +#define acpi_device_uid(d) ((d)->pnp.unique_id) +#define acpi_device_name(d) ((d)->pnp.device_name) +#define acpi_device_class(d) 
((d)->pnp.device_class) + +/* Power Management */ + +struct acpi_device_power_flags { + u32 explicit_get:1; /* _PSC present? */ + u32 power_resources:1; /* Power resources */ + u32 inrush_current:1; /* Serialize Dx->D0 */ + u32 power_removed:1; /* Optimize Dx->D0 */ + u32 ignore_parent:1; /* Power is independent of parent power state */ + u32 dsw_present:1; /* _DSW present? */ + u32 reserved:26; +}; + +struct acpi_device_power_state { + struct { + u8 valid:1; + u8 explicit_set:1; /* _PSx present? */ + u8 reserved:6; + } flags; + int power; /* % Power (compared to D0) */ + int latency; /* Dx->D0 time (microseconds) */ + struct list_head resources; /* Power resources referenced */ +}; + +struct acpi_device_power { + int state; /* Current state */ + struct acpi_device_power_flags flags; + struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ +}; + +/* Performance Management */ + +struct acpi_device_perf_flags { + u8 reserved:8; +}; + +struct acpi_device_perf_state { + struct { + u8 valid:1; + u8 reserved:7; + } flags; + u8 power; /* % Power (compared to P0) */ + u8 performance; /* % Performance ( " ) */ + int latency; /* Px->P0 time (microseconds) */ +}; + +struct acpi_device_perf { + int state; + struct acpi_device_perf_flags flags; + int state_count; + struct acpi_device_perf_state *states; +}; + +/* Wakeup Management */ +struct acpi_device_wakeup_flags { + u8 valid:1; /* Can successfully enable wakeup? */ + u8 notifier_present:1; /* Wake-up notify handler has been installed */ +}; + +struct acpi_device_wakeup_context { + void (*func)(struct acpi_device_wakeup_context *context); + struct device *dev; +}; + +struct acpi_device_wakeup { + acpi_handle gpe_device; + u64 gpe_number; + u64 sleep_state; + struct list_head resources; + struct acpi_device_wakeup_flags flags; + struct acpi_device_wakeup_context context; + struct wakeup_source *ws; + int prepare_count; + int enable_count; +}; + +struct acpi_device_physical_node { + unsigned int node_id; + struct list_head node; + struct device *dev; + bool put_online:1; +}; + +struct acpi_device_properties { + const guid_t *guid; + const union acpi_object *properties; + struct list_head list; +}; + +/* ACPI Device Specific Data (_DSD) */ +struct acpi_device_data { + const union acpi_object *pointer; + struct list_head properties; + const union acpi_object *of_compatible; + struct list_head subnodes; +}; + +struct acpi_gpio_mapping; + +/* Device */ +struct acpi_device { + int device_type; + acpi_handle handle; /* no handle for fixed hardware */ + struct fwnode_handle fwnode; + struct acpi_device *parent; + struct list_head children; + struct list_head node; + struct list_head wakeup_list; + struct list_head del_list; + struct acpi_device_status status; + struct acpi_device_flags flags; + struct acpi_device_pnp pnp; + struct acpi_device_power power; + struct acpi_device_wakeup wakeup; + struct acpi_device_perf performance; + struct acpi_device_dir dir; + struct acpi_device_data data; + struct acpi_scan_handler *handler; + struct acpi_hotplug_context *hp; + struct acpi_driver *driver; + const struct acpi_gpio_mapping *driver_gpios; + void *driver_data; + struct device dev; + unsigned int physical_node_count; + unsigned int dep_unmet; + struct list_head physical_node_list; + struct mutex physical_node_lock; + void (*remove)(struct acpi_device *); +}; + +/* Non-device subnode */ +struct acpi_data_node { + const char *name; + acpi_handle handle; + struct fwnode_handle fwnode; + struct fwnode_handle *parent; + struct 
acpi_device_data data; + struct list_head sibling; + struct kobject kobj; + struct completion kobj_done; +}; + +extern const struct fwnode_operations acpi_device_fwnode_ops; +extern const struct fwnode_operations acpi_data_fwnode_ops; +extern const struct fwnode_operations acpi_static_fwnode_ops; + +bool is_acpi_device_node(const struct fwnode_handle *fwnode); +bool is_acpi_data_node(const struct fwnode_handle *fwnode); + +static inline bool is_acpi_node(const struct fwnode_handle *fwnode) +{ + return (is_acpi_device_node(fwnode) || is_acpi_data_node(fwnode)); +} + +#define to_acpi_device_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_acpi_device_node_fwnode = __fwnode; \ + \ + is_acpi_device_node(__to_acpi_device_node_fwnode) ? \ + container_of(__to_acpi_device_node_fwnode, \ + struct acpi_device, fwnode) : \ + NULL; \ + }) + +#define to_acpi_data_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \ + \ + is_acpi_data_node(__to_acpi_data_node_fwnode) ? \ + container_of(__to_acpi_data_node_fwnode, \ + struct acpi_data_node, fwnode) : \ + NULL; \ + }) + +static inline bool is_acpi_static_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && + fwnode->ops == &acpi_static_fwnode_ops; +} + +static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, + const char *name) +{ + return is_acpi_data_node(fwnode) ? + (!strcmp(to_acpi_data_node(fwnode)->name, name)) : false; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return &adev->fwnode; +} + +static inline void *acpi_driver_data(struct acpi_device *d) +{ + return d->driver_data; +} + +#define to_acpi_device(d) container_of(d, struct acpi_device, dev) +#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) + +static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta) +{ + *((u32 *)&adev->status) = sta; +} + +static inline void acpi_set_hp_context(struct acpi_device *adev, + struct acpi_hotplug_context *hp) +{ + hp->self = adev; + adev->hp = hp; +} + +void acpi_initialize_hp_context(struct acpi_device *adev, + struct acpi_hotplug_context *hp, + int (*notify)(struct acpi_device *, u32), + void (*uevent)(struct acpi_device *, u32)); + +/* acpi_device.dev.bus == &acpi_bus_type */ +extern struct bus_type acpi_bus_type; + +/* + * Events + * ------ + */ + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + +extern struct kobject *acpi_kobj; +extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); +void acpi_bus_private_data_handler(acpi_handle, void *); +int acpi_bus_get_private_data(acpi_handle, void **); +int acpi_bus_attach_private_data(acpi_handle, void *); +void acpi_bus_detach_private_data(acpi_handle); +extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); +extern int register_acpi_notifier(struct notifier_block *); +extern int unregister_acpi_notifier(struct notifier_block *); + +/* + * External Functions + */ + +int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); +struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle); +void acpi_bus_put_acpi_device(struct acpi_device *adev); +acpi_status acpi_bus_get_status_handle(acpi_handle handle, + unsigned long long *sta); +int acpi_bus_get_status(struct acpi_device *device); + +int acpi_bus_set_power(acpi_handle handle, int state); +const char *acpi_power_state_string(int state); +int 
acpi_device_set_power(struct acpi_device *device, int state); +int acpi_bus_init_power(struct acpi_device *device); +int acpi_device_fix_up_power(struct acpi_device *device); +int acpi_bus_update_power(acpi_handle handle, int *state_p); +int acpi_device_update_power(struct acpi_device *device, int *state_p); +bool acpi_bus_power_manageable(acpi_handle handle); +int acpi_device_power_add_dependent(struct acpi_device *adev, + struct device *dev); +void acpi_device_power_remove_dependent(struct acpi_device *adev, + struct device *dev); + +#ifdef CONFIG_PM +bool acpi_bus_can_wakeup(acpi_handle handle); +#else +static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; } +#endif + +void acpi_scan_lock_acquire(void); +void acpi_scan_lock_release(void); +void acpi_lock_hp_context(void); +void acpi_unlock_hp_context(void); +int acpi_scan_add_handler(struct acpi_scan_handler *handler); +int acpi_bus_register_driver(struct acpi_driver *driver); +void acpi_bus_unregister_driver(struct acpi_driver *driver); +int acpi_bus_scan(acpi_handle handle); +void acpi_bus_trim(struct acpi_device *start); +acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); +int acpi_match_device_ids(struct acpi_device *device, + const struct acpi_device_id *ids); +void acpi_set_modalias(struct acpi_device *adev, const char *default_id, + char *modalias, size_t len); +int acpi_create_dir(struct acpi_device *); +void acpi_remove_dir(struct acpi_device *); + +static inline bool acpi_device_enumerated(struct acpi_device *adev) +{ + return adev && adev->flags.initialized && adev->flags.visited; +} + +/** + * module_acpi_driver(acpi_driver) - Helper macro for registering an ACPI driver + * @__acpi_driver: acpi_driver struct + * + * Helper macro for ACPI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_acpi_driver(__acpi_driver) \ + module_driver(__acpi_driver, acpi_bus_register_driver, \ + acpi_bus_unregister_driver) + +/* + * Bind physical devices with ACPI devices + */ +struct acpi_bus_type { + struct list_head list; + const char *name; + bool (*match)(struct device *dev); + struct acpi_device * (*find_companion)(struct device *); + void (*setup)(struct device *); + void (*cleanup)(struct device *); +}; +int register_acpi_bus_type(struct acpi_bus_type *); +int unregister_acpi_bus_type(struct acpi_bus_type *); +int acpi_bind_one(struct device *dev, struct acpi_device *adev); +int acpi_unbind_one(struct device *dev); + +struct acpi_pci_root { + struct acpi_device * device; + struct pci_bus *bus; + u16 segment; + struct resource secondary; /* downstream bus range */ + + u32 osc_support_set; /* _OSC state of support bits */ + u32 osc_control_set; /* _OSC state of control bits */ + phys_addr_t mcfg_addr; +}; + +/* helper */ + +bool acpi_dma_supported(struct acpi_device *adev); +enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); +int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, + u64 *size); +int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); + +struct acpi_device *acpi_find_child_device(struct acpi_device *parent, + u64 address, bool check_children); +int acpi_is_root_bridge(acpi_handle); +struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); + +int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); +int acpi_disable_wakeup_device_power(struct acpi_device *dev); + +#ifdef CONFIG_X86 +bool acpi_device_always_present(struct acpi_device *adev); +#else +static inline bool acpi_device_always_present(struct acpi_device *adev) +{ + return false; +} +#endif + +#ifdef CONFIG_PM +void acpi_pm_wakeup_event(struct device *dev); +acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, + void (*func)(struct acpi_device_wakeup_context *context)); +acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); +bool acpi_pm_device_can_wakeup(struct device *dev); +int acpi_pm_device_sleep_state(struct device *, int *, int); +int acpi_pm_set_device_wakeup(struct device *dev, bool enable); +int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable); +#else +static inline void acpi_pm_wakeup_event(struct device *dev) +{ +} +static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, + struct device *dev, + void (*func)(struct acpi_device_wakeup_context *context)) +{ + return AE_SUPPORT; +} +static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) +{ + return AE_SUPPORT; +} +static inline bool acpi_pm_device_can_wakeup(struct device *dev) +{ + return false; +} +static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) +{ + if (p) + *p = ACPI_STATE_D0; + + return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ? 
+ m : ACPI_STATE_D0; +} +static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable) +{ + return -ENODEV; +} +static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable) +{ + return -ENODEV; +} +#endif + +#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT +bool acpi_sleep_state_supported(u8 sleep_state); +#else +static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; } +#endif + +#ifdef CONFIG_ACPI_SLEEP +u32 acpi_target_system_state(void); +#else +static inline u32 acpi_target_system_state(void) { return ACPI_STATE_S0; } +#endif + +static inline bool acpi_device_power_manageable(struct acpi_device *adev) +{ + return adev->flags.power_manageable; +} + +static inline bool acpi_device_can_wakeup(struct acpi_device *adev) +{ + return adev->wakeup.flags.valid; +} + +static inline bool acpi_device_can_poweroff(struct acpi_device *adev) +{ + return adev->power.states[ACPI_STATE_D3_COLD].flags.valid || + ((acpi_gbl_FADT.header.revision < 6) && + adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); +} + +struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); + +static inline void acpi_dev_put(struct acpi_device *adev) +{ + put_device(&adev->dev); +} +#else /* CONFIG_ACPI */ + +static inline int register_acpi_bus_type(void *bus) { return 0; } +static inline int unregister_acpi_bus_type(void *bus) { return 0; } + +#endif /* CONFIG_ACPI */ + +#endif /*__ACPI_BUS_H__*/ diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h new file mode 100644 index 0000000..5eb1759 --- /dev/null +++ b/include/acpi/acpi_drivers.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * acpi_drivers.h ($Revision: 31 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + */ + +#ifndef __ACPI_DRIVERS_H__ +#define __ACPI_DRIVERS_H__ + +#define ACPI_MAX_STRING 80 + +/* + * Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst + * if you add to this list. + */ +#define ACPI_BUS_COMPONENT 0x00010000 +#define ACPI_AC_COMPONENT 0x00020000 +#define ACPI_BATTERY_COMPONENT 0x00040000 +#define ACPI_BUTTON_COMPONENT 0x00080000 +#define ACPI_SBS_COMPONENT 0x00100000 +#define ACPI_FAN_COMPONENT 0x00200000 +#define ACPI_PCI_COMPONENT 0x00400000 +#define ACPI_POWER_COMPONENT 0x00800000 +#define ACPI_CONTAINER_COMPONENT 0x01000000 +#define ACPI_SYSTEM_COMPONENT 0x02000000 +#define ACPI_THERMAL_COMPONENT 0x04000000 +#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000 +#define ACPI_VIDEO_COMPONENT 0x10000000 +#define ACPI_PROCESSOR_COMPONENT 0x20000000 + +/* + * _HID definitions + * HIDs must conform to ACPI spec(6.1.4) + * Linux specific HIDs do not apply to this and begin with LNX: + */ + +#define ACPI_POWER_HID "LNXPOWER" +#define ACPI_PROCESSOR_OBJECT_HID "LNXCPU" +#define ACPI_SYSTEM_HID "LNXSYSTM" +#define ACPI_THERMAL_HID "LNXTHERM" +#define ACPI_BUTTON_HID_POWERF "LNXPWRBN" +#define ACPI_BUTTON_HID_SLEEPF "LNXSLPBN" +#define ACPI_VIDEO_HID "LNXVIDEO" +#define ACPI_BAY_HID "LNXIOBAY" +#define ACPI_DOCK_HID "LNXDOCK" +#define ACPI_ECDT_HID "LNXEC" +/* Quirk for broken IBM BIOSes */ +#define ACPI_SMBUS_IBM_HID "SMBUSIBM" + +/* + * For fixed hardware buttons, we fabricate acpi_devices with HID + * ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. Fixed hardware + * signals only an event; it doesn't supply a notification value. 
+ * To allow drivers to treat notifications from fixed hardware the
+ * same as those from real devices, we turn the events into this
+ * notification value.
+ */
+#define ACPI_FIXED_HARDWARE_EVENT 0x100
+
+/* --------------------------------------------------------------------------
+ PCI
+ -------------------------------------------------------------------------- */
+
+
+/* ACPI PCI Interrupt Link (pci_link.c) */
+
+int acpi_irq_penalty_init(void);
+int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
+ int *polarity, char **name);
+int acpi_pci_link_free_irq(acpi_handle handle);
+
+/* ACPI PCI Device Binding (pci_bind.c) */
+
+struct pci_bus;
+
+#ifdef CONFIG_PCI
+struct pci_dev *acpi_get_pci_dev(acpi_handle);
+#else
+static inline struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ return NULL;
+}
+#endif
+
+/* Arch-defined function to add a bus to the system */
+
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root);
+
+#ifdef CONFIG_X86
+void pci_acpi_crs_quirks(void);
+#else
+static inline void pci_acpi_crs_quirks(void) { }
+#endif
+
+/* --------------------------------------------------------------------------
+ Processor
+ -------------------------------------------------------------------------- */
+
+#define ACPI_PROCESSOR_LIMIT_NONE 0x00
+#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01
+#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02
+
+/*--------------------------------------------------------------------------
+ Dock Station
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_DOCK
+extern int is_dock_device(struct acpi_device *adev);
+#else
+static inline int is_dock_device(struct acpi_device *adev)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI_DOCK */
+
+#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
new file mode 100644
index 0000000..12d8bd3
--- /dev/null
+++ b/include/acpi/acpi_io.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ACPI_IO_H_
+#define _ACPI_IO_H_
+
+#include <linux/io.h>
+
+#include <acpi/acpi.h>
+
+#ifndef acpi_os_ioremap
+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
+ acpi_size size)
+{
+ return ioremap_cache(phys, size);
+}
+#endif
+
+extern bool acpi_permanent_mmap;
+
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
+void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
+
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
+#endif
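
The iomem helpers declared above are reference-counted: acpi_os_map_iomem() reuses a live mapping of the same physical range when one exists, and acpi_os_unmap_iomem() drops that reference. A minimal sketch of a caller, where the function name and the physical address are placeholders invented for illustration:

	static u32 example_read_reg(void)
	{
		void __iomem *base;
		u32 val;

		base = acpi_os_map_iomem(0xfed40000, 4);	/* placeholder address */
		if (!base)
			return 0;

		val = readl(base);				/* from <linux/io.h> */
		acpi_os_unmap_iomem(base, 4);
		return val;
	}
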
diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h
new file mode 100644
index 0000000..72d6264
--- /dev/null
+++ b/include/acpi/acpi_lpat.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * acpi_lpat.h - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ */
+
+#ifndef ACPI_LPAT_H
+#define ACPI_LPAT_H
+
+struct acpi_lpat {
+ int temp;
+ int raw;
+};
+
+struct acpi_lpat_conversion_table {
+ struct acpi_lpat *lpat;
+ int lpat_count;
+};
+
+#ifdef CONFIG_ACPI
+
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+ int raw);
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+ int temp);
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+ handle);
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+ *lpat_table);
+
+#else
+static inline int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+ int raw)
+{
+ return 0;
+}
+
+static inline int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+ int temp)
+{
+ return 0;
+}
+
+static inline struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(
+ acpi_handle handle)
+{
+ return NULL;
+}
+
+static inline void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+ *lpat_table)
+{
+}
+
+#endif
+#endif
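
An LPAT is a firmware package of (temperature, raw) pairs; the two conversion routines interpolate linearly between neighboring entries. A sketch of the intended call sequence, assuming handle names an ACPI object that carries an LPAT and raw came from the sensor (all variable names are illustrative):

	struct acpi_lpat_conversion_table *table;
	int temp;

	table = acpi_lpat_get_conversion_table(handle);
	if (table) {
		temp = acpi_lpat_raw_to_temp(table, raw);
		acpi_lpat_free_conversion_table(table);
	}
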
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
new file mode 100644
index 0000000..fdebcfc
--- /dev/null
+++ b/include/acpi/acpi_numa.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ACPI_NUMA_H
+#define __ACPI_NUMA_H
+
+#ifdef CONFIG_ACPI_NUMA
+#include <linux/kernel.h>
+#include <linux/numa.h>
+
+/* Proximity bitmap length */
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
+#endif
+
+extern int pxm_to_node(int);
+extern int node_to_pxm(int);
+extern int acpi_map_pxm_to_node(int);
+extern unsigned char acpi_srat_revision;
+extern int acpi_numa __initdata;
+
+extern void bad_srat(void);
+extern int srat_disabled(void);
+
+#endif /* CONFIG_ACPI_NUMA */
+#endif /* __ACPI_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
new file mode 100644
index 0000000..2e63b7b
--- /dev/null
+++ b/include/acpi/acpiosxf.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These
+ * interfaces must be implemented by OSL to interface the
+ * ACPI components to the host operating system.
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACPIOSXF_H__
+#define __ACPIOSXF_H__
+
+#include <acpi/platform/acenv.h>
+#include <acpi/actypes.h>
+
+/* Types for acpi_os_execute */
+
+typedef enum {
+ OSL_GLOBAL_LOCK_HANDLER,
+ OSL_NOTIFY_HANDLER,
+ OSL_GPE_HANDLER,
+ OSL_DEBUGGER_MAIN_THREAD,
+ OSL_DEBUGGER_EXEC_THREAD,
+ OSL_EC_POLL_HANDLER,
+ OSL_EC_BURST_HANDLER
+} acpi_execute_type;
+
+#define ACPI_NO_UNIT_LIMIT ((u32) -1)
+#define ACPI_MUTEX_SEM 1
+
+/* Functions for acpi_os_signal */
+
+#define ACPI_SIGNAL_FATAL 0
+#define ACPI_SIGNAL_BREAKPOINT 1
+
+struct acpi_signal_fatal_info {
+ u32 type;
+ u32 code;
+ u32 argument;
+};
+
+/*
+ * OSL Initialization and shutdown primitives
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize
+acpi_status acpi_os_initialize(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate
+acpi_status acpi_os_terminate(void);
+#endif
+
+/*
+ * ACPI Table interfaces
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_root_pointer
+acpi_physical_address acpi_os_get_root_pointer(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
+acpi_status
+acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
+ acpi_string *new_val);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *new_address,
+ u32 *new_table_length);
+#endif
+
+/*
+ * Spinlock primitives
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_lock
+void acpi_os_delete_lock(acpi_spinlock handle);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_lock
+acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_lock
+void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags);
+#endif
+
+/*
+ * RAW spinlock primitives.
If the OS does not provide them, fallback to + * spinlock primitives + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock +# define acpi_os_create_raw_lock(out_handle) acpi_os_create_lock(out_handle) +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock +# define acpi_os_delete_raw_lock(handle) acpi_os_delete_lock(handle) +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock +# define acpi_os_acquire_raw_lock(handle) acpi_os_acquire_lock(handle) +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock +# define acpi_os_release_raw_lock(handle, flags) \ + acpi_os_release_lock(handle, flags) +#endif + +/* + * Semaphore primitives + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_semaphore +acpi_status +acpi_os_create_semaphore(u32 max_units, + u32 initial_units, acpi_semaphore * out_handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_semaphore +acpi_status acpi_os_delete_semaphore(acpi_semaphore handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_semaphore +acpi_status +acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal_semaphore +acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); +#endif + +/* + * Mutex primitives. May be configured to use semaphores instead via + * ACPI_MUTEX_TYPE (see platform/acenv.h) + */ +#if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE) + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_mutex +acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_mutex +void acpi_os_delete_mutex(acpi_mutex handle); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_mutex +acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_mutex +void acpi_os_release_mutex(acpi_mutex handle); +#endif + +#endif + +/* + * Memory allocation and mapping + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate +void *acpi_os_allocate(acpi_size size); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed +void *acpi_os_allocate_zeroed(acpi_size size); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free +void acpi_os_free(void *memory); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_map_memory +void *acpi_os_map_memory(acpi_physical_address where, acpi_size length); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_unmap_memory +void acpi_os_unmap_memory(void *logical_address, acpi_size size); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address +acpi_status +acpi_os_get_physical_address(void *logical_address, + acpi_physical_address *physical_address); +#endif + +/* + * Memory/Object Cache + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_cache +acpi_status +acpi_os_create_cache(char *cache_name, + u16 object_size, + u16 max_depth, acpi_cache_t ** return_cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_cache +acpi_status acpi_os_delete_cache(acpi_cache_t * cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_purge_cache +acpi_status acpi_os_purge_cache(acpi_cache_t * cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object +void *acpi_os_acquire_object(acpi_cache_t * cache); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_object +acpi_status acpi_os_release_object(acpi_cache_t * cache, void 
*object); +#endif + +/* + * Interrupt handlers + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_install_interrupt_handler +acpi_status +acpi_os_install_interrupt_handler(u32 interrupt_number, + acpi_osd_handler service_routine, + void *context); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_remove_interrupt_handler +acpi_status +acpi_os_remove_interrupt_handler(u32 interrupt_number, + acpi_osd_handler service_routine); +#endif + +/* + * Threads and Scheduling + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id +acpi_thread_id acpi_os_get_thread_id(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_execute +acpi_status +acpi_os_execute(acpi_execute_type type, + acpi_osd_exec_callback function, void *context); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_events_complete +void acpi_os_wait_events_complete(void); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_sleep +void acpi_os_sleep(u64 milliseconds); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_stall +void acpi_os_stall(u32 microseconds); +#endif + +/* + * Platform and hardware-independent I/O interfaces + */ +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_port +acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_port +acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width); +#endif + +/* + * Platform and hardware-independent physical memory interfaces + */ +int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width); + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory +acpi_status +acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width); +#endif + +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_memory +acpi_status +acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width); +#endif + +/* + * Platform and hardware-independent PCI configuration space access + * Note: Can't use "Register" as a parameter, changed to "Reg" -- + * certain compilers complain. 
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_pci_configuration
+acpi_status
+acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
+ u32 reg, u64 *value, u32 width);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_pci_configuration
+acpi_status
+acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
+ u32 reg, u64 value, u32 width);
+#endif
+
+/*
+ * Miscellaneous
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
+u8 acpi_os_readable(void *pointer, acpi_size length);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
+u8 acpi_os_writable(void *pointer, acpi_size length);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_timer
+u64 acpi_os_get_timer(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_signal
+acpi_status acpi_os_signal(u32 function, void *info);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_enter_sleep
+acpi_status acpi_os_enter_sleep(u8 sleep_state, u32 rega_value, u32 regb_value);
+#endif
+
+/*
+ * Debug print routines
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf
+ACPI_PRINTF_LIKE(1)
+void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_vprintf
+void acpi_os_vprintf(const char *format, va_list args);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
+void acpi_os_redirect_output(void *destination);
+#endif
+
+/*
+ * Debug IO
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
+acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_debugger
+acpi_status acpi_os_initialize_debugger(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_debugger
+void acpi_os_terminate_debugger(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_command_ready
+acpi_status acpi_os_wait_command_ready(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_notify_command_complete
+acpi_status acpi_os_notify_command_complete(void);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
+void
+acpi_os_trace_point(acpi_trace_event_type type,
+ u8 begin, u8 *aml, char *pathname);
+#endif
+
+/*
+ * Obtain ACPI table(s)
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name
+acpi_status
+acpi_os_get_table_by_name(char *signature,
+ u32 instance,
+ struct acpi_table_header **table,
+ acpi_physical_address *address);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
+acpi_status
+acpi_os_get_table_by_index(u32 index,
+ struct acpi_table_header **table,
+ u32 *instance, acpi_physical_address *address);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
+acpi_status
+acpi_os_get_table_by_address(acpi_physical_address address,
+ struct acpi_table_header **table);
+#endif
+
+/*
+ * Directory manipulation
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory
+void *acpi_os_open_directory(char *pathname,
+ char *wildcard_spec, char requested_file_type);
+#endif
+
+/* requested_file_type values */
+
+#define REQUEST_FILE_ONLY 0
+#define REQUEST_DIR_ONLY 1
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
+char *acpi_os_get_next_filename(void *dir_handle);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
+void acpi_os_close_directory(void *dir_handle);
+#endif
+
+#endif /* __ACPIOSXF_H__ */
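
Every OSL prototype in the file above can be swapped out by the host: defining the matching ACPI_USE_ALTERNATE_PROTOTYPE_* symbol in the platform header, before this file is parsed, suppresses the default declaration so the host can supply its own macro or inline. A sketch of the pattern, where the my_host_* name is invented for illustration:

	/* in the host's platform header, included ahead of acpiosxf.h */
	acpi_physical_address my_host_get_root_pointer(void);
	#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_root_pointer
	#define acpi_os_get_root_pointer() my_host_get_root_pointer()

This lets a host route an OSL service to a native facility without editing the ACPICA sources themselves.
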
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
new file mode 100644
index 0000000..00441e2
--- /dev/null
+++ b/include/acpi/acpixf.h
@@ -0,0 +1,960 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acpixf.h - External interfaces to the ACPI subsystem
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACXFACE_H__
+#define __ACXFACE_H__
+
+/* Current ACPICA subsystem version in YYYYMMDD format */
+
+#define ACPI_CA_VERSION 0x20190816
+
+#include <acpi/acnames.h>
+#include <acpi/actypes.h>
+#include <acpi/actbl.h>
+#include <acpi/acbuffer.h>
+
+/*****************************************************************************
+ *
+ * Macros used for ACPICA globals and configuration
+ *
+ ****************************************************************************/
+
+/*
+ * Ensure that global variables are defined and initialized only once.
+ *
+ * The use of these macros allows for a single list of globals (here)
+ * in order to simplify maintenance of the code.
+ */
+#ifdef DEFINE_ACPI_GLOBALS
+#define ACPI_GLOBAL(type,name) \
+ extern type name; \
+ type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+ type name=value
+
+#else
+#ifndef ACPI_GLOBAL
+#define ACPI_GLOBAL(type,name) \
+ extern type name
+#endif
+
+#ifndef ACPI_INIT_GLOBAL
+#define ACPI_INIT_GLOBAL(type,name,value) \
+ extern type name
+#endif
+#endif
+
+/*
+ * These macros configure the various ACPICA interfaces. They are
+ * useful for generating stub inline functions for features that are
+ * configured out of the current kernel or ACPICA application.
+ */
+#ifndef ACPI_EXTERNAL_RETURN_STATUS
+#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
+ prototype;
+#endif
+
+#ifndef ACPI_EXTERNAL_RETURN_OK
+#define ACPI_EXTERNAL_RETURN_OK(prototype) \
+ prototype;
+#endif
+
+#ifndef ACPI_EXTERNAL_RETURN_VOID
+#define ACPI_EXTERNAL_RETURN_VOID(prototype) \
+ prototype;
+#endif
+
+#ifndef ACPI_EXTERNAL_RETURN_UINT32
+#define ACPI_EXTERNAL_RETURN_UINT32(prototype) \
+ prototype;
+#endif
+
+#ifndef ACPI_EXTERNAL_RETURN_PTR
+#define ACPI_EXTERNAL_RETURN_PTR(prototype) \
+ prototype;
+#endif
+
+/*****************************************************************************
+ *
+ * Public globals and runtime configuration options
+ *
+ ****************************************************************************/
+
+/*
+ * Enable "slack mode" of the AML interpreter? Default is FALSE, and the
+ * interpreter strictly follows the ACPI specification. Setting to TRUE
+ * allows the interpreter to ignore certain errors and/or bad AML constructs.
+ *
+ * Currently, these features are enabled by this flag:
+ *
+ * 1) Allow "implicit return" of last value in a control method
+ * 2) Allow access beyond the end of an operation region
+ * 3) Allow access to uninitialized locals/args (auto-init to integer 0)
+ * 4) Allow ANY object type to be a source operand for the Store() operator
+ * 5) Allow unresolved references (invalid target name) in package objects
+ * 6) Enable warning messages for behavior that is not ACPI spec compliant
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE);
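
The net effect of the two macro sets: the single translation unit that defines DEFINE_ACPI_GLOBALS receives the storage for every global in this list, while every other includer sees only a declaration. For the global above, the expansions come out as:

	/* in the one file that defines DEFINE_ACPI_GLOBALS: */
	u8 acpi_gbl_enable_interpreter_slack = FALSE;

	/* in every other translation unit: */
	extern u8 acpi_gbl_enable_interpreter_slack;
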
+/*
+ * Automatically serialize all methods that create named objects? Default
+ * is TRUE, meaning that all non_serialized methods are scanned once at
+ * table load time to determine those that create named objects. Methods
+ * that create named objects are marked Serialized in order to prevent
+ * possible run-time problems if they are entered by more than one thread.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_auto_serialize_methods, TRUE);
+
+/*
+ * Create the predefined _OSI method in the namespace? Default is TRUE
+ * because ACPICA is fully compatible with other ACPI implementations.
+ * Changing this will revert ACPICA (and machine ASL) to pre-OSI behavior.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE);
+
+/*
+ * Optionally use default values for the ACPI register widths. Set this to
+ * TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE);
+
+/*
+ * Whether or not to validate (map) an entire table to verify
+ * checksum/duplication in early stage before install. Set this to TRUE to
+ * allow early table validation before installing it into the table manager.
+ * Note that enabling this option causes errors to happen in some OSPMs
+ * during early initialization stages. Default behavior is to allow such
+ * validation.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_table_validation, TRUE);
+
+/*
+ * Optionally enable output from the AML Debug Object.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE);
+
+/*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
+
+/*
+ * Optionally ignore an XSDT if present and use the RSDT instead.
+ * Although the ACPI specification requires that an XSDT be used instead
+ * of the RSDT, the XSDT has been found to be corrupt or ill-formed on
+ * some machines. Default behavior is to use the XSDT if present.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
+
+/*
+ * Optionally use 32-bit FADT addresses if and when there is a conflict
+ * (address mismatch) between the 32-bit and 64-bit versions of the
+ * address. Although ACPICA adheres to the ACPI specification which
+ * requires the use of the corresponding 64-bit address if it is non-zero,
+ * some machines have been found to have a corrupted non-zero 64-bit
+ * address. Default is FALSE, do not favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+
+/*
+ * Optionally use 32-bit FACS table addresses.
+ * It is reported that some platforms fail to resume from system suspending
+ * if 64-bit FACS table address is selected:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
+ * Default is TRUE, favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
+
+/*
+ * Optionally truncate I/O addresses to 16 bits. Provides compatibility
+ * with other ACPI implementations. NOTE: During ACPICA initialization,
+ * this value is set to TRUE if any Windows OSI strings have been
+ * requested by the BIOS.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE);
+
+/*
+ * Disable runtime checking and repair of values returned by control methods.
+ * Use only if the repair is causing a problem on a particular machine.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
+
+/*
+ * Optionally do not install any SSDTs from the RSDT/XSDT during initialization.
+ * This can be useful for debugging ACPI problems on some machines.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_install, FALSE);
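
All of these knobs are plain globals, so a host flips them after ACPICA is linked in but before the tables are loaded, typically from a boot-parameter or platform-quirk handler. A hedged sketch; the function name and the particular quirks chosen are invented for illustration:

	static void example_apply_firmware_quirks(void)
	{
		/* e.g. a board whose XSDT is known to be corrupt */
		acpi_gbl_do_not_use_xsdt = TRUE;

		/* prefer 32-bit FADT addresses when the 32/64-bit entries disagree */
		acpi_gbl_use32_bit_fadt_addresses = TRUE;
	}
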
+
+/*
+ * Optionally enable runtime namespace override.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_runtime_namespace_override, TRUE);
+
+/*
+ * We keep track of the latest version of Windows that has been requested by
+ * the BIOS. ACPI 5.0.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
+
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
+
+/*
+ * Maximum timeout for While() loop iterations before forced method abort.
+ * This mechanism is intended to prevent infinite loops during interpreter
+ * execution within a host kernel.
+ */
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
+
+/*
+ * Optionally ignore AE_NOT_FOUND errors from named reference package elements
+ * during DSDT/SSDT table loading. This reduces error "noise" in platforms
+ * whose firmware is carrying around a bunch of unused package objects that
+ * refer to non-existent named objects. However, if the AML actually tries to
+ * use such a package, the unresolved element(s) will be replaced with NULL
+ * elements.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_package_resolution_errors, FALSE);
+
+/*
+ * This mechanism is used to trace a specified AML method. The method is
+ * traced each time it is executed.
+ */
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0);
+ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT);
+
+/*
+ * Runtime configuration of debug output control masks. We want the debug
+ * switches statically initialized so they are already set when the debugger
+ * is entered.
+ */
+ACPI_INIT_GLOBAL(u32, acpi_dbg_level, ACPI_DEBUG_DEFAULT);
+ACPI_INIT_GLOBAL(u32, acpi_dbg_layer, 0);
+
+/* Optionally enable timer output with Debug Object output */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_display_debug_timer, FALSE);
+
+/*
+ * Debugger command handshake globals. Host OSes need to access these
+ * variables to implement their own command handshake mechanism.
+ */
+#ifdef ACPI_DEBUGGER
+ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
+ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+#endif
+
+/*
+ * Other miscellaneous globals
+ */
+ACPI_GLOBAL(struct acpi_table_fadt, acpi_gbl_FADT);
+ACPI_GLOBAL(u32, acpi_current_gpe_count);
+ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running);
+
+/*****************************************************************************
+ *
+ * ACPICA public interface configuration.
+ *
+ * Interfaces that are configured out of the ACPICA build are replaced
+ * by inlined stubs by default.
+ *
+ ****************************************************************************/
+
+/*
+ * Hardware-reduced prototypes (default: Not hardware reduced).
+ *
+ * All ACPICA hardware-related interfaces that use these macros will be
+ * configured out of the ACPICA build if the ACPI_REDUCED_HARDWARE flag
+ * is set to TRUE.
+ *
+ * Note: This static build option for reduced hardware is intended to
+ * reduce ACPICA code size if desired or necessary. However, even if this
+ * option is not specified, the runtime behavior of ACPICA is dependent
+ * on the actual FADT reduced hardware flag (HW_REDUCED_ACPI). If set,
+ * the flag will enable similar behavior -- ACPICA will not attempt
+ * to access any ACPI-related hardware (SCI, GPEs, Fixed Events, etc.)
+ */
+#if (!ACPI_REDUCED_HARDWARE)
+#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
+ ACPI_EXTERNAL_RETURN_STATUS(prototype)
+
+#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
+ ACPI_EXTERNAL_RETURN_OK(prototype)
+
+#define ACPI_HW_DEPENDENT_RETURN_UINT32(prototype) \
+ ACPI_EXTERNAL_RETURN_UINT32(prototype)
+
+#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
+ ACPI_EXTERNAL_RETURN_VOID(prototype)
+
+#else
+#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
+ static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);}
+
+#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
+ static ACPI_INLINE prototype {return(AE_OK);}
+
+#define ACPI_HW_DEPENDENT_RETURN_UINT32(prototype) \
+ static ACPI_INLINE prototype {return(0);}
+
+#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
+ static ACPI_INLINE prototype {return;}
+
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+/*
+ * Error message prototypes (default: error messages enabled).
+ *
+ * All interfaces related to error and warning messages
+ * will be configured out of the ACPICA build if the
+ * ACPI_NO_ERROR_MESSAGES flag is defined.
+ */
+#ifndef ACPI_NO_ERROR_MESSAGES
+#define ACPI_MSG_DEPENDENT_RETURN_VOID(prototype) \
+ prototype;
+
+#else
+#define ACPI_MSG_DEPENDENT_RETURN_VOID(prototype) \
+ static ACPI_INLINE prototype {return;}
+
+#endif /* ACPI_NO_ERROR_MESSAGES */
+
+/*
+ * Debugging output prototypes (default: no debug output).
+ *
+ * All interfaces related to debug output messages
+ * will be configured out of the ACPICA build unless the
+ * ACPI_DEBUG_OUTPUT flag is defined.
+ */
+#ifdef ACPI_DEBUG_OUTPUT
+#define ACPI_DBG_DEPENDENT_RETURN_VOID(prototype) \
+ prototype;
+
+#else
+#define ACPI_DBG_DEPENDENT_RETURN_VOID(prototype) \
+ static ACPI_INLINE prototype {return;}
+
+#endif /* ACPI_DEBUG_OUTPUT */
+
+/*
+ * Application prototypes
+ *
+ * All interfaces used by application will be configured
+ * out of the ACPICA build unless the ACPI_APPLICATION
+ * flag is defined.
+ */
+#ifdef ACPI_APPLICATION
+#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \
+ prototype;
+
+#else
+#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \
+ static ACPI_INLINE prototype {return;}
+
+#endif /* ACPI_APPLICATION */
+
+/*
+ * Debugger prototypes
+ *
+ * All interfaces used by debugger will be configured
+ * out of the ACPICA build unless the ACPI_DEBUGGER
+ * flag is defined.
+ */ +#ifdef ACPI_DEBUGGER +#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \ + ACPI_EXTERNAL_RETURN_OK(prototype) + +#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \ + ACPI_EXTERNAL_RETURN_VOID(prototype) + +#else +#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \ + static ACPI_INLINE prototype {return(AE_OK);} + +#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \ + static ACPI_INLINE prototype {return;} + +#endif /* ACPI_DEBUGGER */ + +/***************************************************************************** + * + * ACPICA public interface prototypes + * + ****************************************************************************/ + +/* + * Initialization + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_tables(struct acpi_table_desc + *initial_storage, + u32 initial_table_count, + u8 allow_resize)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_subsystem(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_enable_subsystem(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_objects(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_terminate(void)) + +/* + * Miscellaneous global interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_subsystem_status(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_system_info(struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_statistics(struct acpi_statistics *stats)) +ACPI_EXTERNAL_RETURN_PTR(const char + *acpi_format_exception(acpi_status exception)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_purge_cached_objects(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_interface(acpi_string interface_name)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_interface(acpi_string interface_name)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_update_interfaces(u8 action)) + +ACPI_EXTERNAL_RETURN_UINT32(u32 + acpi_check_address_range(acpi_adr_space_type + space_id, + acpi_physical_address + address, acpi_size length, + u8 warn)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_decode_pld_buffer(u8 *in_buffer, + acpi_size length, + struct acpi_pld_info + **return_buffer)) + +/* + * ACPI table load/unload interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_install_table(acpi_physical_address address, + u8 physical)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_load_table(struct acpi_table_header *table)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_unload_parent_table(acpi_handle object)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_load_tables(void)) + +/* + * ACPI table manipulation interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_reallocate_root_table(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_find_root_pointer(acpi_physical_address + *rsdp_address)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_table_header(acpi_string signature, + u32 instance, + struct acpi_table_header + *out_table_header)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_table(acpi_string signature, u32 instance, + struct acpi_table_header + **out_table)) +ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + 
acpi_get_table_by_index(u32 table_index, + struct acpi_table_header + **out_table)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_table_handler(acpi_table_handler + handler, void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_table_handler(acpi_table_handler + handler)) + +/* + * Namespace and name interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_walk_namespace(acpi_object_type type, + acpi_handle start_object, + u32 max_depth, + acpi_walk_callback + descending_callback, + acpi_walk_callback + ascending_callback, + void *context, + void **return_value)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_devices(const char *HID, + acpi_walk_callback user_function, + void *context, + void **return_value)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_name(acpi_handle object, u32 name_type, + struct acpi_buffer *ret_path_ptr)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_handle(acpi_handle parent, + acpi_string pathname, + acpi_handle *ret_handle)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_attach_data(acpi_handle object, + acpi_object_handler handler, + void *data)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_detach_data(acpi_handle object, + acpi_object_handler handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_data(acpi_handle object, + acpi_object_handler handler, + void **data)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_debug_trace(const char *name, u32 debug_level, + u32 debug_layer, u32 flags)) + +/* + * Object manipulation and enumeration + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_evaluate_object(acpi_handle object, + acpi_string pathname, + struct acpi_object_list + *parameter_objects, + struct acpi_buffer + *return_object_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_evaluate_object_typed(acpi_handle object, + acpi_string pathname, + struct acpi_object_list + *external_params, + struct acpi_buffer + *return_buffer, + acpi_object_type + return_type)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_object_info(acpi_handle object, + struct acpi_device_info + **return_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_install_method(u8 *buffer)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_next_object(acpi_object_type type, + acpi_handle parent, + acpi_handle child, + acpi_handle *out_handle)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_type(acpi_handle object, + acpi_object_type *out_type)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_parent(acpi_handle object, + acpi_handle *out_handle)) + +/* + * Handler interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_initialization_handler + (acpi_init_handler handler, u32 function)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_sci_handler(acpi_sci_handler + address, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_sci_handler(acpi_sci_handler + address)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_global_event_handler + (acpi_gbl_event_handler handler, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_fixed_event_handler(u32 + acpi_event, + acpi_event_handler + handler, + void + *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_fixed_event_handler(u32 acpi_event, + acpi_event_handler + handler)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_handler(acpi_handle + gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler + address, + void *context)) 
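
A sketch of installing and enabling a GPE handler through the interfaces above; the GPE number, the handler body, and the function names are illustrative only:

	static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
				       void *context)
	{
		/* acknowledge/queue the event ... */
		return ACPI_REENABLE_GPE;	/* ask ACPICA to re-enable the GPE */
	}

	static acpi_status example_install_gpe(void)
	{
		acpi_status status;

		/* NULL gpe_device selects the FADT-defined GPE blocks */
		status = acpi_install_gpe_handler(NULL, 0x16,
						  ACPI_GPE_LEVEL_TRIGGERED,
						  example_gpe_handler, NULL);
		if (ACPI_FAILURE(status))
			return status;

		return acpi_enable_gpe(NULL, 0x16);
	}
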
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_raw_handler(acpi_handle + gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler + address, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_gpe_handler(acpi_handle gpe_device, + u32 gpe_number, + acpi_gpe_handler + address)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_notify_handler(acpi_handle device, + u32 handler_type, + acpi_notify_handler + handler, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_notify_handler(acpi_handle device, + u32 handler_type, + acpi_notify_handler + handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_address_space_handler(acpi_handle + device, + acpi_adr_space_type + space_id, + acpi_adr_space_handler + handler, + acpi_adr_space_setup + setup, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_address_space_handler(acpi_handle + device, + acpi_adr_space_type + space_id, + acpi_adr_space_handler + handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_exception_handler + (acpi_exception_handler handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_interface_handler + (acpi_interface_handler handler)) + +/* + * Global Lock interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_acquire_global_lock(u16 timeout, + u32 *handle)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_release_global_lock(u32 handle)) + +/* + * Interfaces to AML mutex objects + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_acquire_mutex(acpi_handle handle, + acpi_string pathname, + u16 timeout)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_release_mutex(acpi_handle handle, + acpi_string pathname)) + +/* + * Fixed Event interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_enable_event(u32 event, u32 flags)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_disable_event(u32 event, u32 flags)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_event_status(u32 event, + acpi_event_status + *event_status)) + +/* + * General Purpose Event (GPE) Interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_update_all_gpes(void)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_enable_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_disable_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_clear_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_set_gpe(acpi_handle gpe_device, + u32 gpe_number, u8 action)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_finish_gpe(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_mask_gpe(acpi_handle gpe_device, + u32 gpe_number, u8 is_masked)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_mark_gpe_for_wake(acpi_handle gpe_device, + u32 gpe_number)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_setup_gpe_for_wake(acpi_handle + parent_device, + acpi_handle gpe_device, + u32 gpe_number)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_set_gpe_wake_mask(acpi_handle gpe_device, + u32 gpe_number, + u8 action)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_gpe_status(acpi_handle gpe_device, + u32 gpe_number, + acpi_event_status + *event_status)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle 
gpe_device, u32 gpe_number)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_gpe_device(u32 gpe_index, + acpi_handle *gpe_device)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_block(acpi_handle gpe_device, + struct + acpi_generic_address + *gpe_block_address, + u32 register_count, + u32 interrupt_number)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_gpe_block(acpi_handle gpe_device)) + +/* + * Resource interfaces + */ +typedef +acpi_status (*acpi_walk_resource_callback) (struct acpi_resource * resource, + void *context); + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_vendor_resource(acpi_handle device, + char *name, + struct acpi_vendor_uuid + *uuid, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_current_resources(acpi_handle device, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_possible_resources(acpi_handle device, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_event_resources(acpi_handle device_handle, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_walk_resource_buffer(struct acpi_buffer + *buffer, + acpi_walk_resource_callback + user_function, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_walk_resources(acpi_handle device, char *name, + acpi_walk_resource_callback + user_function, void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_set_current_resources(acpi_handle device, + struct acpi_buffer + *in_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_irq_routing_table(acpi_handle device, + struct acpi_buffer + *ret_buffer)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_resource_to_address64(struct acpi_resource + *resource, + struct + acpi_resource_address64 + *out)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_buffer_to_resource(u8 *aml_buffer, + u16 aml_buffer_length, + struct acpi_resource + **resource_ptr)) + +/* + * Hardware (ACPI device) interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_reset(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_read(u64 *value, + struct acpi_generic_address *reg)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_write(u64 value, + struct acpi_generic_address *reg)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_read_bit_register(u32 register_id, + u32 *return_value)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_write_bit_register(u32 register_id, + u32 value)) + +/* + * Sleep/Wake interfaces + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_sleep_type_data(u8 sleep_state, + u8 *slp_typ_a, + u8 *slp_typ_b)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_enter_sleep_state_prep(u8 sleep_state)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_enter_sleep_state(u8 sleep_state)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enter_sleep_state_s4bios(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_leave_sleep_state_prep(u8 sleep_state)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state)) + 
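
These four calls form the canonical suspend path; a sketch with error handling elided (an OSPM saves and restores processor and device state around the actual entry):

	static void example_suspend_to_s3(void)
	{
		acpi_enter_sleep_state_prep(ACPI_STATE_S3);	/* evaluates _PTS */
		/* ... arch code saves CPU context and masks interrupts ... */
		acpi_enter_sleep_state(ACPI_STATE_S3);		/* writes SLP_TYPx/SLP_EN */

		/* execution resumes here after wake: */
		acpi_leave_sleep_state_prep(ACPI_STATE_S3);
		acpi_leave_sleep_state(ACPI_STATE_S3);		/* evaluates _WAK */
	}
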
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_set_firmware_waking_vector + (acpi_physical_address physical_address, + acpi_physical_address physical_address64)) +/* + * ACPI Timer interfaces + */ +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_timer_resolution(u32 *resolution)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_get_timer_duration(u32 start_ticks, + u32 end_ticks, + u32 *time_elapsed)) + +/* + * Error/Warning output + */ +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_error(const char *module_name, + u32 line_number, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(4) + void ACPI_INTERNAL_VAR_XFACE + acpi_exception(const char *module_name, + u32 line_number, + acpi_status status, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_warning(const char *module_name, + u32 line_number, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1) + void ACPI_INTERNAL_VAR_XFACE + acpi_info(const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_bios_error(const char *module_name, + u32 line_number, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(4) + void ACPI_INTERNAL_VAR_XFACE + acpi_bios_exception(const char *module_name, + u32 line_number, + acpi_status status, + const char *format, ...)) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) + void ACPI_INTERNAL_VAR_XFACE + acpi_bios_warning(const char *module_name, + u32 line_number, + const char *format, ...)) + +/* + * Debug output + */ +ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) + void ACPI_INTERNAL_VAR_XFACE + acpi_debug_print(u32 requested_debug_level, + u32 line_number, + const char *function_name, + const char *module_name, + u32 component_id, + const char *format, ...)) +ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6) + void ACPI_INTERNAL_VAR_XFACE + acpi_debug_print_raw(u32 requested_debug_level, + u32 line_number, + const char *function_name, + const char *module_name, + u32 component_id, + const char *format, ...)) + +ACPI_DBG_DEPENDENT_RETURN_VOID(void + acpi_trace_point(acpi_trace_event_type type, + u8 begin, + u8 *aml, char *pathname)) + +acpi_status acpi_initialize_debugger(void); + +void acpi_terminate_debugger(void); + +/* + * Divergences + */ +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_get_data_full(acpi_handle object, + acpi_object_handler handler, + void **data, + void (*callback)(void *))) + +void acpi_run_debugger(char *batch_buffer); + +void acpi_set_debugger_thread_id(acpi_thread_id thread_id); + +#endif /* __ACXFACE_H__ */ diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h new file mode 100644 index 0000000..6293058 --- /dev/null +++ b/include/acpi/acrestyp.h @@ -0,0 +1,678 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: acrestyp.h - Defines, types, and structures for resource descriptors + * + * Copyright (C) 2000 - 2019, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACRESTYP_H__ +#define __ACRESTYP_H__ + +/* + * Definitions for Resource Attributes + */ +typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */ +typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64K-1)+3 */ + +/* + * Memory Attributes + */ +#define ACPI_READ_ONLY_MEMORY (u8) 0x00 +#define ACPI_READ_WRITE_MEMORY (u8) 0x01 + +#define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00 +#define ACPI_CACHABLE_MEMORY (u8) 0x01 +#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 +#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 + +/*! [Begin] no source code translation */ +/* + * IO Attributes + * The ISA IO ranges are: n000-n0FFh, n400-n4FFh, n800-n8FFh, nC00-nCFFh. + * The non-ISA IO ranges are: n100-n3FFh, n500-n7FFh, n900-nBFFh, nCD0-nFFFh. + */ +/*! [End] no source code translation !*/ + +#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 +#define ACPI_ISA_ONLY_RANGES (u8) 0x02 +#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) + +/* Type of translation - 1=Sparse, 0=Dense */ + +#define ACPI_SPARSE_TRANSLATION (u8) 0x01 + +/* + * IO Port Descriptor Decode + */ +#define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ +#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ + +/* + * Interrupt attributes - used in multiple descriptors + */ + +/* Triggering */ + +#define ACPI_LEVEL_SENSITIVE (u8) 0x00 +#define ACPI_EDGE_SENSITIVE (u8) 0x01 + +/* Polarity */ + +#define ACPI_ACTIVE_HIGH (u8) 0x00 +#define ACPI_ACTIVE_LOW (u8) 0x01 +#define ACPI_ACTIVE_BOTH (u8) 0x02 + +/* Sharing */ + +#define ACPI_EXCLUSIVE (u8) 0x00 +#define ACPI_SHARED (u8) 0x01 + +/* Wake */ + +#define ACPI_NOT_WAKE_CAPABLE (u8) 0x00 +#define ACPI_WAKE_CAPABLE (u8) 0x01 + +/* + * DMA Attributes + */ +#define ACPI_COMPATIBILITY (u8) 0x00 +#define ACPI_TYPE_A (u8) 0x01 +#define ACPI_TYPE_B (u8) 0x02 +#define ACPI_TYPE_F (u8) 0x03 + +#define ACPI_NOT_BUS_MASTER (u8) 0x00 +#define ACPI_BUS_MASTER (u8) 0x01 + +#define ACPI_TRANSFER_8 (u8) 0x00 +#define ACPI_TRANSFER_8_16 (u8) 0x01 +#define ACPI_TRANSFER_16 (u8) 0x02 + +/* + * Start Dependent Functions Priority definitions + */ +#define ACPI_GOOD_CONFIGURATION (u8) 0x00 +#define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01 +#define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02 + +/* + * 16, 32 and 64-bit Address Descriptor resource types + */ +#define ACPI_MEMORY_RANGE (u8) 0x00 +#define ACPI_IO_RANGE (u8) 0x01 +#define ACPI_BUS_NUMBER_RANGE (u8) 0x02 + +#define ACPI_ADDRESS_NOT_FIXED (u8) 0x00 +#define ACPI_ADDRESS_FIXED (u8) 0x01 + +#define ACPI_POS_DECODE (u8) 0x00 +#define ACPI_SUB_DECODE (u8) 0x01 + +/* Producer/Consumer */ + +#define ACPI_PRODUCER (u8) 0x00 +#define ACPI_CONSUMER (u8) 0x01 + +/* + * If possible, pack the following structures to byte alignment + */ +#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED +#pragma pack(1) +#endif + +/* UUID data structures for use in vendor-defined resource descriptors */ + +struct acpi_uuid { + u8 data[ACPI_UUID_LENGTH]; +}; + +struct acpi_vendor_uuid { + u8 subtype; + u8 data[ACPI_UUID_LENGTH]; +}; + +/* + * Structures used to describe device resources + */ +struct acpi_resource_irq { + u8 descriptor_length; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + u8 interrupts[1]; +}; + +struct acpi_resource_dma { + u8 type; + u8 bus_master; + u8 transfer; + u8 channel_count; + u8 channels[1]; +}; + +struct acpi_resource_start_dependent { + u8 descriptor_length; + 
u8 compatibility_priority; + u8 performance_robustness; +}; + +/* + * The END_DEPENDENT_FUNCTIONS_RESOURCE struct is not + * needed because it has no fields + */ + +struct acpi_resource_io { + u8 io_decode; + u8 alignment; + u8 address_length; + u16 minimum; + u16 maximum; +}; + +struct acpi_resource_fixed_io { + u16 address; + u8 address_length; +}; + +struct acpi_resource_fixed_dma { + u16 request_lines; + u16 channels; + u8 width; +}; + +/* Values for Width field above */ + +#define ACPI_DMA_WIDTH8 0 +#define ACPI_DMA_WIDTH16 1 +#define ACPI_DMA_WIDTH32 2 +#define ACPI_DMA_WIDTH64 3 +#define ACPI_DMA_WIDTH128 4 +#define ACPI_DMA_WIDTH256 5 + +struct acpi_resource_vendor { + u16 byte_length; + u8 byte_data[1]; +}; + +/* Vendor resource with UUID info (introduced in ACPI 3.0) */ + +struct acpi_resource_vendor_typed { + u16 byte_length; + u8 uuid_subtype; + u8 uuid[ACPI_UUID_LENGTH]; + u8 byte_data[1]; +}; + +struct acpi_resource_end_tag { + u8 checksum; +}; + +struct acpi_resource_memory24 { + u8 write_protect; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +}; + +struct acpi_resource_memory32 { + u8 write_protect; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +}; + +struct acpi_resource_fixed_memory32 { + u8 write_protect; + u32 address; + u32 address_length; +}; + +struct acpi_memory_attribute { + u8 write_protect; + u8 caching; + u8 range_type; + u8 translation; +}; + +struct acpi_io_attribute { + u8 range_type; + u8 translation; + u8 translation_type; + u8 reserved1; +}; + +union acpi_resource_attribute { + struct acpi_memory_attribute mem; + struct acpi_io_attribute io; + + /* Used for the *word_space macros */ + + u8 type_specific; +}; + +struct acpi_resource_label { + u16 string_length; + char *string_ptr; +}; + +struct acpi_resource_source { + u8 index; + u16 string_length; + char *string_ptr; +}; + +/* Fields common to all address descriptors, 16/32/64 bit */ + +#define ACPI_RESOURCE_ADDRESS_COMMON \ + u8 resource_type; \ + u8 producer_consumer; \ + u8 decode; \ + u8 min_address_fixed; \ + u8 max_address_fixed; \ + union acpi_resource_attribute info; + +struct acpi_address16_attribute { + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +}; + +struct acpi_address32_attribute { + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +}; + +struct acpi_address64_attribute { + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +}; + +struct acpi_resource_address { +ACPI_RESOURCE_ADDRESS_COMMON}; + +struct acpi_resource_address16 { + ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_address32 { + ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_address64 { + ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address; + struct acpi_resource_source resource_source; +}; + +struct acpi_resource_extended_address64 { + ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID; + struct acpi_address64_attribute address; + u64 type_specific; +}; + +struct acpi_resource_extended_irq { + u8 producer_consumer; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + struct acpi_resource_source resource_source; + u32 interrupts[1]; +}; + +struct acpi_resource_generic_register { + u8 space_id; + u8 
bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +}; + +struct acpi_resource_gpio { + u8 revision_id; + u8 connection_type; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 pin_config; + u8 shareable; /* For values, see Interrupt Attributes above */ + u8 wake_capable; /* For values, see Interrupt Attributes above */ + u8 io_restriction; + u8 triggering; /* For values, see Interrupt Attributes above */ + u8 polarity; /* For values, see Interrupt Attributes above */ + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +}; + +/* Values for GPIO connection_type field above */ + +#define ACPI_RESOURCE_GPIO_TYPE_INT 0 +#define ACPI_RESOURCE_GPIO_TYPE_IO 1 + +/* Values for pin_config field above */ + +#define ACPI_PIN_CONFIG_DEFAULT 0 +#define ACPI_PIN_CONFIG_PULLUP 1 +#define ACPI_PIN_CONFIG_PULLDOWN 2 +#define ACPI_PIN_CONFIG_NOPULL 3 + +/* Values for io_restriction field above */ + +#define ACPI_IO_RESTRICT_NONE 0 +#define ACPI_IO_RESTRICT_INPUT 1 +#define ACPI_IO_RESTRICT_OUTPUT 2 +#define ACPI_IO_RESTRICT_NONE_PRESERVE 3 + +/* Common structure for I2C, SPI, and UART serial descriptors */ + +#define ACPI_RESOURCE_SERIAL_COMMON \ + u8 revision_id; \ + u8 type; \ + u8 producer_consumer; /* For values, see Producer/Consumer above */\ + u8 slave_mode; \ + u8 connection_sharing; \ + u8 type_revision_id; \ + u16 type_data_length; \ + u16 vendor_length; \ + struct acpi_resource_source resource_source; \ + u8 *vendor_data; + +struct acpi_resource_common_serialbus { +ACPI_RESOURCE_SERIAL_COMMON}; + +/* Values for the Type field above */ + +#define ACPI_RESOURCE_SERIAL_TYPE_I2C 1 +#define ACPI_RESOURCE_SERIAL_TYPE_SPI 2 +#define ACPI_RESOURCE_SERIAL_TYPE_UART 3 + +/* Values for slave_mode field above */ + +#define ACPI_CONTROLLER_INITIATED 0 +#define ACPI_DEVICE_INITIATED 1 + +struct acpi_resource_i2c_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 access_mode; + u16 slave_address; + u32 connection_speed; +}; + +/* Values for access_mode field above */ + +#define ACPI_I2C_7BIT_MODE 0 +#define ACPI_I2C_10BIT_MODE 1 + +struct acpi_resource_spi_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 wire_mode; + u8 device_polarity; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; + u32 connection_speed; +}; + +/* Values for wire_mode field above */ + +#define ACPI_SPI_4WIRE_MODE 0 +#define ACPI_SPI_3WIRE_MODE 1 + +/* Values for device_polarity field above */ + +#define ACPI_SPI_ACTIVE_LOW 0 +#define ACPI_SPI_ACTIVE_HIGH 1 + +/* Values for clock_phase field above */ + +#define ACPI_SPI_FIRST_PHASE 0 +#define ACPI_SPI_SECOND_PHASE 1 + +/* Values for clock_polarity field above */ + +#define ACPI_SPI_START_LOW 0 +#define ACPI_SPI_START_HIGH 1 + +struct acpi_resource_uart_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 endian; + u8 data_bits; + u8 stop_bits; + u8 flow_control; + u8 parity; + u8 lines_enabled; + u16 rx_fifo_size; + u16 tx_fifo_size; + u32 default_baud_rate; +}; + +/* Values for Endian field above */ + +#define ACPI_UART_LITTLE_ENDIAN 0 +#define ACPI_UART_BIG_ENDIAN 1 + +/* Values for data_bits field above */ + +#define ACPI_UART_5_DATA_BITS 0 +#define ACPI_UART_6_DATA_BITS 1 +#define ACPI_UART_7_DATA_BITS 2 +#define ACPI_UART_8_DATA_BITS 3 +#define ACPI_UART_9_DATA_BITS 4 + +/* Values for stop_bits field above */ + +#define ACPI_UART_NO_STOP_BITS 0 +#define ACPI_UART_1_STOP_BIT 1 +#define ACPI_UART_1P5_STOP_BITS 2 
+#define ACPI_UART_2_STOP_BITS 3 + +/* Values for flow_control field above */ + +#define ACPI_UART_FLOW_CONTROL_NONE 0 +#define ACPI_UART_FLOW_CONTROL_HW 1 +#define ACPI_UART_FLOW_CONTROL_XON_XOFF 2 + +/* Values for Parity field above */ + +#define ACPI_UART_PARITY_NONE 0 +#define ACPI_UART_PARITY_EVEN 1 +#define ACPI_UART_PARITY_ODD 2 +#define ACPI_UART_PARITY_MARK 3 +#define ACPI_UART_PARITY_SPACE 4 + +/* Values for lines_enabled bitfield above */ + +#define ACPI_UART_CARRIER_DETECT (1<<2) +#define ACPI_UART_RING_INDICATOR (1<<3) +#define ACPI_UART_DATA_SET_READY (1<<4) +#define ACPI_UART_DATA_TERMINAL_READY (1<<5) +#define ACPI_UART_CLEAR_TO_SEND (1<<6) +#define ACPI_UART_REQUEST_TO_SEND (1<<7) + +struct acpi_resource_pin_function { + u8 revision_id; + u8 pin_config; + u8 shareable; /* For values, see Interrupt Attributes above */ + u16 function_number; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +}; + +struct acpi_resource_pin_config { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 shareable; /* For values, see Interrupt Attributes above */ + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +}; + +/* Values for pin_config_type field above */ + +#define ACPI_PIN_CONFIG_DEFAULT 0 +#define ACPI_PIN_CONFIG_BIAS_PULL_UP 1 +#define ACPI_PIN_CONFIG_BIAS_PULL_DOWN 2 +#define ACPI_PIN_CONFIG_BIAS_DEFAULT 3 +#define ACPI_PIN_CONFIG_BIAS_DISABLE 4 +#define ACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE 5 +#define ACPI_PIN_CONFIG_BIAS_BUS_HOLD 6 +#define ACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN 7 +#define ACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE 8 +#define ACPI_PIN_CONFIG_DRIVE_PUSH_PULL 9 +#define ACPI_PIN_CONFIG_DRIVE_STRENGTH 10 +#define ACPI_PIN_CONFIG_SLEW_RATE 11 +#define ACPI_PIN_CONFIG_INPUT_DEBOUNCE 12 +#define ACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER 13 + +struct acpi_resource_pin_group { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u16 pin_table_length; + u16 vendor_length; + u16 *pin_table; + struct acpi_resource_label resource_label; + u8 *vendor_data; +}; + +struct acpi_resource_pin_group_function { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 shareable; /* For values, see Interrupt Attributes above */ + u16 function_number; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +}; + +struct acpi_resource_pin_group_config { + u8 revision_id; + u8 producer_consumer; /* For values, see Producer/Consumer above */ + u8 shareable; /* For values, see Interrupt Attributes above */ + u8 pin_config_type; /* For values, see pin_config_type above */ + u32 pin_config_value; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +}; + +/* ACPI_RESOURCE_TYPEs */ + +#define ACPI_RESOURCE_TYPE_IRQ 0 +#define ACPI_RESOURCE_TYPE_DMA 1 +#define ACPI_RESOURCE_TYPE_START_DEPENDENT 2 +#define ACPI_RESOURCE_TYPE_END_DEPENDENT 3 +#define ACPI_RESOURCE_TYPE_IO 4 +#define ACPI_RESOURCE_TYPE_FIXED_IO 5 +#define ACPI_RESOURCE_TYPE_VENDOR 6 +#define ACPI_RESOURCE_TYPE_END_TAG 7 +#define ACPI_RESOURCE_TYPE_MEMORY24 8 +#define ACPI_RESOURCE_TYPE_MEMORY32 9 +#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10 +#define 
ACPI_RESOURCE_TYPE_ADDRESS16 11
+#define ACPI_RESOURCE_TYPE_ADDRESS32 12
+#define ACPI_RESOURCE_TYPE_ADDRESS64 13
+#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */
+#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15
+#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16
+#define ACPI_RESOURCE_TYPE_GPIO 17 /* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_FIXED_DMA 18 /* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_SERIAL_BUS 19 /* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_PIN_FUNCTION 20 /* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_CONFIG 21 /* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_GROUP 22 /* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION 23 /* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG 24 /* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_MAX 24
+
+/* Master union for resource descriptors */
+
+union acpi_resource_data {
+	struct acpi_resource_irq irq;
+	struct acpi_resource_dma dma;
+	struct acpi_resource_start_dependent start_dpf;
+	struct acpi_resource_io io;
+	struct acpi_resource_fixed_io fixed_io;
+	struct acpi_resource_fixed_dma fixed_dma;
+	struct acpi_resource_vendor vendor;
+	struct acpi_resource_vendor_typed vendor_typed;
+	struct acpi_resource_end_tag end_tag;
+	struct acpi_resource_memory24 memory24;
+	struct acpi_resource_memory32 memory32;
+	struct acpi_resource_fixed_memory32 fixed_memory32;
+	struct acpi_resource_address16 address16;
+	struct acpi_resource_address32 address32;
+	struct acpi_resource_address64 address64;
+	struct acpi_resource_extended_address64 ext_address64;
+	struct acpi_resource_extended_irq extended_irq;
+	struct acpi_resource_generic_register generic_reg;
+	struct acpi_resource_gpio gpio;
+	struct acpi_resource_i2c_serialbus i2c_serial_bus;
+	struct acpi_resource_spi_serialbus spi_serial_bus;
+	struct acpi_resource_uart_serialbus uart_serial_bus;
+	struct acpi_resource_common_serialbus common_serial_bus;
+	struct acpi_resource_pin_function pin_function;
+	struct acpi_resource_pin_config pin_config;
+	struct acpi_resource_pin_group pin_group;
+	struct acpi_resource_pin_group_function pin_group_function;
+	struct acpi_resource_pin_group_config pin_group_config;
+
+	/* Common fields */
+
+	struct acpi_resource_address address; /* Common 16/32/64 address fields */
+};
+
+/* Common resource header */
+
+struct acpi_resource {
+	u32 type;
+	u32 length;
+	union acpi_resource_data data;
+};
+
+/* restore default alignment */
+
+#pragma pack()
+
+#define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */
+#define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12)
+#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
+
+/* Macro for walking resource templates with multiple descriptors */
+
+#define ACPI_NEXT_RESOURCE(res) \
+	ACPI_ADD_PTR (struct acpi_resource, (res), (res)->length)
+
+struct acpi_pci_routing_table {
+	u32 length;
+	u32 pin;
+	u64 address; /* here for 64-bit alignment */
+	u32 source_index;
+	char source[4]; /* pad to 64 bits so sizeof() works in all cases */
+};
+
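+/*
+ * Editorial sketch (not part of the original header): walking a resource
+ * template with ACPI_NEXT_RESOURCE(). Each descriptor carries its own
+ * length, so the macro advances byte-wise until the mandatory END_TAG
+ * descriptor is reached. The helper name acpi_example_count_resources()
+ * is hypothetical.
+ */
+static inline u32 acpi_example_count_resources(struct acpi_resource *res)
+{
+	u32 count = 0;
+
+	/* Every valid resource template is terminated by an END_TAG */
+	while (res->type != ACPI_RESOURCE_TYPE_END_TAG) {
+		count++;
+		res = ACPI_NEXT_RESOURCE(res);
+	}
+	return (count);
+}
+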
+#endif /* __ACRESTYP_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
new file mode 100644
index 0000000..d568128
--- /dev/null
+++ b/include/acpi/actbl.h
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: actbl.h - Basic ACPI Table Definitions
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACTBL_H__
+#define __ACTBL_H__
+
+/*******************************************************************************
+ *
+ * Fundamental ACPI tables
+ *
+ * This file contains definitions for the ACPI tables that are directly consumed
+ * by ACPICA. All other tables are consumed by the OS-dependent ACPI-related
+ * device drivers and other OS support code.
+ *
+ * The RSDP and FACS do not use the common ACPI table header. All other ACPI
+ * tables use the header.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
+ */
+#define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */
+#define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */
+#define ACPI_SIG_FACS "FACS" /* Firmware ACPI Control Structure */
+#define ACPI_SIG_OSDT "OSDT" /* Override System Description Table */
+#define ACPI_SIG_PSDT "PSDT" /* Persistent System Description Table */
+#define ACPI_SIG_RSDP "RSD PTR " /* Root System Description Pointer */
+#define ACPI_SIG_RSDT "RSDT" /* Root System Description Table */
+#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */
+#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */
+#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */
+#define ACPI_OEM_NAME "OEM" /* Short name for OEM, not signature */
+
+/*
+ * All tables and structures must be byte-packed to match the ACPI
+ * specification, since the tables are provided by the system BIOS
+ */
+#pragma pack(1)
+
+/*
+ * Note: C bitfields are not used for this reason:
+ *
+ * "Bitfields are great and easy to read, but unfortunately the C language
+ * does not specify the layout of bitfields in memory, which means they are
+ * essentially useless for dealing with packed data in on-disk formats or
+ * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me,
+ * this decision was a design error in C. Ritchie could have picked an order
+ * and stuck with it." Norman Ramsey.
+ * See http://stackoverflow.com/a/1053662/41661
+ */
+
+/*******************************************************************************
+ *
+ * Master ACPI Table Header. This common header is used by all ACPI tables
+ * except the RSDP and FACS.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header {
+	char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */
+	u32 length; /* Length of table in bytes, including this header */
+	u8 revision; /* ACPI Specification minor version number */
+	u8 checksum; /* To make sum of entire table == 0 */
+	char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
+	u32 oem_revision; /* OEM revision number */
+	char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+	u32 asl_compiler_revision; /* ASL compiler version */
+};
+
+/*******************************************************************************
+ *
+ * GAS - Generic Address Structure (ACPI 2.0+)
+ *
+ * Note: Since this structure is used in the ACPI tables, it is byte aligned.
+ * If misaligned access is not supported by the hardware, accesses to the
+ * 64-bit Address field must be performed with care.
+ * + ******************************************************************************/ + +struct acpi_generic_address { + u8 space_id; /* Address space where struct or register exists */ + u8 bit_width; /* Size in bits of given register */ + u8 bit_offset; /* Bit offset within the register */ + u8 access_width; /* Minimum Access size (ACPI 3.0) */ + u64 address; /* 64-bit address of struct or register */ +}; + +/******************************************************************************* + * + * RSDP - Root System Description Pointer (Signature is "RSD PTR ") + * Version 2 + * + ******************************************************************************/ + +struct acpi_table_rsdp { + char signature[8]; /* ACPI signature, contains "RSD PTR " */ + u8 checksum; /* ACPI 1.0 checksum */ + char oem_id[ACPI_OEM_ID_SIZE]; /* OEM identification */ + u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */ + u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */ + u32 length; /* Table length in bytes, including header (ACPI 2.0+) */ + u64 xsdt_physical_address; /* 64-bit physical address of the XSDT (ACPI 2.0+) */ + u8 extended_checksum; /* Checksum of entire table (ACPI 2.0+) */ + u8 reserved[3]; /* Reserved, must be zero */ +}; + +/* Standalone struct for the ACPI 1.0 RSDP */ + +struct acpi_rsdp_common { + char signature[8]; + u8 checksum; + char oem_id[ACPI_OEM_ID_SIZE]; + u8 revision; + u32 rsdt_physical_address; +}; + +/* Standalone struct for the extended part of the RSDP (ACPI 2.0+) */ + +struct acpi_rsdp_extension { + u32 length; + u64 xsdt_physical_address; + u8 extended_checksum; + u8 reserved[3]; +}; + +/******************************************************************************* + * + * RSDT/XSDT - Root System Description Tables + * Version 1 (both) + * + ******************************************************************************/ + +struct acpi_table_rsdt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */ +}; + +struct acpi_table_xsdt { + struct acpi_table_header header; /* Common ACPI table header */ + u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */ +}; + +#define ACPI_RSDT_ENTRY_SIZE (sizeof (u32)) +#define ACPI_XSDT_ENTRY_SIZE (sizeof (u64)) + +/******************************************************************************* + * + * FACS - Firmware ACPI Control Structure (FACS) + * + ******************************************************************************/ + +struct acpi_table_facs { + char signature[4]; /* ASCII table signature */ + u32 length; /* Length of structure, in bytes */ + u32 hardware_signature; /* Hardware configuration signature */ + u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */ + u32 global_lock; /* Global Lock for shared hardware resources */ + u32 flags; + u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ + u8 version; /* Version of this table (ACPI 2.0+) */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 ospm_flags; /* Flags to be set by OSPM (ACPI 4.0) */ + u8 reserved1[24]; /* Reserved, must be zero */ +}; + +/* Masks for global_lock flag field above */ + +#define ACPI_GLOCK_PENDING (1) /* 00: Pending global lock ownership */ +#define ACPI_GLOCK_OWNED (1<<1) /* 01: Global lock is owned */ + +/* Masks for Flags field above */ + +#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */ +#define ACPI_FACS_64BIT_WAKE (1<<1) /* 
01: 64-bit wake vector supported (ACPI 4.0) */ + +/* Masks for ospm_flags field above */ + +#define ACPI_FACS_64BIT_ENVIRONMENT (1) /* 00: 64-bit wake environment is required (ACPI 4.0) */ + +/******************************************************************************* + * + * FADT - Fixed ACPI Description Table (Signature "FACP") + * Version 6 + * + ******************************************************************************/ + +/* Fields common to all versions of the FADT */ + +struct acpi_table_fadt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 facs; /* 32-bit physical address of FACS */ + u32 dsdt; /* 32-bit physical address of DSDT */ + u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ + u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */ + u16 sci_interrupt; /* System vector of SCI interrupt */ + u32 smi_command; /* 32-bit Port address of SMI command port */ + u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ + u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ + u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ + u8 pstate_control; /* Processor performance state control */ + u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ + u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ + u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ + u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ + u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ + u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ + u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ + u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ + u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ + u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ + u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ + u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ + u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ + u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ + u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ + u8 cst_control; /* Support for the _CST object and C-States change notification */ + u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ + u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* Processor memory cache line width, in bytes */ + u16 flush_stride; /* Number of flush strides that need to be read */ + u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ + u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ + u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ + u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* Index to century in RTC CMOS RAM */ + u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ + u8 reserved; /* Reserved, must be zero */ + u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ + struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ + u8 reset_value; /* Value to write to the reset_register port to reset the system */ + u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ + u8 
minor_revision; /* FADT Minor Revision (ACPI 5.1) */ + u64 Xfacs; /* 64-bit physical address of FACS */ + u64 Xdsdt; /* 64-bit physical address of DSDT */ + struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ + struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ + struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ + struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ + struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ + struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ + struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ + struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ + struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */ + struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ + u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ +}; + +/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ + +#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */ +#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */ +#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */ +#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */ +#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */ +#define ACPI_FADT_NO_CMOS_RTC (1<<5) /* 05: [V5] No CMOS real-time clock present */ + +#define FADT2_REVISION_ID 3 + +/* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ + +#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ +#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ + +/* Masks for FADT flags */ + +#define ACPI_FADT_WBINVD (1) /* 00: [V1] The WBINVD instruction works properly */ +#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] WBINVD flushes but does not invalidate caches */ +#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */ +#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */ +#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */ +#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */ +#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status is not in fixed register space */ +#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */ +#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */ +#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */ +#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */ +#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */ +#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */ +#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after 
writing SLP_TYPx register */
+#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
+#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */
+#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
+#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
+#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
+#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local xAPICs must use physical dest mode (ACPI 3.0) */
+#define ACPI_FADT_HW_REDUCED (1<<20) /* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */
+#define ACPI_FADT_LOW_POWER_S0 (1<<21) /* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */
+
+/* Values for preferred_profile (Preferred Power Management Profiles) */
+
+enum acpi_preferred_pm_profiles {
+	PM_UNSPECIFIED = 0,
+	PM_DESKTOP = 1,
+	PM_MOBILE = 2,
+	PM_WORKSTATION = 3,
+	PM_ENTERPRISE_SERVER = 4,
+	PM_SOHO_SERVER = 5,
+	PM_APPLIANCE_PC = 6,
+	PM_PERFORMANCE_SERVER = 7,
+	PM_TABLET = 8
+};
+
+/* Values for sleep_status and sleep_control registers (V5+ FADT) */
+
+#define ACPI_X_WAKE_STATUS 0x80
+#define ACPI_X_SLEEP_TYPE_MASK 0x1C
+#define ACPI_X_SLEEP_TYPE_POSITION 0x02
+#define ACPI_X_SLEEP_ENABLE 0x20
+
+/* Reset to default packing */
+
+#pragma pack()
+
+/*
+ * Internal table-related structures
+ */
+union acpi_name_union {
+	u32 integer;
+	char ascii[4];
+};
+
+/* Internal ACPI Table Descriptor. One per ACPI table. */
+
+struct acpi_table_desc {
+	acpi_physical_address address;
+	struct acpi_table_header *pointer;
+	u32 length; /* Length fixed at 32 bits (fixed in table header) */
+	union acpi_name_union signature;
+	acpi_owner_id owner_id;
+	u8 flags;
+	u16 validation_count;
+};
+
+/*
+ * Maximum value of the validation_count field in struct acpi_table_desc.
+ * When reached, validation_count cannot be changed any more and the table will
+ * be permanently regarded as validated.
+ *
+ * This is to prevent situations in which unbalanced table get/put operations
+ * may cause premature table unmapping in the OS.
+ *
+ * The maximum validation count can be defined to any value, but should be
+ * greater than the maximum number of OS early stage mapping slots to avoid
+ * leaking early stage table mappings to the late stage.
+ */
+#define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX
+
+/* Masks for Flags field above */
+
+#define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, externally maintained */
+#define ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL (1) /* Physical address, internally mapped */
+#define ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL (2) /* Virtual address, internally allocated */
+#define ACPI_TABLE_ORIGIN_MASK (3)
+#define ACPI_TABLE_IS_VERIFIED (4)
+#define ACPI_TABLE_IS_LOADED (8)
+
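+/*
+ * Editorial sketch (not part of the original header): how the origin masks
+ * above partition the acpi_table_desc flags byte. The helper name
+ * acpi_tb_example_get_origin() is hypothetical.
+ */
+static inline u8 acpi_tb_example_get_origin(struct acpi_table_desc *table_desc)
+{
+	/* Low two bits select the mapping origin; the rest are status bits */
+	return ((u8)(table_desc->flags & ACPI_TABLE_ORIGIN_MASK));
+}
+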
+/*
+ * Get the remaining ACPI tables
+ */
+#include <acpi/actbl1.h>
+#include <acpi/actbl2.h>
+#include <acpi/actbl3.h>
+
+/* Macros used to generate offsets to specific table fields */
+
+#define ACPI_FADT_OFFSET(f) (u16) ACPI_OFFSET (struct acpi_table_fadt, f)
+
+/*
+ * Sizes of the various flavors of FADT. We need to look closely
+ * at the FADT length because the version number essentially tells
+ * us nothing because of many BIOS bugs where the version does not
+ * match the expected length. In other words, the length of the
+ * FADT is the bottom line as to what the version really is.
+ *
+ * For reference, the values below are as follows:
+ * FADT V1 size: 0x074
+ * FADT V2 size: 0x084
+ * FADT V3 size: 0x0F4
+ * FADT V4 size: 0x0F4
+ * FADT V5 size: 0x10C
+ * FADT V6 size: 0x114
+ */
+#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
+#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
+#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
+#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
+
+#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
+
+#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
new file mode 100644
index 0000000..22c039e
--- /dev/null
+++ b/include/acpi/actbl1.h
@@ -0,0 +1,1631 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: actbl1.h - Additional ACPI table definitions
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACTBL1_H__
+#define __ACTBL1_H__
+
+/*******************************************************************************
+ *
+ * Additional ACPI Tables
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
+ */
+#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */
+#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
+#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
+#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */
+#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */
+#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
+#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
+#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */
+#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
+#define ACPI_SIG_EINJ "EINJ" /* Error Injection table */
+#define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */
+#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */
+#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */
+#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
+#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */
+#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+
+#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
+#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
+
+/* Reserved table signatures */
+
+#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
+#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
+
+/*
+ * These tables have been seen in the field, but no definition has been found
+ */
+#ifdef ACPI_UNDEFINED_TABLES
+#define ACPI_SIG_ATKG "ATKG"
+#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */
+#define ACPI_SIG_IEIT "IEIT"
+#endif
+
+/*
+ * All tables must be byte-packed to match the ACPI
specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * Common subtable headers + * + ******************************************************************************/ + +/* Generic subtable header (used in MADT, SRAT, etc.) */ + +struct acpi_subtable_header { + u8 type; + u8 length; +}; + +/* Subtable header for WHEA tables (EINJ, ERST, WDAT) */ + +struct acpi_whea_header { + u8 action; + u8 instruction; + u8 flags; + u8 reserved; + struct acpi_generic_address register_region; + u64 value; /* Value used with Read/Write register */ + u64 mask; /* Bitmask required for this register instruction */ +}; + +/******************************************************************************* + * + * ASF - Alert Standard Format table (Signature "ASF!") + * Revision 0x10 + * + * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003 + * + ******************************************************************************/ + +struct acpi_table_asf { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* ASF subtable header */ + +struct acpi_asf_header { + u8 type; + u8 reserved; + u16 length; +}; + +/* Values for Type field above */ + +enum acpi_asf_type { + ACPI_ASF_TYPE_INFO = 0, + ACPI_ASF_TYPE_ALERT = 1, + ACPI_ASF_TYPE_CONTROL = 2, + ACPI_ASF_TYPE_BOOT = 3, + ACPI_ASF_TYPE_ADDRESS = 4, + ACPI_ASF_TYPE_RESERVED = 5 +}; + +/* + * ASF subtables + */ + +/* 0: ASF Information */ + +struct acpi_asf_info { + struct acpi_asf_header header; + u8 min_reset_value; + u8 min_poll_interval; + u16 system_id; + u32 mfg_id; + u8 flags; + u8 reserved2[3]; +}; + +/* Masks for Flags field above */ + +#define ACPI_ASF_SMBUS_PROTOCOLS (1) + +/* 1: ASF Alerts */ + +struct acpi_asf_alert { + struct acpi_asf_header header; + u8 assert_mask; + u8 deassert_mask; + u8 alerts; + u8 data_length; +}; + +struct acpi_asf_alert_data { + u8 address; + u8 command; + u8 mask; + u8 value; + u8 sensor_type; + u8 type; + u8 offset; + u8 source_type; + u8 severity; + u8 sensor_number; + u8 entity; + u8 instance; +}; + +/* 2: ASF Remote Control */ + +struct acpi_asf_remote { + struct acpi_asf_header header; + u8 controls; + u8 data_length; + u16 reserved2; +}; + +struct acpi_asf_control_data { + u8 function; + u8 address; + u8 command; + u8 value; +}; + +/* 3: ASF RMCP Boot Options */ + +struct acpi_asf_rmcp { + struct acpi_asf_header header; + u8 capabilities[7]; + u8 completion_code; + u32 enterprise_id; + u8 command; + u16 parameter; + u16 boot_options; + u16 oem_parameters; +}; + +/* 4: ASF Address */ + +struct acpi_asf_address { + struct acpi_asf_header header; + u8 eprom_address; + u8 devices; +}; + +/******************************************************************************* + * + * BERT - Boot Error Record Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_bert 
{ + struct acpi_table_header header; /* Common ACPI table header */ + u32 region_length; /* Length of the boot error region */ + u64 address; /* Physical address of the error region */ +}; + +/* Boot Error Region (not a subtable, pointed to by Address field above) */ + +struct acpi_bert_region { + u32 block_status; /* Type of error information */ + u32 raw_data_offset; /* Offset to raw error data */ + u32 raw_data_length; /* Length of raw error data */ + u32 data_length; /* Length of generic error data */ + u32 error_severity; /* Severity code */ +}; + +/* Values for block_status flags above */ + +#define ACPI_BERT_UNCORRECTABLE (1) +#define ACPI_BERT_CORRECTABLE (1<<1) +#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_BERT_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_BERT_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ + +/* Values for error_severity above */ + +enum acpi_bert_error_severity { + ACPI_BERT_ERROR_CORRECTABLE = 0, + ACPI_BERT_ERROR_FATAL = 1, + ACPI_BERT_ERROR_CORRECTED = 2, + ACPI_BERT_ERROR_NONE = 3, + ACPI_BERT_ERROR_RESERVED = 4 /* 4 and greater are reserved */ +}; + +/* + * Note: The generic error data that follows the error_severity field above + * uses the struct acpi_hest_generic_data defined under the HEST table below + */ + +/******************************************************************************* + * + * BGRT - Boot Graphics Resource Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_bgrt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 version; + u8 status; + u8 image_type; + u64 image_address; + u32 image_offset_x; + u32 image_offset_y; +}; + +/* Flags for Status field above */ + +#define ACPI_BGRT_DISPLAYED (1) +#define ACPI_BGRT_ORIENTATION_OFFSET (3 << 1) + +/******************************************************************************* + * + * BOOT - Simple Boot Flag Table + * Version 1 + * + * Conforms to the "Simple Boot Flag Specification", Version 2.1 + * + ******************************************************************************/ + +struct acpi_table_boot { + struct acpi_table_header header; /* Common ACPI table header */ + u8 cmos_index; /* Index in CMOS RAM for the boot register */ + u8 reserved[3]; +}; + +/******************************************************************************* + * + * CPEP - Corrected Platform Error Polling table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_cpep { + struct acpi_table_header header; /* Common ACPI table header */ + u64 reserved; +}; + +/* Subtable */ + +struct acpi_cpep_polling { + struct acpi_subtable_header header; + u8 id; /* Processor ID */ + u8 eid; /* Processor EID */ + u32 interval; /* Polling interval (msec) */ +}; + +/******************************************************************************* + * + * CSRT - Core System Resource Table + * Version 0 + * + * Conforms to the "Core System Resource Table (CSRT)", November 14, 2011 + * + ******************************************************************************/ + +struct acpi_table_csrt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* Resource Group subtable */ + +struct acpi_csrt_group { + u32 length; + u32 vendor_id; + u32 subvendor_id; + u16 device_id; + u16 subdevice_id; + u16 revision; + u16 reserved; + u32 shared_info_length; + + /* Shared data immediately follows (Length = 
shared_info_length) */ +}; + +/* Shared Info subtable */ + +struct acpi_csrt_shared_info { + u16 major_version; + u16 minor_version; + u32 mmio_base_low; + u32 mmio_base_high; + u32 gsi_interrupt; + u8 interrupt_polarity; + u8 interrupt_mode; + u8 num_channels; + u8 dma_address_width; + u16 base_request_line; + u16 num_handshake_signals; + u32 max_block_size; + + /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */ +}; + +/* Resource Descriptor subtable */ + +struct acpi_csrt_descriptor { + u32 length; + u16 type; + u16 subtype; + u32 uid; + + /* Resource-specific information immediately follows */ +}; + +/* Resource Types */ + +#define ACPI_CSRT_TYPE_INTERRUPT 0x0001 +#define ACPI_CSRT_TYPE_TIMER 0x0002 +#define ACPI_CSRT_TYPE_DMA 0x0003 + +/* Resource Subtypes */ + +#define ACPI_CSRT_XRUPT_LINE 0x0000 +#define ACPI_CSRT_XRUPT_CONTROLLER 0x0001 +#define ACPI_CSRT_TIMER 0x0000 +#define ACPI_CSRT_DMA_CHANNEL 0x0000 +#define ACPI_CSRT_DMA_CONTROLLER 0x0001 + +/******************************************************************************* + * + * DBG2 - Debug Port Table 2 + * Version 0 (Both main table and subtables) + * + * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015 + * + ******************************************************************************/ + +struct acpi_table_dbg2 { + struct acpi_table_header header; /* Common ACPI table header */ + u32 info_offset; + u32 info_count; +}; + +struct acpi_dbg2_header { + u32 info_offset; + u32 info_count; +}; + +/* Debug Device Information Subtable */ + +struct acpi_dbg2_device { + u8 revision; + u16 length; + u8 register_count; /* Number of base_address registers */ + u16 namepath_length; + u16 namepath_offset; + u16 oem_data_length; + u16 oem_data_offset; + u16 port_type; + u16 port_subtype; + u16 reserved; + u16 base_address_offset; + u16 address_size_offset; + /* + * Data that follows: + * base_address (required) - Each in 12-byte Generic Address Structure format. + * address_size (required) - Array of u32 sizes corresponding to each base_address register. + * Namepath (required) - Null terminated string. Single dot if not supported. + * oem_data (optional) - Length is oem_data_length. 
+ */ +}; + +/* Types for port_type field above */ + +#define ACPI_DBG2_SERIAL_PORT 0x8000 +#define ACPI_DBG2_1394_PORT 0x8001 +#define ACPI_DBG2_USB_PORT 0x8002 +#define ACPI_DBG2_NET_PORT 0x8003 + +/* Subtypes for port_subtype field above */ + +#define ACPI_DBG2_16550_COMPATIBLE 0x0000 +#define ACPI_DBG2_16550_SUBSET 0x0001 +#define ACPI_DBG2_ARM_PL011 0x0003 +#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D +#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E +#define ACPI_DBG2_ARM_DCC 0x000F +#define ACPI_DBG2_BCM2835 0x0010 + +#define ACPI_DBG2_1394_STANDARD 0x0000 + +#define ACPI_DBG2_USB_XHCI 0x0000 +#define ACPI_DBG2_USB_EHCI 0x0001 + +/******************************************************************************* + * + * DBGP - Debug Port table + * Version 1 + * + * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000 + * + ******************************************************************************/ + +struct acpi_table_dbgp { + struct acpi_table_header header; /* Common ACPI table header */ + u8 type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address debug_port; +}; + +/******************************************************************************* + * + * DMAR - DMA Remapping table + * Version 1 + * + * Conforms to "Intel Virtualization Technology for Directed I/O", + * Version 2.3, October 2014 + * + ******************************************************************************/ + +struct acpi_table_dmar { + struct acpi_table_header header; /* Common ACPI table header */ + u8 width; /* Host Address Width */ + u8 flags; + u8 reserved[10]; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INTR_REMAP (1) +#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1) +#define ACPI_DMAR_X2APIC_MODE (1<<2) + +/* DMAR subtable header */ + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_dmar_header */ + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ROOT_ATS = 2, + ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_NAMESPACE = 4, + ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* DMAR Device Scope structure */ + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +/* Values for entry_type in struct acpi_dmar_device_scope - device types */ + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +struct acpi_dmar_pci_path { + u8 device; + u8 function; +}; + +/* + * DMAR Subtables, correspond to Type in struct acpi_dmar_header + */ + +/* 0: Hardware Unit Definition */ + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; + u64 address; /* Register Base Address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_INCLUDE_ALL (1) + +/* 1: Reserved Memory Definition */ + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; /* 4K aligned base address */ + u64 end_address; /* 4K aligned limit address */ +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALLOW_ALL (1) + +/* 2: Root Port ATS Capability Reporting Structure */ + +struct acpi_dmar_atsr { + 
struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +/* Masks for Flags field above */ + +#define ACPI_DMAR_ALL_PORTS (1) + +/* 3: Remapping Hardware Static Affinity Structure */ + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +}; + +/* 4: ACPI Namespace Device Declaration Structure */ + +struct acpi_dmar_andd { + struct acpi_dmar_header header; + u8 reserved[3]; + u8 device_number; + char device_name[1]; +}; + +/******************************************************************************* + * + * DRTM - Dynamic Root of Trust for Measurement table + * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0 + * Table version 1 + * + ******************************************************************************/ + +struct acpi_table_drtm { + struct acpi_table_header header; /* Common ACPI table header */ + u64 entry_base_address; + u64 entry_length; + u32 entry_address32; + u64 entry_address64; + u64 exit_address; + u64 log_area_address; + u32 log_area_length; + u64 arch_dependent_address; + u32 flags; +}; + +/* Flag Definitions for above */ + +#define ACPI_DRTM_ACCESS_ALLOWED (1) +#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1) +#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2) +#define ACPI_DRTM_AUTHORITY_ORDER (1<<3) + +/* 1) Validated Tables List (64-bit addresses) */ + +struct acpi_drtm_vtable_list { + u32 validated_table_count; + u64 validated_tables[1]; +}; + +/* 2) Resources List (of Resource Descriptors) */ + +/* Resource Descriptor */ + +struct acpi_drtm_resource { + u8 size[7]; + u8 type; + u64 address; +}; + +struct acpi_drtm_resource_list { + u32 resource_count; + struct acpi_drtm_resource resources[1]; +}; + +/* 3) Platform-specific Identifiers List */ + +struct acpi_drtm_dps_id { + u32 dps_id_length; + u8 dps_id[16]; +}; + +/******************************************************************************* + * + * ECDT - Embedded Controller Boot Resources Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_ecdt { + struct acpi_table_header header; /* Common ACPI table header */ + struct acpi_generic_address control; /* Address of EC command/status register */ + struct acpi_generic_address data; /* Address of EC data register */ + u32 uid; /* Unique ID - must be same as the EC _UID method */ + u8 gpe; /* The GPE for the EC */ + u8 id[1]; /* Full namepath of the EC in the ACPI namespace */ +}; + +/******************************************************************************* + * + * EINJ - Error Injection Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_einj { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; + u8 flags; + u8 reserved[3]; + u32 entries; +}; + +/* EINJ Injection Instruction Entries (actions) */ + +struct acpi_einj_entry { + struct acpi_whea_header whea_header; /* Common header for WHEA tables */ +}; + +/* Masks for Flags field above */ + +#define ACPI_EINJ_PRESERVE (1) + +/* Values for Action field above */ + +enum acpi_einj_actions { + ACPI_EINJ_BEGIN_OPERATION = 0, + ACPI_EINJ_GET_TRIGGER_TABLE = 1, + ACPI_EINJ_SET_ERROR_TYPE = 2, + ACPI_EINJ_GET_ERROR_TYPE = 3, + ACPI_EINJ_END_OPERATION = 4, + ACPI_EINJ_EXECUTE_OPERATION = 5, + ACPI_EINJ_CHECK_BUSY_STATUS = 6, + ACPI_EINJ_GET_COMMAND_STATUS = 7, + ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8, + 
ACPI_EINJ_GET_EXECUTE_TIMINGS = 9, + ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */ + ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */ +}; + +/* Values for Instruction field above */ + +enum acpi_einj_instructions { + ACPI_EINJ_READ_REGISTER = 0, + ACPI_EINJ_READ_REGISTER_VALUE = 1, + ACPI_EINJ_WRITE_REGISTER = 2, + ACPI_EINJ_WRITE_REGISTER_VALUE = 3, + ACPI_EINJ_NOOP = 4, + ACPI_EINJ_FLUSH_CACHELINE = 5, + ACPI_EINJ_INSTRUCTION_RESERVED = 6 /* 6 and greater are reserved */ +}; + +struct acpi_einj_error_type_with_addr { + u32 error_type; + u32 vendor_struct_offset; + u32 flags; + u32 apic_id; + u64 address; + u64 range; + u32 pcie_id; +}; + +struct acpi_einj_vendor { + u32 length; + u32 pcie_id; + u16 vendor_id; + u16 device_id; + u8 revision_id; + u8 reserved[3]; +}; + +/* EINJ Trigger Error Action Table */ + +struct acpi_einj_trigger { + u32 header_size; + u32 revision; + u32 table_size; + u32 entry_count; +}; + +/* Command status return values */ + +enum acpi_einj_command_status { + ACPI_EINJ_SUCCESS = 0, + ACPI_EINJ_FAILURE = 1, + ACPI_EINJ_INVALID_ACCESS = 2, + ACPI_EINJ_STATUS_RESERVED = 3 /* 3 and greater are reserved */ +}; + +/* Error types returned from ACPI_EINJ_GET_ERROR_TYPE (bitfield) */ + +#define ACPI_EINJ_PROCESSOR_CORRECTABLE (1) +#define ACPI_EINJ_PROCESSOR_UNCORRECTABLE (1<<1) +#define ACPI_EINJ_PROCESSOR_FATAL (1<<2) +#define ACPI_EINJ_MEMORY_CORRECTABLE (1<<3) +#define ACPI_EINJ_MEMORY_UNCORRECTABLE (1<<4) +#define ACPI_EINJ_MEMORY_FATAL (1<<5) +#define ACPI_EINJ_PCIX_CORRECTABLE (1<<6) +#define ACPI_EINJ_PCIX_UNCORRECTABLE (1<<7) +#define ACPI_EINJ_PCIX_FATAL (1<<8) +#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9) +#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10) +#define ACPI_EINJ_PLATFORM_FATAL (1<<11) +#define ACPI_EINJ_VENDOR_DEFINED (1<<31) + +/******************************************************************************* + * + * ERST - Error Record Serialization Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_erst { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; + u32 reserved; + u32 entries; +}; + +/* ERST Serialization Entries (actions) */ + +struct acpi_erst_entry { + struct acpi_whea_header whea_header; /* Common header for WHEA tables */ +}; + +/* Masks for Flags field above */ + +#define ACPI_ERST_PRESERVE (1) + +/* Values for Action field above */ + +enum acpi_erst_actions { + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, + ACPI_ERST_SET_RECORD_OFFSET = 4, + ACPI_ERST_EXECUTE_OPERATION = 5, + ACPI_ERST_CHECK_BUSY_STATUS = 6, + ACPI_ERST_GET_COMMAND_STATUS = 7, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, + ACPI_ERST_GET_RECORD_COUNT = 10, + ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, + ACPI_ERST_NOT_USED = 12, + ACPI_ERST_GET_ERROR_RANGE = 13, + ACPI_ERST_GET_ERROR_LENGTH = 14, + ACPI_ERST_GET_ERROR_ATTRIBUTES = 15, + ACPI_ERST_EXECUTE_TIMINGS = 16, + ACPI_ERST_ACTION_RESERVED = 17 /* 17 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_erst_instructions { + ACPI_ERST_READ_REGISTER = 0, + ACPI_ERST_READ_REGISTER_VALUE = 1, + ACPI_ERST_WRITE_REGISTER = 2, + ACPI_ERST_WRITE_REGISTER_VALUE = 3, + ACPI_ERST_NOOP = 4, + ACPI_ERST_LOAD_VAR1 = 5, + ACPI_ERST_LOAD_VAR2 = 6, + ACPI_ERST_STORE_VAR1 = 7, + ACPI_ERST_ADD = 8, + ACPI_ERST_SUBTRACT = 9, + ACPI_ERST_ADD_VALUE = 10, + ACPI_ERST_SUBTRACT_VALUE = 
11, + ACPI_ERST_STALL = 12, + ACPI_ERST_STALL_WHILE_TRUE = 13, + ACPI_ERST_SKIP_NEXT_IF_TRUE = 14, + ACPI_ERST_GOTO = 15, + ACPI_ERST_SET_SRC_ADDRESS_BASE = 16, + ACPI_ERST_SET_DST_ADDRESS_BASE = 17, + ACPI_ERST_MOVE_DATA = 18, + ACPI_ERST_INSTRUCTION_RESERVED = 19 /* 19 and greater are reserved */ +}; + +/* Command status return values */ + +enum acpi_erst_command_status { + ACPI_ERST_SUCESS = 0, + ACPI_ERST_NO_SPACE = 1, + ACPI_ERST_NOT_AVAILABLE = 2, + ACPI_ERST_FAILURE = 3, + ACPI_ERST_RECORD_EMPTY = 4, + ACPI_ERST_NOT_FOUND = 5, + ACPI_ERST_STATUS_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* Error Record Serialization Information */ + +struct acpi_erst_info { + u16 signature; /* Should be "ER" */ + u8 data[48]; +}; + +/******************************************************************************* + * + * FPDT - Firmware Performance Data Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_fpdt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* FPDT subtable header (Performance Record Structure) */ + +struct acpi_fpdt_header { + u16 type; + u8 length; + u8 revision; +}; + +/* Values for Type field above */ + +enum acpi_fpdt_type { + ACPI_FPDT_TYPE_BOOT = 0, + ACPI_FPDT_TYPE_S3PERF = 1 +}; + +/* + * FPDT subtables + */ + +/* 0: Firmware Basic Boot Performance Record */ + +struct acpi_fpdt_boot_pointer { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 address; +}; + +/* 1: S3 Performance Table Pointer Record */ + +struct acpi_fpdt_s3pt_pointer { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 address; +}; + +/* + * S3PT - S3 Performance Table. This table is pointed to by the + * S3 Pointer Record above. + */ +struct acpi_table_s3pt { + u8 signature[4]; /* "S3PT" */ + u32 length; +}; + +/* + * S3PT Subtables (Not part of the actual FPDT) + */ + +/* Values for Type field in S3PT header */ + +enum acpi_s3pt_type { + ACPI_S3PT_TYPE_RESUME = 0, + ACPI_S3PT_TYPE_SUSPEND = 1, + ACPI_FPDT_BOOT_PERFORMANCE = 2 +}; + +struct acpi_s3pt_resume { + struct acpi_fpdt_header header; + u32 resume_count; + u64 full_resume; + u64 average_resume; +}; + +struct acpi_s3pt_suspend { + struct acpi_fpdt_header header; + u64 suspend_start; + u64 suspend_end; +}; + +/* + * FPDT Boot Performance Record (Not part of the actual FPDT) + */ +struct acpi_fpdt_boot { + struct acpi_fpdt_header header; + u8 reserved[4]; + u64 reset_end; + u64 load_start; + u64 startup_start; + u64 exit_services_entry; + u64 exit_services_exit; +}; + +/******************************************************************************* + * + * GTDT - Generic Timer Description Table (ACPI 5.1) + * Version 2 + * + ******************************************************************************/ + +struct acpi_table_gtdt { + struct acpi_table_header header; /* Common ACPI table header */ + u64 counter_block_addresss; + u32 reserved; + u32 secure_el1_interrupt; + u32 secure_el1_flags; + u32 non_secure_el1_interrupt; + u32 non_secure_el1_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 non_secure_el2_interrupt; + u32 non_secure_el2_flags; + u64 counter_read_block_address; + u32 platform_timer_count; + u32 platform_timer_offset; +}; + +/* Flag Definitions: Timer Block Physical Timers and Virtual timers */ + +#define ACPI_GTDT_INTERRUPT_MODE (1) +#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1) +#define ACPI_GTDT_ALWAYS_ON (1<<2) + +struct acpi_gtdt_el2 { + u32 virtual_el2_timer_gsiv; + u32 
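+
+/*
+ * Illustrative sketch, not part of the upstream header: decoding the GTDT
+ * timer flag bits defined above. Bit 0 set means edge-triggered (clear
+ * means level), bit 1 set means active-low polarity (clear means
+ * active-high); the helper names are hypothetical.
+ */
+#define GTDT_IRQ_IS_EDGE(flags) (((flags) & ACPI_GTDT_INTERRUPT_MODE) != 0)
+#define GTDT_IRQ_IS_ACTIVE_LOW(flags) (((flags) & ACPI_GTDT_INTERRUPT_POLARITY) != 0)
+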
virtual_el2_timer_flags; +}; + +/* Common GTDT subtable header */ + +struct acpi_gtdt_header { + u8 type; + u16 length; +}; + +/* Values for GTDT subtable type above */ + +enum acpi_gtdt_type { + ACPI_GTDT_TYPE_TIMER_BLOCK = 0, + ACPI_GTDT_TYPE_WATCHDOG = 1, + ACPI_GTDT_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + +/* GTDT Subtables, correspond to Type in struct acpi_gtdt_header */ + +/* 0: Generic Timer Block */ + +struct acpi_gtdt_timer_block { + struct acpi_gtdt_header header; + u8 reserved; + u64 block_address; + u32 timer_count; + u32 timer_offset; +}; + +/* Timer Sub-Structure, one per timer */ + +struct acpi_gtdt_timer_entry { + u8 frame_number; + u8 reserved[3]; + u64 base_address; + u64 el0_base_address; + u32 timer_interrupt; + u32 timer_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 common_flags; +}; + +/* Flag Definitions: timer_flags and virtual_timer_flags above */ + +#define ACPI_GTDT_GT_IRQ_MODE (1) +#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1) + +/* Flag Definitions: common_flags above */ + +#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) +#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) + +/* 1: SBSA Generic Watchdog Structure */ + +struct acpi_gtdt_watchdog { + struct acpi_gtdt_header header; + u8 reserved; + u64 refresh_frame_address; + u64 control_frame_address; + u32 timer_interrupt; + u32 timer_flags; +}; + +/* Flag Definitions: timer_flags above */ + +#define ACPI_GTDT_WATCHDOG_IRQ_MODE (1) +#define ACPI_GTDT_WATCHDOG_IRQ_POLARITY (1<<1) +#define ACPI_GTDT_WATCHDOG_SECURE (1<<2) + +/******************************************************************************* + * + * HEST - Hardware Error Source Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_hest { + struct acpi_table_header header; /* Common ACPI table header */ + u32 error_source_count; +}; + +/* HEST subtable header */ + +struct acpi_hest_header { + u16 type; + u16 source_id; +}; + +/* Values for Type field above for subtables */ + +enum acpi_hest_types { + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, + ACPI_HEST_TYPE_AER_ROOT_PORT = 6, + ACPI_HEST_TYPE_AER_ENDPOINT = 7, + ACPI_HEST_TYPE_AER_BRIDGE = 8, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, + ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, + ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, + ACPI_HEST_TYPE_RESERVED = 12 /* 12 and greater are reserved */ +}; + +/* + * HEST substructures contained in subtables + */ + +/* + * IA32 Error Bank(s) - Follows the struct acpi_hest_ia_machine_check and + * struct acpi_hest_ia_corrected structures. 
+ */ +struct acpi_hest_ia_error_bank { + u8 bank_number; + u8 clear_status_on_init; + u8 status_format; + u8 reserved; + u32 control_register; + u64 control_data; + u32 status_register; + u32 address_register; + u32 misc_register; +}; + +/* Common HEST sub-structure for PCI/AER structures below (6,7,8) */ + +struct acpi_hest_aer_common { + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 bus; /* Bus and Segment numbers */ + u16 device; + u16 function; + u16 device_control; + u16 reserved2; + u32 uncorrectable_mask; + u32 uncorrectable_severity; + u32 correctable_mask; + u32 advanced_capabilities; +}; + +/* Masks for HEST Flags fields */ + +#define ACPI_HEST_FIRMWARE_FIRST (1) +#define ACPI_HEST_GLOBAL (1<<1) +#define ACPI_HEST_GHES_ASSIST (1<<2) + +/* + * Macros to access the bus/segment numbers in Bus field above: + * Bus number is encoded in bits 7:0 + * Segment number is encoded in bits 23:8 + */ +#define ACPI_HEST_BUS(bus) ((bus) & 0xFF) +#define ACPI_HEST_SEGMENT(bus) (((bus) >> 8) & 0xFFFF) + +/* Hardware Error Notification */ + +struct acpi_hest_notify { + u8 type; + u8 length; + u16 config_write_enable; + u32 poll_interval; + u32 vector; + u32 polling_threshold_value; + u32 polling_threshold_window; + u32 error_threshold_value; + u32 error_threshold_window; +}; + +/* Values for Notify Type field above */ + +enum acpi_hest_notify_types { + ACPI_HEST_NOTIFY_POLLED = 0, + ACPI_HEST_NOTIFY_EXTERNAL = 1, + ACPI_HEST_NOTIFY_LOCAL = 2, + ACPI_HEST_NOTIFY_SCI = 3, + ACPI_HEST_NOTIFY_NMI = 4, + ACPI_HEST_NOTIFY_CMCI = 5, /* ACPI 5.0 */ + ACPI_HEST_NOTIFY_MCE = 6, /* ACPI 5.0 */ + ACPI_HEST_NOTIFY_GPIO = 7, /* ACPI 6.0 */ + ACPI_HEST_NOTIFY_SEA = 8, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, /* ACPI 6.2 */ + ACPI_HEST_NOTIFY_RESERVED = 12 /* 12 and greater are reserved */ +}; + +/* Values for config_write_enable bitfield above */ + +#define ACPI_HEST_TYPE (1) +#define ACPI_HEST_POLL_INTERVAL (1<<1) +#define ACPI_HEST_POLL_THRESHOLD_VALUE (1<<2) +#define ACPI_HEST_POLL_THRESHOLD_WINDOW (1<<3) +#define ACPI_HEST_ERR_THRESHOLD_VALUE (1<<4) +#define ACPI_HEST_ERR_THRESHOLD_WINDOW (1<<5) + +/* + * HEST subtables + */ + +/* 0: IA32 Machine Check Exception */ + +struct acpi_hest_ia_machine_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. above */ + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u64 global_capability_data; + u64 global_control_data; + u8 num_hardware_banks; + u8 reserved3[7]; +}; + +/* 1: IA32 Corrected Machine Check */ + +struct acpi_hest_ia_corrected { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. 
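+
+/*
+ * Illustrative usage sketch, not part of the upstream header: given a
+ * hypothetical "aer" pointer to a struct acpi_hest_aer_common above, the
+ * packed Bus field is split with the two accessors defined above:
+ *
+ *	u32 bus = ACPI_HEST_BUS(aer->bus);
+ *	u32 segment = ACPI_HEST_SEGMENT(aer->bus);
+ */
+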
above */ + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +/* 2: IA32 Non-Maskable Interrupt */ + +struct acpi_hest_ia_nmi { + struct acpi_hest_header header; + u32 reserved; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; +}; + +/* 3,4,5: Not used */ + +/* 6: PCI Express Root Port AER */ + +struct acpi_hest_aer_root { + struct acpi_hest_header header; + struct acpi_hest_aer_common aer; + u32 root_error_command; +}; + +/* 7: PCI Express AER (AER Endpoint) */ + +struct acpi_hest_aer { + struct acpi_hest_header header; + struct acpi_hest_aer_common aer; +}; + +/* 8: PCI Express/PCI-X Bridge AER */ + +struct acpi_hest_aer_bridge { + struct acpi_hest_header header; + struct acpi_hest_aer_common aer; + u32 uncorrectable_mask2; + u32 uncorrectable_severity2; + u32 advanced_capabilities2; +}; + +/* 9: Generic Hardware Error Source */ + +struct acpi_hest_generic { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; +}; + +/* 10: Generic Hardware Error Source, version 2 */ + +struct acpi_hest_generic_v2 { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; + struct acpi_generic_address read_ack_register; + u64 read_ack_preserve; + u64 read_ack_write; +}; + +/* Generic Error Status block */ + +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +/* Values for block_status flags above */ + +#define ACPI_HEST_UNCORRECTABLE (1) +#define ACPI_HEST_CORRECTABLE (1<<1) +#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2) +#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3) +#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */ + +/* Generic Error Data entry */ + +struct acpi_hest_generic_data { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; +}; + +/* Extension for revision 0x0300 */ + +struct acpi_hest_generic_data_v300 { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; + u64 time_stamp; +}; + +/* Values for error_severity above */ + +#define ACPI_HEST_GEN_ERROR_RECOVERABLE 0 +#define ACPI_HEST_GEN_ERROR_FATAL 1 +#define ACPI_HEST_GEN_ERROR_CORRECTED 2 +#define ACPI_HEST_GEN_ERROR_NONE 3 + +/* Flags for validation_bits above */ + +#define ACPI_HEST_GEN_VALID_FRU_ID (1) +#define ACPI_HEST_GEN_VALID_FRU_STRING (1<<1) +#define ACPI_HEST_GEN_VALID_TIMESTAMP (1<<2) + +/* 11: IA32 Deferred Machine Check Exception (ACPI 6.2) */ + +struct acpi_hest_ia_deferred_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; /* See flags ACPI_HEST_GLOBAL, etc. 
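+
+/*
+ * Illustrative sketch, not part of the upstream header: a generic error
+ * status block is followed by data_length bytes of generic error data
+ * entries, each followed in turn by error_data_length bytes of payload.
+ * A minimal walker, assuming pre-0x0300 revision entries (fixed-size
+ * struct acpi_hest_generic_data headers):
+ */
+static inline void hest_for_each_generic_data(struct acpi_hest_generic_status *status)
+{
+	u8 *pos = (u8 *)(status + 1);
+	u8 *end = pos + status->data_length;
+
+	while (pos < end) {
+		struct acpi_hest_generic_data *gdata =
+		    (struct acpi_hest_generic_data *)pos;
+
+		/* ... consume gdata->section_type, gdata->error_severity ... */
+		pos += sizeof(*gdata) + gdata->error_data_length;
+	}
+}
+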
above */ + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +/******************************************************************************* + * + * HMAT - Heterogeneous Memory Attributes Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_hmat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; +}; + +/* Values for HMAT structure types */ + +enum acpi_hmat_type { + ACPI_HMAT_TYPE_PROXIMITY = 0, /* Memory proximity domain attributes */ + ACPI_HMAT_TYPE_LOCALITY = 1, /* System locality latency and bandwidth information */ + ACPI_HMAT_TYPE_CACHE = 2, /* Memory side cache information */ + ACPI_HMAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ +}; + +struct acpi_hmat_structure { + u16 type; + u16 reserved; + u32 length; +}; + +/* + * HMAT Structures, correspond to Type in struct acpi_hmat_structure + */ + +/* 0: Memory proximity domain attributes */ + +struct acpi_hmat_proximity_domain { + struct acpi_hmat_structure header; + u16 flags; + u16 reserved1; + u32 processor_PD; /* Processor proximity domain */ + u32 memory_PD; /* Memory proximity domain */ + u32 reserved2; + u64 reserved3; + u64 reserved4; +}; + +/* Masks for Flags field above */ + +#define ACPI_HMAT_PROCESSOR_PD_VALID (1) /* 1: processor_PD field is valid */ +#define ACPI_HMAT_MEMORY_PD_VALID (1<<1) /* 1: memory_PD field is valid */ +#define ACPI_HMAT_RESERVATION_HINT (1<<2) /* 1: Reservation hint */ + +/* 1: System locality latency and bandwidth information */ + +struct acpi_hmat_locality { + struct acpi_hmat_structure header; + u8 flags; + u8 data_type; + u16 reserved1; + u32 number_of_initiator_Pds; + u32 number_of_target_Pds; + u32 reserved2; + u64 entry_base_unit; +}; + +/* Masks for Flags field above */ + +#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) + +/* Values for Memory Hierarchy flag */ + +#define ACPI_HMAT_MEMORY 0 +#define ACPI_HMAT_LAST_LEVEL_CACHE 1 +#define ACPI_HMAT_1ST_LEVEL_CACHE 2 +#define ACPI_HMAT_2ND_LEVEL_CACHE 3 +#define ACPI_HMAT_3RD_LEVEL_CACHE 4 + +/* Values for data_type field above */ + +#define ACPI_HMAT_ACCESS_LATENCY 0 +#define ACPI_HMAT_READ_LATENCY 1 +#define ACPI_HMAT_WRITE_LATENCY 2 +#define ACPI_HMAT_ACCESS_BANDWIDTH 3 +#define ACPI_HMAT_READ_BANDWIDTH 4 +#define ACPI_HMAT_WRITE_BANDWIDTH 5 + +/* 2: Memory side cache information */ + +struct acpi_hmat_cache { + struct acpi_hmat_structure header; + u32 memory_PD; + u32 reserved1; + u64 cache_size; + u32 cache_attributes; + u16 reserved2; + u16 number_of_SMBIOShandles; +}; + +/* Masks for cache_attributes field above */ + +#define ACPI_HMAT_TOTAL_CACHE_LEVEL (0x0000000F) +#define ACPI_HMAT_CACHE_LEVEL (0x000000F0) +#define ACPI_HMAT_CACHE_ASSOCIATIVITY (0x00000F00) +#define ACPI_HMAT_WRITE_POLICY (0x0000F000) +#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000) + +/* Values for cache associativity flag */ + +#define ACPI_HMAT_CA_NONE (0) +#define ACPI_HMAT_CA_DIRECT_MAPPED (1) +#define ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING (2) + +/* Values for write policy flag */ + +#define ACPI_HMAT_CP_NONE (0) +#define ACPI_HMAT_CP_WB (1) +#define ACPI_HMAT_CP_WT (2) + +/******************************************************************************* + * + * HPET - High Precision Event Timer table + * Version 1 + * + * Conforms to "IA-PC HPET (High Precision Event Timers) Specification", + * Version 1.0a, October 2004 + * + 
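+
+/*
+ * Illustrative sketch, not part of the upstream header: unpacking the
+ * HMAT cache_attributes dword with the masks defined above; the shift
+ * amounts follow the mask positions, and the macro names are
+ * hypothetical.
+ */
+#define HMAT_CACHE_LEVEL(attr) (((attr) & ACPI_HMAT_CACHE_LEVEL) >> 4)
+#define HMAT_CACHE_ASSOCIATIVITY(attr) (((attr) & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8)
+#define HMAT_WRITE_POLICY(attr) (((attr) & ACPI_HMAT_WRITE_POLICY) >> 12)
+#define HMAT_CACHE_LINE_SIZE(attr) (((attr) & ACPI_HMAT_CACHE_LINE_SIZE) >> 16)
+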
******************************************************************************/ + +struct acpi_table_hpet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 id; /* Hardware ID of event timer block */ + struct acpi_generic_address address; /* Address of event timer block */ + u8 sequence; /* HPET sequence number */ + u16 minimum_tick; /* Main counter min tick, periodic mode */ + u8 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_HPET_PAGE_PROTECT_MASK (3) + +/* Values for Page Protect flags */ + +enum acpi_hpet_page_protect { + ACPI_HPET_NO_PAGE_PROTECT = 0, + ACPI_HPET_PAGE_PROTECT4 = 1, + ACPI_HPET_PAGE_PROTECT64 = 2 +}; + +/******************************************************************************* + * + * IBFT - Boot Firmware Table + * Version 1 + * + * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b + * Specification", Version 1.01, March 1, 2007 + * + * Note: It appears that this table is not intended to appear in the RSDT/XSDT. + * Therefore, it is not currently supported by the disassembler. + * + ******************************************************************************/ + +struct acpi_table_ibft { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[12]; +}; + +/* IBFT common subtable header */ + +struct acpi_ibft_header { + u8 type; + u8 version; + u16 length; + u8 index; + u8 flags; +}; + +/* Values for Type field above */ + +enum acpi_ibft_type { + ACPI_IBFT_TYPE_NOT_USED = 0, + ACPI_IBFT_TYPE_CONTROL = 1, + ACPI_IBFT_TYPE_INITIATOR = 2, + ACPI_IBFT_TYPE_NIC = 3, + ACPI_IBFT_TYPE_TARGET = 4, + ACPI_IBFT_TYPE_EXTENSIONS = 5, + ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* IBFT subtables */ + +struct acpi_ibft_control { + struct acpi_ibft_header header; + u16 extensions; + u16 initiator_offset; + u16 nic0_offset; + u16 target0_offset; + u16 nic1_offset; + u16 target1_offset; +}; + +struct acpi_ibft_initiator { + struct acpi_ibft_header header; + u8 sns_server[16]; + u8 slp_server[16]; + u8 primary_server[16]; + u8 secondary_server[16]; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_nic { + struct acpi_ibft_header header; + u8 ip_address[16]; + u8 subnet_mask_prefix; + u8 origin; + u8 gateway[16]; + u8 primary_dns[16]; + u8 secondary_dns[16]; + u8 dhcp[16]; + u16 vlan; + u8 mac_address[6]; + u16 pci_address; + u16 name_length; + u16 name_offset; +}; + +struct acpi_ibft_target { + struct acpi_ibft_header header; + u8 target_ip_address[16]; + u16 target_ip_socket; + u8 target_boot_lun[8]; + u8 chap_type; + u8 nic_association; + u16 target_name_length; + u16 target_name_offset; + u16 chap_name_length; + u16 chap_name_offset; + u16 chap_secret_length; + u16 chap_secret_offset; + u16 reverse_chap_name_length; + u16 reverse_chap_name_offset; + u16 reverse_chap_secret_length; + u16 reverse_chap_secret_offset; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL1_H__ */ diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h new file mode 100644 index 0000000..e45ced2 --- /dev/null +++ b/include/acpi/actbl2.h @@ -0,0 +1,1729 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec) + * + * Copyright (C) 2000 - 2019, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACTBL2_H__ +#define __ACTBL2_H__ + +/******************************************************************************* + * + * Additional ACPI Tables (2) + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */ +#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */ +#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */ +#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ +#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ +#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */ +#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ +#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ +#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ +#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */ +#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ +#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ +#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ +#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ +#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ +#define ACPI_SIG_RASF "RASF" /* RAS Feature table */ +#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ +#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ +#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. 
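+
+/*
+ * Illustrative sketch, not part of the upstream header: since the layout
+ * of C bitfields is implementation-defined, fields packed inside these
+ * table members are decoded with explicit masks and shifts instead. A
+ * hypothetical generic accessor:
+ */
+#define ACPI_FIELD_GET(value, mask, shift) (((value) & (mask)) >> (shift))
+
+/* e.g. ACPI_FIELD_GET(info, 0x00007F00, 8) extracts a 7-bit field at bit 8 */
+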
+ * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * IORT - IO Remapping Table + * + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049D, March 2018 + * + ******************************************************************************/ + +struct acpi_table_iort { + struct acpi_table_header header; + u32 node_count; + u32 node_offset; + u32 reserved; +}; + +/* + * IORT subtables + */ +struct acpi_iort_node { + u8 type; + u16 length; + u8 revision; + u32 reserved; + u32 mapping_count; + u32 mapping_offset; + char node_data[1]; +}; + +/* Values for subtable Type above */ + +enum acpi_iort_node_type { + ACPI_IORT_NODE_ITS_GROUP = 0x00, + ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, + ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, + ACPI_IORT_NODE_SMMU = 0x03, + ACPI_IORT_NODE_SMMU_V3 = 0x04, + ACPI_IORT_NODE_PMCG = 0x05 +}; + +struct acpi_iort_id_mapping { + u32 input_base; /* Lowest value in input range */ + u32 id_count; /* Number of IDs */ + u32 output_base; /* Lowest value in output range */ + u32 output_reference; /* A reference to the output node */ + u32 flags; +}; + +/* Masks for Flags field above for IORT subtable */ + +#define ACPI_IORT_ID_SINGLE_MAPPING (1) + +struct acpi_iort_memory_access { + u32 cache_coherency; + u8 hints; + u16 reserved; + u8 memory_flags; +}; + +/* Values for cache_coherency field above */ + +#define ACPI_IORT_NODE_COHERENT 0x00000001 /* The device node is fully coherent */ +#define ACPI_IORT_NODE_NOT_COHERENT 0x00000000 /* The device node is not coherent */ + +/* Masks for Hints field above */ + +#define ACPI_IORT_HT_TRANSIENT (1) +#define ACPI_IORT_HT_WRITE (1<<1) +#define ACPI_IORT_HT_READ (1<<2) +#define ACPI_IORT_HT_OVERRIDE (1<<3) + +/* Masks for memory_flags field above */ + +#define ACPI_IORT_MF_COHERENCY (1) +#define ACPI_IORT_MF_ATTRIBUTES (1<<1) + +/* + * IORT node specific subtables + */ +struct acpi_iort_its_group { + u32 its_count; + u32 identifiers[1]; /* GIC ITS identifier array */ +}; + +struct acpi_iort_named_component { + u32 node_flags; + u64 memory_properties; /* Memory access properties */ + u8 memory_address_limit; /* Memory address size limit */ + char device_name[1]; /* Path of namespace object */ +}; + +/* Masks for Flags field above */ + +#define ACPI_IORT_NC_STALL_SUPPORTED (1) +#define ACPI_IORT_NC_PASID_BITS (31<<1) + +struct acpi_iort_root_complex { + u64 memory_properties; /* Memory access properties */ + u32 ats_attribute; + u32 pci_segment_number; + u8 memory_address_limit; /* Memory address size limit */ + u8 reserved[3]; /* Reserved, must be zero */ +}; + +/* Values for ats_attribute field above */ + +#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */ +#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */ + +struct acpi_iort_smmu { + u64 base_address; /* SMMU base address */ + u64 span; /* Length of memory range */ + u32 model; + u32 flags; + u32 global_interrupt_offset; + u32 context_interrupt_count; + u32 context_interrupt_offset; + u32 pmu_interrupt_count; + u32 pmu_interrupt_offset; + u64 interrupts[1]; /* Interrupt array */ +}; + +/* Values for Model field above */ + +#define ACPI_IORT_SMMU_V1 0x00000000 /* Generic SMMUv1 */ +#define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */ +#define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */ +#define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink 
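+
+/*
+ * Illustrative sketch, not part of the upstream header: IORT nodes are
+ * variable length, so they are walked from node_offset using each node's
+ * Length field. The helper name is hypothetical.
+ */
+static inline void iort_for_each_node(struct acpi_table_iort *iort)
+{
+	struct acpi_iort_node *node =
+	    (struct acpi_iort_node *)((u8 *)iort + iort->node_offset);
+	u32 i;
+
+	for (i = 0; i < iort->node_count; i++) {
+		/* ... dispatch on node->type (enum acpi_iort_node_type) ... */
+		node = (struct acpi_iort_node *)((u8 *)node + node->length);
+	}
+}
+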
MMU-500 */ +#define ACPI_IORT_SMMU_CORELINK_MMU401 0x00000004 /* ARM Corelink MMU-401 */ +#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x00000005 /* Cavium thunder_x SMMUv2 */ + +/* Masks for Flags field above */ + +#define ACPI_IORT_SMMU_DVM_SUPPORTED (1) +#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1) + +/* Global interrupt format */ + +struct acpi_iort_smmu_gsi { + u32 nsg_irpt; + u32 nsg_irpt_flags; + u32 nsg_cfg_irpt; + u32 nsg_cfg_irpt_flags; +}; + +struct acpi_iort_smmu_v3 { + u64 base_address; /* SMMUv3 base address */ + u32 flags; + u32 reserved; + u64 vatos_address; + u32 model; + u32 event_gsiv; + u32 pri_gsiv; + u32 gerr_gsiv; + u32 sync_gsiv; + u32 pxm; + u32 id_mapping_index; +}; + +/* Values for Model field above */ + +#define ACPI_IORT_SMMU_V3_GENERIC 0x00000000 /* Generic SMMUv3 */ +#define ACPI_IORT_SMMU_V3_HISILICON_HI161X 0x00000001 /* hi_silicon Hi161x SMMUv3 */ +#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x00000002 /* Cavium CN99xx SMMUv3 */ + +/* Masks for Flags field above */ + +#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1) +#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1) +#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3) + +struct acpi_iort_pmcg { + u64 page0_base_address; + u32 overflow_gsiv; + u32 node_reference; + u64 page1_base_address; +}; + +/******************************************************************************* + * + * IVRS - I/O Virtualization Reporting Structure + * Version 1 + * + * Conforms to "AMD I/O Virtualization Technology (IOMMU) Specification", + * Revision 1.26, February 2009. + * + ******************************************************************************/ + +struct acpi_table_ivrs { + struct acpi_table_header header; /* Common ACPI table header */ + u32 info; /* Common virtualization info */ + u64 reserved; +}; + +/* Values for Info field above */ + +#define ACPI_IVRS_PHYSICAL_SIZE 0x00007F00 /* 7 bits, physical address size */ +#define ACPI_IVRS_VIRTUAL_SIZE 0x003F8000 /* 7 bits, virtual address size */ +#define ACPI_IVRS_ATS_RESERVED 0x00400000 /* ATS address translation range reserved */ + +/* IVRS subtable header */ + +struct acpi_ivrs_header { + u8 type; /* Subtable type */ + u8 flags; + u16 length; /* Subtable length */ + u16 device_id; /* ID of IOMMU */ +}; + +/* Values for subtable Type above */ + +enum acpi_ivrs_type { + ACPI_IVRS_TYPE_HARDWARE = 0x10, + ACPI_IVRS_TYPE_MEMORY1 = 0x20, + ACPI_IVRS_TYPE_MEMORY2 = 0x21, + ACPI_IVRS_TYPE_MEMORY3 = 0x22 +}; + +/* Masks for Flags field above for IVHD subtable */ + +#define ACPI_IVHD_TT_ENABLE (1) +#define ACPI_IVHD_PASS_PW (1<<1) +#define ACPI_IVHD_RES_PASS_PW (1<<2) +#define ACPI_IVHD_ISOC (1<<3) +#define ACPI_IVHD_IOTLB (1<<4) + +/* Masks for Flags field above for IVMD subtable */ + +#define ACPI_IVMD_UNITY (1) +#define ACPI_IVMD_READ (1<<1) +#define ACPI_IVMD_WRITE (1<<2) +#define ACPI_IVMD_EXCLUSION_RANGE (1<<3) + +/* + * IVRS subtables, correspond to Type in struct acpi_ivrs_header + */ + +/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ + +struct acpi_ivrs_hardware { + struct acpi_ivrs_header header; + u16 capability_offset; /* Offset for IOMMU control fields */ + u64 base_address; /* IOMMU control registers */ + u16 pci_segment_group; + u16 info; /* MSI number and unit ID */ + u32 reserved; +}; + +/* Masks for Info field above */ + +#define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */ +#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_ID */ + +/* + * Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure. 
+ * Upper two bits of the Type field are the (encoded) length of the structure. + * Currently, only 4 and 8 byte entries are defined. 16 and 32 byte entries + * are reserved for future use but not defined. + */ +struct acpi_ivrs_de_header { + u8 type; + u16 id; + u8 data_setting; +}; + +/* Length of device entry is in the top two bits of Type field above */ + +#define ACPI_IVHD_ENTRY_LENGTH 0xC0 + +/* Values for device entry Type field above */ + +enum acpi_ivrs_device_entry_type { + /* 4-byte device entries, all use struct acpi_ivrs_device4 */ + + ACPI_IVRS_TYPE_PAD4 = 0, + ACPI_IVRS_TYPE_ALL = 1, + ACPI_IVRS_TYPE_SELECT = 2, + ACPI_IVRS_TYPE_START = 3, + ACPI_IVRS_TYPE_END = 4, + + /* 8-byte device entries */ + + ACPI_IVRS_TYPE_PAD8 = 64, + ACPI_IVRS_TYPE_NOT_USED = 65, + ACPI_IVRS_TYPE_ALIAS_SELECT = 66, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */ + ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */ + ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */ +}; + +/* Values for Data field above */ + +#define ACPI_IVHD_INIT_PASS (1) +#define ACPI_IVHD_EINT_PASS (1<<1) +#define ACPI_IVHD_NMI_PASS (1<<2) +#define ACPI_IVHD_SYSTEM_MGMT (3<<4) +#define ACPI_IVHD_LINT0_PASS (1<<6) +#define ACPI_IVHD_LINT1_PASS (1<<7) + +/* Types 0-4: 4-byte device entry */ + +struct acpi_ivrs_device4 { + struct acpi_ivrs_de_header header; +}; + +/* Types 66-67: 8-byte device entry */ + +struct acpi_ivrs_device8a { + struct acpi_ivrs_de_header header; + u8 reserved1; + u16 used_id; + u8 reserved2; +}; + +/* Types 70-71: 8-byte device entry */ + +struct acpi_ivrs_device8b { + struct acpi_ivrs_de_header header; + u32 extended_data; +}; + +/* Values for extended_data above */ + +#define ACPI_IVHD_ATS_DISABLED (1<<31) + +/* Type 72: 8-byte device entry */ + +struct acpi_ivrs_device8c { + struct acpi_ivrs_de_header header; + u8 handle; + u16 used_id; + u8 variety; +}; + +/* Values for Variety field above */ + +#define ACPI_IVHD_IOAPIC 1 +#define ACPI_IVHD_HPET 2 + +/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ + +struct acpi_ivrs_memory { + struct acpi_ivrs_header header; + u16 aux_data; + u64 reserved; + u64 start_address; + u64 memory_length; +}; + +/******************************************************************************* + * + * LPIT - Low Power Idle Table + * + * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014. 
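+
+/*
+ * Illustrative sketch, not part of the upstream header: as noted above,
+ * the upper two bits of a device entry Type encode its size (0 -> 4
+ * bytes, 1 -> 8 bytes, 2 -> 16, 3 -> 32), so the size can be computed
+ * from the Type field directly. The macro name is hypothetical.
+ */
+#define IVHD_ENTRY_SIZE(type) (4 << (((type) & ACPI_IVHD_ENTRY_LENGTH) >> 6))
+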
+ * + ******************************************************************************/ + +struct acpi_table_lpit { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* LPIT subtable header */ + +struct acpi_lpit_header { + u32 type; /* Subtable type */ + u32 length; /* Subtable length */ + u16 unique_id; + u16 reserved; + u32 flags; +}; + +/* Values for subtable Type above */ + +enum acpi_lpit_type { + ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00, + ACPI_LPIT_TYPE_RESERVED = 0x01 /* 1 and above are reserved */ +}; + +/* Masks for Flags field above */ + +#define ACPI_LPIT_STATE_DISABLED (1) +#define ACPI_LPIT_NO_COUNTER (1<<1) + +/* + * LPIT subtables, correspond to Type in struct acpi_lpit_header + */ + +/* 0x00: Native C-state instruction based LPI structure */ + +struct acpi_lpit_native { + struct acpi_lpit_header header; + struct acpi_generic_address entry_trigger; + u32 residency; + u32 latency; + struct acpi_generic_address residency_counter; + u64 counter_frequency; +}; + +/******************************************************************************* + * + * MADT - Multiple APIC Description Table + * Version 3 + * + ******************************************************************************/ + +struct acpi_table_madt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 address; /* Physical address of local APIC */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */ + +/* Values for PCATCompat flag */ + +#define ACPI_MADT_DUAL_PIC 1 +#define ACPI_MADT_MULTIPLE_APIC 0 + +/* Values for MADT subtable type in struct acpi_subtable_header */ + +enum acpi_madt_type { + ACPI_MADT_TYPE_LOCAL_APIC = 0, + ACPI_MADT_TYPE_IO_APIC = 1, + ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, + ACPI_MADT_TYPE_NMI_SOURCE = 3, + ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, + ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, + ACPI_MADT_TYPE_IO_SAPIC = 6, + ACPI_MADT_TYPE_LOCAL_SAPIC = 7, + ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, + ACPI_MADT_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, + ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, + ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, + ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ +}; + +/* + * MADT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Processor Local APIC */ + +struct acpi_madt_local_apic { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u8 id; /* Processor's local APIC id */ + u32 lapic_flags; +}; + +/* 1: IO APIC */ + +struct acpi_madt_io_apic { + struct acpi_subtable_header header; + u8 id; /* I/O APIC ID */ + u8 reserved; /* reserved - must be zero */ + u32 address; /* APIC physical address */ + u32 global_irq_base; /* Global system interrupt where INTI lines start */ +}; + +/* 2: Interrupt Override */ + +struct acpi_madt_interrupt_override { + struct acpi_subtable_header header; + u8 bus; /* 0 - ISA */ + u8 source_irq; /* Interrupt source (IRQ) */ + u32 global_irq; /* Global system interrupt */ + u16 inti_flags; +}; + +/* 3: NMI Source */ + +struct acpi_madt_nmi_source { + struct acpi_subtable_header header; + u16 inti_flags; + u32 global_irq; /* Global system interrupt */ +}; + +/* 4: Local APIC NMI */ + +struct acpi_madt_local_apic_nmi { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u16 inti_flags; + u8 lint; /* LINTn 
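+
+/*
+ * Illustrative sketch, not part of the upstream header: MADT subtables
+ * are packed back to back after struct acpi_table_madt and are walked by
+ * each subtable's Length byte (struct acpi_subtable_header is defined in
+ * actbl.h). The helper name is hypothetical.
+ */
+static inline void madt_for_each_subtable(struct acpi_table_madt *madt)
+{
+	u8 *pos = (u8 *)(madt + 1);
+	u8 *end = (u8 *)madt + madt->header.length;
+
+	while (pos < end) {
+		struct acpi_subtable_header *sub = (struct acpi_subtable_header *)pos;
+
+		/* ... dispatch on sub->type (enum acpi_madt_type) ... */
+		pos += sub->length;
+	}
+}
+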
to which NMI is connected */ +}; + +/* 5: Address Override */ + +struct acpi_madt_local_apic_override { + struct acpi_subtable_header header; + u16 reserved; /* Reserved, must be zero */ + u64 address; /* APIC physical address */ +}; + +/* 6: I/O Sapic */ + +struct acpi_madt_io_sapic { + struct acpi_subtable_header header; + u8 id; /* I/O SAPIC ID */ + u8 reserved; /* Reserved, must be zero */ + u32 global_irq_base; /* Global interrupt for SAPIC start */ + u64 address; /* SAPIC physical address */ +}; + +/* 7: Local Sapic */ + +struct acpi_madt_local_sapic { + struct acpi_subtable_header header; + u8 processor_id; /* ACPI processor id */ + u8 id; /* SAPIC ID */ + u8 eid; /* SAPIC EID */ + u8 reserved[3]; /* Reserved, must be zero */ + u32 lapic_flags; + u32 uid; /* Numeric UID - ACPI 3.0 */ + char uid_string[1]; /* String UID - ACPI 3.0 */ +}; + +/* 8: Platform Interrupt Source */ + +struct acpi_madt_interrupt_source { + struct acpi_subtable_header header; + u16 inti_flags; + u8 type; /* 1=PMI, 2=INIT, 3=corrected */ + u8 id; /* Processor ID */ + u8 eid; /* Processor EID */ + u8 io_sapic_vector; /* Vector value for PMI interrupts */ + u32 global_irq; /* Global system interrupt */ + u32 flags; /* Interrupt Source Flags */ +}; + +/* Masks for Flags field above */ + +#define ACPI_MADT_CPEI_OVERRIDE (1) + +/* 9: Processor Local X2APIC (ACPI 4.0) */ + +struct acpi_madt_local_x2apic { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 local_apic_id; /* Processor x2APIC ID */ + u32 lapic_flags; + u32 uid; /* ACPI processor UID */ +}; + +/* 10: Local X2APIC NMI (ACPI 4.0) */ + +struct acpi_madt_local_x2apic_nmi { + struct acpi_subtable_header header; + u16 inti_flags; + u32 uid; /* ACPI processor UID */ + u8 lint; /* LINTn to which NMI is connected */ + u8 reserved[3]; /* reserved - must be zero */ +}; + +/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 changes) */ + +struct acpi_madt_generic_interrupt { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 cpu_interface_number; + u32 uid; + u32 flags; + u32 parking_version; + u32 performance_interrupt; + u64 parked_address; + u64 base_address; + u64 gicv_base_address; + u64 gich_base_address; + u32 vgic_interrupt; + u64 gicr_base_address; + u64 arm_mpidr; + u8 efficiency_class; + u8 reserved2[1]; + u16 spe_interrupt; /* ACPI 6.3 */ +}; + +/* Masks for Flags field above */ + +/* ACPI_MADT_ENABLED (1) Processor is usable if set */ +#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */ +#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */ + +/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */ + +struct acpi_madt_generic_distributor { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 gic_id; + u64 base_address; + u32 global_irq_base; + u8 version; + u8 reserved2[3]; /* reserved - must be zero */ +}; + +/* Values for Version field above */ + +enum acpi_madt_gic_version { + ACPI_MADT_GIC_VERSION_NONE = 0, + ACPI_MADT_GIC_VERSION_V1 = 1, + ACPI_MADT_GIC_VERSION_V2 = 2, + ACPI_MADT_GIC_VERSION_V3 = 3, + ACPI_MADT_GIC_VERSION_V4 = 4, + ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* 13: Generic MSI Frame (ACPI 5.1) */ + +struct acpi_madt_generic_msi_frame { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 msi_frame_id; + u64 base_address; + u32 flags; + u16 spi_count; + u16 spi_base; +}; + +/* 
Masks for Flags field above */ + +#define ACPI_MADT_OVERRIDE_SPI_VALUES (1) + +/* 14: Generic Redistributor (ACPI 5.1) */ + +struct acpi_madt_generic_redistributor { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u64 base_address; + u32 length; +}; + +/* 15: Generic Translator (ACPI 6.0) */ + +struct acpi_madt_generic_translator { + struct acpi_subtable_header header; + u16 reserved; /* reserved - must be zero */ + u32 translation_id; + u64 base_address; + u32 reserved2; +}; + +/* + * Common flags fields for MADT subtables + */ + +/* MADT Local APIC flags */ + +#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */ + +/* MADT MPS INTI flags (inti_flags) */ + +#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */ +#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */ + +/* Values for MPS INTI flags */ + +#define ACPI_MADT_POLARITY_CONFORMS 0 +#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1 +#define ACPI_MADT_POLARITY_RESERVED 2 +#define ACPI_MADT_POLARITY_ACTIVE_LOW 3 + +#define ACPI_MADT_TRIGGER_CONFORMS (0) +#define ACPI_MADT_TRIGGER_EDGE (1<<2) +#define ACPI_MADT_TRIGGER_RESERVED (2<<2) +#define ACPI_MADT_TRIGGER_LEVEL (3<<2) + +/******************************************************************************* + * + * MCFG - PCI Memory Mapped Configuration table and subtable + * Version 1 + * + * Conforms to "PCI Firmware Specification", Revision 3.0, June 20, 2005 + * + ******************************************************************************/ + +struct acpi_table_mcfg { + struct acpi_table_header header; /* Common ACPI table header */ + u8 reserved[8]; +}; + +/* Subtable */ + +struct acpi_mcfg_allocation { + u64 address; /* Base address, processor-relative */ + u16 pci_segment; /* PCI segment group number */ + u8 start_bus_number; /* Starting PCI Bus number */ + u8 end_bus_number; /* Final PCI Bus number */ + u32 reserved; +}; + +/******************************************************************************* + * + * MCHI - Management Controller Host Interface Table + * Version 1 + * + * Conforms to "Management Component Transport Protocol (MCTP) Host + * Interface Specification", Revision 1.0.0a, October 13, 2009 + * + ******************************************************************************/ + +struct acpi_table_mchi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; + u8 protocol; + u64 protocol_data; + u8 interrupt_type; + u8 gpe; + u8 pci_device_flag; + u32 global_interrupt; + struct acpi_generic_address control_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; +}; + +/******************************************************************************* + * + * MPST - Memory Power State Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +#define ACPI_MPST_CHANNEL_INFO \ + u8 channel_id; \ + u8 reserved1[3]; \ + u16 power_node_count; \ + u16 reserved2; + +/* Main table */ + +struct acpi_table_mpst { + struct acpi_table_header header; /* Common ACPI table header */ + ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ +}; + +/* Memory Platform Communication Channel Info */ + +struct acpi_mpst_channel { + ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */ +}; + +/* Memory Power Node Structure */ + +struct acpi_mpst_power_node { + u8 flags; + u8 reserved1; + u16 node_id; + u32 length; + u64 range_address; + u64 range_length; + u32 
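+
+/*
+ * Illustrative sketch, not part of the upstream header: an MCFG
+ * allocation above maps ECAM config space for its bus range at Address,
+ * with one 4 KB region per function located by bus/device/function as in
+ * the PCI Firmware Specification. The helper name is hypothetical.
+ */
+static inline u64 mcfg_ecam_addr(struct acpi_mcfg_allocation *alloc,
+				 u8 bus, u8 device, u8 function)
+{
+	return alloc->address + (((u64)(bus - alloc->start_bus_number) << 20) |
+				 ((u64)device << 15) | ((u64)function << 12));
+}
+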
num_power_states; + u32 num_physical_components; +}; + +/* Values for Flags field above */ + +#define ACPI_MPST_ENABLED 1 +#define ACPI_MPST_POWER_MANAGED 2 +#define ACPI_MPST_HOT_PLUG_CAPABLE 4 + +/* Memory Power State Structure (follows POWER_NODE above) */ + +struct acpi_mpst_power_state { + u8 power_state; + u8 info_index; +}; + +/* Physical Component ID Structure (follows POWER_STATE above) */ + +struct acpi_mpst_component { + u16 component_id; +}; + +/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */ + +struct acpi_mpst_data_hdr { + u16 characteristics_count; + u16 reserved; +}; + +struct acpi_mpst_power_data { + u8 structure_id; + u8 flags; + u16 reserved1; + u32 average_power; + u32 power_saving; + u64 exit_latency; + u64 reserved2; +}; + +/* Values for Flags field above */ + +#define ACPI_MPST_PRESERVE 1 +#define ACPI_MPST_AUTOENTRY 2 +#define ACPI_MPST_AUTOEXIT 4 + +/* Shared Memory Region (not part of an ACPI table) */ + +struct acpi_mpst_shared { + u32 signature; + u16 pcc_command; + u16 pcc_status; + u32 command_register; + u32 status_register; + u32 power_state_id; + u32 power_node_id; + u64 energy_consumed; + u64 average_power; +}; + +/******************************************************************************* + * + * MSCT - Maximum System Characteristics Table (ACPI 4.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_msct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 proximity_offset; /* Location of proximity info struct(s) */ + u32 max_proximity_domains; /* Max number of proximity domains */ + u32 max_clock_domains; /* Max number of clock domains */ + u64 max_address; /* Max physical address in system */ +}; + +/* subtable - Maximum Proximity Domain Information. Version 1 */ + +struct acpi_msct_proximity { + u8 revision; + u8 length; + u32 range_start; /* Start of domain range */ + u32 range_end; /* End of domain range */ + u32 processor_capacity; + u64 memory_capacity; /* In bytes */ +}; + +/******************************************************************************* + * + * MSDM - Microsoft Data Management table + * + * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", + * November 29, 2011. Copyright 2011 Microsoft + * + ******************************************************************************/ + +/* Basic MSDM table is only the common ACPI header */ + +struct acpi_table_msdm { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/******************************************************************************* + * + * MTMR - MID Timer Table + * Version 1 + * + * Conforms to "Simple Firmware Interface Specification", + * Draft 0.8.2, Oct 19, 2010 + * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table. 
+ * + ******************************************************************************/ + +struct acpi_table_mtmr { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* MTMR entry */ + +struct acpi_mtmr_entry { + struct acpi_generic_address physical_address; + u32 frequency; + u32 irq; +}; + +/******************************************************************************* + * + * NFIT - NVDIMM Interface Table (ACPI 6.0+) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_nfit { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; /* Reserved, must be zero */ +}; + +/* Subtable header for NFIT */ + +struct acpi_nfit_header { + u16 type; + u16 length; +}; + +/* Values for subtable type in struct acpi_nfit_header */ + +enum acpi_nfit_type { + ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0, + ACPI_NFIT_TYPE_MEMORY_MAP = 1, + ACPI_NFIT_TYPE_INTERLEAVE = 2, + ACPI_NFIT_TYPE_SMBIOS = 3, + ACPI_NFIT_TYPE_CONTROL_REGION = 4, + ACPI_NFIT_TYPE_DATA_REGION = 5, + ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, + ACPI_NFIT_TYPE_CAPABILITIES = 7, + ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ +}; + +/* + * NFIT Subtables + */ + +/* 0: System Physical Address Range Structure */ + +struct acpi_nfit_system_address { + struct acpi_nfit_header header; + u16 range_index; + u16 flags; + u32 reserved; /* Reserved, must be zero */ + u32 proximity_domain; + u8 range_guid[16]; + u64 address; + u64 length; + u64 memory_mapping; +}; + +/* Flags */ + +#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */ +#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */ + +/* Range Type GUIDs appear in the include/acuuid.h file */ + +/* 1: Memory Device to System Address Range Map Structure */ + +struct acpi_nfit_memory_map { + struct acpi_nfit_header header; + u32 device_handle; + u16 physical_id; + u16 region_id; + u16 range_index; + u16 region_index; + u64 region_size; + u64 region_offset; + u64 address; + u16 interleave_index; + u16 interleave_ways; + u16 flags; + u16 reserved; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */ +#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */ +#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */ +#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */ +#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */ +#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */ +#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */ + +/* 2: Interleave Structure */ + +struct acpi_nfit_interleave { + struct acpi_nfit_header header; + u16 interleave_index; + u16 reserved; /* Reserved, must be zero */ + u32 line_count; + u32 line_size; + u32 line_offset[1]; /* Variable length */ +}; + +/* 3: SMBIOS Management Information Structure */ + +struct acpi_nfit_smbios { + struct acpi_nfit_header header; + u32 reserved; /* Reserved, must be zero */ + u8 data[1]; /* Variable length */ +}; + +/* 4: NVDIMM Control Region Structure */ + +struct acpi_nfit_control_region { + struct acpi_nfit_header header; + u16 region_index; + u16 vendor_id; + u16 device_id; + u16 revision_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 subsystem_revision_id; + u8 valid_fields; + u8 manufacturing_location; + u16 
manufacturing_date; + u8 reserved[2]; /* Reserved, must be zero */ + u32 serial_number; + u16 code; + u16 windows; + u64 window_size; + u64 command_offset; + u64 command_size; + u64 status_offset; + u64 status_size; + u16 flags; + u8 reserved1[6]; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */ + +/* valid_fields bits */ + +#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */ + +/* 5: NVDIMM Block Data Window Region Structure */ + +struct acpi_nfit_data_region { + struct acpi_nfit_header header; + u16 region_index; + u16 windows; + u64 offset; + u64 size; + u64 capacity; + u64 start_address; +}; + +/* 6: Flush Hint Address Structure */ + +struct acpi_nfit_flush_address { + struct acpi_nfit_header header; + u32 device_handle; + u16 hint_count; + u8 reserved[6]; /* Reserved, must be zero */ + u64 hint_address[1]; /* Variable length */ +}; + +/* 7: Platform Capabilities Structure */ + +struct acpi_nfit_capabilities { + struct acpi_nfit_header header; + u8 highest_capability; + u8 reserved[3]; /* Reserved, must be zero */ + u32 capabilities; + u32 reserved2; +}; + +/* Capabilities Flags */ + +#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */ +#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */ +#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */ + +/* + * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM + */ +struct nfit_device_handle { + u32 handle; +}; + +/* Device handle construction and extraction macros */ + +#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F +#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0 +#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00 +#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000 +#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000 + +#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0 +#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4 +#define ACPI_NFIT_MEMORY_ID_OFFSET 8 +#define ACPI_NFIT_SOCKET_ID_OFFSET 12 +#define ACPI_NFIT_NODE_ID_OFFSET 16 + +/* Macro to construct a NFIT/NVDIMM device handle */ + +#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \ + ((dimm) | \ + ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \ + ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \ + ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \ + ((node) << ACPI_NFIT_NODE_ID_OFFSET)) + +/* Macros to extract individual fields from a NFIT/NVDIMM device handle */ + +#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \ + ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK) + +#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \ + (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET) + +#define ACPI_NFIT_GET_MEMORY_ID(handle) \ + (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET) + +#define ACPI_NFIT_GET_SOCKET_ID(handle) \ + (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET) + +#define ACPI_NFIT_GET_NODE_ID(handle) \ + (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET) + +/******************************************************************************* + * + * PCCT - Platform Communications Channel Table (ACPI 5.0) + * Version 2 (ACPI 6.2) + * + ******************************************************************************/ + +struct acpi_table_pcct { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; + u64 reserved; +}; + +/* Values for Flags field above */ + +#define ACPI_PCCT_DOORBELL 1 + +/* 
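+
+/*
+ * Illustrative usage sketch, not part of the upstream header: the handle
+ * macros above compose and decompose an NVDIMM _ADR value, e.g.
+ *
+ *	u32 handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(1, 0, 0, 2, 0);
+ *
+ * after which ACPI_NFIT_GET_DIMM_NUMBER(handle) yields 1 and
+ * ACPI_NFIT_GET_SOCKET_ID(handle) yields 2.
+ */
+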
Values for subtable type in struct acpi_subtable_header */ + +enum acpi_pcct_type { + ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */ + ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */ + ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */ + ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */ +}; + +/* + * PCCT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Generic Communications Subspace */ + +struct acpi_pcct_subspace { + struct acpi_subtable_header header; + u8 reserved[6]; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +}; + +/* 1: HW-reduced Communications Subspace (ACPI 5.1) */ + +struct acpi_pcct_hw_reduced { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +}; + +/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */ + +struct acpi_pcct_hw_reduced_type2 { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_write_mask; +}; + +/* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */ + +struct acpi_pcct_ext_pcc_master { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved1; + u64 base_address; + u32 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u32 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_set_mask; + u64 reserved2; + struct acpi_generic_address cmd_complete_register; + u64 cmd_complete_mask; + struct acpi_generic_address cmd_update_register; + u64 cmd_update_preserve_mask; + u64 cmd_update_set_mask; + struct acpi_generic_address error_status_register; + u64 error_status_mask; +}; + +/* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */ + +struct acpi_pcct_ext_pcc_slave { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved1; + u64 base_address; + u32 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u32 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_set_mask; + u64 reserved2; + struct acpi_generic_address cmd_complete_register; + u64 cmd_complete_mask; + struct acpi_generic_address cmd_update_register; + u64 cmd_update_preserve_mask; + u64 cmd_update_set_mask; + struct acpi_generic_address error_status_register; + u64 error_status_mask; +}; + +/* Values for doorbell flags above */ + +#define ACPI_PCCT_INTERRUPT_POLARITY (1) +#define ACPI_PCCT_INTERRUPT_MODE (1<<1) + +/* + * PCC memory structures (not part of the ACPI table) + */ + +/* Shared Memory Region */ + +struct acpi_pcct_shared_memory { + u32 signature; + u16 command; + u16 status; +}; + +/* Extended PCC Subspace Shared Memory Region 
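+
+/*
+ * Illustrative sketch, not part of the upstream header: a PCC doorbell is
+ * rung read-modify-write style, keeping the bits named by preserve_mask
+ * and setting those in write_mask. Assuming hypothetical register
+ * accessors reg_read()/reg_write() and a generic subspace "ss":
+ *
+ *	u64 val = reg_read(&ss->doorbell_register);
+ *
+ *	val = (val & ss->preserve_mask) | ss->write_mask;
+ *	reg_write(&ss->doorbell_register, val);
+ */
+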
(ACPI 6.2) */ + +struct acpi_pcct_ext_pcc_shared_memory { + u32 signature; + u32 flags; + u32 length; + u32 command; +}; + +/******************************************************************************* + * + * PDTT - Platform Debug Trigger Table (ACPI 6.2) + * Version 0 + * + ******************************************************************************/ + +struct acpi_table_pdtt { + struct acpi_table_header header; /* Common ACPI table header */ + u8 trigger_count; + u8 reserved[3]; + u32 array_offset; +}; + +/* + * PDTT Communication Channel Identifier Structure. + * The number of these structures is defined by trigger_count above, + * starting at array_offset. + */ +struct acpi_pdtt_channel { + u8 subchannel_id; + u8 flags; +}; + +/* Flags for above */ + +#define ACPI_PDTT_RUNTIME_TRIGGER (1) +#define ACPI_PDTT_WAIT_COMPLETION (1<<1) +#define ACPI_PDTT_TRIGGER_ORDER (1<<2) + +/******************************************************************************* + * + * PMTT - Platform Memory Topology Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_pmtt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; +}; + +/* Common header for PMTT subtables that follow main table */ + +struct acpi_pmtt_header { + u8 type; + u8 reserved1; + u16 length; + u16 flags; + u16 reserved2; +}; + +/* Values for Type field above */ + +#define ACPI_PMTT_TYPE_SOCKET 0 +#define ACPI_PMTT_TYPE_CONTROLLER 1 +#define ACPI_PMTT_TYPE_DIMM 2 +#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */ + +/* Values for Flags field above */ + +#define ACPI_PMTT_TOP_LEVEL 0x0001 +#define ACPI_PMTT_PHYSICAL 0x0002 +#define ACPI_PMTT_MEMORY_TYPE 0x000C + +/* + * PMTT subtables, correspond to Type in struct acpi_pmtt_header + */ + +/* 0: Socket Structure */ + +struct acpi_pmtt_socket { + struct acpi_pmtt_header header; + u16 socket_id; + u16 reserved; +}; + +/* 1: Memory Controller subtable */ + +struct acpi_pmtt_controller { + struct acpi_pmtt_header header; + u32 read_latency; + u32 write_latency; + u32 read_bandwidth; + u32 write_bandwidth; + u16 access_width; + u16 alignment; + u16 reserved; + u16 domain_count; +}; + +/* 1a: Proximity Domain substructure */ + +struct acpi_pmtt_domain { + u32 proximity_domain; +}; + +/* 2: Physical Component Identifier (DIMM) */ + +struct acpi_pmtt_physical_component { + struct acpi_pmtt_header header; + u16 component_id; + u16 reserved; + u32 memory_size; + u32 bios_handle; +}; + +/******************************************************************************* + * + * PPTT - Processor Properties Topology Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_pptt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* Values for Type field above */ + +enum acpi_pptt_type { + ACPI_PPTT_TYPE_PROCESSOR = 0, + ACPI_PPTT_TYPE_CACHE = 1, + ACPI_PPTT_TYPE_ID = 2, + ACPI_PPTT_TYPE_RESERVED = 3 +}; + +/* 0: Processor Hierarchy Node Structure */ + +struct acpi_pptt_processor { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 parent; + u32 acpi_processor_id; + u32 number_of_priv_resources; +}; + +/* Flags */ + +#define ACPI_PPTT_PHYSICAL_PACKAGE (1) +#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (1<<1) +#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD (1<<2) /* ACPI 6.3 */ +#define ACPI_PPTT_ACPI_LEAF_NODE (1<<3) /* ACPI 6.3 */ +#define ACPI_PPTT_ACPI_IDENTICAL 
(1<<4) /* ACPI 6.3 */ + +/* 1: Cache Type Structure */ + +struct acpi_pptt_cache { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 next_level_of_cache; + u32 size; + u32 number_of_sets; + u8 associativity; + u8 attributes; + u16 line_size; +}; + +/* Flags */ + +#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */ +#define ACPI_PPTT_NUMBER_OF_SETS_VALID (1<<1) /* Number of sets valid */ +#define ACPI_PPTT_ASSOCIATIVITY_VALID (1<<2) /* Associativity valid */ +#define ACPI_PPTT_ALLOCATION_TYPE_VALID (1<<3) /* Allocation type valid */ +#define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */ +#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */ +#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */ + +/* Masks for Attributes */ + +#define ACPI_PPTT_MASK_ALLOCATION_TYPE (0x03) /* Allocation type */ +#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */ +#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */ + +/* Attributes describing cache */ +#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */ +#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */ +#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */ +#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */ + +#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */ +#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */ +#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */ +#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */ + +#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */ +#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */ + +/* 2: ID Structure */ + +struct acpi_pptt_id { + struct acpi_subtable_header header; + u16 reserved; + u32 vendor_id; + u64 level1_id; + u64 level2_id; + u16 major_rev; + u16 minor_rev; + u16 spin_rev; +}; + +/******************************************************************************* + * + * RASF - RAS Feature Table (ACPI 5.0) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_rasf { + struct acpi_table_header header; /* Common ACPI table header */ + u8 channel_id[12]; +}; + +/* RASF Platform Communication Channel Shared Memory Region */ + +struct acpi_rasf_shared_memory { + u32 signature; + u16 command; + u16 status; + u16 version; + u8 capabilities[16]; + u8 set_capabilities[16]; + u16 num_parameter_blocks; + u32 set_capabilities_status; +}; + +/* RASF Parameter Block Structure Header */ + +struct acpi_rasf_parameter_block { + u16 type; + u16 version; + u16 length; +}; + +/* RASF Parameter Block Structure for PATROL_SCRUB */ + +struct acpi_rasf_patrol_scrub_parameter { + struct acpi_rasf_parameter_block header; + u16 patrol_scrub_command; + u64 requested_address_range[2]; + u64 actual_address_range[2]; + u16 flags; + u8 requested_speed; +}; + +/* Masks for Flags and Speed fields above */ + +#define ACPI_RASF_SCRUBBER_RUNNING 1 +#define ACPI_RASF_SPEED (7<<1) +#define ACPI_RASF_SPEED_SLOW (0<<1) +#define ACPI_RASF_SPEED_MEDIUM (4<<1) +#define ACPI_RASF_SPEED_FAST (7<<1) + +/* Channel Commands */ + +enum acpi_rasf_commands { + ACPI_RASF_EXECUTE_RASF_COMMAND = 1 +}; + +/* Platform RAS Capabilities */ + +enum acpi_rasf_capabiliities { + ACPI_HW_PATROL_SCRUB_SUPPORTED = 0, + ACPI_SW_PATROL_SCRUB_EXPOSED = 1 +}; + +/* Patrol Scrub 
Commands */ + +enum acpi_rasf_patrol_scrub_commands { + ACPI_RASF_GET_PATROL_PARAMETERS = 1, + ACPI_RASF_START_PATROL_SCRUBBER = 2, + ACPI_RASF_STOP_PATROL_SCRUBBER = 3 +}; + +/* Channel Command flags */ + +#define ACPI_RASF_GENERATE_SCI (1<<15) + +/* Status values */ + +enum acpi_rasf_status { + ACPI_RASF_SUCCESS = 0, + ACPI_RASF_NOT_VALID = 1, + ACPI_RASF_NOT_SUPPORTED = 2, + ACPI_RASF_BUSY = 3, + ACPI_RASF_FAILED = 4, + ACPI_RASF_ABORTED = 5, + ACPI_RASF_INVALID_DATA = 6 +}; + +/* Status flags */ + +#define ACPI_RASF_COMMAND_COMPLETE (1) +#define ACPI_RASF_SCI_DOORBELL (1<<1) +#define ACPI_RASF_ERROR (1<<2) +#define ACPI_RASF_STATUS (0x1F<<3) + +/******************************************************************************* + * + * SBST - Smart Battery Specification Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_sbst { + struct acpi_table_header header; /* Common ACPI table header */ + u32 warning_level; + u32 low_level; + u32 critical_level; +}; + +/******************************************************************************* + * + * SDEI - Software Delegated Exception Interface Descriptor Table + * + * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A, + * May 8th, 2017. Copyright 2017 ARM Ltd. + * + ******************************************************************************/ + +struct acpi_table_sdei { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/******************************************************************************* + * + * SDEV - Secure Devices Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_sdev { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +struct acpi_sdev_header { + u8 type; + u8 flags; + u16 length; +}; + +/* Values for subtable type above */ + +enum acpi_sdev_type { + ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0, + ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1, + ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + +/* Values for flags above */ + +#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1) + +/* + * SDEV subtables + */ + +/* 0: Namespace Device Based Secure Device Structure */ + +struct acpi_sdev_namespace { + struct acpi_sdev_header header; + u16 device_id_offset; + u16 device_id_length; + u16 vendor_data_offset; + u16 vendor_data_length; +}; + +/* 1: PCIe Endpoint Device Based Device Structure */ + +struct acpi_sdev_pcie { + struct acpi_sdev_header header; + u16 segment; + u16 start_bus; + u16 path_offset; + u16 path_length; + u16 vendor_data_offset; + u16 vendor_data_length; +}; + +/* 1a: PCIe Endpoint path entry */ + +struct acpi_sdev_pcie_path { + u8 device; + u8 function; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL2_H__ */ diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h new file mode 100644 index 0000000..7a58c10 --- /dev/null +++ b/include/acpi/actbl3.h @@ -0,0 +1,716 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actbl3.h - ACPI Table Definitions + * + * Copyright (C) 2000 - 2019, Intel Corp. 
+ * + *****************************************************************************/ + +#ifndef __ACTBL3_H__ +#define __ACTBL3_H__ + +/******************************************************************************* + * + * Additional ACPI Tables + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + ******************************************************************************/ + +/* + * Values for description table header signatures for tables defined in this + * file. Useful because they make it more difficult to inadvertently type in + * the wrong signature. + */ +#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */ +#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ +#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ +#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ +#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ +#define ACPI_SIG_STAO "STAO" /* Status Override table */ +#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ +#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */ +#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */ +#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ +#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ +#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ +#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ +#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */ +#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */ +#define ACPI_SIG_XENV "XENV" /* Xen Environment table */ +#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */ + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note: C bitfields are not used for this reason: + * + * "Bitfields are great and easy to read, but unfortunately the C language + * does not specify the layout of bitfields in memory, which means they are + * essentially useless for dealing with packed data in on-disk formats or + * binary wire protocols." (Or ACPI tables and buffers.) "If you ask me, + * this decision was a design error in C. Ritchie could have picked an order + * and stuck with it." Norman Ramsey. + * See http://stackoverflow.com/a/1053662/41661 + */ + +/******************************************************************************* + * + * SLIC - Software Licensing Description Table + * + * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)", + * November 29, 2011. 
Copyright 2011 Microsoft + * + ******************************************************************************/ + +/* Basic SLIC table is only the common ACPI header */ + +struct acpi_table_slic { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/******************************************************************************* + * + * SLIT - System Locality Distance Information Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_slit { + struct acpi_table_header header; /* Common ACPI table header */ + u64 locality_count; + u8 entry[1]; /* Real size = localities^2 */ +}; + +/******************************************************************************* + * + * SPCR - Serial Port Console Redirection table + * Version 2 + * + * Conforms to "Serial Port Console Redirection Table", + * Version 1.03, August 10, 2015 + * + ******************************************************************************/ + +struct acpi_table_spcr { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +}; + +/* Masks for pci_flags field above */ + +#define ACPI_SPCR_DO_NOT_DISABLE (1) + +/* Values for Interface Type: See the definition of the DBG2 table */ + +/******************************************************************************* + * + * SPMI - Server Platform Management Interface table + * Version 5 + * + * Conforms to "Intelligent Platform Management Interface Specification + * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with + * June 12, 2009 markup. 
+ * + ******************************************************************************/ + +struct acpi_table_spmi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 interface_type; + u8 reserved; /* Must be 1 */ + u16 spec_revision; /* Version of IPMI */ + u8 interrupt_type; + u8 gpe_number; /* GPE assigned */ + u8 reserved1; + u8 pci_device_flag; + u32 interrupt; + struct acpi_generic_address ipmi_register; + u8 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u8 reserved2; +}; + +/* Values for interface_type above */ + +enum acpi_spmi_interface_types { + ACPI_SPMI_NOT_USED = 0, + ACPI_SPMI_KEYBOARD = 1, + ACPI_SPMI_SMI = 2, + ACPI_SPMI_BLOCK_TRANSFER = 3, + ACPI_SPMI_SMBUS = 4, + ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */ +}; + +/******************************************************************************* + * + * SRAT - System Resource Affinity Table + * Version 3 + * + ******************************************************************************/ + +struct acpi_table_srat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 table_revision; /* Must be value '1' */ + u64 reserved; /* Reserved, must be zero */ +}; + +/* Values for subtable type in struct acpi_subtable_header */ + +enum acpi_srat_type { + ACPI_SRAT_TYPE_CPU_AFFINITY = 0, + ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */ + ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */ + ACPI_SRAT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ +}; + +/* + * SRAT Subtables, correspond to Type in struct acpi_subtable_header + */ + +/* 0: Processor Local APIC/SAPIC Affinity */ + +struct acpi_srat_cpu_affinity { + struct acpi_subtable_header header; + u8 proximity_domain_lo; + u8 apic_id; + u32 flags; + u8 local_sapic_eid; + u8 proximity_domain_hi[3]; + u32 clock_domain; +}; + +/* Flags */ + +#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */ + +/* 1: Memory Affinity */ + +struct acpi_srat_mem_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; /* Reserved, must be zero */ + u64 base_address; + u64 length; + u32 reserved1; + u32 flags; + u64 reserved2; /* Reserved, must be zero */ +}; + +/* Flags */ + +#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */ +#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */ +#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */ + +/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */ + +struct acpi_srat_x2apic_cpu_affinity { + struct acpi_subtable_header header; + u16 reserved; /* Reserved, must be zero */ + u32 proximity_domain; + u32 apic_id; + u32 flags; + u32 clock_domain; + u32 reserved2; +}; + +/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */ + +#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ + +/* 3: GICC Affinity (ACPI 5.1) */ + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +}; + +/* Flags for struct acpi_srat_gicc_affinity */ + +#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */ + +/* 4: GIC ITS Affinity (ACPI 6.2) */ + +struct acpi_srat_gic_its_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; + u32 its_id; +}; + +/* 5: Generic Initiator Affinity Structure
(ACPI 6.3) */ + +struct acpi_srat_generic_affinity { + struct acpi_subtable_header header; + u8 reserved; + u8 device_handle_type; + u32 proximity_domain; + u8 device_handle[16]; + u32 flags; + u32 reserved1; +}; + +/* Flags for struct acpi_srat_generic_affinity */ + +#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ + +/******************************************************************************* + * + * STAO - Status Override Table (_STA override) - ACPI 6.0 + * Version 1 + * + * Conforms to "ACPI Specification for Status Override Table" + * 6 January 2015 + * + ******************************************************************************/ + +struct acpi_table_stao { + struct acpi_table_header header; /* Common ACPI table header */ + u8 ignore_uart; +}; + +/******************************************************************************* + * + * TCPA - Trusted Computing Platform Alliance table + * Version 2 + * + * TCG Hardware Interface Table for TPM 1.2 Clients and Servers + * + * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", + * Version 1.2, Revision 8 + * February 27, 2017 + * + * NOTE: There are two versions of the table with the same signature -- + * the client version and the server version. The common platform_class + * field is used to differentiate the two types of tables. + * + ******************************************************************************/ + +struct acpi_table_tcpa_hdr { + struct acpi_table_header header; /* Common ACPI table header */ + u16 platform_class; +}; + +/* + * Values for platform_class above. + * This is how the client and server subtables are differentiated + */ +#define ACPI_TCPA_CLIENT_TABLE 0 +#define ACPI_TCPA_SERVER_TABLE 1 + +struct acpi_table_tcpa_client { + u32 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ +}; + +struct acpi_table_tcpa_server { + u16 reserved; + u64 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ + u16 spec_revision; + u8 device_flags; + u8 interrupt_flags; + u8 gpe_number; + u8 reserved2[3]; + u32 global_interrupt; + struct acpi_generic_address address; + u32 reserved3; + struct acpi_generic_address config_address; + u8 group; + u8 bus; /* PCI Bus/Segment/Function numbers */ + u8 device; + u8 function; +}; + +/* Values for device_flags above */ + +#define ACPI_TCPA_PCI_DEVICE (1) +#define ACPI_TCPA_BUS_PNP (1<<1) +#define ACPI_TCPA_ADDRESS_VALID (1<<2) + +/* Values for interrupt_flags above */ + +#define ACPI_TCPA_INTERRUPT_MODE (1) +#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1) +#define ACPI_TCPA_SCI_VIA_GPE (1<<2) +#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3) + +/******************************************************************************* + * + * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table + * Version 4 + * + * TCG Hardware Interface Table for TPM 2.0 Clients and Servers + * + * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", + * Version 1.2, Revision 8 + * February 27, 2017 + * + ******************************************************************************/ + +/* Revision 3 */ + +struct acpi_table_tpm23 { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; + u64 control_address; + u32 start_method; +}; + +/* Value for start_method above */ + +#define ACPI_TPM23_ACPI_START_METHOD 2 + +/* + * Optional trailer for revision 3. 
If start method is 2, there is a 4 byte + * reserved area of all zeros. + */ +struct acpi_tmp23_trailer { + u32 reserved; +}; + +/* Revision 4 */ + +struct acpi_table_tpm2 { + struct acpi_table_header header; /* Common ACPI table header */ + u16 platform_class; + u16 reserved; + u64 control_address; + u32 start_method; + + /* Platform-specific data follows */ +}; + +/* Values for start_method above */ + +#define ACPI_TPM2_NOT_ALLOWED 0 +#define ACPI_TPM2_RESERVED1 1 +#define ACPI_TPM2_START_METHOD 2 +#define ACPI_TPM2_RESERVED3 3 +#define ACPI_TPM2_RESERVED4 4 +#define ACPI_TPM2_RESERVED5 5 +#define ACPI_TPM2_MEMORY_MAPPED 6 +#define ACPI_TPM2_COMMAND_BUFFER 7 +#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8 +#define ACPI_TPM2_RESERVED9 9 +#define ACPI_TPM2_RESERVED10 10 +#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */ +#define ACPI_TPM2_RESERVED 12 + +/* Optional trailer appears after any start_method subtables */ + +struct acpi_tpm2_trailer { + u8 method_parameters[12]; + u32 minimum_log_length; /* Minimum length for the event log area */ + u64 log_address; /* Address of the event log area */ +}; + +/* + * Subtables (start_method-specific) + */ + +/* 11: Start Method for ARM SMC (V1.2 Rev 8) */ + +struct acpi_tpm2_arm_smc { + u32 global_interrupt; + u8 interrupt_flags; + u8 operation_flags; + u16 reserved; + u32 function_id; +}; + +/* Values for interrupt_flags above */ + +#define ACPI_TPM2_INTERRUPT_SUPPORT (1) + +/* Values for operation_flags above */ + +#define ACPI_TPM2_IDLE_SUPPORT (1) + +/******************************************************************************* + * + * UEFI - UEFI Boot optimization Table + * Version 1 + * + * Conforms to "Unified Extensible Firmware Interface Specification", + * Version 2.3, May 8, 2009 + * + ******************************************************************************/ + +struct acpi_table_uefi { + struct acpi_table_header header; /* Common ACPI table header */ + u8 identifier[16]; /* UUID identifier */ + u16 data_offset; /* Offset of remaining data in table */ +}; + +/******************************************************************************* + * + * VRTC - Virtual Real Time Clock Table + * Version 1 + * + * Conforms to "Simple Firmware Interface Specification", + * Draft 0.8.2, Oct 19, 2010 + * NOTE: The ACPI VRTC is equivalent to The SFI MRTC table. + * + ******************************************************************************/ + +struct acpi_table_vrtc { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* VRTC entry */ + +struct acpi_vrtc_entry { + struct acpi_generic_address physical_address; + u32 irq; +}; + +/******************************************************************************* + * + * WAET - Windows ACPI Emulated devices Table + * Version 1 + * + * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009 + * + ******************************************************************************/ + +struct acpi_table_waet { + struct acpi_table_header header; /* Common ACPI table header */ + u32 flags; +}; + +/* Masks for Flags field above */ + +#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */ +#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */ + +/******************************************************************************* + * + * WDAT - Watchdog Action Table + * Version 1 + * + * Conforms to "Hardware Watchdog Timers Design Specification", + * Copyright 2006 Microsoft Corporation. 
+ * + ******************************************************************************/ + +struct acpi_table_wdat { + struct acpi_table_header header; /* Common ACPI table header */ + u32 header_length; /* Watchdog Header Length */ + u16 pci_segment; /* PCI Segment number */ + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 reserved[3]; + u32 timer_period; /* Period of one timer count (msec) */ + u32 max_count; /* Maximum counter value supported */ + u32 min_count; /* Minimum counter value */ + u8 flags; + u8 reserved2[3]; + u32 entries; /* Number of watchdog entries that follow */ +}; + +/* Masks for Flags field above */ + +#define ACPI_WDAT_ENABLED (1) +#define ACPI_WDAT_STOPPED 0x80 + +/* WDAT Instruction Entries (actions) */ + +struct acpi_wdat_entry { + u8 action; + u8 instruction; + u16 reserved; + struct acpi_generic_address register_region; + u32 value; /* Value used with Read/Write register */ + u32 mask; /* Bitmask required for this register instruction */ +}; + +/* Values for Action field above */ + +enum acpi_wdat_actions { + ACPI_WDAT_RESET = 1, + ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4, + ACPI_WDAT_GET_COUNTDOWN = 5, + ACPI_WDAT_SET_COUNTDOWN = 6, + ACPI_WDAT_GET_RUNNING_STATE = 8, + ACPI_WDAT_SET_RUNNING_STATE = 9, + ACPI_WDAT_GET_STOPPED_STATE = 10, + ACPI_WDAT_SET_STOPPED_STATE = 11, + ACPI_WDAT_GET_REBOOT = 16, + ACPI_WDAT_SET_REBOOT = 17, + ACPI_WDAT_GET_SHUTDOWN = 18, + ACPI_WDAT_SET_SHUTDOWN = 19, + ACPI_WDAT_GET_STATUS = 32, + ACPI_WDAT_SET_STATUS = 33, + ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */ +}; + +/* Values for Instruction field above */ + +enum acpi_wdat_instructions { + ACPI_WDAT_READ_VALUE = 0, + ACPI_WDAT_READ_COUNTDOWN = 1, + ACPI_WDAT_WRITE_VALUE = 2, + ACPI_WDAT_WRITE_COUNTDOWN = 3, + ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */ + ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */ +}; + +/******************************************************************************* + * + * WDDT - Watchdog Descriptor Table + * Version 1 + * + * Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)", + * Version 001, September 2002 + * + ******************************************************************************/ + +struct acpi_table_wddt { + struct acpi_table_header header; /* Common ACPI table header */ + u16 spec_version; + u16 table_version; + u16 pci_vendor_id; + struct acpi_generic_address address; + u16 max_count; /* Maximum counter value supported */ + u16 min_count; /* Minimum counter value supported */ + u16 period; + u16 status; + u16 capability; +}; + +/* Flags for Status field above */ + +#define ACPI_WDDT_AVAILABLE (1) +#define ACPI_WDDT_ACTIVE (1<<1) +#define ACPI_WDDT_TCO_OS_OWNED (1<<2) +#define ACPI_WDDT_USER_RESET (1<<11) +#define ACPI_WDDT_WDT_RESET (1<<12) +#define ACPI_WDDT_POWER_FAIL (1<<13) +#define ACPI_WDDT_UNKNOWN_RESET (1<<14) + +/* Flags for Capability field above */ + +#define ACPI_WDDT_AUTO_RESET (1) +#define ACPI_WDDT_ALERT_SUPPORT (1<<1) + +/******************************************************************************* + * + * WDRT - Watchdog Resource Table + * Version 1 + * + * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003", + * Version 1.01, August 28, 2006 + * + ******************************************************************************/ + +struct acpi_table_wdrt { + struct acpi_table_header header; /* Common ACPI table header */ + struct acpi_generic_address 
control_register; + struct acpi_generic_address count_register; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; /* PCI Bus number */ + u8 pci_device; /* PCI Device number */ + u8 pci_function; /* PCI Function number */ + u8 pci_segment; /* PCI Segment number */ + u16 max_count; /* Maximum counter value supported */ + u8 units; +}; + +/******************************************************************************* + * + * WPBT - Windows Platform Environment Table (ACPI 6.0) + * Version 1 + * + * Conforms to "Windows Platform Binary Table (WPBT)" 29 November 2011 + * + ******************************************************************************/ + +struct acpi_table_wpbt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 handoff_size; + u64 handoff_address; + u8 layout; + u8 type; + u16 arguments_length; +}; + +/******************************************************************************* + * + * WSMT - Windows SMM Security Migrations Table + * Version 1 + * + * Conforms to "Windows SMM Security Migrations Table", + * Version 1.0, April 18, 2016 + * + ******************************************************************************/ + +struct acpi_table_wsmt { + struct acpi_table_header header; /* Common ACPI table header */ + u32 protection_flags; +}; + +/* Flags for protection_flags field above */ + +#define ACPI_WSMT_FIXED_COMM_BUFFERS (1) +#define ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION (2) +#define ACPI_WSMT_SYSTEM_RESOURCE_PROTECTION (4) + +/******************************************************************************* + * + * XENV - Xen Environment Table (ACPI 6.0) + * Version 1 + * + * Conforms to "ACPI Specification for Xen Environment Table" 4 January 2015 + * + ******************************************************************************/ + +struct acpi_table_xenv { + struct acpi_table_header header; /* Common ACPI table header */ + u64 grant_table_address; + u64 grant_table_size; + u32 event_interrupt; + u8 event_flags; +}; + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL3_H__ */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h new file mode 100644 index 0000000..9373662 --- /dev/null +++ b/include/acpi/actypes.h @@ -0,0 +1,1282 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/****************************************************************************** + * + * Name: actypes.h - Common data types for the entire ACPI subsystem + * + * Copyright (C) 2000 - 2019, Intel Corp. + * + *****************************************************************************/ + +#ifndef __ACTYPES_H__ +#define __ACTYPES_H__ + +/* acpisrc:struct_defs -- for acpisrc conversion */ + +/* + * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent + * header and must be either 32 or 64. 16-bit ACPICA is no longer + * supported, as of 12/2006. + */ +#ifndef ACPI_MACHINE_WIDTH +#error ACPI_MACHINE_WIDTH not defined +#endif + +/* + * Data type ranges + * Note: These macros are designed to be compiler independent as well as + * working around problems that some 32-bit compilers have with 64-bit + * constants. 
+ */ +#define ACPI_UINT8_MAX (u8) (~((u8) 0)) /* 0xFF */ +#define ACPI_UINT16_MAX (u16)(~((u16) 0)) /* 0xFFFF */ +#define ACPI_UINT32_MAX (u32)(~((u32) 0)) /* 0xFFFFFFFF */ +#define ACPI_UINT64_MAX (u64)(~((u64) 0)) /* 0xFFFFFFFFFFFFFFFF */ +#define ACPI_ASCII_MAX 0x7F + +/* + * Architecture-specific ACPICA Subsystem Data Types + * + * The goal of these types is to provide source code portability across + * 16-bit, 32-bit, and 64-bit targets. + * + * 1) The following types are of fixed size for all targets (16/32/64): + * + * u8 Logical boolean + * + * u8 8-bit (1 byte) unsigned value + * u16 16-bit (2 byte) unsigned value + * u32 32-bit (4 byte) unsigned value + * u64 64-bit (8 byte) unsigned value + * + * s16 16-bit (2 byte) signed value + * s32 32-bit (4 byte) signed value + * s64 64-bit (8 byte) signed value + * + * COMPILER_DEPENDENT_UINT64/s64 - These types are defined in the + * compiler-dependent header(s) and were introduced because there is no + * common 64-bit integer type across the various compilation models, as + * shown in the table below. + * + * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit + * char 8 8 8 8 8 8 + * short 16 16 16 16 16 16 + * _int32 32 + * int 32 64 32 32 16 16 + * long 64 64 32 32 32 32 + * long long 64 64 + * pointer 64 64 64 32 32 32 + * + * Note: ILP64 and LP32 are currently not supported. + * + * + * 2) These types represent the native word size of the target mode of the + * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are + * usually used for memory allocation, efficient loop counters, and array + * indexes. The types are similar to the size_t type in the C library and + * are required because there is no C type that consistently represents the + * native data width. acpi_size is needed because there is no guarantee + * that a kernel-level C library is present. + * + * acpi_size 16/32/64-bit unsigned value + * acpi_native_int 16/32/64-bit signed value + */ + +/******************************************************************************* + * + * Common types for all compilers, all targets + * + ******************************************************************************/ + +#ifndef ACPI_USE_SYSTEM_INTTYPES + +typedef unsigned char u8; +typedef unsigned short u16; +typedef short s16; +typedef COMPILER_DEPENDENT_UINT64 u64; +typedef COMPILER_DEPENDENT_INT64 s64; + +#endif /* ACPI_USE_SYSTEM_INTTYPES */ + +/* + * Value returned by acpi_os_get_thread_id. There is no standard "thread_id" + * across operating systems or even the various UNIX systems. Since ACPICA + * only needs the thread ID as a unique thread identifier, we use a u64 + * as the only common data type - it will accommodate any type of pointer or + * any type of integer. It is up to the host-dependent OSL to cast the + * native thread ID type to a u64 (in acpi_os_get_thread_id). 
+ */ +#define acpi_thread_id u64 + +/******************************************************************************* + * + * Types specific to 64-bit targets + * + ******************************************************************************/ + +#if ACPI_MACHINE_WIDTH == 64 + +#ifndef ACPI_USE_SYSTEM_INTTYPES + +typedef unsigned int u32; +typedef int s32; + +#endif /* ACPI_USE_SYSTEM_INTTYPES */ + +typedef s64 acpi_native_int; + +typedef u64 acpi_size; +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; + +#define ACPI_MAX_PTR ACPI_UINT64_MAX +#define ACPI_SIZE_MAX ACPI_UINT64_MAX + +#define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ +#define ACPI_USE_NATIVE_MATH64 /* Has native 64-bit integer support */ + +/* + * In the case of the Itanium Processor Family (IPF), the hardware does not + * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED + * flag to indicate that special precautions must be taken to avoid alignment + * faults. (IA64 or ia64 is currently used by existing compilers to indicate + * IPF.) + * + * Note: EM64T and other X86-64 processors support misaligned transfers, + * so there is no need to define this flag. + */ +#if defined (__IA64__) || defined (__ia64__) +#define ACPI_MISALIGNMENT_NOT_SUPPORTED +#endif + +/******************************************************************************* + * + * Types specific to 32-bit targets + * + ******************************************************************************/ + +#elif ACPI_MACHINE_WIDTH == 32 + +#ifndef ACPI_USE_SYSTEM_INTTYPES + +typedef unsigned int u32; +typedef int s32; + +#endif /* ACPI_USE_SYSTEM_INTTYPES */ + +typedef s32 acpi_native_int; + +typedef u32 acpi_size; + +#ifdef ACPI_32BIT_PHYSICAL_ADDRESS + +/* + * OSPMs can define this to shrink the size of the structures for 32-bit + * non-PAE environments. The ASL compiler may always define this to generate + * 32-bit OSPM-compliant tables. + */ +typedef u32 acpi_io_address; +typedef u32 acpi_physical_address; + +#else /* ACPI_32BIT_PHYSICAL_ADDRESS */ + +/* + * It is reported that, after some calculations, the physical addresses can + * wrap over the 32-bit boundary on 32-bit PAE environments. + * https://bugzilla.kernel.org/show_bug.cgi?id=87971 + */ +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; + +#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */ + +#define ACPI_MAX_PTR ACPI_UINT32_MAX +#define ACPI_SIZE_MAX ACPI_UINT32_MAX + +#else + +/* ACPI_MACHINE_WIDTH must be either 64 or 32 */ + +#error unknown ACPI_MACHINE_WIDTH +#endif + +/******************************************************************************* + * + * OS-dependent types + * + * If the defaults below are not appropriate for the host system, they can + * be defined in the OS-specific header, and this will take precedence. + * + ******************************************************************************/ + +/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ + +#ifndef acpi_cpu_flags +#define acpi_cpu_flags acpi_size +#endif + +/* Object returned from acpi_os_create_cache */ + +#ifndef acpi_cache_t +#ifdef ACPI_USE_LOCAL_CACHE +#define acpi_cache_t struct acpi_memory_list +#else +#define acpi_cache_t void * +#endif +#endif + +/* + * Synchronization objects - Mutexes, Semaphores, and spin_locks + */ +#if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) +/* + * These macros are used if the host OS does not support a mutex object. + * Map the OSL Mutex interfaces to binary semaphores.
+ */ +#define acpi_mutex acpi_semaphore +#define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) +#define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) +#define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) +#define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) +#endif + +/* Configurable types for synchronization objects */ + +#ifndef acpi_spinlock +#define acpi_spinlock void * +#endif + +#ifndef acpi_raw_spinlock +#define acpi_raw_spinlock acpi_spinlock +#endif + +#ifndef acpi_semaphore +#define acpi_semaphore void * +#endif + +#ifndef acpi_mutex +#define acpi_mutex void * +#endif + +/******************************************************************************* + * + * Compiler-dependent types + * + * If the defaults below are not appropriate for the host compiler, they can + * be defined in the compiler-specific header, and this will take precedence. + * + ******************************************************************************/ + +/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ + +#ifndef acpi_uintptr_t +#define acpi_uintptr_t void * +#endif + +/* + * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because + * some compilers can catch printf format string problems + */ +#ifndef ACPI_PRINTF_LIKE +#define ACPI_PRINTF_LIKE(c) +#endif + +/* + * Some compilers complain about unused variables. Sometimes we don't want + * to use all the variables (for example, _acpi_module_name). This allows us + * to tell the compiler in a per-variable manner that a variable + * is unused + */ +#ifndef ACPI_UNUSED_VAR +#define ACPI_UNUSED_VAR +#endif + +/* + * All ACPICA external functions that are available to the rest of the + * kernel are tagged with these macros which can be defined as appropriate + * for the host. + * + * Notes: + * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination + * interfaces that may need special processing. + * ACPI_EXPORT_SYMBOL is used for all other public external functions. + */ +#ifndef ACPI_EXPORT_SYMBOL_INIT +#define ACPI_EXPORT_SYMBOL_INIT(symbol) +#endif + +#ifndef ACPI_EXPORT_SYMBOL +#define ACPI_EXPORT_SYMBOL(symbol) +#endif + +/* + * Compiler/Clibrary-dependent debug initialization. Used for ACPICA + * utilities only. 
+ */ +#ifndef ACPI_DEBUG_INITIALIZE +#define ACPI_DEBUG_INITIALIZE() +#endif + +/******************************************************************************* + * + * Configuration + * + ******************************************************************************/ + +#ifdef ACPI_NO_MEM_ALLOCATIONS + +#define ACPI_ALLOCATE(a) NULL +#define ACPI_ALLOCATE_ZEROED(a) NULL +#define ACPI_FREE(a) +#define ACPI_MEM_TRACKING(a) + +#else /* ACPI_NO_MEM_ALLOCATIONS */ + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS +/* + * Memory allocation tracking (used by acpi_exec to detect memory leaks) + */ +#define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__ +#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS) +#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track ((acpi_size) (a), ACPI_MEM_PARAMETERS) +#define ACPI_FREE(a) acpi_ut_free_and_track (a, ACPI_MEM_PARAMETERS) +#define ACPI_MEM_TRACKING(a) a + +#else +/* + * Normal memory allocation directly via the OS services layer + */ +#define ACPI_ALLOCATE(a) acpi_os_allocate ((acpi_size) (a)) +#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed ((acpi_size) (a)) +#define ACPI_FREE(a) acpi_os_free (a) +#define ACPI_MEM_TRACKING(a) + +#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ + +#endif /* ACPI_NO_MEM_ALLOCATIONS */ + +/****************************************************************************** + * + * ACPI Specification constants (Do not change unless the specification + * changes) + * + *****************************************************************************/ + +/* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ + +#define ACPI_MAX_GPE_BLOCKS 2 + +/* Default ACPI register widths */ + +#define ACPI_GPE_REGISTER_WIDTH 8 +#define ACPI_PM1_REGISTER_WIDTH 16 +#define ACPI_PM2_REGISTER_WIDTH 8 +#define ACPI_PM_TIMER_WIDTH 32 +#define ACPI_RESET_REGISTER_WIDTH 8 + +/* Names within the namespace are 4 bytes long */ + +#define ACPI_NAMESEG_SIZE 4 /* Fixed by ACPI spec */ +#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ +#define ACPI_PATH_SEPARATOR '.' 
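+ +/* + * Illustrative sketch (hypothetical helper, not an ACPICA interface): how + * the pathname constants above combine. An absolute namepath of "depth" + * name segments, e.g. \_SB_.PCI0.LPCB with depth 3, needs + * ACPI_PATH_SEGMENT_LENGTH bytes per segment (the leading '\' root prefix + * takes the place of the first '.' separator) plus one byte for the + * terminating null: 3 * 5 + 1 = 16 bytes. + */ +static inline acpi_size acpi_example_path_buffer_size(u32 depth) +{ + return (((acpi_size) depth) * ACPI_PATH_SEGMENT_LENGTH) + 1; +}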
+ +/* Sizes for ACPI table headers */ + +#define ACPI_OEM_ID_SIZE 6 +#define ACPI_OEM_TABLE_ID_SIZE 8 + +/* ACPI/PNP hardware IDs */ + +#define PCI_ROOT_HID_STRING "PNP0A03" +#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" + +/* PM Timer ticks per second (HZ) */ + +#define ACPI_PM_TIMER_FREQUENCY 3579545 + +/******************************************************************************* + * + * Independent types + * + ******************************************************************************/ + +/* Logical defines and NULL */ + +#ifdef FALSE +#undef FALSE +#endif +#define FALSE (1 == 0) + +#ifdef TRUE +#undef TRUE +#endif +#define TRUE (1 == 1) + +#ifndef NULL +#define NULL (void *) 0 +#endif + +/* + * Miscellaneous types + */ +typedef u32 acpi_status; /* All ACPI Exceptions */ +typedef u32 acpi_name; /* 4-byte ACPI name */ +typedef char *acpi_string; /* Null terminated ASCII string */ +typedef void *acpi_handle; /* Actually a ptr to a NS Node */ + +/* Time constants for timer calculations */ + +#define ACPI_MSEC_PER_SEC 1000L + +#define ACPI_USEC_PER_MSEC 1000L +#define ACPI_USEC_PER_SEC 1000000L + +#define ACPI_100NSEC_PER_USEC 10L +#define ACPI_100NSEC_PER_MSEC 10000L +#define ACPI_100NSEC_PER_SEC 10000000L + +#define ACPI_NSEC_PER_USEC 1000L +#define ACPI_NSEC_PER_MSEC 1000000L +#define ACPI_NSEC_PER_SEC 1000000000L + +#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0) + +/* Owner IDs are used to track namespace nodes for selective deletion */ + +typedef u16 acpi_owner_id; +#define ACPI_OWNER_ID_MAX 0xFFF /* 4095 possible owner IDs */ + +#define ACPI_INTEGER_BIT_SIZE 64 +#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ +#define ACPI_MAX64_DECIMAL_DIGITS 20 +#define ACPI_MAX32_DECIMAL_DIGITS 10 +#define ACPI_MAX16_DECIMAL_DIGITS 5 +#define ACPI_MAX8_DECIMAL_DIGITS 3 + +/* + * Constants with special meanings + */ +#define ACPI_ROOT_OBJECT ((acpi_handle) ACPI_TO_POINTER (ACPI_MAX_PTR)) +#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ +#define ACPI_DO_NOT_WAIT 0 + +/* + * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are + * 32 bits. In ACPI version 2 (2000) and later, integers are max 64 bits. + * Note that this pertains to the ACPI integer type only, not to other + * integers used in the implementation of the ACPICA subsystem. + * + * 01/2010: This type is obsolete and has been removed from the entire ACPICA + * code base. It remains here for compatibility with device drivers that use + * the type. However, it will be removed in the future. 
+ */ +typedef u64 acpi_integer; +#define ACPI_INTEGER_MAX ACPI_UINT64_MAX + +/******************************************************************************* + * + * Commonly used macros + * + ******************************************************************************/ + +/* Data manipulation */ + +#define ACPI_LOBYTE(integer) ((u8) (u16)(integer)) +#define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8)) +#define ACPI_LOWORD(integer) ((u16) (u32)(integer)) +#define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16)) +#define ACPI_LODWORD(integer64) ((u32) (u64)(integer64)) +#define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32)) + +#define ACPI_SET_BIT(target,bit) ((target) |= (bit)) +#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) +#define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) +#define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) + +/* Size calculation */ + +#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) + +/* Pointer manipulation */ + +#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) +#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) +#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) +#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b))) +#define ACPI_PTR_DIFF(a, b) ((acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))) + +/* Pointer/Integer type conversions */ + +#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i)) +#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0) +#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0) +#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) +#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) + +/* Optimizations for 4-character (32-bit) acpi_name manipulation */ + +#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED +#define ACPI_COMPARE_NAMESEG(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) +#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) +#else +#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE)) +#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE)) +#endif + +/* Support for the special RSDP signature (8 characters) */ + +#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8)) +#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) + +/* Support for OEMx signature (x can be any character) */ +#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\ + strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE) + +/* + * Algorithm to obtain access bit or byte width. + * Can be used with access_width of struct acpi_generic_address and access_size of + * struct acpi_resource_generic_register. 
+ */ +#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1)) + +/******************************************************************************* + * + * Miscellaneous constants + * + ******************************************************************************/ + +/* + * Initialization sequence options + */ +#define ACPI_FULL_INITIALIZATION 0x0000 +#define ACPI_NO_FACS_INIT 0x0001 +#define ACPI_NO_ACPI_ENABLE 0x0002 +#define ACPI_NO_HARDWARE_INIT 0x0004 +#define ACPI_NO_EVENT_INIT 0x0008 +#define ACPI_NO_HANDLER_INIT 0x0010 +#define ACPI_NO_OBJECT_INIT 0x0020 +#define ACPI_NO_DEVICE_INIT 0x0040 +#define ACPI_NO_ADDRESS_SPACE_INIT 0x0080 + +/* + * Initialization state + */ +#define ACPI_SUBSYSTEM_INITIALIZE 0x01 +#define ACPI_INITIALIZED_OK 0x02 + +/* + * Power state values + */ +#define ACPI_STATE_UNKNOWN (u8) 0xFF + +#define ACPI_STATE_S0 (u8) 0 +#define ACPI_STATE_S1 (u8) 1 +#define ACPI_STATE_S2 (u8) 2 +#define ACPI_STATE_S3 (u8) 3 +#define ACPI_STATE_S4 (u8) 4 +#define ACPI_STATE_S5 (u8) 5 +#define ACPI_S_STATES_MAX ACPI_STATE_S5 +#define ACPI_S_STATE_COUNT 6 + +#define ACPI_STATE_D0 (u8) 0 +#define ACPI_STATE_D1 (u8) 1 +#define ACPI_STATE_D2 (u8) 2 +#define ACPI_STATE_D3_HOT (u8) 3 +#define ACPI_STATE_D3 (u8) 4 +#define ACPI_STATE_D3_COLD ACPI_STATE_D3 +#define ACPI_D_STATES_MAX ACPI_STATE_D3 +#define ACPI_D_STATE_COUNT 5 + +#define ACPI_STATE_C0 (u8) 0 +#define ACPI_STATE_C1 (u8) 1 +#define ACPI_STATE_C2 (u8) 2 +#define ACPI_STATE_C3 (u8) 3 +#define ACPI_C_STATES_MAX ACPI_STATE_C3 +#define ACPI_C_STATE_COUNT 4 + +/* + * Sleep type invalid value + */ +#define ACPI_SLEEP_TYPE_MAX 0x7 +#define ACPI_SLEEP_TYPE_INVALID 0xFF + +/* + * Standard notify values + */ +#define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 +#define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 +#define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 +#define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 +#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 +#define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 +#define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 +#define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 +#define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 +#define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 +#define ACPI_NOTIFY_RESERVED (u8) 0x0A +#define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B +#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C +#define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D +#define ACPI_NOTIFY_MEMORY_UPDATE (u8) 0x0E +#define ACPI_NOTIFY_DISCONNECT_RECOVER (u8) 0x0F + +#define ACPI_GENERIC_NOTIFY_MAX 0x0F +#define ACPI_SPECIFIC_NOTIFY_MAX 0x84 + +/* + * Types associated with ACPI names and objects. The first group of + * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition + * of the ACPI object_type() operator (See the ACPI Spec). Therefore, + * only add to the first group if the spec changes. + * + * NOTE: Types must be kept in sync with the global acpi_ns_properties + * and acpi_ns_type_names arrays. 
+ */ +typedef u32 acpi_object_type; + +#define ACPI_TYPE_ANY 0x00 +#define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ +#define ACPI_TYPE_STRING 0x02 +#define ACPI_TYPE_BUFFER 0x03 +#define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ +#define ACPI_TYPE_FIELD_UNIT 0x05 +#define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ +#define ACPI_TYPE_EVENT 0x07 +#define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ +#define ACPI_TYPE_MUTEX 0x09 +#define ACPI_TYPE_REGION 0x0A +#define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ +#define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ +#define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ +#define ACPI_TYPE_BUFFER_FIELD 0x0E +#define ACPI_TYPE_DDB_HANDLE 0x0F +#define ACPI_TYPE_DEBUG_OBJECT 0x10 + +#define ACPI_TYPE_EXTERNAL_MAX 0x10 +#define ACPI_NUM_TYPES (ACPI_TYPE_EXTERNAL_MAX + 1) + +/* + * These are object types that do not map directly to the ACPI + * object_type() operator. They are used for various internal purposes + * only. If new predefined ACPI_TYPEs are added (via the ACPI + * specification), these internal types must move upwards. (There + * is code that depends on these values being contiguous with the + * external types above.) + */ +#define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 +#define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 +#define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 +#define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ +#define ACPI_TYPE_LOCAL_ALIAS 0x15 +#define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 +#define ACPI_TYPE_LOCAL_NOTIFY 0x17 +#define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 +#define ACPI_TYPE_LOCAL_RESOURCE 0x19 +#define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A +#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ + +#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ +#define ACPI_TOTAL_TYPES (ACPI_TYPE_NS_NODE_MAX + 1) + +/* + * These are special object types that never appear in + * a Namespace node, only in an object of union acpi_operand_object + */ +#define ACPI_TYPE_LOCAL_EXTRA 0x1C +#define ACPI_TYPE_LOCAL_DATA 0x1D + +#define ACPI_TYPE_LOCAL_MAX 0x1D + +/* All types above here are invalid */ + +#define ACPI_TYPE_INVALID 0x1E +#define ACPI_TYPE_NOT_FOUND 0xFF + +#define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) + +/* + * All I/O + */ +#define ACPI_READ 0 +#define ACPI_WRITE 1 +#define ACPI_IO_MASK 1 + +/* + * Event Types: Fixed & General Purpose + */ +typedef u32 acpi_event_type; + +/* + * Fixed events + */ +#define ACPI_EVENT_PMTIMER 0 +#define ACPI_EVENT_GLOBAL 1 +#define ACPI_EVENT_POWER_BUTTON 2 +#define ACPI_EVENT_SLEEP_BUTTON 3 +#define ACPI_EVENT_RTC 4 +#define ACPI_EVENT_MAX 4 +#define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 + +/* + * Event status - Per event + * ------------- + * The encoding of acpi_event_status is illustrated below. + * Note that a set bit (1) indicates the property is TRUE + * (e.g. if bit 0 is set then the event is enabled). + * +-------------+-+-+-+-+-+-+ + * | Bits 31:6 |5|4|3|2|1|0| + * +-------------+-+-+-+-+-+-+ + * | | | | | | | + * | | | | | | +- Enabled? + * | | | | | +--- Enabled for wake? + * | | | | +----- Status bit set? + * | | | +------- Enable bit set? + * | | +--------- Has a handler? + * | +----------- Masked? 
+ * +----------------- + */ +typedef u32 acpi_event_status; + +#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 +#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 +#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 +#define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04 +#define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08 +#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10 +#define ACPI_EVENT_FLAG_MASKED (acpi_event_status) 0x20 +#define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET + +/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ + +#define ACPI_GPE_ENABLE 0 +#define ACPI_GPE_DISABLE 1 +#define ACPI_GPE_CONDITIONAL_ENABLE 2 + +/* + * GPE info flags - Per GPE + * +---+-+-+-+---+ + * |7:6|5|4|3|2:0| + * +---+-+-+-+---+ + * | | | | | + * | | | | +-- Type of dispatch:to method, handler, notify, or none + * | | | +----- Interrupt type: edge or level triggered + * | | +------- Is a Wake GPE + * | +--------- Has been enabled automatically at init time + * +------------ + */ +#define ACPI_GPE_DISPATCH_NONE (u8) 0x00 +#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 +#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 +#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 +#define ACPI_GPE_DISPATCH_RAW_HANDLER (u8) 0x04 +#define ACPI_GPE_DISPATCH_MASK (u8) 0x07 +#define ACPI_GPE_DISPATCH_TYPE(flags) ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK)) + +#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x08 +#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 +#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08 + +#define ACPI_GPE_CAN_WAKE (u8) 0x10 +#define ACPI_GPE_AUTO_ENABLED (u8) 0x20 +#define ACPI_GPE_INITIALIZED (u8) 0x40 + +/* + * Flags for GPE and Lock interfaces + */ +#define ACPI_NOT_ISR 0x1 +#define ACPI_ISR 0x0 + +/* Notify types */ + +#define ACPI_SYSTEM_NOTIFY 0x1 +#define ACPI_DEVICE_NOTIFY 0x2 +#define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) +#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 +#define ACPI_NUM_NOTIFY_TYPES 2 + +#define ACPI_MAX_SYS_NOTIFY 0x7F +#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF + +#define ACPI_SYSTEM_HANDLER_LIST 0 /* Used as index, must be SYSTEM_NOTIFY -1 */ +#define ACPI_DEVICE_HANDLER_LIST 1 /* Used as index, must be DEVICE_NOTIFY -1 */ + +/* Address Space (Operation Region) Types */ + +typedef u8 acpi_adr_space_type; + +#define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 +#define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 +#define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 +#define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 +#define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 +#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 +#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 +#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 +#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8 +#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9 +#define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10 + +#define ACPI_NUM_PREDEFINED_REGIONS 11 + +/* + * Special Address Spaces + * + * Note: A Data Table region is a special type of operation region + * that has its own AML opcode. However, internally, the AML + * interpreter simply creates an operation region with an an address + * space type of ACPI_ADR_SPACE_DATA_TABLE. 
+ */ +#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */ +#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 0x7F + +/* Values for _REG connection code */ + +#define ACPI_REG_DISCONNECT 0 +#define ACPI_REG_CONNECT 1 + +/* + * bit_register IDs + * + * These values are intended to be used by the hardware interfaces + * and are mapped to individual bitfields defined within the ACPI + * registers. See the acpi_gbl_bit_register_info global table in utglobal.c + * for this mapping. + */ + +/* PM1 Status register */ + +#define ACPI_BITREG_TIMER_STATUS 0x00 +#define ACPI_BITREG_BUS_MASTER_STATUS 0x01 +#define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 +#define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 +#define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 +#define ACPI_BITREG_RT_CLOCK_STATUS 0x05 +#define ACPI_BITREG_WAKE_STATUS 0x06 +#define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 + +/* PM1 Enable register */ + +#define ACPI_BITREG_TIMER_ENABLE 0x08 +#define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 +#define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A +#define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B +#define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C +#define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D + +/* PM1 Control register */ + +#define ACPI_BITREG_SCI_ENABLE 0x0E +#define ACPI_BITREG_BUS_MASTER_RLD 0x0F +#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 +#define ACPI_BITREG_SLEEP_TYPE 0x11 +#define ACPI_BITREG_SLEEP_ENABLE 0x12 + +/* PM2 Control register */ + +#define ACPI_BITREG_ARB_DISABLE 0x13 + +#define ACPI_BITREG_MAX 0x13 +#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 + +/* Status register values. A 1 clears a status bit. 0 = no effect */ + +#define ACPI_CLEAR_STATUS 1 + +/* Enable and Control register values */ + +#define ACPI_ENABLE_EVENT 1 +#define ACPI_DISABLE_EVENT 0 + +/* + * External ACPI object definition + */ + +/* + * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package + * element or an unresolved named reference. 
+ */ +union acpi_object { + acpi_object_type type; /* See definition of acpi_ns_type for values */ + struct { + acpi_object_type type; /* ACPI_TYPE_INTEGER */ + u64 value; /* The actual number */ + } integer; + + struct { + acpi_object_type type; /* ACPI_TYPE_STRING */ + u32 length; /* # of bytes in string, excluding trailing null */ + char *pointer; /* points to the string value */ + } string; + + struct { + acpi_object_type type; /* ACPI_TYPE_BUFFER */ + u32 length; /* # of bytes in buffer */ + u8 *pointer; /* points to the buffer */ + } buffer; + + struct { + acpi_object_type type; /* ACPI_TYPE_PACKAGE */ + u32 count; /* # of elements in package */ + union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ + } package; + + struct { + acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ + acpi_object_type actual_type; /* Type associated with the Handle */ + acpi_handle handle; /* object reference */ + } reference; + + struct { + acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + + struct { + acpi_object_type type; /* ACPI_TYPE_POWER */ + u32 system_level; + u32 resource_order; + } power_resource; +}; + +/* + * List of objects, used as a parameter list for control method evaluation + */ +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +/* + * Miscellaneous common Data Structures used by the interfaces + */ +#define ACPI_NO_BUFFER 0 + +#ifdef ACPI_NO_MEM_ALLOCATIONS + +#define ACPI_ALLOCATE_BUFFER (acpi_size) (0) +#define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (0) + +#else /* ACPI_NO_MEM_ALLOCATIONS */ + +#define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) /* Let ACPICA allocate buffer */ +#define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) /* For internal use only (enables tracking) */ + +#endif /* ACPI_NO_MEM_ALLOCATIONS */ + +struct acpi_buffer { + acpi_size length; /* Length in bytes of the buffer */ + void *pointer; /* pointer to buffer */ +}; + +/* + * name_type for acpi_get_name + */ +#define ACPI_FULL_PATHNAME 0 +#define ACPI_SINGLE_NAME 1 +#define ACPI_FULL_PATHNAME_NO_TRAILING 2 +#define ACPI_NAME_TYPE_MAX 2 + +/* + * Predefined Namespace items + */ +struct acpi_predefined_names { + const char *name; + u8 type; + char *val; +}; + +/* + * Structure and flags for acpi_get_system_info + */ +#define ACPI_SYS_MODE_UNKNOWN 0x0000 +#define ACPI_SYS_MODE_ACPI 0x0001 +#define ACPI_SYS_MODE_LEGACY 0x0002 +#define ACPI_SYS_MODES_MASK 0x0003 + +/* + * System info returned by acpi_get_system_info() + */ +struct acpi_system_info { + u32 acpi_ca_version; + u32 flags; + u32 timer_resolution; + u32 reserved1; + u32 reserved2; + u32 debug_level; + u32 debug_layer; +}; + +/* + * System statistics returned by acpi_get_statistics() + */ +struct acpi_statistics { + u32 sci_count; + u32 gpe_count; + u32 fixed_event_count[ACPI_NUM_FIXED_EVENTS]; + u32 method_count; +}; + +/* + * Types specific to the OS service interfaces + */ +typedef u32 + (ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); + +typedef void + (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); + +/* + * Various handlers and callback procedures + */ +typedef +u32 (*acpi_sci_handler) (void *context); + +typedef +void (*acpi_gbl_event_handler) (u32 event_type, + acpi_handle device, + u32 event_number, void *context); + +#define ACPI_EVENT_TYPE_GPE 0 +#define ACPI_EVENT_TYPE_FIXED 1 + +typedef +u32(*acpi_event_handler) (void *context); + +typedef +u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 
gpe_number, void *context); + +typedef +void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); + +typedef +void (*acpi_object_handler) (acpi_handle object, void *data); + +typedef +acpi_status (*acpi_init_handler) (acpi_handle object, u32 function); + +#define ACPI_INIT_DEVICE_INI 1 + +typedef +acpi_status (*acpi_exception_handler) (acpi_status aml_status, + acpi_name name, + u16 opcode, + u32 aml_offset, void *context); + +/* Table Event handler (Load, load_table, etc.) and types */ + +typedef +acpi_status (*acpi_table_handler) (u32 event, void *table, void *context); + +/* Table Event Types */ + +#define ACPI_TABLE_EVENT_LOAD 0x0 +#define ACPI_TABLE_EVENT_UNLOAD 0x1 +#define ACPI_TABLE_EVENT_INSTALL 0x2 +#define ACPI_TABLE_EVENT_UNINSTALL 0x3 +#define ACPI_NUM_TABLE_EVENTS 4 + +/* Address Spaces (For Operation Regions) */ + +typedef +acpi_status (*acpi_adr_space_handler) (u32 function, + acpi_physical_address address, + u32 bit_width, + u64 *value, + void *handler_context, + void *region_context); + +#define ACPI_DEFAULT_HANDLER NULL + +/* Special Context data for generic_serial_bus/general_purpose_io (ACPI 5.0) */ + +struct acpi_connection_info { + u8 *connection; + u16 length; + u8 access_length; +}; + +typedef +acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle, + u32 function, + void *handler_context, + void **region_context); + +#define ACPI_REGION_ACTIVATE 0 +#define ACPI_REGION_DEACTIVATE 1 + +typedef +acpi_status (*acpi_walk_callback) (acpi_handle object, + u32 nesting_level, + void *context, void **return_value); + +typedef +u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); + +/* Interrupt handler return values */ + +#define ACPI_INTERRUPT_NOT_HANDLED 0x00 +#define ACPI_INTERRUPT_HANDLED 0x01 + +/* GPE handler return values */ + +#define ACPI_REENABLE_GPE 0x80 + +/* Length of 32-bit EISAID values when converted back to a string */ + +#define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ + +/* Length of UUID (string) values */ + +#define ACPI_UUID_LENGTH 16 + +/* Length of 3-byte PCI class code values when converted back to a string */ + +#define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */ + +/* Structures used for device/processor HID, UID, CID */ + +struct acpi_pnp_device_id { + u32 length; /* Length of string + null */ + char *string; +}; + +struct acpi_pnp_device_id_list { + u32 count; /* Number of IDs in Ids array */ + u32 list_size; /* Size of list, including ID strings */ + struct acpi_pnp_device_id ids[1]; /* ID array */ +}; + +/* + * Structure returned from acpi_get_object_info. + * Optimized for both 32-bit and 64-bit builds. 
+ */ +struct acpi_device_info { + u32 info_size; /* Size of info, including ID strings */ + u32 name; /* ACPI object Name */ + acpi_object_type type; /* ACPI object Type */ + u8 param_count; /* If a method, required parameter count */ + u16 valid; /* Indicates which optional fields are valid */ + u8 flags; /* Miscellaneous info */ + u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ + u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ + u64 address; /* _ADR value */ + struct acpi_pnp_device_id hardware_id; /* _HID value */ + struct acpi_pnp_device_id unique_id; /* _UID value */ + struct acpi_pnp_device_id class_code; /* _CLS value */ + struct acpi_pnp_device_id_list compatible_id_list; /* _CID list */ +}; + +/* Values for Flags field above (acpi_get_object_info) */ + +#define ACPI_PCI_ROOT_BRIDGE 0x01 + +/* Flags for Valid field above (acpi_get_object_info) */ + +#define ACPI_VALID_ADR 0x0002 +#define ACPI_VALID_HID 0x0004 +#define ACPI_VALID_UID 0x0008 +#define ACPI_VALID_CID 0x0020 +#define ACPI_VALID_CLS 0x0040 +#define ACPI_VALID_SXDS 0x0100 +#define ACPI_VALID_SXWS 0x0200 + +/* Flags for _STA method */ + +#define ACPI_STA_DEVICE_PRESENT 0x01 +#define ACPI_STA_DEVICE_ENABLED 0x02 +#define ACPI_STA_DEVICE_UI 0x04 +#define ACPI_STA_DEVICE_FUNCTIONING 0x08 +#define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ +#define ACPI_STA_BATTERY_PRESENT 0x10 + +/* Context structs for address space handlers */ + +struct acpi_pci_id { + u16 segment; + u16 bus; + u16 device; + u16 function; +}; + +struct acpi_mem_space_context { + u32 length; + acpi_physical_address address; + acpi_physical_address mapped_physical_address; + u8 *mapped_logical_address; + acpi_size mapped_length; +}; + +/* + * struct acpi_memory_list is used only if the ACPICA local cache is enabled + */ +struct acpi_memory_list { + const char *list_name; + void *list_head; + u16 object_size; + u16 max_depth; + u16 current_depth; + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + + /* Statistics for debug memory tracking only */ + + u32 total_allocated; + u32 total_freed; + u32 max_occupied; + u32 total_size; + u32 current_total_size; + u32 requests; + u32 hits; +#endif +}; + +/* Definitions of trace event types */ + +typedef enum { + ACPI_TRACE_AML_METHOD, + ACPI_TRACE_AML_OPCODE, + ACPI_TRACE_AML_REGION +} acpi_trace_event_type; + +/* Definitions of _OSI support */ + +#define ACPI_VENDOR_STRINGS 0x01 +#define ACPI_FEATURE_STRINGS 0x02 +#define ACPI_ENABLE_INTERFACES 0x00 +#define ACPI_DISABLE_INTERFACES 0x04 + +#define ACPI_DISABLE_ALL_VENDOR_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS) +#define ACPI_DISABLE_ALL_FEATURE_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_FEATURE_STRINGS) +#define ACPI_DISABLE_ALL_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS) +#define ACPI_ENABLE_ALL_VENDOR_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS) +#define ACPI_ENABLE_ALL_FEATURE_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS) +#define ACPI_ENABLE_ALL_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS) + +#define ACPI_OSI_WIN_2000 0x01 +#define ACPI_OSI_WIN_XP 0x02 +#define ACPI_OSI_WIN_XP_SP1 0x03 +#define ACPI_OSI_WINSRV_2003 0x04 +#define ACPI_OSI_WIN_XP_SP2 0x05 +#define ACPI_OSI_WINSRV_2003_SP1 0x06 +#define ACPI_OSI_WIN_VISTA 0x07 +#define ACPI_OSI_WINSRV_2008 0x08 +#define ACPI_OSI_WIN_VISTA_SP1 0x09 +#define ACPI_OSI_WIN_VISTA_SP2 0x0A +#define ACPI_OSI_WIN_7 0x0B +#define ACPI_OSI_WIN_8 0x0C +#define ACPI_OSI_WIN_8_1 0x0D +#define ACPI_OSI_WIN_10 0x0E 
+#define ACPI_OSI_WIN_10_RS1             0x0F
+#define ACPI_OSI_WIN_10_RS2             0x10
+#define ACPI_OSI_WIN_10_RS3             0x11
+#define ACPI_OSI_WIN_10_RS4             0x12
+#define ACPI_OSI_WIN_10_RS5             0x13
+#define ACPI_OSI_WIN_10_19H1            0x14
+
+/* Definitions of getopt */
+
+#define ACPI_OPT_END                    -1
+
+#endif				/* __ACTYPES_H__ */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
new file mode 100644
index 0000000..23262ca
--- /dev/null
+++ b/include/acpi/acuuid.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acuuid.h - ACPI-related UUID/GUID definitions
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACUUID_H__
+#define __ACUUID_H__
+
+/*
+ * Note1: UUIDs and GUIDs are defined to be identical in ACPI.
+ *
+ * Note2: This file is standalone and should remain that way.
+ */
+
+/* Controllers */
+
+#define UUID_GPIO_CONTROLLER            "4f248f40-d5e2-499f-834c-27758ea1cd3f"
+#define UUID_USB_CONTROLLER             "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define UUID_SATA_CONTROLLER            "e4db149b-fcfe-425b-a6d8-92357d78fc7f"
+
+/* Devices */
+
+#define UUID_PCI_HOST_BRIDGE            "33db4d5b-1ff7-401c-9657-7441c03dd766"
+#define UUID_I2C_DEVICE                 "3cdff6f7-4267-4555-ad05-b30a3d8938de"
+#define UUID_POWER_BUTTON               "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
+
+/* Interfaces */
+
+#define UUID_DEVICE_LABELING            "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
+#define UUID_PHYSICAL_PRESENCE          "3dddfaa6-361b-4eb4-a424-8d10089d1653"
+
+/* NVDIMM - NFIT table */
+
+#define UUID_VOLATILE_MEMORY            "7305944f-fdda-44e3-b16c-3f22d252e5d0"
+#define UUID_PERSISTENT_MEMORY          "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
+#define UUID_CONTROL_REGION             "92f701f6-13b4-405d-910b-299367e8234c"
+#define UUID_DATA_REGION                "91af0530-5d86-470e-a6b0-0a2db9408249"
+#define UUID_VOLATILE_VIRTUAL_DISK      "77ab535a-45fc-624b-5560-f7b281d1f96e"
+#define UUID_VOLATILE_VIRTUAL_CD        "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
+#define UUID_PERSISTENT_VIRTUAL_DISK    "5cea02c9-4d07-69d3-269f-4496fbe096f9"
+#define UUID_PERSISTENT_VIRTUAL_CD      "08018188-42cd-bb48-100f-5387d53ded3d"
+
+/* Processor Properties (ACPI 6.2) */
+
+#define UUID_CACHE_PROPERTIES           "6DC63E77-257E-4E78-A973-A21F2796898D"
+#define UUID_PHYSICAL_PROPERTY          "DDE4D59A-AA42-4349-B407-EA40F57D9FB7"
+
+/* Miscellaneous */
+
+#define UUID_PLATFORM_CAPABILITIES      "0811b06e-4a27-44f9-8d60-3cbbc22e7b48"
+#define UUID_DYNAMIC_ENUMERATION        "d8c1a3a6-be9b-4c9b-91bf-c3cb81fc5daf"
+#define UUID_BATTERY_THERMAL_LIMIT      "4c2067e3-887d-475c-9720-4af1d3ed602e"
+#define UUID_THERMAL_EXTENSIONS         "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
+#define UUID_DEVICE_PROPERTIES          "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
+
+#endif				/* __ACUUID_H__ */
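These UUIDs are plain string constants; kernel code normally converts one to a guid_t before handing it to the ACPI helpers. A minimal sketch of probing a device _DSM against the device-properties UUID -- hypothetical driver code, assuming a valid acpi_handle and the stock guid_parse()/acpi_check_dsm() helpers; the revision (1) and function mask are arbitrary:

	#include <linux/acpi.h>
	#include <linux/bits.h>
	#include <linux/uuid.h>
	#include <acpi/acuuid.h>

	/* Does 'handle' implement _DSM function 1 under UUID_DEVICE_PROPERTIES? */
	static bool has_props_dsm(acpi_handle handle)
	{
		guid_t guid;

		if (guid_parse(UUID_DEVICE_PROPERTIES, &guid))
			return false;	/* malformed UUID string */

		return acpi_check_dsm(handle, &guid, 1, BIT(1));
	}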
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
new file mode 100644
index 0000000..680f809
--- /dev/null
+++ b/include/acpi/apei.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * apei.h - ACPI Platform Error Interface
+ */
+
+#ifndef ACPI_APEI_H
+#define ACPI_APEI_H
+
+#include <linux/acpi.h>
+#include <linux/cper.h>
+#include <asm/ioctls.h>
+
+#define APEI_ERST_INVALID_RECORD_ID	0xffffffffffffffffULL
+
+#define APEI_ERST_CLEAR_RECORD		_IOW('E', 1, u64)
+#define APEI_ERST_GET_RECORD_COUNT	_IOR('E', 2, u32)
+
+#ifdef __KERNEL__
+
+enum hest_status {
+	HEST_ENABLED,
+	HEST_DISABLED,
+	HEST_NOT_FOUND,
+};
+
+extern int hest_disable;
+extern int erst_disable;
+#ifdef CONFIG_ACPI_APEI_GHES
+extern bool ghes_disable;
+#else
+#define ghes_disable 1
+#endif
+
+#ifdef CONFIG_ACPI_APEI
+void __init acpi_hest_init(void);
+#else
+static inline void acpi_hest_init(void) { return; }
+#endif
+
+typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
+int apei_hest_parse(apei_hest_func_t func, void *data);
+
+int erst_write(const struct cper_record_header *record);
+ssize_t erst_get_record_count(void);
+int erst_get_record_id_begin(int *pos);
+int erst_get_record_id_next(int *pos, u64 *record_id);
+void erst_get_record_id_end(void);
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+		  size_t buflen);
+int erst_clear(u64 record_id);
+
+int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
+void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
+
+#endif
+#endif
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
new file mode 100644
index 0000000..5d8f5d9
--- /dev/null
+++ b/include/acpi/battery.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ACPI_BATTERY_H
+#define __ACPI_BATTERY_H
+
+#define ACPI_BATTERY_CLASS		"battery"
+
+#define ACPI_BATTERY_NOTIFY_STATUS	0x80
+#define ACPI_BATTERY_NOTIFY_INFO	0x81
+#define ACPI_BATTERY_NOTIFY_THRESHOLD	0x82
+
+struct acpi_battery_hook {
+	const char *name;
+	int (*add_battery)(struct power_supply *battery);
+	int (*remove_battery)(struct power_supply *battery);
+	struct list_head list;
+};
+
+void battery_hook_register(struct acpi_battery_hook *hook);
+void battery_hook_unregister(struct acpi_battery_hook *hook);
+
+#endif
diff --git a/include/acpi/button.h b/include/acpi/button.h
new file mode 100644
index 0000000..3a2b853
--- /dev/null
+++ b/include/acpi/button.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ACPI_BUTTON_H
+#define ACPI_BUTTON_H
+
+#include <linux/notifier.h>
+
+#if IS_ENABLED(CONFIG_ACPI_BUTTON)
+extern int acpi_lid_notifier_register(struct notifier_block *nb);
+extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
+extern int acpi_lid_open(void);
+#else
+static inline int acpi_lid_notifier_register(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline int acpi_lid_open(void)
+{
+	return 1;
+}
+#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */
+
+#endif /* ACPI_BUTTON_H */
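The lid interface above is a standard notifier_block registration; a sketch of a hypothetical consumer (names invented; the notifier value is assumed to carry the lid state reported by the ACPI button driver, non-zero meaning open):

	#include <linux/notifier.h>
	#include <linux/printk.h>
	#include <acpi/button.h>

	static int lid_event(struct notifier_block *nb, unsigned long val,
			     void *data)
	{
		pr_info("lid %s\n", val ? "opened" : "closed");
		return NOTIFY_OK;
	}

	static struct notifier_block lid_nb = { .notifier_call = lid_event };

	/* init: acpi_lid_notifier_register(&lid_nb);
	 * exit: acpi_lid_notifier_unregister(&lid_nb); */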
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
new file mode 100644
index 0000000..a6a9373
--- /dev/null
+++ b/include/acpi/cppc_acpi.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CPPC (Collaborative Processor Performance Control) methods used
+ * by CPUfreq drivers.
+ *
+ * (C) Copyright 2014, 2015 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ */
+
+#ifndef _CPPC_ACPI_H
+#define _CPPC_ACPI_H
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+#include <acpi/pcc.h>
+#include <acpi/processor.h>
+
+/* Support CPPCv2 and CPPCv3 */
+#define CPPC_V2_REV	2
+#define CPPC_V3_REV	3
+#define CPPC_V2_NUM_ENT	21
+#define CPPC_V3_NUM_ENT	23
+
+#define PCC_CMD_COMPLETE_MASK	(1 << 0)
+#define PCC_ERROR_MASK		(1 << 2)
+
+#define MAX_CPC_REG_ENT 21
+
+/* CPPC specific PCC commands. */
+#define CMD_READ 0
+#define CMD_WRITE 1
+
+/* Each register has the following format. */
+struct cpc_reg {
+	u8 descriptor;
+	u16 length;
+	u8 space_id;
+	u8 bit_width;
+	u8 bit_offset;
+	u8 access_width;
+	u64 __iomem address;
+} __packed;
+
+/*
+ * Each entry in the CPC table is either
+ * of type ACPI_TYPE_BUFFER or
+ * ACPI_TYPE_INTEGER.
+ */
+struct cpc_register_resource {
+	acpi_object_type type;
+	u64 __iomem *sys_mem_vaddr;
+	union {
+		struct cpc_reg reg;
+		u64 int_value;
+	} cpc_entry;
+};
+
+/* Container to hold the CPC details for each CPU */
+struct cpc_desc {
+	int num_entries;
+	int version;
+	int cpu_id;
+	int write_cmd_status;
+	int write_cmd_id;
+	struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
+	struct acpi_psd_package domain_info;
+	struct kobject kobj;
+};
+
+/* These are indexes into the per-cpu cpc_regs[]. Order is important. */
+enum cppc_regs {
+	HIGHEST_PERF,
+	NOMINAL_PERF,
+	LOW_NON_LINEAR_PERF,
+	LOWEST_PERF,
+	GUARANTEED_PERF,
+	DESIRED_PERF,
+	MIN_PERF,
+	MAX_PERF,
+	PERF_REDUC_TOLERANCE,
+	TIME_WINDOW,
+	CTR_WRAP_TIME,
+	REFERENCE_CTR,
+	DELIVERED_CTR,
+	PERF_LIMITED,
+	ENABLE,
+	AUTO_SEL_ENABLE,
+	AUTO_ACT_WINDOW,
+	ENERGY_PERF,
+	REFERENCE_PERF,
+	LOWEST_FREQ,
+	NOMINAL_FREQ,
+};
+
+/*
+ * Categorization of registers as described
+ * in the ACPI v.5.1 spec.
+ * XXX: Only filling up ones which are used by governors
+ * today.
+ */
+struct cppc_perf_caps {
+	u32 guaranteed_perf;
+	u32 highest_perf;
+	u32 nominal_perf;
+	u32 lowest_perf;
+	u32 lowest_nonlinear_perf;
+	u32 lowest_freq;
+	u32 nominal_freq;
+};
+
+struct cppc_perf_ctrls {
+	u32 max_perf;
+	u32 min_perf;
+	u32 desired_perf;
+};
+
+struct cppc_perf_fb_ctrs {
+	u64 reference;
+	u64 delivered;
+	u64 reference_perf;
+	u64 wraparound_time;
+};
+
+/* Per CPU container for runtime CPPC management. */
+struct cppc_cpudata {
+	int cpu;
+	struct cppc_perf_caps perf_caps;
+	struct cppc_perf_ctrls perf_ctrls;
+	struct cppc_perf_fb_ctrs perf_fb_ctrs;
+	struct cpufreq_policy *cur_policy;
+	unsigned int shared_type;
+	cpumask_var_t shared_cpu_map;
+};
+
+extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
+extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
+extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+extern int acpi_get_psd_map(struct cppc_cpudata **);
+extern unsigned int cppc_get_transition_latency(int cpu);
+extern bool cpc_ffh_supported(void);
+extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
+extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
+
+#endif /* _CPPC_ACPI_H*/
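A rough sketch of how a cpufreq-style consumer drives these calls -- read the capabilities once, then request a desired performance level (error handling trimmed; the choice of nominal_perf is arbitrary):

	#include <acpi/cppc_acpi.h>

	static int set_cpu_to_nominal(int cpu)
	{
		struct cppc_perf_caps caps;
		struct cppc_perf_ctrls ctrls = { 0 };
		int ret;

		ret = cppc_get_perf_caps(cpu, &caps);
		if (ret)
			return ret;

		/* Ask for the sustained (nominal) performance level */
		ctrls.desired_perf = caps.nominal_perf;
		return cppc_set_perf(cpu, &ctrls);
	}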
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
new file mode 100644
index 0000000..e3f1cdd
--- /dev/null
+++ b/include/acpi/ghes.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef GHES_H
+#define GHES_H
+
+#include <acpi/apei.h>
+#include <acpi/hed.h>
+
+/*
+ * One struct ghes is created for each generic hardware error source.
+ * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
+ * handler.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_EXITING		0x0002
+
+struct ghes {
+	union {
+		struct acpi_hest_generic *generic;
+		struct acpi_hest_generic_v2 *generic_v2;
+	};
+	struct acpi_hest_generic_status *estatus;
+	unsigned long flags;
+	union {
+		struct list_head list;
+		struct timer_list timer;
+		unsigned int irq;
+	};
+};
+
+struct ghes_estatus_node {
+	struct llist_node llnode;
+	struct acpi_hest_generic *generic;
+	struct ghes *ghes;
+};
+
+struct ghes_estatus_cache {
+	u32 estatus_len;
+	atomic_t count;
+	struct acpi_hest_generic *generic;
+	unsigned long long time_in;
+	struct rcu_head rcu;
+};
+
+enum {
+	GHES_SEV_NO = 0x0,
+	GHES_SEV_CORRECTED = 0x1,
+	GHES_SEV_RECOVERABLE = 0x2,
+	GHES_SEV_PANIC = 0x3,
+};
+
+int ghes_estatus_pool_init(int num_ghes);
+
+/* From drivers/edac/ghes_edac.c */
+
+#ifdef CONFIG_EDAC_GHES
+void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
+
+int ghes_edac_register(struct ghes *ghes, struct device *dev);
+
+void ghes_edac_unregister(struct ghes *ghes);
+
+#else
+static inline void ghes_edac_report_mem_error(int sev,
+					      struct cper_sec_mem_err *mem_err)
+{
+}
+
+static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline void ghes_edac_unregister(struct ghes *ghes)
+{
+}
+#endif
+
+static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata)
+{
+	return gdata->revision >> 8;
+}
+
+static inline void *acpi_hest_get_payload(struct acpi_hest_generic_data *gdata)
+{
+	if (acpi_hest_get_version(gdata) >= 3)
+		return (void *)(((struct acpi_hest_generic_data_v300 *)(gdata)) + 1);
+
+	return gdata + 1;
+}
+
+static inline int acpi_hest_get_error_length(struct acpi_hest_generic_data *gdata)
+{
+	return ((struct acpi_hest_generic_data *)(gdata))->error_data_length;
+}
+
+static inline int acpi_hest_get_size(struct acpi_hest_generic_data *gdata)
+{
+	if (acpi_hest_get_version(gdata) >= 3)
+		return sizeof(struct acpi_hest_generic_data_v300);
+
+	return sizeof(struct acpi_hest_generic_data);
+}
+
+static inline int acpi_hest_get_record_size(struct acpi_hest_generic_data *gdata)
+{
+	return (acpi_hest_get_size(gdata) + acpi_hest_get_error_length(gdata));
+}
+
+static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
+{
+	return (void *)(gdata) + acpi_hest_get_record_size(gdata);
+}
+
+#define apei_estatus_for_each_section(estatus, section)			\
+	for (section = (struct acpi_hest_generic_data *)(estatus + 1);	\
+	     (void *)section - (void *)(estatus + 1) < estatus->data_length; \
+	     section = acpi_hest_get_next(section))
+
+#ifdef CONFIG_ACPI_APEI_SEA
+int ghes_notify_sea(void);
+#else
+static inline int ghes_notify_sea(void) { return -ENOENT; }
+#endif
+
+#endif /* GHES_H */
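apei_estatus_for_each_section() iterates the variable-length generic data entries that trail an acpi_hest_generic_status block. A sketch of counting memory-error sections in one block -- it assumes a fully populated estatus buffer and follows the same section_type/guid_equal idiom the GHES driver uses internally:

	#include <linux/cper.h>
	#include <linux/uuid.h>
	#include <acpi/ghes.h>

	static int count_mem_sections(struct acpi_hest_generic_status *estatus)
	{
		struct acpi_hest_generic_data *gdata;
		int n = 0;

		apei_estatus_for_each_section(estatus, gdata) {
			guid_t *sec_type = (guid_t *)gdata->section_type;

			if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM))
				n++;
		}
		return n;
	}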
diff --git a/include/acpi/hed.h b/include/acpi/hed.h
new file mode 100644
index 0000000..ebef902
--- /dev/null
+++ b/include/acpi/hed.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hed.h - ACPI Hardware Error Device
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ */
+
+#ifndef ACPI_HED_H
+#define ACPI_HED_H
+
+#include <linux/notifier.h>
+
+int register_acpi_hed_notifier(struct notifier_block *nb);
+void unregister_acpi_hed_notifier(struct notifier_block *nb);
+
+#endif
diff --git a/include/acpi/nfit.h b/include/acpi/nfit.h
new file mode 100644
index 0000000..86ed07c
--- /dev/null
+++ b/include/acpi/nfit.h
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __ACPI_NFIT_H
+#define __ACPI_NFIT_H
+
+#if IS_ENABLED(CONFIG_ACPI_NFIT)
+int nfit_get_smbios_id(u32 device_handle, u16 *flags);
+#else
+static inline int nfit_get_smbios_id(u32 device_handle, u16 *flags)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* __ACPI_NFIT_H */
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
new file mode 100644
index 0000000..4dec4ed
--- /dev/null
+++ b/include/acpi/pcc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PCC (Platform Communications Channel) methods
+ */
+
+#ifndef _PCC_H
+#define _PCC_H
+
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MAX_PCC_SUBSPACES	256
+#ifdef CONFIG_PCC
+extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
+						  int subspace_id);
+extern void pcc_mbox_free_channel(struct mbox_chan *chan);
+#else
+static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
+							 int subspace_id)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
+#endif
+
+#endif /* _PCC_H */
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
new file mode 100644
index 0000000..967c552
--- /dev/null
+++ b/include/acpi/pdc_intel.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* _PDC bit definition for Intel processors */
+
+#ifndef __PDC_INTEL_H__
+#define __PDC_INTEL_H__
+
+#define ACPI_PDC_P_FFH			(0x0001)
+#define ACPI_PDC_C_C1_HALT		(0x0002)
+#define ACPI_PDC_T_FFH			(0x0004)
+#define ACPI_PDC_SMP_C1PT		(0x0008)
+#define ACPI_PDC_SMP_C2C3		(0x0010)
+#define ACPI_PDC_SMP_P_SWCOORD		(0x0020)
+#define ACPI_PDC_SMP_C_SWCOORD		(0x0040)
+#define ACPI_PDC_SMP_T_SWCOORD		(0x0080)
+#define ACPI_PDC_C_C1_FFH		(0x0100)
+#define ACPI_PDC_C_C2C3_FFH		(0x0200)
+#define ACPI_PDC_SMP_P_HWCOORD		(0x0800)
+
+#define ACPI_PDC_EST_CAPABILITY_SMP	(ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_EST_CAPABILITY_SWSMP	(ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_SMP_P_SWCOORD | \
+					 ACPI_PDC_SMP_P_HWCOORD | \
+					 ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_C_CAPABILITY_SMP	(ACPI_PDC_SMP_C2C3 | \
+					 ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_C_C1_FFH | \
+					 ACPI_PDC_C_C2C3_FFH)
+
+#endif /* __PDC_INTEL_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
new file mode 100644
index 0000000..35ab3f8
--- /dev/null
+++ b/include/acpi/platform/acenv.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acenv.h - Host and compiler configuration
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACENV_H__
+#define __ACENV_H__
+
+/*
+ * Environment configuration. The purpose of this file is to interface ACPICA
+ * to the local environment. This includes compiler-specific, OS-specific,
+ * and machine-specific configuration.
+ */ + +/* Types for ACPI_MUTEX_TYPE */ + +#define ACPI_BINARY_SEMAPHORE 0 +#define ACPI_OSL_MUTEX 1 + +/* Types for DEBUGGER_THREADING */ + +#define DEBUGGER_SINGLE_THREADED 0 +#define DEBUGGER_MULTI_THREADED 1 + +/****************************************************************************** + * + * Configuration for ACPI tools and utilities + * + *****************************************************************************/ + +/* Common application configuration. All single threaded except for acpi_exec. */ + +#if (defined ACPI_ASL_COMPILER) || \ + (defined ACPI_BIN_APP) || \ + (defined ACPI_DUMP_APP) || \ + (defined ACPI_HELP_APP) || \ + (defined ACPI_NAMES_APP) || \ + (defined ACPI_SRC_APP) || \ + (defined ACPI_XTRACT_APP) || \ + (defined ACPI_EXAMPLE_APP) || \ + (defined ACPI_EFI_HELLO) +#define ACPI_APPLICATION +#define ACPI_SINGLE_THREADED +#define USE_NATIVE_ALLOCATE_ZEROED +#endif + +/* iASL configuration */ + +#ifdef ACPI_ASL_COMPILER +#define ACPI_DEBUG_OUTPUT +#define ACPI_CONSTANT_EVAL_ONLY +#define ACPI_LARGE_NAMESPACE_NODE +#define ACPI_DATA_TABLE_DISASSEMBLY +#define ACPI_32BIT_PHYSICAL_ADDRESS +#define ACPI_DISASSEMBLER 1 +#endif + +/* acpi_exec configuration. Multithreaded with full AML debugger */ + +#ifdef ACPI_EXEC_APP +#define ACPI_APPLICATION +#define ACPI_FULL_DEBUG +#define ACPI_MUTEX_DEBUG +#define ACPI_DBG_TRACK_ALLOCATIONS +#endif + +/* acpi_help configuration. Error messages disabled. */ + +#ifdef ACPI_HELP_APP +#define ACPI_NO_ERROR_MESSAGES +#endif + +/* acpi_names configuration. Debug output enabled. */ + +#ifdef ACPI_NAMES_APP +#define ACPI_DEBUG_OUTPUT +#endif + +/* acpi_exec/acpi_names/Example configuration. Native RSDP used. */ + +#if (defined ACPI_EXEC_APP) || \ + (defined ACPI_EXAMPLE_APP) || \ + (defined ACPI_NAMES_APP) +#define ACPI_USE_NATIVE_RSDP_POINTER +#endif + +/* acpi_dump configuration. Native mapping used if provided by the host */ + +#ifdef ACPI_DUMP_APP +#define ACPI_USE_NATIVE_MEMORY_MAPPING +#endif + +/* acpi_names/Example configuration. Hardware disabled */ + +#if (defined ACPI_EXAMPLE_APP) || \ + (defined ACPI_NAMES_APP) +#define ACPI_REDUCED_HARDWARE 1 +#endif + +/* Linkable ACPICA library. Two versions, one with full debug. */ + +#ifdef ACPI_LIBRARY +#define ACPI_USE_LOCAL_CACHE +#define ACPI_DEBUGGER 1 +#define ACPI_DISASSEMBLER 1 + +#ifdef _DEBUG +#define ACPI_DEBUG_OUTPUT +#endif +#endif + +/* Common for all ACPICA applications */ + +#ifdef ACPI_APPLICATION +#define ACPI_USE_LOCAL_CACHE +#endif + +/* Common debug/disassembler support */ + +#ifdef ACPI_FULL_DEBUG +#define ACPI_DEBUG_OUTPUT +#define ACPI_DEBUGGER 1 +#define ACPI_DISASSEMBLER 1 +#endif + + +/*! [Begin] no source code translation */ + +/****************************************************************************** + * + * Host configuration files. The compiler configuration files are included + * first. 
+ *
+ *****************************************************************************/
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#include <acpi/platform/acgcc.h>
+
+#elif defined(_MSC_VER)
+#include "acmsvc.h"
+
+#elif defined(__INTEL_COMPILER)
+#include <acpi/platform/acintel.h>
+
+#endif
+
+#if defined(_LINUX) || defined(__linux__)
+#include <acpi/platform/aclinux.h>
+
+#elif defined(_APPLE) || defined(__APPLE__)
+#include "acmacosx.h"
+
+#elif defined(__DragonFly__)
+#include "acdragonfly.h"
+
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#include "acfreebsd.h"
+
+#elif defined(__NetBSD__)
+#include "acnetbsd.h"
+
+#elif defined(__sun)
+#include "acsolaris.h"
+
+#elif defined(MODESTO)
+#include "acmodesto.h"
+
+#elif defined(NETWARE)
+#include "acnetware.h"
+
+#elif defined(_CYGWIN)
+#include "accygwin.h"
+
+#elif defined(WIN32)
+#include "acwin.h"
+
+#elif defined(WIN64)
+#include "acwin64.h"
+
+#elif defined(_WRS_LIB_BUILD)
+#include "acvxworks.h"
+
+#elif defined(__OS2__)
+#include "acos2.h"
+
+#elif defined(__HAIKU__)
+#include "achaiku.h"
+
+#elif defined(__QNX__)
+#include "acqnx.h"
+
+/*
+ * EFI applications can be built with -nostdlib, in this case, it must be
+ * included after including all other host environmental definitions, in
+ * order to override the definitions.
+ */
+#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI)
+#include "acefi.h"
+
+#else
+
+/* Unknown environment */
+
+#error Unknown target environment
+#endif
+
+/*! [End] no source code translation !*/
+
+/******************************************************************************
+ *
+ * Setup defaults for the required symbols that were not defined in one of
+ * the host/compiler files above.
+ *
+ *****************************************************************************/
+
+/* 64-bit data types */
+
+#ifndef COMPILER_DEPENDENT_INT64
+#define COMPILER_DEPENDENT_INT64   long long
+#endif
+
+#ifndef COMPILER_DEPENDENT_UINT64
+#define COMPILER_DEPENDENT_UINT64  unsigned long long
+#endif
+
+/* Type of mutex supported by host. Default is binary semaphores. */
+#ifndef ACPI_MUTEX_TYPE
+#define ACPI_MUTEX_TYPE             ACPI_BINARY_SEMAPHORE
+#endif
+
+/* Global Lock acquire/release */
+
+#ifndef ACPI_ACQUIRE_GLOBAL_LOCK
+#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acquired) acquired = 1
+#endif
+
+#ifndef ACPI_RELEASE_GLOBAL_LOCK
+#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0
+#endif
+
+/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */
+
+#ifndef ACPI_FLUSH_CPU_CACHE
+#define ACPI_FLUSH_CPU_CACHE()
+#endif
+
+/* "inline" keywords - configurable since inline is not standardized */
+
+#ifndef ACPI_INLINE
+#define ACPI_INLINE
+#endif
+
+/* Use ordered initialization if compiler doesn't support designated. */
+#ifndef ACPI_STRUCT_INIT
+#define ACPI_STRUCT_INIT(field, value)	value
+#endif
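ACPI_STRUCT_INIT() exists because not every host compiler accepts C99 designated initializers, so the same source line expands to one form or the other. A small illustration (struct and values invented; aclinux.h below redefines the macro as .field = value):

	struct example { int first; int second; };

	struct example ex = {
		ACPI_STRUCT_INIT(first, 1),	/* default expands to: { 1, 2 } */
		ACPI_STRUCT_INIT(second, 2)	/* Linux: { .first = 1, .second = 2 } */
	};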
+
+/*
+ * Configurable calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
+ */
+#ifndef ACPI_SYSTEM_XFACE
+#define ACPI_SYSTEM_XFACE
+#endif
+
+#ifndef ACPI_EXTERNAL_XFACE
+#define ACPI_EXTERNAL_XFACE
+#endif
+
+#ifndef ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#endif
+
+#ifndef ACPI_INTERNAL_VAR_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+#endif
+
+/*
+ * Debugger threading model.
+ * Use single threaded if the entire subsystem is contained in an application.
+ * Use multiple threaded when the subsystem is running in the kernel.
+ *
+ * By default the model is single threaded if ACPI_APPLICATION is set,
+ * multi-threaded if ACPI_APPLICATION is not set.
+ */
+#ifndef DEBUGGER_THREADING
+#if !defined (ACPI_APPLICATION) || defined (ACPI_EXEC_APP)
+#define DEBUGGER_THREADING          DEBUGGER_MULTI_THREADED
+
+#else
+#define DEBUGGER_THREADING          DEBUGGER_SINGLE_THREADED
+#endif
+#endif				/* !DEBUGGER_THREADING */
+
+/******************************************************************************
+ *
+ * C library configuration
+ *
+ *****************************************************************************/
+
+/*
+ * ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library.
+ *      Otherwise, local versions of string/memory functions will be used.
+ * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
+ *      the standard header files may be used. Defining this implies that
+ *      ACPI_USE_SYSTEM_CLIBRARY has been defined.
+ *
+ * The ACPICA subsystem only uses low level C library functions that do not
+ * call operating system services and may therefore be inlined in the code.
+ *
+ * It may be necessary to tailor these include files to the target
+ * generation environment.
+ */
+
+/* Use the standard C library headers. We want to keep these to a minimum. */
+
+#ifdef ACPI_USE_STANDARD_HEADERS
+
+/* Use the standard headers from the standard locations */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#if defined (ACPI_APPLICATION) || defined(ACPI_LIBRARY)
+#include <stdio.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <time.h>
+#include <signal.h>
+#endif
+
+#endif				/* ACPI_USE_STANDARD_HEADERS */
+
+#ifdef ACPI_APPLICATION
+#define ACPI_FILE              FILE *
+#define ACPI_FILE_OUT          stdout
+#define ACPI_FILE_ERR          stderr
+#else
+#define ACPI_FILE              void *
+#define ACPI_FILE_OUT          NULL
+#define ACPI_FILE_ERR          NULL
+#endif				/* ACPI_APPLICATION */
+
+#ifndef ACPI_INIT_FUNCTION
+#define ACPI_INIT_FUNCTION
+#endif
+
+#endif				/* __ACENV_H__ */
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
new file mode 100644
index 0000000..2e36c83
--- /dev/null
+++ b/include/acpi/platform/acenvex.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acenvex.h - Extra host and compiler configuration
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACENVEX_H__
+#define __ACENVEX_H__
+
+/*! [Begin] no source code translation */
+
+/******************************************************************************
+ *
+ * Extra host configuration files. All ACPICA headers are included before
+ * including these files.
+ *
+ *****************************************************************************/
+
+#if defined(_LINUX) || defined(__linux__)
+#include <acpi/platform/aclinuxex.h>
+
+#elif defined(__DragonFly__)
+#include "acdragonflyex.h"
+
+/*
+ * EFI applications can be built with -nostdlib, in this case, it must be
+ * included after including all other host environmental definitions, in
+ * order to override the definitions.
+ */
+#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI)
+#include "acefiex.h"
+
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#include "acgccex.h"
+
+#elif defined(_MSC_VER)
+#include "acmsvcex.h"
+
+#endif
+
+/*! [End] no source code translation !*/
+
+#endif				/* __ACENVEX_H__ */
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
new file mode 100644
index 0000000..6a0705b
--- /dev/null
+++ b/include/acpi/platform/acgcc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acgcc.h - GCC specific defines, etc.
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACGCC_H__
+#define __ACGCC_H__
+
+/*
+ * Using the compiler-specific <stdarg.h> is good practice even when
+ * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined).
+ */
+#ifndef va_arg
+#ifdef ACPI_USE_BUILTIN_STDARG
+typedef __builtin_va_list va_list;
+#define va_start(v, l)          __builtin_va_start(v, l)
+#define va_end(v)               __builtin_va_end(v)
+#define va_arg(v, l)            __builtin_va_arg(v, l)
+#define va_copy(d, s)           __builtin_va_copy(d, s)
+#else
+#include <stdarg.h>
+#endif
+#endif
+
+#define ACPI_INLINE             __inline__
+
+/* Function name is used for debug output. Non-ANSI, compiler-dependent */
+
+#define ACPI_GET_FUNCTION_NAME          __func__
+
+/*
+ * This macro is used to tag functions as "printf-like" because
+ * some compilers (like GCC) can catch printf format string problems.
+ */
+#define ACPI_PRINTF_LIKE(c) __attribute__ ((__format__ (__printf__, c, c+1)))
+
+/*
+ * Some compilers complain about unused variables. Sometimes we don't want to
+ * use all the variables (for example, _acpi_module_name). This allows us
+ * to tell the compiler, in a per-variable manner, that a variable
+ * is unused.
+ */
+#define ACPI_UNUSED_VAR __attribute__ ((unused))
+
+/* GCC supports __VA_ARGS__ in macros */
+
+#define COMPILER_VA_MACRO               1
+
+/* GCC supports native multiply/shift on 32-bit platforms */
+
+#define ACPI_USE_NATIVE_MATH64
+
+#endif				/* __ACGCC_H__ */
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
new file mode 100644
index 0000000..8dda285
--- /dev/null
+++ b/include/acpi/platform/acgccex.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acgccex.h - Extra GCC specific defines, etc.
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACGCCEX_H__
+#define __ACGCCEX_H__
+
+/*
+ * Some versions of gcc implement strchr() with a buggy macro. So,
+ * undef it here. Prevents error messages of this form (usually from the
+ * file getopt.c):
+ *
+ * error: logical '&&' with non-zero constant will always evaluate as true
+ */
+#ifdef strchr
+#undef strchr
+#endif
+
+#endif				/* __ACGCCEX_H__ */
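ACPI_PRINTF_LIKE(c) marks argument c as the format string and c+1 as the first checked vararg. ACPICA applies it to its varargs interfaces along these lines (a sketch of the pattern; see acpiosxf.h for the real declarations):

	ACPI_PRINTF_LIKE(1)
	void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...);

	/* gcc can now flag mismatches such as acpi_os_printf("%s", 42)
	 * at compile time. */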
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
new file mode 100644
index 0000000..d2cc247
--- /dev/null
+++ b/include/acpi/platform/acintel.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: acintel.h - VC specific defines, etc.
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACINTEL_H__
+#define __ACINTEL_H__
+
+/*
+ * Using the compiler-specific <stdarg.h> is good practice even when
+ * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined).
+ */
+#ifndef va_arg
+#include <stdarg.h>
+#endif
+
+/* Configuration specific to Intel 64-bit C compiler */
+
+#define COMPILER_DEPENDENT_INT64    __int64
+#define COMPILER_DEPENDENT_UINT64   unsigned __int64
+#define ACPI_INLINE                 __inline
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* remark 981 - operands evaluated in no particular order */
+#pragma warning(disable:981)
+
+/* warn C4100: unreferenced formal parameter */
+#pragma warning(disable:4100)
+
+/* warn C4127: conditional expression is constant */
+#pragma warning(disable:4127)
+
+/* warn C4706: assignment within conditional expression */
+#pragma warning(disable:4706)
+
+/* warn C4214: bit field types other than int */
+#pragma warning(disable:4214)
+
+#endif				/* __ACINTEL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
new file mode 100644
index 0000000..3105019
--- /dev/null
+++ b/include/acpi/platform/aclinux.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: aclinux.h - OS specific defines, etc. for Linux
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACLINUX_H__
+#define __ACLINUX_H__
+
+#ifdef __KERNEL__
+
+/* ACPICA external files should not include ACPICA headers directly. */
+
+#if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H)
+#error "Please don't include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
+#endif
+
+#endif
+
+/* Common (in-kernel/user-space) ACPICA configuration */
+
+#define ACPI_USE_SYSTEM_CLIBRARY
+#define ACPI_USE_DO_WHILE_0
+#define ACPI_IGNORE_PACKAGE_RESOLUTION_ERRORS
+
+#ifdef __KERNEL__
+
+#define ACPI_USE_SYSTEM_INTTYPES
+#define ACPI_USE_GPE_POLLING
+
+/* Kernel specific ACPICA configuration */
+
+#ifdef CONFIG_PCI
+#define ACPI_PCI_CONFIGURED
+#endif
+
+#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY
+#define ACPI_REDUCED_HARDWARE 1
+#endif
+
+#ifdef CONFIG_ACPI_DEBUGGER
+#define ACPI_DEBUGGER
+#endif
+
+#ifdef CONFIG_ACPI_DEBUG
+#define ACPI_MUTEX_DEBUG
+#endif
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#ifdef EXPORT_ACPI_INTERFACES
+#include <linux/export.h>
+#endif
+#ifdef CONFIG_ACPI
+#include <asm/acenv.h>
+#endif
+
+#define ACPI_INIT_FUNCTION __init
+
+/* Use a specific debugging default separate from ACPICA */
+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT          (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
+#ifndef CONFIG_ACPI
+
+/* External globals for __KERNEL__, stubs are needed */
+
+#define ACPI_GLOBAL(t,a)
+#define ACPI_INIT_GLOBAL(t,a,b)
+
+/* Generating stubs for configurable ACPICA macros */
+
+#define ACPI_NO_MEM_ALLOCATIONS
+
+/* Generating stubs for configurable ACPICA functions */
+
+#define ACPI_NO_ERROR_MESSAGES
+#undef ACPI_DEBUG_OUTPUT
+
+/* External interface for __KERNEL__, stub is needed */
+
+#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
+	static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);}
+#define ACPI_EXTERNAL_RETURN_OK(prototype) \
+	static ACPI_INLINE prototype {return(AE_OK);}
+#define ACPI_EXTERNAL_RETURN_VOID(prototype) \
+	static ACPI_INLINE prototype {return;}
+#define ACPI_EXTERNAL_RETURN_UINT32(prototype) \
+	static ACPI_INLINE prototype {return(0);}
+#define ACPI_EXTERNAL_RETURN_PTR(prototype) \
+	static ACPI_INLINE prototype {return(NULL);}
+
+#endif				/* CONFIG_ACPI */
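These wrappers let every public prototype in the ACPICA headers collapse to an inline stub when CONFIG_ACPI is off. For example, acpixf.h declares acpi_get_handle() inside ACPI_EXTERNAL_RETURN_STATUS(...), which under !CONFIG_ACPI expands roughly to:

	static ACPI_INLINE acpi_status
	acpi_get_handle(acpi_handle parent, acpi_string pathname,
			acpi_handle *ret_handle)
	{
		return (AE_NOT_CONFIGURED);
	}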
+
+/* Host-dependent types and defines for in-kernel ACPICA */
+
+#define ACPI_MACHINE_WIDTH          BITS_PER_LONG
+#define ACPI_USE_NATIVE_MATH64
+#define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
+#define strtoul                     simple_strtoul
+
+#define acpi_cache_t                        struct kmem_cache
+#define acpi_spinlock                       spinlock_t *
+#define acpi_raw_spinlock                   raw_spinlock_t *
+#define acpi_cpu_flags                      unsigned long
+
+/* Use native linux version of acpi_os_allocate_zeroed */
+
+#define USE_NATIVE_ALLOCATE_ZEROED
+
+/*
+ * Overrides for in-kernel ACPICA
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate_zeroed
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_free
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_object
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_raw_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_delete_raw_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_acquire_raw_lock
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_release_raw_lock
+
+/*
+ * OSL interfaces used by debugger/disassembler
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_debugger
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_debugger
+
+/*
+ * OSL interfaces used by utilities
+ */
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_directory
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
+#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
+
+#define ACPI_MSG_ERROR          KERN_ERR "ACPI Error: "
+#define ACPI_MSG_EXCEPTION      KERN_ERR "ACPI Exception: "
+#define ACPI_MSG_WARNING        KERN_WARNING "ACPI Warning: "
+#define ACPI_MSG_INFO           KERN_INFO "ACPI: "
+
+#define ACPI_MSG_BIOS_ERROR     KERN_ERR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_WARNING   KERN_WARNING "ACPI BIOS Warning (bug): "
+
+/*
+ * Linux wants to use designated initializers for function pointer structs.
+ */
+#define ACPI_STRUCT_INIT(field, value)	.field = value
+
+#else				/* !__KERNEL__ */
+
+#define ACPI_USE_STANDARD_HEADERS
+
+#ifdef ACPI_USE_STANDARD_HEADERS
+#include <unistd.h>
+#endif
+
+/* Define/disable kernel-specific declarators */
+
+#ifndef __init
+#define __init
+#endif
+#ifndef __iomem
+#define __iomem
+#endif
+
+/* Host-dependent types and defines for user-space ACPICA */
+
+#define ACPI_FLUSH_CPU_CACHE()
+#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
+
+#if defined(__ia64__)    || (defined(__x86_64__) && !defined(__ILP32__)) ||\
+	defined(__aarch64__) || defined(__PPC64__) ||\
+	defined(__s390x__)
+#define ACPI_MACHINE_WIDTH          64
+#define COMPILER_DEPENDENT_INT64    long
+#define COMPILER_DEPENDENT_UINT64   unsigned long
+#else
+#define ACPI_MACHINE_WIDTH          32
+#define COMPILER_DEPENDENT_INT64    long long
+#define COMPILER_DEPENDENT_UINT64   unsigned long long
+#define ACPI_USE_NATIVE_DIVIDE
+#define ACPI_USE_NATIVE_MATH64
+#endif
+
+#ifndef __cdecl
+#define __cdecl
+#endif
+
+#endif				/* __KERNEL__ */
+
+#endif				/* __ACLINUX_H__ */
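Each ACPI_USE_ALTERNATE_PROTOTYPE_* define above keys off a guard in the OSL header, suppressing the generic extern so Linux can supply its own static inline (aclinuxex.h below provides them). Schematically, acpiosxf.h follows this pattern:

	#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_allocate
	void *acpi_os_allocate(acpi_size size);
	#endif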
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
new file mode 100644
index 0000000..cc4f1eb
--- /dev/null
+++ b/include/acpi/platform/aclinuxex.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
+ *
+ * Copyright (C) 2000 - 2019, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACLINUXEX_H__
+#define __ACLINUXEX_H__
+
+#ifdef __KERNEL__
+
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
+#ifndef ACPI_DIV_64_BY_32
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+	do { \
+		u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
+		(r32) = do_div ((__n), (d32)); \
+		(q32) = (u32) (__n); \
+	} while (0)
+#endif
+
+#ifndef ACPI_SHIFT_RIGHT_64
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+	do { \
+		(n_lo) >>= 1; \
+		(n_lo) |= (((n_hi) & 1) << 31); \
+		(n_hi) >>= 1; \
+	} while (0)
+#endif
+
+#endif
+
+/*
+ * Overrides for in-kernel ACPICA
+ */
+acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);
+
+acpi_status acpi_os_terminate(void);
+
+/*
+ * The irqs_disabled() check is for resume from RAM.
+ * Interrupts are off during resume, just like they are for boot.
+ * However, boot has (system_state != SYSTEM_RUNNING)
+ * to quiet __might_sleep() in kmalloc() and resume does not.
+ */
+static inline void *acpi_os_allocate(acpi_size size)
+{
+	return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
+}
+
+static inline void *acpi_os_allocate_zeroed(acpi_size size)
+{
+	return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
+}
+
+static inline void acpi_os_free(void *memory)
+{
+	kfree(memory);
+}
+
+static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
+{
+	return kmem_cache_zalloc(cache,
+				 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
+}
+
+static inline acpi_thread_id acpi_os_get_thread_id(void)
+{
+	return (acpi_thread_id) (unsigned long)current;
+}
+
+/*
+ * When lockdep is enabled, the spin_lock_init() macro stringifies its
+ * argument and uses that as a name for the lock in debugging.
+ * By executing spin_lock_init() in a macro the key changes from "lock" for
+ * all locks to the name of the argument of acpi_os_create_lock(), which
+ * prevents lockdep from reporting false positives for ACPICA locks.
+ */
+#define acpi_os_create_lock(__handle) \
+	({ \
+		spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+		if (lock) { \
+			*(__handle) = lock; \
+			spin_lock_init(*(__handle)); \
+		} \
+		lock ? AE_OK : AE_NO_MEMORY; \
+	})
+
+#define acpi_os_create_raw_lock(__handle) \
+	({ \
+		raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+		if (lock) { \
+			*(__handle) = lock; \
+			raw_spin_lock_init(*(__handle)); \
+		} \
+		lock ? AE_OK : AE_NO_MEMORY; \
+	})
+
+static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
+{
+	acpi_cpu_flags flags;
+
+	raw_spin_lock_irqsave(lockp, flags);
+	return flags;
+}
+
+static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
+					    acpi_cpu_flags flags)
+{
+	raw_spin_unlock_irqrestore(lockp, flags);
+}
+
+static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
+{
+	ACPI_FREE(handle);
+}
+
+static inline u8 acpi_os_readable(void *pointer, acpi_size length)
+{
+	return TRUE;
+}
+
+static inline acpi_status acpi_os_initialize_debugger(void)
+{
+	return AE_OK;
+}
+
+static inline void acpi_os_terminate_debugger(void)
+{
+	return;
+}
+
+/*
+ * OSL interfaces added by Linux
+ */
+
+#endif				/* __KERNEL__ */
+
+#endif				/* __ACLINUXEX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
new file mode 100644
index 0000000..683e124
--- /dev/null
+++ b/include/acpi/processor.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ACPI_PROCESSOR_H
+#define __ACPI_PROCESSOR_H
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/pm_qos.h>
+#include <linux/thermal.h>
+#include <asm/acpi.h>
+
+#define ACPI_PROCESSOR_CLASS		"processor"
+#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
+#define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"
+#define ACPI_PROCESSOR_CONTAINER_HID	"ACPI0010"
+
+#define ACPI_PROCESSOR_BUSY_METRIC	10
+
+#define ACPI_PROCESSOR_MAX_POWER	8
+#define ACPI_PROCESSOR_MAX_C2_LATENCY	100
+#define ACPI_PROCESSOR_MAX_C3_LATENCY	1000
+
+#define ACPI_PROCESSOR_MAX_THROTTLING	16
+#define ACPI_PROCESSOR_MAX_THROTTLE	250	/* 25% */
+#define ACPI_PROCESSOR_MAX_DUTY_WIDTH	4
+
+#define ACPI_PDC_REVISION_ID		0x1
+
+#define ACPI_PSD_REV0_REVISION		0	/* Support for _PSD as in ACPI 3.0 */
+#define ACPI_PSD_REV0_ENTRIES		5
+
+#define ACPI_TSD_REV0_REVISION		0	/* Support for _TSD as in ACPI 3.0 */
+#define ACPI_TSD_REV0_ENTRIES		5
+/*
+ * Types of coordination defined in ACPI 3.0.
Same macros can be used across + * P, C and T states + */ +#define DOMAIN_COORD_TYPE_SW_ALL 0xfc +#define DOMAIN_COORD_TYPE_SW_ANY 0xfd +#define DOMAIN_COORD_TYPE_HW_ALL 0xfe + +#define ACPI_CSTATE_SYSTEMIO 0 +#define ACPI_CSTATE_FFH 1 +#define ACPI_CSTATE_HALT 2 +#define ACPI_CSTATE_INTEGER 3 + +#define ACPI_CX_DESC_LEN 32 + +/* Power Management */ + +struct acpi_processor_cx; + +struct acpi_power_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __packed; + +struct acpi_processor_cx { + u8 valid; + u8 type; + u32 address; + u8 entry_method; + u8 index; + u32 latency; + u8 bm_sts_skip; + char desc[ACPI_CX_DESC_LEN]; +}; + +struct acpi_lpi_state { + u32 min_residency; + u32 wake_latency; /* worst case */ + u32 flags; + u32 arch_flags; + u32 res_cnt_freq; + u32 enable_parent_state; + u64 address; + u8 index; + u8 entry_method; + char desc[ACPI_CX_DESC_LEN]; +}; + +struct acpi_processor_power { + int count; + union { + struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; + struct acpi_lpi_state lpi_states[ACPI_PROCESSOR_MAX_POWER]; + }; + int timer_broadcast_on_state; +}; + +/* Performance Management */ + +struct acpi_psd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +} __packed; + +struct acpi_pct_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __packed; + +struct acpi_processor_px { + u64 core_frequency; /* megahertz */ + u64 power; /* milliWatts */ + u64 transition_latency; /* microseconds */ + u64 bus_master_latency; /* microseconds */ + u64 control; /* control value */ + u64 status; /* success indicator */ +}; + +struct acpi_processor_performance { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + unsigned int state_count; + struct acpi_processor_px *states; + struct acpi_psd_package domain_info; + cpumask_var_t shared_cpu_map; + unsigned int shared_type; +}; + +/* Throttling Control */ + +struct acpi_tsd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +} __packed; + +struct acpi_ptc_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __packed; + +struct acpi_processor_tx_tss { + u64 freqpercentage; /* */ + u64 power; /* milliWatts */ + u64 transition_latency; /* microseconds */ + u64 control; /* control value */ + u64 status; /* success indicator */ +}; +struct acpi_processor_tx { + u16 power; + u16 performance; +}; + +struct acpi_processor; +struct acpi_processor_throttling { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + unsigned int state_count; + struct acpi_processor_tx_tss *states_tss; + struct acpi_tsd_package domain_info; + cpumask_var_t shared_cpu_map; + int (*acpi_processor_get_throttling) (struct acpi_processor * pr); + int (*acpi_processor_set_throttling) (struct acpi_processor * pr, + int state, bool force); + + u32 address; + u8 duty_offset; + u8 duty_width; + u8 tsd_valid_flag; + unsigned int shared_type; + struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING]; +}; + +/* Limit Interface */ + +struct acpi_processor_lx { + int px; /* performance state */ + int tx; /* throttle level */ +}; + +struct acpi_processor_limit { + struct acpi_processor_lx 
state; /* current limit */ + struct acpi_processor_lx thermal; /* thermal limit */ + struct acpi_processor_lx user; /* user limit */ +}; + +struct acpi_processor_flags { + u8 power:1; + u8 performance:1; + u8 throttling:1; + u8 limit:1; + u8 bm_control:1; + u8 bm_check:1; + u8 has_cst:1; + u8 has_lpi:1; + u8 power_setup_done:1; + u8 bm_rld_set:1; + u8 need_hotplug_init:1; +}; + +struct acpi_processor { + acpi_handle handle; + u32 acpi_id; + phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */ + u32 id; /* CPU logical ID allocated by OS */ + u32 pblk; + int performance_platform_limit; + int throttling_platform_limit; + /* 0 - states 0..n-th state available */ + + struct acpi_processor_flags flags; + struct acpi_processor_power power; + struct acpi_processor_performance *performance; + struct acpi_processor_throttling throttling; + struct acpi_processor_limit limit; + struct thermal_cooling_device *cdev; + struct device *dev; /* Processor device. */ + struct freq_qos_request perflib_req; + struct freq_qos_request thermal_req; +}; + +struct acpi_processor_errata { + u8 smp; + struct { + u8 throttle:1; + u8 fdma:1; + u8 reserved:6; + u32 bmisx; + } piix4; +}; + +extern int acpi_processor_preregister_performance(struct + acpi_processor_performance + __percpu *performance); + +extern int acpi_processor_register_performance(struct acpi_processor_performance + *performance, unsigned int cpu); +extern void acpi_processor_unregister_performance(unsigned int cpu); + +int acpi_processor_pstate_control(void); +/* note: this locks both the calling module and the processor module + if a _PPC object exists, rmmod is disallowed then */ +int acpi_processor_notify_smm(struct module *calling_module); +int acpi_processor_get_psd(acpi_handle handle, + struct acpi_psd_package *pdomain); + +/* parsing the _P* objects. 
*/ +extern int acpi_processor_get_performance_info(struct acpi_processor *pr); + +/* for communication between multiple parts of the processor kernel module */ +DECLARE_PER_CPU(struct acpi_processor *, processors); +extern struct acpi_processor_errata errata; + +#if defined(ARCH_HAS_POWER_INIT) && defined(CONFIG_ACPI_PROCESSOR_CSTATE) +void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, + unsigned int cpu); +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, + struct acpi_power_register *reg); +void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate); +#else +static inline void acpi_processor_power_init_bm_check(struct + acpi_processor_flags + *flags, unsigned int cpu) +{ + flags->bm_check = 1; + return; +} +static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, + struct acpi_power_register + *reg) +{ + return -1; +} +static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx + *cstate) +{ + return; +} +#endif + +static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, + bool direct) +{ + if (direct || (is_percpu_thread() && cpu == smp_processor_id())) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + +/* in processor_perflib.c */ + +#ifdef CONFIG_CPU_FREQ +extern bool acpi_processor_cpufreq_init; +void acpi_processor_ignore_ppc_init(void); +void acpi_processor_ppc_init(struct cpufreq_policy *policy); +void acpi_processor_ppc_exit(struct cpufreq_policy *policy); +void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); +extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); +#else +static inline void acpi_processor_ignore_ppc_init(void) +{ + return; +} +static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy) +{ + return; +} +static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy) +{ + return; +} +static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr, + int event_flag) +{ + static unsigned int printout = 1; + if (printout) { + printk(KERN_WARNING + "Warning: Processor Platform Limit event detected, but not handled.\n"); + printk(KERN_WARNING + "Consider compiling CPUfreq support into your kernel.\n"); + printout = 0; + } +} +static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) +{ + return -ENODEV; +} + +#endif /* CONFIG_CPU_FREQ */ + +/* in processor_core.c */ +phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); +phys_cpuid_t acpi_map_madt_entry(u32 acpi_id); +int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id); +int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); + +#ifdef CONFIG_ACPI_CPPC_LIB +extern int acpi_cppc_processor_probe(struct acpi_processor *pr); +extern void acpi_cppc_processor_exit(struct acpi_processor *pr); +#else +static inline int acpi_cppc_processor_probe(struct acpi_processor *pr) +{ + return 0; +} +static inline void acpi_cppc_processor_exit(struct acpi_processor *pr) +{ + return; +} +#endif /* CONFIG_ACPI_CPPC_LIB */ + +/* in processor_pdc.c */ +void acpi_processor_set_pdc(acpi_handle handle); + +/* in processor_throttling.c */ +#ifdef CONFIG_ACPI_CPU_FREQ_PSS +int acpi_processor_tstate_has_changed(struct acpi_processor *pr); +int acpi_processor_get_throttling_info(struct acpi_processor *pr); +extern int acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force); +/* + * Reevaluate whether the T-state is invalid after one cpu is + * onlined/offlined. 
In such case the flags.throttling will be updated. + */ +extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, + bool is_dead); +extern const struct file_operations acpi_processor_throttling_fops; +extern void acpi_processor_throttling_init(void); +#else +static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr) +{ + return 0; +} + +static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force) +{ + return -ENODEV; +} + +static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, + bool is_dead) {} + +static inline void acpi_processor_throttling_init(void) {} +#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ + +/* in processor_idle.c */ +extern struct cpuidle_driver acpi_idle_driver; +#ifdef CONFIG_ACPI_PROCESSOR_IDLE +int acpi_processor_power_init(struct acpi_processor *pr); +int acpi_processor_power_exit(struct acpi_processor *pr); +int acpi_processor_power_state_has_changed(struct acpi_processor *pr); +int acpi_processor_hotplug(struct acpi_processor *pr); +#else +static inline int acpi_processor_power_init(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_power_exit(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static inline int acpi_processor_hotplug(struct acpi_processor *pr) +{ + return -ENODEV; +} +#endif /* CONFIG_ACPI_PROCESSOR_IDLE */ + +/* in processor_thermal.c */ +int acpi_processor_get_limit_info(struct acpi_processor *pr); +extern const struct thermal_cooling_device_ops processor_cooling_ops; +#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) +void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy); +void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy); +#else +static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) +{ + return; +} +static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) +{ + return; +} +#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ + +#endif diff --git a/include/acpi/reboot.h b/include/acpi/reboot.h new file mode 100644 index 0000000..14122fc --- /dev/null +++ b/include/acpi/reboot.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_REBOOT_H +#define __ACPI_REBOOT_H + +#ifdef CONFIG_ACPI +extern void acpi_reboot(void); +#else +static inline void acpi_reboot(void) { } +#endif + +#endif + diff --git a/include/acpi/video.h b/include/acpi/video.h new file mode 100644 index 0000000..db8548f --- /dev/null +++ b/include/acpi/video.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ACPI_VIDEO_H +#define __ACPI_VIDEO_H + +#include /* for ENODEV */ +#include /* for bool */ + +struct acpi_video_brightness_flags { + u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */ + u8 _BCL_reversed:1; /* _BCL package is in a reversed order */ + u8 _BQC_use_index:1; /* _BQC returns an index value */ +}; + +struct acpi_video_device_brightness { + int curr; + int count; + int *levels; + struct acpi_video_brightness_flags flags; +}; + +struct acpi_device; + +#define ACPI_VIDEO_CLASS "video" + +#define ACPI_VIDEO_DISPLAY_CRT 1 +#define ACPI_VIDEO_DISPLAY_TV 2 +#define ACPI_VIDEO_DISPLAY_DVI 3 +#define ACPI_VIDEO_DISPLAY_LCD 4 + +#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100 +#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110 
+#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200 + +#define ACPI_VIDEO_NOTIFY_SWITCH 0x80 +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 +#define ACPI_VIDEO_NOTIFY_CYCLE 0x82 +#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT 0x83 +#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT 0x84 +#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS 0x85 +#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 +#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 +#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88 +#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89 + +enum acpi_backlight_type { + acpi_backlight_undef = -1, + acpi_backlight_none = 0, + acpi_backlight_video, + acpi_backlight_vendor, + acpi_backlight_native, +}; + +#if IS_ENABLED(CONFIG_ACPI_VIDEO) +extern int acpi_video_register(void); +extern void acpi_video_unregister(void); +extern int acpi_video_get_edid(struct acpi_device *device, int type, + int device_id, void **edid); +extern enum acpi_backlight_type acpi_video_get_backlight_type(void); +extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); +/* + * Note: The value returned by acpi_video_handles_brightness_key_presses() + * may change over time and should not be cached. + */ +extern bool acpi_video_handles_brightness_key_presses(void); +extern int acpi_video_get_levels(struct acpi_device *device, + struct acpi_video_device_brightness **dev_br, + int *pmax_level); +#else +static inline int acpi_video_register(void) { return -ENODEV; } +static inline void acpi_video_unregister(void) { return; } +static inline int acpi_video_get_edid(struct acpi_device *device, int type, + int device_id, void **edid) +{ + return -ENODEV; +} +static inline enum acpi_backlight_type acpi_video_get_backlight_type(void) +{ + return acpi_backlight_vendor; +} +static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) +{ +} +static inline bool acpi_video_handles_brightness_key_presses(void) +{ + return false; +} +static inline int acpi_video_get_levels(struct acpi_device *device, + struct acpi_video_device_brightness **dev_br, + int *pmax_level) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h new file mode 100644 index 0000000..e3667c9 --- /dev/null +++ b/include/asm-generic/4level-fixup.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _4LEVEL_FIXUP_H +#define _4LEVEL_FIXUP_H + +#define __ARCH_HAS_4LEVEL_HACK +#define __PAGETABLE_PUD_FOLDED 1 + +#define PUD_SHIFT PGDIR_SHIFT +#define PUD_SIZE PGDIR_SIZE +#define PUD_MASK PGDIR_MASK +#define PTRS_PER_PUD 1 + +#define pud_t pgd_t + +#define pmd_alloc(mm, pud, address) \ + ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? 
\ + NULL: pmd_offset(pud, address)) + +#define pud_offset(pgd, start) (pgd) +#define pud_none(pud) 0 +#define pud_bad(pud) 0 +#define pud_present(pud) 1 +#define pud_ERROR(pud) do { } while (0) +#define pud_clear(pud) pgd_clear(pud) +#define pud_val(pud) pgd_val(pud) +#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) +#define pud_page(pud) pgd_page(pud) +#define pud_page_vaddr(pud) pgd_page_vaddr(pud) + +#undef pud_free_tlb +#define pud_free_tlb(tlb, x, addr) do { } while (0) +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, addr) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#include <asm-generic/5level-fixup.h> + +#endif diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h new file mode 100644 index 0000000..f6947da --- /dev/null +++ b/include/asm-generic/5level-fixup.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _5LEVEL_FIXUP_H +#define _5LEVEL_FIXUP_H + +#define __ARCH_HAS_5LEVEL_HACK +#define __PAGETABLE_P4D_FOLDED 1 + +#define P4D_SHIFT PGDIR_SHIFT +#define P4D_SIZE PGDIR_SIZE +#define P4D_MASK PGDIR_MASK +#define MAX_PTRS_PER_P4D 1 +#define PTRS_PER_P4D 1 + +#define p4d_t pgd_t + +#define pud_alloc(mm, p4d, address) \ + ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \ + NULL : pud_offset(p4d, address)) + +#define p4d_alloc(mm, pgd, address) (pgd) +#define p4d_offset(pgd, start) (pgd) + +#ifndef __ASSEMBLY__ +static inline int p4d_none(p4d_t p4d) +{ + return 0; +} + +static inline int p4d_bad(p4d_t p4d) +{ + return 0; +} + +static inline int p4d_present(p4d_t p4d) +{ + return 1; +} +#endif + +#define p4d_ERROR(p4d) do { } while (0) +#define p4d_clear(p4d) pgd_clear(p4d) +#define p4d_val(p4d) pgd_val(p4d) +#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) +#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud) +#define p4d_page(p4d) pgd_page(p4d) +#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) + +#define __p4d(x) __pgd(x) +#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) + +#undef p4d_free_tlb +#define p4d_free_tlb(tlb, x, addr) do { } while (0) +#define p4d_free(mm, x) do { } while (0) +#define __p4d_free_tlb(tlb, x, addr) do { } while (0) + +#undef p4d_addr_end +#define p4d_addr_end(addr, end) (end) + +#endif diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild new file mode 100644 index 0000000..adff14f --- /dev/null +++ b/include/asm-generic/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# asm headers that all architectures except um should have +# (This file is not included when SRCARCH=um since UML borrows several +# asm headers from the host architecture.) + +mandatory-y += simd.h
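The 4level/5level fixup headers above exist so that architectures with fewer page-table levels can still compile the generic multi-level walk: the folded levels' offset macros simply return their argument, and the presence checks become compile-time constants. A minimal kernel-context sketch of how such a walk reads (lookup_pte() is a hypothetical helper, not part of this commit, and assumes the usual <linux/mm.h> page-table machinery is available):

static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* with 5level-fixup: returns pgd */
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);	/* with 4level-fixup: returns p4d */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

When the fixup macros are in effect, the folded-level branches are constant-false and the compiler removes them, so the same source serves configurations with three, four, or five levels.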
diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h new file mode 100644 index 0000000..d370ee3 --- /dev/null +++ b/include/asm-generic/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h new file mode 100644 index 0000000..2fa2bc2 --- /dev/null +++ b/include/asm-generic/asm-prototypes.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/bitops.h> +#undef __memset +extern void *__memset(void *, int, __kernel_size_t); +#undef __memcpy +extern void *__memcpy(void *, const void *, __kernel_size_t); +#undef __memmove +extern void *__memmove(void *, const void *, __kernel_size_t); +#undef memset +extern void *memset(void *, int, __kernel_size_t); +#undef memcpy +extern void *memcpy(void *, const void *, __kernel_size_t); +#undef memmove +extern void *memmove(void *, const void *, __kernel_size_t); diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h new file mode 100644 index 0000000..e8730c6 --- /dev/null +++ b/include/asm-generic/atomic-instrumented.h @@ -0,0 +1,1788 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-instrumented.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +/* + * This file provides wrappers with KASAN instrumentation for atomic operations. + * To use this functionality an arch's atomic.h file needs to define all + * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include + * this file at the end. This file provides atomic_read() that forwards to + * arch_atomic_read() for actual atomic operation. + * Note: if an arch atomic operation is implemented by means of other atomic + * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use + * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid + * double instrumentation. + */ +#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H +#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H + +#include <linux/build_bug.h> +#include <linux/kasan-checks.h>
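Per the comment at the top of atomic-instrumented.h, an architecture opts in by providing arch_-prefixed implementations and then including the generic header last. A hedged sketch of what that can look like for a made-up arch "foo" (the guard name, the READ_ONCE()/WRITE_ONCE() choices and the __atomic_fetch_add() placeholder are illustrative assumptions, not taken from any real port):

/* hypothetical arch/foo/include/asm/atomic.h */
#ifndef _ASM_FOO_ATOMIC_H
#define _ASM_FOO_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))

static inline void arch_atomic_add(int i, atomic_t *v)
{
	/* placeholder: the arch's real atomic RMW instruction belongs here */
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

/* ... the remaining arch_atomic*() and arch_atomic64*() operations ... */

/* include last so the instrumented atomic_*() wrappers get generated */
#include <asm-generic/atomic-instrumented.h>

#endif /* _ASM_FOO_ATOMIC_H */

Callers keep using atomic_read()/atomic_add() as usual; the wrappers below add the kasan_check_read()/kasan_check_write() calls before forwarding to the arch_ implementation.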
+ +static inline int +atomic_read(const atomic_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic_read(v); +} +#define atomic_read atomic_read + +#if defined(arch_atomic_read_acquire) +static inline int +atomic_read_acquire(const atomic_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic_read_acquire(v); +} +#define atomic_read_acquire atomic_read_acquire +#endif + +static inline void +atomic_set(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_set(v, i); +} +#define atomic_set atomic_set + +#if defined(arch_atomic_set_release) +static inline void +atomic_set_release(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_set_release(v, i); +} +#define atomic_set_release atomic_set_release +#endif + +static inline void +atomic_add(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_add(i, v); +} +#define atomic_add atomic_add + +#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) +static inline int +atomic_add_return(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} +#define atomic_add_return atomic_add_return +#endif + +#if defined(arch_atomic_add_return_acquire) +static inline int +atomic_add_return_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_return_acquire(i, v); +} +#define atomic_add_return_acquire atomic_add_return_acquire +#endif + +#if defined(arch_atomic_add_return_release) +static inline int +atomic_add_return_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_return_release(i, v); +} +#define atomic_add_return_release atomic_add_return_release +#endif + +#if defined(arch_atomic_add_return_relaxed) +static inline int +atomic_add_return_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_return_relaxed(i, v); +} +#define atomic_add_return_relaxed atomic_add_return_relaxed +#endif + +#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) +static inline int +atomic_fetch_add(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} +#define atomic_fetch_add atomic_fetch_add +#endif + +#if defined(arch_atomic_fetch_add_acquire) +static inline int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add_acquire(i, v); +} +#define atomic_fetch_add_acquire atomic_fetch_add_acquire +#endif + +#if defined(arch_atomic_fetch_add_release) +static inline int +atomic_fetch_add_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add_release(i, v); +} +#define atomic_fetch_add_release atomic_fetch_add_release +#endif + +#if defined(arch_atomic_fetch_add_relaxed) +static inline int +atomic_fetch_add_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add_relaxed(i, v); +} +#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed +#endif + +static inline void +atomic_sub(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_sub(i, v); +} +#define atomic_sub atomic_sub + +#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) +static inline int +atomic_sub_return(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return
arch_atomic_sub_return(i, v); +} +#define atomic_sub_return atomic_sub_return +#endif + +#if defined(arch_atomic_sub_return_acquire) +static inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return_acquire(i, v); +} +#define atomic_sub_return_acquire atomic_sub_return_acquire +#endif + +#if defined(arch_atomic_sub_return_release) +static inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return_release(i, v); +} +#define atomic_sub_return_release atomic_sub_return_release +#endif + +#if defined(arch_atomic_sub_return_relaxed) +static inline int +atomic_sub_return_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return_relaxed(i, v); +} +#define atomic_sub_return_relaxed atomic_sub_return_relaxed +#endif + +#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) +static inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} +#define atomic_fetch_sub atomic_fetch_sub +#endif + +#if defined(arch_atomic_fetch_sub_acquire) +static inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_acquire(i, v); +} +#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#endif + +#if defined(arch_atomic_fetch_sub_release) +static inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_release(i, v); +} +#define atomic_fetch_sub_release atomic_fetch_sub_release +#endif + +#if defined(arch_atomic_fetch_sub_relaxed) +static inline int +atomic_fetch_sub_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_relaxed(i, v); +} +#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed +#endif + +#if defined(arch_atomic_inc) +static inline void +atomic_inc(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_inc(v); +} +#define atomic_inc atomic_inc +#endif + +#if defined(arch_atomic_inc_return) +static inline int +atomic_inc_return(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return(v); +} +#define atomic_inc_return atomic_inc_return +#endif + +#if defined(arch_atomic_inc_return_acquire) +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return_acquire(v); +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#if defined(arch_atomic_inc_return_release) +static inline int +atomic_inc_return_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return_release(v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#if defined(arch_atomic_inc_return_relaxed) +static inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return_relaxed(v); +} +#define atomic_inc_return_relaxed atomic_inc_return_relaxed +#endif + +#if defined(arch_atomic_fetch_inc) +static inline int +atomic_fetch_inc(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc(v); +} +#define atomic_fetch_inc atomic_fetch_inc +#endif + +#if defined(arch_atomic_fetch_inc_acquire) +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_acquire(v); +} 
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#if defined(arch_atomic_fetch_inc_release) +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_release(v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#if defined(arch_atomic_fetch_inc_relaxed) +static inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_relaxed(v); +} +#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed +#endif + +#if defined(arch_atomic_dec) +static inline void +atomic_dec(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_dec(v); +} +#define atomic_dec atomic_dec +#endif + +#if defined(arch_atomic_dec_return) +static inline int +atomic_dec_return(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_return(v); +} +#define atomic_dec_return atomic_dec_return +#endif + +#if defined(arch_atomic_dec_return_acquire) +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_return_acquire(v); +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#if defined(arch_atomic_dec_return_release) +static inline int +atomic_dec_return_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_return_release(v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#if defined(arch_atomic_dec_return_relaxed) +static inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_return_relaxed(v); +} +#define atomic_dec_return_relaxed atomic_dec_return_relaxed +#endif + +#if defined(arch_atomic_fetch_dec) +static inline int +atomic_fetch_dec(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec(v); +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#if defined(arch_atomic_fetch_dec_acquire) +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_acquire(v); +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#if defined(arch_atomic_fetch_dec_release) +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_release(v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#if defined(arch_atomic_fetch_dec_relaxed) +static inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_relaxed(v); +} +#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed +#endif + +static inline void +atomic_and(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_and(i, v); +} +#define atomic_and atomic_and + +#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) +static inline int +atomic_fetch_and(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} +#define atomic_fetch_and atomic_fetch_and +#endif + +#if defined(arch_atomic_fetch_and_acquire) +static inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and_acquire(i, v); +} +#define atomic_fetch_and_acquire atomic_fetch_and_acquire +#endif + +#if defined(arch_atomic_fetch_and_release) +static inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + 
kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and_release(i, v); +} +#define atomic_fetch_and_release atomic_fetch_and_release +#endif + +#if defined(arch_atomic_fetch_and_relaxed) +static inline int +atomic_fetch_and_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and_relaxed(i, v); +} +#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed +#endif + +#if defined(arch_atomic_andnot) +static inline void +atomic_andnot(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_andnot(i, v); +} +#define atomic_andnot atomic_andnot +#endif + +#if defined(arch_atomic_fetch_andnot) +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot(i, v); +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#if defined(arch_atomic_fetch_andnot_acquire) +static inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_acquire(i, v); +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#if defined(arch_atomic_fetch_andnot_release) +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_release(i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#if defined(arch_atomic_fetch_andnot_relaxed) +static inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_relaxed(i, v); +} +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed +#endif + +static inline void +atomic_or(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_or(i, v); +} +#define atomic_or atomic_or + +#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) +static inline int +atomic_fetch_or(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} +#define atomic_fetch_or atomic_fetch_or +#endif + +#if defined(arch_atomic_fetch_or_acquire) +static inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or_acquire(i, v); +} +#define atomic_fetch_or_acquire atomic_fetch_or_acquire +#endif + +#if defined(arch_atomic_fetch_or_release) +static inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or_release(i, v); +} +#define atomic_fetch_or_release atomic_fetch_or_release +#endif + +#if defined(arch_atomic_fetch_or_relaxed) +static inline int +atomic_fetch_or_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or_relaxed(i, v); +} +#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed +#endif + +static inline void +atomic_xor(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_xor(i, v); +} +#define atomic_xor atomic_xor + +#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) +static inline int +atomic_fetch_xor(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} +#define atomic_fetch_xor atomic_fetch_xor +#endif + +#if defined(arch_atomic_fetch_xor_acquire) +static inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_acquire(i, v); +} +#define atomic_fetch_xor_acquire 
atomic_fetch_xor_acquire +#endif + +#if defined(arch_atomic_fetch_xor_release) +static inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_release(i, v); +} +#define atomic_fetch_xor_release atomic_fetch_xor_release +#endif + +#if defined(arch_atomic_fetch_xor_relaxed) +static inline int +atomic_fetch_xor_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_relaxed(i, v); +} +#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed +#endif + +#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) +static inline int +atomic_xchg(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} +#define atomic_xchg atomic_xchg +#endif + +#if defined(arch_atomic_xchg_acquire) +static inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_xchg_acquire(v, i); +} +#define atomic_xchg_acquire atomic_xchg_acquire +#endif + +#if defined(arch_atomic_xchg_release) +static inline int +atomic_xchg_release(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_xchg_release(v, i); +} +#define atomic_xchg_release atomic_xchg_release +#endif + +#if defined(arch_atomic_xchg_relaxed) +static inline int +atomic_xchg_relaxed(atomic_t *v, int i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_xchg_relaxed(v, i); +} +#define atomic_xchg_relaxed atomic_xchg_relaxed +#endif + +#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) +static inline int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} +#define atomic_cmpxchg atomic_cmpxchg +#endif + +#if defined(arch_atomic_cmpxchg_acquire) +static inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_acquire(v, old, new); +} +#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#endif + +#if defined(arch_atomic_cmpxchg_release) +static inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_release(v, old, new); +} +#define atomic_cmpxchg_release atomic_cmpxchg_release +#endif + +#if defined(arch_atomic_cmpxchg_relaxed) +static inline int +atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} +#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed +#endif + +#if defined(arch_atomic_try_cmpxchg) +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#if defined(arch_atomic_try_cmpxchg_acquire) +static inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_acquire(v, old, new); +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#if defined(arch_atomic_try_cmpxchg_release) +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_release(v, old, new); +} +#define 
atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#if defined(arch_atomic_try_cmpxchg_relaxed) +static inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +#endif + +#if defined(arch_atomic_sub_and_test) +static inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} +#define atomic_sub_and_test atomic_sub_and_test +#endif + +#if defined(arch_atomic_dec_and_test) +static inline bool +atomic_dec_and_test(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} +#define atomic_dec_and_test atomic_dec_and_test +#endif + +#if defined(arch_atomic_inc_and_test) +static inline bool +atomic_inc_and_test(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_and_test(v); +} +#define atomic_inc_and_test atomic_inc_and_test +#endif + +#if defined(arch_atomic_add_negative) +static inline bool +atomic_add_negative(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_negative(i, v); +} +#define atomic_add_negative atomic_add_negative +#endif + +#if defined(arch_atomic_fetch_add_unless) +static inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add_unless(v, a, u); +} +#define atomic_fetch_add_unless atomic_fetch_add_unless +#endif + +#if defined(arch_atomic_add_unless) +static inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_unless(v, a, u); +} +#define atomic_add_unless atomic_add_unless +#endif + +#if defined(arch_atomic_inc_not_zero) +static inline bool +atomic_inc_not_zero(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_not_zero(v); +} +#define atomic_inc_not_zero atomic_inc_not_zero +#endif + +#if defined(arch_atomic_inc_unless_negative) +static inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_unless_negative(v); +} +#define atomic_inc_unless_negative atomic_inc_unless_negative +#endif + +#if defined(arch_atomic_dec_unless_positive) +static inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_unless_positive(v); +} +#define atomic_dec_unless_positive atomic_dec_unless_positive +#endif + +#if defined(arch_atomic_dec_if_positive) +static inline int +atomic_dec_if_positive(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_if_positive(v); +} +#define atomic_dec_if_positive atomic_dec_if_positive +#endif + +static inline s64 +atomic64_read(const atomic64_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} +#define atomic64_read atomic64_read + +#if defined(arch_atomic64_read_acquire) +static inline s64 +atomic64_read_acquire(const atomic64_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic64_read_acquire(v); +} +#define atomic64_read_acquire atomic64_read_acquire +#endif + +static inline void +atomic64_set(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} +#define atomic64_set atomic64_set + +#if defined(arch_atomic64_set_release) +static inline void +atomic64_set_release(atomic64_t *v, s64 i) 
+{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_set_release(v, i); +} +#define atomic64_set_release atomic64_set_release +#endif + +static inline void +atomic64_add(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} +#define atomic64_add atomic64_add + +#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) +static inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} +#define atomic64_add_return atomic64_add_return +#endif + +#if defined(arch_atomic64_add_return_acquire) +static inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_return_acquire(i, v); +} +#define atomic64_add_return_acquire atomic64_add_return_acquire +#endif + +#if defined(arch_atomic64_add_return_release) +static inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_return_release(i, v); +} +#define atomic64_add_return_release atomic64_add_return_release +#endif + +#if defined(arch_atomic64_add_return_relaxed) +static inline s64 +atomic64_add_return_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_return_relaxed(i, v); +} +#define atomic64_add_return_relaxed atomic64_add_return_relaxed +#endif + +#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) +static inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} +#define atomic64_fetch_add atomic64_fetch_add +#endif + +#if defined(arch_atomic64_fetch_add_acquire) +static inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_acquire(i, v); +} +#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#endif + +#if defined(arch_atomic64_fetch_add_release) +static inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_release(i, v); +} +#define atomic64_fetch_add_release atomic64_fetch_add_release +#endif + +#if defined(arch_atomic64_fetch_add_relaxed) +static inline s64 +atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_relaxed(i, v); +} +#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed +#endif + +static inline void +atomic64_sub(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} +#define atomic64_sub atomic64_sub + +#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) +static inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} +#define atomic64_sub_return atomic64_sub_return +#endif + +#if defined(arch_atomic64_sub_return_acquire) +static inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return_acquire(i, v); +} +#define atomic64_sub_return_acquire atomic64_sub_return_acquire +#endif + +#if defined(arch_atomic64_sub_return_release) +static inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return_release(i, v); +} +#define atomic64_sub_return_release 
atomic64_sub_return_release +#endif + +#if defined(arch_atomic64_sub_return_relaxed) +static inline s64 +atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return_relaxed(i, v); +} +#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed +#endif + +#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) +static inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} +#define atomic64_fetch_sub atomic64_fetch_sub +#endif + +#if defined(arch_atomic64_fetch_sub_acquire) +static inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_acquire(i, v); +} +#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#endif + +#if defined(arch_atomic64_fetch_sub_release) +static inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_release(i, v); +} +#define atomic64_fetch_sub_release atomic64_fetch_sub_release +#endif + +#if defined(arch_atomic64_fetch_sub_relaxed) +static inline s64 +atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_relaxed(i, v); +} +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed +#endif + +#if defined(arch_atomic64_inc) +static inline void +atomic64_inc(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} +#define atomic64_inc atomic64_inc +#endif + +#if defined(arch_atomic64_inc_return) +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#if defined(arch_atomic64_inc_return_acquire) +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return_acquire(v); +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#if defined(arch_atomic64_inc_return_release) +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return_release(v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#if defined(arch_atomic64_inc_return_relaxed) +static inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return_relaxed(v); +} +#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed +#endif + +#if defined(arch_atomic64_fetch_inc) +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc(v); +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#if defined(arch_atomic64_fetch_inc_acquire) +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_acquire(v); +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#if defined(arch_atomic64_fetch_inc_release) +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_release(v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#if defined(arch_atomic64_fetch_inc_relaxed) +static inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) 
+{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_relaxed(v); +} +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed +#endif + +#if defined(arch_atomic64_dec) +static inline void +atomic64_dec(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} +#define atomic64_dec atomic64_dec +#endif + +#if defined(arch_atomic64_dec_return) +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#if defined(arch_atomic64_dec_return_acquire) +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return_acquire(v); +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#if defined(arch_atomic64_dec_return_release) +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return_release(v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#if defined(arch_atomic64_dec_return_relaxed) +static inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return_relaxed(v); +} +#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed +#endif + +#if defined(arch_atomic64_fetch_dec) +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec(v); +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#if defined(arch_atomic64_fetch_dec_acquire) +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_acquire(v); +} +#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#endif + +#if defined(arch_atomic64_fetch_dec_release) +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_release(v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#if defined(arch_atomic64_fetch_dec_relaxed) +static inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_relaxed(v); +} +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed +#endif + +static inline void +atomic64_and(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} +#define atomic64_and atomic64_and + +#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) +static inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} +#define atomic64_fetch_and atomic64_fetch_and +#endif + +#if defined(arch_atomic64_fetch_and_acquire) +static inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_acquire(i, v); +} +#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#endif + +#if defined(arch_atomic64_fetch_and_release) +static inline s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_release(i, v); +} +#define atomic64_fetch_and_release atomic64_fetch_and_release +#endif + +#if defined(arch_atomic64_fetch_and_relaxed) +static inline s64 +atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) +{ 
+ kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_relaxed(i, v); +} +#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed +#endif + +#if defined(arch_atomic64_andnot) +static inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_andnot(i, v); +} +#define atomic64_andnot atomic64_andnot +#endif + +#if defined(arch_atomic64_fetch_andnot) +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot(i, v); +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#if defined(arch_atomic64_fetch_andnot_acquire) +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_acquire(i, v); +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#if defined(arch_atomic64_fetch_andnot_release) +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_release(i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#if defined(arch_atomic64_fetch_andnot_relaxed) +static inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed +#endif + +static inline void +atomic64_or(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_or(i, v); +} +#define atomic64_or atomic64_or + +#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) +static inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} +#define atomic64_fetch_or atomic64_fetch_or +#endif + +#if defined(arch_atomic64_fetch_or_acquire) +static inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_acquire(i, v); +} +#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#endif + +#if defined(arch_atomic64_fetch_or_release) +static inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_release(i, v); +} +#define atomic64_fetch_or_release atomic64_fetch_or_release +#endif + +#if defined(arch_atomic64_fetch_or_relaxed) +static inline s64 +atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_relaxed(i, v); +} +#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed +#endif + +static inline void +atomic64_xor(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); +} +#define atomic64_xor atomic64_xor + +#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) +static inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} +#define atomic64_fetch_xor atomic64_fetch_xor +#endif + +#if defined(arch_atomic64_fetch_xor_acquire) +static inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_acquire(i, v); +} +#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#endif + +#if defined(arch_atomic64_fetch_xor_release) +static inline s64 
+atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_release(i, v); +} +#define atomic64_fetch_xor_release atomic64_fetch_xor_release +#endif + +#if defined(arch_atomic64_fetch_xor_relaxed) +static inline s64 +atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_relaxed(i, v); +} +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed +#endif + +#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) +static inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} +#define atomic64_xchg atomic64_xchg +#endif + +#if defined(arch_atomic64_xchg_acquire) +static inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg_acquire(v, i); +} +#define atomic64_xchg_acquire atomic64_xchg_acquire +#endif + +#if defined(arch_atomic64_xchg_release) +static inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg_release(v, i); +} +#define atomic64_xchg_release atomic64_xchg_release +#endif + +#if defined(arch_atomic64_xchg_relaxed) +static inline s64 +atomic64_xchg_relaxed(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg_relaxed(v, i); +} +#define atomic64_xchg_relaxed atomic64_xchg_relaxed +#endif + +#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) +static inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} +#define atomic64_cmpxchg atomic64_cmpxchg +#endif + +#if defined(arch_atomic64_cmpxchg_acquire) +static inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_acquire(v, old, new); +} +#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#endif + +#if defined(arch_atomic64_cmpxchg_release) +static inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_release(v, old, new); +} +#define atomic64_cmpxchg_release atomic64_cmpxchg_release +#endif + +#if defined(arch_atomic64_cmpxchg_relaxed) +static inline s64 +atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed +#endif + +#if defined(arch_atomic64_try_cmpxchg) +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#if defined(arch_atomic64_try_cmpxchg_acquire) +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_acquire(v, old, new); +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#if defined(arch_atomic64_try_cmpxchg_release) +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return 
arch_atomic64_try_cmpxchg_release(v, old, new); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#if defined(arch_atomic64_try_cmpxchg_relaxed) +static inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +#endif + +#if defined(arch_atomic64_sub_and_test) +static inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_and_test(i, v); +} +#define atomic64_sub_and_test atomic64_sub_and_test +#endif + +#if defined(arch_atomic64_dec_and_test) +static inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_and_test(v); +} +#define atomic64_dec_and_test atomic64_dec_and_test +#endif + +#if defined(arch_atomic64_inc_and_test) +static inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} +#define atomic64_inc_and_test atomic64_inc_and_test +#endif + +#if defined(arch_atomic64_add_negative) +static inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_negative(i, v); +} +#define atomic64_add_negative atomic64_add_negative +#endif + +#if defined(arch_atomic64_fetch_add_unless) +static inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +#endif + +#if defined(arch_atomic64_add_unless) +static inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_unless(v, a, u); +} +#define atomic64_add_unless atomic64_add_unless +#endif + +#if defined(arch_atomic64_inc_not_zero) +static inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} +#define atomic64_inc_not_zero atomic64_inc_not_zero +#endif + +#if defined(arch_atomic64_inc_unless_negative) +static inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_unless_negative(v); +} +#define atomic64_inc_unless_negative atomic64_inc_unless_negative +#endif + +#if defined(arch_atomic64_dec_unless_positive) +static inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_unless_positive(v); +} +#define atomic64_dec_unless_positive atomic64_dec_unless_positive +#endif + +#if defined(arch_atomic64_dec_if_positive) +static inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} +#define atomic64_dec_if_positive atomic64_dec_if_positive +#endif + +#if !defined(arch_xchg_relaxed) || defined(arch_xchg) +#define xchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_xchg_acquire) +#define xchg_acquire(ptr, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_xchg_release) +#define xchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_release(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_xchg_relaxed) +#define xchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg) +#define cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg_acquire) +#define cmpxchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg_release) +#define cmpxchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg_relaxed) +#define cmpxchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64) +#define cmpxchg64(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg64_acquire) +#define cmpxchg64_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg64_release) +#define cmpxchg64_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg64_relaxed) +#define cmpxchg64_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#define cmpxchg_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define sync_cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_double(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ +}) + + +#define cmpxchg_double_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ +}) + +#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ +// b29b625d5de9280f680e42c7be859b55b15e5f6a
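All of the xchg()/cmpxchg() wrappers above follow one pattern: a GNU statement expression binds ptr to __ai_ptr exactly once, the access is reported to KASAN at its full width (the _double variants check 2 * sizeof(*__ai_ptr) because they operate on a pair of adjacent words), and only then does the arch_ primitive do the real work. A self-contained user-space analogue of the same instrument-then-delegate shape (check_access() and traced_store() are invented stand-ins for kasan_check_write() and the real operation; builds with GCC or Clang, which support statement expressions and __typeof__):

#include <stdio.h>

static void check_access(const void *addr, size_t size)
{
	printf("checking %zu byte(s) at %p\n", size, addr);
}

#define traced_store(ptr, val)					\
({								\
	__typeof__(ptr) __p = (ptr);	/* evaluate ptr exactly once */	\
	check_access(__p, sizeof(*__p));/* instrument first */	\
	*__p = (val);			/* then do the real store */	\
})

int main(void)
{
	long x = 0;
	traced_store(&x, 42L);
	printf("x = %ld\n", x);
	return 0;
}

Evaluating ptr into a local first matters because the macro arguments may have side effects; the kernel macros use the same __ai_ptr temporary for exactly that reason.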
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h new file mode 100644 index 0000000..881c7e2 --- /dev/null +++ b/include/asm-generic/atomic-long.h @@ -0,0 +1,1013 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-long.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _ASM_GENERIC_ATOMIC_LONG_H +#define _ASM_GENERIC_ATOMIC_LONG_H + +#include <asm/types.h> + +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#else +typedef atomic_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#endif + +#ifdef CONFIG_64BIT + +static inline long +atomic_long_read(const atomic_long_t *v) +{ + return atomic64_read(v); +} + +static inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + return atomic64_read_acquire(v); +} + +static inline void +atomic_long_set(atomic_long_t *v, long i) +{ + atomic64_set(v, i); +} + +static inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + atomic64_set_release(v, i); +} + +static inline void +atomic_long_add(long i, atomic_long_t *v) +{ + atomic64_add(i, v); +} + +static inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + return atomic64_add_return(i, v); +} + +static inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_add_return_acquire(i, v); +} + +static inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return atomic64_add_return_release(i, v); +} + +static inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_add_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return atomic64_fetch_add(i, v); +} + +static inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_acquire(i, v); +} + +static inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_release(i, v); +} + +static inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_relaxed(i, v); +} + +static inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic64_sub(i, v); +} + +static inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic64_sub_return(i, v); +} + +static inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_sub_return_acquire(i, v); +} + +static inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic64_sub_return_release(i, v); +} + +static inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_sub_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub(i, v); +} + +static inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_acquire(i, v); +} + +static inline long +atomic_long_fetch_sub_release(long i,
atomic_long_t *v) +{ + return atomic64_fetch_sub_release(i, v); +} + +static inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_relaxed(i, v); +} + +static inline void +atomic_long_inc(atomic_long_t *v) +{ + atomic64_inc(v); +} + +static inline long +atomic_long_inc_return(atomic_long_t *v) +{ + return atomic64_inc_return(v); +} + +static inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic64_inc_return_acquire(v); +} + +static inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic64_inc_return_release(v); +} + +static inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic64_inc_return_relaxed(v); +} + +static inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic64_fetch_inc(v); +} + +static inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return atomic64_fetch_inc_acquire(v); +} + +static inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic64_fetch_inc_release(v); +} + +static inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_inc_relaxed(v); +} + +static inline void +atomic_long_dec(atomic_long_t *v) +{ + atomic64_dec(v); +} + +static inline long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic64_dec_return(v); +} + +static inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic64_dec_return_acquire(v); +} + +static inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic64_dec_return_release(v); +} + +static inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic64_dec_return_relaxed(v); +} + +static inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic64_fetch_dec(v); +} + +static inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic64_fetch_dec_acquire(v); +} + +static inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic64_fetch_dec_release(v); +} + +static inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_dec_relaxed(v); +} + +static inline void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic64_and(i, v); +} + +static inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic64_fetch_and(i, v); +} + +static inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_acquire(i, v); +} + +static inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_release(i, v); +} + +static inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_relaxed(i, v); +} + +static inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic64_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_acquire(i, v); +} + +static inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_release(i, v); +} + +static inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_relaxed(i, v); +} + +static inline void +atomic_long_or(long i, atomic_long_t *v) +{ + atomic64_or(i, v); +} + +static inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ 
+ return atomic64_fetch_or(i, v); +} + +static inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_acquire(i, v); +} + +static inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_release(i, v); +} + +static inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_relaxed(i, v); +} + +static inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic64_xor(i, v); +} + +static inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor(i, v); +} + +static inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_acquire(i, v); +} + +static inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_release(i, v); +} + +static inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_relaxed(i, v); +} + +static inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic64_xchg(v, i); +} + +static inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return atomic64_xchg_acquire(v, i); +} + +static inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic64_xchg_release(v, i); +} + +static inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return atomic64_xchg_relaxed(v, i); +} + +static inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg(v, old, new); +} + +static inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_acquire(v, old, new); +} + +static inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_release(v, old, new); +} + +static inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_relaxed(v, old, new); +} + +static inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg(v, (s64 *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic64_sub_and_test(i, v); +} + +static inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic64_dec_and_test(v); +} + +static inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic64_inc_and_test(v); +} + +static inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic64_add_negative(i, v); +} + +static inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_fetch_add_unless(v, a, u); +} + +static inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_add_unless(v, a, u); +} + +static inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + return atomic64_inc_not_zero(v); +} + +static inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return 
atomic64_inc_unless_negative(v); +} + +static inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic64_dec_unless_positive(v); +} + +static inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static inline long +atomic_long_read(const atomic_long_t *v) +{ + return atomic_read(v); +} + +static inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + return atomic_read_acquire(v); +} + +static inline void +atomic_long_set(atomic_long_t *v, long i) +{ + atomic_set(v, i); +} + +static inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + atomic_set_release(v, i); +} + +static inline void +atomic_long_add(long i, atomic_long_t *v) +{ + atomic_add(i, v); +} + +static inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + return atomic_add_return(i, v); +} + +static inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return atomic_add_return_acquire(i, v); +} + +static inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return atomic_add_return_release(i, v); +} + +static inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return atomic_add_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return atomic_fetch_add(i, v); +} + +static inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_add_acquire(i, v); +} + +static inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return atomic_fetch_add_release(i, v); +} + +static inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_add_relaxed(i, v); +} + +static inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic_sub(i, v); +} + +static inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic_sub_return(i, v); +} + +static inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic_sub_return_acquire(i, v); +} + +static inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic_sub_return_release(i, v); +} + +static inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic_sub_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic_fetch_sub(i, v); +} + +static inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_acquire(i, v); +} + +static inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_release(i, v); +} + +static inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_relaxed(i, v); +} + +static inline void +atomic_long_inc(atomic_long_t *v) +{ + atomic_inc(v); +} + +static inline long +atomic_long_inc_return(atomic_long_t *v) +{ + return atomic_inc_return(v); +} + +static inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic_inc_return_acquire(v); +} + +static inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic_inc_return_release(v); +} + +static inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic_inc_return_relaxed(v); +} + +static inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic_fetch_inc(v); +} + +static inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) 
+{ + return atomic_fetch_inc_acquire(v); +} + +static inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic_fetch_inc_release(v); +} + +static inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic_fetch_inc_relaxed(v); +} + +static inline void +atomic_long_dec(atomic_long_t *v) +{ + atomic_dec(v); +} + +static inline long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic_dec_return(v); +} + +static inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic_dec_return_acquire(v); +} + +static inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic_dec_return_release(v); +} + +static inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic_dec_return_relaxed(v); +} + +static inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic_fetch_dec(v); +} + +static inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic_fetch_dec_acquire(v); +} + +static inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic_fetch_dec_release(v); +} + +static inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic_fetch_dec_relaxed(v); +} + +static inline void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic_and(i, v); +} + +static inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic_fetch_and(i, v); +} + +static inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_and_acquire(i, v); +} + +static inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic_fetch_and_release(i, v); +} + +static inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_and_relaxed(i, v); +} + +static inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_acquire(i, v); +} + +static inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_release(i, v); +} + +static inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_relaxed(i, v); +} + +static inline void +atomic_long_or(long i, atomic_long_t *v) +{ + atomic_or(i, v); +} + +static inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return atomic_fetch_or(i, v); +} + +static inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_or_acquire(i, v); +} + +static inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic_fetch_or_release(i, v); +} + +static inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_or_relaxed(i, v); +} + +static inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic_xor(i, v); +} + +static inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic_fetch_xor(i, v); +} + +static inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_acquire(i, v); +} + +static inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_release(i, v); +} + +static inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return 
atomic_fetch_xor_relaxed(i, v); +} + +static inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic_xchg(v, i); +} + +static inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return atomic_xchg_acquire(v, i); +} + +static inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic_xchg_release(v, i); +} + +static inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return atomic_xchg_relaxed(v, i); +} + +static inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg(v, old, new); +} + +static inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_acquire(v, old, new); +} + +static inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_release(v, old, new); +} + +static inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_relaxed(v, old, new); +} + +static inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg(v, (int *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_acquire(v, (int *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic_sub_and_test(i, v); +} + +static inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic_dec_and_test(v); +} + +static inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic_inc_and_test(v); +} + +static inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic_add_negative(i, v); +} + +static inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic_fetch_add_unless(v, a, u); +} + +static inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic_add_unless(v, a, u); +} + +static inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + return atomic_inc_not_zero(v); +} + +static inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return atomic_inc_unless_negative(v); +} + +static inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic_dec_unless_positive(v); +} + +static inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic_dec_if_positive(v); +} + +#endif /* CONFIG_64BIT */ +#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +// 77558968132ce4f911ad53f6f52ce423006f6268 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h new file mode 100644 index 0000000..286867f --- /dev/null +++ b/include/asm-generic/atomic.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Generic C implementation of atomic counter operations. Usable on + * UP systems only. Do not include in machine independent code. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ +#ifndef __ASM_GENERIC_ATOMIC_H +#define __ASM_GENERIC_ATOMIC_H + +#include <asm/cmpxchg.h> +#include <asm/barrier.h> + +/* + * atomic_$op() - $op integer to atomic variable + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier, use + * smp_mb__{before,after}_atomic(). + */ + +/* + * atomic_$op_return() - $op integer to atomic variable and returns the result + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does imply a full memory barrier. + */ + +#ifdef CONFIG_SMP + +/* we can build all atomic primitives from cmpxchg */ + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c c_op i; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c; \ +} + +#else + +#include <linux/irqflags.h> + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = (v->counter = v->counter c_op i); \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = v->counter; \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#endif /* CONFIG_SMP */ + +#ifndef atomic_add_return +ATOMIC_OP_RETURN(add, +) +#endif + +#ifndef atomic_sub_return +ATOMIC_OP_RETURN(sub, -) +#endif + +#ifndef atomic_fetch_add +ATOMIC_FETCH_OP(add, +) +#endif + +#ifndef atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#endif + +#ifndef atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#endif + +#ifndef atomic_fetch_or +ATOMIC_FETCH_OP(or, |) +#endif + +#ifndef atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) +#endif + +#ifndef atomic_and +ATOMIC_OP(and, &) +#endif + +#ifndef atomic_or +ATOMIC_OP(or, |) +#endif + +#ifndef atomic_xor +ATOMIC_OP(xor, ^) +#endif + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +#define ATOMIC_INIT(i) { (i) } + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#ifndef atomic_read +#define atomic_read(v) READ_ONCE((v)->counter) +#endif + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i.
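+ * + * A plain WRITE_ONCE() suffices here because aligned, word-sized stores + * are atomic on every architecture this generic header supports.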
+ */ +#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) + +#include <linux/irqflags.h> + +static inline void atomic_add(int i, atomic_t *v) +{ + atomic_add_return(i, v); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + atomic_sub_return(i, v); +} + +#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) + +#endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h new file mode 100644 index 0000000..370f01d --- /dev/null +++ b/include/asm-generic/atomic64.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Generic implementation of 64-bit atomics using spinlocks, + * useful on processors that don't have 64-bit atomic instructions. + * + * Copyright © 2009 Paul Mackerras, IBM Corp. + */ +#ifndef _ASM_GENERIC_ATOMIC64_H +#define _ASM_GENERIC_ATOMIC64_H +#include <linux/types.h> + +typedef struct { + s64 counter; +} atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +extern s64 atomic64_read(const atomic64_t *v); +extern void atomic64_set(atomic64_t *v, s64 i); + +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + +#define ATOMIC64_OP(op) \ +extern void atomic64_##op(s64 a, atomic64_t *v); + +#define ATOMIC64_OP_RETURN(op) \ +extern s64 atomic64_##op##_return(s64 a, atomic64_t *v); + +#define ATOMIC64_FETCH_OP(op) \ +extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v); + +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) + +ATOMIC64_OPS(add) +ATOMIC64_OPS(sub) + +#undef ATOMIC64_OPS +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) + +ATOMIC64_OPS(and) +ATOMIC64_OPS(or) +ATOMIC64_OPS(xor) + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +extern s64 atomic64_dec_if_positive(atomic64_t *v); +#define atomic64_dec_if_positive atomic64_dec_if_positive +extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); +extern s64 atomic64_xchg(atomic64_t *v, s64 new); +extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); +#define atomic64_fetch_add_unless atomic64_fetch_add_unless + +#endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h new file mode 100644 index 0000000..3316708 --- /dev/null +++ b/include/asm-generic/audit_change_attr.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __NR_chmod +__NR_chmod, +#endif +__NR_fchmod, +#ifdef __NR_chown +__NR_chown, +__NR_lchown, +#endif +#ifdef __NR_fchown +__NR_fchown, +#endif +__NR_setxattr, +__NR_lsetxattr, +__NR_fsetxattr, +__NR_removexattr, +__NR_lremovexattr, +__NR_fremovexattr, +#ifdef __NR_fchownat +__NR_fchownat, +__NR_fchmodat, +#endif +#ifdef __NR_chown32 +__NR_chown32, +__NR_fchown32, +__NR_lchown32, +#endif +#ifdef __NR_link +__NR_link, +#endif +#ifdef __NR_linkat +__NR_linkat, +#endif diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h new file mode 100644 index 0000000..dd5a9dd --- /dev/null +++ b/include/asm-generic/audit_dir_write.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __NR_rename +__NR_rename, +#endif +#ifdef __NR_mkdir +__NR_mkdir, +#endif +#ifdef __NR_rmdir +__NR_rmdir, +#endif +#ifdef __NR_creat +__NR_creat, +#endif +#ifdef __NR_link +__NR_link, +#endif +#ifdef __NR_unlink +__NR_unlink, +#endif +#ifdef __NR_symlink +__NR_symlink, +#endif +#ifdef __NR_mknod +__NR_mknod, +#endif +#ifdef __NR_mkdirat +__NR_mkdirat,
+__NR_mknodat, +__NR_unlinkat, +#ifdef __NR_renameat +__NR_renameat, +#endif +__NR_linkat, +__NR_symlinkat, +#endif +#ifdef __NR_renameat2 +__NR_renameat2, +#endif diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h new file mode 100644 index 0000000..7bb7b5a --- /dev/null +++ b/include/asm-generic/audit_read.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifdef __NR_readlink +__NR_readlink, +#endif +__NR_quotactl, +__NR_listxattr, +__NR_llistxattr, +__NR_flistxattr, +__NR_getxattr, +__NR_lgetxattr, +__NR_fgetxattr, +#ifdef __NR_readlinkat +__NR_readlinkat, +#endif diff --git a/include/asm-generic/audit_signal.h b/include/asm-generic/audit_signal.h new file mode 100644 index 0000000..6feab7f --- /dev/null +++ b/include/asm-generic/audit_signal.h @@ -0,0 +1,3 @@ +__NR_kill, +__NR_tgkill, +__NR_tkill, diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h new file mode 100644 index 0000000..f9f1d0a --- /dev/null +++ b/include/asm-generic/audit_write.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <asm-generic/audit_dir_write.h> +__NR_acct, +#ifdef __NR_swapon +__NR_swapon, +#endif +__NR_quotactl, +#ifdef __NR_truncate +__NR_truncate, +#endif +#ifdef __NR_truncate64 +__NR_truncate64, +#endif +#ifdef __NR_ftruncate +__NR_ftruncate, +#endif +#ifdef __NR_ftruncate64 +__NR_ftruncate64, +#endif +#ifdef __NR_bind +__NR_bind, /* bind can affect fs object only in one way... */ +#endif +#ifdef __NR_fallocate +__NR_fallocate, +#endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h new file mode 100644 index 0000000..85b28eb --- /dev/null +++ b/include/asm-generic/barrier.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Generic barrier definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ +#ifndef __ASM_GENERIC_BARRIER_H +#define __ASM_GENERIC_BARRIER_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> + +#ifndef nop +#define nop() asm volatile ("nop") +#endif + +/* + * Force strict CPU ordering. And yes, this is required on UP too when we're + * talking to devices. + * + * Fall back to compiler barriers if nothing better is provided.
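+ * + * Every definition below is wrapped in #ifndef, so an architecture can + * override any subset of the barriers and inherit the generic fallbacks + * for the rest.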
+ */ + +#ifndef mb +#define mb() barrier() +#endif + +#ifndef rmb +#define rmb() mb() +#endif + +#ifndef wmb +#define wmb() mb() +#endif + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef dma_wmb +#define dma_wmb() wmb() +#endif + +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif + +#ifndef __smp_mb +#define __smp_mb() mb() +#endif + +#ifndef __smp_rmb +#define __smp_rmb() rmb() +#endif + +#ifndef __smp_wmb +#define __smp_wmb() wmb() +#endif + +#ifndef __smp_read_barrier_depends +#define __smp_read_barrier_depends() read_barrier_depends() +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_mb +#define smp_mb() __smp_mb() +#endif + +#ifndef smp_rmb +#define smp_rmb() __smp_rmb() +#endif + +#ifndef smp_wmb +#define smp_wmb() __smp_wmb() +#endif + +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() __smp_read_barrier_depends() +#endif + +#else /* !CONFIG_SMP */ + +#ifndef smp_mb +#define smp_mb() barrier() +#endif + +#ifndef smp_rmb +#define smp_rmb() barrier() +#endif + +#ifndef smp_wmb +#define smp_wmb() barrier() +#endif + +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() do { } while (0) +#endif + +#endif /* CONFIG_SMP */ + +#ifndef __smp_store_mb +#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0) +#endif + +#ifndef __smp_mb__before_atomic +#define __smp_mb__before_atomic() __smp_mb() +#endif + +#ifndef __smp_mb__after_atomic +#define __smp_mb__after_atomic() __smp_mb() +#endif + +#ifndef __smp_store_release +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef __smp_load_acquire +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + ___p1; \ +}) +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_store_mb +#define smp_store_mb(var, value) __smp_store_mb(var, value) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() __smp_mb__before_atomic() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() __smp_mb__after_atomic() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) __smp_store_release(p, v) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) __smp_load_acquire(p) +#endif + +#else /* !CONFIG_SMP */ + +#ifndef smp_store_mb +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() barrier() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() barrier() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) +#endif + +#endif /* CONFIG_SMP */ + +/* Barriers for virtual machine guests when talking to an SMP host */ +#define virt_mb() __smp_mb() +#define virt_rmb() __smp_rmb() +#define virt_wmb() __smp_wmb() +#define virt_read_barrier_depends() __smp_read_barrier_depends() +#define virt_store_mb(var, value) __smp_store_mb(var, value) +#define virt_mb__before_atomic() __smp_mb__before_atomic() +#define virt_mb__after_atomic() __smp_mb__after_atomic() +#define virt_store_release(p, v) __smp_store_release(p, v) +#define 
virt_load_acquire(p) __smp_load_acquire(p) + +/** + * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency + * + * A control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. (load)-ACQUIRE. + * + * Architectures that do not do load speculation can have this be barrier(). + */ +#ifndef smp_acquire__after_ctrl_dep +#define smp_acquire__after_ctrl_dep() smp_rmb() +#endif + +/** + * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees + * @ptr: pointer to the variable to wait on + * @cond: boolean expression to wait for + * + * Equivalent to using READ_ONCE() on the condition variable. + * + * Due to C lacking lambda expressions we load the value of *ptr into a + * pre-named variable @VAL to be used in @cond. + */ +#ifndef smp_cond_load_relaxed +#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + cpu_relax(); \ + } \ + VAL; \ +}) +#endif + +/** + * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering + * @ptr: pointer to the variable to wait on + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + */ +#ifndef smp_cond_load_acquire +#define smp_cond_load_acquire(ptr, cond_expr) ({ \ + typeof(*ptr) _val; \ + _val = smp_cond_load_relaxed(ptr, cond_expr); \ + smp_acquire__after_ctrl_dep(); \ + _val; \ +}) +#endif + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h new file mode 100644 index 0000000..ddd1c6d --- /dev/null +++ b/include/asm-generic/bitops-instrumented.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file provides wrappers with sanitizer instrumentation for bit + * operations. + * + * To use this functionality, an arch's bitops.h file needs to define each of + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), + * arch___set_bit(), etc.). + */ +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H + +#include <linux/kasan-checks.h> + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_set_bit(nr, addr); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___set_bit(nr, addr); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers).
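+ * + * As with set_bit(), callers that need ordering against other memory + * accesses must add it explicitly, e.g. with smp_mb__{before,after}_atomic().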
+ */ +static inline void clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_clear_bit(nr, addr); +} + +/** + * __clear_bit - Clears a bit in memory + * @nr: the bit to clear + * @addr: the address to start counting from + * + * Unlike clear_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___clear_bit(nr, addr); +} + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to clear + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + */ +static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_clear_bit_unlock(nr, addr); +} + +/** + * __clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * This is a non-atomic operation but implies a release barrier before the + * memory operation. It can be used for an unlock if no other CPUs can + * concurrently modify other bits in the word. + */ +static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___clear_bit_unlock(nr, addr); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_change_bit(nr, addr); +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___change_bit(nr, addr); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_set_bit(nr, addr); +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_set_bit(nr, addr); +} + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics if + * the returned value is 0.
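+ * If it returns non-zero, the bit was already set and no particular + * ordering is guaranteed.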
+ * It can be used to implement bit locks. + */ +static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_set_bit_lock(nr, addr); +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_clear_bit(nr, addr); +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_clear_bit(nr, addr); +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_change_bit(nr, addr); +} + +/** + * __test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_change_bit(nr, addr); +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline bool test_bit(long nr, const volatile unsigned long *addr) +{ + kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_bit(nr, addr); +} + +#if defined(arch_clear_bit_unlock_is_negative_byte) +/** + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. + * @nr: the bit to clear + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + * + * This is a bit of a one-trick-pony for the filemap code, which clears + * PG_locked and tests PG_waiters. + */ +static inline bool +clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_clear_bit_unlock_is_negative_byte(nr, addr); +} +/* Let everybody know we have it. */ +#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#endif + +#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h new file mode 100644 index 0000000..bfc96bf --- /dev/null +++ b/include/asm-generic/bitops.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_BITOPS_H +#define __ASM_GENERIC_BITOPS_H + +/* + * For the benefit of those who are trying to port Linux to another + * architecture, here are some C-language equivalents. You should + * recode these in the native assembly language, if at all possible.
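+ * An architecture overrides a given helper simply by providing its own + * version in place of the corresponding asm-generic/bitops/ fragment + * included below.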
+ * + * C language equivalents written by Theodore Ts'o, 9/26/92 + */ + +#include <linux/irqflags.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/lock.h> + +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> + +#endif /* __ASM_GENERIC_BITOPS_H */ diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h new file mode 100644 index 0000000..39e56e1 --- /dev/null +++ b/include/asm-generic/bitops/__ffs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS___FFS_H_ +#define _ASM_GENERIC_BITOPS___FFS_H_ + +#include <asm/types.h> + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + int num = 0; + +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + if ((word & 0x1) == 0) + num += 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h new file mode 100644 index 0000000..03f721a --- /dev/null +++ b/include/asm-generic/bitops/__fls.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS___FLS_H_ +#define _ASM_GENERIC_BITOPS___FLS_H_ + +#include <asm/types.h> + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first.
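+ * + * For example, __fls(0x90UL) == 7, since bit 7 is the highest bit set.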
+ */ +static __always_inline unsigned long __fls(unsigned long word) +{ + int num = BITS_PER_LONG - 1; + +#if BITS_PER_LONG == 64 + if (!(word & (~0ul << 32))) { + num -= 32; + word <<= 32; + } +#endif + if (!(word & (~0ul << (BITS_PER_LONG-16)))) { + num -= 16; + word <<= 16; + } + if (!(word & (~0ul << (BITS_PER_LONG-8)))) { + num -= 8; + word <<= 8; + } + if (!(word & (~0ul << (BITS_PER_LONG-4)))) { + num -= 4; + word <<= 4; + } + if (!(word & (~0ul << (BITS_PER_LONG-2)))) { + num -= 2; + word <<= 2; + } + if (!(word & (~0ul << (BITS_PER_LONG-1)))) + num -= 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */ diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h new file mode 100644 index 0000000..c2705e1 --- /dev/null +++ b/include/asm-generic/bitops/arch_hweight.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ + +#include <asm/types.h> + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return __sw_hweight32(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __sw_hweight16(w); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __sw_hweight8(w); +} + +static inline unsigned long __arch_hweight64(__u64 w) +{ + return __sw_hweight64(w); +} +#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h new file mode 100644 index 0000000..dd90c97 --- /dev/null +++ b/include/asm-generic/bitops/atomic.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_ATOMIC_H_ + +#include <linux/atomic.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + +/* + * Implementation of atomic bitops using atomic-fetch ops. + * See Documentation/atomic_bitops.txt for details.
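+ * + * Each operation converts the bit number into a word pointer plus mask + * (BIT_WORD()/BIT_MASK()) and delegates to the matching atomic_long_*() op.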
+ */ + +static inline void set_bit(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); +} + +static inline void clear_bit(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); +} + +static inline void change_bit(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); +} + +static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; + + old = atomic_long_fetch_or(mask, (atomic_long_t *)p); + return !!(old & mask); +} + +static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (!(READ_ONCE(*p) & mask)) + return 0; + + old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + return !!(old & mask); +} + +static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); + return !!(old & mask); +} + +#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h new file mode 100644 index 0000000..87024da --- /dev/null +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + return __builtin_ctzl(word); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h new file mode 100644 index 0000000..43a5aa9 --- /dev/null +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __fls(unsigned long word) +{ + return (sizeof(word) * 8) - 1 - __builtin_clzl(word); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h new file mode 100644 index 0000000..458c85e --- /dev/null +++ b/include/asm-generic/bitops/builtin-ffs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). 
+ */ +static __always_inline int ffs(int x) +{ + return __builtin_ffs(x); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h new file mode 100644 index 0000000..c8455cc --- /dev/null +++ b/include/asm-generic/bitops/builtin-fls.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __always_inline int fls(unsigned int x) +{ + return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; +} + +#endif diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h new file mode 100644 index 0000000..149faee --- /dev/null +++ b/include/asm-generic/bitops/const_hweight.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ + +/* + * Compile time versions of __arch_hweightN() + */ +#define __const_hweight8(w) \ + ((unsigned int) \ + ((!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ + (!!((w) & (1ULL << 7))))) + +#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) +#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) +#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) + +/* + * Generic interface. + */ +#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) +#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) +#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) +#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) + +/* + * Interface for known constant arguments + */ +#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) +#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) +#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) +#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) + +/* + * Type invariant interface to the compile time constant hweight functions. 
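+ * For example, HWEIGHT(0xffUL) == 8, while a non-constant argument fails + * to build via BUILD_BUG_ON_ZERO().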
+ */ +#define HWEIGHT(w) HWEIGHT64((u64)w) + +#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h new file mode 100644 index 0000000..b041cbf --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic-setbit.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ + +/* + * Atomic bitops based version of ext2 atomic bitops + */ + +#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr) +#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */ diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h new file mode 100644 index 0000000..0cfc318 --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ + +/* + * Spinlock based version of ext2 atomic bitops + */ + +#define ext2_set_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = __test_and_set_bit_le(nr, addr); \ + spin_unlock(lock); \ + ret; \ + }) + +#define ext2_clear_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = __test_and_clear_bit_le(nr, addr); \ + spin_unlock(lock); \ + ret; \ + }) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h new file mode 100644 index 0000000..e81868b --- /dev/null +++ b/include/asm-generic/bitops/ffs.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FFS_H_ +#define _ASM_GENERIC_BITOPS_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static inline int ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h new file mode 100644 index 0000000..0d01008 --- /dev/null +++ b/include/asm-generic/bitops/ffz.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FFZ_H_ +#define _ASM_GENERIC_BITOPS_FFZ_H_ + +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. 
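+ * + * For example, ffz(0x3UL) == 2, the index of the lowest clear bit.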
+ */ +#define ffz(x) __ffs(~(x)) + +#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 0000000..8a1ee10 --- /dev/null +++ b/include/asm-generic/bitops/find.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FIND_H_ +#define _ASM_GENERIC_BITOPS_FIND_H_ + +#ifndef find_next_bit +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit. + * If no bits are set, returns @size. + */ +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +#endif + +#ifndef find_next_and_bit +/** + * find_next_and_bit - find the next set bit in both memory regions + * @addr1: The first address to base the search on + * @addr2: The second address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit. + * If no bits are set, returns @size. + */ +extern unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset); +#endif + +#ifndef find_next_zero_bit +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number of the next zero bit. + * If no bits are zero, returns @size. + */ +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum number of bits to search + * + * Returns the bit number of the first set bit. + * If no bits are set, returns @size. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum number of bits to search + * + * Returns the bit number of the first cleared bit. + * If no bits are zero, returns @size. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#ifndef find_first_bit +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#endif +#ifndef find_first_zero_bit +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +#endif + +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h new file mode 100644 index 0000000..b168bb1 --- /dev/null +++ b/include/asm-generic/bitops/fls.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FLS_H_ +#define _ASM_GENERIC_BITOPS_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
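+ * For nonzero x, fls(x) == __fls(x) + 1.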
+ */ + +static __always_inline int fls(unsigned int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h new file mode 100644 index 0000000..866f2b2 --- /dev/null +++ b/include/asm-generic/bitops/fls64.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ +#define _ASM_GENERIC_BITOPS_FLS64_H_ + +#include <asm/types.h> + +/** + * fls64 - find last set bit in a 64-bit word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffsll, but returns the position of the most significant set bit. + * + * fls64(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 64. + */ +#if BITS_PER_LONG == 32 +static __always_inline int fls64(__u64 x) +{ + __u32 h = x >> 32; + if (h) + return fls(h) + 32; + return fls(x); +} +#elif BITS_PER_LONG == 64 +static __always_inline int fls64(__u64 x) +{ + if (x == 0) + return 0; + return __fls(x) + 1; +} +#else +#error BITS_PER_LONG not 32 or 64 +#endif + +#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 0000000..6bf1bba --- /dev/null +++ b/include/asm-generic/bitops/hweight.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ + +#include <asm-generic/bitops/arch_hweight.h> +#include <asm-generic/bitops/const_hweight.h> + +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 0000000..188d3eb --- /dev/null +++ b/include/asm-generic/bitops/le.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_LE_H_ +#define _ASM_GENERIC_BITOPS_LE_H_ + +#include <asm/types.h> +#include <asm/byteorder.h> + +#if defined(__LITTLE_ENDIAN) + +#define BITOP_LE_SWIZZLE 0 + +static inline unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_zero_bit(addr, size, offset); +} + +static inline unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_bit(addr, size, offset); +} + +static inline unsigned long find_first_zero_bit_le(const void *addr, + unsigned long size) +{ + return find_first_zero_bit(addr, size); +} + +#elif defined(__BIG_ENDIAN) + +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +#ifndef find_next_zero_bit_le +extern unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset); +#endif + +#ifndef find_next_bit_le +extern unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset); +#endif + +#ifndef find_first_zero_bit_le +#define find_first_zero_bit_le(addr, size) \ + find_next_zero_bit_le((addr), (size), 0) +#endif + +#else +#error "Please fix <asm/byteorder.h>" +#endif + +static inline int test_bit_le(int nr, const void *addr) +{ + return test_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void set_bit_le(int nr, void *addr) +{ + set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void clear_bit_le(int nr,
void *addr) +{ + clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __set_bit_le(int nr, void *addr) +{ + __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __clear_bit_le(int nr, void *addr) +{ + __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_set_bit_le(int nr, void *addr) +{ + return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_clear_bit_le(int nr, void *addr) +{ + return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_set_bit_le(int nr, void *addr) +{ + return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_clear_bit_le(int nr, void *addr) +{ + return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h new file mode 100644 index 0000000..3ae0213 --- /dev/null +++ b/include/asm-generic/bitops/lock.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_BITOPS_LOCK_H_ +#define _ASM_GENERIC_BITOPS_LOCK_H_ + +#include <linux/atomic.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics if + * the returned value is 0. + * It can be used to implement bit locks. + */ +static inline int test_and_set_bit_lock(unsigned int nr, + volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; + + old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); + return !!(old & mask); +} + + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to clear + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + */ +static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); +} + +/** + * __clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to clear + * @addr: the address to start counting from + * + * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all + * the bits in the word are protected by this lock some archs can use weaker + * ops to safely unlock. + * + * See for example x86's implementation. + */ +static inline void __clear_bit_unlock(unsigned int nr, + volatile unsigned long *p) +{ + unsigned long old; + + p += BIT_WORD(nr); + old = READ_ONCE(*p); + old &= ~BIT_MASK(nr); + atomic_long_set_release((atomic_long_t *)p, old); +} + +/** + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock.
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
new file mode 100644
index 0000000..3ae0213
--- /dev/null
+++ b/include/asm-generic/bitops/lock.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
+#define _ASM_GENERIC_BITOPS_LOCK_H_
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(unsigned int nr,
+					volatile unsigned long *p)
+{
+	long old;
+	unsigned long mask = BIT_MASK(nr);
+
+	p += BIT_WORD(nr);
+	if (READ_ONCE(*p) & mask)
+		return 1;
+
+	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+	return !!(old & mask);
+}
+
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+	p += BIT_WORD(nr);
+	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
+ */
+static inline void __clear_bit_unlock(unsigned int nr,
+				      volatile unsigned long *p)
+{
+	unsigned long old;
+
+	p += BIT_WORD(nr);
+	old = READ_ONCE(*p);
+	old &= ~BIT_MASK(nr);
+	atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                     byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+						     volatile unsigned long *p)
+{
+	long old;
+	unsigned long mask = BIT_MASK(nr);
+
+	p += BIT_WORD(nr);
+	old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+	return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
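
The acquire semantics of test_and_set_bit_lock() and the release semantics of clear_bit_unlock() are what make a single bit usable as a lock. A rough userspace analogue using C11 atomics (a sketch under that assumption, not the kernel interface itself):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Userspace analogue of test_and_set_bit_lock(): acquire on success. */
    static int bit_trylock(atomic_ulong *w, unsigned nr)
    {
        unsigned long mask = 1UL << nr;
        unsigned long old = atomic_fetch_or_explicit(w, mask,
                                                     memory_order_acquire);
        return !(old & mask);   /* 1 on success */
    }

    /* Userspace analogue of clear_bit_unlock(): release semantics. */
    static void bit_unlock(atomic_ulong *w, unsigned nr)
    {
        atomic_fetch_and_explicit(w, ~(1UL << nr), memory_order_release);
    }

    int main(void)
    {
        atomic_ulong word = 0;

        printf("%d\n", bit_trylock(&word, 3));  /* 1: acquired  */
        printf("%d\n", bit_trylock(&word, 3));  /* 0: contended */
        bit_unlock(&word, 3);
        printf("%d\n", bit_trylock(&word, 3));  /* 1 again      */
        return 0;
    }
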
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 0000000..7e10c4b
--- /dev/null
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm/types.h>
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p &= ~mask;
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered!
+ */
+static inline int __test_and_change_bit(int nr,
+					volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
new file mode 100644
index 0000000..86470cf
--- /dev/null
+++ b/include/asm-generic/bitops/sched.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
+#define _ASM_GENERIC_BITOPS_SCHED_H_
+
+#include <linux/compiler.h>	/* unlikely() */
+#include <asm/types.h>
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is set.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+	if (b[0])
+		return __ffs(b[0]);
+	return __ffs(b[1]) + 64;
+#elif BITS_PER_LONG == 32
+	if (b[0])
+		return __ffs(b[0]);
+	if (b[1])
+		return __ffs(b[1]) + 32;
+	if (b[2])
+		return __ffs(b[2]) + 64;
+	return __ffs(b[3]) + 96;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
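
To see what the 64-bit branch of sched_find_first_bit() computes, here is a standalone model where the compiler's ctz builtin stands in for __ffs() (illustrative, not part of the patch; assumes 64-bit longs):

    #include <stdio.h>

    /* Userspace stand-in for __ffs(); like the kernel's, undefined for 0. */
    static unsigned long my_ffs(unsigned long w)
    {
        return __builtin_ctzl(w);
    }

    /* 64-bit variant: the 100-bit priority bitmap fits in two longs,
     * and at least one bit is guaranteed to be set. */
    static int find_first(const unsigned long b[2])
    {
        if (b[0])
            return my_ffs(b[0]);
        return my_ffs(b[1]) + 64;
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 1UL << 35 };  /* only bit 99 is set */

        printf("%d\n", find_first(map));          /* prints 99 */
        return 0;
    }
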
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
new file mode 100644
index 0000000..3905c1c
--- /dev/null
+++ b/include/asm-generic/bitsperlong.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BITS_PER_LONG
+#define __ASM_GENERIC_BITS_PER_LONG
+
+#include <uapi/asm-generic/bitsperlong.h>
+
+
+#ifdef CONFIG_64BIT
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif /* CONFIG_64BIT */
+
+/*
+ * FIXME: The check currently breaks x86-64 build, so it's
+ * temporarily disabled. Please fix x86-64 and reenable
+ */
+#if 0 && BITS_PER_LONG != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+#endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
new file mode 100644
index 0000000..384b5c8
--- /dev/null
+++ b/include/asm-generic/bug.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BUG_H
+#define _ASM_GENERIC_BUG_H
+
+#include <linux/compiler.h>
+
+#define CUT_HERE		"------------[ cut here ]------------\n"
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING		(1 << 0)
+#define BUGFLAG_ONCE		(1 << 1)
+#define BUGFLAG_DONE		(1 << 2)
+#define BUGFLAG_NO_CUT_HERE	(1 << 3)	/* CUT_HERE already sent */
+#define BUGFLAG_TAINT(taint)	((taint) << 8)
+#define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
+#ifdef CONFIG_BUG
+
+#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+	unsigned long	bug_addr;
+#else
+	signed int	bug_addr_disp;
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+	const char	*file;
+#else
+	signed int	file_disp;
+#endif
+	unsigned short	line;
+#endif
+	unsigned short	flags;
+};
+#endif	/* CONFIG_GENERIC_BUG */
+
+/*
+ * Don't use BUG() or BUG_ON() unless there's really no way out; one
+ * example might be detecting data structure corruption in the middle
+ * of an operation that can't be backed out of. If the (sub)system
+ * can somehow continue operating, perhaps with reduced functionality,
+ * it's probably not BUG-worthy.
+ *
+ * If you're tempted to BUG(), think again: is completely giving up
+ * really the *only* solution? There are usually better options, where
+ * users don't need to reboot ASAP and can mostly shut down cleanly.
+ */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+	barrier_before_unreachable(); \
+	panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
+#endif
+
+/*
+ * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
+ * significant kernel issues that need prompt attention if they should ever
+ * appear at runtime.
+ *
+ * Do not use these macros when checking for invalid external inputs
+ * (e.g. invalid system call arguments, or invalid data coming from
+ * network/devices), and on transient conditions like ENOMEM or EAGAIN.
+ * These macros should be used for recoverable kernel issues only.
+ * For invalid external inputs, transient conditions, etc use
+ * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary.
+ * Do not include "BUG"/"WARNING" in format strings manually to make these
+ * conditions distinguishable from kernel issues.
+ *
+ * Use the versions with printk format strings to provide better diagnostics.
+ */
+#ifndef __WARN_FLAGS
+extern __printf(4, 5)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+		       const char *fmt, ...);
+#define __WARN()		__WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...)					\
+	warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
+#else
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+#define __WARN()		__WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...)
do { \ + __warn_printk(arg); \ + __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ + } while (0) +#define WARN_ON_ONCE(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_FLAGS(BUGFLAG_ONCE | \ + BUGFLAG_TAINT(TAINT_WARN)); \ + unlikely(__ret_warn_on); \ +}) +#endif + +/* used internally by panic.c */ +struct warn_args; +struct pt_regs; + +void __warn(const char *file, int line, void *caller, unsigned taint, + struct pt_regs *regs, struct warn_args *args); + +#ifndef WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(TAINT_WARN, format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_TAINT(condition, taint, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(taint, format); \ + unlikely(__ret_warn_on); \ +}) + +#ifndef WARN_ON_ONCE +#define WARN_ON_ONCE(condition) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) +#endif + +#define WARN_ONCE(condition, format...) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN(1, format); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ + static bool __section(.data.once) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_TAINT(1, taint, format); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#else /* !CONFIG_BUG */ +#ifndef HAVE_ARCH_BUG +#define BUG() do {} while (1) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) +#endif + +#ifndef HAVE_ARCH_WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + no_printk(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_ON_ONCE(condition) WARN_ON(condition) +#define WARN_ONCE(condition, format...) WARN(condition, format) +#define WARN_TAINT(condition, taint, format...) WARN(condition, format) +#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format) + +#endif + +/* + * WARN_ON_SMP() is for cases that the warning is either + * meaningless for !SMP or may even cause failures. + * It can also be used with values that are only defined + * on SMP: + * + * struct foo { + * [...] + * #ifdef CONFIG_SMP + * int bar; + * #endif + * }; + * + * void func(struct foo *zoot) + * { + * WARN_ON_SMP(!zoot->bar); + * + * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(), + * and should be a nop and return false for uniprocessor. + * + * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set + * and x is true. + */ +#ifdef CONFIG_SMP +# define WARN_ON_SMP(x) WARN_ON(x) +#else +/* + * Use of ({0;}) because WARN_ON_SMP(x) may be used either as + * a stand alone line statement or as a condition in an if () + * statement. 
+ * A simple "0" would cause gcc to give a "statement has no effect"
+ * warning.
+ */
+# define WARN_ON_SMP(x)			({0;})
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
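
All of the _ONCE variants above reduce to the same shape: a static per-call-site flag that gates the report. A minimal userspace sketch of that pattern, using a GCC statement expression like the kernel macros do (plain C, not kernel code):

    #include <stdio.h>

    /* Userspace sketch of the WARN_ON_ONCE() pattern: a static flag
     * ensures the report fires at most once per call site. */
    #define MY_WARN_ON_ONCE(cond) ({                                \
        static int __warned;                                        \
        int __ret = !!(cond);                                       \
        if (__ret && !__warned) {                                   \
            __warned = 1;                                           \
            fprintf(stderr, "warning at %s:%d\n",                   \
                    __FILE__, __LINE__);                            \
        }                                                           \
        __ret;                                                      \
    })

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            if (MY_WARN_ON_ONCE(i > 0))
                ;   /* the message prints only once, at i == 1 */
        return 0;
    }
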
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
new file mode 100644
index 0000000..6902183
--- /dev/null
+++ b/include/asm-generic/bugs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BUGS_H
+#define __ASM_GENERIC_BUGS_H
+/*
+ * This file is included by 'init/main.c' to check for
+ * architecture-dependent bugs.
+ */
+
+static inline void check_bugs(void) { }
+
+#endif	/* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
new file mode 100644
index 0000000..60386e1
--- /dev/null
+++ b/include/asm-generic/cache.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CACHE_H
+#define __ASM_GENERIC_CACHE_H
+/*
+ * 32 bytes appears to be the most common cache line size,
+ * so make that the default here. Architectures with larger
+ * cache lines need to provide their own cache.h.
+ */
+
+#define L1_CACHE_SHIFT		5
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#endif /* __ASM_GENERIC_CACHE_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 0000000..cac7404
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
+#ifndef flush_cache_all
+static inline void flush_cache_all(void)
+{
+}
+#endif
+
+#ifndef flush_cache_mm
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_dup_mm
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_range
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_page
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+#endif
+
+#ifndef flush_dcache_page
+static inline void flush_dcache_page(struct page *page)
+{
+}
+#endif
+
+#ifndef flush_dcache_mmap_lock
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_dcache_mmap_unlock
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_icache_page
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+}
+#endif
+
+#ifndef flush_icache_user_range
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr, int len)
+{
+}
+#endif
+
+#ifndef flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_vunmap
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
+	do { \
+		memcpy(dst, src, len); \
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
+#endif
+
+#ifndef copy_from_user_page
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+#endif
+
+#endif /* __ASM_CACHEFLUSH_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
new file mode 100644
index 0000000..34785c0
--- /dev/null
+++ b/include/asm-generic/checksum.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CHECKSUM_H
+#define __ASM_GENERIC_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum, int *csum_err);
+
+#ifndef csum_partial_copy_nocheck
+#define csum_partial_copy_nocheck(src, dst, len, sum)	\
+	csum_partial_copy((src), (dst), (len), (sum))
+#endif
+
+#ifndef ip_fast_csum
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+
+#ifndef csum_fold
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16);
+	sum = (sum & 0xffff) + (sum >> 16);
+	return (__force __sum16)~sum;
+}
+#endif
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+		   __u8 proto, __wsum sum);
+#endif
+
+#ifndef csum_tcpudp_magic
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+		  __u8 proto, __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+#endif
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif /* __ASM_GENERIC_CHECKSUM_H */
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
new file mode 100644
index 0000000..f17f14f
--- /dev/null
+++ b/include/asm-generic/cmpxchg-local.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
+#define __ASM_GENERIC_CMPXCHG_LOCAL_H
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
+	__noreturn;
+
+/*
+ * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
+ * long parameter, supporting various types of architectures.
+ */
+static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+		unsigned long old, unsigned long new, int size)
+{
+	unsigned long flags, prev;
+
+	/*
+	 * Sanity checking, compile-time.
+	 */
+	if (size == 8 && sizeof(unsigned long) != 8)
+		wrong_size_cmpxchg(ptr);
+
+	raw_local_irq_save(flags);
+	switch (size) {
+	case 1: prev = *(u8 *)ptr;
+		if (prev == old)
+			*(u8 *)ptr = (u8)new;
+		break;
+	case 2: prev = *(u16 *)ptr;
+		if (prev == old)
+			*(u16 *)ptr = (u16)new;
+		break;
+	case 4: prev = *(u32 *)ptr;
+		if (prev == old)
+			*(u32 *)ptr = (u32)new;
+		break;
+	case 8: prev = *(u64 *)ptr;
+		if (prev == old)
+			*(u64 *)ptr = (u64)new;
+		break;
+	default:
+		wrong_size_cmpxchg(ptr);
+	}
+	raw_local_irq_restore(flags);
+	return prev;
+}
+
+/*
+ * Generic version of __cmpxchg64_local. Takes an u64 parameter.
+ */
+static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+		u64 old, u64 new)
+{
+	u64 prev;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	prev = *(u64 *)ptr;
+	if (prev == old)
+		*(u64 *)ptr = new;
+	raw_local_irq_restore(flags);
+	return prev;
+}
+
+#endif
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
new file mode 100644
index 0000000..9a24510
--- /dev/null
+++ b/include/asm-generic/cmpxchg.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic UP xchg and cmpxchg using interrupt disablement. Does not
+ * support SMP.
+ */
+
+#ifndef __ASM_GENERIC_CMPXCHG_H
+#define __ASM_GENERIC_CMPXCHG_H
+
+#ifdef CONFIG_SMP
+#error "Cannot use generic cmpxchg on SMP"
+#endif
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#ifndef xchg
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalidly-sized xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline
+unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+	unsigned long ret, flags;
+
+	switch (size) {
+	case 1:
+#ifdef __xchg_u8
+		return __xchg_u8(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u8 *)ptr;
+		*(volatile u8 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u8 */
+
+	case 2:
+#ifdef __xchg_u16
+		return __xchg_u16(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u16 *)ptr;
+		*(volatile u16 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u16 */
+
+	case 4:
+#ifdef __xchg_u32
+		return __xchg_u32(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u32 *)ptr;
+		*(volatile u32 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u32 */
+
+#ifdef CONFIG_64BIT
+	case 8:
+#ifdef __xchg_u64
+		return __xchg_u64(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u64 *)ptr;
+		*(volatile u64 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u64 */
+#endif /* CONFIG_64BIT */
+
+	default:
+		__xchg_called_with_bad_pointer();
+		return x;
+	}
+}
+
+#define xchg(ptr, x) ({							\
+	((__typeof__(*(ptr)))						\
+		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
+})
+
+#endif /* xchg */
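
Stripped of the interrupt disabling and the size dispatch, the cmpxchg contract implemented above is simply: compare, conditionally store, always return the old value. A standalone model of that contract (my_cmpxchg is an illustrative name, not part of the patch):

    #include <stdio.h>

    /* Userspace model of the UP cmpxchg_local() semantics: compare,
     * conditionally store, and always return the previous value.
     * The kernel version brackets this with IRQ disable/restore. */
    static unsigned long my_cmpxchg(unsigned long *p, unsigned long old,
                                    unsigned long new)
    {
        unsigned long prev = *p;

        if (prev == old)
            *p = new;
        return prev;
    }

    int main(void)
    {
        unsigned long v = 5;

        printf("%lu\n", my_cmpxchg(&v, 5, 7));  /* 5: swap happened */
        printf("%lu\n", my_cmpxchg(&v, 5, 9));  /* 7: no swap       */
        printf("%lu\n", v);                     /* 7                */
        return 0;
    }
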
+/*
+ * Atomic compare and exchange.
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#ifndef cmpxchg_local
+#define cmpxchg_local(ptr, o, n) ({					       \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))));		       \
+})
+#endif
+
+#ifndef cmpxchg64_local
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
+
+#endif /* __ASM_GENERIC_CMPXCHG_H */
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
new file mode 100644
index 0000000..a86f65b
--- /dev/null
+++ b/include/asm-generic/compat.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_COMPAT_H
+#define __ASM_GENERIC_COMPAT_H
+
+/* These types are common across all compat ABIs */
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 compat_ino_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s32 compat_daddr_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+typedef s16 compat_short_t;
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef u16 compat_ushort_t;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u32 compat_uptr_t;
+typedef u32 compat_aio_context_t;
+
+#endif
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
new file mode 100644
index 0000000..3a2e224
--- /dev/null
+++ b/include/asm-generic/current.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CURRENT_H
+#define __ASM_GENERIC_CURRENT_H
+
+#include <linux/thread_info.h>
+
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+
+#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
new file mode 100644
index 0000000..e448ac6
--- /dev/null
+++ b/include/asm-generic/delay.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_DELAY_H
+#define __ASM_GENERIC_DELAY_H
+
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
+extern void __delay(unsigned long loops);
+
+/*
+ * The weird n/20000 thing suppresses a "comparison is always false due to
+ * limited range of data type" warning with non-const 8-bit arguments.
+ */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n)							\
+	({								\
+		if (__builtin_constant_p(n)) {				\
+			if ((n) / 20000 >= 1)				\
+				__bad_udelay();				\
+			else						\
+				__const_udelay((n) * 0x10c7ul);		\
+		} else {						\
+			__udelay(n);					\
+		}							\
+	})
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n)							\
+	({								\
+		if (__builtin_constant_p(n)) {				\
+			if ((n) / 20000 >= 1)				\
+				__bad_ndelay();				\
+			else						\
+				__const_udelay((n) * 5ul);		\
+		} else {						\
+			__ndelay(n);					\
+		}							\
+	})
+
+#endif /* __ASM_GENERIC_DELAY_H */
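
The magic numbers in udelay() and ndelay() are 2^32/10^6 and 2^32/10^9 rounded up, which lets __const_udelay() scale by the calibrated loops-per-jiffy value with one 64-bit multiply and a right shift. A quick standalone check of both constants (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Verify the delay scaling constants: ceil(2^32 / 10^6) and
     * ceil(2^32 / 10^9), as used by udelay() and ndelay() above. */
    int main(void)
    {
        uint64_t us = ((1ULL << 32) + 1000000 - 1) / 1000000;
        uint64_t ns = ((1ULL << 32) + 1000000000 - 1) / 1000000000;

        printf("0x%llx %llu\n", (unsigned long long)us,
               (unsigned long long)ns);   /* prints: 0x10c7 5 */
        return 0;
    }
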
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
new file mode 100644
index 0000000..974517c
--- /dev/null
+++ b/include/asm-generic/device.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arch specific extensions to struct device
+ */
+#ifndef _ASM_GENERIC_DEVICE_H
+#define _ASM_GENERIC_DEVICE_H
+
+struct dev_archdata {
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_GENERIC_DEVICE_H */
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
new file mode 100644
index 0000000..a3b98c8
--- /dev/null
+++ b/include/asm-generic/div64.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DIV64_H
+#define _ASM_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * Optimization for constant divisors on 32-bit machines:
+ * Copyright (C) 2006-2015 Nicolas Pitre
+ *
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ *	uint32_t remainder = *n % base;
+ *	*n = *n / base;
+ *	return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ * beware of side effects!
+ */
+
+#include <linux/types.h>
+#include <asm/bitsperlong.h>
+
+#if BITS_PER_LONG == 64
+
+/**
+ * do_div - returns 2 values: calculate remainder and update new dividend
+ * @n: uint64_t dividend (will be updated)
+ * @base: uint32_t divisor
+ *
+ * Summary:
+ * ``uint32_t remainder = n % base;``
+ * ``n = n / base;``
+ *
+ * Return: (uint32_t)remainder
+ *
+ * NOTE: macro parameter @n is evaluated multiple times,
+ * beware of side effects!
+ */
+# define do_div(n,base) ({					\
+	uint32_t __base = (base);				\
+	uint32_t __rem;						\
+	__rem = ((uint64_t)(n)) % __base;			\
+	(n) = ((uint64_t)(n)) / __base;				\
+	__rem;							\
+ })
+
+#elif BITS_PER_LONG == 32
+
+#include <linux/log2.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications which ought to be much faster. And yet only if compiling
+ * with a sufficiently recent gcc version to perform proper 64-bit constant
+ * propagation.
+ *
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+
+#ifndef __div64_const32_is_OK
+#define __div64_const32_is_OK (__GNUC__ >= 4)
+#endif
+
+#define __div64_const32(n, ___b)					\
+({									\
+	/*								\
+	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
+	 *								\
+	 * We rely on the fact that most of this code gets optimized	\
+	 * away at compile time due to constant propagation and only	\
+	 * a few multiplication instructions should remain.		\
+	 * Hence this monstrous macro (static inline doesn't always	\
+	 * do the trick here).
\ + */ \ + uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ + uint32_t ___p, ___bias; \ + \ + /* determine MSB of b */ \ + ___p = 1 << ilog2(___b); \ + \ + /* compute m = ((p << 64) + b - 1) / b */ \ + ___m = (~0ULL / ___b) * ___p; \ + ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \ + \ + /* one less than the dividend with highest result */ \ + ___x = ~0ULL / ___b * ___b - 1; \ + \ + /* test our ___m with res = m * x / (p << 64) */ \ + ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \ + ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \ + ___res += (___x & 0xffffffff) * (___m >> 32); \ + ___t = (___res < ___t) ? (1ULL << 32) : 0; \ + ___res = (___res >> 32) + ___t; \ + ___res += (___m >> 32) * (___x >> 32); \ + ___res /= ___p; \ + \ + /* Now sanitize and optimize what we've got. */ \ + if (~0ULL % (___b / (___b & -___b)) == 0) { \ + /* special case, can be simplified to ... */ \ + ___n /= (___b & -___b); \ + ___m = ~0ULL / (___b / (___b & -___b)); \ + ___p = 1; \ + ___bias = 1; \ + } else if (___res != ___x / ___b) { \ + /* \ + * We can't get away without a bias to compensate \ + * for bit truncation errors. To avoid it we'd need an \ + * additional bit to represent m which would overflow \ + * a 64-bit variable. \ + * \ + * Instead we do m = p / b and n / b = (n * m + m) / p. \ + */ \ + ___bias = 1; \ + /* Compute m = (p << 64) / b */ \ + ___m = (~0ULL / ___b) * ___p; \ + ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \ + } else { \ + /* \ + * Reduce m / p, and try to clear bit 31 of m when \ + * possible, otherwise that'll need extra overflow \ + * handling later. \ + */ \ + uint32_t ___bits = -(___m & -___m); \ + ___bits |= ___m >> 32; \ + ___bits = (~___bits) << 1; \ + /* \ + * If ___bits == 0 then setting bit 31 is unavoidable. \ + * Simply apply the maximum possible reduction in that \ + * case. Otherwise the MSB of ___bits indicates the \ + * best reduction we should apply. \ + */ \ + if (!___bits) { \ + ___p /= (___m & -___m); \ + ___m /= (___m & -___m); \ + } else { \ + ___p >>= ilog2(___bits); \ + ___m >>= ilog2(___bits); \ + } \ + /* No bias needed. */ \ + ___bias = 0; \ + } \ + \ + /* \ + * Now we have a combination of 2 conditions: \ + * \ + * 1) whether or not we need to apply a bias, and \ + * \ + * 2) whether or not there might be an overflow in the cross \ + * product determined by (___m & ((1 << 63) | (1 << 31))). \ + * \ + * Select the best way to do (m_bias + m * n) / (1 << 64). \ + * From now on there will be actual runtime code generated. \ + */ \ + ___res = __arch_xprod_64(___m, ___n, ___bias); \ + \ + ___res /= ___p; \ +}) + +#ifndef __arch_xprod_64 +/* + * Default C implementation for __arch_xprod_64() + * + * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) + * Semantic: retval = ((bias ? m : 0) + m * n) >> 64 + * + * The product is a 128-bit value, scaled down to 64 bits. + * Assuming constant propagation to optimize away unused conditional code. + * Architectures may provide their own optimized assembly implementation. 
+ */
+static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+{
+	uint32_t m_lo = m;
+	uint32_t m_hi = m >> 32;
+	uint32_t n_lo = n;
+	uint32_t n_hi = n >> 32;
+	uint64_t res;
+	uint32_t res_lo, res_hi, tmp;
+
+	if (!bias) {
+		res = ((uint64_t)m_lo * n_lo) >> 32;
+	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+		/* there can't be any overflow here */
+		res = (m + (uint64_t)m_lo * n_lo) >> 32;
+	} else {
+		res = m + (uint64_t)m_lo * n_lo;
+		res_lo = res >> 32;
+		res_hi = (res_lo < m_hi);
+		res = res_lo | ((uint64_t)res_hi << 32);
+	}
+
+	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+		/* there can't be any overflow here */
+		res += (uint64_t)m_lo * n_hi;
+		res += (uint64_t)m_hi * n_lo;
+		res >>= 32;
+	} else {
+		res += (uint64_t)m_lo * n_hi;
+		tmp = res >> 32;
+		res += (uint64_t)m_hi * n_lo;
+		res_lo = res >> 32;
+		res_hi = (res_lo < tmp);
+		res = res_lo | ((uint64_t)res_hi << 32);
+	}
+
+	res += (uint64_t)m_hi * n_hi;
+
+	return res;
+}
+#endif
+
+#ifndef __div64_32
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+#endif
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({				\
+	uint32_t __base = (base);			\
+	uint32_t __rem;					\
+	(void)(((typeof((n)) *)0) == ((uint64_t *)0));	\
+	if (__builtin_constant_p(__base) &&		\
+	    is_power_of_2(__base)) {			\
+		__rem = (n) & (__base - 1);		\
+		(n) >>= ilog2(__base);			\
+	} else if (__div64_const32_is_OK &&		\
+		   __builtin_constant_p(__base) &&	\
+		   __base != 0) {			\
+		uint32_t __res_lo, __n_lo = (n);	\
+		(n) = __div64_const32(n, __base);	\
+		/* the remainder can be computed with 32-bit regs */ \
+		__res_lo = (n);				\
+		__rem = __n_lo - __res_lo * __base;	\
+	} else if (likely(((n) >> 32) == 0)) {		\
+		__rem = (uint32_t)(n) % __base;		\
+		(n) = (uint32_t)(n) / __base;		\
+	} else						\
+		__rem = __div64_32(&(n), __base);	\
+	__rem;						\
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ASM_GENERIC_DIV64_H */
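
Both branches above implement the same do_div() contract: divide the 64-bit dividend in place and hand back the 32-bit remainder. A standalone model of that contract (my_do_div is an illustrative name, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace model of do_div(): update the 64-bit dividend in
     * place and return the 32-bit remainder. */
    static uint32_t my_do_div(uint64_t *n, uint32_t base)
    {
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
    }

    int main(void)
    {
        uint64_t ns = 1000000007ULL;
        uint32_t rem = my_do_div(&ns, 1000);

        printf("%llu %u\n", (unsigned long long)ns, rem); /* 1000000 7 */
        return 0;
    }
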
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 0000000..f24b0f9
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
+#define _ASM_GENERIC_DMA_CONTIGUOUS_H
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 0000000..c13f461
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+	return NULL;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h
new file mode 100644
index 0000000..43d0c8a
--- /dev/null
+++ b/include/asm-generic/dma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_DMA_H
+#define __ASM_GENERIC_DMA_H
+/*
+ * This file traditionally describes the i8237 PC style DMA controller.
+ * Most architectures don't have these any more and can get the minimal
+ * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
+ *
+ * Some code relies on seeing MAX_DMA_ADDRESS though.
+ */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+extern int request_dma(unsigned int dmanr, const char *device_id);
+extern void free_dma(unsigned int dmanr);
+
+#endif /* __ASM_GENERIC_DMA_H */
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 0000000..9def22e
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+			    unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+			       unsigned long size);
+extern void *early_memremap_prot(resource_size_t phys_addr,
+				 unsigned long size, unsigned long prot_val);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+				unsigned long size);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
new file mode 100644
index 0000000..445de38
--- /dev/null
+++ b/include/asm-generic/emergency-restart.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+#define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+static inline void machine_emergency_restart(void)
+{
+	machine_restart(NULL);
+}
+
+#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 0000000..80ca610
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+	EI_ETYPE_NONE,		/* Dummy value for undefined case */
+	EI_ETYPE_NULL,		/* Return NULL if failure */
+	EI_ETYPE_ERRNO,		/* Return -ERRNO if failure */
+	EI_ETYPE_ERRNO_NULL,	/* Return -ERRNO or NULL if failure */
+	EI_ETYPE_TRUE,		/* Return true if failure */
+};
+
+struct error_injection_entry {
+	unsigned long	addr;
+	int		etype;
+};
+
+struct pt_regs;
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist generating macro. Specify functions which can be
+ * error-injectable using this macro.
+ */ +#define ALLOW_ERROR_INJECTION(fname, _etype) \ +static struct error_injection_entry __used \ + __attribute__((__section__("_error_injection_whitelist"))) \ + _eil_addr_##fname = { \ + .addr = (unsigned long)fname, \ + .etype = EI_ETYPE_##_etype, \ + }; + +void override_function_with_return(struct pt_regs *regs); +#else +#define ALLOW_ERROR_INJECTION(fname, _etype) + +static inline void override_function_with_return(struct pt_regs *regs) { } +#endif +#endif + +#endif /* _ASM_GENERIC_ERROR_INJECTION_H */ diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h new file mode 100644 index 0000000..f66dc71 --- /dev/null +++ b/include/asm-generic/exec.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Generic process execution definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ +#ifndef __ASM_GENERIC_EXEC_H +#define __ASM_GENERIC_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_GENERIC_EXEC_H */ diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h new file mode 100644 index 0000000..fa57797 --- /dev/null +++ b/include/asm-generic/export.h @@ -0,0 +1,93 @@ +#ifndef __ASM_GENERIC_EXPORT_H +#define __ASM_GENERIC_EXPORT_H + +#ifndef KSYM_FUNC +#define KSYM_FUNC(x) x +#endif +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define KSYM_ALIGN 4 +#elif defined(CONFIG_64BIT) +#define KSYM_ALIGN 8 +#else +#define KSYM_ALIGN 4 +#endif +#ifndef KCRC_ALIGN +#define KCRC_ALIGN 4 +#endif + +.macro __put, val, name +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + .long \val - ., \name - ., 0 +#elif defined(CONFIG_64BIT) + .quad \val, \name, 0 +#else + .long \val, \name, 0 +#endif +.endm + +/* + * note on .section use: @progbits vs %progbits nastiness doesn't matter, + * since we immediately emit into those sections anyway. + */ +.macro ___EXPORT_SYMBOL name,val,sec +#ifdef CONFIG_MODULES + .globl __ksymtab_\name + .section ___ksymtab\sec+\name,"a" + .balign KSYM_ALIGN +__ksymtab_\name: + __put \val, __kstrtab_\name + .previous + .section __ksymtab_strings,"a" +__kstrtab_\name: + .asciz "\name" + .previous +#ifdef CONFIG_MODVERSIONS + .section ___kcrctab\sec+\name,"a" + .balign KCRC_ALIGN +__kcrctab_\name: +#if defined(CONFIG_MODULE_REL_CRCS) + .long __crc_\name - . 
+#else
+	.long __crc_\name
+#endif
+	.weak __crc_\name
+	.previous
+#endif
+#endif
+.endm
+
+#if defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+.macro __ksym_marker sym
+	.section ".discard.ksym","a"
+__ksym_marker_\sym:
+	 .previous
+.endm
+
+#define __EXPORT_SYMBOL(sym, val, sec)				\
+	__ksym_marker sym;					\
+	__cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf)			\
+	___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled)		\
+	__cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(name),)
+#define EXPORT_SYMBOL_GPL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
+#define EXPORT_DATA_SYMBOL(name)				\
+	__EXPORT_SYMBOL(name, name,)
+#define EXPORT_DATA_SYMBOL_GPL(name)				\
+	__EXPORT_SYMBOL(name, name,_gpl)
+
+#endif
diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h
new file mode 100644
index 0000000..f9618bd
--- /dev/null
+++ b/include/asm-generic/extable.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_EXTABLE_H
+#define __ASM_GENERIC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
new file mode 100644
index 0000000..f9f1810
--- /dev/null
+++ b/include/asm-generic/fb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_FB_H_
+#define __ASM_GENERIC_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* __ASM_GENERIC_FB_H_ */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 0000000..8cc7b09
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,104 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+#include <linux/mm_types.h>
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys)				\
+	__set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx)			\
+	__set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags)				\
+({									\
+	unsigned long ________addr;					\
+	__set_fixmap(idx, phys, flags);					\
+	________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));	\
+	________addr;							\
+})
+
+#define set_fixmap_offset(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+	__set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+	__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#define set_fixmap_offset_io(idx, phys) \
+	__set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
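
The fixmap translation above is pure arithmetic: index i maps to FIXADDR_TOP minus i pages. A standalone illustration with a made-up FIXADDR_TOP and 4 KiB pages (both values are assumptions for the demo, not part of the patch):

    #include <stdio.h>

    /* Model of the fixmap index-to-address math, assuming a
     * hypothetical FIXADDR_TOP and PAGE_SHIFT == 12. */
    #define DEMO_PAGE_SHIFT   12
    #define DEMO_FIXADDR_TOP  0xfffff000UL

    #define DEMO_FIX_TO_VIRT(x) \
        (DEMO_FIXADDR_TOP - ((unsigned long)(x) << DEMO_PAGE_SHIFT))

    int main(void)
    {
        for (unsigned i = 0; i < 3; i++)
            printf("idx %u -> %#lx\n", i, DEMO_FIX_TO_VIRT(i));
        /* idx 0 -> 0xfffff000, idx 1 -> 0xffffe000, idx 2 -> 0xffffd000 */
        return 0;
    }
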
diff --git a/include/asm-generic/flat.h b/include/asm-generic/flat.h
new file mode 100644
index 0000000..1928a35
--- /dev/null
+++ b/include/asm-generic/flat.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_FLAT_H
+#define _ASM_GENERIC_FLAT_H
+
+#include <linux/uaccess.h>
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+	return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+	return put_user(addr, rp);
+#endif
+}
+
+#endif /* _ASM_GENERIC_FLAT_H */
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 0000000..3a23028
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/asm-generic/ftrace.h
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h, the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
new file mode 100644
index 0000000..02970b1
--- /dev/null
+++ b/include/asm-generic/futex.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_FUTEX_H
+#define _ASM_GENERIC_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#ifndef CONFIG_SMP
+/*
+ * The following implementation is only for uniprocessor machines.
+ * It relies on preempt_disable() ensuring mutual exclusion.
+ *
+ */
+
+/**
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ *			  argument and comparison of the previous
+ *			  futex value with another constant.
+ *
+ * @op:	operation to execute
+ * @oparg:	argument to the operation
+ * @oval:	location to store the old futex value
+ * @uaddr:	pointer to user space address
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
+ */
+static inline int
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+	int oldval, ret;
+	u32 tmp;
+
+	preempt_disable();
+	pagefault_disable();
+
+	ret = -EFAULT;
+	if (unlikely(get_user(oldval, uaddr) != 0))
+		goto out_pagefault_enable;
+
+	ret = 0;
+	tmp = oldval;
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		tmp = oparg;
+		break;
+	case FUTEX_OP_ADD:
+		tmp += oparg;
+		break;
+	case FUTEX_OP_OR:
+		tmp |= oparg;
+		break;
+	case FUTEX_OP_ANDN:
+		tmp &= ~oparg;
+		break;
+	case FUTEX_OP_XOR:
+		tmp ^= oparg;
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+		ret = -EFAULT;
+
+out_pagefault_enable:
+	pagefault_enable();
+	preempt_enable();
+
+	if (ret == 0)
+		*oval = oldval;
+
+	return ret;
+}
+
+/**
+ * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ *				uaddr with newval if the current value is
+ *				oldval.
+ * @uval:	pointer to store content of @uaddr
+ * @uaddr:	pointer to user space address
+ * @oldval:	old value
+ * @newval:	new value to store to @uaddr
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
+{
+	u32 val;
+
+	preempt_disable();
+	if (unlikely(get_user(val, uaddr) != 0)) {
+		preempt_enable();
+		return -EFAULT;
+	}
+
+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+		preempt_enable();
+		return -EFAULT;
+	}
+
+	*uval = val;
+	preempt_enable();
+
+	return 0;
+}
+
+#else
+static inline int
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+	return -ENOSYS;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
+{
+	return -ENOSYS;
+}
+
+#endif /* CONFIG_SMP */
+#endif
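
Ignoring the user-access and fault-handling details, futex_atomic_cmpxchg_inatomic() above follows the usual compare-exchange contract, with the previously seen value reported through @uval. A standalone model of just that contract (model_cmpxchg is an illustrative name, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace model: report the current value through *uval, swap
     * only on match, return 0. The kernel version adds page-fault
     * handling and preemption control around this. */
    static int model_cmpxchg(uint32_t *uval, uint32_t *uaddr,
                             uint32_t oldval, uint32_t newval)
    {
        uint32_t val = *uaddr;

        if (val == oldval)
            *uaddr = newval;
        *uval = val;
        return 0;
    }

    int main(void)
    {
        uint32_t word = 1, seen;

        model_cmpxchg(&seen, &word, 1, 2);
        printf("%u %u\n", seen, word);  /* 1 2            */
        model_cmpxchg(&seen, &word, 1, 3);
        printf("%u %u\n", seen, word);  /* 2 2: no swap   */
        return 0;
    }
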
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
new file mode 100644
index 0000000..e9f20b8
--- /dev/null
+++ b/include/asm-generic/getorder.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_GETORDER_H
+#define __ASM_GENERIC_GETORDER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+/**
+ * get_order - Determine the allocation order of a memory size
+ * @size: The size for which to get the order
+ *
+ * Determine the allocation order of a particular sized block of memory. This
+ * is on a logarithmic scale, where:
+ *
+ *	0 -> 2^0 * PAGE_SIZE and below
+ *	1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
+ *	2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
+ *	3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
+ *	4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
+ *	...
+ *
+ * The order returned is used to find the smallest allocation granule required
+ * to hold an object of the specified size.
+ *
+ * The result is undefined if the size is 0.
+ */
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2((size) - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* __ASM_GENERIC_GETORDER_H */
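
The get_order() doc above is easy to check numerically: sizes up to one page give order 0, and each doubling past a power-of-two boundary adds one. A standalone model assuming PAGE_SHIFT == 12 (the result for size 0 is undefined, as noted above; not part of the patch):

    #include <stdio.h>

    /* Userspace model of get_order() for 4 KiB pages. */
    #define DEMO_PAGE_SHIFT 12

    static int my_get_order(unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> DEMO_PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               my_get_order(1),       /* 0 */
               my_get_order(4096),    /* 0 */
               my_get_order(4097),    /* 1 */
               my_get_order(65536));  /* 4 */
        return 0;
    }
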
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
new file mode 100644
index 0000000..19eadac
--- /dev/null
+++ b/include/asm-generic/gpio.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_GPIOLIB
+
+#include <linux/compiler.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+/* Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ *
+ * While the GPIO programming interface defines valid GPIO numbers
+ * to be in the range 0..MAX_INT, this library restricts them to the
+ * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
+ */
+
+#ifndef ARCH_NR_GPIOS
+#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#else
+#define ARCH_NR_GPIOS		512
+#endif
+#endif
+
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
+static inline bool gpio_is_valid(int number)
+{
+	return number >= 0 && number < ARCH_NR_GPIOS;
+}
+
+struct device;
+struct gpio;
+struct seq_file;
+struct module;
+struct device_node;
+struct gpio_desc;
+
+/* caller holds gpio_lock *OR* gpio is marked as requested */
+static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+{
+	return gpiod_to_chip(gpio_to_desc(gpio));
+}
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */ +extern int gpio_request(unsigned gpio, const char *label); +extern void gpio_free(unsigned gpio); + +static inline int gpio_direction_input(unsigned gpio) +{ + return gpiod_direction_input(gpio_to_desc(gpio)); +} +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return gpiod_direction_output_raw(gpio_to_desc(gpio), value); +} + +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return gpiod_set_debounce(gpio_to_desc(gpio), debounce); +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio)); +} +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value); +} + + +/* A platform's code may want to inline the I/O calls when + * the GPIO is constant and refers to some always-present controller, + * giving direct access to chip registers and tight bitbanging loops. + */ +static inline int __gpio_get_value(unsigned gpio) +{ + return gpiod_get_raw_value(gpio_to_desc(gpio)); +} +static inline void __gpio_set_value(unsigned gpio, int value) +{ + return gpiod_set_raw_value(gpio_to_desc(gpio), value); +} + +static inline int __gpio_cansleep(unsigned gpio) +{ + return gpiod_cansleep(gpio_to_desc(gpio)); +} + +static inline int __gpio_to_irq(unsigned gpio) +{ + return gpiod_to_irq(gpio_to_desc(gpio)); +} + +extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); +extern int gpio_request_array(const struct gpio *array, size_t num); +extern void gpio_free_array(const struct gpio *array, size_t num); + +/* + * A sysfs interface can be exported by individual drivers if they want, + * but more typically is configured entirely from userspace. + */ +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + return gpiod_export(gpio_to_desc(gpio), direction_may_change); +} + +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + return gpiod_export_link(dev, name, gpio_to_desc(gpio)); +} + +static inline void gpio_unexport(unsigned gpio) +{ + gpiod_unexport(gpio_to_desc(gpio)); +} + +#else /* !CONFIG_GPIOLIB */ + +static inline bool gpio_is_valid(int number) +{ + /* only non-negative numbers are valid */ + return number >= 0; +} + +/* platforms that don't directly support access to GPIOs through I2C, SPI, + * or other blocking infrastructure can use these wrappers. 
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+	return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+	might_sleep();
+	return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+	might_sleep();
+	__gpio_set_value(gpio, value);
+}
+
+#endif /* !CONFIG_GPIOLIB */
+
+#endif /* _ASM_GENERIC_GPIO_H */
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
new file mode 100644
index 0000000..d14214d
--- /dev/null
+++ b/include/asm-generic/hardirq.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_HARDIRQ_H
+#define __ASM_GENERIC_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
+
+#ifndef ack_bad_irq
+static inline void ack_bad_irq(unsigned int irq)
+{
+	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#endif /* __ASM_GENERIC_HARDIRQ_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 0000000..822f433
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+	return mk_pte(page, pgprot);
+}
+
+static inline unsigned long huge_pte_write(pte_t pte)
+{
+	return pte_write(pte);
+}
+
+static inline unsigned long huge_pte_dirty(pte_t pte)
+{
+	return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+	return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+	return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return pte_modify(pte, newprot);
+}
+
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+		    pte_t *ptep, unsigned long sz)
+{
+	pte_clear(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+		unsigned long addr, unsigned long end,
+		unsigned long floor, unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep)
+{
+	ptep_clear_flush(vma, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+		unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
__HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_GET +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} +#endif + +#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED +static inline bool gigantic_page_runtime_supported(void) +{ + return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE); +} +#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */ + +#endif /* _ASM_GENERIC_HUGETLB_H */ diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h new file mode 100644 index 0000000..89036d7 --- /dev/null +++ b/include/asm-generic/hw_irq.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_HW_IRQ_H +#define __ASM_GENERIC_HW_IRQ_H +/* + * hw_irq.h has internal declarations for the low-level interrupt + * controller, like the original i8259A. + * In general, this is not needed for new architectures. + */ + +#endif /* __ASM_GENERIC_HW_IRQ_H */ diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h new file mode 100644 index 0000000..81dfa3e --- /dev/null +++ b/include/asm-generic/ide_iops.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Generic I/O and MEMIO string operations. */ + +#define __ide_insw insw +#define __ide_insl insl +#define __ide_outsw outsw +#define __ide_outsl outsl + +static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + *(u16 *)addr = readw(port); + addr += 2; + } +} + +static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + *(u32 *)addr = readl(port); + addr += 4; + } +} + +static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + writew(*(u16 *)addr, port); + addr += 2; + } +} + +static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) +{ + while (count--) { + writel(*(u32 *)addr, port); + addr += 4; + } +} diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h new file mode 100644 index 0000000..a248545 --- /dev/null +++ b/include/asm-generic/int-ll64.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * asm-generic/int-ll64.h + * + * Integer declarations for architectures which use "long long" + * for 64-bit types. 
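+ *
+ * (Illustrative note, not upstream text: on such architectures a u64 is
+ * "unsigned long long", so it can be passed straight to the printk
+ * "%llu" specifier without a cast, e.g.
+ *
+ *	u64 nbytes = U64_C(1) << 32;
+ *	printk("%llu\n", nbytes);
+ *
+ * which is the main practical difference from the int-l64.h variant.)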
+ */
+#ifndef _ASM_GENERIC_INT_LL64_H
+#define _ASM_GENERIC_INT_LL64_H
+
+#include <uapi/asm-generic/int-ll64.h>
+
+
+#ifndef __ASSEMBLY__
+
+typedef __s8  s8;
+typedef __u8  u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
+
+#define S8_C(x)  x
+#define U8_C(x)  x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## LL
+#define U64_C(x) x ## ULL
+
+#else /* __ASSEMBLY__ */
+
+#define S8_C(x)  x
+#define U8_C(x)  x
+#define S16_C(x) x
+#define U16_C(x) x
+#define S32_C(x) x
+#define U32_C(x) x
+#define S64_C(x) x
+#define U64_C(x) x
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_INT_LL64_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
new file mode 100644
index 0000000..d028065
--- /dev/null
+++ b/include/asm-generic/io.h
@@ -0,0 +1,1124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Generic I/O port emulation.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_IO_H
+#define __ASM_GENERIC_IO_H
+
+#include <asm/page.h> /* I/O is all done through memory accesses */
+#include <linux/string.h> /* for memset() and memcpy() */
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_IOMAP
+#include <asm-generic/iomap.h>
+#endif
+
+#include <asm/mmiowb.h>
+#include <asm-generic/pci_iomap.h>
+
+#ifndef __io_br
+#define __io_br()	barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar(v)	rmb()
+#else
+#define __io_ar(v)	barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw()	wmb()
+#else
+#define __io_bw()	barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw()	mmiowb_set_pending()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw()	__io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw()	__io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr()	__io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par(v)	__io_ar(v)
+#endif
+
+
+/*
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
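+ *
+ * (Contrast with the ordered accessors below; an illustrative aside,
+ * not upstream text. On a big-endian CPU:
+ *
+ *	__raw_writel(0x12345678, addr);	stores the value as-is, no barriers
+ *	writel(0x12345678, addr);	swaps to little endian, adds barriers
+ *
+ * so drivers almost always want the non-__raw forms.)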
+ */ + +#ifndef __raw_readb +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + return *(const volatile u8 __force *)addr; +} +#endif + +#ifndef __raw_readw +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + return *(const volatile u16 __force *)addr; +} +#endif + +#ifndef __raw_readl +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + return *(const volatile u32 __force *)addr; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef __raw_readq +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + return *(const volatile u64 __force *)addr; +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef __raw_writeb +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 value, volatile void __iomem *addr) +{ + *(volatile u8 __force *)addr = value; +} +#endif + +#ifndef __raw_writew +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 value, volatile void __iomem *addr) +{ + *(volatile u16 __force *)addr = value; +} +#endif + +#ifndef __raw_writel +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 value, volatile void __iomem *addr) +{ + *(volatile u32 __force *)addr = value; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef __raw_writeq +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 value, volatile void __iomem *addr) +{ + *(volatile u64 __force *)addr = value; +} +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}() access little endian memory and return result in + * native endianness. + */ + +#ifndef readb +#define readb readb +static inline u8 readb(const volatile void __iomem *addr) +{ + u8 val; + + __io_br(); + val = __raw_readb(addr); + __io_ar(val); + return val; +} +#endif + +#ifndef readw +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) +{ + u16 val; + + __io_br(); + val = __le16_to_cpu(__raw_readw(addr)); + __io_ar(val); + return val; +} +#endif + +#ifndef readl +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) +{ + u32 val; + + __io_br(); + val = __le32_to_cpu(__raw_readl(addr)); + __io_ar(val); + return val; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef readq +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + u64 val; + + __io_br(); + val = __le64_to_cpu(__raw_readq(addr)); + __io_ar(val); + return val; +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writeb +#define writeb writeb +static inline void writeb(u8 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writeb(value, addr); + __io_aw(); +} +#endif + +#ifndef writew +#define writew writew +static inline void writew(u16 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writew(cpu_to_le16(value), addr); + __io_aw(); +} +#endif + +#ifndef writel +#define writel writel +static inline void writel(u32 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writel(__cpu_to_le32(value), addr); + __io_aw(); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 value, volatile void __iomem *addr) +{ + __io_bw(); + __raw_writeq(__cpu_to_le64(value), addr); + __io_aw(); +} +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but + * are not guaranteed to provide ordering against spinlocks or memory + * accesses. 
+ */ +#ifndef readb_relaxed +#define readb_relaxed readb_relaxed +static inline u8 readb_relaxed(const volatile void __iomem *addr) +{ + return __raw_readb(addr); +} +#endif + +#ifndef readw_relaxed +#define readw_relaxed readw_relaxed +static inline u16 readw_relaxed(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} +#endif + +#ifndef readl_relaxed +#define readl_relaxed readl_relaxed +static inline u32 readl_relaxed(const volatile void __iomem *addr) +{ + return __le32_to_cpu(__raw_readl(addr)); +} +#endif + +#if defined(readq) && !defined(readq_relaxed) +#define readq_relaxed readq_relaxed +static inline u64 readq_relaxed(const volatile void __iomem *addr) +{ + return __le64_to_cpu(__raw_readq(addr)); +} +#endif + +#ifndef writeb_relaxed +#define writeb_relaxed writeb_relaxed +static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + __raw_writeb(value, addr); +} +#endif + +#ifndef writew_relaxed +#define writew_relaxed writew_relaxed +static inline void writew_relaxed(u16 value, volatile void __iomem *addr) +{ + __raw_writew(cpu_to_le16(value), addr); +} +#endif + +#ifndef writel_relaxed +#define writel_relaxed writel_relaxed +static inline void writel_relaxed(u32 value, volatile void __iomem *addr) +{ + __raw_writel(__cpu_to_le32(value), addr); +} +#endif + +#if defined(writeq) && !defined(writeq_relaxed) +#define writeq_relaxed writeq_relaxed +static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + __raw_writeq(__cpu_to_le64(value), addr); +} +#endif + +/* + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). + */ +#ifndef readsb +#define readsb readsb +static inline void readsb(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u8 *buf = buffer; + + do { + u8 x = __raw_readb(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifndef readsw +#define readsw readsw +static inline void readsw(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u16 *buf = buffer; + + do { + u16 x = __raw_readw(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifndef readsl +#define readsl readsl +static inline void readsl(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u32 *buf = buffer; + + do { + u32 x = __raw_readl(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifdef CONFIG_64BIT +#ifndef readsq +#define readsq readsq +static inline void readsq(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u64 *buf = buffer; + + do { + u64 x = __raw_readq(addr); + *buf++ = x; + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writesb +#define writesb writesb +static inline void writesb(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u8 *buf = buffer; + + do { + __raw_writeb(*buf++, addr); + } while (--count); + } +} +#endif + +#ifndef writesw +#define writesw writesw +static inline void writesw(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u16 *buf = buffer; + + do { + __raw_writew(*buf++, addr); + } while (--count); + } +} +#endif + +#ifndef writesl +#define writesl writesl +static inline void writesl(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u32 *buf = buffer; + + do { + 
__raw_writel(*buf++, addr); + } while (--count); + } +} +#endif + +#ifdef CONFIG_64BIT +#ifndef writesq +#define writesq writesq +static inline void writesq(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u64 *buf = buffer; + + do { + __raw_writeq(*buf++, addr); + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef PCI_IOBASE +#define PCI_IOBASE ((void __iomem *)0) +#endif + +#ifndef IO_SPACE_LIMIT +#define IO_SPACE_LIMIT 0xffff +#endif + +#include + +/* + * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be + * implemented on hardware that needs an additional delay for I/O accesses to + * take effect. + */ + +#ifndef inb +#define inb inb +static inline u8 inb(unsigned long addr) +{ + u8 val; + + __io_pbr(); + val = __raw_readb(PCI_IOBASE + addr); + __io_par(val); + return val; +} +#endif + +#ifndef inw +#define inw inw +static inline u16 inw(unsigned long addr) +{ + u16 val; + + __io_pbr(); + val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr)); + __io_par(val); + return val; +} +#endif + +#ifndef inl +#define inl inl +static inline u32 inl(unsigned long addr) +{ + u32 val; + + __io_pbr(); + val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr)); + __io_par(val); + return val; +} +#endif + +#ifndef outb +#define outb outb +static inline void outb(u8 value, unsigned long addr) +{ + __io_pbw(); + __raw_writeb(value, PCI_IOBASE + addr); + __io_paw(); +} +#endif + +#ifndef outw +#define outw outw +static inline void outw(u16 value, unsigned long addr) +{ + __io_pbw(); + __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); + __io_paw(); +} +#endif + +#ifndef outl +#define outl outl +static inline void outl(u32 value, unsigned long addr) +{ + __io_pbw(); + __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); + __io_paw(); +} +#endif + +#ifndef inb_p +#define inb_p inb_p +static inline u8 inb_p(unsigned long addr) +{ + return inb(addr); +} +#endif + +#ifndef inw_p +#define inw_p inw_p +static inline u16 inw_p(unsigned long addr) +{ + return inw(addr); +} +#endif + +#ifndef inl_p +#define inl_p inl_p +static inline u32 inl_p(unsigned long addr) +{ + return inl(addr); +} +#endif + +#ifndef outb_p +#define outb_p outb_p +static inline void outb_p(u8 value, unsigned long addr) +{ + outb(value, addr); +} +#endif + +#ifndef outw_p +#define outw_p outw_p +static inline void outw_p(u16 value, unsigned long addr) +{ + outw(value, addr); +} +#endif + +#ifndef outl_p +#define outl_p outl_p +static inline void outl_p(u32 value, unsigned long addr) +{ + outl(value, addr); +} +#endif + +/* + * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a + * single I/O port multiple times. 
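+ *
+ * For instance (an added illustration; ATA_DATA_PORT is hypothetical),
+ * draining one 512-byte sector from a legacy ATA data port is 256
+ * repeated reads of the same port:
+ *
+ *	u16 buf[256];
+ *	insw(ATA_DATA_PORT, buf, 256);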
+ */ + +#ifndef insb +#define insb insb +static inline void insb(unsigned long addr, void *buffer, unsigned int count) +{ + readsb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insw +#define insw insw +static inline void insw(unsigned long addr, void *buffer, unsigned int count) +{ + readsw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insl +#define insl insl +static inline void insl(unsigned long addr, void *buffer, unsigned int count) +{ + readsl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsb +#define outsb outsb +static inline void outsb(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsw +#define outsw outsw +static inline void outsw(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsl +#define outsl outsl +static inline void outsl(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insb_p +#define insb_p insb_p +static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) +{ + insb(addr, buffer, count); +} +#endif + +#ifndef insw_p +#define insw_p insw_p +static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) +{ + insw(addr, buffer, count); +} +#endif + +#ifndef insl_p +#define insl_p insl_p +static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) +{ + insl(addr, buffer, count); +} +#endif + +#ifndef outsb_p +#define outsb_p outsb_p +static inline void outsb_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsb(addr, buffer, count); +} +#endif + +#ifndef outsw_p +#define outsw_p outsw_p +static inline void outsw_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsw(addr, buffer, count); +} +#endif + +#ifndef outsl_p +#define outsl_p outsl_p +static inline void outsl_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsl(addr, buffer, count); +} +#endif + +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioread8 +#define ioread8 ioread8 +static inline u8 ioread8(const volatile void __iomem *addr) +{ + return readb(addr); +} +#endif + +#ifndef ioread16 +#define ioread16 ioread16 +static inline u16 ioread16(const volatile void __iomem *addr) +{ + return readw(addr); +} +#endif + +#ifndef ioread32 +#define ioread32 ioread32 +static inline u32 ioread32(const volatile void __iomem *addr) +{ + return readl(addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64 +#define ioread64 ioread64 +static inline u64 ioread64(const volatile void __iomem *addr) +{ + return readq(addr); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef iowrite8 +#define iowrite8 iowrite8 +static inline void iowrite8(u8 value, volatile void __iomem *addr) +{ + writeb(value, addr); +} +#endif + +#ifndef iowrite16 +#define iowrite16 iowrite16 +static inline void iowrite16(u16 value, volatile void __iomem *addr) +{ + writew(value, addr); +} +#endif + +#ifndef iowrite32 +#define iowrite32 iowrite32 +static inline void iowrite32(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64 +#define iowrite64 iowrite64 +static inline void iowrite64(u64 value, volatile void __iomem *addr) +{ + writeq(value, addr); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef ioread16be +#define ioread16be ioread16be +static inline u16 ioread16be(const volatile void __iomem *addr) +{ 
+ return swab16(readw(addr)); +} +#endif + +#ifndef ioread32be +#define ioread32be ioread32be +static inline u32 ioread32be(const volatile void __iomem *addr) +{ + return swab32(readl(addr)); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64be +#define ioread64be ioread64be +static inline u64 ioread64be(const volatile void __iomem *addr) +{ + return swab64(readq(addr)); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef iowrite16be +#define iowrite16be iowrite16be +static inline void iowrite16be(u16 value, void volatile __iomem *addr) +{ + writew(swab16(value), addr); +} +#endif + +#ifndef iowrite32be +#define iowrite32be iowrite32be +static inline void iowrite32be(u32 value, volatile void __iomem *addr) +{ + writel(swab32(value), addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64be +#define iowrite64be iowrite64be +static inline void iowrite64be(u64 value, volatile void __iomem *addr) +{ + writeq(swab64(value), addr); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef ioread8_rep +#define ioread8_rep ioread8_rep +static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + readsb(addr, buffer, count); +} +#endif + +#ifndef ioread16_rep +#define ioread16_rep ioread16_rep +static inline void ioread16_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsw(addr, buffer, count); +} +#endif + +#ifndef ioread32_rep +#define ioread32_rep ioread32_rep +static inline void ioread32_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsl(addr, buffer, count); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64_rep +#define ioread64_rep ioread64_rep +static inline void ioread64_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsq(addr, buffer, count); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef iowrite8_rep +#define iowrite8_rep iowrite8_rep +static inline void iowrite8_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesb(addr, buffer, count); +} +#endif + +#ifndef iowrite16_rep +#define iowrite16_rep iowrite16_rep +static inline void iowrite16_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesw(addr, buffer, count); +} +#endif + +#ifndef iowrite32_rep +#define iowrite32_rep iowrite32_rep +static inline void iowrite32_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesl(addr, buffer, count); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64_rep +#define iowrite64_rep iowrite64_rep +static inline void iowrite64_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesq(addr, buffer, count); +} +#endif +#endif /* CONFIG_64BIT */ +#endif /* CONFIG_GENERIC_IOMAP */ + +#ifdef __KERNEL__ + +#include +#define __io_virt(x) ((void __force *)(x)) + +#ifndef CONFIG_GENERIC_IOMAP +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); + +#ifndef pci_iounmap +#define pci_iounmap pci_iounmap +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ +} +#endif +#endif /* CONFIG_GENERIC_IOMAP */ + +/* + * Change virtual addresses to physical addresses and vv. 
+ * These are pretty trivial + */ +#ifndef virt_to_phys +#define virt_to_phys virt_to_phys +static inline unsigned long virt_to_phys(volatile void *address) +{ + return __pa((unsigned long)address); +} +#endif + +#ifndef phys_to_virt +#define phys_to_virt phys_to_virt +static inline void *phys_to_virt(unsigned long address) +{ + return __va(address); +} +#endif + +/** + * DOC: ioremap() and ioremap_*() variants + * + * If you have an IOMMU your architecture is expected to have both ioremap() + * and iounmap() implemented otherwise the asm-generic helpers will provide a + * direct mapping. + * + * There are ioremap_*() call variants, if you have no IOMMU we naturally will + * default to direct mapping for all of them, you can override these defaults. + * If you have an IOMMU you are highly encouraged to provide your own + * ioremap variant implementation as there currently is no safe architecture + * agnostic default. To avoid possible improper behaviour default asm-generic + * ioremap_*() variants all return NULL when an IOMMU is available. If you've + * defined your own ioremap_*() variant you must then declare your own + * ioremap_*() variant as defined to itself to avoid the default NULL return. + */ + +#ifdef CONFIG_MMU + +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif + +#else /* !CONFIG_MMU */ + +/* + * Change "struct page" to physical address. + * + * This implementation is for the no-MMU case only... if you have an MMU + * you'll need to provide your own definitions. + */ + +#ifndef ioremap +#define ioremap ioremap +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + return (void __iomem *)(unsigned long)offset; +} +#endif + +#ifndef iounmap +#define iounmap iounmap + +static inline void iounmap(void __iomem *addr) +{ +} +#endif +#endif /* CONFIG_MMU */ +#ifndef ioremap_nocache +void __iomem *ioremap(phys_addr_t phys_addr, size_t size); +#define ioremap_nocache ioremap_nocache +static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) +{ + return ioremap(offset, size); +} +#endif + +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef ioremap_wc +#define ioremap_wc ioremap_wc +static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef ioremap_wt +#define ioremap_wt ioremap_wt +static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifdef CONFIG_HAS_IOPORT_MAP +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioport_map +#define ioport_map ioport_map +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + port &= IO_SPACE_LIMIT; + return (port > MMIO_UPPER_LIMIT) ? 
NULL : PCI_IOBASE + port;
+}
+#endif
+
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+#endif
+#else /* CONFIG_GENERIC_IOMAP */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *p);
+#endif /* CONFIG_GENERIC_IOMAP */
+#endif /* CONFIG_HAS_IOPORT_MAP */
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#ifndef xlate_dev_kmem_ptr
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+static inline void *xlate_dev_kmem_ptr(void *addr)
+{
+	return addr;
+}
+#endif
+
+#ifndef xlate_dev_mem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
+{
+	return __va(addr);
+}
+#endif
+
+#ifndef unxlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+{
+}
+#endif
+
+#ifdef CONFIG_VIRT_TO_BUS
+#ifndef virt_to_bus
+static inline unsigned long virt_to_bus(void *address)
+{
+	return (unsigned long)address;
+}
+
+static inline void *bus_to_virt(unsigned long address)
+{
+	return (void *)address;
+}
+#endif
+#endif
+
+#ifndef memset_io
+#define memset_io memset_io
+/**
+ * memset_io - Set a range of I/O memory to a constant value
+ * @addr: The beginning of the I/O-memory range to set
+ * @value: The value to set the memory to
+ * @size: The number of bytes to set
+ *
+ * Set a range of I/O memory to a given value.
+ */
+static inline void memset_io(volatile void __iomem *addr, int value,
+			     size_t size)
+{
+	memset(__io_virt(addr), value, size);
+}
+#endif
+
+#ifndef memcpy_fromio
+#define memcpy_fromio memcpy_fromio
+/**
+ * memcpy_fromio - Copy a block of data from I/O memory
+ * @buffer: The (RAM) destination for the copy
+ * @addr: The (I/O memory) source for the data
+ * @size: The number of bytes to copy
+ *
+ * Copy a block of data from I/O memory.
+ */
+static inline void memcpy_fromio(void *buffer,
+				 const volatile void __iomem *addr,
+				 size_t size)
+{
+	memcpy(buffer, __io_virt(addr), size);
+}
+#endif
+
+#ifndef memcpy_toio
+#define memcpy_toio memcpy_toio
+/**
+ * memcpy_toio - Copy a block of data into I/O memory
+ * @addr: The (I/O memory) destination for the copy
+ * @buffer: The (RAM) source for the data
+ * @size: The number of bytes to copy
+ *
+ * Copy a block of data to I/O memory.
+ */
+static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
+			       size_t size)
+{
+	memcpy(__io_virt(addr), buffer, size);
+}
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_GENERIC_IO_H */
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
new file mode 100644
index 0000000..9fda9ed
--- /dev/null
+++ b/include/asm-generic/ioctl.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_IOCTL_H
+#define _ASM_GENERIC_IOCTL_H
+
+#include <uapi/asm-generic/ioctl.h>
+
+#ifdef __CHECKER__
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#else
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+	((sizeof(t) == sizeof(t[1]) && \
+	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+	  sizeof(t) : __invalid_size_argument_for_IOC)
+#endif
+
+#endif /* _ASM_GENERIC_IOCTL_H */
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
new file mode 100644
index 0000000..a008f50
--- /dev/null
+++ b/include/asm-generic/iomap.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int ioread8(void __iomem *);
+extern unsigned int ioread16(void __iomem *);
+extern unsigned int ioread16be(void __iomem *);
+extern unsigned int ioread32(void __iomem *);
+extern unsigned int ioread32be(void __iomem *);
+#ifdef CONFIG_64BIT
+extern u64 ioread64(void __iomem *);
+extern u64 ioread64be(void __iomem *);
+#endif
+
+#ifdef readq
+#define ioread64_lo_hi ioread64_lo_hi
+#define ioread64_hi_lo ioread64_hi_lo
+#define ioread64be_lo_hi ioread64be_lo_hi
+#define ioread64be_hi_lo ioread64be_hi_lo
+extern u64 ioread64_lo_hi(void __iomem *addr);
+extern u64 ioread64_hi_lo(void __iomem *addr);
+extern u64 ioread64be_lo_hi(void __iomem *addr);
+extern u64 ioread64be_hi_lo(void __iomem *addr);
+#endif
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
+#ifdef CONFIG_64BIT
+extern void iowrite64(u64, void __iomem *);
+extern void iowrite64be(u64, void __iomem *);
+#endif
+
+#ifdef writeq
+#define iowrite64_lo_hi iowrite64_lo_hi
+#define iowrite64_hi_lo iowrite64_hi_lo
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
+#endif
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
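+ *
+ * E.g. (an added illustration; "cookie" is a mapping obtained from
+ * ioport_map() or pci_iomap()):
+ *
+ *	ioread16_rep(cookie, buf, count);	one port, "count" reads
+ *
+ * while memcpy_fromio(buf, cookie, count * 2) would instead advance
+ * through successive addresses.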
+ */ +extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); + +extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); + +#ifdef CONFIG_HAS_IOPORT_MAP +/* Create a virtual mapping cookie for an IO port range */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); +#endif + +#ifndef ARCH_HAS_IOREMAP_WC +#define ioremap_wc ioremap_nocache +#endif + +#ifndef ARCH_HAS_IOREMAP_WT +#define ioremap_wt ioremap_nocache +#endif + +#ifdef CONFIG_PCI +/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#elif defined(CONFIG_GENERIC_IOMAP) +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } +#endif + +#include + +#endif diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h new file mode 100644 index 0000000..da21de9 --- /dev/null +++ b/include/asm-generic/irq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_IRQ_H +#define __ASM_GENERIC_IRQ_H + +/* + * NR_IRQS is the upper bound of how many interrupts can be handled + * in the platform. It is used to size the static irq_map array, + * so don't make it too big. + */ +#ifndef NR_IRQS +#define NR_IRQS 64 +#endif + +static inline int irq_canonicalize(int irq) +{ + return irq; +} + +#endif /* __ASM_GENERIC_IRQ_H */ diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h new file mode 100644 index 0000000..2e7c6e8 --- /dev/null +++ b/include/asm-generic/irq_regs.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Fallback per-CPU frame pointer holder + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _ASM_GENERIC_IRQ_REGS_H +#define _ASM_GENERIC_IRQ_REGS_H + +#include + +/* + * Per-cpu current frame pointer - the location of the last exception frame on + * the stack + */ +DECLARE_PER_CPU(struct pt_regs *, __irq_regs); + +static inline struct pt_regs *get_irq_regs(void) +{ + return __this_cpu_read(__irq_regs); +} + +static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) +{ + struct pt_regs *old_regs; + + old_regs = __this_cpu_read(__irq_regs); + __this_cpu_write(__irq_regs, new_regs); + return old_regs; +} + +#endif /* _ASM_GENERIC_IRQ_REGS_H */ diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h new file mode 100644 index 0000000..d5dce06 --- /dev/null +++ b/include/asm-generic/irq_work.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_IRQ_WORK_H +#define __ASM_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return false; +} + +#endif /* __ASM_IRQ_WORK_H */ + diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h new file mode 100644 index 0000000..19ccbf4 --- /dev/null +++ b/include/asm-generic/irqflags.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_IRQFLAGS_H +#define __ASM_GENERIC_IRQFLAGS_H + +/* + * All architectures should implement at least the first two functions, + * usually inline assembly will be the best way. + */ +#ifndef ARCH_IRQ_DISABLED +#define ARCH_IRQ_DISABLED 0 +#define ARCH_IRQ_ENABLED 1 +#endif + +/* read interrupt enabled status */ +#ifndef arch_local_save_flags +unsigned long arch_local_save_flags(void); +#endif + +/* set interrupt enabled status */ +#ifndef arch_local_irq_restore +void arch_local_irq_restore(unsigned long flags); +#endif + +/* get status and disable interrupts */ +#ifndef arch_local_irq_save +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + flags = arch_local_save_flags(); + arch_local_irq_restore(ARCH_IRQ_DISABLED); + return flags; +} +#endif + +/* test flags */ +#ifndef arch_irqs_disabled_flags +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return flags == ARCH_IRQ_DISABLED; +} +#endif + +/* unconditionally enable interrupts */ +#ifndef arch_local_irq_enable +static inline void arch_local_irq_enable(void) +{ + arch_local_irq_restore(ARCH_IRQ_ENABLED); +} +#endif + +/* unconditionally disable interrupts */ +#ifndef arch_local_irq_disable +static inline void arch_local_irq_disable(void) +{ + arch_local_irq_restore(ARCH_IRQ_DISABLED); +} +#endif + +/* test hardware interrupt enable bit */ +#ifndef arch_irqs_disabled +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} +#endif + +#endif /* __ASM_GENERIC_IRQFLAGS_H */ diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h new file mode 100644 index 0000000..2b10b31 --- /dev/null +++ b/include/asm-generic/kdebug.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KDEBUG_H +#define _ASM_GENERIC_KDEBUG_H + +enum die_val { + DIE_UNUSED, + DIE_OOPS = 1, +}; + +#endif /* _ASM_GENERIC_KDEBUG_H */ diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h new file mode 100644 index 0000000..9f95b7b --- /dev/null +++ b/include/asm-generic/kmap_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KMAP_TYPES_H +#define _ASM_GENERIC_KMAP_TYPES_H + +#ifdef __WITH_KM_FENCE +# define 
KM_TYPE_NR 41
#else
# define KM_TYPE_NR 20
#endif

#endif
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
new file mode 100644
index 0000000..4a98208
--- /dev/null
+++ b/include/asm-generic/kprobes.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KPROBES_H
+#define _ASM_GENERIC_KPROBES_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#ifdef CONFIG_KPROBES
+/*
+ * Blacklist generating macro. Use this macro to specify functions which
+ * must not be probed.
+ */
+# define __NOKPROBE_SYMBOL(fname)			\
+static unsigned long __used				\
+	__attribute__((__section__("_kprobe_blacklist")))	\
+	_kbl_addr_##fname = (unsigned long)fname;
+# define NOKPROBE_SYMBOL(fname)	__NOKPROBE_SYMBOL(fname)
+/* Use this to forbid a kprobes attach on very low level functions */
+# define __kprobes	__attribute__((__section__(".kprobes.text")))
+# define nokprobe_inline	__always_inline
+#else
+# define NOKPROBE_SYMBOL(fname)
+# define __kprobes
+# define nokprobe_inline	inline
+#endif
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+
+#endif /* _ASM_GENERIC_KPROBES_H */
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 0000000..728e5c5
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#include <uapi/asm-generic/kvm_para.h>
+
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	return 0;
+}
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
+static inline bool kvm_para_available(void)
+{
+	return false;
+}
+
+#endif
diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h
new file mode 100644
index 0000000..fef7a01
--- /dev/null
+++ b/include/asm-generic/linkage.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_LINKAGE_H
+#define __ASM_GENERIC_LINKAGE_H
+/*
+ * linux/linkage.h provides reasonable defaults.
+ * An architecture can override them by providing its own version.
+ */
+
+#endif /* __ASM_GENERIC_LINKAGE_H */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
new file mode 100644
index 0000000..fca7f1d
--- /dev/null
+++ b/include/asm-generic/local.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_LOCAL_H
+#define _ASM_GENERIC_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/atomic.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t. Which is
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's.
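+ * (An illustrative aside, not upstream text: typical usage pairs this
+ * type with per-CPU data, e.g.
+ *
+ *	static DEFINE_PER_CPU(local_t, hits);
+ *	local_inc(this_cpu_ptr(&hits));
+ *
+ * and the struct wrapper below is what keeps such a counter from being
+ * passed to atomic_long_*() directly.)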
*/ +typedef struct +{ + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l,i) atomic_long_set((&(l)->a),(i)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) +#define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) + +#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) +#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) +#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) +#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) +#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) +#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) +#define local_inc_return(l) atomic_long_inc_return(&(l)->a) + +#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) +#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) +#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) +#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local_inc(l) local_set((l), local_read(l) + 1) +#define __local_dec(l) local_set((l), local_read(l) - 1) +#define __local_add(i,l) local_set((l), local_read(l) + (i)) +#define __local_sub(i,l) local_set((l), local_read(l) - (i)) + +#endif /* _ASM_GENERIC_LOCAL_H */ diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h new file mode 100644 index 0000000..765be0b --- /dev/null +++ b/include/asm-generic/local64.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_LOCAL64_H +#define _ASM_GENERIC_LOCAL64_H + +#include +#include + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. + * + * This is the default implementation, which uses atomic64_t. Which is + * rather pointless. The whole point behind local64_t is that some processors + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs + * running on this CPU. local64_t allows exploitation of such capabilities. + */ + +/* Implement in terms of atomics. 
*/ + +#if BITS_PER_LONG == 64 + +#include + +typedef struct { + local_t a; +} local64_t; + +#define LOCAL64_INIT(i) { LOCAL_INIT(i) } + +#define local64_read(l) local_read(&(l)->a) +#define local64_set(l,i) local_set((&(l)->a),(i)) +#define local64_inc(l) local_inc(&(l)->a) +#define local64_dec(l) local_dec(&(l)->a) +#define local64_add(i,l) local_add((i),(&(l)->a)) +#define local64_sub(i,l) local_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) local_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) local_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) local_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) local_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) local_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#else /* BITS_PER_LONG != 64 */ + +#include + +/* Don't use typedef: don't want them to be mixed with atomic_t's. */ +typedef struct { + atomic64_t a; +} local64_t; + +#define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local64_read(l) atomic64_read(&(l)->a) +#define local64_set(l,i) atomic64_set((&(l)->a),(i)) +#define local64_inc(l) atomic64_inc(&(l)->a) +#define local64_dec(l) atomic64_dec(&(l)->a) +#define local64_add(i,l) atomic64_add((i),(&(l)->a)) +#define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) atomic64_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. 
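+ *
+ * (Added sketch; "evt" stands for a hypothetical per-CPU local64_t.)
+ * Safe only with preemption disabled and no IRQ-context writers:
+ *
+ *	preempt_disable();
+ *	__local64_inc(this_cpu_ptr(&evt));
+ *	preempt_enable();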
*/ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#endif /* BITS_PER_LONG != 64 */ + +#endif /* _ASM_GENERIC_LOCAL64_H */ diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h new file mode 100644 index 0000000..10cd4ff --- /dev/null +++ b/include/asm-generic/mcs_spinlock.h @@ -0,0 +1,13 @@ +#ifndef __ASM_MCS_SPINLOCK_H +#define __ASM_MCS_SPINLOCK_H + +/* + * Architectures can define their own: + * + * arch_mcs_spin_lock_contended(l) + * arch_mcs_spin_unlock_contended(l) + * + * See kernel/locking/mcs_spinlock.c. + */ + +#endif /* __ASM_MCS_SPINLOCK_H */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h new file mode 100644 index 0000000..7637fb4 --- /dev/null +++ b/include/asm-generic/memory_model.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MEMORY_MODEL_H +#define __ASM_MEMORY_MODEL_H + +#include + +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_FLATMEM) + +#ifndef ARCH_PFN_OFFSET +#define ARCH_PFN_OFFSET (0UL) +#endif + +#elif defined(CONFIG_DISCONTIGMEM) + +#ifndef arch_pfn_to_nid +#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) +#endif + +#ifndef arch_local_page_offset +#define arch_local_page_offset(pfn, nid) \ + ((pfn) - NODE_DATA(nid)->node_start_pfn) +#endif + +#endif /* CONFIG_DISCONTIGMEM */ + +/* + * supports 3 memory models. + */ +#if defined(CONFIG_FLATMEM) + +#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) +#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ + ARCH_PFN_OFFSET) +#elif defined(CONFIG_DISCONTIGMEM) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + unsigned long __nid = arch_pfn_to_nid(__pfn); \ + NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ +}) + +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ + (unsigned long)(__pg - __pgdat->node_mem_map) + \ + __pgdat->node_start_pfn; \ +}) + +#elif defined(CONFIG_SPARSEMEM_VMEMMAP) + +/* memmap is virtually contiguous. */ +#define __pfn_to_page(pfn) (vmemmap + (pfn)) +#define __page_to_pfn(page) (unsigned long)((page) - vmemmap) + +#elif defined(CONFIG_SPARSEMEM) +/* + * Note: section's mem_map is encoded to reflect its start_pfn. 
+ * section[i].section_mem_map == mem_map's address - start_pfn; + */ +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + int __sec = page_to_section(__pg); \ + (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ +}) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + struct mem_section *__sec = __pfn_to_section(__pfn); \ + __section_mem_map_addr(__sec) + __pfn; \ +}) +#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ + +/* + * Convert a physical address to a Page Frame Number and back + */ +#define __phys_to_pfn(paddr) PHYS_PFN(paddr) +#define __pfn_to_phys(pfn) PFN_PHYS(pfn) + +#define page_to_pfn __page_to_pfn +#define pfn_to_page __pfn_to_page + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h new file mode 100644 index 0000000..5ff0e51 --- /dev/null +++ b/include/asm-generic/mm-arch-hooks.h @@ -0,0 +1,16 @@ +/* + * Architecture specific mm hooks + */ + +#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H +#define _ASM_GENERIC_MM_ARCH_HOOKS_H + +/* + * This file should be included through arch/../include/asm/Kbuild for + * the architecture which doesn't need specific mm hooks. + * + * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h + * are used. + */ + +#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */ diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h new file mode 100644 index 0000000..6736ed2 --- /dev/null +++ b/include/asm-generic/mm_hooks.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap + * and arch_unmap to be included in asm-FOO/mmu_context.h for any + * arch FOO which doesn't need to hook these. + */ +#ifndef _ASM_GENERIC_MM_HOOKS_H +#define _ASM_GENERIC_MM_HOOKS_H + +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + return 0; +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, bool foreign) +{ + /* by default, allow everything */ + return true; +} +#endif /* _ASM_GENERIC_MM_HOOKS_H */ diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h new file mode 100644 index 0000000..5698fca --- /dev/null +++ b/include/asm-generic/mmiowb.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMIOWB_H +#define __ASM_GENERIC_MMIOWB_H + +/* + * Generic implementation of mmiowb() tracking for spinlocks. + * + * If your architecture doesn't ensure that writes to an I/O peripheral + * within two spinlocked sections on two different CPUs are seen by the + * peripheral in the order corresponding to the lock handover, then you + * need to follow these FIVE easy steps: + * + * 1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy) + * in asm/mmiowb.h, then #include this file + * 2. Ensure your I/O write accessors call mmiowb_set_pending() + * 3. Select ARCH_HAS_MMIOWB + * 4. Untangle the resulting mess of header files + * 5. 
Complain to your architects + */ +#ifdef CONFIG_MMIOWB + +#include +#include + +#ifndef arch_mmiowb_state +#include +#include + +DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); +#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state) +#else +#define __mmiowb_state() arch_mmiowb_state() +#endif /* arch_mmiowb_state */ + +static inline void mmiowb_set_pending(void) +{ + struct mmiowb_state *ms = __mmiowb_state(); + + if (likely(ms->nesting_count)) + ms->mmiowb_pending = ms->nesting_count; +} + +static inline void mmiowb_spin_lock(void) +{ + struct mmiowb_state *ms = __mmiowb_state(); + ms->nesting_count++; +} + +static inline void mmiowb_spin_unlock(void) +{ + struct mmiowb_state *ms = __mmiowb_state(); + + if (unlikely(ms->mmiowb_pending)) { + ms->mmiowb_pending = 0; + mmiowb(); + } + + ms->nesting_count--; +} +#else +#define mmiowb_set_pending() do { } while (0) +#define mmiowb_spin_lock() do { } while (0) +#define mmiowb_spin_unlock() do { } while (0) +#endif /* CONFIG_MMIOWB */ +#endif /* __ASM_GENERIC_MMIOWB_H */ diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h new file mode 100644 index 0000000..8eb0095 --- /dev/null +++ b/include/asm-generic/mmiowb_types.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMIOWB_TYPES_H +#define __ASM_GENERIC_MMIOWB_TYPES_H + +#include + +struct mmiowb_state { + u16 nesting_count; + u16 mmiowb_pending; +}; + +#endif /* __ASM_GENERIC_MMIOWB_TYPES_H */ diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h new file mode 100644 index 0000000..0618380 --- /dev/null +++ b/include/asm-generic/mmu.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMU_H +#define __ASM_GENERIC_MMU_H + +/* + * This is the mmu.h header for nommu implementations. + * Architectures with an MMU need something more complex. + */ +#ifndef __ASSEMBLY__ +typedef struct { + unsigned long end_brk; + +#ifdef CONFIG_BINFMT_ELF_FDPIC + unsigned long exec_fdpic_loadmap; + unsigned long interp_fdpic_loadmap; +#endif +} mm_context_t; +#endif + +#endif /* __ASM_GENERIC_MMU_H */ diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h new file mode 100644 index 0000000..6be9106 --- /dev/null +++ b/include/asm-generic/mmu_context.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMU_CONTEXT_H +#define __ASM_GENERIC_MMU_CONTEXT_H + +/* + * Generic hooks for NOMMU architectures, which do not need to do + * anything special here. 
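+ *
+ * (Illustrative contrast, not upstream text: an MMU architecture would
+ * give switch_mm() a real body, roughly of the shape
+ *
+ *	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ *	load_pgd(virt_to_phys(next->pgd));	load_pgd() is hypothetical
+ *
+ * which is exactly the work a NOMMU kernel gets to skip.)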
+ */ + +#include + +struct task_struct; +struct mm_struct; + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ +} + +static inline void deactivate_mm(struct task_struct *task, + struct mm_struct *mm) +{ +} + +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) +{ +} + +static inline void activate_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm) +{ +} + +#endif /* __ASM_GENERIC_MMU_CONTEXT_H */ diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h new file mode 100644 index 0000000..98e1541 --- /dev/null +++ b/include/asm-generic/module.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MODULE_H +#define __ASM_GENERIC_MODULE_H + +/* + * Many architectures just need a simple module + * loader without arch specific data. + */ +#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC +struct mod_arch_specific +{ +}; +#endif + +#ifdef CONFIG_64BIT +#define Elf_Shdr Elf64_Shdr +#define Elf_Phdr Elf64_Phdr +#define Elf_Sym Elf64_Sym +#define Elf_Dyn Elf64_Dyn +#define Elf_Ehdr Elf64_Ehdr +#define Elf_Addr Elf64_Addr +#ifdef CONFIG_MODULES_USE_ELF_REL +#define Elf_Rel Elf64_Rel +#endif +#ifdef CONFIG_MODULES_USE_ELF_RELA +#define Elf_Rela Elf64_Rela +#endif +#define ELF_R_TYPE(X) ELF64_R_TYPE(X) +#define ELF_R_SYM(X) ELF64_R_SYM(X) + +#else /* CONFIG_64BIT */ + +#define Elf_Shdr Elf32_Shdr +#define Elf_Phdr Elf32_Phdr +#define Elf_Sym Elf32_Sym +#define Elf_Dyn Elf32_Dyn +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Addr Elf32_Addr +#ifdef CONFIG_MODULES_USE_ELF_REL +#define Elf_Rel Elf32_Rel +#endif +#ifdef CONFIG_MODULES_USE_ELF_RELA +#define Elf_Rela Elf32_Rela +#endif +#define ELF_R_TYPE(X) ELF32_R_TYPE(X) +#define ELF_R_SYM(X) ELF32_R_SYM(X) +#endif + +#endif /* __ASM_GENERIC_MODULE_H */ diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h new file mode 100644 index 0000000..53759d2 --- /dev/null +++ b/include/asm-generic/mshyperv.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Linux-specific definitions for managing interactions with Microsoft's + * Hyper-V hypervisor. The definitions in this file are architecture + * independent. See arch//include/asm/mshyperv.h for definitions + * that are specific to architecture . + * + * Definitions that are specified in the Hyper-V Top Level Functional + * Spec (TLFS) should not go in this file, but should instead go in + * hyperv-tlfs.h. + * + * Copyright (C) 2019, Microsoft, Inc. 
+ *
+ * Author : Michael Kelley <mikelley@microsoft.com>
+ */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};
extern struct ms_hyperv_info ms_hyperv;

extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);


/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}


/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * If we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_signal_eom();
	}
}

void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);
void hv_enable_vmbus_irq(void);
void hv_disable_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * Hypervisor's notion of virtual processor ID is different from
 * Linux' notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Setup a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * when making hypercalls and other operations that refer to
 * specific processors.
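 *
 * Example (added for illustration): a hypercall aimed at the current
 * CPU would use
 *
 *	vp = hv_cpu_number_to_vp_number(smp_processor_id());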
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
new file mode 100644
index 0000000..e6795f0
--- /dev/null
+++ b/include/asm-generic/msi.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MSI_H
+#define __ASM_GENERIC_MSI_H
+
+#include <linux/types.h>
+
+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS	2
+#endif
+
+struct msi_desc;
+
+/**
+ * struct msi_alloc_info - Default structure for MSI interrupt allocation.
+ * @desc:	Pointer to msi descriptor
+ * @hwirq:	Associated hw interrupt number in the domain
+ * @scratchpad:	Storage for implementation specific scratch data
+ *
+ * Architectures can provide their own implementation by not including
+ * asm-generic/msi.h into their arch specific header file.
+ */
+typedef struct msi_alloc_info {
+	struct msi_desc			*desc;
+	irq_hw_number_t			hwirq;
+	union {
+		unsigned long		ul;
+		void			*ptr;
+	} scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
+} msi_alloc_info_t;
+
+#define GENERIC_MSI_DOMAIN_OPS		1
+
+#endif
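As a usage sketch (not from this patch): an MSI irqdomain can stash driver data in the scratchpad during ->msi_prepare() and read it back later in the allocation path; the GICv3 ITS driver uses scratchpad[0].ptr in this way. struct my_dev_data and my_msi_prepare() are hypothetical names for illustration:

struct my_dev_data;	/* hypothetical per-device bookkeeping */

static int my_msi_prepare(struct irq_domain *domain, struct device *dev,
			  int nvec, msi_alloc_info_t *info)
{
	struct my_dev_data *dd = dev_get_drvdata(dev);

	/* later retrieved by the domain's alloc callback */
	info->scratchpad[0].ptr = dd;
	return 0;
}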
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
new file mode 100644
index 0000000..fe801f0
--- /dev/null
+++ b/include/asm-generic/page.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PAGE_H
+#define __ASM_GENERIC_PAGE_H
+/*
+ * Generic page.h implementation, for NOMMU architectures.
+ * This provides the dummy definitions for the memory management.
+ */
+
+#ifdef CONFIG_MMU
+#error need to provide a real asm/page.h
+#endif
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT	12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE	(1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+	unsigned long pte;
+} pte_t;
+typedef struct {
+	unsigned long pmd[16];
+} pmd_t;
+typedef struct {
+	unsigned long pgd;
+} pgd_t;
+typedef struct {
+	unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((&x)->pmd[0])
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
+#define PAGE_OFFSET		(CONFIG_KERNEL_RAM_BASE_ADDRESS)
+#else
+#define PAGE_OFFSET		(0)
+#endif
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long) (x)))
+#define __pa(x) ((unsigned long) (x))
+
+#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr)	pfn_to_page(virt_to_pfn(addr))
+#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))
+
+#ifndef page_to_phys
+#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#endif
+
+#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+#define	virt_addr_valid(kaddr)	(((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+				((void *)(kaddr) < (void *)memory_end))
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_GENERIC_PAGE_H */
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
new file mode 100644
index 0000000..8d3009d
--- /dev/null
+++ b/include/asm-generic/param.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PARAM_H
+#define __ASM_GENERIC_PARAM_H
+
+#include <uapi/asm-generic/param.h>
+
+# undef HZ
+# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
+# define USER_HZ	100		/* some user interfaces are */
+# define CLOCKS_PER_SEC	(USER_HZ)	/* in "ticks" like times() */
+#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
new file mode 100644
index 0000000..483991d
--- /dev/null
+++ b/include/asm-generic/parport.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PARPORT_H
+#define __ASM_GENERIC_PARPORT_H
+
+/*
+ * An ISA bus may have i8255 parallel ports at well-known
+ * locations in the I/O space, which are scanned by
+ * parport_pc_find_isa_ports.
+ *
+ * Without ISA support, the driver will only attach
+ * to devices on the PCI bus.
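HZ vs. USER_HZ is a classic source of conversion bugs: the kernel counts jiffies at CONFIG_HZ, but times(2) and friends report ticks at the fixed USER_HZ. A sketch of the conversion, modelled on the kernel's jiffies_to_clock_t() (my_jiffies_to_clock_t is a hypothetical name; div_u64() is from <linux/math64.h>):

static inline clock_t my_jiffies_to_clock_t(unsigned long j)
{
#if (HZ % USER_HZ) == 0
	/* common case: HZ is an exact multiple of USER_HZ */
	return j / (HZ / USER_HZ);
#else
	/* otherwise scale with 64-bit intermediate to avoid overflow */
	return div_u64((u64)j * USER_HZ, HZ);
#endif
}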
+ */
+
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+#ifdef CONFIG_ISA
+	return parport_pc_find_isa_ports(autoirq, autodma);
+#else
+	return 0;
+#endif
+}
+
+#endif /* __ASM_GENERIC_PARPORT_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
new file mode 100644
index 0000000..6bb3cd3
--- /dev/null
+++ b/include/asm-generic/pci.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/include/asm-generic/pci.h
+ *
+ * Copyright (C) 2003 Russell King
+ */
+#ifndef _ASM_GENERIC_PCI_H
+#define _ASM_GENERIC_PCI_H
+
+#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	return channel ? 15 : 14;
+}
+#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+
+#endif /* _ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
new file mode 100644
index 0000000..d4f16dc
--- /dev/null
+++ b/include/asm-generic/pci_iomap.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Generic I/O port emulation.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_PCI_IOMAP_H
+#define __ASM_GENERIC_PCI_IOMAP_H
+
+struct pci_dev;
+#ifdef CONFIG_PCI
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+				     unsigned long offset,
+				     unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+					unsigned long offset,
+					unsigned long maxlen);
+/* Create a virtual mapping cookie for a port on a given PCI device.
+ * Do not call this directly, it exists to make it easier for architectures
+ * to override */
+#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
+extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
+				      unsigned int nr);
+#else
+#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
+#endif
+
+#elif defined(CONFIG_GENERIC_PCI_IOMAP)
+static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+	return NULL;
+}
+
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+	return NULL;
+}
+static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+					    unsigned long offset,
+					    unsigned long maxlen)
+{
+	return NULL;
+}
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+					       unsigned long offset,
+					       unsigned long maxlen)
+{
+	return NULL;
+}
+#endif
+
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
new file mode 100644
index 0000000..ba43b8a
--- /dev/null
+++ b/include/asm-generic/percpu.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_PERCPU_H_
+#define _ASM_GENERIC_PERCPU_H_
+
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/percpu-defs.h>
+
+#ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */ +#ifndef __per_cpu_offset +extern unsigned long __per_cpu_offset[NR_CPUS]; + +#define per_cpu_offset(x) (__per_cpu_offset[x]) +#endif + +/* + * Determine the offset for the currently active processor. + * An arch may define __my_cpu_offset to provide a more effective + * means of obtaining the offset to the per cpu variables of the + * current processor. + */ +#ifndef __my_cpu_offset +#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) +#endif +#ifdef CONFIG_DEBUG_PREEMPT +#ifdef CONFIG_MCST +extern unsigned int debug_smp_processor_id(void); +#define my_cpu_offset per_cpu_offset(debug_smp_processor_id()) +#else +#define my_cpu_offset per_cpu_offset(smp_processor_id()) +#endif +#else +#define my_cpu_offset __my_cpu_offset +#endif + +/* + * Arch may define arch_raw_cpu_ptr() to provide more efficient address + * translations for raw_cpu_ptr(). + */ +#ifndef arch_raw_cpu_ptr +#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) +#endif + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +extern void setup_per_cpu_areas(void); +#endif + +#endif /* SMP */ + +#ifndef PER_CPU_BASE_SECTION +#ifdef CONFIG_SMP +#define PER_CPU_BASE_SECTION ".data..percpu" +#else +#define PER_CPU_BASE_SECTION ".data" +#endif +#endif + +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif + +#define raw_cpu_generic_read(pcp) \ +({ \ + *raw_cpu_ptr(&(pcp)); \ +}) + +#define raw_cpu_generic_to_op(pcp, val, op) \ +do { \ + *raw_cpu_ptr(&(pcp)) op val; \ +} while (0) + +#define raw_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ + \ + *__p += val; \ + *__p; \ +}) + +#define raw_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ + typeof(pcp) __ret; \ + __ret = *__p; \ + *__p = nval; \ + __ret; \ +}) + +#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \ + typeof(pcp) __ret; \ + __ret = *__p; \ + if (__ret == (oval)) \ + *__p = nval; \ + __ret; \ +}) + +#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \ + typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \ + int __ret = 0; \ + if (*__p1 == (oval1) && *__p2 == (oval2)) { \ + *__p1 = nval1; \ + *__p2 = nval2; \ + __ret = 1; \ + } \ + (__ret); \ +}) + +#define __this_cpu_generic_read_nopreempt(pcp) \ +({ \ + typeof(pcp) __ret; \ + preempt_disable_notrace(); \ + __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ + preempt_enable_notrace(); \ + __ret; \ +}) + +#define __this_cpu_generic_read_noirq(pcp) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_read(pcp); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_read(pcp) \ +({ \ + typeof(pcp) __ret; \ + if (__native_word(pcp)) \ + __ret = __this_cpu_generic_read_nopreempt(pcp); \ + else \ + __ret = __this_cpu_generic_read_noirq(pcp); \ + __ret; \ +}) + +#define this_cpu_generic_to_op(pcp, val, op) \ +do { \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + raw_cpu_generic_to_op(pcp, val, op); \ + raw_local_irq_restore(__flags); \ +} while (0) + + +#define this_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_add_return(pcp, val); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + 
raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_xchg(pcp, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ + oval1, oval2, nval1, nval2); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#ifndef raw_cpu_read_1 +#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp) +#endif +#ifndef raw_cpu_read_2 +#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp) +#endif +#ifndef raw_cpu_read_4 +#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp) +#endif +#ifndef raw_cpu_read_8 +#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp) +#endif + +#ifndef raw_cpu_write_1 +#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_2 +#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_4 +#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_8 +#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif + +#ifndef raw_cpu_add_1 +#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_2 +#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_4 +#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_8 +#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef raw_cpu_and_1 +#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_2 +#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_4 +#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_8 +#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef raw_cpu_or_1 +#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_2 +#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_4 +#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_8 +#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef raw_cpu_add_return_1 +#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_2 +#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_4 +#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_8 +#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif + +#ifndef raw_cpu_xchg_1 +#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_2 +#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_4 +#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_8 +#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef raw_cpu_cmpxchg_1 +#define 
raw_cpu_cmpxchg_1(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_2 +#define raw_cpu_cmpxchg_2(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_4 +#define raw_cpu_cmpxchg_4(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_8 +#define raw_cpu_cmpxchg_8(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef raw_cpu_cmpxchg_double_1 +#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_2 +#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_4 +#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_8 +#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#ifndef this_cpu_read_1 +#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_2 +#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_4 +#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_8 +#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp) +#endif + +#ifndef this_cpu_write_1 +#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_2 +#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_4 +#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_8 +#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif + +#ifndef this_cpu_add_1 +#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_2 +#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_4 +#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_8 +#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef this_cpu_and_1 +#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_2 +#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_4 +#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_8 +#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef this_cpu_or_1 +#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_2 +#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_4 +#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_8 +#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef this_cpu_add_return_1 +#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_2 +#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_4 +#define this_cpu_add_return_4(pcp, 
val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_8 +#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif + +#ifndef this_cpu_xchg_1 +#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_2 +#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_4 +#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_8 +#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef this_cpu_cmpxchg_1 +#define this_cpu_cmpxchg_1(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_2 +#define this_cpu_cmpxchg_2(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_4 +#define this_cpu_cmpxchg_4(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_8 +#define this_cpu_cmpxchg_8(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef this_cpu_cmpxchg_double_1 +#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_2 +#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_4 +#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_8 +#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h new file mode 100644 index 0000000..73f7421 --- /dev/null +++ b/include/asm-generic/pgalloc.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_PGALLOC_H +#define __ASM_GENERIC_PGALLOC_H + +#ifdef CONFIG_MMU + +#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) +#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) + +/** + * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table + * @mm: the mm_struct of the current context + * + * This function is intended for architectures that need + * anything beyond simple page allocation. 
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+/**
+ * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	return __pte_alloc_one_kernel(mm);
+}
+#endif
+
+/**
+ * pte_free_kernel - free PTE-level kernel page table page
+ * @mm: the mm_struct of the current context
+ * @pte: pointer to the memory containing the page table
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+/**
+ * __pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ * @gfp: GFP flags to use for the allocation
+ *
+ * Allocates a page and runs the pgtable_pte_page_ctor().
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation or must have custom GFP flags.
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
+{
+	struct page *pte;
+
+	pte = alloc_page(gfp);
+	if (!pte)
+		return NULL;
+	if (!pgtable_pte_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
+
+	return pte;
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
+/**
+ * pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pte_page_ctor().
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
+}
+#endif
+
+/*
+ * Should really implement gc for free page table pages. This could be
+ * done with a reference count in struct page.
+ */
+
+/**
+ * pte_free - free PTE-level user page table page
+ * @mm: the mm_struct of the current context
+ * @pte_page: the `struct page` representing the page table
+ */
+static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
+}
+
+#endif /* CONFIG_MMU */
+
+#endif /* __ASM_GENERIC_PGALLOC_H */
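The split between pte_alloc_one() and __pte_alloc_one() exists precisely so an architecture only has to supply GFP flags. A sketch of such an override (not part of this patch; the GFP_DMA32 constraint is an invented example):

#define __HAVE_ARCH_PTE_ALLOC_ONE
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	/* e.g. an arch whose table walker needs 32-bit-reachable pages */
	return __pte_alloc_one(mm, GFP_PGTABLE_USER | GFP_DMA32);
}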
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
new file mode 100644
index 0000000..829bdb0
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOP4D_HACK_H
+#define _PGTABLE_NOP4D_HACK_H
+
+#ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
+
+#define __PAGETABLE_PUD_FOLDED 1
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT	PGDIR_SHIFT
+#define PTRS_PER_PUD	1
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)		{ return 0; }
+static inline int pgd_bad(pgd_t pgd)		{ return 0; }
+static inline int pgd_present(pgd_t pgd)	{ return 1; }
+static inline void pgd_clear(pgd_t *pgd)	{ }
+#define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud)		do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud)		do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval)	set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+	return (pud_t *)pgd;
+}
+
+#define pud_val(x)				(pgd_val((x).pgd))
+#define __pud(x)				((pud_t) { __pgd(x) })
+
+#define pgd_page(pgd)				(pud_page((pud_t){ pgd }))
+#define pgd_page_vaddr(pgd)			(pud_page_vaddr((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address)		NULL
+#define pud_free(mm, x)				do { } while (0)
+#define __pud_free_tlb(tlb, x, a)		do { } while (0)
+
+#undef  pud_addr_end
+#define pud_addr_end(addr, end)			(end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
new file mode 100644
index 0000000..aebab90
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOP4D_H
+#define _PGTABLE_NOP4D_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_P4D_FOLDED 1
+
+typedef struct { pgd_t pgd; } p4d_t;
+
+#define P4D_SHIFT		PGDIR_SHIFT
+#define MAX_PTRS_PER_P4D	1
+#define PTRS_PER_P4D		1
+#define P4D_SIZE		(1UL << P4D_SHIFT)
+#define P4D_MASK		(~(P4D_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the p4d is never bad, and a p4d always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)		{ return 0; }
+static inline int pgd_bad(pgd_t pgd)		{ return 0; }
+static inline int pgd_present(pgd_t pgd)	{ return 1; }
+static inline void pgd_clear(pgd_t *pgd)	{ }
+#define p4d_ERROR(p4d)				(pgd_ERROR((p4d).pgd))
+
+#define pgd_populate(mm, pgd, p4d)		do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d)		do { } while (0)
+/*
+ * (p4ds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval)	set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval })
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+	return (p4d_t *)pgd;
+}
+
+#define p4d_val(x)				(pgd_val((x).pgd))
+#define __p4d(x)				((p4d_t) { __pgd(x) })
+
+#define pgd_page(pgd)				(p4d_page((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd)			(p4d_page_vaddr((p4d_t){ pgd }))
+
+/*
+ * allocating and freeing a p4d is trivial: the 1-entry p4d is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define p4d_alloc_one(mm, address)		NULL
+#define p4d_free(mm, x)				do { } while (0)
+#define __p4d_free_tlb(tlb, x, a)		do { } while (0)
+
+#undef  p4d_addr_end
+#define p4d_addr_end(addr, end)			(end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_H */
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
new file mode 100644
index 0000000..b85b827
--- /dev/null
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOPMD_H
+#define _PGTABLE_NOPMD_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/pgtable-nopud.h>
+
+struct mm_struct;
+
+#define __PAGETABLE_PMD_FOLDED 1
+
+/*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+ * without casting.
+ */
+typedef struct { pud_t pud; } pmd_t;
+
+#define PMD_SHIFT	PUD_SHIFT
+#define PTRS_PER_PMD	1
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+ * into the pud entry)
+ */
+static inline int pud_none(pud_t pud)		{ return 0; }
+static inline int pud_bad(pud_t pud)		{ return 0; }
+static inline int pud_present(pud_t pud)	{ return 1; }
+static inline void pud_clear(pud_t *pud)	{ }
+#define pmd_ERROR(pmd)				(pud_ERROR((pmd).pud))
+
+#define pud_populate(mm, pmd, pte)		do { } while (0)
+
+/*
+ * (pmds are folded into puds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval)	set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
+
+static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
+{
+	return (pmd_t *)pud;
+}
+
+#define pmd_val(x)				(pud_val((x).pud))
+#define __pmd(x)				((pmd_t) { __pud(x) } )
+
+#define pud_page(pud)				(pmd_page((pmd_t){ pud }))
+#define pud_page_vaddr(pud)			(pmd_page_vaddr((pmd_t){ pud }))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pud, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one(mm, address)		NULL
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
+#define __pmd_free_tlb(tlb, x, a)		do { } while (0)
+
+#undef  pmd_addr_end
+#define pmd_addr_end(addr, end)			(end)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PGTABLE_NOPMD_H */
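Because every folded level is a one-entry table laid over the level above, and each p?d_offset() helper just casts the pointer through, a fully general five-level walk compiles and works unchanged regardless of how many levels the architecture really has. A sketch (not from this patch; the p?d_none_or_clear_bad() helpers are provided by asm-generic/pgtable.h, added later in this same patch):

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

On a two-level architecture the p4d/pud/pmd steps all collapse to casts of the same pgd entry, so the folded headers cost nothing at runtime.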
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
new file mode 100644
index 0000000..c77a1d3
--- /dev/null
+++ b/include/asm-generic/pgtable-nopud.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOPUD_H
+#define _PGTABLE_NOPUD_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nop4d-hack.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+
+#define __PAGETABLE_PUD_FOLDED 1
+
+/*
+ * Having the pud type consist of a p4d gets the size right, and allows
+ * us to conceptually access the p4d entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { p4d_t p4d; } pud_t;
+
+#define PUD_SHIFT	P4D_SHIFT
+#define PTRS_PER_PUD	1
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/*
+ * The "p4d_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the p4d entry)
+ */
+static inline int p4d_none(p4d_t p4d)		{ return 0; }
+static inline int p4d_bad(p4d_t p4d)		{ return 0; }
+static inline int p4d_present(p4d_t p4d)	{ return 1; }
+static inline void p4d_clear(p4d_t *p4d)	{ }
+#define pud_ERROR(pud)				(p4d_ERROR((pud).p4d))
+
+#define p4d_populate(mm, p4d, pud)		do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud)		do { } while (0)
+/*
+ * (puds are folded into p4ds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_p4d(p4dptr, p4dval)	set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
+
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+	return (pud_t *)p4d;
+}
+
+#define pud_val(x)				(p4d_val((x).p4d))
+#define __pud(x)				((pud_t) { __p4d(x) })
+
+#define p4d_page(p4d)				(pud_page((pud_t){ p4d }))
+#define p4d_page_vaddr(p4d)			(pud_page_vaddr((pud_t){ p4d }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the p4d, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address)		NULL
+#define pud_free(mm, x)				do { } while (0)
+#define __pud_free_tlb(tlb, x, a)		do { } while (0)
+
+#undef  pud_addr_end
+#define pud_addr_end(addr, end)			(end)
+
+#endif /* !__ARCH_USE_5LEVEL_HACK */
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOPUD_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
new file mode 100644
index 0000000..559f8c6
--- /dev/null
+++ b/include/asm-generic/pgtable.h
@@ -0,0 +1,1193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#include <linux/pfn.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE.  However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING 0UL +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); +#else +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + int r = 1; + if (!pte_young(pte)) + r = 0; + else + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); + return r; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + int r = 1; + if (!pmd_young(pmd)) + r = 0; + else + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); + return r; +} +#else +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +/* + * Despite relevant to THP only, this API is called from generic rmap code + * under PageTransHuge(), hence needs a dummy implementation for !THP + */ +static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, address, ptep); + return pte; +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + pmd_clear(pmdp); + return pmd; +} +#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pud_t *pudp) +{ + pud_t pud = *pudp; + + pud_clear(pudp); + return pud; +} +#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL +static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, 
pmd_t *pmdp, + int full) +{ + return pmdp_huge_get_and_clear(mm, address, pmdp); +} +#endif + +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pud_t *pudp, + int full) +{ + return pudp_huge_get_and_clear(mm, address, pudp); +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + int full) +{ + pte_t pte; + pte = ptep_get_and_clear(mm, address, ptep); + return pte; +} +#endif + +/* + * Some architectures may be able to avoid expensive synchronization + * primitives when modifications are made to PTE's which are already + * not present, or in the process of an address space destruction. + */ +#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +static inline void pte_clear_not_present_full(struct mm_struct *mm, + unsigned long address, + pte_t *ptep, + int full) +{ + pte_clear(mm, address, ptep); +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH +extern pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH +extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); +extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pud_t *pudp); +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT +struct mm_struct; +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +{ + pte_t old_pte = *ptep; + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); +} +#endif + +#ifndef pte_savedwrite +#define pte_savedwrite pte_write +#endif + +#ifndef pte_mk_savedwrite +#define pte_mk_savedwrite pte_mkwrite +#endif + +#ifndef pte_clear_savedwrite +#define pte_clear_savedwrite pte_wrprotect +#endif + +#ifndef pmd_savedwrite +#define pmd_savedwrite pmd_write +#endif + +#ifndef pmd_mk_savedwrite +#define pmd_mk_savedwrite pmd_mkwrite +#endif + +#ifndef pmd_clear_savedwrite +#define pmd_clear_savedwrite pmd_wrprotect +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); +} +#else +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif +#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + pud_t old_pud = *pudp; + + set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); +} +#else +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif + +#ifndef pmdp_collapse_flush +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return *pmdp; +} +#define pmdp_collapse_flush pmdp_collapse_flush +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT 
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				       pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with the CPU which sets these bits, and a non-atomic approach is
+ * fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+			     pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+	return 0;
+}
+#endif
+
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+	(pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+	(pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+	return pud_val(pud_a) == pud_val(pud_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
+{
+	return p4d_val(p4d_a) == p4d_val(p4d_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
+{
+	return pgd_val(pgd_a) == pgd_val(pgd_b);
+}
+#endif
+
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */ +#define set_pte_safe(ptep, pte) \ +({ \ + WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ + set_pte(ptep, pte); \ +}) + +#define set_pmd_safe(pmdp, pmd) \ +({ \ + WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ + set_pmd(pmdp, pmd); \ +}) + +#define set_pud_safe(pudp, pud) \ +({ \ + WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ + set_pud(pudp, pud); \ +}) + +#define set_p4d_safe(p4dp, p4d) \ +({ \ + WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ + set_p4d(p4dp, p4d); \ +}) + +#define set_pgd_safe(pgdp, pgd) \ +({ \ + WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ + set_pgd(pgdp, pgd); \ +}) + +#ifndef __HAVE_ARCH_DO_SWAP_PAGE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_do_swap_page() can restore this + * metadata when a page is swapped back in. + */ +static inline void arch_do_swap_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte) +{ + +} +#endif + +#ifndef __HAVE_ARCH_UNMAP_ONE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_unmap_one() can save this + * metadata on a swap-out of a page. + */ +static inline int arch_unmap_one(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t orig_pte) +{ + return 0; +} +#endif + +#ifndef __HAVE_ARCH_PGD_OFFSET_GATE +#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) +#endif + +#ifndef __HAVE_ARCH_MOVE_PTE +#define move_pte(pte, prot, old_addr, new_addr) (pte) +#endif + +#ifndef pte_accessible +# define pte_accessible(mm, pte) ((void)(pte), 1) +#endif + +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_writethrough +#define pgprot_writethrough pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif + +/* + * When walking page tables, get the address of the next boundary, + * or the end address of the range if that comes earlier. Although no + * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. + */ + +#define pgd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? 
__boundary: (end);	\
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+void p4d_clear_bad(p4d_t *);
+void pud_clear_bad(pud_t *);
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+	if (pgd_none(*pgd))
+		return 1;
+	if (unlikely(pgd_bad(*pgd))) {
+		pgd_clear_bad(pgd);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+	if (p4d_none(*p4d))
+		return 1;
+	if (unlikely(p4d_bad(*p4d))) {
+		p4d_clear_bad(p4d);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+	if (pud_none(*pud))
+		return 1;
+	if (unlikely(pud_bad(*pud))) {
+		pud_clear_bad(pud);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+	if (pmd_none(*pmd))
+		return 1;
+	if (unlikely(pmd_bad(*pmd))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
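A sketch of the range-walk idiom these helpers support (not from this patch, but the pattern used throughout mm/): pmd_addr_end() clamps each step to the next pmd boundary or to the end of the range, and pmd_none_or_clear_bad() skips, and reports, empty or corrupt entries:

static void my_walk_pmd_range(pud_t *pud, unsigned long addr,
			      unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		/* ... visit the pte range [addr, next) here ... */
	} while (pmd++, addr = next, addr != end);
}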
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     pte_t *ptep)
+{
+	/*
+	 * Get the current pte state, but zero it out to make it
+	 * non-present, preventing the hardware from asynchronously
+	 * updating it.
+	 */
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     pte_t *ptep, pte_t pte)
+{
+	/*
+	 * The pte is non-present, so there's no hardware state to
+	 * preserve.
+	 */
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time.  The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   pte_t *ptep)
+{
+	return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+	__ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot)	(prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot)	(prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching.  This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued.  Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window.  Note that using this
+ * interface requires that read hazards be removed from the code.  A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date.  This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified.  In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests.  By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired.  This is for sanity of maintaining and reasoning about the
+ * kernel code.  In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */ +#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH +#define arch_start_context_switch(prev) do {} while (0) +#endif + +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY +#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION +static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline int pmd_swp_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} +#endif +#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */ +static inline int pte_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline int pmd_swp_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} +#endif + +#ifndef __HAVE_PFNMAP_TRACKING +/* + * Interfaces that can be used by architecture code to keep track of + * memory type of pfn mappings specified by the remap_pfn_range, + * vmf_insert_pfn. + */ + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size) +{ + return 0; +} + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vmf_insert_pfn(). + */ +static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn) +{ +} + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). + */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). + */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} + +/* + * untrack_pfn_moved is called while mremapping a pfnmap for a new region. 
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
+#else
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+			   unsigned long pfn, unsigned long addr,
+			   unsigned long size);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+			     pfn_t pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
+#endif
+
+/* e2k has a replicated zero page implementation */
+#ifndef CONFIG_E2K
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+	extern unsigned long zero_pfn;
+	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+	extern unsigned long zero_pfn;
+	return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+	extern unsigned long zero_pfn;
+	return zero_pfn;
+}
+#endif
+#endif /* !CONFIG_E2K */
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return 0;
+}
+#ifndef pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+	BUG();
+	return 0;
+}
+#endif /* pmd_write */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+	BUG();
+	return 0;
+}
+#endif /* pud_write */
+
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+	(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+static inline int pud_trans_huge(pud_t pud)
+{
+	return 0;
+}
+#endif
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+	/*
+	 * Depend on compiler for an atomic pmd read. NOTE: this is
+	 * only going to work if the pmdval_t isn't larger than
+	 * an unsigned long.
+	 */
+	return *pmdp;
+}
+#endif
+
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_sem in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined so behaving as if the pmd was none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
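+ *
+ * A minimal walker sketch (illustrative only, locking abbreviated):
+ *
+ *	pmd = pmd_offset(pud, addr);
+ *	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ *		return 0;	(empty, huge or corrupted: do not walk ptes)
+ *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);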
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+	pmd_t pmdval = pmd_read_atomic(pmd);
+	/*
+	 * The barrier will stabilize the pmdval in a register or on
+	 * the stack so that it will stop changing under the code.
+	 *
+	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+	 * pmd_read_atomic is allowed to return a not atomic pmdval
+	 * (for example pointing to a hugepage that has never been
+	 * mapped in the pmd). The below checks will only care about
+	 * the low part of the pmd with 32bit PAE x86 anyway, with the
+	 * exception of pmd_none(). So the important thing is that if
+	 * the low part of the pmd is found null, the high part will
+	 * be also null or the pmd_none() check below would be
+	 * confused.
+	 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	barrier();
+#endif
+	/*
+	 * !pmd_present() checks for pmd migration entries.
+	 *
+	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h.
+	 * But using that requires moving this function and pmd_trans_unstable()
+	 * to linux/swapops.h to resolve the dependency, which is too much code
+	 * movement.
+	 *
+	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
+	 * because !pmd_present() pages can only be under migration, not swapped
+	 * out.
+	 *
+	 * pmd_none() is preserved for future condition checks on pmd migration
+	 * entries and to avoid confusion with this function's name, although it
+	 * is redundant with !pmd_present().
+	 */
+	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+	    (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+		return 1;
+	if (unlikely(pmd_bad(pmdval))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that already verified the pmd is not none and they want to
+ * walk ptes while holding the mmap sem in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run a pmd_trans_unstable before walking the ptes after
+ * split_huge_pmd returns (because it may have run when the pmd became
+ * null, but then a page fault can map in a THP and not a regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+	return 0;
+#endif
+}
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and the bit is only
+ * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
+ * marked _PAGE_PROTNONE, so by default implement the helper as "always no".
+ * It is the responsibility of the caller to distinguish between PROT_NONE
+ * protections and NUMA hinting fault protections.
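+ *
+ * A sketch of the distinction a caller must make (illustrative only):
+ *
+ *	if (pte_protnone(pte) && (vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
+ *		-> treat as a NUMA hinting fault
+ *	else
+ *		-> treat as an ordinary protection fault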
+ */
+static inline int pte_protnone(pte_t pte)
+{
+	return 0;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+int p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+	return 0;
+}
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+	return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+	return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+	return 0;
+}
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+	return 0;
+}
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+	return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+	return 0;
+}
+#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Even otherwise, it can help optimize the normal TLB flush
+ * in the THP regime. Stock flush_tlb_range() typically has an optimization to
+ * nuke the entire TLB if the flush span is greater than a threshold, which
+ * will likely be true for a single huge page. Thus a single THP flush will
+ * invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
+#endif
+#endif
+
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+			unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
+extern void __init pgtable_cache_init(void);
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks that we can count on until the architectures define
+ * them on their own.
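+ *
+ * The practical consequence (illustrative example, not from the original
+ * text) is that code such as
+ *
+ *	buf = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_RO);
+ *
+ * silently produces a writable mapping on a port where PAGE_KERNEL_RO
+ * falls back to plain PAGE_KERNEL.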
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
new file mode 100644
index 0000000..d683f5e
--- /dev/null
+++ b/include/asm-generic/preempt.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED	(0)
+
+static __always_inline int preempt_count(void)
+{
+	return READ_ONCE(current_thread_info()->preempt_count);
+}
+
+static __always_inline volatile int *preempt_count_ptr(void)
+{
+	return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+	*preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+	*preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+	*preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	/*
+	 * Because load-store architectures cannot do per-cpu atomic
+	 * operations, we cannot use PREEMPT_NEED_RESCHED because it might
+	 * get lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#ifdef CONFIG_PREEMPTION
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPTION */
+
+#endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
new file mode 100644
index 0000000..b7087a0
--- /dev/null
+++ b/include/asm-generic/qrwlock.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Queue read/write lock
+ *
+ * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
+ * + * Authors: Waiman Long + */ +#ifndef __ASM_GENERIC_QRWLOCK_H +#define __ASM_GENERIC_QRWLOCK_H + +#include +#include +#include + +#include + +/* + * Writer states & reader shift and bias. + */ +#define _QW_WAITING 0x100 /* A writer is waiting */ +#define _QW_LOCKED 0x0ff /* A writer holds the lock */ +#define _QW_WMASK 0x1ff /* Writer mask */ +#define _QR_SHIFT 9 /* Reader count shift */ +#define _QR_BIAS (1U << _QR_SHIFT) + +/* + * External function declarations + */ +extern void queued_read_lock_slowpath(struct qrwlock *lock); +extern void queued_write_lock_slowpath(struct qrwlock *lock); + +/** + * queued_read_trylock - try to acquire read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queued_read_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (likely(!(cnts & _QW_WMASK))) { +#ifdef CONFIG_E2K + cnts = (u32)atomic_add_return_lock(_QR_BIAS, &lock->cnts); +#else + cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts); +#endif + if (likely(!(cnts & _QW_WMASK))) + return 1; + atomic_sub(_QR_BIAS, &lock->cnts); + } + return 0; +} + +/** + * queued_write_trylock - try to acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queued_write_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (unlikely(cnts)) + return 0; + +#ifdef CONFIG_E2K + return likely(atomic_try_cmpxchg_lock(&lock->cnts, &cnts, _QW_LOCKED)); +#else + return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, + _QW_LOCKED)); +#endif +} +/** + * queued_read_lock - acquire read lock of a queue rwlock + * @lock: Pointer to queue rwlock structure + */ +static inline void queued_read_lock(struct qrwlock *lock) +{ + u32 cnts; + +#ifdef CONFIG_E2K + cnts = atomic_add_return_lock(_QR_BIAS, &lock->cnts); +#else + cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts); +#endif + if (likely(!(cnts & _QW_WMASK))) + return; + + /* The slowpath will decrement the reader count, if necessary. */ + queued_read_lock_slowpath(lock); +} + +/** + * queued_write_lock - acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queued_write_lock(struct qrwlock *lock) +{ + u32 cnts = 0; + /* Optimize for the unfair lock case where the fair flag is 0. */ +#ifdef CONFIG_E2K + if (likely(atomic_try_cmpxchg_lock(&lock->cnts, &cnts, _QW_LOCKED))) +#else + if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))) +#endif + return; + + queued_write_lock_slowpath(lock); +} + +/** + * queued_read_unlock - release read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queued_read_unlock(struct qrwlock *lock) +{ + /* + * Atomically decrement the reader count + */ + (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts); +} + +/** + * queued_write_unlock - release write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queued_write_unlock(struct qrwlock *lock) +{ + smp_store_release(&lock->wlocked, 0); +} + +/* + * Remapping rwlock architecture specific functions to the corresponding + * queue rwlock functions. 
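+ *
+ * For reference, an illustration of the cnts encodings these wrappers
+ * manipulate (derived from the _QW_*/_QR_* constants above):
+ *
+ *	cnts == 0			lock is free
+ *	cnts == _QW_LOCKED (0x0ff)	held by a writer
+ *	cnts == 2 * _QR_BIAS (0x400)	held by two readers
+ *	cnts & _QW_WAITING		a writer is queued and waiting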
+ */ +#define arch_read_lock(l) queued_read_lock(l) +#define arch_write_lock(l) queued_write_lock(l) +#define arch_read_trylock(l) queued_read_trylock(l) +#define arch_write_trylock(l) queued_write_trylock(l) +#define arch_read_unlock(l) queued_read_unlock(l) +#define arch_write_unlock(l) queued_write_unlock(l) + +#endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h new file mode 100644 index 0000000..c36f1d5 --- /dev/null +++ b/include/asm-generic/qrwlock_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H +#define __ASM_GENERIC_QRWLOCK_TYPES_H + +#include +#include +#include + +/* + * The queue read/write lock data structure + */ + +typedef struct qrwlock { + union { + atomic_t cnts; + struct { +#ifdef __LITTLE_ENDIAN + u8 wlocked; /* Locked for write? */ + u8 __lstate[3]; +#else + u8 __lstate[3]; + u8 wlocked; /* Locked for write? */ +#endif + }; + }; + arch_spinlock_t wait_lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { \ + { .cnts = ATOMIC_INIT(0), }, \ + .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +} + +#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h new file mode 100644 index 0000000..1a0f299 --- /dev/null +++ b/include/asm-generic/qspinlock.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Queued spinlock + * + * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. + * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP + * + * Authors: Waiman Long + */ +#ifndef __ASM_GENERIC_QSPINLOCK_H +#define __ASM_GENERIC_QSPINLOCK_H + +#include + +/** + * queued_spin_is_locked - is the spinlock locked? + * @lock: Pointer to queued spinlock structure + * Return: 1 if it is locked, 0 otherwise + */ +static __always_inline int queued_spin_is_locked(struct qspinlock *lock) +{ + /* + * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL + * isn't immediately observable. + */ + return atomic_read(&lock->val); +} + +/** + * queued_spin_value_unlocked - is the spinlock structure unlocked? + * @lock: queued spinlock structure + * Return: 1 if it is unlocked, 0 otherwise + * + * N.B. Whenever there are tasks waiting for the lock, it is considered + * locked wrt the lockref code to avoid lock stealing by the lockref + * code and change things underneath the lock. This also allows some + * optimizations to be applied without conflict with lockref. 
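+ *
+ * A sketch of that lockref pattern, modelled on the cmpxchg loop in
+ * lib/lockref.c (illustrative only, retry limit omitted):
+ *
+ *	old.lock_count = READ_ONCE(lockref->lock_count);
+ *	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
+ *		struct lockref new = old;
+ *		new.count++;
+ *		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
+ *						   old.lock_count,
+ *						   new.lock_count);
+ *	}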
+ */ +static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) +{ + return !atomic_read(&lock.val); +} + +/** + * queued_spin_is_contended - check if the lock is contended + * @lock : Pointer to queued spinlock structure + * Return: 1 if lock contended, 0 otherwise + */ +static __always_inline int queued_spin_is_contended(struct qspinlock *lock) +{ + return atomic_read(&lock->val) & ~_Q_LOCKED_MASK; +} +/** + * queued_spin_trylock - try to acquire the queued spinlock + * @lock : Pointer to queued spinlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static __always_inline int queued_spin_trylock(struct qspinlock *lock) +{ + u32 val = atomic_read(&lock->val); + + if (unlikely(val)) + return 0; + + return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); +} + +extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +/** + * queued_spin_lock - acquire a queued spinlock + * @lock: Pointer to queued spinlock structure + */ +static __always_inline void queued_spin_lock(struct qspinlock *lock) +{ + u32 val = 0; + +#ifdef CONFIG_E2K + if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL))) +#else + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) +#endif + return; + + queued_spin_lock_slowpath(lock, val); +} + +#ifndef queued_spin_unlock +/** + * queued_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + */ +static __always_inline void queued_spin_unlock(struct qspinlock *lock) +{ + /* + * unlock() needs release semantics: + */ + smp_store_release(&lock->locked, 0); +} +#endif + +#ifndef virt_spin_lock +static __always_inline bool virt_spin_lock(struct qspinlock *lock) +{ + return false; +} +#endif + +/* + * Remapping spinlock architecture specific functions to the corresponding + * queued spinlock functions. + */ +#define arch_spin_is_locked(l) queued_spin_is_locked(l) +#define arch_spin_is_contended(l) queued_spin_is_contended(l) +#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l) +#define arch_spin_lock(l) queued_spin_lock(l) +#define arch_spin_trylock(l) queued_spin_trylock(l) +#define arch_spin_unlock(l) queued_spin_unlock(l) + +#endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h new file mode 100644 index 0000000..56d1309 --- /dev/null +++ b/include/asm-generic/qspinlock_types.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Queued spinlock + * + * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. + * + * Authors: Waiman Long + */ +#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H +#define __ASM_GENERIC_QSPINLOCK_TYPES_H + +/* + * Including atomic.h with PARAVIRT on will cause compilation errors because + * of recursive header file incluson via paravirt_types.h. So don't include + * it if PARAVIRT is on. + */ +#ifndef CONFIG_PARAVIRT +#include +#include +#endif + +typedef struct qspinlock { + union { + atomic_t val; + + /* + * By using the whole 2nd least significant byte for the + * pending bit, we can allow better optimization of the lock + * acquisition for the pending bit holder. 
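+ *
+ * An illustration of the resulting encoding (little-endian,
+ * NR_CPUS < 16K; added for clarity, not part of the original comment):
+ *
+ *	val == 0x00000001	locked, uncontended fastpath
+ *	val == 0x00000101	locked, one pending waiter spinning
+ *	(val >> 16) != 0	an MCS queue tail has been recorded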
+ */ +#ifdef __LITTLE_ENDIAN + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; +#else + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; +#endif + }; +} arch_spinlock_t; + +/* + * Initializier + */ +#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } } + +/* + * Bitfields in the atomic value: + * + * When NR_CPUS < 16K + * 0- 7: locked byte + * 8: pending + * 9-15: not used + * 16-17: tail index + * 18-31: tail cpu (+1) + * + * When NR_CPUS >= 16K + * 0- 7: locked byte + * 8: pending + * 9-10: tail index + * 11-31: tail cpu (+1) + */ +#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ + << _Q_ ## type ## _OFFSET) +#define _Q_LOCKED_OFFSET 0 +#define _Q_LOCKED_BITS 8 +#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) + +#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) +#if CONFIG_NR_CPUS < (1U << 14) +#define _Q_PENDING_BITS 8 +#else +#define _Q_PENDING_BITS 1 +#endif +#define _Q_PENDING_MASK _Q_SET_MASK(PENDING) + +#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS) +#define _Q_TAIL_IDX_BITS 2 +#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) + +#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) +#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) +#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) + +#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + +#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) +#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) + +#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h new file mode 100644 index 0000000..8874f68 --- /dev/null +++ b/include/asm-generic/resource.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_RESOURCE_H +#define _ASM_GENERIC_RESOURCE_H + +#include + + +/* + * boot-time rlimit defaults for the init task: + */ +#define INIT_RLIMITS \ +{ \ + [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ + [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ + [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_NPROC] = { 0, 0 }, \ + [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \ + [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ + [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_SIGPENDING] = { 0, 0 }, \ + [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ + [RLIMIT_NICE] = { 0, 0 }, \ + [RLIMIT_RTPRIO] = { 0, 0 }, \ + [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ +} + +#endif diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h new file mode 100644 index 0000000..1321ac7 --- /dev/null +++ b/include/asm-generic/seccomp.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/asm-generic/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro + */ +#ifndef _ASM_GENERIC_SECCOMP_H +#define _ASM_GENERIC_SECCOMP_H + +#include + +#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32) +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#ifndef __NR_seccomp_sigreturn_32 +#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn +#endif +#endif /* CONFIG_COMPAT && ! 
already defined */ + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#ifndef __NR_seccomp_sigreturn +#define __NR_seccomp_sigreturn __NR_rt_sigreturn +#endif + +#ifdef CONFIG_COMPAT +#ifndef get_compat_mode1_syscalls +static inline const int *get_compat_mode1_syscalls(void) +{ + static const int mode1_syscalls_32[] = { + __NR_seccomp_read_32, __NR_seccomp_write_32, + __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, + 0, /* null terminated */ + }; + return mode1_syscalls_32; +} +#endif +#endif /* CONFIG_COMPAT */ + +#endif /* _ASM_GENERIC_SECCOMP_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h new file mode 100644 index 0000000..d1779d4 --- /dev/null +++ b/include/asm-generic/sections.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_SECTIONS_H_ +#define _ASM_GENERIC_SECTIONS_H_ + +/* References to section boundaries */ + +#include +#include + +/* + * Usage guidelines: + * _text, _data: architecture specific, don't use them in arch-independent code + * [_stext, _etext]: contains .text.* sections, may also contain .rodata.* + * and/or .init.* sections + * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* + * and/or .init.* sections. + * [__start_rodata, __end_rodata]: contains .rodata.* sections + * [__start_ro_after_init, __end_ro_after_init]: + * contains .data..ro_after_init section + * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* + * may be out of this range on some architectures. + * [_sinittext, _einittext]: contains .init.text.* sections + * [__bss_start, __bss_stop]: contains BSS sections + * + * Following global variables are optional and may be unavailable on some + * architectures and/or kernel configurations. + * _text, _data + * __kprobes_text_start, __kprobes_text_end + * __entry_text_start, __entry_text_end + * __ctors_start, __ctors_end + * __irqentry_text_start, __irqentry_text_end + * __softirqentry_text_start, __softirqentry_text_end + * __start_opd, __end_opd + */ +extern char _text[], _stext[], _etext[]; +extern char _data[], _sdata[], _edata[]; +extern char __bss_start[], __bss_stop[]; +extern char __init_begin[], __init_end[]; +extern char _sinittext[], _einittext[]; +extern char __start_ro_after_init[], __end_ro_after_init[]; +extern char _end[]; +extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; +extern char __kprobes_text_start[], __kprobes_text_end[]; +extern char __entry_text_start[], __entry_text_end[]; +extern char __start_rodata[], __end_rodata[]; +extern char __irqentry_text_start[], __irqentry_text_end[]; +extern char __softirqentry_text_start[], __softirqentry_text_end[]; +extern char __start_once[], __end_once[]; + +/* Start and end of .ctors section - used for constructor calls. */ +extern char __ctors_start[], __ctors_end[]; + +/* Start and end of .opd section - used for function descriptors. */ +extern char __start_opd[], __end_opd[]; + +extern __visible const void __nosave_begin, __nosave_end; + +/* Function descriptor handling (if any). Override in asm/sections.h */ +#ifndef dereference_function_descriptor +#define dereference_function_descriptor(p) (p) +#define dereference_kernel_function_descriptor(p) (p) +#endif + +/* random extra sections (if any). 
Override
+ * in asm/sections.h */
+#ifndef arch_is_kernel_text
+static inline int arch_is_kernel_text(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+#ifndef arch_is_kernel_data
+static inline int arch_is_kernel_data(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Check if an address is part of freed initmem. This is needed on
+ * architectures with virt == phys kernel mapping, for code that wants to
+ * check if an address is part of a static object within [_stext, _end].
+ * After initmem is freed, memory can be allocated from it, and such
+ * allocations would then have addresses within the range [_stext, _end].
+ */
+#ifndef arch_is_kernel_initmem_freed
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
+/**
+ * memory_contains - checks if an object is contained within a memory region
+ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if the object specified by @virt and @size is entirely
+ * contained within the memory region defined by @begin and @end, false
+ * otherwise.
+ */
+static inline bool memory_contains(void *begin, void *end, void *virt,
+				   size_t size)
+{
+	return virt >= begin && virt + size <= end;
+}
+
+/**
+ * memory_intersects - checks if the region occupied by an object intersects
+ *                     with another memory region
+ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if an object's memory region, specified by @virt and @size,
+ * intersects with the region specified by @begin and @end, false otherwise.
+ */
+static inline bool memory_intersects(void *begin, void *end, void *virt,
+				     size_t size)
+{
+	void *vend = virt + size;
+
+	return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+}
+
+/**
+ * init_section_contains - checks if an object is contained within the init
+ *                         section
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if the object specified by @virt and @size is entirely
+ * contained within the init section, false otherwise.
+ */
+static inline bool init_section_contains(void *virt, size_t size)
+{
+	return memory_contains(__init_begin, __init_end, virt, size);
+}
+
+/**
+ * init_section_intersects - checks if the region occupied by an object
+ *                           intersects with the init section
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if an object's memory region, specified by @virt and @size,
+ * intersects with the init section, false otherwise.
+ */
+static inline bool init_section_intersects(void *virt, size_t size)
+{
+	return memory_intersects(__init_begin, __init_end, virt, size);
+}
+
+/**
+ * is_kernel_rodata - checks if the pointer address is located in the
+ *                    .rodata section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .rodata, false otherwise.
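+ *
+ * A sketch of a typical caller, modelled on the kfree_const() helper in
+ * mm/util.c (illustrative only):
+ *
+ *	void kfree_const(const void *x)
+ *	{
+ *		if (!is_kernel_rodata((unsigned long)x))
+ *			kfree(x);
+ *	}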
+ */ +static inline bool is_kernel_rodata(unsigned long addr) +{ + return addr >= (unsigned long)__start_rodata && + addr < (unsigned long)__end_rodata; +} + +#endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h new file mode 100644 index 0000000..ca9f7b6 --- /dev/null +++ b/include/asm-generic/serial.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SERIAL_H +#define __ASM_GENERIC_SERIAL_H + +/* + * This should not be an architecture specific #define, oh well. + * + * Traditionally, it just describes i8250 and related serial ports + * that have this clock rate. + */ + +#define BASE_BAUD (1843200 / 16) + +#endif /* __ASM_GENERIC_SERIAL_H */ diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h new file mode 100644 index 0000000..c86abf6 --- /dev/null +++ b/include/asm-generic/set_memory.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_SET_MEMORY_H +#define __ASM_SET_MEMORY_H + +/* + * Functions to change memory attributes. + */ +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); + +#endif diff --git a/include/asm-generic/shmparam.h b/include/asm-generic/shmparam.h new file mode 100644 index 0000000..b8f9035 --- /dev/null +++ b/include/asm-generic/shmparam.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SHMPARAM_H +#define __ASM_GENERIC_SHMPARAM_H + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _ASM_GENERIC_SHMPARAM_H */ diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h new file mode 100644 index 0000000..c53984f --- /dev/null +++ b/include/asm-generic/signal.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SIGNAL_H +#define __ASM_GENERIC_SIGNAL_H + +#include + +#ifndef __ASSEMBLY__ +#ifdef SA_RESTORER +#endif + +#include +#undef __HAVE_ARCH_SIG_BITOPS + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_GENERIC_SIGNAL_H */ diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h new file mode 100644 index 0000000..d0343d5 --- /dev/null +++ b/include/asm-generic/simd.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + * + * As architectures typically don't preserve the SIMD register file when + * taking an interrupt, !in_interrupt() should be a reasonable default. + */ +static __must_check inline bool may_use_simd(void) +{ + return !in_interrupt(); +} diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h new file mode 100644 index 0000000..adaf6ac --- /dev/null +++ b/include/asm-generic/spinlock.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SPINLOCK_H +#define __ASM_GENERIC_SPINLOCK_H +/* + * You need to implement asm/spinlock.h for SMP support. The generic + * version does not handle SMP. 
+ */ +#ifdef CONFIG_SMP +#error need an architecture specific asm/spinlock.h +#endif + +#endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h new file mode 100644 index 0000000..f88dcd8 --- /dev/null +++ b/include/asm-generic/statfs.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GENERIC_STATFS_H +#define _GENERIC_STATFS_H + +#include + +typedef __kernel_fsid_t fsid_t; +#endif diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h new file mode 100644 index 0000000..de5e020 --- /dev/null +++ b/include/asm-generic/string.h @@ -0,0 +1,10 @@ +#ifndef __ASM_GENERIC_STRING_H +#define __ASM_GENERIC_STRING_H +/* + * The kernel provides all required functions in lib/string.c + * + * Architectures probably want to provide at least their own optimized + * memcpy and memset functions though. + */ + +#endif /* __ASM_GENERIC_STRING_H */ diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h new file mode 100644 index 0000000..5897d10 --- /dev/null +++ b/include/asm-generic/switch_to.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Generic task switch macro wrapper. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ +#ifndef __ASM_GENERIC_SWITCH_TO_H +#define __ASM_GENERIC_SWITCH_TO_H + +#include + +/* + * Context switching is now performed out-of-line in switch_to.S + */ +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +#define switch_to(prev, next, last) \ + do { \ + ((last) = __switch_to((prev), (next))); \ + } while (0) + +#endif /* __ASM_GENERIC_SWITCH_TO_H */ diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h new file mode 100644 index 0000000..f3135e7 --- /dev/null +++ b/include/asm-generic/syscall.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Access to user system call parameters and results + * + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. + * + * This file is a stub providing documentation for what functions + * asm-ARCH/syscall.h files need to define. Most arch definitions + * will be simple inlines. + * + * All of these functions expect to be called with no locks, + * and only when the caller is sure that the task of interest + * cannot return to user mode while we are looking at it. + */ + +#ifndef _ASM_SYSCALL_H +#define _ASM_SYSCALL_H 1 + +struct task_struct; +struct pt_regs; + +/** + * syscall_get_nr - find what system call a task is executing + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * If @task is executing a system call or is at system call + * tracing about to attempt one, returns the system call number. + * If @task is not executing a system call, i.e. it's blocked + * inside the kernel for a fault or signal, returns -1. + * + * Note this returns int even on 64-bit machines. Only 32 bits of + * system call number can be meaningful. If the actual arch value + * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. + * + * It's only valid to call this when @task is known to be blocked. 
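+ *
+ * An illustrative sketch (not part of the interface contract): a tracer
+ * inspecting a stopped tracee might do
+ *
+ *	struct pt_regs *regs = task_pt_regs(child);
+ *	int nr = syscall_get_nr(child, regs);
+ *
+ *	if (nr == -1)
+ *		return;		(not inside a system call)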
+ */ +int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_rollback - roll back registers after an aborted system call + * @task: task of interest, must be in system call exit tracing + * @regs: task_pt_regs() of @task + * + * It's only valid to call this when @task is stopped for system + * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), + * after tracehook_report_syscall_entry() returned nonzero to prevent + * the system call from taking place. + * + * This rolls back the register state in @regs so it's as if the + * system call instruction was a no-op. The registers containing + * the system call number and arguments are as they were before the + * system call instruction. This may not be the same as what the + * register state looked like at system call entry tracing. + */ +void syscall_rollback(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_get_error - check result of traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +long syscall_get_error(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_get_return_value - get the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns the return value of the successful system call. + * This value is meaningless if syscall_get_error() returned nonzero. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_set_return_value - change the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @error: negative error code, or zero to indicate success + * @val: user return value if @error is zero + * + * This changes the results of the system call that user mode will see. + * If @error is zero, the user sees a successful system call with a + * return value of @val. If @error is nonzero, it's a negated errno + * code; the user sees a failed system call with this errno code. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val); + +/** + * syscall_get_arguments - extract system call parameter values + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @args: array filled with argument values + * + * Fetches 6 arguments to the system call. First argument is stored in +* @args[0], and so on. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned long *args); + +/** + * syscall_set_arguments - change system call parameter value + * @task: task of interest, must be in system call entry tracing + * @regs: task_pt_regs() of @task + * @args: array of argument values to store + * + * Changes 6 arguments to the system call. 
+ * The first argument gets value @args[0], and so on. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + const unsigned long *args); + +/** + * syscall_get_arch - return the AUDIT_ARCH for the current system call + * @task: task of interest, must be blocked + * + * Returns the AUDIT_ARCH_* based on the system call convention in use. + * + * It's only valid to call this when @task is stopped on entry to a system + * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP. + * + * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must + * provide an implementation of this. + */ +int syscall_get_arch(struct task_struct *task); +#endif /* _ASM_SYSCALL_H */ diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h new file mode 100644 index 0000000..933ca65 --- /dev/null +++ b/include/asm-generic/syscalls.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_SYSCALLS_H +#define __ASM_GENERIC_SYSCALLS_H + +#include +#include + +/* + * Calling conventions for these system calls can differ, so + * it's possible to override them. + */ + +#ifndef sys_mmap2 +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +#endif + +#ifndef sys_mmap +asmlinkage long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, off_t pgoff); +#endif + +#ifndef sys_rt_sigreturn +asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); +#endif + +#endif /* __ASM_GENERIC_SYSCALLS_H */ diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h new file mode 100644 index 0000000..59c5a3b --- /dev/null +++ b/include/asm-generic/termios-base.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* termios.h: generic termios/termio user copying/translation + */ + +#ifndef _ASM_GENERIC_TERMIOS_BASE_H +#define _ASM_GENERIC_TERMIOS_BASE_H + +#include + +#ifndef __ARCH_TERMIO_GETPUT + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + struct termio __user *termio) +{ + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. 
+ */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; +} + +#ifndef user_termios_to_kernel_termios +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#endif + +#ifndef kernel_termios_to_user_termios +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#endif + +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __ARCH_TERMIO_GETPUT */ + +#endif /* _ASM_GENERIC_TERMIOS_BASE_H */ diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h new file mode 100644 index 0000000..b1398d0 --- /dev/null +++ b/include/asm-generic/termios.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_TERMIOS_H +#define _ASM_GENERIC_TERMIOS_H + + +#include +#include + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + const struct termio __user *termio) +{ + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. 
+ */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; +} + +#ifdef TCGETS2 +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios2 __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)); +} + +static inline int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)); +} + +static inline int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#else /* TCGETS2 */ +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#endif /* TCGETS2 */ + +#endif /* _ASM_GENERIC_TERMIOS_H */ diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h new file mode 100644 index 0000000..50ba9b5 --- /dev/null +++ b/include/asm-generic/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_TIMEX_H +#define __ASM_GENERIC_TIMEX_H + +/* + * If you have a cycle counter, return the value here. + */ +typedef unsigned long cycles_t; +#ifndef get_cycles +static inline cycles_t get_cycles(void) +{ + return 0; +} +#endif + +/* + * Architectures are encouraged to implement read_current_timer + * and define this in order to avoid the expensive delay loop + * calibration during boot. + */ +#undef ARCH_HAS_READ_CURRENT_TIMER + +#endif /* __ASM_GENERIC_TIMEX_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h new file mode 100644 index 0000000..c716ea8 --- /dev/null +++ b/include/asm-generic/tlb.h @@ -0,0 +1,620 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* include/asm-generic/tlb.h + * + * Generic TLB shootdown code + * + * Copyright 2001 Red Hat, Inc. + * Based on code from mm/memory.c Copyright Linus Torvalds and others. + * + * Copyright 2011 Red Hat, Inc., Peter Zijlstra + */ +#ifndef _ASM_GENERIC__TLB_H +#define _ASM_GENERIC__TLB_H + +#include +#include +#include +#include +#include + +/* + * Blindly accessing user memory from NMI context can be dangerous + * if we're in the middle of switching the current user task or switching + * the loaded mm. + */ +#ifndef nmi_uaccess_okay +# define nmi_uaccess_okay() true +#endif + +#ifdef CONFIG_MMU + +/* + * Generic MMU-gather implementation. + * + * The mmu_gather data structure is used by the mm code to implement the + * correct and efficient ordering of freeing pages and TLB invalidations. + * + * This correct ordering is: + * + * 1) unhook page + * 2) TLB invalidate page + * 3) free page + * + * That is, we must never free a page before we have ensured there are no live + * translations left to it. 
Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *
+ *    Finish in particular will issue a (final) TLB invalidate and free
+ *    all (remaining) queued pages.
+ *
+ *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ *    there are large holes between the VMAs.
+ *
+ *  - tlb_remove_page() / __tlb_remove_page()
+ *  - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ *    __tlb_remove_page_size() is the basic primitive that queues a page for
+ *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ *    boolean indicating if the queue is (now) full and a call to
+ *    tlb_flush_mmu() is required.
+ *
+ *    tlb_remove_page() and tlb_remove_page_size() imply the call to
+ *    tlb_flush_mmu() when required and have no return value.
+ *
+ *  - tlb_change_page_size()
+ *
+ *    Call before __tlb_remove_page*() to set the current page-size; implies a
+ *    possible tlb_flush_mmu() call.
+ *
+ *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ *                              related state, like the range)
+ *
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                      whatever pages are still batched.
+ *
+ *  - mmu_gather::fullmm
+ *
+ *    A flag set by tlb_gather_mmu() to indicate we're going to free
+ *    the entire mm; this allows a number of optimizations.
+ *
+ *    - We can ignore tlb_{start,end}_vma(); because we don't
+ *      care about ranges. Everything will be shot down.
+ *
+ *    - (RISC) architectures that use ASIDs can cycle to a new ASID
+ *      and delay the invalidation until ASID space runs out.
+ *
+ *  - mmu_gather::need_flush_all
+ *
+ *    A flag that can be set by the arch code if it wants to force
+ *    flush the entire TLB irrespective of the range. For instance
+ *    x86-PAE needs this when changing top-level entries.
+ *
+ * And allows the architecture to provide and implement tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ *  - mmu_gather::start / mmu_gather::end
+ *
+ *    which provides the range that needs to be flushed to cover the pages to
+ *    be freed.
+ *
+ *  - mmu_gather::freed_tables
+ *
+ *    set when we freed page table pages
+ *
+ *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ *    returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush(), a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ *  HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ *  HAVE_RCU_TABLE_FREE
+ *
+ *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ *  for page directories (__p*_free_tlb()). This provides separate freeing of
+ *  the page-table pages themselves in a semi-RCU fashion (see comment below).
+ *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ *  and therefore doesn't naturally serialize with software page-table walkers.
+ * + * When used, an architecture is expected to provide __tlb_remove_table() + * which does the actual freeing of these pages. + * + * MMU_GATHER_NO_RANGE + * + * Use this if your architecture lacks an efficient flush_tlb_range(). + */ + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +/* + * Semi RCU freeing of the page directories. + * + * This is needed by some architectures to implement software pagetable walkers. + * + * gup_fast() and other software pagetable walkers do a lockless page-table + * walk and therefore needs some synchronization with the freeing of the page + * directories. The chosen means to accomplish that is by disabling IRQs over + * the walk. + * + * Architectures that use IPIs to flush TLBs will then automagically DTRT, + * since we unlink the page, flush TLBs, free the page. Since the disabling of + * IRQs delays the completion of the TLB flush we can never observe an already + * freed page. + * + * Architectures that do not have this (PPC) need to delay the freeing by some + * other means, this is that means. + * + * What we do is batch the freed directory pages (tables) and RCU free them. + * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling + * holds off grace periods. + * + * However, in order to batch these pages we need to allocate storage, this + * allocation is deep inside the MM code and can thus easily fail on memory + * pressure. To guarantee progress we fall back to single table freeing, see + * the implementation of tlb_remove_table_one(). + * + */ +struct mmu_table_batch { + struct rcu_head rcu; + unsigned int nr; + void *tables[0]; +}; + +#define MAX_TABLE_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) + +extern void tlb_remove_table(struct mmu_gather *tlb, void *table); + +/* + * This allows an architecture that does not use the linux page-tables for + * hardware to skip the TLBI when freeing page tables. + */ +#ifndef tlb_needs_table_invalidate +#define tlb_needs_table_invalidate() (true) +#endif + +#else + +#ifdef tlb_needs_table_invalidate +#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE +#endif + +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ + + +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER +/* + * If we can't allocate a page to make a big batch of page pointers + * to work on, then just handle a few from the on-stack structure. + */ +#define MMU_GATHER_BUNDLE 8 + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct page *pages[0]; +}; + +#define MAX_GATHER_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) + +/* + * Limit the maximum number of mmu_gather batches to reduce a risk of soft + * lockups for non-preemptible kernels on huge machines when a lot of memory + * is zapped during unmapping. + * 10K pages freed at once should be safe even without a preemption point. + */ +#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) + +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + int page_size); +#endif + +/* + * struct mmu_gather is an opaque type used by the mm code for passing around + * any data needed by arch specific code for tlb_remove_page. 
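+ *
+ * A sketch of the canonical call sequence, simplified from the unmap paths
+ * in mm/memory.c (illustrative only):
+ *
+ *	struct mmu_gather tlb;
+ *
+ *	tlb_gather_mmu(&tlb, mm, start, end);
+ *	unmap_vmas(&tlb, vma, start, end);
+ *	free_pgtables(&tlb, vma, floor, ceiling);
+ *	tlb_finish_mmu(&tlb, start, end);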
+ */
+struct mmu_gather {
+	struct mm_struct	*mm;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+#endif
+
+	unsigned long		start;
+	unsigned long		end;
+	/*
+	 * we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations
+	 */
+	unsigned int		fullmm : 1;
+
+	/*
+	 * we have performed an operation which
+	 * requires a complete flush of the tlb
+	 */
+	unsigned int		need_flush_all : 1;
+
+	/*
+	 * we have removed page directories
+	 */
+	unsigned int		freed_tables : 1;
+
+	/*
+	 * at which levels have we cleared entries?
+	 */
+	unsigned int		cleared_ptes : 1;
+	unsigned int		cleared_pmds : 1;
+	unsigned int		cleared_puds : 1;
+	unsigned int		cleared_p4ds : 1;
+
+	/*
+	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+	 */
+	unsigned int		vma_exec : 1;
+	unsigned int		vma_huge : 1;
+
+	unsigned int		batch_count;
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	struct mmu_gather_batch *active;
+	struct mmu_gather_batch	local;
+	struct page		*__pages[MMU_GATHER_BUNDLE];
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	unsigned int page_size;
+#endif
+#endif
+};
+
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+	struct mm_struct *mm, unsigned long start, unsigned long end);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+	unsigned long start, unsigned long end, bool force);
+
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+				      unsigned long address,
+				      unsigned int range_size)
+{
+	tlb->start = min(tlb->start, address);
+	tlb->end = max(tlb->end, address + range_size);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm) {
+		tlb->start = tlb->end = ~0;
+	} else {
+		tlb->start = TASK_SIZE;
+		tlb->end = 0;
+	}
+	tlb->freed_tables = 0;
+	tlb->cleared_ptes = 0;
+	tlb->cleared_pmds = 0;
+	tlb->cleared_puds = 0;
+	tlb->cleared_p4ds = 0;
+	/*
+	 * Do not reset mmu_gather::vma_* fields here, we do not
+	 * call into tlb_start_vma() again to set them if there is an
+	 * intermediate flush.
+	 */
+}
+
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not have efficient means of range flushing TLBs
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->end)
+		flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation,
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || tlb->need_flush_all) {
+		flush_tlb_mm(tlb->mm);
+	} else if (tlb->end) {
+		struct vm_area_struct vma = {
+			.vm_mm = tlb->mm,
+			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
+				    (tlb->vma_huge ?
VM_HUGETLB : 0), + }; + + flush_tlb_range(&vma, tlb->start, tlb->end); + } +} + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + /* + * flush_tlb_range() implementations that look at VM_HUGETLB (tile, + * mips-4k) flush only large pages. + * + * flush_tlb_range() implementations that flush I-TLB also flush D-TLB + * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing + * range. + * + * We rely on tlb_end_vma() to issue a flush, such that when we reset + * these values the batch is empty. + */ + tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB); + tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); +} + +#else + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#endif + +#endif /* CONFIG_MMU_GATHER_NO_RANGE */ + +static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) +{ + if (!tlb->end) + return; + + tlb_flush(tlb); + mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); + __tlb_reset_range(tlb); +} + +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + if (__tlb_remove_page_size(tlb, page, page_size)) + tlb_flush_mmu(tlb); +} + +static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + return __tlb_remove_page_size(tlb, page, PAGE_SIZE); +} + +/* tlb_remove_page + * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when + * required. + */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + return tlb_remove_page_size(tlb, page, PAGE_SIZE); +} + +static inline void tlb_change_page_size(struct mmu_gather *tlb, + unsigned int page_size) +{ +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + if (tlb->page_size && tlb->page_size != page_size) { + if (!tlb->fullmm) + tlb_flush_mmu(tlb); + } + + tlb->page_size = page_size; +#endif +} + +static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) +{ + if (tlb->cleared_ptes) + return PAGE_SHIFT; + if (tlb->cleared_pmds) + return PMD_SHIFT; + if (tlb->cleared_puds) + return PUD_SHIFT; + if (tlb->cleared_p4ds) + return P4D_SHIFT; + + return PAGE_SHIFT; +} + +static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) +{ + return 1UL << tlb_get_unmap_shift(tlb); +} + +/* + * In the case of tlb vma handling, we can optimise these away in the + * case where we're doing a full MM flush. When we're doing a munmap, + * the vmas are adjusted to only cover the region to be torn down. + */ +#ifndef tlb_start_vma +static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (tlb->fullmm) + return; + + tlb_update_vma_flags(tlb, vma); + flush_cache_range(vma, vma->vm_start, vma->vm_end); +} +#endif + +#ifndef tlb_end_vma +static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (tlb->fullmm) + return; + + /* + * Do a TLB flush and reset the range at VMA boundaries; this avoids + * the ranges growing with the unused space between consecutive VMAs, + * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on + * this. + */ + tlb_flush_mmu_tlbonly(tlb); +} +#endif + +#ifndef __tlb_remove_tlb_entry +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +#endif + +/** + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. + * + * Record the fact that pte's were really unmapped by updating the range, + * so we can later optimise away the tlb invalidate. 
This helps when + * userspace is unmapping already-unmapped pages, which happens quite a lot. + */ +#define tlb_remove_tlb_entry(tlb, ptep, address) \ + do { \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb->cleared_ptes = 1; \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ + do { \ + unsigned long _sz = huge_page_size(h); \ + __tlb_adjust_range(tlb, address, _sz); \ + if (_sz == PMD_SIZE) \ + tlb->cleared_pmds = 1; \ + else if (_sz == PUD_SIZE) \ + tlb->cleared_puds = 1; \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +/** + * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation + * This is a nop so far, because only x86 needs it. + */ +#ifndef __tlb_remove_pmd_tlb_entry +#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0) +#endif + +#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \ + tlb->cleared_pmds = 1; \ + __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ + } while (0) + +/** + * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb + * invalidation. This is a nop so far, because only x86 needs it. + */ +#ifndef __tlb_remove_pud_tlb_entry +#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) +#endif + +#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ + tlb->cleared_puds = 1; \ + __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ + } while (0) + +/* + * For things like page tables caches (ie caching addresses "inside" the + * page tables, like x86 does), for legacy reasons, flushing an + * individual page had better flush the page table caches behind it. This + * is definitely how x86 works, for example. And if you have an + * architected non-legacy page table cache (which I'm not aware of + * anybody actually doing), you're going to have some architecturally + * explicit flushing for that, likely *separate* from a regular TLB entry + * flush, and thus you'd need more than just some range expansion.. 
+ *
+ * So if we ever find an architecture
+ * that would want something that odd, I think it is up to that
+ * architecture to do its own odd thing, not cause pain for others
+ * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
+ *
+ * For now w.r.t. the page table cache, mark the range_size as PAGE_SIZE
+ */
+
+#ifndef pte_free_tlb
+#define pte_free_tlb(tlb, ptep, address)			\
+	do {							\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_pmds = 1;				\
+		__pte_free_tlb(tlb, ptep, address);		\
+	} while (0)
+#endif
+
+#ifndef pmd_free_tlb
+#define pmd_free_tlb(tlb, pmdp, address)			\
+	do {							\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_puds = 1;				\
+		__pmd_free_tlb(tlb, pmdp, address);		\
+	} while (0)
+#endif
+
+#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
+#define pud_free_tlb(tlb, pudp, address)			\
+	do {							\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_p4ds = 1;				\
+		__pud_free_tlb(tlb, pudp, address);		\
+	} while (0)
+#endif
+#endif
+
+#ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
+#define p4d_free_tlb(tlb, pudp, address)			\
+	do {							\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		__p4d_free_tlb(tlb, pudp, address);		\
+	} while (0)
+#endif
+#endif
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
new file mode 100644
index 0000000..dc26692
--- /dev/null
+++ b/include/asm-generic/tlbflush.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_TLBFLUSH_H
+#define __ASM_GENERIC_TLBFLUSH_H
+/*
+ * This is a dummy tlbflush implementation that can be used on all
+ * nommu architectures.
+ * If you have an MMU, you need to write your own functions.
+ */
+#ifdef CONFIG_MMU
+#error need to implement an architecture specific asm/tlbflush.h
+#endif
+
+#include <linux/bug.h>
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	BUG();
+}
+
+
+#endif /* __ASM_GENERIC_TLBFLUSH_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
new file mode 100644
index 0000000..5aa8705
--- /dev/null
+++ b/include/asm-generic/topology.h
@@ -0,0 +1,77 @@
+/*
+ * linux/include/asm-generic/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to
+ */
+#ifndef _ASM_GENERIC_TOPOLOGY_H
+#define _ASM_GENERIC_TOPOLOGY_H
+
+#ifndef CONFIG_NUMA
+
+/* Other architectures wishing to use this simple topology API should fill
+   in the below functions as appropriate in their own file.
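[Editor's note: a hypothetical illustration of why these fallbacks are useful; on a !CONFIG_NUMA build the macros below fold every query onto node 0, so generic code can stay NUMA-aware unconditionally. The function name is invented for this example.]

	static struct page *example_alloc_near_cpu(int cpu)
	{
		/* With the fallback cpu_to_node() this is simply node 0. */
		return alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	}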
*/ +#ifndef cpu_to_node +#define cpu_to_node(cpu) ((void)(cpu),0) +#endif +#ifndef set_numa_node +#define set_numa_node(node) +#endif +#ifndef set_cpu_numa_node +#define set_cpu_numa_node(cpu, node) +#endif +#ifndef cpu_to_mem +#define cpu_to_mem(cpu) ((void)(cpu),0) +#endif + +#ifndef cpumask_of_node + #ifdef CONFIG_NEED_MULTIPLE_NODES + #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) + #else + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) + #endif +#endif +#ifndef pcibus_to_node +#define pcibus_to_node(bus) ((void)(bus), -1) +#endif + +#ifndef cpumask_of_pcibus +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) +#endif + +#endif /* CONFIG_NUMA */ + +#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) + +#ifndef set_numa_mem +#define set_numa_mem(node) +#endif +#ifndef set_cpu_numa_mem +#define set_cpu_numa_mem(cpu, node) +#endif + +#endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ + +#endif /* _ASM_GENERIC_TOPOLOGY_H */ diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h new file mode 100644 index 0000000..cbbca29 --- /dev/null +++ b/include/asm-generic/trace_clock.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_TRACE_CLOCK_H +#define _ASM_GENERIC_TRACE_CLOCK_H +/* + * Arch-specific trace clocks. + */ + +/* + * Additional trace clocks added to the trace_clocks + * array in kernel/trace/trace.c + * None if the architecture has not defined it. + */ +#ifndef ARCH_TRACE_CLOCKS +# define ARCH_TRACE_CLOCKS +#endif + +#endif /* _ASM_GENERIC_TRACE_CLOCK_H */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h new file mode 100644 index 0000000..e935318 --- /dev/null +++ b/include/asm-generic/uaccess.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_UACCESS_H +#define __ASM_GENERIC_UACCESS_H + +/* + * User space memory access functions, these should work + * on any machine that has kernel and user data in the same + * address space, e.g. all NOMMU machines. 
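[Editor's note: a hypothetical helper, shown only to illustrate the calling convention of the get_user()/put_user() macros defined below: both return 0 on success and -EFAULT on a faulting or inaccessible user pointer.]

	static int example_double_it(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))
			return -EFAULT;
		val *= 2;
		return put_user(val, uptr);
	}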
+ */
+#include <linux/string.h>
+
+#ifdef CONFIG_UACCESS_MEMCPY
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		switch(n) {
+		case 1:
+			*(u8 *)to = *(u8 __force *)from;
+			return 0;
+		case 2:
+			*(u16 *)to = *(u16 __force *)from;
+			return 0;
+		case 4:
+			*(u32 *)to = *(u32 __force *)from;
+			return 0;
+#ifdef CONFIG_64BIT
+		case 8:
+			*(u64 *)to = *(u64 __force *)from;
+			return 0;
+#endif
+		}
+	}
+
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		switch(n) {
+		case 1:
+			*(u8 __force *)to = *(u8 *)from;
+			return 0;
+		case 2:
+			*(u16 __force *)to = *(u16 *)from;
+			return 0;
+		case 4:
+			*(u32 __force *)to = *(u32 *)from;
+			return 0;
+#ifdef CONFIG_64BIT
+		case 8:
+			*(u64 __force *)to = *(u64 *)from;
+			return 0;
+#endif
+		default:
+			break;
+		}
+	}
+
+	memcpy((void __force *)to, from, n);
+	return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#ifndef KERNEL_DS
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
+#endif
+
+#ifndef USER_DS
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#ifndef get_fs
+#define get_fs()	(current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+	current_thread_info()->addr_limit = fs;
+}
+#endif
+
+#ifndef segment_eq
+#define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
+
+#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
+
+/*
+ * The architecture should really override this if possible, at least
+ * doing a check on the get_fs()
+ */
+#ifndef __access_ok
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+	return 1;
+}
+#endif
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ * This version just falls back to copy_{from,to}_user, which should
+ * provide a fast-path for small values.
+ */
+#define __put_user(x, ptr)					\
+({								\
+	__typeof__(*(ptr)) __x = (x);				\
+	int __pu_err = -EFAULT;					\
+	__chk_user_ptr(ptr);					\
+	switch (sizeof (*(ptr))) {				\
+	case 1:							\
+	case 2:							\
+	case 4:							\
+	case 8:							\
+		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
+					 ptr, &__x);		\
+		break;						\
+	default:						\
+		__put_user_bad();				\
+		break;						\
+	}							\
+	__pu_err;						\
+})
+
+#define put_user(x, ptr)					\
+({								\
+	void __user *__p = (ptr);				\
+	might_fault();						\
+	access_ok(__p, sizeof(*ptr)) ?				\
+		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
+		-EFAULT;					\
+})
+
+#ifndef __put_user_fn
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+	return unlikely(raw_copy_to_user(ptr, x, size)) ?
-EFAULT : 0; +} + +#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) + +#endif + +extern int __put_user_bad(void) __attribute__((noreturn)); + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = -EFAULT; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ + unsigned char __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 2: { \ + unsigned short __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 4: { \ + unsigned int __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 8: { \ + unsigned long long __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + default: \ + __get_user_bad(); \ + break; \ + } \ + __gu_err; \ +}) + +#define get_user(x, ptr) \ +({ \ + const void __user *__p = (ptr); \ + might_fault(); \ + access_ok(__p, sizeof(*ptr)) ? \ + __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\ + ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ +}) + +#ifndef __get_user_fn +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0; +} + +#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) + +#endif + +extern int __get_user_bad(void) __attribute__((noreturn)); + +/* + * Copy a null terminated string from userspace. + */ +#ifndef __strncpy_from_user +static inline long +__strncpy_from_user(char *dst, const char __user *src, long count) +{ + char *tmp; + strncpy(dst, (const char __force *)src, count); + for (tmp = dst; *tmp && count > 0; tmp++, count--) + ; + return (tmp - dst); +} +#endif + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(src, 1)) + return -EFAULT; + return __strncpy_from_user(dst, src, count); +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +#ifndef __strnlen_user +#define __strnlen_user(s, n) (strnlen((s), (n)) + 1) +#endif + +/* + * Unlike strnlen, strnlen_user includes the nul terminator in + * its returned count. Callers should check for a returned value + * greater than N as an indication the string is too long. + */ +static inline long strnlen_user(const char __user *src, long n) +{ + if (!access_ok(src, 1)) + return 0; + return __strnlen_user(src, n); +} + +/* + * Zero Userspace + */ +#ifndef __clear_user +static inline __must_check unsigned long +__clear_user(void __user *to, unsigned long n) +{ + memset((void __force *)to, 0, n); + return 0; +} +#endif + +static inline __must_check unsigned long +clear_user(void __user *to, unsigned long n) +{ + might_fault(); + if (!access_ok(to, n)) + return n; + + return __clear_user(to, n); +} + +#include + +#endif /* __ASM_GENERIC_UACCESS_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h new file mode 100644 index 0000000..374c940 --- /dev/null +++ b/include/asm-generic/unaligned.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_UNALIGNED_H +#define __ASM_GENERIC_UNALIGNED_H + +/* + * This is the most generic implementation of unaligned accesses + * and should work almost anywhere. 
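[Editor's note: an illustrative (hypothetical) use of the accessors this header selects; get_unaligned()/put_unaligned() expand to the le/be helpers chosen below, letting callers dereference a possibly misaligned pointer without risking an alignment trap on strict-alignment CPUs.]

	/* Read a native-endian u32 length field at an arbitrary byte offset. */
	static inline u32 example_read_len(const u8 *buf, size_t off)
	{
		return get_unaligned((const u32 *)(buf + off));
	}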
+ */
+#include <asm/byteorder.h>
+
+/* Set by the arch if it can handle unaligned accesses in hardware. */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/access_ok.h>
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#  include <linux/unaligned/le_struct.h>
+#  include <linux/unaligned/be_byteshift.h>
+# endif
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#  include <linux/unaligned/be_struct.h>
+#  include <linux/unaligned/le_byteshift.h>
+# endif
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_GENERIC_UNALIGNED_H */
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
new file mode 100644
index 0000000..35638c3
--- /dev/null
+++ b/include/asm-generic/user.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_USER_H
+#define __ASM_GENERIC_USER_H
+/*
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
+ */
+
+#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
new file mode 100644
index 0000000..cec543d
--- /dev/null
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_VSYSCALL_H
+#define __ASM_GENERIC_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#ifndef __arch_get_k_vdso_data
+static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
+{
+	return NULL;
+}
+#endif /* __arch_get_k_vdso_data */
+
+#ifndef __arch_update_vdso_data
+static __always_inline bool __arch_update_vdso_data(void)
+{
+	return true;
+}
+#endif /* __arch_update_vdso_data */
+
+#ifndef __arch_get_clock_mode
+static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
+{
+	return 0;
+}
+#endif /* __arch_get_clock_mode */
+
+#ifndef __arch_update_vsyscall
+static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
+						   struct timekeeper *tk)
+{
+}
+#endif /* __arch_update_vsyscall */
+
+#ifndef __arch_sync_vdso_data
+static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_data */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_VSYSCALL_H */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
new file mode 100644
index 0000000..adf91a7
--- /dev/null
+++ b/include/asm-generic/vga.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares
+ */
+#ifndef __ASM_GENERIC_VGA_H
+#define __ASM_GENERIC_VGA_H
+
+/*
+ * On most architectures that support VGA, we can just
+ * recalculate addresses and then access the videoram
+ * directly without any black magic.
+ *
+ * Everyone else needs to ioremap the address and use
+ * proper I/O accesses.
+ */
+#ifndef VGA_MAP_MEM
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x, y) (*(y) = (x))
+
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
new file mode 100644
index 0000000..d7616d0
--- /dev/null
+++ b/include/asm-generic/vmlinux.lds.h
@@ -0,0 +1,1020 @@
+/*
+ * Helper macros to support writing architecture specific
+ * linker scripts.
+ *
+ * A minimal linker script has the following content:
+ * [This is a sample, architectures may have special requirements]
+ *
+ *	OUTPUT_FORMAT(...)
+ *	OUTPUT_ARCH(...)
+ *	ENTRY(...)
+ *	SECTIONS
+ *	{
+ *		. = START;
+ *		__init_begin = .;
+ *		HEAD_TEXT_SECTION
+ *		INIT_TEXT_SECTION(PAGE_SIZE)
+ *		INIT_DATA_SECTION(...)
+ *		PERCPU_SECTION(CACHELINE_SIZE)
+ *		__init_end = .;
+ *
+ *		_stext = .;
+ *		TEXT_SECTION = 0
+ *		_etext = .;
+ *
+ *		_sdata = .;
+ *		RO_DATA_SECTION(PAGE_SIZE)
+ *		RW_DATA_SECTION(...)
+ *		_edata = .;
+ *
+ *		EXCEPTION_TABLE(...)
+ *		NOTES
+ *
+ *		BSS_SECTION(0, 0, 0)
+ *		_end = .;
+ *
+ *		STABS_DEBUG
+ *		DWARF_DEBUG
+ *
+ *		DISCARDS	// must be the last
+ *	}
+ *
+ * [__init_begin, __init_end] is the init section that may be freed after init
+ *   // __init_begin and __init_end should be page aligned, so that we can
+ *   // free the whole .init memory
+ * [_stext, _etext] is the text section
+ * [_sdata, _edata] is the data section
+ *
+ * Some of the included output sections have their own set of constants.
+ * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
+ * [__nosave_begin, __nosave_end] for the nosave data
+ */
+
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
+#define ALIGN_FUNCTION()  . = ALIGN(8)
+
+/*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ *
+ * RODATA_MAIN is not used because existing code already defines .rodata.x
+ * sections to be brought in with rodata.
+ */
+#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
+#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define SDATA_MAIN .sdata
+#define RODATA_MAIN .rodata
+#define BSS_MAIN .bss
+#define SBSS_MAIN .sbss
+#endif
+
+/*
+ * Align to a 32 byte boundary equal to the
+ * alignment gcc 4.5 uses for a struct
+ */
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+
+/* The actual configuration determines whether the init/exit sections
+ * are handled as text/data or whether they can be discarded (which
+ * often happens at runtime)
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec)    *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec)    *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define MCOUNT_REC()	. = ALIGN(8);			\
+			__start_mcount_loc = .;		\
+			KEEP(*(__patchable_function_entries)) \
+			__stop_mcount_loc = .;
+#else
+#define MCOUNT_REC()	.
= ALIGN(8); \ + __start_mcount_loc = .; \ + KEEP(*(__mcount_loc)) \ + __stop_mcount_loc = .; +#endif +#else +#define MCOUNT_REC() +#endif + +#ifdef CONFIG_TRACE_BRANCH_PROFILING +#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ + KEEP(*(_ftrace_annotated_branch)) \ + __stop_annotated_branch_profile = .; +#else +#define LIKELY_PROFILE() +#endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +#define BRANCH_PROFILE() __start_branch_profile = .; \ + KEEP(*(_ftrace_branch)) \ + __stop_branch_profile = .; +#else +#define BRANCH_PROFILE() +#endif + +#ifdef CONFIG_KPROBES +#define KPROBE_BLACKLIST() . = ALIGN(8); \ + __start_kprobe_blacklist = .; \ + KEEP(*(_kprobe_blacklist)) \ + __stop_kprobe_blacklist = .; +#else +#define KPROBE_BLACKLIST() +#endif + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ + __start_error_injection_whitelist = .; \ + KEEP(*(_error_injection_whitelist)) \ + __stop_error_injection_whitelist = .; +#else +#define ERROR_INJECT_WHITELIST() +#endif + +#ifdef CONFIG_EVENT_TRACING +#define FTRACE_EVENTS() . = ALIGN(8); \ + __start_ftrace_events = .; \ + KEEP(*(_ftrace_events)) \ + __stop_ftrace_events = .; \ + __start_ftrace_eval_maps = .; \ + KEEP(*(_ftrace_eval_map)) \ + __stop_ftrace_eval_maps = .; +#else +#define FTRACE_EVENTS() +#endif + +#ifdef CONFIG_TRACING +#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ + KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ + __stop___trace_bprintk_fmt = .; +#define TRACEPOINT_STR() __start___tracepoint_str = .; \ + KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ + __stop___tracepoint_str = .; +#else +#define TRACE_PRINTKS() +#define TRACEPOINT_STR() +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define TRACE_SYSCALLS() . = ALIGN(8); \ + __start_syscalls_metadata = .; \ + KEEP(*(__syscalls_metadata)) \ + __stop_syscalls_metadata = .; +#else +#define TRACE_SYSCALLS() +#endif + +#ifdef CONFIG_BPF_EVENTS +#define BPF_RAW_TP() STRUCT_ALIGN(); \ + __start__bpf_raw_tp = .; \ + KEEP(*(__bpf_raw_tp_map)) \ + __stop__bpf_raw_tp = .; +#else +#define BPF_RAW_TP() +#endif + +#ifdef CONFIG_SERIAL_EARLYCON +#define EARLYCON_TABLE() . = ALIGN(8); \ + __earlycon_table = .; \ + KEEP(*(__earlycon_table)) \ + __earlycon_table_end = .; +#else +#define EARLYCON_TABLE() +#endif + +#ifdef CONFIG_SECURITY +#define LSM_TABLE() . = ALIGN(8); \ + __start_lsm_info = .; \ + KEEP(*(.lsm_info.init)) \ + __end_lsm_info = .; +#define EARLY_LSM_TABLE() . = ALIGN(8); \ + __start_early_lsm_info = .; \ + KEEP(*(.early_lsm_info.init)) \ + __end_early_lsm_info = .; +#else +#define LSM_TABLE() +#define EARLY_LSM_TABLE() +#endif + +#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) +#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) +#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name) +#define _OF_TABLE_0(name) +#define _OF_TABLE_1(name) \ + . = ALIGN(8); \ + __##name##_of_table = .; \ + KEEP(*(__##name##_of_table)) \ + KEEP(*(__##name##_of_table_end)) + +#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer) +#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) +#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) +#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) +#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) +#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) + +#ifdef CONFIG_ACPI +#define ACPI_PROBE_TABLE(name) \ + . 
= ALIGN(8); \ + __##name##_acpi_probe_table = .; \ + KEEP(*(__##name##_acpi_probe_table)) \ + __##name##_acpi_probe_table_end = .; +#else +#define ACPI_PROBE_TABLE(name) +#endif + +#ifdef CONFIG_THERMAL +#define THERMAL_TABLE(name) \ + . = ALIGN(8); \ + __##name##_thermal_table = .; \ + KEEP(*(__##name##_thermal_table)) \ + __##name##_thermal_table_end = .; +#else +#define THERMAL_TABLE(name) +#endif + +#define KERNEL_DTB() \ + STRUCT_ALIGN(); \ + __dtb_start = .; \ + KEEP(*(.dtb.init.rodata)) \ + __dtb_end = .; + +/* + * .data section + */ +#define DATA_DATA \ + *(.xiptext) \ + *(DATA_MAIN) \ + *(.ref.data) \ + *(.data..shared_aligned) /* percpu related */ \ + MEM_KEEP(init.data*) \ + MEM_KEEP(exit.data*) \ + *(.data.unlikely) \ + __start_once = .; \ + *(.data.once) \ + __end_once = .; \ + STRUCT_ALIGN(); \ + *(__tracepoints) \ + /* implement dynamic printk debug */ \ + . = ALIGN(8); \ + __start___verbose = .; \ + KEEP(*(__verbose)) \ + __stop___verbose = .; \ + LIKELY_PROFILE() \ + BRANCH_PROFILE() \ + TRACE_PRINTKS() \ + BPF_RAW_TP() \ + TRACEPOINT_STR() + +/* + * Data section helpers + */ +#define NOSAVE_DATA \ + . = ALIGN(PAGE_SIZE); \ + __nosave_begin = .; \ + *(.data..nosave) \ + . = ALIGN(PAGE_SIZE); \ + __nosave_end = .; + +#define PAGE_ALIGNED_DATA(page_align) \ + . = ALIGN(page_align); \ + *(.data..page_aligned) \ + . = ALIGN(page_align); + +#define READ_MOSTLY_DATA(align) \ + . = ALIGN(align); \ + *(.data..read_mostly) \ + . = ALIGN(align); + +#define CACHELINE_ALIGNED_DATA(align) \ + . = ALIGN(align); \ + *(.data..cacheline_aligned) + +#define INIT_TASK_DATA(align) \ + . = ALIGN(align); \ + __start_init_task = .; \ + init_thread_union = .; \ + init_stack = .; \ + KEEP(*(.data..init_task)) \ + KEEP(*(.data..init_thread_info)) \ + . = __start_init_task + THREAD_SIZE; \ + __end_init_task = .; + +#define JUMP_TABLE_DATA \ + . = ALIGN(8); \ + __start___jump_table = .; \ + KEEP(*(__jump_table)) \ + __stop___jump_table = .; + +/* + * Allow architectures to handle ro_after_init data on their + * own by defining an empty RO_AFTER_INIT_DATA. + */ +#ifndef RO_AFTER_INIT_DATA +#define RO_AFTER_INIT_DATA \ + __start_ro_after_init = .; \ + *(.data..ro_after_init) \ + JUMP_TABLE_DATA \ + __end_ro_after_init = .; +#endif + +/* + * Read only Data + */ +#define RO_DATA_SECTION(align) \ + . = ALIGN((align)); \ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + __start_rodata = .; \ + *(.rodata) *(.rodata.*) \ + RO_AFTER_INIT_DATA /* Read only after init */ \ + . 
= ALIGN(8); \ + __start___tracepoints_ptrs = .; \ + KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ + __stop___tracepoints_ptrs = .; \ + *(__tracepoints_strings)/* Tracepoints: strings */ \ + } \ + \ + .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ + *(.rodata1) \ + } \ + \ + /* PCI quirks */ \ + .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ + __start_pci_fixups_early = .; \ + KEEP(*(.pci_fixup_early)) \ + __end_pci_fixups_early = .; \ + __start_pci_fixups_header = .; \ + KEEP(*(.pci_fixup_header)) \ + __end_pci_fixups_header = .; \ + __start_pci_fixups_final = .; \ + KEEP(*(.pci_fixup_final)) \ + __end_pci_fixups_final = .; \ + __start_pci_fixups_enable = .; \ + KEEP(*(.pci_fixup_enable)) \ + __end_pci_fixups_enable = .; \ + __start_pci_fixups_resume = .; \ + KEEP(*(.pci_fixup_resume)) \ + __end_pci_fixups_resume = .; \ + __start_pci_fixups_resume_early = .; \ + KEEP(*(.pci_fixup_resume_early)) \ + __end_pci_fixups_resume_early = .; \ + __start_pci_fixups_suspend = .; \ + KEEP(*(.pci_fixup_suspend)) \ + __end_pci_fixups_suspend = .; \ + __start_pci_fixups_suspend_late = .; \ + KEEP(*(.pci_fixup_suspend_late)) \ + __end_pci_fixups_suspend_late = .; \ + } \ + \ + /* Built-in firmware blobs */ \ + .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ + __start_builtin_fw = .; \ + KEEP(*(.builtin_fw)) \ + __end_builtin_fw = .; \ + } \ + \ + TRACEDATA \ + \ + /* Kernel symbol table: Normal symbols */ \ + __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ + __start___ksymtab = .; \ + KEEP(*(SORT(___ksymtab+*))) \ + __stop___ksymtab = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ + __start___ksymtab_gpl = .; \ + KEEP(*(SORT(___ksymtab_gpl+*))) \ + __stop___ksymtab_gpl = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ + __start___ksymtab_unused = .; \ + KEEP(*(SORT(___ksymtab_unused+*))) \ + __stop___ksymtab_unused = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ + __start___ksymtab_unused_gpl = .; \ + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ + __stop___ksymtab_unused_gpl = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ + __start___ksymtab_gpl_future = .; \ + KEEP(*(SORT(___ksymtab_gpl_future+*))) \ + __stop___ksymtab_gpl_future = .; \ + } \ + \ + /* Kernel symbol table: Normal symbols */ \ + __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ + __start___kcrctab = .; \ + KEEP(*(SORT(___kcrctab+*))) \ + __stop___kcrctab = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ + __start___kcrctab_gpl = .; \ + KEEP(*(SORT(___kcrctab_gpl+*))) \ + __stop___kcrctab_gpl = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ + __start___kcrctab_unused = .; \ + KEEP(*(SORT(___kcrctab_unused+*))) \ + __stop___kcrctab_unused = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ + __start___kcrctab_unused_gpl = .; \ + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ + __stop___kcrctab_unused_gpl = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __kcrctab_gpl_future : 
AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
+		__start___kcrctab_gpl_future = .;	\
+		KEEP(*(SORT(___kcrctab_gpl_future+*)))	\
+		__stop___kcrctab_gpl_future = .;	\
+	}						\
+							\
+	/* Kernel symbol table: strings */		\
+	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
+		*(__ksymtab_strings)			\
+	}						\
+							\
+	/* __*init sections */				\
+	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
+		*(.ref.rodata)				\
+		MEM_KEEP(init.rodata)			\
+		MEM_KEEP(exit.rodata)			\
+	}						\
+							\
+	/* Built-in module parameters. */		\
+	__param : AT(ADDR(__param) - LOAD_OFFSET) {	\
+		__start___param = .;			\
+		KEEP(*(__param))			\
+		__stop___param = .;			\
+	}						\
+							\
+	/* Built-in module versions. */			\
+	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {	\
+		__start___modver = .;			\
+		KEEP(*(__modver))			\
+		__stop___modver = .;			\
+	}						\
+							\
+	BTF						\
+							\
+	. = ALIGN((align));				\
+	__end_rodata = .;
+
+/* RODATA & RO_DATA provided for backward compatibility.
+ * All archs are supposed to use RO_DATA() */
+#define RODATA          RO_DATA_SECTION(4096)
+#define RO_DATA(align)  RO_DATA_SECTION(align)
+
+/*
+ * .text section. Map to function alignment to avoid address changes
+ * during the second ld pass when generating System.map
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
+#define TEXT_TEXT						\
+		ALIGN_FUNCTION();				\
+		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+		*(.text..refcount)				\
+		*(.ref.text)					\
+	MEM_KEEP(init.text*)					\
+	MEM_KEEP(exit.text*)					\
+
+
+/* sched.text is aligned to function alignment to ensure we have the same
+ * address even at the second ld pass when generating System.map */
+#define SCHED_TEXT						\
+		ALIGN_FUNCTION();				\
+		__sched_text_start = .;				\
+		*(.sched.text)					\
+		__sched_text_end = .;
+
+/* spinlock.text is aligned to function alignment to ensure we have the same
+ * address even at the second ld pass when generating System.map */
+#define LOCK_TEXT						\
+		ALIGN_FUNCTION();				\
+		__lock_text_start = .;				\
+		*(.spinlock.text)				\
+		__lock_text_end = .;
+
+#define CPUIDLE_TEXT						\
+		ALIGN_FUNCTION();				\
+		__cpuidle_text_start = .;			\
+		*(.cpuidle.text)				\
+		__cpuidle_text_end = .;
+
+#define KPROBES_TEXT						\
+		ALIGN_FUNCTION();				\
+		__kprobes_text_start = .;			\
+		*(.kprobes.text)				\
+		__kprobes_text_end = .;
+
+#define ENTRY_TEXT						\
+		ALIGN_FUNCTION();				\
+		__entry_text_start = .;				\
+		*(.entry.text)					\
+		__entry_text_end = .;
+
+#define IRQENTRY_TEXT						\
+		ALIGN_FUNCTION();				\
+		__irqentry_text_start = .;			\
+		*(.irqentry.text)				\
+		__irqentry_text_end = .;
+
+#define SOFTIRQENTRY_TEXT					\
+		ALIGN_FUNCTION();				\
+		__softirqentry_text_start = .;			\
+		*(.softirqentry.text)				\
+		__softirqentry_text_end = .;
+
+/* Section used for early init (in .S files) */
+#define HEAD_TEXT  KEEP(*(.head.text))
+
+#define HEAD_TEXT_SECTION					\
+	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
+		HEAD_TEXT					\
+	}
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align)					\
+	. = ALIGN(align);					\
+	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
+		__start___ex_table = .;				\
+		KEEP(*(__ex_table))				\
+		__stop___ex_table = .;				\
+	}
+
+/*
+ * .BTF
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+#define BTF							\
+	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {			\
+		__start_BTF = .;				\
+		*(.BTF)						\
+		__stop_BTF = .;					\
+	}
+#else
+#define BTF
+#endif
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA_SECTION(align)				\
+	.
= ALIGN(align); \ + .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ + INIT_TASK_DATA(align) \ + } + +#ifdef CONFIG_CONSTRUCTORS +#define KERNEL_CTORS() . = ALIGN(8); \ + __ctors_start = .; \ + KEEP(*(.ctors)) \ + KEEP(*(SORT(.init_array.*))) \ + KEEP(*(.init_array)) \ + __ctors_end = .; +#else +#define KERNEL_CTORS() +#endif + +/* init and exit section handling */ +#define INIT_DATA \ + KEEP(*(SORT(___kentry+*))) \ + *(.init.data init.data.*) \ + MEM_DISCARD(init.data*) \ + KERNEL_CTORS() \ + MCOUNT_REC() \ + *(.init.rodata .init.rodata.*) \ + FTRACE_EVENTS() \ + TRACE_SYSCALLS() \ + KPROBE_BLACKLIST() \ + ERROR_INJECT_WHITELIST() \ + MEM_DISCARD(init.rodata) \ + CLK_OF_TABLES() \ + RESERVEDMEM_OF_TABLES() \ + TIMER_OF_TABLES() \ + CPU_METHOD_OF_TABLES() \ + CPUIDLE_METHOD_OF_TABLES() \ + KERNEL_DTB() \ + IRQCHIP_OF_MATCH_TABLE() \ + ACPI_PROBE_TABLE(irqchip) \ + ACPI_PROBE_TABLE(timer) \ + THERMAL_TABLE(governor) \ + EARLYCON_TABLE() \ + LSM_TABLE() \ + EARLY_LSM_TABLE() + +#define INIT_TEXT \ + *(.init.text .init.text.*) \ + *(.text.startup) \ + MEM_DISCARD(init.text*) + +#define EXIT_DATA \ + *(.exit.data .exit.data.*) \ + *(.fini_array .fini_array.*) \ + *(.dtors .dtors.*) \ + MEM_DISCARD(exit.data*) \ + MEM_DISCARD(exit.rodata*) + +#define EXIT_TEXT \ + *(.exit.text) \ + *(.text.exit) \ + MEM_DISCARD(exit.text) + +#define EXIT_CALL \ + *(.exitcall.exit) + +/* + * bss (Block Started by Symbol) - uninitialized data + * zeroed during startup + */ +#define SBSS(sbss_align) \ + . = ALIGN(sbss_align); \ + .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ + *(.dynsbss) \ + *(SBSS_MAIN) \ + *(.scommon) \ + } + +/* + * Allow archectures to redefine BSS_FIRST_SECTIONS to add extra + * sections to the front of bss. + */ +#ifndef BSS_FIRST_SECTIONS +#define BSS_FIRST_SECTIONS +#endif + +#define BSS(bss_align) \ + . = ALIGN(bss_align); \ + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ + BSS_FIRST_SECTIONS \ + . = ALIGN(PAGE_SIZE); \ + *(.bss..page_aligned) \ + . = ALIGN(PAGE_SIZE); \ + *(.dynbss) \ + *(BSS_MAIN) \ + *(COMMON) \ + } + +/* + * DWARF debug sections. + * Symbols in the DWARF debugging sections are relative to + * the beginning of the section so we begin them at 0. + */ +#define DWARF_DEBUG \ + /* DWARF 1 */ \ + .debug 0 : { *(.debug) } \ + .line 0 : { *(.line) } \ + /* GNU DWARF 1 extensions */ \ + .debug_srcinfo 0 : { *(.debug_srcinfo) } \ + .debug_sfnames 0 : { *(.debug_sfnames) } \ + /* DWARF 1.1 and DWARF 2 */ \ + .debug_aranges 0 : { *(.debug_aranges) } \ + .debug_pubnames 0 : { *(.debug_pubnames) } \ + /* DWARF 2 */ \ + .debug_info 0 : { *(.debug_info \ + .gnu.linkonce.wi.*) } \ + .debug_abbrev 0 : { *(.debug_abbrev) } \ + .debug_line 0 : { *(.debug_line) } \ + .debug_frame 0 : { *(.debug_frame) } \ + .debug_str 0 : { *(.debug_str) } \ + .debug_loc 0 : { *(.debug_loc) } \ + .debug_macinfo 0 : { *(.debug_macinfo) } \ + .debug_pubtypes 0 : { *(.debug_pubtypes) } \ + /* DWARF 3 */ \ + .debug_ranges 0 : { *(.debug_ranges) } \ + /* SGI/MIPS DWARF 2 extensions */ \ + .debug_weaknames 0 : { *(.debug_weaknames) } \ + .debug_funcnames 0 : { *(.debug_funcnames) } \ + .debug_typenames 0 : { *(.debug_typenames) } \ + .debug_varnames 0 : { *(.debug_varnames) } \ + /* GNU DWARF 2 extensions */ \ + .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \ + .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \ + /* DWARF 4 */ \ + .debug_types 0 : { *(.debug_types) } \ + /* DWARF 5 */ \ + .debug_macro 0 : { *(.debug_macro) } \ + .debug_addr 0 : { *(.debug_addr) } + + /* Stabs debugging sections. 
*/ +#define STABS_DEBUG \ + .stab 0 : { *(.stab) } \ + .stabstr 0 : { *(.stabstr) } \ + .stab.excl 0 : { *(.stab.excl) } \ + .stab.exclstr 0 : { *(.stab.exclstr) } \ + .stab.index 0 : { *(.stab.index) } \ + .stab.indexstr 0 : { *(.stab.indexstr) } \ + .comment 0 : { *(.comment) } + +#ifdef CONFIG_GENERIC_BUG +#define BUG_TABLE \ + . = ALIGN(8); \ + __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ + __start___bug_table = .; \ + KEEP(*(__bug_table)) \ + __stop___bug_table = .; \ + } +#else +#define BUG_TABLE +#endif + +#ifdef CONFIG_UNWINDER_ORC +#define ORC_UNWIND_TABLE \ + . = ALIGN(4); \ + .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ + __start_orc_unwind_ip = .; \ + KEEP(*(.orc_unwind_ip)) \ + __stop_orc_unwind_ip = .; \ + } \ + . = ALIGN(2); \ + .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ + __start_orc_unwind = .; \ + KEEP(*(.orc_unwind)) \ + __stop_orc_unwind = .; \ + } \ + . = ALIGN(4); \ + .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \ + orc_lookup = .; \ + . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \ + LOOKUP_BLOCK_SIZE) + 1) * 4; \ + orc_lookup_end = .; \ + } +#else +#define ORC_UNWIND_TABLE +#endif + +#ifdef CONFIG_PM_TRACE +#define TRACEDATA \ + . = ALIGN(4); \ + .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ + __tracedata_start = .; \ + KEEP(*(.tracedata)) \ + __tracedata_end = .; \ + } +#else +#define TRACEDATA +#endif + +#define NOTES \ + .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ + __start_notes = .; \ + KEEP(*(.note.*)) \ + __stop_notes = .; \ + } + +#define INIT_SETUP(initsetup_align) \ + . = ALIGN(initsetup_align); \ + __setup_start = .; \ + KEEP(*(.init.setup)) \ + __setup_end = .; + +#define INIT_CALLS_LEVEL(level) \ + __initcall##level##_start = .; \ + KEEP(*(.initcall##level##.init)) \ + KEEP(*(.initcall##level##s.init)) \ + +#define INIT_CALLS \ + __initcall_start = .; \ + KEEP(*(.initcallearly.init)) \ + INIT_CALLS_LEVEL(0) \ + INIT_CALLS_LEVEL(1) \ + INIT_CALLS_LEVEL(2) \ + INIT_CALLS_LEVEL(3) \ + INIT_CALLS_LEVEL(4) \ + INIT_CALLS_LEVEL(5) \ + INIT_CALLS_LEVEL(rootfs) \ + INIT_CALLS_LEVEL(6) \ + INIT_CALLS_LEVEL(7) \ + __initcall_end = .; + +#define CON_INITCALL \ + __con_initcall_start = .; \ + KEEP(*(.con_initcall.init)) \ + __con_initcall_end = .; + +#ifdef CONFIG_BLK_DEV_INITRD +#define INIT_RAM_FS \ + . = ALIGN(4); \ + __initramfs_start = .; \ + KEEP(*(.init.ramfs)) \ + . = ALIGN(8); \ + KEEP(*(.init.ramfs.info)) +#else +#define INIT_RAM_FS +#endif + +/* + * Memory encryption operates on a page basis. Since we need to clear + * the memory encryption mask for this section, it needs to be aligned + * on a page boundary and be a page-size multiple in length. + * + * Note: We use a separate section so that only this section gets + * decrypted to avoid exposing more than we wish. + */ +#ifdef CONFIG_AMD_MEM_ENCRYPT +#define PERCPU_DECRYPTED_SECTION \ + . = ALIGN(PAGE_SIZE); \ + *(.data..percpu..decrypted) \ + . = ALIGN(PAGE_SIZE); +#else +#define PERCPU_DECRYPTED_SECTION +#endif + + +/* + * Default discarded sections. + * + * Some archs want to discard exit text/data at runtime rather than + * link time due to cross-section references such as alt instructions, + * bug table, eh_frame, etc. DISCARDS must be the last of output + * section definitions so that such archs put those in earlier section + * definitions. 
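[Editor's note: a simplified sketch of the boot-time consumer of the INIT_CALLS tables defined above (cf. do_initcalls() in init/main.c); the real kernel iterates level by level and, with CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, stores 32-bit relative offsets rather than raw function pointers, so treat this as illustrative only.]

	typedef int (*initcall_t)(void);
	extern initcall_t __initcall_start[], __initcall_end[];

	static void run_initcalls(void)
	{
		initcall_t *fn;

		/* Walk every function pointer the linker collected
		 * between __initcall_start and __initcall_end. */
		for (fn = __initcall_start; fn < __initcall_end; fn++)
			(*fn)();
	}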
+ */ +#define DISCARDS \ + /DISCARD/ : { \ + EXIT_TEXT \ + EXIT_DATA \ + EXIT_CALL \ + *(.discard) \ + *(.discard.*) \ + *(.modinfo) \ + } + +/** + * PERCPU_INPUT - the percpu input sections + * @cacheline: cacheline size + * + * The core percpu section names and core symbols which do not rely + * directly upon load addresses. + * + * @cacheline is used to align subsections to avoid false cacheline + * sharing between subsections for different purposes. + */ +#define PERCPU_INPUT(cacheline) \ + __per_cpu_start = .; \ + *(.data..percpu..first) \ + . = ALIGN(PAGE_SIZE); \ + *(.data..percpu..page_aligned) \ + . = ALIGN(cacheline); \ + *(.data..percpu..read_mostly) \ + . = ALIGN(cacheline); \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ + PERCPU_DECRYPTED_SECTION \ + __per_cpu_end = .; + +/** + * PERCPU_VADDR - define output section for percpu area + * @cacheline: cacheline size + * @vaddr: explicit base address (optional) + * @phdr: destination PHDR (optional) + * + * Macro which expands to output section for percpu area. + * + * @cacheline is used to align subsections to avoid false cacheline + * sharing between subsections for different purposes. + * + * If @vaddr is not blank, it specifies explicit base address and all + * percpu symbols will be offset from the given address. If blank, + * @vaddr always equals @laddr + LOAD_OFFSET. + * + * @phdr defines the output PHDR to use if not blank. Be warned that + * output PHDR is sticky. If @phdr is specified, the next output + * section in the linker script will go there too. @phdr should have + * a leading colon. + * + * Note that this macros defines __per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU_SECTION. + */ +#define PERCPU_VADDR(cacheline, vaddr, phdr) \ + __per_cpu_load = .; \ + .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \ + PERCPU_INPUT(cacheline) \ + } phdr \ + . = __per_cpu_load + SIZEOF(.data..percpu); + +/** + * PERCPU_SECTION - define output section for percpu area, simple version + * @cacheline: cacheline size + * + * Align to PAGE_SIZE and outputs output section for percpu area. This + * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and + * __per_cpu_start will be identical. + * + * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,) + * except that __per_cpu_load is defined as a relative symbol against + * .data..percpu which is required for relocatable x86_32 configuration. + */ +#define PERCPU_SECTION(cacheline) \ + . = ALIGN(PAGE_SIZE); \ + .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ + __per_cpu_load = .; \ + PERCPU_INPUT(cacheline) \ + } + + +/* + * Definition of the high level *_SECTION macros + * They will fit only a subset of the architectures + */ + + +/* + * Writeable data. + * All sections are combined in a single .data section. + * The sections following CONSTRUCTORS are arranged so their + * typical alignment matches. + * A cacheline is typical/always less than a PAGE_SIZE so + * the sections that has this restriction (or similar) + * is located before the ones requiring PAGE_SIZE alignment. + * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which + * matches the requirement of PAGE_ALIGNED_DATA. + * + * use 0 as page_align if page_aligned data is not used */ +#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ + . 
= ALIGN(PAGE_SIZE);						\
+	.data : AT(ADDR(.data) - LOAD_OFFSET) {			\
+		INIT_TASK_DATA(inittask)			\
+		NOSAVE_DATA					\
+		PAGE_ALIGNED_DATA(pagealigned)			\
+		CACHELINE_ALIGNED_DATA(cacheline)		\
+		READ_MOSTLY_DATA(cacheline)			\
+		DATA_DATA					\
+		CONSTRUCTORS					\
+	}							\
+	BUG_TABLE						\
+
+#define INIT_TEXT_SECTION(inittext_align)			\
+	. = ALIGN(inittext_align);				\
+	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
+		_sinittext = .;					\
+		INIT_TEXT					\
+		_einittext = .;					\
+	}
+
+#define INIT_DATA_SECTION(initsetup_align)			\
+	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
+		INIT_DATA					\
+		INIT_SETUP(initsetup_align)			\
+		INIT_CALLS					\
+		CON_INITCALL					\
+		INIT_RAM_FS					\
+	}
+
+#define BSS_SECTION(sbss_align, bss_align, stop_align)		\
+	. = ALIGN(sbss_align);					\
+	__bss_start = .;					\
+	SBSS(sbss_align)					\
+	BSS(bss_align)						\
+	. = ALIGN(stop_align);					\
+	__bss_stop = .;
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
new file mode 100644
index 0000000..b1a4967
--- /dev/null
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 0000000..20c93f0
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+struct word_at_a_time {
+	const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+	unsigned long mask = (val & c->low_bits) + c->low_bits;
+	return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+	long byte = 0;
+#ifdef CONFIG_64BIT
+	if (mask >> 32)
+		mask >>= 32;
+	else
+		byte = 4;
+#endif
+	if (mask >> 16)
+		mask >>= 16;
+	else
+		byte += 2;
+	return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+	unsigned long rhs = val | c->low_bits;
+	*data = rhs;
+	return (val + c->high_bits) & ~rhs;
+}
+
+#ifndef zero_bytemask
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
+
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
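[Editor's note: a worked example of the multiply trick defined just below, assuming a little-endian 64-bit build. For a word whose third byte (byte index 2) is zero, has_zero() yields 0x0000000000800000; create_zero_mask() turns that into 0x000000000000ffff; and count_masked_bytes() then computes 0xffff * 0x0001020304050608 >> 56 == 2, the index of the first zero byte.]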
+ */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h new file mode 100644 index 0000000..b62a2a5 --- /dev/null +++ b/include/asm-generic/xor.h @@ -0,0 +1,710 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * include/asm-generic/xor.h + * + * Generic optimized RAID-5 checksumming functions. + */ + +#include + +static void +xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0]; + p1[1] ^= p2[1]; + p1[2] ^= p2[2]; + p1[3] ^= p2[3]; + p1[4] ^= p2[4]; + p1[5] ^= p2[5]; + p1[6] ^= p2[6]; + p1[7] ^= p2[7]; + p1 += 8; + p2 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0]; + p1[1] ^= p2[1] ^ p3[1]; + p1[2] ^= p2[2] ^ p3[2]; + p1[3] ^= p2[3] ^ p3[3]; + p1[4] ^= p2[4] ^ p3[4]; + p1[5] ^= p2[5] ^ p3[5]; + p1[6] ^= p2[6] ^ p3[6]; + p1[7] ^= p2[7] ^ p3[7]; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof 
(long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + d0 ^= p5[0]; + d1 ^= p5[1]; + d2 ^= p5[2]; + d3 ^= p5[3]; + d4 ^= p5[4]; + d5 ^= p5[5]; + d6 ^= p5[6]; + d7 ^= p5[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + prefetchw(p1); + prefetch(p2); + + do { + prefetchw(p1+8); + prefetch(p2+8); + once_more: + p1[0] ^= p2[0]; + p1[1] ^= p2[1]; + p1[2] ^= p2[2]; + p1[3] ^= p2[3]; + p1[4] ^= p2[4]; + p1[5] ^= p2[5]; + p1[6] ^= p2[6]; + p1[7] ^= p2[7]; + p1 += 8; + p2 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + prefetchw(p1); + prefetch(p2); + prefetch(p3); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + once_more: + p1[0] ^= p2[0] ^ p3[0]; + p1[1] ^= p2[1] ^ p3[1]; + p1[2] ^= p2[2] ^ p3[2]; + p1[3] ^= p2[3] ^ p3[3]; + p1[4] ^= p2[4] ^ p3[4]; + p1[5] ^= p2[5] ^ p3[5]; + p1[6] ^= p2[6] ^ p3[6]; + p1[7] ^= p2[7] ^ p3[7]; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + once_more: + p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + prefetch(p5); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + prefetch(p5+8); + once_more: + p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) 
/ 8 - 1; + + prefetchw(p1); + prefetch(p2); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + prefetch(p5); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + prefetch(p5+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + d0 ^= p5[0]; + d1 ^= p5[1]; + d2 ^= p5[2]; + d3 ^= p5[3]; + d4 ^= p5[4]; + d5 ^= p5[5]; + d6 ^= p5[6]; + d7 ^= p5[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static struct xor_block_template xor_block_8regs = { + .name = "8regs", + .do_2 = xor_8regs_2, + .do_3 = xor_8regs_3, + .do_4 = xor_8regs_4, + .do_5 = xor_8regs_5, +}; + +static struct xor_block_template xor_block_32regs = { + .name = "32regs", + .do_2 = xor_32regs_2, + .do_3 = xor_32regs_3, + .do_4 = xor_32regs_4, + .do_5 = xor_32regs_5, +}; + +static struct xor_block_template xor_block_8regs_p __maybe_unused = { + .name = "8regs_prefetch", + .do_2 = xor_8regs_p_2, + .do_3 = xor_8regs_p_3, + .do_4 = xor_8regs_p_4, + .do_5 = xor_8regs_p_5, +}; + +static struct xor_block_template xor_block_32regs_p __maybe_unused = { + .name = "32regs_prefetch", + .do_2 = xor_32regs_p_2, + .do_3 = xor_32regs_p_3, + .do_4 = xor_32regs_p_4, + .do_5 = xor_32regs_p_5, +}; + +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + } while (0) diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h new file mode 100644 index 0000000..1d68d56 --- /dev/null +++ b/include/clocksource/arm_arch_timer.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. 
+ */ +#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H +#define __CLKSOURCE_ARM_ARCH_TIMER_H + +#include +#include +#include + +#define ARCH_TIMER_TYPE_CP15 BIT(0) +#define ARCH_TIMER_TYPE_MEM BIT(1) + +#define ARCH_TIMER_CTRL_ENABLE (1 << 0) +#define ARCH_TIMER_CTRL_IT_MASK (1 << 1) +#define ARCH_TIMER_CTRL_IT_STAT (1 << 2) + +#define CNTHCTL_EL1PCTEN (1 << 0) +#define CNTHCTL_EL1PCEN (1 << 1) +#define CNTHCTL_EVNTEN (1 << 2) +#define CNTHCTL_EVNTDIR (1 << 3) +#define CNTHCTL_EVNTI (0xF << 4) + +enum arch_timer_reg { + ARCH_TIMER_REG_CTRL, + ARCH_TIMER_REG_TVAL, +}; + +enum arch_timer_ppi_nr { + ARCH_TIMER_PHYS_SECURE_PPI, + ARCH_TIMER_PHYS_NONSECURE_PPI, + ARCH_TIMER_VIRT_PPI, + ARCH_TIMER_HYP_PPI, + ARCH_TIMER_MAX_TIMER_PPI +}; + +enum arch_timer_spi_nr { + ARCH_TIMER_PHYS_SPI, + ARCH_TIMER_VIRT_SPI, + ARCH_TIMER_MAX_TIMER_SPI +}; + +#define ARCH_TIMER_PHYS_ACCESS 0 +#define ARCH_TIMER_VIRT_ACCESS 1 +#define ARCH_TIMER_MEM_PHYS_ACCESS 2 +#define ARCH_TIMER_MEM_VIRT_ACCESS 3 + +#define ARCH_TIMER_MEM_MAX_FRAMES 8 + +#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */ +#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */ +#define ARCH_TIMER_VIRT_EVT_EN (1 << 2) +#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4) +#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT) +#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */ +#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */ + +#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100 +#define ARCH_TIMER_EVT_STREAM_FREQ \ + (USEC_PER_SEC / ARCH_TIMER_EVT_STREAM_PERIOD_US) + +struct arch_timer_kvm_info { + struct timecounter timecounter; + int virtual_irq; + int physical_irq; +}; + +struct arch_timer_mem_frame { + bool valid; + phys_addr_t cntbase; + size_t size; + int phys_irq; + int virt_irq; +}; + +struct arch_timer_mem { + phys_addr_t cntctlbase; + size_t size; + struct arch_timer_mem_frame frame[ARCH_TIMER_MEM_MAX_FRAMES]; +}; + +#ifdef CONFIG_ARM_ARCH_TIMER + +extern u32 arch_timer_get_rate(void); +extern u64 (*arch_timer_read_counter)(void); +extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void); +extern bool arch_timer_evtstrm_available(void); + +#else + +static inline u32 arch_timer_get_rate(void) +{ + return 0; +} + +static inline u64 arch_timer_read_counter(void) +{ + return 0; +} + +static inline bool arch_timer_evtstrm_available(void) +{ + return false; +} + +#endif + +#endif diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h new file mode 100644 index 0000000..422f5e5 --- /dev/null +++ b/include/clocksource/hyperv_timer.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Definitions for the clocksource provided by the Hyper-V + * hypervisor to guest VMs, as described in the Hyper-V Top + * Level Functional Spec (TLFS). + * + * Copyright (C) 2019, Microsoft, Inc. 
+ * + * Author: Michael Kelley + */ + +#ifndef __CLKSOURCE_HYPERV_TIMER_H +#define __CLKSOURCE_HYPERV_TIMER_H + +#include +#include +#include + +#define HV_MAX_MAX_DELTA_TICKS 0xffffffff +#define HV_MIN_DELTA_TICKS 1 + +/* Routines called by the VMbus driver */ +extern int hv_stimer_alloc(int sint); +extern void hv_stimer_free(void); +extern void hv_stimer_init(unsigned int cpu); +extern void hv_stimer_cleanup(unsigned int cpu); +extern void hv_stimer_global_cleanup(void); +extern void hv_stimer0_isr(void); + +#ifdef CONFIG_HYPERV_TIMER +extern struct clocksource *hyperv_cs; +extern void hv_init_clocksource(void); + +extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void); + +static inline notrace u64 +hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc) +{ + u64 scale, offset; + u32 sequence; + + /* + * The protocol for reading Hyper-V TSC page is specified in Hypervisor + * Top-Level Functional Specification ver. 3.0 and above. To get the + * reference time we must do the following: + * - READ ReferenceTscSequence + * A special '0' value indicates the time source is unreliable and we + * need to use something else. The currently published specification + * versions (up to 4.0b) contain a mistake and wrongly claim '-1' + * instead of '0' as the special value, see commit c35b82ef0294. + * - ReferenceTime = + * ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset + * - READ ReferenceTscSequence again. In case its value has changed + * since our first reading we need to discard ReferenceTime and repeat + * the whole sequence as the hypervisor was updating the page in + * between. + */ + do { + sequence = READ_ONCE(tsc_pg->tsc_sequence); + if (!sequence) + return U64_MAX; + /* + * Make sure we read sequence before we read other values from + * TSC page. + */ + smp_rmb(); + + scale = READ_ONCE(tsc_pg->tsc_scale); + offset = READ_ONCE(tsc_pg->tsc_offset); + *cur_tsc = hv_get_raw_timer(); + + /* + * Make sure we read sequence after we read all other values + * from TSC page. + */ + smp_rmb(); + + } while (READ_ONCE(tsc_pg->tsc_sequence) != sequence); + + return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset; +} + +static inline notrace u64 +hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) +{ + u64 cur_tsc; + + return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc); +} + +#else /* CONFIG_HYPERV_TIMER */ +static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void) +{ + return NULL; +} + +static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, + u64 *cur_tsc) +{ + return U64_MAX; +} +#endif /* CONFIG_HYPERV_TIMER */ + +#endif diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h new file mode 100644 index 0000000..0cfe7b9 --- /dev/null +++ b/include/clocksource/pxa.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * PXA clocksource, clockevents, and OST interrupt handlers. + * + * Copyright (C) 2014 Robert Jarzmik + */ + +#ifndef _CLOCKSOURCE_PXA_H +#define _CLOCKSOURCE_PXA_H + +extern void pxa_timer_nodt_init(int irq, void __iomem *base); + +#endif diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h new file mode 100644 index 0000000..c395238 --- /dev/null +++ b/include/clocksource/samsung_pwm.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
+ */ +#ifndef __CLOCKSOURCE_SAMSUNG_PWM_H +#define __CLOCKSOURCE_SAMSUNG_PWM_H + +#include + +#define SAMSUNG_PWM_NUM 5 + +/* + * Following declaration must be in an ifdef due to this symbol being static + * in pwm-samsung driver if the clocksource driver is not compiled in and the + * spinlock is not shared between both drivers. + */ +#ifdef CONFIG_CLKSRC_SAMSUNG_PWM +extern spinlock_t samsung_pwm_lock; +#endif + +struct samsung_pwm_variant { + u8 bits; + u8 div_base; + u8 tclk_mask; + u8 output_mask; + bool has_tint_cstat; +}; + +void samsung_pwm_clocksource_init(void __iomem *base, + unsigned int *irqs, struct samsung_pwm_variant *variant); + +#endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */ diff --git a/include/clocksource/timer-davinci.h b/include/clocksource/timer-davinci.h new file mode 100644 index 0000000..1dcc133 --- /dev/null +++ b/include/clocksource/timer-davinci.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI DaVinci clocksource driver + * + * Copyright (C) 2019 Texas Instruments + * Author: Bartosz Golaszewski + */ + +#ifndef __TIMER_DAVINCI_H__ +#define __TIMER_DAVINCI_H__ + +#include +#include + +enum { + DAVINCI_TIMER_CLOCKEVENT_IRQ, + DAVINCI_TIMER_CLOCKSOURCE_IRQ, + DAVINCI_TIMER_NUM_IRQS, +}; + +/** + * struct davinci_timer_cfg - davinci clocksource driver configuration struct + * @reg: register range resource + * @irq: clockevent and clocksource interrupt resources + * @cmp_off: if set - it specifies the compare register used for clockevent + * + * Note: if the compare register is specified, the driver will use the bottom + * clock half for both clocksource and clockevent and the compare register + * to generate event irqs. The user must supply the correct compare register + * interrupt number. + * + * This is only used by da830 the DSP of which uses the top half. The timer + * driver still configures the top half to run in free-run mode. 
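+ *
+ * A minimal registration sketch (the MMIO range, IRQ numbers and clock
+ * below are placeholders, not values from a real board file):
+ *
+ *	static const struct davinci_timer_cfg cfg = {
+ *		.reg = DEFINE_RES_MEM(0x01c21400, SZ_4K),
+ *		.irq = {
+ *			[DAVINCI_TIMER_CLOCKEVENT_IRQ] = DEFINE_RES_IRQ(21),
+ *			[DAVINCI_TIMER_CLOCKSOURCE_IRQ] = DEFINE_RES_IRQ(22),
+ *		},
+ *	};
+ *
+ *	davinci_timer_register(clk, &cfg);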
+ */ +struct davinci_timer_cfg { + struct resource reg; + struct resource irq[DAVINCI_TIMER_NUM_IRQS]; + unsigned int cmp_off; +}; + +int __init davinci_timer_register(struct clk *clk, + const struct davinci_timer_cfg *data); + +#endif /* __TIMER_DAVINCI_H__ */ diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h new file mode 100644 index 0000000..a5b41f3 --- /dev/null +++ b/include/clocksource/timer-sp804.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CLKSOURCE_TIMER_SP804_H +#define __CLKSOURCE_TIMER_SP804_H + +struct clk; + +int __sp804_clocksource_and_sched_clock_init(void __iomem *, + const char *, struct clk *, int); +int __sp804_clockevents_init(void __iomem *, unsigned int, + struct clk *, const char *); +void sp804_timer_disable(void __iomem *); + +static inline void sp804_clocksource_init(void __iomem *base, const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); +} + +static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); +} + +static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) +{ + __sp804_clockevents_init(base, irq, NULL, name); + +} +#endif diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h new file mode 100644 index 0000000..7d9598d --- /dev/null +++ b/include/clocksource/timer-ti-dm.h @@ -0,0 +1,394 @@ +/* + * OMAP Dual-Mode Timers + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Tarun Kanti DebBarma + * Thara Gopinath + * + * Platform device conversion and hwmod support. + * + * Copyright (C) 2005 Nokia Corporation + * Author: Lauri Leukkunen + * PWM and clock framwork support by Timo Teras. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include + +#ifndef __CLOCKSOURCE_DMTIMER_H +#define __CLOCKSOURCE_DMTIMER_H + +/* clock sources */ +#define OMAP_TIMER_SRC_SYS_CLK 0x00 +#define OMAP_TIMER_SRC_32_KHZ 0x01 +#define OMAP_TIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define OMAP_TIMER_INT_CAPTURE (1 << 2) +#define OMAP_TIMER_INT_OVERFLOW (1 << 1) +#define OMAP_TIMER_INT_MATCH (1 << 0) + +/* trigger types */ +#define OMAP_TIMER_TRIGGER_NONE 0x00 +#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01 +#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 + +/* posted mode types */ +#define OMAP_TIMER_NONPOSTED 0x00 +#define OMAP_TIMER_POSTED 0x01 + +/* timer capabilities used in hwmod database */ +#define OMAP_TIMER_SECURE 0x80000000 +#define OMAP_TIMER_ALWON 0x40000000 +#define OMAP_TIMER_HAS_PWM 0x20000000 +#define OMAP_TIMER_NEEDS_RESET 0x10000000 +#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000 + +/* + * timer errata flags + * + * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This + * errata prevents us from using posted mode on these devices, unless the + * timer counter register is never read. For more details please refer to + * the OMAP3/4/5 errata documents. + */ +#define OMAP_TIMER_ERRATA_I103_I767 0x80000000 + +struct timer_regs { + u32 tidr; + u32 tier; + u32 twer; + u32 tclr; + u32 tcrr; + u32 tldr; + u32 ttrg; + u32 twps; + u32 tmar; + u32 tcar1; + u32 tsicr; + u32 tcar2; + u32 tpir; + u32 tnir; + u32 tcvr; + u32 tocr; + u32 towr; +}; + +struct omap_dm_timer { + int id; + int irq; + struct clk *fclk; + + void __iomem *io_base; + void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */ + void __iomem *irq_ena; /* irq enable */ + void __iomem *irq_dis; /* irq disable, only on v2 ip */ + void __iomem *pend; /* write pending */ + void __iomem *func_base; /* function register base */ + + unsigned long rate; + unsigned reserved:1; + unsigned posted:1; + struct timer_regs context; + int (*get_context_loss_count)(struct device *); + int ctx_loss_count; + int revision; + u32 capability; + u32 errata; + struct platform_device *pdev; + struct list_head node; +}; + +int omap_dm_timer_reserve_systimer(int id); +struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap); + +int omap_dm_timer_get_irq(struct omap_dm_timer *timer); + +u32 omap_dm_timer_modify_idlect_mask(u32 inputmask); + +int omap_dm_timer_trigger(struct omap_dm_timer *timer); + +int omap_dm_timers_active(void); + +/* + * Do not use the defines below, they are not needed. They should be only + * used by dmtimer.c and sys_timer related code. + */ + +/* + * The interrupt registers are different between v1 and v2 ip. + * These registers are offsets from timer->iobase. + */ +#define OMAP_TIMER_ID_OFFSET 0x00 +#define OMAP_TIMER_OCP_CFG_OFFSET 0x10 + +#define OMAP_TIMER_V1_SYS_STAT_OFFSET 0x14 +#define OMAP_TIMER_V1_STAT_OFFSET 0x18 +#define OMAP_TIMER_V1_INT_EN_OFFSET 0x1c + +#define OMAP_TIMER_V2_IRQSTATUS_RAW 0x24 +#define OMAP_TIMER_V2_IRQSTATUS 0x28 +#define OMAP_TIMER_V2_IRQENABLE_SET 0x2c +#define OMAP_TIMER_V2_IRQENABLE_CLR 0x30 + +/* + * The functional registers have a different base on v1 and v2 ip. + * These registers are offsets from timer->func_base. The func_base + * is samae as io_base for v1 and io_base + 0x14 for v2 ip. 
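+ * For example, the counter register at functional offset 0x28 lives at
+ * io_base + 0x28 on a v1 ip, but at io_base + 0x14 + 0x28 on a v2 ip.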
+ * + */ +#define OMAP_TIMER_V2_FUNC_OFFSET 0x14 + +#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20 +#define _OMAP_TIMER_CTRL_OFFSET 0x24 +#define OMAP_TIMER_CTRL_GPOCFG (1 << 14) +#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13) +#define OMAP_TIMER_CTRL_PT (1 << 12) +#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8) +#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8) +#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8) +#define OMAP_TIMER_CTRL_SCPWM (1 << 7) +#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */ +#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */ +#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */ +#define OMAP_TIMER_CTRL_POSTED (1 << 2) +#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */ +#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */ +#define _OMAP_TIMER_COUNTER_OFFSET 0x28 +#define _OMAP_TIMER_LOAD_OFFSET 0x2c +#define _OMAP_TIMER_TRIGGER_OFFSET 0x30 +#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34 +#define WP_NONE 0 /* no write pending bit */ +#define WP_TCLR (1 << 0) +#define WP_TCRR (1 << 1) +#define WP_TLDR (1 << 2) +#define WP_TTGR (1 << 3) +#define WP_TMAR (1 << 4) +#define WP_TPIR (1 << 5) +#define WP_TNIR (1 << 6) +#define WP_TCVR (1 << 7) +#define WP_TOCR (1 << 8) +#define WP_TOWR (1 << 9) +#define _OMAP_TIMER_MATCH_OFFSET 0x38 +#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c +#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40 +#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */ +#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */ +#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */ +#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */ +#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */ +#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */ + +/* register offsets with the write pending bit encoded */ +#define WPSHIFT 16 + +#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \ + | (WP_TCLR << WPSHIFT)) + +#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \ + | (WP_TCRR << WPSHIFT)) + +#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \ + | (WP_TLDR << WPSHIFT)) + +#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \ + | (WP_TTGR << WPSHIFT)) + +#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \ + | (WP_TMAR << WPSHIFT)) + +#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \ + | (WP_TPIR << WPSHIFT)) + +#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \ + | (WP_TNIR << WPSHIFT)) + +#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \ + | (WP_TCVR << WPSHIFT)) + +#define OMAP_TIMER_TICK_INT_MASK_SET_REG \ + (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT)) + +#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \ + (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT)) + +/* + * The below are inlined to optimize code size for system timers. 
Other code + * should not need these at all, see + * include/linux/platform_data/pwm_omap_dmtimer.h + */ +#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS) +static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, + int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + return readl_relaxed(timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, + u32 reg, u32 val, int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + writel_relaxed(val, timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) +{ + u32 tidr; + + /* Assume v1 ip if bits [31:16] are zero */ + tidr = readl_relaxed(timer->io_base); + if (!(tidr >> 16)) { + timer->revision = 1; + timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; + timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; + timer->func_base = timer->io_base; + } else { + timer->revision = 2; + timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; + timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; + timer->pend = timer->io_base + + _OMAP_TIMER_WRITE_PEND_OFFSET + + OMAP_TIMER_V2_FUNC_OFFSET; + timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; + } +} + +/* + * __omap_dm_timer_enable_posted - enables write posted mode + * @timer: pointer to timer instance handle + * + * Enables the write posted mode for the timer. When posted mode is enabled + * writes to certain timer registers are immediately acknowledged by the + * internal bus and hence prevents stalling the CPU waiting for the write to + * complete. Enabling this feature can improve performance for writing to the + * timer registers. + */ +static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) +{ + if (timer->posted) + return; + + if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { + timer->posted = OMAP_TIMER_NONPOSTED; + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); + return; + } + + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, + OMAP_TIMER_CTRL_POSTED, 0); + timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; + timer->posted = OMAP_TIMER_POSTED; +} + +/** + * __omap_dm_timer_override_errata - override errata flags for a timer + * @timer: pointer to timer handle + * @errata: errata flags to be ignored + * + * For a given timer, override a timer errata by clearing the flags + * specified by the errata argument. A specific erratum should only be + * overridden for a timer if the timer is used in such a way the erratum + * has no impact. 
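+ *
+ * For example, a user that never reads the timer counter register can
+ * ignore errata i103/i767 and keep posted mode enabled:
+ *
+ *	__omap_dm_timer_override_errata(timer, OMAP_TIMER_ERRATA_I103_I767);
+ *	__omap_dm_timer_enable_posted(timer);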
+ */ +static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer, + u32 errata) +{ + timer->errata &= ~errata; +} + +static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, + int posted, unsigned long rate) +{ + u32 l; + + l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + if (l & OMAP_TIMER_CTRL_ST) { + l &= ~0x1; + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); +#ifdef CONFIG_ARCH_OMAP2PLUS + /* Readback to make sure write has completed */ + __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + /* + * Wait for functional clock period x 3.5 to make sure that + * timer is stopped + */ + udelay(3500000 / rate + 1); +#endif + } + + /* Ack possibly pending interrupt */ + writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); +} + +static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer, + u32 ctrl, unsigned int load, + int posted) +{ + __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted); + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted); +} + +static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_ena); + __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); +} + +static inline unsigned int +__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) +{ + return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); +} + +static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_stat); +} +#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */ +#endif /* __CLOCKSOURCE_DMTIMER_H */ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h new file mode 100644 index 0000000..d873f99 --- /dev/null +++ b/include/crypto/acompress.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li + * Giovanni Cabiddu + */ +#ifndef _CRYPTO_ACOMP_H +#define _CRYPTO_ACOMP_H +#include + +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 + +/** + * struct acomp_req - asynchronous (de)compression request + * + * @base: Common attributes for asynchronous crypto requests + * @src: Source Data + * @dst: Destination data + * @slen: Size of the input buffer + * @dlen: Size of the output buffer and number of bytes produced + * @flags: Internal flags + * @__ctx: Start of private context data + */ +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_acomp - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the + * algorithm + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct crypto_acomp { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + unsigned int reqsize; + struct crypto_tfm base; +}; + +/** + * struct acomp_alg - asynchronous compression algorithm + * + * @compress: Function performs a compress operation + * @decompress: Function performs a 
de-compress operation + * @dst_free: Frees destination buffer if allocated inside the algorithm + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct acomp_alg { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + int (*init)(struct crypto_acomp *tfm); + void (*exit)(struct crypto_acomp *tfm); + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Asynchronous Compression API + * + * The Asynchronous Compression API is used with the algorithms of type + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) + */ + +/** + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for a compression algorithm. The returned struct + * crypto_acomp is the handle that is required for any subsequent + * API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
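+ *
+ * A minimal usage sketch (the scatterlists and buffer sizes are the
+ * caller's; error handling elided):
+ *
+ *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
+ *	struct acomp_req *req = acomp_request_alloc(tfm);
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				   crypto_req_done, &wait);
+ *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
+ *	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
+ *	acomp_request_free(req);
+ *	crypto_free_acomp(tfm);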
+ */ +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) +{ + return &tfm->base; +} + +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct acomp_alg, base); +} + +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_acomp, base); +} + +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) +{ + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) +{ + return tfm->reqsize; +} + +static inline void acomp_request_set_tfm(struct acomp_req *req, + struct crypto_acomp *tfm) +{ + req->base.tfm = crypto_acomp_tfm(tfm); +} + +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) +{ + return __crypto_acomp_tfm(req->base.tfm); +} + +/** + * crypto_free_acomp() -- free ACOMPRESS tfm handle + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + */ +static inline void crypto_free_acomp(struct crypto_acomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); +} + +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_ACOMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * acomp_request_alloc() -- allocates asynchronous (de)compression request + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * Return: allocated handle in case of success or NULL in case of an error + */ +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); + +/** + * acomp_request_free() -- zeroize and free asynchronous (de)compression + * request as well as the output buffer if allocated + * inside the algorithm + * + * @req: request to free + */ +void acomp_request_free(struct acomp_req *req); + +/** + * acomp_request_set_callback() -- Sets an asynchronous callback + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmlp: callback which will be called + * @data: private data used by the caller + */ +static inline void acomp_request_set_callback(struct acomp_req *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * acomp_request_set_params() -- Sets request parameters + * + * Sets parameters required by an acomp operation + * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @dst: pointer to output buffer scatterlist. If this is NULL, the + * acomp layer will allocate the output memory + * @slen: size of the input buffer + * @dlen: size of the output buffer. 
If dst is NULL, this can be used by + * the user to specify the maximum amount of memory to allocate + */ +static inline void acomp_request_set_params(struct acomp_req *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int slen, + unsigned int dlen) +{ + req->src = src; + req->dst = dst; + req->slen = slen; + req->dlen = dlen; + + if (!req->dst) + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; +} + +/** + * crypto_acomp_compress() -- Invoke asynchronous compress operation + * + * Function invokes the asynchronous compress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_compress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; + int ret; + + crypto_stats_get(alg); + ret = tfm->compress(req); + crypto_stats_compress(slen, ret, alg); + return ret; +} + +/** + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation + * + * Function invokes the asynchronous decompress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_decompress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; + int ret; + + crypto_stats_get(alg); + ret = tfm->decompress(req); + crypto_stats_decompress(slen, ret, alg); + return ret; +} + +#endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h new file mode 100644 index 0000000..3c245b1 --- /dev/null +++ b/include/crypto/aead.h @@ -0,0 +1,505 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu + */ + +#ifndef _CRYPTO_AEAD_H +#define _CRYPTO_AEAD_H + +#include +#include +#include + +/** + * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API + * + * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD + * (listed as type "aead" in /proc/crypto) + * + * The most prominent examples for this type of encryption is GCM and CCM. + * However, the kernel supports other types of AEAD ciphers which are defined + * with the following cipher string: + * + * authenc(keyed message digest, block cipher) + * + * For example: authenc(hmac(sha256), cbc(aes)) + * + * The example code provided for the symmetric key cipher operation + * applies here as well. Naturally all *skcipher* symbols must be exchanged + * the *aead* pendants discussed in the following. In addition, for the AEAD + * operation, the aead_request_set_ad function must be used to set the + * pointer to the associated data memory location before performing the + * encryption or decryption operation. In case of an encryption, the associated + * data memory is filled during the encryption operation. For decryption, the + * associated data memory must contain data that is used to verify the integrity + * of the decrypted data. Another deviation from the asynchronous block cipher + * operation is that the caller should explicitly check for -EBADMSG of the + * crypto_aead_decrypt. That error indicates an authentication error, i.e. + * a breach in the integrity of the message. In essence, that -EBADMSG error + * code is the key bonus an AEAD cipher has over "standard" block chaining + * modes. 
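+ *
+ * A minimal encryption sketch (key, iv and the scatterlist are set up
+ * by the caller; error handling elided):
+ *
+ *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	crypto_aead_setkey(tfm, key, 16);
+ *	crypto_aead_setauthsize(tfm, 16);
+ *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				  crypto_req_done, &wait);
+ *	aead_request_set_ad(req, assoclen);
+ *	aead_request_set_crypt(req, sg, sg, ptlen, iv);
+ *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);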
+ * + * Memory Structure: + * + * To support the needs of the most prominent user of AEAD ciphers, namely + * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere + * to. + * + * The scatter list pointing to the input data must contain: + * + * * for RFC4106 ciphers, the concatenation of + * associated authentication data || IV || plaintext or ciphertext. Note, the + * same IV (buffer) is also set with the aead_request_set_crypt call. Note, + * the API call of aead_request_set_ad must provide the length of the AAD and + * the IV. The API call of aead_request_set_crypt only points to the size of + * the input plaintext or ciphertext. + * + * * for "normal" AEAD ciphers, the concatenation of + * associated authentication data || plaintext or ciphertext. + * + * It is important to note that if multiple scatter gather list entries form + * the input data mentioned above, the first entry must not point to a NULL + * buffer. If there is any potential where the AAD buffer can be NULL, the + * calling code must contain a precaution to ensure that this does not result + * in the first scatter gather list entry pointing to a NULL buffer. + */ + +struct crypto_aead; + +/** + * struct aead_request - AEAD request + * @base: Common attributes for async crypto requests + * @assoclen: Length in bytes of associated data for authentication + * @cryptlen: Length of data to be encrypted or decrypted + * @iv: Initialisation vector + * @src: Source data + * @dst: Destination data + * @__ctx: Start of private context data + */ +struct aead_request { + struct crypto_async_request base; + + unsigned int assoclen; + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct aead_alg - AEAD cipher definition + * @maxauthsize: Set the maximum authentication tag size supported by the + * transformation. A transformation may support smaller tag sizes. + * As the authentication tag is a message digest to ensure the + * integrity of the encrypted data, a consumer typically wants the + * largest authentication tag possible as defined by this + * variable. + * @setauthsize: Set authentication size for the AEAD transformation. This + * function is used to specify the consumer requested size of the + * authentication tag to be either generated by the transformation + * during encryption or the size of the authentication tag to be + * supplied during the decryption operation. This function is also + * responsible for checking the authentication tag size for + * validity. + * @setkey: see struct skcipher_alg + * @encrypt: see struct skcipher_alg + * @decrypt: see struct skcipher_alg + * @ivsize: see struct skcipher_alg + * @chunksize: see struct skcipher_alg + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @base: Definition of a generic crypto cipher algorithm. 
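+ *
+ * A skeletal definition (all foo_* names are hypothetical driver
+ * callbacks, not part of this API):
+ *
+ *	static struct aead_alg foo_gcm = {
+ *		.setkey		= foo_setkey,
+ *		.setauthsize	= foo_setauthsize,
+ *		.encrypt	= foo_encrypt,
+ *		.decrypt	= foo_decrypt,
+ *		.ivsize		= 12,
+ *		.maxauthsize	= 16,
+ *		.base = {
+ *			.cra_name	= "gcm(aes)",
+ *			.cra_driver_name = "gcm-aes-foo",
+ *			.cra_blocksize	= 1,
+ *			.cra_ctxsize	= sizeof(struct foo_ctx),
+ *			.cra_module	= THIS_MODULE,
+ *		},
+ *	};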
+ * + * All fields except @ivsize is mandatory and must be filled. + */ +struct aead_alg { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + int (*init)(struct crypto_aead *tfm); + void (*exit)(struct crypto_aead *tfm); + + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + + struct crypto_tfm base; +}; + +static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_aead, base); +} + +/** + * crypto_alloc_aead() - allocate AEAD cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * AEAD cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an AEAD. The returned struct + * crypto_aead is the cipher handle that is required for any subsequent + * API invocation for that AEAD. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_aead() - zeroize and free aead handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_aead(struct crypto_aead *tfm) +{ + crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); +} + +static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) +{ + return container_of(crypto_aead_tfm(tfm)->__crt_alg, + struct aead_alg, base); +} + +static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) +{ + return alg->ivsize; +} + +/** + * crypto_aead_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the aead referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_ivsize(crypto_aead_alg(tfm)); +} + +/** + * crypto_aead_authsize() - obtain maximum authentication data size + * @tfm: cipher handle + * + * The maximum size of the authentication data for the AEAD cipher referenced + * by the AEAD cipher handle is returned. The authentication data size may be + * zero if the cipher implements a hard-coded maximum. + * + * The authentication data may also be known as "tag value". + * + * Return: authentication data size / tag size in bytes + */ +static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) +{ + return tfm->authsize; +} + +/** + * crypto_aead_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the AEAD referenced with the cipher handle is returned. 
+ * The caller may use that information to allocate appropriate memory for the + * data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); +} + +static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); +} + +static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) +{ + return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); +} + +static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); +} + +static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); +} + +/** + * crypto_aead_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the AEAD referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_aead_setauthsize() - set authentication data size + * @tfm: cipher handle + * @authsize: size of the authentication data / tag in bytes + * + * Set the authentication data size / tag size. AEAD requires an authentication + * tag (or MAC) in addition to the associated data. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + +static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) +{ + return __crypto_aead_cast(req->base.tfm); +} + +/** + * crypto_aead_encrypt() - encrypt plaintext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The encryption operation creates the authentication data / + * tag. That data is concatenated with the created ciphertext. + * The ciphertext memory size is therefore the given number of + * block cipher blocks + the size defined by the + * crypto_aead_setauthsize invocation. The caller must ensure + * that sufficient memory is available for the ciphertext and + * the authentication tag. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_aead_encrypt(struct aead_request *req); + +/** + * crypto_aead_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the + * authentication data / tag. 
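+ * (For example, with a 16 byte tag, decrypting 64 bytes of ciphertext
+ * means passing cryptlen == 64 + 16 == 80 to aead_request_set_crypt().)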
That authentication data / tag + * must have the size defined by the crypto_aead_setauthsize + * invocation. + * + * + * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD + * cipher operation performs the authentication of the data during the + * decryption operation. Therefore, the function returns this error if + * the authentication of the ciphertext was unsuccessful (i.e. the + * integrity of the ciphertext or the associated data was violated); + * < 0 if an error occurred. + */ +int crypto_aead_decrypt(struct aead_request *req); + +/** + * DOC: Asynchronous AEAD Request Handle + * + * The aead_request data structure contains all pointers to data required for + * the AEAD cipher operation. This includes the cipher handle (which can be + * used by multiple aead_request instances), pointer to plaintext and + * ciphertext, asynchronous callback function, etc. It acts as a handle to the + * aead_request_* API calls in a similar way as AEAD handle to the + * crypto_aead_* API calls. + */ + +/** + * crypto_aead_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return tfm->reqsize; +} + +/** + * aead_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing aead handle in the request + * data structure with a different one. + */ +static inline void aead_request_set_tfm(struct aead_request *req, + struct crypto_aead *tfm) +{ + req->base.tfm = crypto_aead_tfm(tfm); +} + +/** + * aead_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the AEAD + * encrypt and decrypt API calls. During the allocation, the provided aead + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, + gfp_t gfp) +{ + struct aead_request *req; + + req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_request_set_tfm(req, tfm); + + return req; +} + +/** + * aead_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void aead_request_free(struct aead_request *req) +{ + kzfree(req); +} + +/** + * aead_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. 
The callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function sets the callback that is triggered once the cipher
+ * operation completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template::
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void aead_request_set_callback(struct aead_request *req,
+					     u32 flags,
+					     crypto_completion_t compl,
+					     void *data)
+{
+	req->base.complete = compl;
+	req->base.data = data;
+	req->base.flags = flags;
+}
+
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *	by crypto_aead_ivsize()
+ *
+ * Setting the source data and destination data scatter / gather lists which
+ * hold the associated data concatenated with the plaintext or ciphertext. See
+ * below for the authentication tag.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ *
+ * The memory structure for cipher operation has the following structure:
+ *
+ * - AEAD encryption input:  assoc data || plaintext
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
+ * - AEAD decryption input:  assoc data || ciphertext || auth tag
+ * - AEAD decryption output: assoc data || plaintext
+ *
+ * The kernel requires the presence of the AAD buffer; however, it does not
+ * fill the AAD buffer in the output case. If the caller wants that buffer
+ * filled, the caller must use an in-place cipher operation (i.e. the same
+ * memory location for the input and the output).
+ */
+static inline void aead_request_set_crypt(struct aead_request *req,
+					  struct scatterlist *src,
+					  struct scatterlist *dst,
+					  unsigned int cryptlen, u8 *iv)
+{
+	req->src = src;
+	req->dst = dst;
+	req->cryptlen = cryptlen;
+	req->iv = iv;
+}
+
+/**
+ * aead_request_set_ad - set associated data information
+ * @req: request handle
+ * @assoclen: number of bytes in associated data
+ *
+ * Setting the AD information. This function sets the length of
+ * the associated data.
+ */
+static inline void aead_request_set_ad(struct aead_request *req,
+				       unsigned int assoclen)
+{
+	req->assoclen = assoclen;
+}
+
+#endif /* _CRYPTO_AEAD_H */
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
new file mode 100644
index 0000000..2090729
--- /dev/null
+++ b/include/crypto/aes.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for AES algorithms
+ */
+
+#ifndef _CRYPTO_AES_H
+#define _CRYPTO_AES_H
+
+#include
+#include
+
+#define AES_MIN_KEY_SIZE	16
+#define AES_MAX_KEY_SIZE	32
+#define AES_KEYSIZE_128		16
+#define AES_KEYSIZE_192		24
+#define AES_KEYSIZE_256		32
+#define AES_BLOCK_SIZE		16
+#define AES_MAX_KEYLENGTH	(15 * 16)
+#define AES_MAX_KEYLENGTH_U32	(AES_MAX_KEYLENGTH / sizeof(u32))
+
+/*
+ * Please ensure that the first two fields are 16-byte aligned
+ * relative to the start of the structure, i.e., don't move them!
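+ *
+ * A minimal sketch of the library interface declared below: expand a
+ * key, encrypt one block, wipe the schedule (the key and data pointers
+ * are hypothetical)::
+ *
+ *	struct crypto_aes_ctx ctx;
+ *	int err;
+ *
+ *	err = aes_expandkey(&ctx, key, key_len);
+ *	if (err)
+ *		return err;
+ *	aes_encrypt(&ctx, out, in);
+ *	memzero_explicit(&ctx, sizeof(ctx));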
+ */ +struct crypto_aes_ctx { + u32 key_enc[AES_MAX_KEYLENGTH_U32]; + u32 key_dec[AES_MAX_KEYLENGTH_U32]; + u32 key_length; +}; + +extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; +extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; + +/* + * validate key length for AES algorithms + */ +static inline int aes_check_keylen(unsigned int keylen) +{ + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + return -EINVAL; + } + + return 0; +} + +int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); + +/** + * aes_expandkey - Expands the AES key as described in FIPS-197 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes + * key schedule plus a 16 bytes key which is used before the first round). + * The decryption key is prepared for the "Equivalent Inverse Cipher" as + * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is + * for the initial combination, the second slot for the first round and so on. + */ +int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +/** + * aes_encrypt - Encrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the ciphertext + * @in: Buffer containing the plaintext + */ +void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +/** + * aes_decrypt - Decrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the plaintext + * @in: Buffer containing the ciphertext + */ +void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +extern const u8 crypto_aes_sbox[]; +extern const u8 crypto_aes_inv_sbox[]; + +#endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h new file mode 100644 index 0000000..6924b09 --- /dev/null +++ b/include/crypto/akcipher.h @@ -0,0 +1,416 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + */ +#ifndef _CRYPTO_AKCIPHER_H +#define _CRYPTO_AKCIPHER_H +#include + +/** + * struct akcipher_request - public key request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * For verify op this is signature + digest, in that case + * total size of @src is @src_len + @dst_len. + * @dst: Destination data (Should be NULL for verify op) + * @src_len: Size of the input buffer + * For verify op it's size of signature part of @src, this part + * is supposed to be operated by cipher. + * @dst_len: Size of @dst buffer (for all ops except verify). + * It needs to be at least as big as the expected result + * depending on the operation. + * After operation it will be updated with the actual size of the + * result. + * In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. + * For verify op this is size of digest part in @src. 
+ * @__ctx: Start of private context data + */ +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_akcipher - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_akcipher { + struct crypto_tfm base; +}; + +/** + * struct akcipher_alg - generic public key algorithm + * + * @sign: Function performs a sign operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @verify: Function performs a complete verify operation as defined by + * public key algorithm, returning verification status. Requires + * digest value as input parameter. + * @encrypt: Function performs an encrypt operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @decrypt: Function performs a decrypt operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @set_pub_key: Function invokes the algorithm specific set public key + * function, which knows how to decode and interpret + * the BER encoded public key and parameters + * @set_priv_key: Function invokes the algorithm specific set private key + * function, which knows how to decode and interpret + * the BER encoded private key and parameters + * @max_size: Function returns dest buffer size required for a given key. + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. 
+ * + * @reqsize: Request context size required by algorithm implementation + * @base: Common crypto API algorithm data structure + */ +struct akcipher_alg { + int (*sign)(struct akcipher_request *req); + int (*verify)(struct akcipher_request *req); + int (*encrypt)(struct akcipher_request *req); + int (*decrypt)(struct akcipher_request *req); + int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + unsigned int (*max_size)(struct crypto_akcipher *tfm); + int (*init)(struct crypto_akcipher *tfm); + void (*exit)(struct crypto_akcipher *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Public Key API + * + * The Public Key API is used with the algorithms of type + * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto) + */ + +/** + * crypto_alloc_akcipher() - allocate AKCIPHER tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * public key algorithm e.g. "rsa" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for public key algorithm. The returned struct + * crypto_akcipher is the handle that is required for any subsequent + * API invocation for the public key operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_akcipher_tfm( + struct crypto_akcipher *tfm) +{ + return &tfm->base; +} + +static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct akcipher_alg, base); +} + +static inline struct crypto_akcipher *__crypto_akcipher_tfm( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_akcipher, base); +} + +static inline struct akcipher_alg *crypto_akcipher_alg( + struct crypto_akcipher *tfm) +{ + return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_alg(tfm)->reqsize; +} + +static inline void akcipher_request_set_tfm(struct akcipher_request *req, + struct crypto_akcipher *tfm) +{ + req->base.tfm = crypto_akcipher_tfm(tfm); +} + +static inline struct crypto_akcipher *crypto_akcipher_reqtfm( + struct akcipher_request *req) +{ + return __crypto_akcipher_tfm(req->base.tfm); +} + +/** + * crypto_free_akcipher() - free AKCIPHER tfm handle + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + */ +static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm)); +} + +/** + * akcipher_request_alloc() - allocates public key request + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. 
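+ *
+ * A minimal allocation sketch with a synchronous-wait callback (the
+ * tfm is assumed to come from crypto_alloc_akcipher())::
+ *
+ *	struct akcipher_request *req;
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				      crypto_req_done, &wait);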
+ */ +static inline struct akcipher_request *akcipher_request_alloc( + struct crypto_akcipher *tfm, gfp_t gfp) +{ + struct akcipher_request *req; + + req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp); + if (likely(req)) + akcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * akcipher_request_free() - zeroize and free public key request + * + * @req: request to free + */ +static inline void akcipher_request_free(struct akcipher_request *req) +{ + kzfree(req); +} + +/** + * akcipher_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void akcipher_request_set_callback(struct akcipher_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * akcipher_request_set_crypt() - Sets request parameters + * + * Sets parameters required by crypto operation + * + * @req: public key request + * @src: ptr to input scatter list + * @dst: ptr to output scatter list or NULL for verify op + * @src_len: size of the src input scatter list to be processed + * @dst_len: size of the dst output scatter list or size of signature + * portion in @src for verify op + */ +static inline void akcipher_request_set_crypt(struct akcipher_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int src_len, + unsigned int dst_len) +{ + req->src = src; + req->dst = dst; + req->src_len = src_len; + req->dst_len = dst_len; +} + +/** + * crypto_akcipher_maxsize() - Get len for output buffer + * + * Function returns the dest buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. 
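+ *
+ * A minimal sketch of the safe call order (key material and names are
+ * hypothetical): set the key first, only then size the output buffer::
+ *
+ *	err = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
+ *	if (err)
+ *		return err;
+ *	out_len = crypto_akcipher_maxsize(tfm);
+ *	out_buf = kmalloc(out_len, GFP_KERNEL);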
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ */
+static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
+{
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->max_size(tfm);
+}
+
+/**
+ * crypto_akcipher_encrypt() - Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+	struct crypto_alg *calg = tfm->base.__crt_alg;
+	unsigned int src_len = req->src_len;
+	int ret;
+
+	crypto_stats_get(calg);
+	ret = alg->encrypt(req);
+	crypto_stats_akcipher_encrypt(src_len, ret, calg);
+	return ret;
+}
+
+/**
+ * crypto_akcipher_decrypt() - Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+	struct crypto_alg *calg = tfm->base.__crt_alg;
+	unsigned int src_len = req->src_len;
+	int ret;
+
+	crypto_stats_get(calg);
+	ret = alg->decrypt(req);
+	crypto_stats_akcipher_decrypt(src_len, ret, calg);
+	return ret;
+}
+
+/**
+ * crypto_akcipher_sign() - Invoke public key sign operation
+ *
+ * Function invokes the specific public key sign operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_sign(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+	struct crypto_alg *calg = tfm->base.__crt_alg;
+	int ret;
+
+	crypto_stats_get(calg);
+	ret = alg->sign(req);
+	crypto_stats_akcipher_sign(ret, calg);
+	return ret;
+}
+
+/**
+ * crypto_akcipher_verify() - Invoke public key signature verification
+ *
+ * Function invokes the specific public key signature verification operation
+ * for a given public key algorithm.
+ *
+ * @req: asymmetric key request
+ *
+ * Note: req->dst should be NULL, req->src should point to SG of size
+ * (req->src_len + req->dst_len), containing signature (of req->src_len
+ * length) with appended digest (of req->dst_len length).
+ *
+ * Return: zero on verification success; error code in case of error.
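+ *
+ * A minimal sketch of a verify call (sg, sig_len, digest_len and wait
+ * are hypothetical; sg holds signature || digest and the public key is
+ * assumed to be set already)::
+ *
+ *	akcipher_request_set_crypt(req, sg, NULL, sig_len, digest_len);
+ *	err = crypto_wait_req(crypto_akcipher_verify(req), &wait);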
+ */ +static inline int crypto_akcipher_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->verify(req); + crypto_stats_akcipher_verify(ret, calg); + return ret; +} + +/** + * crypto_akcipher_set_pub_key() - Invoke set public key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key and parameters + * + * @tfm: tfm handle + * @key: BER encoded public key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_pub_key(tfm, key, keylen); +} + +/** + * crypto_akcipher_set_priv_key() - Invoke set private key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key and parameters + * + * @tfm: tfm handle + * @key: BER encoded private key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_priv_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_priv_key(tfm, key, keylen); +} +#endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h new file mode 100644 index 0000000..e5bd302 --- /dev/null +++ b/include/crypto/algapi.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API for algorithms (i.e., low-level API). + * + * Copyright (c) 2006 Herbert Xu + */ +#ifndef _CRYPTO_ALGAPI_H +#define _CRYPTO_ALGAPI_H + +#include +#include +#include +#include + +/* + * Maximum values for blocksize and alignmask, used to allocate + * static buffers that are big enough for any combination of + * algs and architectures. Ciphers have a lower maximum size. 
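+ *
+ * A minimal sketch of what these maxima are for: a stack buffer that is
+ * guaranteed to hold one block of any cipher, as the CBC/CTR helpers
+ * further below use it::
+ *
+ *	u8 buf[MAX_CIPHER_BLOCKSIZE];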
+ */ +#define MAX_ALGAPI_BLOCKSIZE 160 +#define MAX_ALGAPI_ALIGNMASK 63 +#define MAX_CIPHER_BLOCKSIZE 16 +#define MAX_CIPHER_ALIGNMASK 15 + +struct crypto_aead; +struct crypto_instance; +struct module; +struct rtattr; +struct seq_file; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_instance { + struct crypto_alg alg; + + struct crypto_template *tmpl; + struct hlist_node list; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + + struct crypto_instance *(*alloc)(struct rtattr **tb); + void (*free)(struct crypto_instance *inst); + int (*create)(struct crypto_template *tmpl, struct rtattr **tb); + + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + struct crypto_instance *inst; + const struct crypto_type *frontend; + u32 mask; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + + unsigned int qlen; + unsigned int max_qlen; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct blkcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + u8 *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + void *page; + u8 *buffer; + u8 *iv; + unsigned int ivsize; + + int flags; + unsigned int walk_blocksize; + unsigned int cipher_blocksize; + unsigned int alignmask; +}; + +struct ablkcipher_walk { + struct { + struct page *page; + unsigned int offset; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *iv_buffer; + u8 *iv; + int flags; + unsigned int blocksize; +}; + +extern const struct crypto_type crypto_ablkcipher_type; +extern const struct crypto_type crypto_blkcipher_type; + +void crypto_mod_put(struct crypto_alg *alg); + +int crypto_register_template(struct crypto_template *tmpl); +int crypto_register_templates(struct crypto_template *tmpls, int count); +void crypto_unregister_template(struct crypto_template *tmpl); +void crypto_unregister_templates(struct crypto_template *tmpls, int count); +struct crypto_template *crypto_lookup_template(const char *name); + +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst); +int crypto_unregister_instance(struct crypto_instance *inst); + +int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, u32 mask); +int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, + const struct crypto_type *frontend); +int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, + u32 type, u32 mask); + +void crypto_drop_spawn(struct crypto_spawn *spawn); +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, + u32 mask); +void *crypto_spawn_tfm2(struct crypto_spawn 
*spawn); + +static inline void crypto_set_spawn(struct crypto_spawn *spawn, + struct crypto_instance *inst) +{ + spawn->inst = inst; +} + +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); +int crypto_check_attr_type(struct rtattr **tb, u32 type); +const char *crypto_attr_alg_name(struct rtattr *rta); +struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, + const struct crypto_type *frontend, + u32 type, u32 mask); + +static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, + u32 type, u32 mask) +{ + return crypto_attr_alg2(rta, NULL, type, mask); +} + +int crypto_attr_u32(struct rtattr *rta, u32 *num); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg); +void *crypto_alloc_instance(const char *name, struct crypto_alg *alg, + unsigned int head); + +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request); +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); +static inline unsigned int crypto_queue_len(struct crypto_queue *queue) +{ + return queue->qlen; +} + +void crypto_inc(u8 *a, unsigned int size); +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size); + +static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s = (unsigned long *)src; + + while (size > 0) { + *d++ ^= *s++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, dst, src, size); + } +} + +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + + while (size > 0) { + *d++ = *s1++ ^ *s2++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, src1, src2, size); + } +} + +int blkcipher_walk_done(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, int err); +int blkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_phys(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + unsigned int blocksize); +int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_aead *tfm, + unsigned int blocksize); + +int ablkcipher_walk_done(struct ablkcipher_request *req, + struct ablkcipher_walk *walk, int err); +int ablkcipher_walk_phys(struct ablkcipher_request *req, + struct ablkcipher_walk *walk); +void __ablkcipher_walk_complete(struct ablkcipher_walk *walk); + +static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) +{ + return PTR_ALIGN(crypto_tfm_ctx(tfm), + crypto_tfm_alg_alignmask(tfm) + 1); +} + +static inline struct crypto_instance *crypto_tfm_alg_instance( + struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct crypto_instance, alg); +} + +static inline void *crypto_instance_ctx(struct crypto_instance *inst) +{ + return inst->__ctx; +} + +static inline struct ablkcipher_alg *crypto_ablkcipher_alg( + struct 
crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; +} + +static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_blkcipher *crypto_spawn_blkcipher( + struct crypto_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + +static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +static inline void blkcipher_walk_init(struct blkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; +} + +static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; + INIT_LIST_HEAD(&walk->buffers); +} + +static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk) +{ + if (unlikely(!list_empty(&walk->buffers))) + __ablkcipher_walk_complete(walk); +} + +static inline struct crypto_async_request *crypto_get_backlog( + struct crypto_queue *queue) +{ + return queue->backlog == &queue->list ? NULL : + container_of(queue->backlog, struct crypto_async_request, list); +} + +static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, + struct ablkcipher_request *request) +{ + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ablkcipher_request *ablkcipher_dequeue_request( + struct crypto_queue *queue) +{ + return ablkcipher_request_cast(crypto_dequeue_request(queue)); +} + +static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) +{ + return req->__ctx; +} + +static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, + u32 type, u32 mask) +{ + return crypto_attr_alg(tb[1], type, mask); +} + +static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +{ + return (type ^ off) & mask & off; +} + +/* + * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. + * Otherwise returns zero. + */ +static inline int crypto_requires_sync(u32 type, u32 mask) +{ + return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); +} + +noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); + +/** + * crypto_memneq - Compare two areas of memory without leaking + * timing information. + * + * @a: One area of memory + * @b: Another area of memory + * @size: The size of the area. + * + * Returns 0 when data is equal, 1 otherwise. + */ +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 
1 : 0; +} + +static inline void crypto_yield(u32 flags) +{ + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) + cond_resched(); +} + +int crypto_register_notifier(struct notifier_block *nb); +int crypto_unregister_notifier(struct notifier_block *nb); + +/* Crypto notification events. */ +enum { + CRYPTO_MSG_ALG_REQUEST, + CRYPTO_MSG_ALG_REGISTER, + CRYPTO_MSG_ALG_LOADED, +}; + +#endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h new file mode 100644 index 0000000..f3c22fe --- /dev/null +++ b/include/crypto/arc4.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Common values for ARC4 Cipher Algorithm + */ + +#ifndef _CRYPTO_ARC4_H +#define _CRYPTO_ARC4_H + +#include + +#define ARC4_MIN_KEY_SIZE 1 +#define ARC4_MAX_KEY_SIZE 256 +#define ARC4_BLOCK_SIZE 1 + +struct arc4_ctx { + u32 S[256]; + u32 x, y; +}; + +int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len); +void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len); + +#endif /* _CRYPTO_ARC4_H */ diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h new file mode 100644 index 0000000..48198c3 --- /dev/null +++ b/include/crypto/asym_tpm_subtype.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _LINUX_ASYM_TPM_SUBTYPE_H +#define _LINUX_ASYM_TPM_SUBTYPE_H + +#include + +struct tpm_key { + void *blob; + u32 blob_len; + uint16_t key_len; /* Size in bits of the key */ + const void *pub_key; /* pointer inside blob to the public key bytes */ + uint16_t pub_key_len; /* length of the public key */ +}; + +struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len); + +extern struct asymmetric_key_subtype asym_tpm_subtype; + +#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h new file mode 100644 index 0000000..5f92a98 --- /dev/null +++ b/include/crypto/authenc.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Authenc: Simple AEAD wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu + */ +#ifndef _CRYPTO_AUTHENC_H +#define _CRYPTO_AUTHENC_H + +#include + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC, + CRYPTO_AUTHENC_KEYA_PARAM, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + + unsigned int authkeylen; + unsigned int enckeylen; +}; + +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); + +#endif /* _CRYPTO_AUTHENC_H */ diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h new file mode 100644 index 0000000..0b8e6bc --- /dev/null +++ b/include/crypto/b128ops.h @@ -0,0 +1,80 @@ +/* b128ops.h - common 128-bit block operations + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006, Rik Snel + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. 
+ + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 13/06/2006 +*/ + +#ifndef _CRYPTO_B128OPS_H +#define _CRYPTO_B128OPS_H + +#include + +typedef struct { + u64 a, b; +} u128; + +typedef struct { + __be64 a, b; +} be128; + +typedef struct { + __le64 b, a; +} le128; + +static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) +{ + r->a = p->a ^ q->a; + r->b = p->b ^ q->b; +} + +static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +#endif /* _CRYPTO_B128OPS_H */ diff --git a/include/crypto/blowfish.h b/include/crypto/blowfish.h new file mode 100644 index 0000000..9b38467 --- /dev/null +++ b/include/crypto/blowfish.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for blowfish algorithms + */ + +#ifndef _CRYPTO_BLOWFISH_H +#define _CRYPTO_BLOWFISH_H + +#include +#include + +#define BF_BLOCK_SIZE 8 +#define BF_MIN_KEY_SIZE 4 +#define BF_MAX_KEY_SIZE 56 + +struct bf_ctx { + u32 p[18]; + u32 s[1024]; +}; + +int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len); + +#endif diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h new file mode 100644 index 0000000..3d4ed4e --- /dev/null +++ b/include/crypto/cast5.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST5_H +#define _CRYPTO_CAST5_H + +#include +#include +#include + +#define CAST5_BLOCK_SIZE 8 +#define CAST5_MIN_KEY_SIZE 5 +#define CAST5_MAX_KEY_SIZE 16 + +struct cast5_ctx { + u32 Km[16]; + u8 Kr[16]; + int rr; /* rr ? 
rounds = 12 : rounds = 16; (rfc 2144) */ +}; + +int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); +void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h new file mode 100644 index 0000000..c71f6ef --- /dev/null +++ b/include/crypto/cast6.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST6_H +#define _CRYPTO_CAST6_H + +#include +#include +#include + +#define CAST6_BLOCK_SIZE 16 +#define CAST6_MIN_KEY_SIZE 16 +#define CAST6_MAX_KEY_SIZE 32 + +struct cast6_ctx { + u32 Km[12][4]; + u8 Kr[12][4]; +}; + +int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, + unsigned int keylen, u32 *flags); +int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h new file mode 100644 index 0000000..b900902 --- /dev/null +++ b/include/crypto/cast_common.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST_COMMON_H +#define _CRYPTO_CAST_COMMON_H + +extern const u32 cast_s1[256]; +extern const u32 cast_s2[256]; +extern const u32 cast_s3[256]; +extern const u32 cast_s4[256]; + +#endif diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h new file mode 100644 index 0000000..2b6422d --- /dev/null +++ b/include/crypto/cbc.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CBC: Cipher Block Chaining mode + * + * Copyright (c) 2016 Herbert Xu + */ + +#ifndef _CRYPTO_CBC_H +#define _CRYPTO_CBC_H + +#include +#include +#include + +static inline int crypto_cbc_encrypt_segment( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 *iv = walk->iv; + + do { + crypto_xor(iv, src, bsize); + fn(tfm, iv, dst); + memcpy(iv, dst, bsize); + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + return nbytes; +} + +static inline int crypto_cbc_encrypt_inplace( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *iv = walk->iv; + + do { + crypto_xor(src, iv, bsize); + fn(tfm, src, src); + iv = src; + + src += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; +} + +static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req, + void (*fn)(struct crypto_skcipher *, + const u8 *, u8 *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_walk walk; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + if (walk.src.virt.addr == walk.dst.virt.addr) + err = crypto_cbc_encrypt_inplace(&walk, tfm, fn); + else + err = crypto_cbc_encrypt_segment(&walk, tfm, fn); + err = skcipher_walk_done(&walk, err); + } + + return err; +} + +static inline int crypto_cbc_decrypt_segment( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, 
const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 *iv = walk->iv; + + do { + fn(tfm, src, dst); + crypto_xor(dst, iv, bsize); + iv = src; + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; +} + +static inline int crypto_cbc_decrypt_inplace( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + unsigned int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 last_iv[MAX_CIPHER_BLOCKSIZE]; + + /* Start of the last block. */ + src += nbytes - (nbytes & (bsize - 1)) - bsize; + memcpy(last_iv, src, bsize); + + for (;;) { + fn(tfm, src, src); + if ((nbytes -= bsize) < bsize) + break; + crypto_xor(src, src - bsize, bsize); + src -= bsize; + } + + crypto_xor(src, walk->iv, bsize); + memcpy(walk->iv, last_iv, bsize); + + return nbytes; +} + +static inline int crypto_cbc_decrypt_blocks( + struct skcipher_walk *walk, struct crypto_skcipher *tfm, + void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ + if (walk->src.virt.addr == walk->dst.virt.addr) + return crypto_cbc_decrypt_inplace(walk, tfm, fn); + else + return crypto_cbc_decrypt_segment(walk, tfm, fn); +} + +#endif /* _CRYPTO_CBC_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h new file mode 100644 index 0000000..d1e723c --- /dev/null +++ b/include/crypto/chacha.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the ChaCha and XChaCha stream ciphers. + * + * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's + * security. Here they share the same key size, tfm context, and setkey + * function; only their IV size and encrypt/decrypt function differ. + * + * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is + * recommended to use the 20-round variant ChaCha20. However, the other + * variants can be needed in some performance-sensitive scenarios. The generic + * ChaCha code currently allows only the 20 and 12-round variants. 
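+ *
+ * A minimal sketch of producing one raw keystream block (RFC 7539 state
+ * layout: four constant words, eight key words, one counter word, three
+ * nonce words; key, nonce and stream are hypothetical). In-tree users
+ * would normally go through crypto_chacha_init() instead::
+ *
+ *	u32 state[16] = {
+ *		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
+ *		key[0], key[1], key[2], key[3],
+ *		key[4], key[5], key[6], key[7],
+ *		0, nonce[0], nonce[1], nonce[2],
+ *	};
+ *
+ *	chacha20_block(state, stream);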
+ */ + +#ifndef _CRYPTO_CHACHA_H +#define _CRYPTO_CHACHA_H + +#include +#include +#include + +/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */ +#define CHACHA_IV_SIZE 16 + +#define CHACHA_KEY_SIZE 32 +#define CHACHA_BLOCK_SIZE 64 +#define CHACHAPOLY_IV_SIZE 12 + +/* 192-bit nonce, then 64-bit stream position */ +#define XCHACHA_IV_SIZE 32 + +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +void chacha_block(u32 *state, u8 *stream, int nrounds); +static inline void chacha20_block(u32 *state, u8 *stream) +{ + chacha_block(state, stream, 20); +} +void hchacha_block(const u32 *in, u32 *out, int nrounds); + +void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); + +int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); +int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); + +int crypto_chacha_crypt(struct skcipher_request *req); +int crypto_xchacha_crypt(struct skcipher_request *req); + +#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h new file mode 100644 index 0000000..23169f4 --- /dev/null +++ b/include/crypto/cryptd.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Software async crypto daemon + * + * Added AEAD support to cryptd. + * Authors: Tadeusz Struk (tadeusz.struk@intel.com) + * Adrian Hoban + * Gabriele Paoloni + * Aidan O'Mahony (aidan.o.mahony@intel.com) + * Copyright (c) 2010, Intel Corporation. + */ + +#ifndef _CRYPTO_CRYPT_H +#define _CRYPTO_CRYPT_H + +#include +#include +#include +#include + +struct cryptd_skcipher { + struct crypto_skcipher base; +}; + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, + u32 type, u32 mask); +struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); +void cryptd_free_skcipher(struct cryptd_skcipher *tfm); + +struct cryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct cryptd_ahash *__cryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct cryptd_ahash *)tfm; +} + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); +struct shash_desc *cryptd_shash_desc(struct ahash_request *req); +/* Must be called without moving CPUs. */ +bool cryptd_ahash_queued(struct cryptd_ahash *tfm); +void cryptd_free_ahash(struct cryptd_ahash *tfm); + +struct cryptd_aead { + struct crypto_aead base; +}; + +static inline struct cryptd_aead *__cryptd_aead_cast( + struct crypto_aead *tfm) +{ + return (struct cryptd_aead *)tfm; +} + +struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, + u32 type, u32 mask); + +struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); +/* Must be called without moving CPUs. 
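+ *
+ * A minimal allocation sketch for the declarations above ("gcm(aes)" is
+ * only an example name; cryptd instantiates "cryptd(gcm(aes))" from
+ * it)::
+ *
+ *	struct cryptd_aead *ctfm = cryptd_alloc_aead("gcm(aes)", 0, 0);
+ *
+ *	if (IS_ERR(ctfm))
+ *		return PTR_ERR(ctfm);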
*/ +bool cryptd_aead_queued(struct cryptd_aead *tfm); + +void cryptd_free_aead(struct cryptd_aead *tfm); + +#endif diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h new file mode 100644 index 0000000..a1c66d1 --- /dev/null +++ b/include/crypto/ctr.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CTR: Counter mode + * + * Copyright (c) 2007 Herbert Xu + */ + +#ifndef _CRYPTO_CTR_H +#define _CRYPTO_CTR_H + +#include +#include +#include +#include + +#define CTR_RFC3686_NONCE_SIZE 4 +#define CTR_RFC3686_IV_SIZE 8 +#define CTR_RFC3686_BLOCK_SIZE 16 + +static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, + void (*fn)(struct crypto_skcipher *, + const u8 *, u8 *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + int blocksize = crypto_skcipher_chunksize(tfm); + u8 buf[MAX_CIPHER_BLOCKSIZE]; + struct skcipher_walk walk; + int err; + + /* avoid integer division due to variable blocksize parameter */ + if (WARN_ON_ONCE(!is_power_of_2(blocksize))) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes > 0) { + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + int nbytes = walk.nbytes; + int tail = 0; + + if (nbytes < walk.total) { + tail = walk.nbytes & (blocksize - 1); + nbytes -= tail; + } + + do { + int bsize = min(nbytes, blocksize); + + fn(tfm, walk.iv, buf); + + crypto_xor_cpy(dst, src, buf, bsize); + crypto_inc(walk.iv, blocksize); + + dst += bsize; + src += bsize; + nbytes -= bsize; + } while (nbytes > 0); + + err = skcipher_walk_done(&walk, tail); + } + return err; +} + +#endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h new file mode 100644 index 0000000..7812b43 --- /dev/null +++ b/include/crypto/des.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE Cipher Algorithms. + */ + +#ifndef __CRYPTO_DES_H +#define __CRYPTO_DES_H + +#include + +#define DES_KEY_SIZE 8 +#define DES_EXPKEY_WORDS 32 +#define DES_BLOCK_SIZE 8 + +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE + +struct des_ctx { + u32 expkey[DES_EXPKEY_WORDS]; +}; + +struct des3_ede_ctx { + u32 expkey[DES3_EDE_EXPKEY_WORDS]; +}; + +void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); +void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); + +void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); +void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); + +/** + * des_expand_key - Expand a DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. + */ +int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); + +/** + * des3_ede_expand_key - Expand a triple DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. Note that weak keys will + * be rejected (and -EINVAL will be returned) when running in FIPS mode. 
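+ *
+ * A minimal sketch of one possible caller policy for the convention
+ * above: treat -ENOKEY as "weak but usable", anything else as fatal
+ * (ctx, key and keylen are hypothetical)::
+ *
+ *	err = des3_ede_expand_key(&ctx, key, keylen);
+ *	if (err == -ENOKEY)
+ *		err = 0;
+ *	if (err)
+ *		return err;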
+ */ +int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, + unsigned int keylen); + +#endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/dh.h b/include/crypto/dh.h new file mode 100644 index 0000000..d71e985 --- /dev/null +++ b/include/crypto/dh.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Diffie-Hellman secret to be used with kpp API along with helper functions + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + */ +#ifndef _CRYPTO_DH_ +#define _CRYPTO_DH_ + +/** + * DOC: DH Helper Functions + * + * To use DH with the KPP cipher API, the following data structure and + * functions should be used. + * + * To use DH with KPP, the following functions should be used to operate on + * a DH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/** + * struct dh - define a DH private key + * + * @key: Private DH key + * @p: Diffie-Hellman parameter P + * @q: Diffie-Hellman parameter Q + * @g: Diffie-Hellman generator G + * @key_size: Size of the private DH key + * @p_size: Size of DH parameter P + * @q_size: Size of DH parameter Q + * @g_size: Size of DH generator G + */ +struct dh { + void *key; + void *p; + void *q; + void *g; + unsigned int key_size; + unsigned int p_size; + unsigned int q_size; + unsigned int g_size; +}; + +/** + * crypto_dh_key_len() - Obtain the size of the private DH key + * @params: private DH key + * + * This function returns the packet DH key size. A caller can use that + * with the provided DH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_dh_key_len(const struct dh *params); + +/** + * crypto_dh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet DH + * private key. The buffer should be at least crypto_dh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @params: Buffer with the caller-specified private key + * + * The DH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); + +/** + * crypto_dh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @params: Buffer allocated by the caller that is filled with the + * unpacked DH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); + +#endif diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h new file mode 100644 index 0000000..8c9af21 --- /dev/null +++ b/include/crypto/drbg.h @@ -0,0 +1,285 @@ +/* + * DRBG based on NIST SP800-90A + * + * Copyright Stephan Mueller , 2014 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, in which case the provisions of the GPL are + * required INSTEAD OF the above restrictions. (This clause is + * necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +#ifndef _DRBG_H +#define _DRBG_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Concatenation Helper and string operation helper + * + * SP800-90A requires the concatenation of different data. To avoid copying + * buffers around or allocate additional memory, the following data structure + * is used to point to the original memory with its size. In addition, it + * is used to build a linked list. The linked list defines the concatenation + * of individual buffers. The order of memory block referenced in that + * linked list determines the order of concatenation. 
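+ *
+ * A minimal sketch of the scheme: two buffers chained into one logical
+ * string without copying (buffers and lengths are hypothetical)::
+ *
+ *	LIST_HEAD(seedlist);
+ *	struct drbg_string s1, s2;
+ *
+ *	drbg_string_fill(&s1, buf1, len1);
+ *	drbg_string_fill(&s2, buf2, len2);
+ *	list_add_tail(&s1.list, &seedlist);
+ *	list_add_tail(&s2.list, &seedlist);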
+ */ +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +static inline void drbg_string_fill(struct drbg_string *string, + const unsigned char *buf, size_t len) +{ + string->buf = buf; + string->len = len; + INIT_LIST_HEAD(&string->list); +} + +struct drbg_state; +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; /* flags for the cipher */ + __u8 statelen; /* maximum state length */ + __u8 blocklen_bytes; /* block size of output in bytes */ + char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ + /* kernel crypto API backend cipher name */ + char backend_cra_name[CRYPTO_MAX_ALG_NAME]; +}; + +struct drbg_state_ops { + int (*update)(struct drbg_state *drbg, struct list_head *seed, + int reseed); + int (*generate)(struct drbg_state *drbg, + unsigned char *buf, unsigned int buflen, + struct list_head *addtl); + int (*crypto_init)(struct drbg_state *drbg); + int (*crypto_fini)(struct drbg_state *drbg); + +}; + +struct drbg_test_data { + struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ +}; + +struct drbg_state { + struct mutex drbg_mutex; /* lock around DRBG */ + unsigned char *V; /* internal state 10.1.1.1 1a) */ + unsigned char *Vbuf; + /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ + unsigned char *C; + unsigned char *Cbuf; + /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ + size_t reseed_ctr; + size_t reseed_threshold; + /* some memory the DRBG can use for its operation */ + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; /* Cipher handle */ + + struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ + struct skcipher_request *ctr_req; /* CTR mode request handle */ + __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ + __u8 *outscratchpad; /* CTR mode aligned outbuf */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ + struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ + + bool seeded; /* DRBG fully seeded? */ + bool pr; /* Prediction resistance enabled? */ + bool fips_primed; /* Continuous test primed? */ + unsigned char *prev; /* FIPS 140-2 continuous test value */ + struct work_struct seed_work; /* asynchronous seeding support */ + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; + struct random_ready_callback random_ready; +}; + +static inline __u8 drbg_statelen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->statelen; + return 0; +} + +static inline __u8 drbg_blocklen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->blocklen_bytes; + return 0; +} + +static inline __u8 drbg_keylen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return (drbg->core->statelen - drbg->core->blocklen_bytes); + return 0; +} + +static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) +{ + /* SP800-90A requires the limit 2**19 bits, but we return bytes */ + return (1 << 16); +} + +static inline size_t drbg_max_addtl(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**35 bytes additional info str / pers str */ +#if (__BITS_PER_LONG == 32) + /* + * SP800-90A allows smaller maximum numbers to be returned -- we + * return SIZE_MAX - 1 to allow the verification of the enforcement + * of this value in drbg_healthcheck_sanity. 
+ */ + return (SIZE_MAX - 1); +#else + return (1UL<<35); +#endif +} + +static inline size_t drbg_max_requests(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**48 maximum requests before reseeding */ +#if (__BITS_PER_LONG == 32) + return SIZE_MAX; +#else + return (1UL<<48); +#endif +} + +/* + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data. + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl additional information string input buffer + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl) +{ + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data and + * allow furnishing of test_data + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl additional information string input buffer + * @test_data filled test data + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_reset() to allow the caller to provide test_data + * + * @drng DRBG handle -- see crypto_rng_reset + * @pers personalization string input buffer + * @test_data filled test data + * + * return + * see crypto_rng_reset + */ +static inline int crypto_drbg_reset_test(struct crypto_rng *drng, + struct drbg_string *pers, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_reset(drng, pers->buf, pers->len); +} + +/* DRBG type flags */ +#define DRBG_CTR ((drbg_flag_t)1<<0) +#define DRBG_HMAC ((drbg_flag_t)1<<1) +#define DRBG_HASH ((drbg_flag_t)1<<2) +#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) +/* DRBG strength flags */ +#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) +#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) +#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) +#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ + DRBG_STRENGTH256) + +enum drbg_prefixes { + DRBG_PREFIX0 = 0x00, + DRBG_PREFIX1, + DRBG_PREFIX2, + DRBG_PREFIX3 +}; + +#endif /* _DRBG_H */ diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h new file mode 100644 index 0000000..a5b805b --- /dev/null +++ b/include/crypto/ecdh.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * ECDH params to be used with kpp API + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + */ +#ifndef _CRYPTO_ECDH_ +#define
_CRYPTO_ECDH_ + +/** + * DOC: ECDH Helper Functions + * + * To use ECDH with the KPP cipher API, the following data structure and + * functions should be used. + * + * The ECC curves known to the ECDH implementation are specified in this + * header file. + * + * To use ECDH with KPP, the following functions should be used to operate on + * an ECDH private key. The packet private key can be set with + * the KPP API function call crypto_kpp_set_secret. + */ + +/* Curve IDs */ +#define ECC_CURVE_NIST_P192 0x0001 +#define ECC_CURVE_NIST_P256 0x0002 + +/** + * struct ecdh - define an ECDH private key + * + * @curve_id: ECC curve the key is based on. + * @key: Private ECDH key + * @key_size: Size of the private ECDH key + */ +struct ecdh { + unsigned short curve_id; + char *key; + unsigned short key_size; +}; + +/** + * crypto_ecdh_key_len() - Obtain the size of the private ECDH key + * @params: private ECDH key + * + * This function returns the packet ECDH key size. A caller can use that + * with the provided ECDH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_ecdh_key_len(const struct ecdh *params); + +/** + * crypto_ecdh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet ECDH + * private key. The buffer should be at least crypto_ecdh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @p: Buffer with the caller-specified private key + * + * The ECDH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); + +/** + * crypto_ecdh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @p: Buffer allocated by the caller that is filled with the + * unpacked ECDH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory.
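+ *
+ * A minimal round-trip sketch (illustrative only; "privkey", "privkey_len"
+ * and "buf" are hypothetical caller-provided variables, and error handling
+ * is omitted)::
+ *
+ *	struct ecdh p = {
+ *		.curve_id = ECC_CURVE_NIST_P256,
+ *		.key = privkey,
+ *		.key_size = privkey_len,
+ *	};
+ *	unsigned int len = crypto_ecdh_key_len(&p);
+ *
+ *	crypto_ecdh_encode_key(buf, len, &p);
+ *	crypto_ecdh_decode_key(buf, len, &p);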
+ * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); + +#endif diff --git a/include/crypto/engine.h b/include/crypto/engine.h new file mode 100644 index 0000000..84c708b --- /dev/null +++ b/include/crypto/engine.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang + */ +#ifndef _CRYPTO_ENGINE_H +#define _CRYPTO_ENGINE_H + +#include <linux/crypto.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <crypto/algapi.h> +#include <crypto/aead.h> +#include <crypto/akcipher.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> + +#define ENGINE_NAME_LEN 30 +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is running + * @cur_req_prepared: current request is prepared + * @list: link with the global crypto engine list + * @queue_lock: spinlock to synchronise access to request queue + * @queue: the crypto queue of the engine + * @dev: the device attached to the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @kworker: kthread worker struct for request pump + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request being processed + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + bool cur_req_prepared; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + struct device *dev; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + + struct kthread_worker *kworker; + struct kthread_work pump_requests; + + void *priv_data; + struct crypto_async_request *cur_req; +}; + +/* + * struct crypto_engine_op - crypto hardware engine operations + * @prepare_request: do any preparation, if needed, before handling the + * current request + * @unprepare_request: undo any work done by prepare_request() + * @do_one_request: do encryption for the current request + */ +struct crypto_engine_op { + int (*prepare_request)(struct crypto_engine *engine, + void *areq); + int (*unprepare_request)(struct crypto_engine *engine, + void *areq); + int (*do_one_request)(struct crypto_engine *engine, + void *areq); +}; + +struct crypto_engine_ctx { + struct crypto_engine_op op; +}; + +int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req); +int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, + struct aead_request *req); +int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, + struct akcipher_request *req); +int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, + struct ahash_request *req); +int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, + struct skcipher_request *req); +void crypto_finalize_ablkcipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err); +void crypto_finalize_aead_request(struct crypto_engine *engine, + struct aead_request *req, int err); +void
crypto_finalize_akcipher_request(struct crypto_engine *engine, + struct akcipher_request *req, int err); +void crypto_finalize_hash_request(struct crypto_engine *engine, + struct ahash_request *req, int err); +void crypto_finalize_skcipher_request(struct crypto_engine *engine, + struct skcipher_request *req, int err); +int crypto_engine_start(struct crypto_engine *engine); +int crypto_engine_stop(struct crypto_engine *engine); +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +int crypto_engine_exit(struct crypto_engine *engine); + +#endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h new file mode 100644 index 0000000..9d7eff0 --- /dev/null +++ b/include/crypto/gcm.h @@ -0,0 +1,63 @@ +#ifndef _CRYPTO_GCM_H +#define _CRYPTO_GCM_H + +#include <linux/errno.h> + +#define GCM_AES_IV_SIZE 12 +#define GCM_RFC4106_IV_SIZE 8 +#define GCM_RFC4543_IV_SIZE 8 + +/* + * validate authentication tag for GCM + */ +static inline int crypto_gcm_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate authentication tag for RFC4106 + */ +static inline int crypto_rfc4106_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate assoclen for RFC4106/RFC4543 + */ +static inline int crypto_ipsec_check_assoclen(unsigned int assoclen) +{ + switch (assoclen) { + case 16: + case 20: + break; + default: + return -EINVAL; + } + + return 0; +} +#endif diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h new file mode 100644 index 0000000..fa0a63d --- /dev/null +++ b/include/crypto/gf128mul.h @@ -0,0 +1,252 @@ +/* gf128mul.h - GF(2^128) multiplication functions + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006 Rik Snel + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above.
+ + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 31/01/2006 + + An implementation of field multiplication in Galois Field GF(2^128) +*/ + +#ifndef _CRYPTO_GF128MUL_H +#define _CRYPTO_GF128MUL_H + +#include <asm/byteorder.h> +#include <crypto/b128ops.h> +#include <linux/slab.h> + +/* Comment by Rik: + * + * For some background on GF(2^128) see for example: + * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf + * + * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can + * be mapped to computer memory in a variety of ways. Let's examine + * three common cases. + * + * Take a look at the 16 binary octets below in memory order. The msb's + * are left and the lsb's are right. char b[16] is an array and b[0] is + * the first octet. + * + * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 + * b[0] b[1] b[2] b[3] b[13] b[14] b[15] + * + * Every bit is a coefficient of some power of X. We can store the bits + * in every byte in little-endian order and the bytes themselves also in + * little endian order. I will call this lle (little-little-endian). + * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks + * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }. + * This format was originally implemented in gf128mul and is used + * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length). + * + * Another convention says: store the bits in big-endian order and the + * bytes also. This is bbe (big-big-endian). Now the buffer above + * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111, + * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe + * is partly implemented. + * + * Both of the above formats are easy to implement on big-endian + * machines. + * + * XTS and EME (the latter of which is patent encumbered) use the ble + * format (bits are stored in big endian order and the bytes in little + * endian). The above buffer represents X^7 in this case and the + * primitive polynomial is b[0] = 0x87. + * + * The common machine word-size is smaller than 128 bits, so to make + * an efficient implementation we must split into machine word sizes. + * This implementation uses 64-bit words for the moment. Machine + * endianness comes into play. The lle format in relation to machine + * endianness is discussed below by the original author of gf128mul Dr + * Brian Gladman. + * + * Let's look at the bbe and ble format on a little endian machine. + * + * bbe on a little endian machine u32 x[4]: + * + * MS x[0] LS MS x[1] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88 + * + * MS x[2] LS MS x[3] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24 + * + * ble on a little endian machine + * + * MS x[0] LS MS x[1] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32 + * + * MS x[2] LS MS x[3] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 95...88 87...80 79...72 71...64 127.120 119.112 111.104 103..96 + * + * Multiplications in GF(2^128) are mostly bit-shifts, so you see why + * ble (and lbe also) are easier to implement on a little-endian + * machine than on a big-endian machine.
The converse holds for bbe + and lle. + * + * Note: to have good alignment, it seems to me that it is sufficient + * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize + * machines this will automatically be aligned to the word size, and on + * a 64-bit machine as well. + */ +/* Multiply a GF(2^128) field element by x. Field elements are + held in arrays of bytes in which field bits 8n..8n + 7 are held in + byte[n], with lower indexed bits placed in the more numerically + significant bit positions within bytes. + + On little endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103 + + On big endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127 +*/ + +/* A slow generic version of gf_mul, implemented for lle and bbe. + * It multiplies a and b and puts the result in a */ +void gf128mul_lle(be128 *a, const be128 *b); + +void gf128mul_bbe(be128 *a, const be128 *b); + +/* + * The following functions multiply a field element by x in + * the polynomial field representation. They use 64-bit word operations + * to gain speed but compensate for machine endianness and hence work + * correctly on both styles of machine. + * + * They are defined here for performance. + */ + +static inline u64 gf128mul_mask_from_bit(u64 x, int which) +{ + /* a constant-time version of 'x & ((u64)1 << which) ?
(u64)-1 : 0' */ + return ((s64)(x << (63 - which)) >> 63); +} + +static inline void gf128mul_x_lle(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48 + * (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56); + + r->b = cpu_to_be64((b >> 1) | (a << 63)); + r->a = cpu_to_be64((a >> 1) ^ _tt); +} + +static inline void gf128mul_x_bbe(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_be64((a << 1) | (b >> 63)); + r->b = cpu_to_be64((b << 1) ^ _tt); +} + +/* needed by XTS */ +static inline void gf128mul_x_ble(le128 *r, const le128 *x) +{ + u64 a = le64_to_cpu(x->a); + u64 b = le64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_le64((a << 1) | (b >> 63)); + r->b = cpu_to_le64((b << 1) ^ _tt); +} + +/* 4k table optimization */ + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); +struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); +void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); +void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); +void gf128mul_x8_ble(le128 *r, const le128 *x); +static inline void gf128mul_free_4k(struct gf128mul_4k *t) +{ + kzfree(t); +} + + +/* 64k table optimization, implemented for bbe */ + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +/* First initialize with the constant factor with which you + * want to multiply and then call gf128mul_64k_bbe with the other + * factor in the first argument, and the table in the second. + * Afterwards, the result is stored in *a. + */ +struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); +void gf128mul_free_64k(struct gf128mul_64k *t); +void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t); + +#endif /* _CRYPTO_GF128MUL_H */ diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 0000000..f832c9f --- /dev/null +++ b/include/crypto/ghash.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the GHASH hash function + */ + +#ifndef __CRYPTO_GHASH_H__ +#define __CRYPTO_GHASH_H__ + +#include <linux/types.h> +#include <crypto/gf128mul.h> + +#define GHASH_BLOCK_SIZE 16 +#define GHASH_DIGEST_SIZE 16 + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[GHASH_BLOCK_SIZE]; + u32 bytes; +}; + +#endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h new file mode 100644 index 0000000..d52b95b --- /dev/null +++ b/include/crypto/hash.h @@ -0,0 +1,961 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash: Hash algorithms under the crypto API + * + * Copyright (c) 2008 Herbert Xu + */ + +#ifndef _CRYPTO_HASH_H +#define _CRYPTO_HASH_H + +#include <linux/crypto.h> +#include <linux/string.h> + +struct crypto_ahash; + +/** + * DOC: Message Digest Algorithm Definitions + * + * These data structures define modular message digest algorithm + * implementations, managed via crypto_register_ahash(), + * crypto_register_shash(), crypto_unregister_ahash() and + * crypto_unregister_shash(). + */ + +/** + * struct hash_alg_common - define properties of message digest + * @digestsize: Size of the result of the transformation.
A buffer of this size + * must be available to the @final and @finup calls, so they can + * store the resulting hash into it. For various predefined sizes, + * search include/crypto/ using + * git grep _DIGEST_SIZE include/crypto. + * @statesize: Size of the block for partial state of the transformation. A + * buffer of this size must be passed to the @export function as it + * will save the partial state of the transformation into it. On the + * other side, the @import function will load the state from a + * buffer of this size as well. + * @base: Start of data structure of cipher algorithm. The common data + * structure of crypto_alg contains information common to all ciphers. + * The hash_alg_common data structure now adds the hash-specific + * information. + */ +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + /* This field may only be used by the ahash API code. */ + void *priv; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +#define AHASH_REQUEST_ON_STACK(name, ahash) \ + char __##name##_desc[sizeof(struct ahash_request) + \ + crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = (void *)__##name##_desc + +/** + * struct ahash_alg - asynchronous message digest definition + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the + * state of the HASH transformation at the beginning. This shall fill in + * the internal structures used during the entire duration of the whole + * transformation. No data processing happens at this point. Driver code + * implementation must not use req->result. + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This + * function actually pushes blocks of data from upper layers into the + * driver, which then passes those to the hardware as seen fit. This + * function must not finalize the HASH transformation by calculating the + * final message digest as this only adds more data into the + * transformation. This function shall not modify the transformation + * context, as this function may be called in parallel with the same + * transformation object. Data processing can happen synchronously + * [SHASH] or asynchronously [AHASH] at this point. Driver must not use + * req->result. + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the + * transformation and retrieves the resulting hash from the driver and + * pushes it back to upper layers. No data processing happens at this + * point unless hardware requires it to finish the transformation + * (then the data buffered by the device driver is processed). + * @finup: **[optional]** Combination of @update and @final. This function is effectively a + * combination of @update and @final calls issued in sequence. As some + * hardware cannot do @update and @final separately, this callback was + * added to allow such hardware to be used at least by IPsec. Data + * processing can happen synchronously [SHASH] or asynchronously [AHASH] + * at this point. + * @digest: Combination of @init and @update and @final. This function + * effectively behaves as the entire chain of operations, @init, + * @update and @final issued in sequence. Just like @finup, this was + * added for hardware which cannot do even the @finup, but can only do + * the whole transformation in one run. 
Data processing can happen + * synchronously [SHASH] or asynchronously [AHASH] at this point. + * @setkey: Set optional key used by the hashing algorithm. Intended to push + * optional key used by the hashing algorithm from upper layers into + * the driver. This function can store the key in the transformation + * context or can outright program it into the hardware. In the former + * case, one must be careful to program the key into the hardware at + * the appropriate time and one must be careful that .setkey() can be + * called multiple times during the existence of the transformation + * object. Not all hashing algorithms implement this function as it + * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT + * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement + * this function. This function must be called before any of the + * @init, @update, @final, @finup and @digest functions is called. No data + * processing happens at this point. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import'ed back later on. This is useful in case + * you want to save the partial result of the transformation after + * processing a certain amount of data and reload this partial result + * later on, possibly multiple times, for re-use. No data processing + * happens at this point. Driver must not use req->result. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. Driver must not use + * req->result. + * @halg: see struct hash_alg_common + */ +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + struct hash_alg_common halg; +}; + +struct shash_desc { + struct crypto_shash *tfm; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +#define HASH_MAX_DIGESTSIZE 64 + +/* + * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc' + * containing a 'struct sha3_state'. + */ +#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) + +#define HASH_MAX_STATESIZE 512 + +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + \ + HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \ + struct shash_desc *shash = (struct shash_desc *)__##shash##_desc + +/** + * struct shash_alg - synchronous message digest definition + * @init: see struct ahash_alg + * @update: see struct ahash_alg + * @final: see struct ahash_alg + * @finup: see struct ahash_alg + * @digest: see struct ahash_alg + * @export: see struct ahash_alg + * @import: see struct ahash_alg + * @setkey: see struct ahash_alg + * @digestsize: see struct ahash_alg + * @statesize: see struct ahash_alg + * @descsize: Size of the operational state for the message digest.
This state + * size is the memory size that needs to be allocated for + * shash_desc.__ctx + * @base: internally used + */ +struct shash_alg { + int (*init)(struct shash_desc *desc); + int (*update)(struct shash_desc *desc, const u8 *data, + unsigned int len); + int (*final)(struct shash_desc *desc, u8 *out); + int (*finup)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*digest)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*export)(struct shash_desc *desc, void *out); + int (*import)(struct shash_desc *desc, const void *in); + int (*setkey)(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int descsize; + + /* These fields must match hash_alg_common. */ + unsigned int digestsize + __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); + unsigned int statesize; + + struct crypto_alg base; +}; + +struct crypto_ahash { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +/** + * DOC: Asynchronous Message Digest API + * + * The asynchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) + * + * The asynchronous cipher operation discussion provided for the + * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well. + */ + +static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_ahash, base); +} + +/** + * crypto_alloc_ahash() - allocate ahash cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ahash. The returned struct + * crypto_ahash is the cipher handle that is required for any subsequent + * API invocation for that ahash. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_ahash() - zeroize and free the ahash handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_ahash(struct crypto_ahash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); +} + +/** + * crypto_has_ahash() - Search for the availability of an ahash. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash + * @type: specifies the type of the ahash + * @mask: specifies the mask for the ahash + * + * Return: true when the ahash is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_ahash(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); +} + +static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); +} + +static inline unsigned int crypto_ahash_alignmask( + struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); +} + +/** + * crypto_ahash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); +} + +static inline struct hash_alg_common *__crypto_hash_alg_common( + struct crypto_alg *alg) +{ + return container_of(alg, struct hash_alg_common, base); +} + +static inline struct hash_alg_common *crypto_hash_alg_common( + struct crypto_ahash *tfm) +{ + return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_ahash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * + * Return: message digest size of cipher + */ +static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->digestsize; +} + +/** + * crypto_ahash_statesize() - obtain size of the ahash state + * @tfm: cipher handle + * + * Return the size of the ahash state. With the crypto_ahash_export() + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state + */ +static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->statesize; +} + +static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) +{ + return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); +} + +static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags); +} + +static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); +} + +/** + * crypto_ahash_reqtfm() - obtain cipher handle from request + * @req: asynchronous request handle that contains the reference to the ahash + * cipher handle + * + * Return the ahash cipher handle that is registered with the asynchronous + * request handle ahash_request. 
+ * + * Return: ahash cipher handle + */ +static inline struct crypto_ahash *crypto_ahash_reqtfm( + struct ahash_request *req) +{ + return __crypto_ahash_cast(req->base.tfm); +} + +/** + * crypto_ahash_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: size of the request data + */ +static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) +{ + return tfm->reqsize; +} + +static inline void *ahash_request_ctx(struct ahash_request *req) +{ + return req->__ctx; +} + +/** + * crypto_ahash_setkey - set key for cipher handle + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ahash cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_ahash_finup() - update and finalize message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_finup(struct ahash_request *req); + +/** + * crypto_ahash_final() - calculate message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer registered with the ahash_request handle. + * + * Return: + * 0 if the message digest was successfully calculated; + * -EINPROGRESS if data is fed into hardware (DMA) or queued for later; + * -EBUSY if queue is full and request should be resubmitted later; + * other < 0 if an error occurred + */ +int crypto_ahash_final(struct ahash_request *req); + +/** + * crypto_ahash_digest() - calculate message digest for a buffer + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of crypto_ahash_init, + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_digest(struct ahash_request *req); + +/** + * crypto_ahash_export() - extract current message digest state + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the ahash_request handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_ahash_statesize()).
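+ *
+ * A minimal save/restore sketch (illustrative only; "state" is assumed to
+ * be a caller-provided buffer of at least crypto_ahash_statesize() bytes,
+ * and error handling is omitted)::
+ *
+ *	crypto_ahash_export(req, state);
+ *	...
+ *	crypto_ahash_import(req, state);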
+ * + * Return: 0 if the export was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_export(struct ahash_request *req, void *out) +{ + return crypto_ahash_reqtfm(req)->export(req, out); +} + +/** + * crypto_ahash_import() - import message digest state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the ahash_request handle from the + * input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); +} + +/** + * crypto_ahash_init() - (re)initialize message digest handle + * @req: ahash_request handle that already is initialized with all necessary + * data using the ahash_request_* API functions + * + * The call (re-)initializes the message digest referenced by the ahash_request + * handle. Any potentially existing state created by previous operations is + * discarded. + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); +} + +/** + * crypto_ahash_update() - add data to message digest for processing + * @req: ahash_request handle that was previously initialized with the + * crypto_ahash_init call. + * + * Updates the message digest state of the &ahash_request handle. The input data + * is pointed to by the scatter/gather list registered in the &ahash_request + * handle + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; + int ret; + + crypto_stats_get(alg); + ret = crypto_ahash_reqtfm(req)->update(req); + crypto_stats_ahash_update(nbytes, ret, alg); + return ret; +} + +/** + * DOC: Asynchronous Hash Request Handle + * + * The &ahash_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple &ahash_request instances), pointer + * to plaintext and the message digest output buffer, asynchronous callback + * function, etc. It acts as a handle to the ahash_request_* API calls in a + * similar way as ahash handle to the crypto_ahash_* API calls. + */ + +/** + * ahash_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ahash handle in the request + * data structure with a different one. + */ +static inline void ahash_request_set_tfm(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + req->base.tfm = crypto_ahash_tfm(tfm); +} + +/** + * ahash_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. 
+ * + * Allocate the request data structure that must be used with the ahash + * message digest API calls. During + * the allocation, the provided ahash handle + * is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct ahash_request *ahash_request_alloc( + struct crypto_ahash *tfm, gfp_t gfp) +{ + struct ahash_request *req; + + req = kmalloc(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); + + if (likely(req)) + ahash_request_set_tfm(req, tfm); + + return req; +} + +/** + * ahash_request_free() - zeroize and free the request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void ahash_request_free(struct ahash_request *req) +{ + kzfree(req); +} + +static inline void ahash_request_zero(struct ahash_request *req) +{ + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); +} + +static inline struct ahash_request *ahash_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ahash_request, base); +} + +/** + * ahash_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * &crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once + * the cipher operation completes. + * + * The callback function is registered with the &ahash_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void ahash_request_set_callback(struct ahash_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * ahash_request_set_crypt() - set data buffers + * @req: ahash_request handle to be updated + * @src: source scatter/gather list + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source scatter/gather list + * + * By using this call, the caller references the source scatter/gather list. + * The source scatter/gather list points to the data the message digest is to + * be calculated for. 
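+ *
+ * An illustrative one-shot digest flow (sketch only; "tfm", "sg", "out"
+ * and "nbytes" are assumed to be set up by the caller, and error handling
+ * is omitted)::
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	struct ahash_request *req;
+ *
+ *	req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				   crypto_req_done, &wait);
+ *	ahash_request_set_crypt(req, sg, out, nbytes);
+ *	crypto_wait_req(crypto_ahash_digest(req), &wait);
+ *	ahash_request_free(req);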
+ */ +static inline void ahash_request_set_crypt(struct ahash_request *req, + struct scatterlist *src, u8 *result, + unsigned int nbytes) +{ + req->src = src; + req->nbytes = nbytes; + req->result = result; +} + +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto) + * + * The message digest API is able to maintain state information for the + * caller. + * + * The synchronous message digest API can store user-related context in its + * shash_desc request data structure. + */ + +/** + * crypto_alloc_shash() - allocate message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned &struct + * crypto_shash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_shash() - zeroize and free the message digest handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_shash(struct crypto_shash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_name(crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); +} + +static inline unsigned int crypto_shash_alignmask( + struct crypto_shash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); +} + +/** + * crypto_shash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); +} + +static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct shash_alg, base); +} + +static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) +{ + return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_shash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned.
+ * + * Return: digest size of cipher + */ +static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->digestsize; +} + +static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->statesize; +} + +static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) +{ + return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); +} + +static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags); +} + +static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); +} + +/** + * crypto_shash_descsize() - obtain the operational state size + * @tfm: cipher handle + * + * The size of the operational state the cipher needs during operation is + * returned for the hash referenced with the cipher handle. This size is + * required to calculate the memory requirements to allow the caller to + * allocate sufficient memory for the operational state. + * + * The operational state is defined with struct shash_desc where the size of + * that data structure is to be calculated as + * sizeof(struct shash_desc) + crypto_shash_descsize(alg) + * + * Return: size of the operational state + */ +static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) +{ + return tfm->descsize; +} + +static inline void *shash_desc_ctx(struct shash_desc *desc) +{ + return desc->__ctx; +} + +/** + * crypto_shash_setkey() - set key for message digest + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the keyed message digest cipher. The + * cipher handle must point to a keyed message digest cipher in order for this + * function to succeed. + * + * Context: Any context. + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_shash_digest() - calculate message digest for buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of crypto_shash_init, + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Context: Any context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +/** + * crypto_shash_export() - extract operational state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the operational state handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_shash_descsize). + * + * Context: Any context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +static inline int crypto_shash_export(struct shash_desc *desc, void *out) +{ + return crypto_shash_alg(desc->tfm)->export(desc, out); +} + +/** + * crypto_shash_import() - import operational state + * @desc: reference to the operational state handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the operational state handle from + * the input buffer. That buffer should have been generated with the + * crypto_shash_export function. + * + * Context: Any context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); +} + +/** + * crypto_shash_init() - (re)initialize message digest + * @desc: operational state handle that is already filled + * + * The call (re-)initializes the message digest referenced by the + * operational state handle. Any potentially existing state created by + * previous operations is discarded. + * + * Context: Any context. + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ +static inline int crypto_shash_init(struct shash_desc *desc) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); +} + +/** + * crypto_shash_update() - add data to message digest for processing + * @desc: operational state handle that is already initialized + * @data: input data to be added to the message digest + * @len: length of the input data + * + * Updates the message digest state of the operational state handle. + * + * Context: Any context. + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ +int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +/** + * crypto_shash_final() - calculate message digest + * @desc: operational state handle that is already filled with data + * @out: output buffer filled with the message digest + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. The caller must ensure that the output buffer is + * large enough by using crypto_shash_digestsize. + * + * Context: Any context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_final(struct shash_desc *desc, u8 *out); + +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Context: Any context.
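+ *
+ * A minimal synchronous sketch (illustrative only; "tfm" is assumed to be
+ * an allocated crypto_shash handle, "data", "len1", "len2" and "out" are
+ * hypothetical caller variables, and error handling is omitted)::
+ *
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *
+ *	desc->tfm = tfm;
+ *	crypto_shash_init(desc);
+ *	crypto_shash_update(desc, data, len1);
+ *	crypto_shash_finup(desc, data + len1, len2, out);
+ *	shash_desc_zero(desc);
+ *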
+ * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +static inline void shash_desc_zero(struct shash_desc *desc) +{ + memzero_explicit(desc, + sizeof(*desc) + crypto_shash_descsize(desc->tfm)); +} + +#endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h new file mode 100644 index 0000000..eb9d2e3 --- /dev/null +++ b/include/crypto/hash_info.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin + */ + +#ifndef _CRYPTO_HASH_INFO_H +#define _CRYPTO_HASH_INFO_H + +#include <crypto/sha.h> +#include <crypto/md5.h> +#include <crypto/streebog.h> + +#include <uapi/linux/hash_info.h> + +/* not defined in include/crypto/ */ +#define RMD128_DIGEST_SIZE 16 +#define RMD160_DIGEST_SIZE 20 +#define RMD256_DIGEST_SIZE 32 +#define RMD320_DIGEST_SIZE 40 + +/* not defined in include/crypto/ */ +#define WP512_DIGEST_SIZE 64 +#define WP384_DIGEST_SIZE 48 +#define WP256_DIGEST_SIZE 32 + +/* not defined in include/crypto/ */ +#define TGR128_DIGEST_SIZE 16 +#define TGR160_DIGEST_SIZE 20 +#define TGR192_DIGEST_SIZE 24 + +/* not defined in include/crypto/ */ +#define SM3256_DIGEST_SIZE 32 + +extern const char *const hash_algo_name[HASH_ALGO__LAST]; +extern const int hash_digest_size[HASH_ALGO__LAST]; + +#endif /* _CRYPTO_HASH_INFO_H */ diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h new file mode 100644 index 0000000..6677413 --- /dev/null +++ b/include/crypto/hmac.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_HMAC_H +#define _CRYPTO_HMAC_H + +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c + +#endif /* _CRYPTO_HMAC_H */ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h new file mode 100644 index 0000000..864849e --- /dev/null +++ b/include/crypto/if_alg.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu + */ + +#ifndef _CRYPTO_IF_ALG_H +#define _CRYPTO_IF_ALG_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define ALG_MAX_PAGES 16 + +struct crypto_async_request; + +struct alg_sock { + /* struct sock must be the first member of struct alg_sock */ + struct sock sk; + + struct sock *parent; + + atomic_t refcnt; + atomic_t nokey_refcnt; + + const struct af_alg_type *type; + void *private; +}; + +struct af_alg_control { + struct af_alg_iv *iv; + int op; + unsigned int aead_assoclen; +}; + +struct af_alg_type { + void *(*bind)(const char *name, u32 type, u32 mask); + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*accept)(void *private, struct sock *sk); + int (*accept_nokey)(void *private, struct sock *sk); + int (*setauthsize)(void *private, unsigned int authsize); + + struct proto_ops *ops; + struct proto_ops *ops_nokey; + struct module *owner; + char name[14]; +}; + +struct af_alg_sgl { + struct scatterlist sg[ALG_MAX_PAGES + 1]; + struct page *pages[ALG_MAX_PAGES]; + unsigned int npages; +}; + +/* TX SGL entry */ +struct af_alg_tsgl { + struct list_head list; + unsigned int cur; /* Last processed SG entry */ + struct scatterlist sg[0]; /* Array of SGs forming the SGL */ +}; + +#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ + sizeof(struct scatterlist) - 1) + +/* RX SGL entry */ +struct af_alg_rsgl { + struct af_alg_sgl
sgl; + struct list_head list; + size_t sg_num_bytes; /* Bytes of data in that SGL */ +}; + +/** + * struct af_alg_async_req - definition of crypto request + * @iocb: IOCB for AIO operations + * @sk: Socket the request is associated with + * @first_rsgl: First RX SG + * @last_rsgl: Pointer to last RX SG + * @rsgl_list: Track RX SGs + * @tsgl: Private, per request TX SGL of buffers to process + * @tsgl_entries: Number of entries in priv. TX SGL + * @outlen: Number of output bytes generated by crypto op + * @areqlen: Length of this data structure + * @cra_u: Cipher request + */ +struct af_alg_async_req { + struct kiocb *iocb; + struct sock *sk; + + struct af_alg_rsgl first_rsgl; + struct af_alg_rsgl *last_rsgl; + struct list_head rsgl_list; + + struct scatterlist *tsgl; + unsigned int tsgl_entries; + + unsigned int outlen; + unsigned int areqlen; + + union { + struct aead_request aead_req; + struct skcipher_request skcipher_req; + } cra_u; + + /* req ctx trails this struct */ +}; + +/** + * struct af_alg_ctx - definition of the crypto context + * + * The crypto context tracks the input data during the lifetime of an AF_ALG + * socket. + * + * @tsgl_list: Link to TX SGL + * @iv: IV for cipher operation + * @aead_assoclen: Length of AAD for AEAD cipher operations + * @completion: Work queue for synchronous operation + * @used: TX bytes sent to kernel. This variable is used to + * ensure that user space cannot cause the kernel + * to allocate too much memory in sendmsg operation. + * @rcvused: Total RX bytes to be filled by kernel. This variable + * is used to ensure user space cannot cause the kernel + * to allocate too much memory in a recvmsg operation. + * @more: More data to be expected from user space? + * @merge: Shall new data from user space be merged into existing + * SG? + * @enc: Cryptographic operation to be performed when + * recvmsg is invoked. + * @len: Length of memory allocated for this data structure. + */ +struct af_alg_ctx { + struct list_head tsgl_list; + + void *iv; + size_t aead_assoclen; + + struct crypto_wait wait; + + size_t used; + atomic_t rcvused; + + bool more; + bool merge; + bool enc; + + unsigned int len; +}; + +int af_alg_register_type(const struct af_alg_type *type); +int af_alg_unregister_type(const struct af_alg_type *type); + +int af_alg_release(struct socket *sock); +void af_alg_release_parent(struct sock *sk); +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); + +int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); +void af_alg_free_sg(struct af_alg_sgl *sgl); + +static inline struct alg_sock *alg_sk(struct sock *sk) +{ + return (struct alg_sock *)sk; +} + +/** + * Size of available buffer for sending data from user space to kernel. + * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_sndbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - + ctx->used, 0); +} + +/** + * Can the send buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_writable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_sndbuf(sk); +} + +/** + * Size of available buffer used by kernel for the RX user space operation. 
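+ *
+ * For orientation (numbers purely illustrative): with 4 KiB pages,
+ * sk_rcvbuf = 212992 (52 pages) and rcvused = 208896 (51 pages), this
+ * yields max(212992 & PAGE_MASK, PAGE_SIZE) - 208896 = 4096, so exactly
+ * one page is still available and af_alg_readable() below returns true.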
+ * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_rcvbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - + atomic_read(&ctx->rcvused), 0); +} + +/** + * Can the RX buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_readable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_rcvbuf(sk); +} + +unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); +void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, + size_t dst_offset); +void af_alg_wmem_wakeup(struct sock *sk); +int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + unsigned int ivsize); +ssize_t af_alg_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags); +void af_alg_free_resources(struct af_alg_async_req *areq); +void af_alg_async_cb(struct crypto_async_request *_req, int err); +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait); +struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, + unsigned int areqlen); +int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, + struct af_alg_async_req *areq, size_t maxsize, + size_t *outlen); + +#endif /* _CRYPTO_IF_ALG_H */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h new file mode 100644 index 0000000..9de5736 --- /dev/null +++ b/include/crypto/internal/acompress.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li + * Giovanni Cabiddu + */ +#ifndef _CRYPTO_ACOMP_INT_H +#define _CRYPTO_ACOMP_INT_H +#include + +/* + * Transform internal helpers. 
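+ *
+ * E.g. (sketch only; "my_acomp_reqctx" is a hypothetical driver-private
+ * type whose size the driver announced as its request context size):
+ *
+ *	struct my_acomp_reqctx *rctx = acomp_request_ctx(req);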
+ */ +static inline void *acomp_request_ctx(struct acomp_req *req) +{ + return req->__ctx; +} + +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void acomp_request_complete(struct acomp_req *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +{ + struct acomp_req *req; + + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} + +static inline void __acomp_request_free(struct acomp_req *req) +{ + kzfree(req); +} + +/** + * crypto_register_acomp() -- Register asynchronous compression algorithm + * + * Function registers an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_acomp(struct acomp_alg *alg); + +/** + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm + * + * Function unregisters an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_unregister_acomp(struct acomp_alg *alg); + +int crypto_register_acomps(struct acomp_alg *algs, int count); +void crypto_unregister_acomps(struct acomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h new file mode 100644 index 0000000..c509ec3 --- /dev/null +++ b/include/crypto/internal/aead.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu + */ + +#ifndef _CRYPTO_INTERNAL_AEAD_H +#define _CRYPTO_INTERNAL_AEAD_H + +#include +#include +#include +#include + +struct rtattr; + +struct aead_instance { + void (*free)(struct aead_instance *inst); + union { + struct { + char head[offsetof(struct aead_alg, base)]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct aead_queue { + struct crypto_queue base; +}; + +static inline void *crypto_aead_ctx(struct crypto_aead *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *aead_crypto_instance( + struct aead_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct aead_instance *aead_instance(struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct aead_instance, alg.base); +} + +static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) +{ + return aead_instance(crypto_tfm_alg_instance(&aead->base)); +} + +static inline void *aead_instance_ctx(struct aead_instance *inst) +{ + return crypto_instance_ctx(aead_crypto_instance(inst)); +} + +static inline void *aead_request_ctx(struct aead_request *req) +{ + return req->__ctx; +} + +static inline void aead_request_complete(struct aead_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 aead_request_flags(struct aead_request *req) +{ + return req->base.flags; +} + +static inline struct aead_request *aead_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct 
aead_request, base); +} + +static inline void crypto_set_aead_spawn( + struct crypto_aead_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct aead_alg *crypto_spawn_aead_alg( + struct crypto_aead_spawn *spawn) +{ + return container_of(spawn->base.alg, struct aead_alg, base); +} + +static inline struct crypto_aead *crypto_spawn_aead( + struct crypto_aead_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, + unsigned int reqsize) +{ + aead->reqsize = reqsize; +} + +static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) +{ + return alg->maxauthsize; +} + +static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) +{ + return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); +} + +static inline void aead_init_queue(struct aead_queue *queue, + unsigned int max_qlen) +{ + crypto_init_queue(&queue->base, max_qlen); +} + +static inline int aead_enqueue_request(struct aead_queue *queue, + struct aead_request *request) +{ + return crypto_enqueue_request(&queue->base, &request->base); +} + +static inline struct aead_request *aead_dequeue_request( + struct aead_queue *queue) +{ + struct crypto_async_request *req; + + req = crypto_dequeue_request(&queue->base); + + return req ? container_of(req, struct aead_request, base) : NULL; +} + +static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) +{ + struct crypto_async_request *req; + + req = crypto_get_backlog(&queue->base); + + return req ? container_of(req, struct aead_request, base) : NULL; +} + +static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) +{ + return alg->chunksize; +} + +/** + * crypto_aead_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CCM. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
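+ *
+ * For instance, CCM built on a 16-byte block cipher advertises a block
+ * size of 1, yet reports a chunk size of 16; a caller feeding data
+ * incrementally would therefore keep every piece except the last one a
+ * multiple of 16 bytes.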
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); +} + +int crypto_register_aead(struct aead_alg *alg); +void crypto_unregister_aead(struct aead_alg *alg); +int crypto_register_aeads(struct aead_alg *algs, int count); +void crypto_unregister_aeads(struct aead_alg *algs, int count); +int aead_register_instance(struct crypto_template *tmpl, + struct aead_instance *inst); + +#endif /* _CRYPTO_INTERNAL_AEAD_H */ + diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h new file mode 100644 index 0000000..d6c8a42 --- /dev/null +++ b/include/crypto/internal/akcipher.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + */ +#ifndef _CRYPTO_AKCIPHER_INT_H +#define _CRYPTO_AKCIPHER_INT_H +#include +#include + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +/* + * Transform internal helpers. + */ +static inline void *akcipher_request_ctx(struct akcipher_request *req) +{ + return req->__ctx; +} + +static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher, + unsigned int reqsize) +{ + crypto_akcipher_alg(akcipher)->reqsize = reqsize; +} + +static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void akcipher_request_complete(struct akcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +static inline void crypto_set_akcipher_spawn( + struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + +/** + * crypto_register_akcipher() -- Register public key algorithm + * + * Function registers an implementation of a public key verify algorithm + * + * @alg: algorithm definition + * + * Return: zero on 
success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+
+/**
+ * akcipher_register_instance() -- Register public key template instance
+ *
+ * Function registers an implementation of an asymmetric key algorithm
+ * created from a template
+ *
+ * @tmpl: the template from which the algorithm was created
+ * @inst: the template instance
+ */
+int akcipher_register_instance(struct crypto_template *tmpl,
+			       struct akcipher_instance *inst);
+#endif
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 0000000..fd54074
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include
+#include
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb,
+				    struct nlmsghdr *in_nlh,
+				    struct nlattr **attrs)
+{
+	return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
new file mode 100644
index 0000000..81ea1a4
--- /dev/null
+++ b/include/crypto/internal/des.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DES & Triple DES EDE key verification helpers
+ */
+
+#ifndef __CRYPTO_INTERNAL_DES_H
+#define __CRYPTO_INTERNAL_DES_H
+
+#include
+#include
+#include
+#include
+#include
+
+/**
+ * crypto_des_verify_key - Check whether a DES key is weak
+ * @tfm: the crypto algo
+ * @key: the key buffer
+ *
+ * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
+ * keys. Otherwise, 0 is returned.
+ *
+ * It is the job of the caller to ensure that the size of the key equals
+ * DES_KEY_SIZE.
+ */
+static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
+{
+	struct des_ctx tmp;
+	int err;
+
+	err = des_expand_key(&tmp, key, DES_KEY_SIZE);
+	if (err == -ENOKEY) {
+		if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
+			err = -EINVAL;
+		else
+			err = 0;
+	}
+
+	if (err)
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+
+	memzero_explicit(&tmp, sizeof(tmp));
+	return err;
+}
+
+/*
+ * RFC2451:
+ *
+ *   For DES-EDE3, there is no known need to reject weak or
+ *   complementation keys.  Any weakness is obviated by the use of
+ *   multiple keys.
+ *
+ *   However, if the first two or last two independent 64-bit keys are
+ *   equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+ *   same as DES.  Implementers MUST reject keys that exhibit this
+ *   property.
+ *
+ */
+static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len,
+				      bool check_weak)
+{
+	int ret = fips_enabled ?
-EINVAL : -ENOKEY; + u32 K[6]; + + memcpy(K, key, DES3_EDE_KEY_SIZE); + + if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (fips_enabled || check_weak)) + goto bad; + + if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) + goto bad; + + ret = 0; +bad: + memzero_explicit(K, DES3_EDE_KEY_SIZE); + + return ret; +} + +/** + * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some + * keys are rejected in FIPS mode even if weak keys are permitted by the TFM + * flags. + * + * It is the job of the caller to ensure that the size of the key equals + * DES3_EDE_KEY_SIZE. + */ +static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm, + const u8 *key) +{ + int err; + + err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE, + crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + if (err) + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + return err; +} + +static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_ablkcipher_des_key(struct crypto_ablkcipher *tfm, + const u8 *key) +{ + return crypto_des_verify_key(crypto_ablkcipher_tfm(tfm), key); +} + +static inline int verify_ablkcipher_des3_key(struct crypto_ablkcipher *tfm, + const u8 *key) +{ + return crypto_des3_ede_verify_key(crypto_ablkcipher_tfm(tfm), key); +} + +static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES_KEY_SIZE) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return crypto_des_verify_key(crypto_aead_tfm(tfm), key); +} + +static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES3_EDE_KEY_SIZE) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key); +} + +#endif /* __CRYPTO_INTERNAL_DES_H */ diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h new file mode 100644 index 0000000..0108c0c --- /dev/null +++ b/include/crypto/internal/geniv.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * geniv: IV generation + * + * Copyright (c) 2015 Herbert Xu + */ + +#ifndef _CRYPTO_INTERNAL_GENIV_H +#define _CRYPTO_INTERNAL_GENIV_H + +#include +#include +#include + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, u32 mask); +void aead_geniv_free(struct aead_instance *inst); +int aead_init_geniv(struct crypto_aead *tfm); +void aead_exit_geniv(struct crypto_aead *tfm); + +#endif /* _CRYPTO_INTERNAL_GENIV_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h new file mode 100644 index 0000000..bfc9db7 --- /dev/null +++ b/include/crypto/internal/hash.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash algorithms. 
+ * + * Copyright (c) 2008 Herbert Xu + */ + +#ifndef _CRYPTO_INTERNAL_HASH_H +#define _CRYPTO_INTERNAL_HASH_H + +#include +#include + +struct ahash_request; +struct scatterlist; + +struct crypto_hash_walk { + char *data; + + unsigned int offset; + unsigned int alignmask; + + struct page *pg; + unsigned int entrylen; + + unsigned int total; + struct scatterlist *sg; + + unsigned int flags; +}; + +struct ahash_instance { + struct ahash_alg alg; +}; + +struct shash_instance { + struct shash_alg alg; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +extern const struct crypto_type crypto_ahash_type; + +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); +int crypto_ahash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); + +static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, + int err) +{ + return crypto_hash_walk_done(walk, err); +} + +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + +static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk) +{ + return crypto_hash_walk_last(walk); +} + +int crypto_register_ahash(struct ahash_alg *alg); +int crypto_unregister_ahash(struct ahash_alg *alg); +int crypto_register_ahashes(struct ahash_alg *algs, int count); +void crypto_unregister_ahashes(struct ahash_alg *algs, int count); +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); +void ahash_free_instance(struct crypto_instance *inst); + +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +{ + return alg->setkey != shash_no_setkey; +} + +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + +int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, + struct hash_alg_common *alg, + struct crypto_instance *inst); + +static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); + +int crypto_register_shash(struct shash_alg *alg); +int crypto_unregister_shash(struct shash_alg *alg); +int crypto_register_shashes(struct shash_alg *algs, int count); +int crypto_unregister_shashes(struct shash_alg *algs, int count); +int shash_register_instance(struct crypto_template *tmpl, + struct shash_instance *inst); +void shash_free_instance(struct crypto_instance *inst); + +int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, + struct shash_alg *alg, + struct crypto_instance *inst); + +static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); + +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); + +int crypto_init_shash_ops_async(struct crypto_tfm *tfm); + +static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) +{ + return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); +} + +static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) +{ + return container_of(__crypto_hash_alg_common(alg), struct 
ahash_alg, + halg); +} + +static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, + unsigned int reqsize) +{ + tfm->reqsize = reqsize; +} + +static inline struct crypto_instance *ahash_crypto_instance( + struct ahash_instance *inst) +{ + return container_of(&inst->alg.halg.base, struct crypto_instance, alg); +} + +static inline struct ahash_instance *ahash_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct ahash_instance, alg.halg.base); +} + +static inline void *ahash_instance_ctx(struct ahash_instance *inst) +{ + return crypto_instance_ctx(ahash_crypto_instance(inst)); +} + +static inline unsigned int ahash_instance_headroom(void) +{ + return sizeof(struct ahash_alg) - sizeof(struct crypto_alg); +} + +static inline struct ahash_instance *ahash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance(name, alg, ahash_instance_headroom()); +} + +static inline void ahash_request_complete(struct ahash_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ahash_request_flags(struct ahash_request *req) +{ + return req->base.flags; +} + +static inline struct crypto_ahash *crypto_spawn_ahash( + struct crypto_ahash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline int ahash_enqueue_request(struct crypto_queue *queue, + struct ahash_request *request) +{ + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ahash_request *ahash_dequeue_request( + struct crypto_queue *queue) +{ + return ahash_request_cast(crypto_dequeue_request(queue)); +} + +static inline void *crypto_shash_ctx(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *shash_crypto_instance( + struct shash_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct shash_instance *shash_instance( + struct crypto_instance *inst) +{ + return container_of(__crypto_shash_alg(&inst->alg), + struct shash_instance, alg); +} + +static inline void *shash_instance_ctx(struct shash_instance *inst) +{ + return crypto_instance_ctx(shash_crypto_instance(inst)); +} + +static inline struct shash_instance *shash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance(name, alg, + sizeof(struct shash_alg) - sizeof(*alg)); +} + +static inline struct crypto_shash *crypto_spawn_shash( + struct crypto_shash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_shash, base); +} + +#endif /* _CRYPTO_INTERNAL_HASH_H */ + diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h new file mode 100644 index 0000000..659b642 --- /dev/null +++ b/include/crypto/internal/kpp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + */ +#ifndef _CRYPTO_KPP_INT_H +#define _CRYPTO_KPP_INT_H +#include +#include + +/* + * Transform internal helpers. 
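+ *
+ * E.g. (sketch only; "my_kpp_reqctx" is a hypothetical driver-private
+ * type sized via the algorithm's reqsize):
+ *
+ *	struct my_kpp_reqctx *rctx = kpp_request_ctx(req);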
+ */ +static inline void *kpp_request_ctx(struct kpp_request *req) +{ + return req->__ctx; +} + +static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void kpp_request_complete(struct kpp_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *kpp_alg_name(struct crypto_kpp *tfm) +{ + return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; +} + +/** + * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm + * + * Function registers an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_kpp(struct kpp_alg *alg); + +/** + * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive + * algorithm + * + * Function unregisters an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_kpp(struct kpp_alg *alg); + +#endif diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h new file mode 100644 index 0000000..e0711b6 --- /dev/null +++ b/include/crypto/internal/rng.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman + * Copyright (c) 2015 Herbert Xu + */ + +#ifndef _CRYPTO_INTERNAL_RNG_H +#define _CRYPTO_INTERNAL_RNG_H + +#include +#include + +int crypto_register_rng(struct rng_alg *alg); +void crypto_unregister_rng(struct rng_alg *alg); +int crypto_register_rngs(struct rng_alg *algs, int count); +void crypto_unregister_rngs(struct rng_alg *algs, int count); + +#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) +int crypto_del_default_rng(void); +#else +static inline int crypto_del_default_rng(void) +{ + return 0; +} +#endif + +static inline void *crypto_rng_ctx(struct crypto_rng *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void crypto_rng_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) +{ + crypto_rng_alg(tfm)->set_ent(tfm, data, len); +} + +#endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h new file mode 100644 index 0000000..e870133 --- /dev/null +++ b/include/crypto/internal/rsa.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RSA internal helpers + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + */ +#ifndef _RSA_HELPER_ +#define _RSA_HELPER_ +#include + +/** + * rsa_key - RSA key structure + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of n raw byte stream + * @q : RSA prime factor q of n raw byte stream + * @dp : RSA exponent d mod (p - 1) raw byte stream + * @dq : RSA exponent d mod (q - 1) raw byte stream + * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of p field + * @q_sz : length in bytes of q field + * @dp_sz : length in bytes of dp field + * @dq_sz : length in bytes of dq field + * @qinv_sz : length in bytes of qinv field + */ +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t 
n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +extern struct crypto_template rsa_pkcs1pad_tmpl; +#endif diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h new file mode 100644 index 0000000..6727ef0 --- /dev/null +++ b/include/crypto/internal/scompress.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Synchronous Compression operations + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Author: Giovanni Cabiddu + */ +#ifndef _CRYPTO_SCOMP_INT_H +#define _CRYPTO_SCOMP_INT_H +#include + +#define SCOMP_SCRATCH_SIZE 131072 + +struct crypto_scomp { + struct crypto_tfm base; +}; + +/** + * struct scomp_alg - synchronous compression algorithm + * + * @alloc_ctx: Function allocates algorithm specific context + * @free_ctx: Function frees context allocated with alloc_ctx + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @base: Common crypto API algorithm data structure + */ +struct scomp_alg { + void *(*alloc_ctx)(struct crypto_scomp *tfm); + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); + int (*compress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + int (*decompress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + struct crypto_alg base; +}; + +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct scomp_alg, base); +} + +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_scomp, base); +} + +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_scomp(struct crypto_scomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); +} + +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) +{ + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); +} + +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) +{ + return crypto_scomp_alg(tfm)->alloc_ctx(tfm); +} + +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, + void *ctx) +{ + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); +} + +static inline int crypto_scomp_compress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); +} + +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, + void *ctx) +{ + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, + ctx); +} + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); +void crypto_acomp_scomp_free_ctx(struct acomp_req *req); + +/** + * crypto_register_scomp() -- Register synchronous compression algorithm + * + * Function registers an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ 
+int crypto_register_scomp(struct scomp_alg *alg); + +/** + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm + * + * Function unregisters an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_unregister_scomp(struct scomp_alg *alg); + +int crypto_register_scomps(struct scomp_alg *algs, int count); +void crypto_unregister_scomps(struct scomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h new file mode 100644 index 0000000..d231624 --- /dev/null +++ b/include/crypto/internal/simd.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared crypto simd helpers + */ + +#ifndef _CRYPTO_INTERNAL_SIMD_H +#define _CRYPTO_INTERNAL_SIMD_H + +#include +#include + +/* skcipher support */ + +struct simd_skcipher_alg; +struct skcipher_alg; + +struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_skcipher_alg *simd_skcipher_create(const char *algname, + const char *basename); +void simd_skcipher_free(struct simd_skcipher_alg *alg); + +int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +void simd_unregister_skciphers(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +/* AEAD support */ + +struct simd_aead_alg; +struct aead_alg; + +struct simd_aead_alg *simd_aead_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_aead_alg *simd_aead_create(const char *algname, + const char *basename); +void simd_aead_free(struct simd_aead_alg *alg); + +int simd_register_aeads_compat(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +void simd_unregister_aeads(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +/* + * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or + * access the SIMD register file? + * + * This delegates to may_use_simd(), except that this also returns false if SIMD + * in crypto code has been temporarily disabled on this CPU by the crypto + * self-tests, in order to test the no-SIMD fallback code. This override is + * currently limited to configurations where the extra self-tests are enabled, + * because it might be a bit too invasive to be part of the regular self-tests. + * + * This is a macro so that , which some architectures don't have, + * doesn't have to be included directly here. + */ +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test); +#define crypto_simd_usable() \ + (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test)) +#else +#define crypto_simd_usable() may_use_simd() +#endif + +#endif /* _CRYPTO_INTERNAL_SIMD_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h new file mode 100644 index 0000000..3175dfe --- /dev/null +++ b/include/crypto/internal/skcipher.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Symmetric key ciphers. 
+ * + * Copyright (c) 2007 Herbert Xu + */ + +#ifndef _CRYPTO_INTERNAL_SKCIPHER_H +#define _CRYPTO_INTERNAL_SKCIPHER_H + +#include +#include +#include +#include + +struct aead_request; +struct rtattr; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + void *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + struct list_head buffers; + + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + + unsigned int ivsize; + + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance *skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline void crypto_set_skcipher_spawn( + struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct skcipher_alg *crypto_skcipher_spawn_alg( + struct crypto_skcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct skcipher_alg, base); +} + +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_skcipher_spawn_alg(spawn); +} + +static inline struct crypto_skcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + skcipher->reqsize = reqsize; +} + +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst); + +int skcipher_walk_done(struct skcipher_walk *walk, int err); +int skcipher_walk_virt(struct skcipher_walk *walk, + struct skcipher_request *req, + bool atomic); +void skcipher_walk_atomise(struct skcipher_walk *walk); +int skcipher_walk_async(struct skcipher_walk *walk, + struct skcipher_request *req); +int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, + bool atomic); +int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +void 
skcipher_walk_complete(struct skcipher_walk *walk, int err); + +static inline void skcipher_walk_abort(struct skcipher_walk *walk) +{ + skcipher_walk_done(walk, -ECANCELED); +} + +static inline void ablkcipher_request_complete(struct ablkcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) +{ + return req->base.flags; +} + +static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *skcipher_request_ctx(struct skcipher_request *req) +{ + return req->__ctx; +} + +static inline u32 skcipher_request_flags(struct skcipher_request *req) +{ + return req->base.flags; +} + +static inline unsigned int crypto_skcipher_alg_min_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.min_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.min_keysize; + + return alg->min_keysize; +} + +static inline unsigned int crypto_skcipher_alg_max_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.max_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.max_keysize; + + return alg->max_keysize; +} + +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->walksize; +} + +/** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + +/* Helpers for simple block cipher modes of operation */ +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; /* underlying block cipher */ +}; +static inline struct crypto_cipher * +skcipher_cipher_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + + return ctx->cipher; +} +struct skcipher_instance * +skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_alg **cipher_alg_ret); + +#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ + diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h new file mode 100644 index 0000000..cd9a9b5 --- /dev/null +++ b/include/crypto/kpp.h @@ -0,0 +1,360 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + */ + +#ifndef _CRYPTO_KPP_ +#define _CRYPTO_KPP_ +#include + +/** + * struct kpp_request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer + * @dst_len: Size of the output buffer. 
It needs to be at least
+ *		as big as the expected result depending on the operation.
+ *		After the operation it will be updated with the actual size of
+ *		the result. In case of error where the dst sgl size was
+ *		insufficient, it will be updated to the size required for the
+ *		operation.
+ * @__ctx:	Start of private context data
+ */
+struct kpp_request {
+	struct crypto_async_request base;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	unsigned int src_len;
+	unsigned int dst_len;
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_kpp - user-instantiated object which encapsulates
+ * algorithms and core processing logic
+ *
+ * @base:	Common crypto API algorithm data structure
+ */
+struct crypto_kpp {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct kpp_alg - generic key-agreement protocol primitives
+ *
+ * @set_secret:		Function invokes the protocol specific function to
+ *			store the secret private key along with parameters.
+ *			The implementation knows how to decode the buffer
+ * @generate_public_key:	Function generates the public key to be sent
+ *			to the counterpart. In case of error, where the output
+ *			is not big enough, req->dst_len will be updated to the
+ *			size required
+ * @compute_shared_secret:	Function computes the shared secret as defined
+ *			by the algorithm. The result is given back to the
+ *			user. In case of error, where the output is not big
+ *			enough, req->dst_len will be updated to the size
+ *			required
+ * @max_size:		Function returns the size of the output buffer
+ * @init:		Initialize the object. This is called only once at
+ *			instantiation time, in case the cryptographic hardware
+ *			needs to be initialized. A software fallback should be
+ *			put in place here.
+ * @exit:		Undo everything @init did.
+ *
+ * @reqsize:		Request context size required by algorithm
+ *			implementation
+ * @base:		Common crypto API algorithm data structure
+ */
+struct kpp_alg {
+	int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
+			  unsigned int len);
+	int (*generate_public_key)(struct kpp_request *req);
+	int (*compute_shared_secret)(struct kpp_request *req);
+
+	unsigned int (*max_size)(struct crypto_kpp *tfm);
+
+	int (*init)(struct crypto_kpp *tfm);
+	void (*exit)(struct crypto_kpp *tfm);
+
+	unsigned int reqsize;
+	struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Key-agreement Protocol Primitives API
+ *
+ * The KPP API is used with the algorithm type
+ * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_kpp() - allocate KPP tfm handle
+ * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh")
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a kpp algorithm. The returned struct crypto_kpp
+ * is required for any following API invocation
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case of
+ * an error, PTR_ERR() returns the error code.
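+ *
+ * A minimal allocation sketch (assuming a "dh" implementation is
+ * available; only the handle check is shown):
+ *
+ *	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
+ *
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);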
+ */ +struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) +{ + return &tfm->base; +} + +static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct kpp_alg, base); +} + +static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_kpp, base); +} + +static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) +{ + return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) +{ + return crypto_kpp_alg(tfm)->reqsize; +} + +static inline void kpp_request_set_tfm(struct kpp_request *req, + struct crypto_kpp *tfm) +{ + req->base.tfm = crypto_kpp_tfm(tfm); +} + +static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) +{ + return __crypto_kpp_tfm(req->base.tfm); +} + +static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm) +{ + return crypto_tfm_get_flags(crypto_kpp_tfm(tfm)); +} + +static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags); +} + +/** + * crypto_free_kpp() - free KPP tfm handle + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + */ +static inline void crypto_free_kpp(struct crypto_kpp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm)); +} + +/** + * kpp_request_alloc() - allocates kpp request + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, + gfp_t gfp) +{ + struct kpp_request *req; + + req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); + if (likely(req)) + kpp_request_set_tfm(req, tfm); + + return req; +} + +/** + * kpp_request_free() - zeroize and free kpp request + * + * @req: request to free + */ +static inline void kpp_request_free(struct kpp_request *req) +{ + kzfree(req); +} + +/** + * kpp_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. 
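+ *
+ * A common synchronous-wait pattern (sketch only; the crypto_wait helpers
+ * live in <linux/crypto.h>) is:
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				 crypto_req_done, &wait);
+ *	err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);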
+ * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void kpp_request_set_callback(struct kpp_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * kpp_request_set_input() - Sets input buffer + * + * Sets parameters required by generate_public_key + * + * @req: kpp request + * @input: ptr to input scatter list + * @input_len: size of the input scatter list + */ +static inline void kpp_request_set_input(struct kpp_request *req, + struct scatterlist *input, + unsigned int input_len) +{ + req->src = input; + req->src_len = input_len; +} + +/** + * kpp_request_set_output() - Sets output buffer + * + * Sets parameters required by kpp operation + * + * @req: kpp request + * @output: ptr to output scatter list + * @output_len: size of the output scatter list + */ +static inline void kpp_request_set_output(struct kpp_request *req, + struct scatterlist *output, + unsigned int output_len) +{ + req->dst = output; + req->dst_len = output_len; +} + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN, + CRYPTO_KPP_SECRET_TYPE_DH, + CRYPTO_KPP_SECRET_TYPE_ECDH, +}; + +/** + * struct kpp_secret - small header for packing secret buffer + * + * @type: define type of secret. Each kpp type will define its own + * @len: specify the len of the secret, include the header, that + * follows the struct + */ +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +/** + * crypto_kpp_set_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for a given alg. + * + * @tfm: tfm handle + * @buffer: Buffer holding the packet representation of the private + * key. The structure of the packet key depends on the particular + * KPP implementation. Packing and unpacking helpers are provided + * for ECDH and DH (see the respective header files for those + * implementations). + * @len: Length of the packet private key buffer. + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, + const void *buffer, unsigned int len) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->set_secret(tfm, buffer, len); + crypto_stats_kpp_set_secret(calg, ret); + return ret; +} + +/** + * crypto_kpp_generate_public_key() - Invoke kpp operation + * + * Function invokes the specific kpp operation for generating the public part + * for a given kpp algorithm. + * + * To generate a private key, the caller should use a random number generator. + * The output of the requested length serves as the private key. + * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->generate_public_key(req); + crypto_stats_kpp_generate_public_key(calg, ret); + return ret; +} + +/** + * crypto_kpp_compute_shared_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for computing the shared secret + * for a given kpp algorithm. 
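+ *
+ * In a Diffie-Hellman style exchange (sketch of the overall flow): each
+ * side calls crypto_kpp_set_secret(), then crypto_kpp_generate_public_key(),
+ * the peers swap the resulting public keys, feed the peer key in via
+ * kpp_request_set_input(), and finally call this function to derive the
+ * shared secret.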
+ * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->compute_shared_secret(req); + crypto_stats_kpp_compute_shared_secret(calg, ret); + return ret; +} + +/** + * crypto_kpp_maxsize() - Get len for output buffer + * + * Function returns the output buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + */ +static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->max_size(tfm); +} + +#endif diff --git a/include/crypto/md5.h b/include/crypto/md5.h new file mode 100644 index 0000000..cf9e9de --- /dev/null +++ b/include/crypto/md5.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_MD5_H +#define _CRYPTO_MD5_H + +#include + +#define MD5_DIGEST_SIZE 16 +#define MD5_HMAC_BLOCK_SIZE 64 +#define MD5_BLOCK_WORDS 16 +#define MD5_HASH_WORDS 4 + +#define MD5_H0 0x67452301UL +#define MD5_H1 0xefcdab89UL +#define MD5_H2 0x98badcfeUL +#define MD5_H3 0x10325476UL + +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + +struct md5_state { + u32 hash[MD5_HASH_WORDS]; + u32 block[MD5_BLOCK_WORDS]; + u64 byte_count; +}; + +#endif diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h new file mode 100644 index 0000000..53c0442 --- /dev/null +++ b/include/crypto/nhpoly1305.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the NHPoly1305 hash function. + */ + +#ifndef _NHPOLY1305_H +#define _NHPOLY1305_H + +#include +#include + +/* NH parameterization: */ + +/* Endianness: little */ +/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */ + +/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */ +#define NH_PAIR_STRIDE 2 +#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32)) + +/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */ +#define NH_NUM_PASSES 4 +#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64)) + +/* Max message size: 1024 bytes (32x compression factor) */ +#define NH_NUM_STRIDES 64 +#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES) +#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32)) +#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \ + NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1)) +#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32)) + +#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES) + +struct nhpoly1305_key { + struct poly1305_key poly_key; + u32 nh_key[NH_KEY_WORDS]; +}; + +struct nhpoly1305_state { + + /* Running total of polynomial evaluation */ + struct poly1305_state poly_state; + + /* Partial block buffer */ + u8 buffer[NH_MESSAGE_UNIT]; + unsigned int buflen; + + /* + * Number of bytes remaining until the current NH message reaches + * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash. 
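+ *
+ * (Worked out from the constants above: NH_MESSAGE_WORDS = 2 * 2 * 64 =
+ * 256, i.e. NH_MESSAGE_BYTES = 1024, while NH_HASH_BYTES = 4 * 8 = 32;
+ * 1024 / 32 gives the 32x compression factor quoted for NH.)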
+ */ + unsigned int nh_remaining; + + __le64 nh_hash[NH_NUM_PASSES]; +}; + +typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]); + +int crypto_nhpoly1305_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen); + +int crypto_nhpoly1305_init(struct shash_desc *desc); +int crypto_nhpoly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_nhpoly1305_update_helper(struct shash_desc *desc, + const u8 *src, unsigned int srclen, + nh_t nh_fn); +int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst); +int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, + nh_t nh_fn); + +#endif /* _NHPOLY1305_H */ diff --git a/include/crypto/null.h b/include/crypto/null.h new file mode 100644 index 0000000..0ef577c --- /dev/null +++ b/include/crypto/null.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Values for NULL algorithms */ + +#ifndef _CRYPTO_NULL_H +#define _CRYPTO_NULL_H + +#define NULL_KEY_SIZE 0 +#define NULL_BLOCK_SIZE 1 +#define NULL_DIGEST_SIZE 0 +#define NULL_IV_SIZE 0 + +struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void); +void crypto_put_default_null_skcipher(void); + +#endif diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h new file mode 100644 index 0000000..6de70e8 --- /dev/null +++ b/include/crypto/padlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for VIA PadLock + * + * Copyright (c) 2004 Michal Ludvig + */ + +#ifndef _CRYPTO_PADLOCK_H +#define _CRYPTO_PADLOCK_H + +#define PADLOCK_ALIGNMENT 16 + +#define PFX KBUILD_MODNAME ": " + +#define PADLOCK_CRA_PRIORITY 300 +#define PADLOCK_COMPOSITE_PRIORITY 400 + +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + +#endif /* _CRYPTO_PADLOCK_H */ diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h new file mode 100644 index 0000000..b9bc343 --- /dev/null +++ b/include/crypto/pcrypt.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * pcrypt - Parallel crypto engine. + * + * Copyright (C) 2009 secunet Security Networks AG + * Copyright (C) 2009 Steffen Klassert + */ + +#ifndef _CRYPTO_PCRYPT_H +#define _CRYPTO_PCRYPT_H + +#include +#include +#include + +struct pcrypt_request { + struct padata_priv padata; + void *data; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +static inline void *pcrypt_request_ctx(struct pcrypt_request *req) +{ + return req->__ctx; +} + +static inline +struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req) +{ + return &req->padata; +} + +static inline +struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata) +{ + return container_of(padata, struct pcrypt_request, padata); +} + +#endif diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h new file mode 100644 index 0000000..38ec7f5 --- /dev/null +++ b/include/crypto/pkcs7.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* PKCS#7 crypto data parser + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _CRYPTO_PKCS7_H +#define _CRYPTO_PKCS7_H + +#include +#include +#include + +struct key; +struct pkcs7_message; + +/* + * pkcs7_parser.c + */ +extern struct pkcs7_message *pkcs7_parse_message(const void *data, + size_t datalen); +extern void pkcs7_free_message(struct pkcs7_message *pkcs7); + +extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, + const void **_data, size_t *_datalen, + size_t *_headerlen); + +/* + * pkcs7_trust.c + */ +extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7, + struct key *trust_keyring); + +/* + * pkcs7_verify.c + */ +extern int pkcs7_verify(struct pkcs7_message *pkcs7, + enum key_being_used_for usage); + +extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, + const void *data, size_t datalen); + +extern int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, + u32 *len, enum hash_algo *hash_algo); + +#endif /* _CRYPTO_PKCS7_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h new file mode 100644 index 0000000..34317ed --- /dev/null +++ b/include/crypto/poly1305.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Poly1305 algorithm + */ + +#ifndef _CRYPTO_POLY1305_H +#define _CRYPTO_POLY1305_H + +#include +#include + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_KEY_SIZE 32 +#define POLY1305_DIGEST_SIZE 16 + +struct poly1305_key { + u32 r[5]; /* key, base 2^26 */ +}; + +struct poly1305_state { + u32 h[5]; /* accumulator, base 2^26 */ +}; + +struct poly1305_desc_ctx { + /* key */ + struct poly1305_key r; + /* finalize key */ + u32 s[4]; + /* accumulator */ + struct poly1305_state h; + /* partial buffer */ + u8 buf[POLY1305_BLOCK_SIZE]; + /* bytes used in partial buffer */ + unsigned int buflen; + /* r key has been set */ + bool rset; + /* s key has been set */ + bool sset; +}; + +/* + * Poly1305 core functions. These implement the ε-almost-∆-universal hash + * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce + * ("s key") at the end. They also only support block-aligned inputs. + */ +void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); +static inline void poly1305_core_init(struct poly1305_state *state) +{ + memset(state->h, 0, sizeof(state->h)); +} +void poly1305_core_blocks(struct poly1305_state *state, + const struct poly1305_key *key, + const void *src, unsigned int nblocks); +void poly1305_core_emit(const struct poly1305_state *state, void *dst); + +/* Crypto API helper functions for the Poly1305 MAC */ +int crypto_poly1305_init(struct shash_desc *desc); +unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen); +int crypto_poly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); + +#endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h new file mode 100644 index 0000000..0588ef3 --- /dev/null +++ b/include/crypto/public_key.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Asymmetric public-key algorithm definitions + * + * See Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_PUBLIC_KEY_H +#define _LINUX_PUBLIC_KEY_H + +#include +#include + +/* + * Cryptographic data for the public-key subtype of the asymmetric key type. 
+ *
+ * Note that this may include the private part of the key as well as the
+ * public part.
+ */
+struct public_key {
+	void *key;
+	u32 keylen;
+	enum OID algo;
+	void *params;
+	u32 paramlen;
+	bool key_is_private;
+	const char *id_type;
+	const char *pkey_algo;
+};
+
+extern void public_key_free(struct public_key *key);
+
+/*
+ * Public key cryptography signature data
+ */
+struct public_key_signature {
+	struct asymmetric_key_id *auth_ids[2];
+	u8 *s;			/* Signature */
+	u32 s_size;		/* Number of bytes in signature */
+	u8 *digest;
+	u8 digest_size;		/* Number of bytes in digest */
+	const char *pkey_algo;
+	const char *hash_algo;
+	const char *encoding;
+};
+
+extern void public_key_signature_free(struct public_key_signature *sig);
+
+extern struct asymmetric_key_subtype public_key_subtype;
+
+struct key;
+struct key_type;
+union key_payload;
+
+extern int restrict_link_by_signature(struct key *dest_keyring,
+				      const struct key_type *type,
+				      const union key_payload *payload,
+				      struct key *trust_keyring);
+
+extern int restrict_link_by_key_or_keyring(struct key *dest_keyring,
+					   const struct key_type *type,
+					   const union key_payload *payload,
+					   struct key *trusted);
+
+extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
+						 const struct key_type *type,
+						 const union key_payload *payload,
+						 struct key *trusted);
+
+extern int query_asymmetric_key(const struct kernel_pkey_params *,
+				struct kernel_pkey_query *);
+
+extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int create_signature(struct kernel_pkey_params *, const void *, void *);
+extern int verify_signature(const struct key *,
+			    const struct public_key_signature *);
+
+int public_key_verify_signature(const struct public_key *pkey,
+				const struct public_key_signature *sig);
+
+#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
new file mode 100644
index 0000000..8b4b844
--- /dev/null
+++ b/include/crypto/rng.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RNG: Random Number Generator algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_RNG_H
+#define _CRYPTO_RNG_H
+
+#include <linux/crypto.h>
+
+struct crypto_rng;
+
+/**
+ * struct rng_alg - random number generator definition
+ *
+ * @generate:	The function defined by this variable obtains a
+ *		random number. The random number generator transform
+ *		must generate the random number out of the context
+ *		provided with this call, plus any additional data
+ *		if provided to the call.
+ * @seed:	Seed or reseed the random number generator. With the
+ *		invocation of this function call, the random number
+ *		generator shall become ready for generation. If the
+ *		random number generator requires a seed for setting
+ *		up a new state, the seed must be provided by the
+ *		consumer while invoking this function. The required
+ *		size of the seed is defined with @seedsize.
+ * @set_ent:	Set entropy that would otherwise be obtained from the
+ *		entropy source. Internal use only.
+ * @seedsize:	The seed size required to initialize the random number
+ *		generator. Some random number generators do not require
+ *		a seed, as the seeding is implemented internally without
+ *		support from the consumer. In this case, the seed size
+ *		is set to zero.
+ * @base:	Common crypto API algorithm data structure.
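+ *
+ * A driver typically wires up these callbacks with a static initializer and
+ * registers it with crypto_register_rng() from <crypto/internal/rng.h>. A
+ * minimal sketch (all "my_*" names are placeholders, not a real driver):
+ *
+ *	static struct rng_alg my_rng_alg = {
+ *		.generate	= my_rng_generate,
+ *		.seed		= my_rng_seed,
+ *		.seedsize	= 0,
+ *		.base		= {
+ *			.cra_name		= "stdrng",
+ *			.cra_driver_name	= "my_rng",
+ *			.cra_priority		= 100,
+ *			.cra_ctxsize		= sizeof(struct my_rng_ctx),
+ *			.cra_module		= THIS_MODULE,
+ *		},
+ *	};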
+ */
+struct rng_alg {
+	int (*generate)(struct crypto_rng *tfm,
+			const u8 *src, unsigned int slen,
+			u8 *dst, unsigned int dlen);
+	int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+	void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
+			unsigned int len);
+
+	unsigned int seedsize;
+
+	struct crypto_alg base;
+};
+
+struct crypto_rng {
+	struct crypto_tfm base;
+};
+
+extern struct crypto_rng *crypto_default_rng;
+
+int crypto_get_default_rng(void);
+void crypto_put_default_rng(void);
+
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto).
+ */
+
+/**
+ * crypto_alloc_rng() - allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      random number generator
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_rng_alg() - obtain the RNG algorithm descriptor
+ * @tfm: cipher handle
+ *
+ * Return: pointer to the struct rng_alg backing the random number generator
+ *	   referenced by the cipher handle
+ */
+static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
+{
+	return container_of(crypto_rng_tfm(tfm)->__crt_alg,
+			    struct rng_alg, base);
+}
+
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_rng(struct crypto_rng *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
+}
+
+/**
+ * crypto_rng_generate() - get random number
+ * @tfm: cipher handle
+ * @src: Input buffer holding additional data, may be NULL
+ * @slen: Length of additional data
+ * @dst: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random
+ * numbers using the random number generator referenced by the
+ * cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_generate(struct crypto_rng *tfm,
+				      const u8 *src, unsigned int slen,
+				      u8 *dst, unsigned int dlen)
+{
+	struct crypto_alg *alg = tfm->base.__crt_alg;
+	int ret;
+
+	crypto_stats_get(alg);
+	ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+	crypto_stats_rng_generate(alg, dlen, ret);
+	return ret;
+}
+
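+/*
+ * Example (illustrative): fill a buffer from the system-default RNG.
+ *
+ *	u8 buf[16];
+ *	int err;
+ *
+ *	err = crypto_get_default_rng();
+ *	if (err)
+ *		return err;
+ *	err = crypto_rng_get_bytes(crypto_default_rng, buf, sizeof(buf));
+ *	crypto_put_default_rng();
+ */
+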
+/**
+ * crypto_rng_get_bytes() - get random bytes
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using
+ * the random number generator referenced by the cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
+				       u8 *rdata, unsigned int dlen)
+{
+	return crypto_rng_generate(tfm, NULL, 0, rdata, dlen);
+}
+
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller-provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires a
+ * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
+ * The seed is provided as a parameter to this function call. The provided
+ * seed should have the length of the seed size reported by
+ * crypto_rng_seedsize().
+ *
+ * Return: 0 if the re-initialization was successful; < 0 if an error occurred
+ */
+int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
+		     unsigned int slen);
+
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not implement or require reseeding. For example,
+ * the SP800-90A DRBGs implement an automated reseeding after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
+static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
+{
+	return crypto_rng_alg(tfm)->seedsize;
+}
+
+#endif
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
new file mode 100644
index 0000000..c837d07
--- /dev/null
+++ b/include/crypto/scatterwalk.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic scatter and gather helpers.
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
+ * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_SCATTERWALK_H
+#define _CRYPTO_SCATTERWALK_H
+
+#include <crypto/algapi.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+					    struct scatterlist *sg, int num)
+{
+	if (sg)
+		sg_chain(head, num, sg);
+	else
+		sg_mark_end(head);
+}
+
+static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
+{
+	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
+	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
+	return len_this_page > len ? len : len_this_page;
+}
+
+static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
+					     unsigned int nbytes)
+{
+	unsigned int len_this_page = scatterwalk_pagelen(walk);
+	return nbytes > len_this_page ?
len_this_page : nbytes; +} + +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, + unsigned int alignmask) +{ + return !(walk->offset & alignmask); +} + +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr) +{ + kunmap_atomic(vaddr); +} + +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +static inline void *scatterwalk_map(struct scatter_walk *walk) +{ + return kmap_atomic(scatterwalk_page(walk)) + + offset_in_page(walk->offset); +} + +static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, + unsigned int more) +{ + if (out) { + struct page *page; + + page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); + /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as + * PageSlab cannot be optimised away per se due to + * use of volatile pointer. + */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) + flush_dcache_page(page); + } + + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); +} + +static inline void scatterwalk_done(struct scatter_walk *walk, int out, + int more) +{ + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); +} + +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + +struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], + struct scatterlist *src, + unsigned int len); + +#endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h new file mode 100644 index 0000000..7dd780c --- /dev/null +++ b/include/crypto/serpent.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for serpent algorithms + */ + +#ifndef _CRYPTO_SERPENT_H +#define _CRYPTO_SERPENT_H + +#include +#include + +#define SERPENT_MIN_KEY_SIZE 0 +#define SERPENT_MAX_KEY_SIZE 32 +#define SERPENT_EXPKEY_WORDS 132 +#define SERPENT_BLOCK_SIZE 16 + +struct serpent_ctx { + u32 expkey[SERPENT_EXPKEY_WORDS]; +}; + +int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, + unsigned int keylen); +int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/sha.h b/include/crypto/sha.h new file mode 100644 index 0000000..5c2132c --- /dev/null +++ b/include/crypto/sha.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA algorithms + */ + +#ifndef _CRYPTO_SHA_H +#define _CRYPTO_SHA_H + +#include + +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 + +#define SHA1_H0 0x67452301UL +#define 
SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL +#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + +extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE]; + +extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE]; + +struct sha1_state { + u32 state[SHA1_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SHA1_BLOCK_SIZE]; +}; + +struct sha256_state { + u32 state[SHA256_DIGEST_SIZE / 4]; + u64 count; + u8 buf[SHA256_BLOCK_SIZE]; +}; + +struct sha512_state { + u64 state[SHA512_DIGEST_SIZE / 8]; + u64 count[2]; + u8 buf[SHA512_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +/* + * Stand-alone implementation of the SHA256 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. 
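+ *
+ * A minimal sketch of this stand-alone interface (error handling omitted;
+ * "data" and "len" are the caller's message buffer and its length):
+ *
+ *	struct sha256_state sctx;
+ *	u8 digest[SHA256_DIGEST_SIZE];
+ *
+ *	sha256_init(&sctx);
+ *	sha256_update(&sctx, data, len);
+ *	sha256_final(&sctx, digest);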
+ * + * For details see lib/crypto/sha256.c + */ + +static inline int sha256_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; + + return 0; +} +extern int sha256_update(struct sha256_state *sctx, const u8 *input, + unsigned int length); +extern int sha256_final(struct sha256_state *sctx, u8 *hash); + +static inline int sha224_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; + + return 0; +} +extern int sha224_update(struct sha256_state *sctx, const u8 *input, + unsigned int length); +extern int sha224_final(struct sha256_state *sctx, u8 *hash); + +#endif diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h new file mode 100644 index 0000000..20fd1f7 --- /dev/null +++ b/include/crypto/sha1_base.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha1_base.h - core logic for SHA-1 implementations + * + * Copyright (C) 2015 Linaro Ltd + */ + +#ifndef _CRYPTO_SHA1_BASE_H +#define _CRYPTO_SHA1_BASE_H + +#include +#include +#include +#include + +#include + +typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks); + +static inline int sha1_base_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA1_H0; + sctx->state[1] = SHA1_H1; + sctx->state[2] = SHA1_H2; + sctx->state[3] = SHA1_H3; + sctx->state[4] = SHA1_H4; + sctx->count = 0; + + return 0; +} + +static inline int sha1_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha1_block_fn *block_fn) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA1_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SHA1_BLOCK_SIZE; + len %= SHA1_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA1_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sha1_base_do_finalize(struct shash_desc *desc, + sha1_block_fn *block_fn) +{ + const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64); + struct sha1_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + 
*sctx = (struct sha1_state){}; + return 0; +} + +#endif /* _CRYPTO_SHA1_BASE_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h new file mode 100644 index 0000000..cea60cf --- /dev/null +++ b/include/crypto/sha256_base.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha256_base.h - core logic for SHA-256 implementations + * + * Copyright (C) 2015 Linaro Ltd + */ + +#ifndef _CRYPTO_SHA256_BASE_H +#define _CRYPTO_SHA256_BASE_H + +#include +#include +#include +#include + +#include + +typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src, + int blocks); + +static inline int sha224_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + return sha224_init(sctx); +} + +static inline int sha256_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + return sha256_init(sctx); +} + +static inline int sha256_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha256_block_fn *block_fn) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA256_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA256_BLOCK_SIZE; + len %= SHA256_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA256_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha256_base_do_finalize(struct shash_desc *desc, + sha256_block_fn *block_fn) +{ + const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32)) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sha256_state){}; + return 0; +} + +#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 0000000..080f60c --- /dev/null +++ b/include/crypto/sha3.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA-3 algorithms + */ +#ifndef __CRYPTO_SHA3_H__ +#define __CRYPTO_SHA3_H__ + +#define SHA3_224_DIGEST_SIZE (224 / 8) +#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) + +#define SHA3_256_DIGEST_SIZE (256 / 8) +#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) + +#define SHA3_384_DIGEST_SIZE (384 / 8) +#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) + +#define SHA3_512_DIGEST_SIZE (512 / 8) +#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) + +struct sha3_state { + u64 st[25]; + unsigned int 
rsiz; + unsigned int rsizw; + + unsigned int partial; + u8 buf[SHA3_224_BLOCK_SIZE]; +}; + +int crypto_sha3_init(struct shash_desc *desc); +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +int crypto_sha3_final(struct shash_desc *desc, u8 *out); + +#endif diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h new file mode 100644 index 0000000..fb19c77 --- /dev/null +++ b/include/crypto/sha512_base.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha512_base.h - core logic for SHA-512 implementations + * + * Copyright (C) 2015 Linaro Ltd + */ + +#ifndef _CRYPTO_SHA512_BASE_H +#define _CRYPTO_SHA512_BASE_H + +#include +#include +#include +#include + +#include + +typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src, + int blocks); + +static inline int sha384_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA512_H0; + sctx->state[1] = SHA512_H1; + sctx->state[2] = SHA512_H2; + sctx->state[3] = SHA512_H3; + sctx->state[4] = SHA512_H4; + sctx->state[5] = SHA512_H5; + sctx->state[6] = SHA512_H6; + sctx->state[7] = SHA512_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha512_block_fn *block_fn) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->count[0] += len; + if (sctx->count[0] < len) + sctx->count[1]++; + + if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA512_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA512_BLOCK_SIZE; + len %= SHA512_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA512_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha512_base_do_finalize(struct shash_desc *desc, + sha512_block_fn *block_fn) +{ + const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); + bits[1] = cpu_to_be64(sctx->count[0] << 3); + block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *digest = (__be64 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64)) + 
put_unaligned_be64(sctx->state[i], digest++); + + *sctx = (struct sha512_state){}; + return 0; +} + +#endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h new file mode 100644 index 0000000..aada879 --- /dev/null +++ b/include/crypto/skcipher.h @@ -0,0 +1,619 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007-2015 Herbert Xu + */ + +#ifndef _CRYPTO_SKCIPHER_H +#define _CRYPTO_SKCIPHER_H + +#include +#include +#include + +/** + * struct skcipher_request - Symmetric key cipher request + * @cryptlen: Number of bytes to encrypt or decrypt + * @iv: Initialisation Vector + * @src: Source SG list + * @dst: Destination SG list + * @base: Underlying async request request + * @__ctx: Start of private context data + */ +struct skcipher_request { + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + struct crypto_async_request base; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_skcipher { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + + unsigned int ivsize; + unsigned int reqsize; + unsigned int keysize; + + struct crypto_tfm base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. 
This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @walksize: Equal to the chunk size except in cases where the algorithm is + * considerably more efficient if it can operate on multiple chunks + * in parallel. Should be a multiple of chunksize. + * @base: Definition of a generic crypto algorithm. + * + * All fields except @ivsize are mandatory and must be filled. + */ +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + int (*init)(struct crypto_skcipher *tfm); + void (*exit)(struct crypto_skcipher *tfm); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int walksize; + + struct crypto_alg base; +}; + +#define MAX_SYNC_SKCIPHER_REQSIZE 384 +/* + * This performs a type-check against the "tfm" argument to make sure + * all users have the correct skcipher tfm for doing on-stack requests. + */ +#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ + char __##name##_desc[sizeof(struct skcipher_request) + \ + MAX_SYNC_SKCIPHER_REQSIZE + \ + (!(sizeof((struct crypto_sync_skcipher *)1 == \ + (typeof(tfm))1))) \ + ] CRYPTO_MINALIGN_ATTR; \ + struct skcipher_request *name = (void *)__##name##_desc + +/** + * DOC: Symmetric Key Cipher API + * + * Symmetric key cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto). + * + * Asynchronous cipher operations imply that the function invocation for a + * cipher request returns immediately before the completion of the operation. + * The cipher request is scheduled as a separate kernel thread and therefore + * load-balanced on the different CPUs via the process scheduler. To allow + * the kernel crypto API to inform the caller about the completion of a cipher + * request, the caller must provide a callback function. That function is + * invoked with the cipher handle when the request completes. + * + * To support the asynchronous operation, additional information than just the + * cipher handle must be supplied to the kernel crypto API. That additional + * information is given by filling in the skcipher_request data structure. + * + * For the symmetric key cipher API, the state is maintained with the tfm + * cipher handle. 
A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced by the request data structure in
+ * addition to the IV used for the cipher request. Such state information is
+ * important for the caller to maintain, because the callback function invoked
+ * upon completion of a cipher operation may need to know which operation just
+ * finished if the caller issued multiple requests in parallel. The kernel
+ * crypto API itself does not use this state information.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+	struct crypto_skcipher *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
+{
+	crypto_free_skcipher(&tfm->base);
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+			      crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_has_skcipher2() - Search for the availability of an skcipher.
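+ *
+ * Unlike crypto_has_skcipher(), which maps @type and @mask through the
+ * legacy blkcipher-compatible type/mask helpers, this variant looks the
+ * algorithm up directly as the modern "skcipher" algorithm type.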
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the skcipher + * @mask: specifies the mask for the skcipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_skcipher_driver_name( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); +} + +static inline struct skcipher_alg *crypto_skcipher_alg( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg, base); +} + +static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.ivsize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.ivsize; + + return alg->ivsize; +} + +/** + * crypto_skcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the skcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) +{ + return tfm->ivsize; +} + +static inline unsigned int crypto_sync_skcipher_ivsize( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_ivsize(&tfm->base); +} + +/** + * crypto_skcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the skcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_skcipher_blocksize( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); +} + +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
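+ *
+ * For example, AES in CTR mode has a block size of one byte, but its chunk
+ * size equals the underlying AES block size (16 bytes).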
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + +static inline unsigned int crypto_sync_skcipher_blocksize( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_blocksize(&tfm->base); +} + +static inline unsigned int crypto_skcipher_alignmask( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); +} + +static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); +} + +static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags); +} + +static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags); +} + +static inline u32 crypto_sync_skcipher_get_flags( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_get_flags(&tfm->base); +} + +static inline void crypto_sync_skcipher_set_flags( + struct crypto_sync_skcipher *tfm, u32 flags) +{ + crypto_skcipher_set_flags(&tfm->base, flags); +} + +static inline void crypto_sync_skcipher_clear_flags( + struct crypto_sync_skcipher *tfm, u32 flags) +{ + crypto_skcipher_clear_flags(&tfm->base, flags); +} + +/** + * crypto_skcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the skcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return tfm->setkey(tfm, key, keylen); +} + +static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_skcipher_setkey(&tfm->base, key, keylen); +} + +static inline unsigned int crypto_skcipher_default_keysize( + struct crypto_skcipher *tfm) +{ + return tfm->keysize; +} + +/** + * crypto_skcipher_reqtfm() - obtain cipher handle from request + * @req: skcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_skcipher handle when furnishing an skcipher_request + * data structure. + * + * Return: crypto_skcipher handle + */ +static inline struct crypto_skcipher *crypto_skcipher_reqtfm( + struct skcipher_request *req) +{ + return __crypto_skcipher_cast(req->base.tfm); +} + +static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( + struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + return container_of(tfm, struct crypto_sync_skcipher, base); +} + +/** + * crypto_skcipher_encrypt() - encrypt plaintext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. 
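+ *
+ * If the transformation is asynchronous, this call may return -EINPROGRESS
+ * or -EBUSY; callers commonly wait for completion with crypto_wait_req()
+ * and a wait object set up with DECLARE_CRYPTO_WAIT().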
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_encrypt(struct skcipher_request *req); + +/** + * crypto_skcipher_decrypt() - decrypt ciphertext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_decrypt(struct skcipher_request *req); + +/** + * DOC: Symmetric Key Cipher Request Handle + * + * The skcipher_request data structure contains all pointers to data + * required for the symmetric key cipher operation. This includes the cipher + * handle (which can be used by multiple skcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the skcipher_request_* API calls in a similar way as + * skcipher handle to the crypto_skcipher_* API calls. + */ + +/** + * crypto_skcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm) +{ + return tfm->reqsize; +} + +/** + * skcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing skcipher handle in the request + * data structure with a different one. + */ +static inline void skcipher_request_set_tfm(struct skcipher_request *req, + struct crypto_skcipher *tfm) +{ + req->base.tfm = crypto_skcipher_tfm(tfm); +} + +static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req, + struct crypto_sync_skcipher *tfm) +{ + skcipher_request_set_tfm(req, &tfm->base); +} + +static inline struct skcipher_request *skcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct skcipher_request, base); +} + +/** + * skcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the skcipher + * encrypt and decrypt API calls. During the allocation, the provided skcipher + * handle is registered in the request data structure. 
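+ *
+ * A typical pairing looks like this (illustrative; "tfm" is an already
+ * allocated cipher handle):
+ *
+ *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	...
+ *	skcipher_request_free(req);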
+ * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct skcipher_request *skcipher_request_alloc( + struct crypto_skcipher *tfm, gfp_t gfp) +{ + struct skcipher_request *req; + + req = kmalloc(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tfm), gfp); + + if (likely(req)) + skcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * skcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void skcipher_request_free(struct skcipher_request *req) +{ + kzfree(req); +} + +static inline void skcipher_request_zero(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm)); +} + +/** + * skcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the skcipher_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void skcipher_request_set_callback(struct skcipher_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * skcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_skcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. 
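+ *
+ * A condensed synchronous-style encryption flow (illustrative only; setkey
+ * and error handling are omitted, and "req", "sg", "len" and "iv" are
+ * caller-provided placeholders for an in-place operation):
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *				      CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				      crypto_req_done, &wait);
+ *	skcipher_request_set_crypt(req, sg, sg, len, iv);
+ *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);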
+ */ +static inline void skcipher_request_set_crypt( + struct skcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int cryptlen, void *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +#endif /* _CRYPTO_SKCIPHER_H */ + diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h new file mode 100644 index 0000000..1438942 --- /dev/null +++ b/include/crypto/sm3.h @@ -0,0 +1,40 @@ +/* + * Common values for SM3 algorithm + */ + +#ifndef _CRYPTO_SM3_H +#define _CRYPTO_SM3_H + +#include + +#define SM3_DIGEST_SIZE 32 +#define SM3_BLOCK_SIZE 64 + +#define SM3_T1 0x79CC4519 +#define SM3_T2 0x7A879D8A + +#define SM3_IVA 0x7380166f +#define SM3_IVB 0x4914b2b9 +#define SM3_IVC 0x172442d7 +#define SM3_IVD 0xda8a0600 +#define SM3_IVE 0xa96f30bc +#define SM3_IVF 0x163138aa +#define SM3_IVG 0xe38dee4d +#define SM3_IVH 0xb0fb0e4e + +extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE]; + +struct sm3_state { + u32 state[SM3_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SM3_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); +#endif diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h new file mode 100644 index 0000000..1cbf9aa --- /dev/null +++ b/include/crypto/sm3_base.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sm3_base.h - core logic for SM3 implementations + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Written by Gilad Ben-Yossef + */ + +#ifndef _CRYPTO_SM3_BASE_H +#define _CRYPTO_SM3_BASE_H + +#include +#include +#include +#include +#include + +typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); + +static inline int sm3_base_init(struct shash_desc *desc) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; + + return 0; +} + +static inline int sm3_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sm3_block_fn *block_fn) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SM3_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SM3_BLOCK_SIZE; + len %= SM3_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SM3_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sm3_base_do_finalize(struct shash_desc *desc, + sm3_block_fn *block_fn) +{ + const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); + struct sm3_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = 
cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sm3_state){}; + return 0; +} + +#endif /* _CRYPTO_SM3_BASE_H */ diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h new file mode 100644 index 0000000..7afd730 --- /dev/null +++ b/include/crypto/sm4.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Common values for the SM4 algorithm + * Copyright (C) 2018 ARM Limited or its affiliates. + */ + +#ifndef _CRYPTO_SM4_H +#define _CRYPTO_SM4_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define SM4_KEY_SIZE 16 +#define SM4_BLOCK_SIZE 16 +#define SM4_RKEY_WORDS 32 + +struct crypto_sm4_ctx { + u32 rkey_enc[SM4_RKEY_WORDS]; + u32 rkey_dec[SM4_RKEY_WORDS]; +}; + +int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); +int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); +void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); + +#endif diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h new file mode 100644 index 0000000..cae1b4a --- /dev/null +++ b/include/crypto/streebog.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */ +/* + * Copyright (c) 2013 Alexey Degtyarev + * Copyright (c) 2018 Vitaly Chikunov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#ifndef _CRYPTO_STREEBOG_H_ +#define _CRYPTO_STREEBOG_H_ + +#include <linux/types.h> + +#define STREEBOG256_DIGEST_SIZE 32 +#define STREEBOG512_DIGEST_SIZE 64 +#define STREEBOG_BLOCK_SIZE 64 + +struct streebog_uint512 { + __le64 qword[8]; +}; + +struct streebog_state { + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + }; + struct streebog_uint512 hash; + struct streebog_uint512 h; + struct streebog_uint512 N; + struct streebog_uint512 Sigma; + size_t fillsize; +}; + +#endif /* !_CRYPTO_STREEBOG_H_ */ diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h new file mode 100644 index 0000000..2e2c096 --- /dev/null +++ b/include/crypto/twofish.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_TWOFISH_H +#define _CRYPTO_TWOFISH_H + +#include <linux/types.h> + +#define TF_MIN_KEY_SIZE 16 +#define TF_MAX_KEY_SIZE 32 +#define TF_BLOCK_SIZE 16 + +struct crypto_tfm; + +/* Structure for an expanded Twofish key. s contains the key-dependent + * S-boxes composed with the MDS matrix; w contains the eight "whitening" + * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note + * that k[i] corresponds to what the Twofish paper calls K[i+8].
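+ *
+ * Editorial note, not upstream text: with w[8] holding K[0..7] and k[32]
+ * holding K[8..39], the struct covers all 40 expanded subkeys of the
+ * 16-round cipher (8 whitening subkeys plus 2 per round).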
*/ +struct twofish_ctx { + u32 s[4][256], w[8], k[32]; +}; + +int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, + unsigned int key_len, u32 *flags); +int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); + +#endif diff --git a/include/crypto/xts.h b/include/crypto/xts.h new file mode 100644 index 0000000..75fd96f --- /dev/null +++ b/include/crypto/xts.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_XTS_H +#define _CRYPTO_XTS_H + +#include <crypto/b128ops.h> +#include <crypto/internal/skcipher.h> +#include <linux/fips.h> + +#define XTS_BLOCK_SIZE 16 + +#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) + +static inline int xts_check_key(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen) +{ + u32 *flags = &tfm->crt_flags; + + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. + */ + if (keylen % 2) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* ensure that the AES and tweak key are not identical */ + if (fips_enabled && + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + return 0; +} + +static inline int xts_verify_key(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. + */ + if (keylen % 2) { + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* ensure that the AES and tweak key are not identical */ + if ((fips_enabled || (crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) && + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + + return 0; +} + +#endif /* _CRYPTO_XTS_H */ diff --git a/include/dt-bindings/arm/ux500_pm_domains.h b/include/dt-bindings/arm/ux500_pm_domains.h new file mode 100644 index 0000000..9bd764f --- /dev/null +++ b/include/dt-bindings/arm/ux500_pm_domains.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Linaro Ltd. + * + * Author: Ulf Hansson + */ +#ifndef _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H +#define _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H + +#define DOMAIN_VAPE 0 + +/* Number of PM domains.
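+ * Editorial note, not upstream text: NR_DOMAINS below expands to
+ * DOMAIN_VAPE + 1 = 1, and a consumer node would reference the domain
+ * as, illustratively, "power-domains = <&pm_domains DOMAIN_VAPE>;".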
*/ +#define NR_DOMAINS (DOMAIN_VAPE + 1) + +#endif diff --git a/include/dt-bindings/bus/moxtet.h b/include/dt-bindings/bus/moxtet.h new file mode 100644 index 0000000..dc93454 --- /dev/null +++ b/include/dt-bindings/bus/moxtet.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Constant for device tree bindings for Turris Mox module configuration bus + * + * Copyright (C) 2019 Marek Behun + */ + +#ifndef _DT_BINDINGS_BUS_MOXTET_H +#define _DT_BINDINGS_BUS_MOXTET_H + +#define MOXTET_IRQ_PCI 0 +#define MOXTET_IRQ_USB3 4 +#define MOXTET_IRQ_PERIDOT(n) (8 + (n)) +#define MOXTET_IRQ_TOPAZ 12 + +#endif /* _DT_BINDINGS_BUS_MOXTET_H */ diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h new file mode 100644 index 0000000..babd08a --- /dev/null +++ b/include/dt-bindings/bus/ti-sysc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* TI sysc interconnect target module defines */ + +/* Generic sysc found on omap2 and later, also known as type1 */ +#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8) +#define SYSC_OMAP2_EMUFREE (1 << 5) +#define SYSC_OMAP2_ENAWAKEUP (1 << 2) +#define SYSC_OMAP2_SOFTRESET (1 << 1) +#define SYSC_OMAP2_AUTOIDLE (1 << 0) + +/* Generic sysc found on omap4 and later, also known as type2 */ +#define SYSC_OMAP4_DMADISABLE (1 << 16) +#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */ +#define SYSC_OMAP4_SOFTRESET (1 << 0) + +/* SmartReflex sysc found on 36xx and later */ +#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26) + +#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4) + +/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ +#define SYSC_IDLE_FORCE 0 +#define SYSC_IDLE_NO 1 +#define SYSC_IDLE_SMART 2 +#define SYSC_IDLE_SMART_WKUP 3 diff --git a/include/dt-bindings/clk/lochnagar.h b/include/dt-bindings/clk/lochnagar.h new file mode 100644 index 0000000..8fa2055 --- /dev/null +++ b/include/dt-bindings/clk/lochnagar.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Device Tree defines for Lochnagar clocking + * + * Copyright (c) 2017-2018 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. + * + * Author: Charles Keepax + */ + +#ifndef DT_BINDINGS_CLK_LOCHNAGAR_H +#define DT_BINDINGS_CLK_LOCHNAGAR_H + +#define LOCHNAGAR_CDC_MCLK1 0 +#define LOCHNAGAR_CDC_MCLK2 1 +#define LOCHNAGAR_DSP_CLKIN 2 +#define LOCHNAGAR_GF_CLKOUT1 3 +#define LOCHNAGAR_GF_CLKOUT2 4 +#define LOCHNAGAR_PSIA1_MCLK 5 +#define LOCHNAGAR_PSIA2_MCLK 6 +#define LOCHNAGAR_SPDIF_MCLK 7 +#define LOCHNAGAR_ADAT_MCLK 8 +#define LOCHNAGAR_SOUNDCARD_MCLK 9 +#define LOCHNAGAR_SPDIF_CLKOUT 10 + +#endif diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clk/ti-dra7-atl.h new file mode 100644 index 0000000..42dd416 --- /dev/null +++ b/include/dt-bindings/clk/ti-dra7-atl.h @@ -0,0 +1,40 @@ +/* + * This header provides constants for DRA7 ATL (Audio Tracking Logic) + * + * The constants defined in this header are used in dts files + * + * Copyright (C) 2013 Texas Instruments, Inc. + * + * Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H +#define _DT_BINDINGS_CLK_DRA7_ATL_H + +#define DRA7_ATL_WS_MCASP1_FSR 0 +#define DRA7_ATL_WS_MCASP1_FSX 1 +#define DRA7_ATL_WS_MCASP2_FSR 2 +#define DRA7_ATL_WS_MCASP2_FSX 3 +#define DRA7_ATL_WS_MCASP3_FSX 4 +#define DRA7_ATL_WS_MCASP4_FSX 5 +#define DRA7_ATL_WS_MCASP5_FSX 6 +#define DRA7_ATL_WS_MCASP6_FSX 7 +#define DRA7_ATL_WS_MCASP7_FSX 8 +#define DRA7_ATL_WS_MCASP8_FSX 9 +#define DRA7_ATL_WS_MCASP8_AHCLKX 10 +#define DRA7_ATL_WS_XREF_CLK3 11 +#define DRA7_ATL_WS_XREF_CLK0 12 +#define DRA7_ATL_WS_XREF_CLK1 13 +#define DRA7_ATL_WS_XREF_CLK2 14 +#define DRA7_ATL_WS_OSC1_X1 15 + +#endif diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h new file mode 100644 index 0000000..030981c --- /dev/null +++ b/include/dt-bindings/clock/actions,s500-cmu.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Device Tree binding constants for Actions Semi S500 Clock Management Unit + * + * Copyright (c) 2014 Actions Semi Inc. + * Copyright (c) 2018 LSI-TEC - Caninos Loucos + */ + +#ifndef __DT_BINDINGS_CLOCK_S500_CMU_H +#define __DT_BINDINGS_CLOCK_S500_CMU_H + +#define CLK_NONE 0 + +/* fixed rate clocks */ +#define CLK_LOSC 1 +#define CLK_HOSC 2 + +/* pll clocks */ +#define CLK_CORE_PLL 3 +#define CLK_DEV_PLL 4 +#define CLK_DDR_PLL 5 +#define CLK_NAND_PLL 6 +#define CLK_DISPLAY_PLL 7 +#define CLK_ETHERNET_PLL 8 +#define CLK_AUDIO_PLL 9 + +/* system clock */ +#define CLK_DEV 10 +#define CLK_H 11 +#define CLK_AHBPREDIV 12 +#define CLK_AHB 13 +#define CLK_DE 14 +#define CLK_BISP 15 +#define CLK_VCE 16 +#define CLK_VDE 17 + +/* peripheral device clock */ +#define CLK_TIMER 18 +#define CLK_I2C0 19 +#define CLK_I2C1 20 +#define CLK_I2C2 21 +#define CLK_I2C3 22 +#define CLK_PWM0 23 +#define CLK_PWM1 24 +#define CLK_PWM2 25 +#define CLK_PWM3 26 +#define CLK_PWM4 27 +#define CLK_PWM5 28 +#define CLK_SD0 29 +#define CLK_SD1 30 +#define CLK_SD2 31 +#define CLK_SENSOR0 32 +#define CLK_SENSOR1 33 +#define CLK_SPI0 34 +#define CLK_SPI1 35 +#define CLK_SPI2 36 +#define CLK_SPI3 37 +#define CLK_UART0 38 +#define CLK_UART1 39 +#define CLK_UART2 40 +#define CLK_UART3 41 +#define CLK_UART4 42 +#define CLK_UART5 43 +#define CLK_UART6 44 +#define CLK_DE1 45 +#define CLK_DE2 46 +#define CLK_I2SRX 47 +#define CLK_I2STX 48 +#define CLK_HDMI_AUDIO 49 +#define CLK_HDMI 50 +#define CLK_SPDIF 51 +#define CLK_NAND 52 +#define CLK_ECC 53 +#define CLK_RMII_REF 54 + +#define CLK_NR_CLKS (CLK_RMII_REF + 1) + +#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */ diff --git a/include/dt-bindings/clock/actions,s700-cmu.h b/include/dt-bindings/clock/actions,s700-cmu.h new file mode 100644 index 0000000..3e19429 --- /dev/null +++ b/include/dt-bindings/clock/actions,s700-cmu.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Device Tree binding constants for Actions Semi S700 Clock Management Unit + * + * Copyright (c) 2014 Actions Semi Inc. 
+ * Author: David Liu + * + * Author: Pathiban Nallathambi + * Author: Saravanan Sekar + */ + +#ifndef __DT_BINDINGS_CLOCK_S700_H +#define __DT_BINDINGS_CLOCK_S700_H + +#define CLK_NONE 0 + +/* pll clocks */ +#define CLK_CORE_PLL 1 +#define CLK_DEV_PLL 2 +#define CLK_DDR_PLL 3 +#define CLK_NAND_PLL 4 +#define CLK_DISPLAY_PLL 5 +#define CLK_TVOUT_PLL 6 +#define CLK_CVBS_PLL 7 +#define CLK_AUDIO_PLL 8 +#define CLK_ETHERNET_PLL 9 + +/* system clock */ +#define CLK_CPU 10 +#define CLK_DEV 11 +#define CLK_AHB 12 +#define CLK_APB 13 +#define CLK_DMAC 14 +#define CLK_NOC0_CLK_MUX 15 +#define CLK_NOC1_CLK_MUX 16 +#define CLK_HP_CLK_MUX 17 +#define CLK_HP_CLK_DIV 18 +#define CLK_NOC1_CLK_DIV 19 +#define CLK_NOC0 20 +#define CLK_NOC1 21 +#define CLK_SENOR_SRC 22 + +/* peripheral device clock */ +#define CLK_GPIO 23 +#define CLK_TIMER 24 +#define CLK_DSI 25 +#define CLK_CSI 26 +#define CLK_SI 27 +#define CLK_DE 28 +#define CLK_HDE 29 +#define CLK_VDE 30 +#define CLK_VCE 31 +#define CLK_NAND 32 +#define CLK_SD0 33 +#define CLK_SD1 34 +#define CLK_SD2 35 + +#define CLK_UART0 36 +#define CLK_UART1 37 +#define CLK_UART2 38 +#define CLK_UART3 39 +#define CLK_UART4 40 +#define CLK_UART5 41 +#define CLK_UART6 42 + +#define CLK_PWM0 43 +#define CLK_PWM1 44 +#define CLK_PWM2 45 +#define CLK_PWM3 46 +#define CLK_PWM4 47 +#define CLK_PWM5 48 +#define CLK_GPU3D 49 + +#define CLK_I2C0 50 +#define CLK_I2C1 51 +#define CLK_I2C2 52 +#define CLK_I2C3 53 + +#define CLK_SPI0 54 +#define CLK_SPI1 55 +#define CLK_SPI2 56 +#define CLK_SPI3 57 + +#define CLK_USB3_480MPLL0 58 +#define CLK_USB3_480MPHY0 59 +#define CLK_USB3_5GPHY 60 +#define CLK_USB3_CCE 61 +#define CLK_USB3_MAC 62 + +#define CLK_LCD 63 +#define CLK_HDMI_AUDIO 64 +#define CLK_I2SRX 65 +#define CLK_I2STX 66 + +#define CLK_SENSOR0 67 +#define CLK_SENSOR1 68 + +#define CLK_HDMI_DEV 69 + +#define CLK_ETHERNET 70 +#define CLK_RMII_REF 71 + +#define CLK_USB2H0_PLLEN 72 +#define CLK_USB2H0_PHY 73 +#define CLK_USB2H0_CCE 74 +#define CLK_USB2H1_PLLEN 75 +#define CLK_USB2H1_PHY 76 +#define CLK_USB2H1_CCE 77 + +#define CLK_TVOUT 78 + +#define CLK_THERMAL_SENSOR 79 + +#define CLK_IRC_SWITCH 80 +#define CLK_PCM1 81 +#define CLK_NR_CLKS (CLK_PCM1 + 1) + +#endif /* __DT_BINDINGS_CLOCK_S700_H */ diff --git a/include/dt-bindings/clock/actions,s900-cmu.h b/include/dt-bindings/clock/actions,s900-cmu.h new file mode 100644 index 0000000..7c12515 --- /dev/null +++ b/include/dt-bindings/clock/actions,s900-cmu.h @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Device Tree binding constants for Actions Semi S900 Clock Management Unit +// +// Copyright (c) 2014 Actions Semi Inc. +// Copyright (c) 2018 Linaro Ltd. 
+ +#ifndef __DT_BINDINGS_CLOCK_S900_CMU_H +#define __DT_BINDINGS_CLOCK_S900_CMU_H + +#define CLK_NONE 0 + +/* fixed rate clocks */ +#define CLK_LOSC 1 +#define CLK_HOSC 2 + +/* pll clocks */ +#define CLK_CORE_PLL 3 +#define CLK_DEV_PLL 4 +#define CLK_DDR_PLL 5 +#define CLK_NAND_PLL 6 +#define CLK_DISPLAY_PLL 7 +#define CLK_DSI_PLL 8 +#define CLK_ASSIST_PLL 9 +#define CLK_AUDIO_PLL 10 + +/* system clock */ +#define CLK_CPU 15 +#define CLK_DEV 16 +#define CLK_NOC 17 +#define CLK_NOC_MUX 18 +#define CLK_NOC_DIV 19 +#define CLK_AHB 20 +#define CLK_APB 21 +#define CLK_DMAC 22 + +/* peripheral device clock */ +#define CLK_GPIO 23 + +#define CLK_BISP 24 +#define CLK_CSI0 25 +#define CLK_CSI1 26 + +#define CLK_DE0 27 +#define CLK_DE1 28 +#define CLK_DE2 29 +#define CLK_DE3 30 +#define CLK_DSI 32 + +#define CLK_GPU 33 +#define CLK_GPU_CORE 34 +#define CLK_GPU_MEM 35 +#define CLK_GPU_SYS 36 + +#define CLK_HDE 37 +#define CLK_I2C0 38 +#define CLK_I2C1 39 +#define CLK_I2C2 40 +#define CLK_I2C3 41 +#define CLK_I2C4 42 +#define CLK_I2C5 43 +#define CLK_I2SRX 44 +#define CLK_I2STX 45 +#define CLK_IMX 46 +#define CLK_LCD 47 +#define CLK_NAND0 48 +#define CLK_NAND1 49 +#define CLK_PWM0 50 +#define CLK_PWM1 51 +#define CLK_PWM2 52 +#define CLK_PWM3 53 +#define CLK_PWM4 54 +#define CLK_PWM5 55 +#define CLK_SD0 56 +#define CLK_SD1 57 +#define CLK_SD2 58 +#define CLK_SD3 59 +#define CLK_SENSOR 60 +#define CLK_SPEED_SENSOR 61 +#define CLK_SPI0 62 +#define CLK_SPI1 63 +#define CLK_SPI2 64 +#define CLK_SPI3 65 +#define CLK_THERMAL_SENSOR 66 +#define CLK_UART0 67 +#define CLK_UART1 68 +#define CLK_UART2 69 +#define CLK_UART3 70 +#define CLK_UART4 71 +#define CLK_UART5 72 +#define CLK_UART6 73 +#define CLK_VCE 74 +#define CLK_VDE 75 + +#define CLK_USB3_480MPLL0 76 +#define CLK_USB3_480MPHY0 77 +#define CLK_USB3_5GPHY 78 +#define CLK_USB3_CCE 79 +#define CLK_USB3_MAC 80 + +#define CLK_TIMER 83 + +#define CLK_HDMI_AUDIO 84 + +#define CLK_24M 85 + +#define CLK_EDP 86 + +#define CLK_24M_EDP 87 +#define CLK_EDP_PLL 88 +#define CLK_EDP_LINK 89 + +#define CLK_USB2H0_PLLEN 90 +#define CLK_USB2H0_PHY 91 +#define CLK_USB2H0_CCE 92 +#define CLK_USB2H1_PLLEN 93 +#define CLK_USB2H1_PHY 94 +#define CLK_USB2H1_CCE 95 + +#define CLK_DDR0 96 +#define CLK_DDR1 97 +#define CLK_DMM 98 + +#define CLK_ETH_MAC 99 +#define CLK_RMII_REF 100 + +#define CLK_NR_CLKS (CLK_RMII_REF + 1) + +#endif /* __DT_BINDINGS_CLOCK_S900_CMU_H */ diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h new file mode 100644 index 0000000..d3871c6 --- /dev/null +++ b/include/dt-bindings/clock/alphascale,asm9260.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2014 Oleksij Rempel + */ + +#ifndef _DT_BINDINGS_CLK_ASM9260_H +#define _DT_BINDINGS_CLK_ASM9260_H + +/* ahb gate */ +#define CLKID_AHB_ROM 0 +#define CLKID_AHB_RAM 1 +#define CLKID_AHB_GPIO 2 +#define CLKID_AHB_MAC 3 +#define CLKID_AHB_EMI 4 +#define CLKID_AHB_USB0 5 +#define CLKID_AHB_USB1 6 +#define CLKID_AHB_DMA0 7 +#define CLKID_AHB_DMA1 8 +#define CLKID_AHB_UART0 9 +#define CLKID_AHB_UART1 10 +#define CLKID_AHB_UART2 11 +#define CLKID_AHB_UART3 12 +#define CLKID_AHB_UART4 13 +#define CLKID_AHB_UART5 14 +#define CLKID_AHB_UART6 15 +#define CLKID_AHB_UART7 16 +#define CLKID_AHB_UART8 17 +#define CLKID_AHB_UART9 18 +#define CLKID_AHB_I2S0 19 +#define CLKID_AHB_I2C0 20 +#define CLKID_AHB_I2C1 21 +#define CLKID_AHB_SSP0 22 +#define CLKID_AHB_IOCONFIG 23 +#define CLKID_AHB_WDT 24 +#define CLKID_AHB_CAN0 25 +#define 
CLKID_AHB_CAN1 26 +#define CLKID_AHB_MPWM 27 +#define CLKID_AHB_SPI0 28 +#define CLKID_AHB_SPI1 29 +#define CLKID_AHB_QEI 30 +#define CLKID_AHB_QUADSPI0 31 +#define CLKID_AHB_CAMIF 32 +#define CLKID_AHB_LCDIF 33 +#define CLKID_AHB_TIMER0 34 +#define CLKID_AHB_TIMER1 35 +#define CLKID_AHB_TIMER2 36 +#define CLKID_AHB_TIMER3 37 +#define CLKID_AHB_IRQ 38 +#define CLKID_AHB_RTC 39 +#define CLKID_AHB_NAND 40 +#define CLKID_AHB_ADC0 41 +#define CLKID_AHB_LED 42 +#define CLKID_AHB_DAC0 43 +#define CLKID_AHB_LCD 44 +#define CLKID_AHB_I2S1 45 +#define CLKID_AHB_MAC1 46 + +/* divider */ +#define CLKID_SYS_CPU 47 +#define CLKID_SYS_AHB 48 +#define CLKID_SYS_I2S0M 49 +#define CLKID_SYS_I2S0S 50 +#define CLKID_SYS_I2S1M 51 +#define CLKID_SYS_I2S1S 52 +#define CLKID_SYS_UART0 53 +#define CLKID_SYS_UART1 54 +#define CLKID_SYS_UART2 55 +#define CLKID_SYS_UART3 56 +#define CLKID_SYS_UART4 56 +#define CLKID_SYS_UART5 57 +#define CLKID_SYS_UART6 58 +#define CLKID_SYS_UART7 59 +#define CLKID_SYS_UART8 60 +#define CLKID_SYS_UART9 61 +#define CLKID_SYS_SPI0 62 +#define CLKID_SYS_SPI1 63 +#define CLKID_SYS_QUADSPI 64 +#define CLKID_SYS_SSP0 65 +#define CLKID_SYS_NAND 66 +#define CLKID_SYS_TRACE 67 +#define CLKID_SYS_CAMM 68 +#define CLKID_SYS_WDT 69 +#define CLKID_SYS_CLKOUT 70 +#define CLKID_SYS_MAC 71 +#define CLKID_SYS_LCD 72 +#define CLKID_SYS_ADCANA 73 + +#define MAX_CLKS 74 +#endif diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h new file mode 100644 index 0000000..8949515 --- /dev/null +++ b/include/dt-bindings/clock/am3.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. + */ +#ifndef __DT_BINDINGS_CLK_AM3_H +#define __DT_BINDINGS_CLK_AM3_H + +#define AM3_CLKCTRL_OFFSET 0x0 +#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET) + +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + +/* l4_per clocks */ +#define AM3_L4_PER_CLKCTRL_OFFSET 0x14 +#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET) +#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14) +#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18) +#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c) +#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24) +#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28) +#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c) +#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30) +#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34) +#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38) +#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c) +#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40) +#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44) +#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48) +#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c) +#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50) +#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60) +#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68) +#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c) +#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70) +#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74) +#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78) +#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c) +#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80) +#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84) +#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88) +#define 
AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90) +#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94) +#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0) +#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac) +#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0) +#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4) +#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc) +#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0) +#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4) +#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc) +#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4) +#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8) +#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc) +#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0) +#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8) +#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec) +#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0) +#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4) +#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8) +#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc) +#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100) +#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c) +#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110) +#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120) +#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130) +#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4 +#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET) +#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4) +#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc) +#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14) +#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0) +#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4) +#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8) +#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc) +#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0) +#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4) +#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8) +#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4) + +/* mpu clocks */ +#define AM3_MPU_CLKCTRL_OFFSET 0x4 +#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET) +#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4 +#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET) +#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20 +#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET) +#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20) + +/* XXX: Compatibility part end */ + +/* l4ls clocks */ +#define AM3_L4LS_CLKCTRL_OFFSET 0x38 +#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET) +#define AM3_L4LS_UART6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x38) +#define AM3_L4LS_MMC1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x3c) +#define AM3_L4LS_ELM_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x40) +#define AM3_L4LS_I2C3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x44) +#define AM3_L4LS_I2C2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x48) +#define 
AM3_L4LS_SPI0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x4c) +#define AM3_L4LS_SPI1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x50) +#define AM3_L4LS_L4_LS_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x60) +#define AM3_L4LS_UART2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x6c) +#define AM3_L4LS_UART3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x70) +#define AM3_L4LS_UART4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x74) +#define AM3_L4LS_UART5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x78) +#define AM3_L4LS_TIMER7_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x7c) +#define AM3_L4LS_TIMER2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x80) +#define AM3_L4LS_TIMER3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x84) +#define AM3_L4LS_TIMER4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x88) +#define AM3_L4LS_RNG_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x90) +#define AM3_L4LS_GPIO2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xac) +#define AM3_L4LS_GPIO3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb0) +#define AM3_L4LS_GPIO4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb4) +#define AM3_L4LS_D_CAN0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc0) +#define AM3_L4LS_D_CAN1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc4) +#define AM3_L4LS_EPWMSS1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xcc) +#define AM3_L4LS_EPWMSS0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd4) +#define AM3_L4LS_EPWMSS2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd8) +#define AM3_L4LS_TIMER5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xec) +#define AM3_L4LS_TIMER6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf0) +#define AM3_L4LS_MMC2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf4) +#define AM3_L4LS_SPINLOCK_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x10c) +#define AM3_L4LS_MAILBOX_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x110) +#define AM3_L4LS_OCPWP_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x130) + +/* l3s clocks */ +#define AM3_L3S_CLKCTRL_OFFSET 0x1c +#define AM3_L3S_CLKCTRL_INDEX(offset) ((offset) - AM3_L3S_CLKCTRL_OFFSET) +#define AM3_L3S_USB_OTG_HS_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x1c) +#define AM3_L3S_GPMC_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x30) +#define AM3_L3S_MCASP0_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x34) +#define AM3_L3S_MCASP1_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x68) +#define AM3_L3S_MMC3_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0xf8) + +/* l3 clocks */ +#define AM3_L3_CLKCTRL_OFFSET 0x24 +#define AM3_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_CLKCTRL_OFFSET) +#define AM3_L3_TPTC0_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x24) +#define AM3_L3_EMIF_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x28) +#define AM3_L3_OCMCRAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x2c) +#define AM3_L3_AES_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x94) +#define AM3_L3_SHAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xa0) +#define AM3_L3_TPCC_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xbc) +#define AM3_L3_L3_INSTR_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xdc) +#define AM3_L3_L3_MAIN_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xe0) +#define AM3_L3_TPTC1_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xfc) +#define AM3_L3_TPTC2_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x100) + +/* l4hs clocks */ +#define AM3_L4HS_CLKCTRL_OFFSET 0x120 +#define AM3_L4HS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4HS_CLKCTRL_OFFSET) +#define AM3_L4HS_L4_HS_CLKCTRL AM3_L4HS_CLKCTRL_INDEX(0x120) + +/* pruss_ocp clocks */ +#define AM3_PRUSS_OCP_CLKCTRL_OFFSET 0xe8 +#define AM3_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM3_PRUSS_OCP_CLKCTRL_OFFSET) +#define AM3_PRUSS_OCP_PRUSS_CLKCTRL AM3_PRUSS_OCP_CLKCTRL_INDEX(0xe8) + +/* cpsw_125mhz clocks */ +#define AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL AM3_CLKCTRL_INDEX(0x14) + +/* lcdc clocks */ +#define AM3_LCDC_CLKCTRL_OFFSET 0x18 +#define AM3_LCDC_CLKCTRL_INDEX(offset) ((offset) - AM3_LCDC_CLKCTRL_OFFSET) +#define AM3_LCDC_LCDC_CLKCTRL AM3_LCDC_CLKCTRL_INDEX(0x18) + +/* clk_24mhz clocks */ +#define AM3_CLK_24MHZ_CLKCTRL_OFFSET 0x14c +#define 
AM3_CLK_24MHZ_CLKCTRL_INDEX(offset) ((offset) - AM3_CLK_24MHZ_CLKCTRL_OFFSET) +#define AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL AM3_CLK_24MHZ_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CONTROL_CLKCTRL AM3_CLKCTRL_INDEX(0x4) +#define AM3_L4_WKUP_GPIO1_CLKCTRL AM3_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_L4_WKUP_CLKCTRL AM3_CLKCTRL_INDEX(0xc) +#define AM3_L4_WKUP_UART1_CLKCTRL AM3_CLKCTRL_INDEX(0xb4) +#define AM3_L4_WKUP_I2C1_CLKCTRL AM3_CLKCTRL_INDEX(0xb8) +#define AM3_L4_WKUP_ADC_TSC_CLKCTRL AM3_CLKCTRL_INDEX(0xbc) +#define AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL AM3_CLKCTRL_INDEX(0xc0) +#define AM3_L4_WKUP_TIMER1_CLKCTRL AM3_CLKCTRL_INDEX(0xc4) +#define AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL AM3_CLKCTRL_INDEX(0xc8) +#define AM3_L4_WKUP_WD_TIMER2_CLKCTRL AM3_CLKCTRL_INDEX(0xd4) + +/* l3_aon clocks */ +#define AM3_L3_AON_CLKCTRL_OFFSET 0x14 +#define AM3_L3_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_AON_CLKCTRL_OFFSET) +#define AM3_L3_AON_DEBUGSS_CLKCTRL AM3_L3_AON_CLKCTRL_INDEX(0x14) + +/* l4_wkup_aon clocks */ +#define AM3_L4_WKUP_AON_CLKCTRL_OFFSET 0xb0 +#define AM3_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_AON_CLKCTRL_OFFSET) +#define AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL AM3_L4_WKUP_AON_CLKCTRL_INDEX(0xb0) + +/* mpu clocks */ +#define AM3_MPU_MPU_CLKCTRL AM3_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_L4_RTC_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_GFX_CLKCTRL AM3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CEFUSE_CLKCTRL AM3_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h new file mode 100644 index 0000000..d961e7c --- /dev/null +++ b/include/dt-bindings/clock/am4.h @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. 
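+ *
+ * Editorial note, not upstream text: the *_CLKCTRL_INDEX() helpers below
+ * turn a clkctrl register offset into a zero-based clock index relative
+ * to the first register of the clock domain, e.g.
+ * AM4_CLKCTRL_INDEX(0x120) = 0x120 - 0x20 = 0x100.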
+ */ +#ifndef __DT_BINDINGS_CLK_AM4_H +#define __DT_BINDINGS_CLK_AM4_H + +#define AM4_CLKCTRL_OFFSET 0x20 +#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET) + +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + +/* l4_wkup clocks */ +#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120) +#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228) +#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230) +#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328) +#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338) +#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340) +#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348) +#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350) +#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358) +#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360) +#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368) + +/* mpu clocks */ +#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* gfx_l3 clocks */ +#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_rtc clocks */ +#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_per clocks */ +#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20) +#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28) +#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30) +#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40) +#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50) +#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58) +#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68) +#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70) +#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78) +#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80) +#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88) +#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90) +#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0) +#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238) +#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240) +#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248) +#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258) +#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260) +#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268) +#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320) +#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420) +#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428) +#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430) +#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438) +#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440) +#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448) +#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450) +#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458) +#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460) +#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468) +#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478) +#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480) +#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488) +#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490) +#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498) +#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0) +#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8) +#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0) +#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8) +#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0) +#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8) +#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0) +#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500) +#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508) +#define 
AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510) +#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518) +#define AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520) +#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528) +#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530) +#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538) +#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540) +#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548) +#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550) +#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558) +#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560) +#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568) +#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570) +#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578) +#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580) +#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588) +#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590) +#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598) +#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0) +#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8) +#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0) +#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720) +#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20) +#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20) + +/* XXX: Compatibility part end. */ + +/* l3s_tsc clocks */ +#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120 +#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET) +#define AM4_L3S_TSC_ADC_TSC_CLKCTRL AM4_L3S_TSC_CLKCTRL_INDEX(0x120) + +/* l4_wkup_aon clocks */ +#define AM4_L4_WKUP_AON_CLKCTRL_OFFSET 0x228 +#define AM4_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_AON_CLKCTRL_OFFSET) +#define AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x228) +#define AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x230) + +/* l4_wkup clocks */ +#define AM4_L4_WKUP_CLKCTRL_OFFSET 0x220 +#define AM4_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_CLKCTRL_OFFSET) +#define AM4_L4_WKUP_L4_WKUP_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x220) +#define AM4_L4_WKUP_TIMER1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x328) +#define AM4_L4_WKUP_WD_TIMER2_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x338) +#define AM4_L4_WKUP_I2C1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x340) +#define AM4_L4_WKUP_UART1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x348) +#define AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x350) +#define AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x358) +#define AM4_L4_WKUP_CONTROL_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x360) +#define AM4_L4_WKUP_GPIO1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x368) + +/* mpu clocks */ +#define AM4_MPU_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* gfx_l3 clocks */ +#define AM4_GFX_L3_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_rtc clocks */ +#define AM4_L4_RTC_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l3 clocks */ +#define AM4_L3_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20) +#define AM4_L3_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28) +#define AM4_L3_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30) +#define AM4_L3_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40) +#define AM4_L3_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50) +#define AM4_L3_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58) +#define AM4_L3_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78) +#define AM4_L3_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80) +#define AM4_L3_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88) +#define AM4_L3_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90) +#define AM4_L3_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0) + +/* l3s clocks */ +#define AM4_L3S_CLKCTRL_OFFSET 0x68 +#define 
AM4_L3S_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_CLKCTRL_OFFSET) +#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68) +#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70) +#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220) +#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238) +#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240) +#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248) +#define AM4_L3S_QSPI_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x258) +#define AM4_L3S_USB_OTG_SS0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x260) +#define AM4_L3S_USB_OTG_SS1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x268) + +/* pruss_ocp clocks */ +#define AM4_PRUSS_OCP_CLKCTRL_OFFSET 0x320 +#define AM4_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM4_PRUSS_OCP_CLKCTRL_OFFSET) +#define AM4_PRUSS_OCP_PRUSS_CLKCTRL AM4_PRUSS_OCP_CLKCTRL_INDEX(0x320) + +/* l4ls clocks */ +#define AM4_L4LS_CLKCTRL_OFFSET 0x420 +#define AM4_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM4_L4LS_CLKCTRL_OFFSET) +#define AM4_L4LS_L4_LS_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x420) +#define AM4_L4LS_D_CAN0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x428) +#define AM4_L4LS_D_CAN1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x430) +#define AM4_L4LS_EPWMSS0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x438) +#define AM4_L4LS_EPWMSS1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x440) +#define AM4_L4LS_EPWMSS2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x448) +#define AM4_L4LS_EPWMSS3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x450) +#define AM4_L4LS_EPWMSS4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x458) +#define AM4_L4LS_EPWMSS5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x460) +#define AM4_L4LS_ELM_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x468) +#define AM4_L4LS_GPIO2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x478) +#define AM4_L4LS_GPIO3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x480) +#define AM4_L4LS_GPIO4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x488) +#define AM4_L4LS_GPIO5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x490) +#define AM4_L4LS_GPIO6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x498) +#define AM4_L4LS_HDQ1W_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a0) +#define AM4_L4LS_I2C2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a8) +#define AM4_L4LS_I2C3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b0) +#define AM4_L4LS_MAILBOX_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b8) +#define AM4_L4LS_MMC1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c0) +#define AM4_L4LS_MMC2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c8) +#define AM4_L4LS_RNG_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4e0) +#define AM4_L4LS_SPI0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x500) +#define AM4_L4LS_SPI1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x508) +#define AM4_L4LS_SPI2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x510) +#define AM4_L4LS_SPI3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x518) +#define AM4_L4LS_SPI4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x520) +#define AM4_L4LS_SPINLOCK_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x528) +#define AM4_L4LS_TIMER2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x530) +#define AM4_L4LS_TIMER3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x538) +#define AM4_L4LS_TIMER4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x540) +#define AM4_L4LS_TIMER5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x548) +#define AM4_L4LS_TIMER6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x550) +#define AM4_L4LS_TIMER7_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x558) +#define AM4_L4LS_TIMER8_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x560) +#define AM4_L4LS_TIMER9_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x568) +#define AM4_L4LS_TIMER10_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x570) +#define AM4_L4LS_TIMER11_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x578) +#define AM4_L4LS_UART2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x580) +#define AM4_L4LS_UART3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x588) +#define AM4_L4LS_UART4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x590) +#define 
AM4_L4LS_UART5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x598) +#define AM4_L4LS_UART6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5a0) +#define AM4_L4LS_OCP2SCP0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5b8) +#define AM4_L4LS_OCP2SCP1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5c0) + +/* emif clocks */ +#define AM4_EMIF_CLKCTRL_OFFSET 0x720 +#define AM4_EMIF_CLKCTRL_INDEX(offset) ((offset) - AM4_EMIF_CLKCTRL_OFFSET) +#define AM4_EMIF_EMIF_CLKCTRL AM4_EMIF_CLKCTRL_INDEX(0x720) + +/* dss clocks */ +#define AM4_DSS_CLKCTRL_OFFSET 0xa20 +#define AM4_DSS_CLKCTRL_INDEX(offset) ((offset) - AM4_DSS_CLKCTRL_OFFSET) +#define AM4_DSS_DSS_CORE_CLKCTRL AM4_DSS_CLKCTRL_INDEX(0xa20) + +/* cpsw_125mhz clocks */ +#define AM4_CPSW_125MHZ_CLKCTRL_OFFSET 0xb20 +#define AM4_CPSW_125MHZ_CLKCTRL_INDEX(offset) ((offset) - AM4_CPSW_125MHZ_CLKCTRL_OFFSET) +#define AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL AM4_CPSW_125MHZ_CLKCTRL_INDEX(0xb20) + +#endif diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h new file mode 100644 index 0000000..f437386 --- /dev/null +++ b/include/dt-bindings/clock/aspeed-clock.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef DT_BINDINGS_ASPEED_CLOCK_H +#define DT_BINDINGS_ASPEED_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 +#define ASPEED_CLK_GATE_MCLK 2 +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 +#define ASPEED_CLK_GATE_REFCLK 6 +#define ASPEED_CLK_GATE_USBPORT2CLK 7 +#define ASPEED_CLK_GATE_LCLK 8 +#define ASPEED_CLK_GATE_USBUHCICLK 9 +#define ASPEED_CLK_GATE_D1CLK 10 +#define ASPEED_CLK_GATE_YCLK 11 +#define ASPEED_CLK_GATE_USBPORT1CLK 12 +#define ASPEED_CLK_GATE_UART1CLK 13 +#define ASPEED_CLK_GATE_UART2CLK 14 +#define ASPEED_CLK_GATE_UART5CLK 15 +#define ASPEED_CLK_GATE_ESPICLK 16 +#define ASPEED_CLK_GATE_MAC1CLK 17 +#define ASPEED_CLK_GATE_MAC2CLK 18 +#define ASPEED_CLK_GATE_RSACLK 19 +#define ASPEED_CLK_GATE_UART3CLK 20 +#define ASPEED_CLK_GATE_UART4CLK 21 +#define ASPEED_CLK_GATE_SDCLK 22 +#define ASPEED_CLK_GATE_LHCCLK 23 +#define ASPEED_CLK_HPLL 24 +#define ASPEED_CLK_AHB 25 +#define ASPEED_CLK_APB 26 +#define ASPEED_CLK_UART 27 +#define ASPEED_CLK_SDIO 28 +#define ASPEED_CLK_ECLK 29 +#define ASPEED_CLK_ECLK_MUX 30 +#define ASPEED_CLK_LHCLK 31 +#define ASPEED_CLK_MAC 32 +#define ASPEED_CLK_BCLK 33 +#define ASPEED_CLK_MPLL 34 +#define ASPEED_CLK_24M 35 + +#define ASPEED_RESET_XDMA 0 +#define ASPEED_RESET_MCTP 1 +#define ASPEED_RESET_ADC 2 +#define ASPEED_RESET_JTAG_MASTER 3 +#define ASPEED_RESET_MIC 4 +#define ASPEED_RESET_PWM 5 +#define ASPEED_RESET_PECI 6 +#define ASPEED_RESET_I2C 7 +#define ASPEED_RESET_AHB 8 +#define ASPEED_RESET_CRT1 9 + +#endif diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h new file mode 100644 index 0000000..38074a5 --- /dev/null +++ b/include/dt-bindings/clock/ast2600-clock.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ +#ifndef DT_BINDINGS_AST2600_CLOCK_H +#define DT_BINDINGS_AST2600_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 + +#define ASPEED_CLK_GATE_MCLK 2 + +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 + +#define ASPEED_CLK_GATE_LCLK 6 +#define ASPEED_CLK_GATE_LHCCLK 7 + +#define ASPEED_CLK_GATE_D1CLK 8 +#define ASPEED_CLK_GATE_YCLK 9 + +#define ASPEED_CLK_GATE_REF0CLK 10 +#define ASPEED_CLK_GATE_REF1CLK 11 + +#define ASPEED_CLK_GATE_ESPICLK 12 + +#define ASPEED_CLK_GATE_USBUHCICLK 
13 +#define ASPEED_CLK_GATE_USBPORT1CLK 14 +#define ASPEED_CLK_GATE_USBPORT2CLK 15 + +#define ASPEED_CLK_GATE_RSACLK 16 +#define ASPEED_CLK_GATE_RVASCLK 17 + +#define ASPEED_CLK_GATE_MAC1CLK 18 +#define ASPEED_CLK_GATE_MAC2CLK 19 +#define ASPEED_CLK_GATE_MAC3CLK 20 +#define ASPEED_CLK_GATE_MAC4CLK 21 + +#define ASPEED_CLK_GATE_UART1CLK 22 +#define ASPEED_CLK_GATE_UART2CLK 23 +#define ASPEED_CLK_GATE_UART3CLK 24 +#define ASPEED_CLK_GATE_UART4CLK 25 +#define ASPEED_CLK_GATE_UART5CLK 26 +#define ASPEED_CLK_GATE_UART6CLK 27 +#define ASPEED_CLK_GATE_UART7CLK 28 +#define ASPEED_CLK_GATE_UART8CLK 29 +#define ASPEED_CLK_GATE_UART9CLK 30 +#define ASPEED_CLK_GATE_UART10CLK 31 +#define ASPEED_CLK_GATE_UART11CLK 32 +#define ASPEED_CLK_GATE_UART12CLK 33 +#define ASPEED_CLK_GATE_UART13CLK 34 + +#define ASPEED_CLK_GATE_SDCLK 35 +#define ASPEED_CLK_GATE_EMMCCLK 36 + +#define ASPEED_CLK_GATE_I3C0CLK 37 +#define ASPEED_CLK_GATE_I3C1CLK 38 +#define ASPEED_CLK_GATE_I3C2CLK 39 +#define ASPEED_CLK_GATE_I3C3CLK 40 +#define ASPEED_CLK_GATE_I3C4CLK 41 +#define ASPEED_CLK_GATE_I3C5CLK 42 +#define ASPEED_CLK_GATE_I3C6CLK 43 +#define ASPEED_CLK_GATE_I3C7CLK 44 + +#define ASPEED_CLK_GATE_FSICLK 45 + +#define ASPEED_CLK_HPLL 46 +#define ASPEED_CLK_MPLL 47 +#define ASPEED_CLK_DPLL 48 +#define ASPEED_CLK_EPLL 49 +#define ASPEED_CLK_APLL 50 +#define ASPEED_CLK_AHB 51 +#define ASPEED_CLK_APB1 52 +#define ASPEED_CLK_APB2 53 +#define ASPEED_CLK_BCLK 54 +#define ASPEED_CLK_D1CLK 55 +#define ASPEED_CLK_VCLK 56 +#define ASPEED_CLK_LHCLK 57 +#define ASPEED_CLK_UART 58 +#define ASPEED_CLK_UARTX 59 +#define ASPEED_CLK_SDIO 60 +#define ASPEED_CLK_EMMC 61 +#define ASPEED_CLK_ECLK 62 +#define ASPEED_CLK_ECLK_MUX 63 +#define ASPEED_CLK_MAC12 64 +#define ASPEED_CLK_MAC34 65 +#define ASPEED_CLK_USBPHY_40M 66 + +/* Only list resets here that are not part of a gate */ +#define ASPEED_RESET_ADC 55 +#define ASPEED_RESET_JTAG_MASTER2 54 +#define ASPEED_RESET_I3C_DMA 39 +#define ASPEED_RESET_PWM 37 +#define ASPEED_RESET_PECI 36 +#define ASPEED_RESET_MII 35 +#define ASPEED_RESET_I2C 34 +#define ASPEED_RESET_H2X 31 +#define ASPEED_RESET_GP_MCU 30 +#define ASPEED_RESET_DP_MCU 29 +#define ASPEED_RESET_DP 28 +#define ASPEED_RESET_RC_XDMA 27 +#define ASPEED_RESET_GRAPHICS 26 +#define ASPEED_RESET_DEV_XDMA 25 +#define ASPEED_RESET_DEV_MCTP 24 +#define ASPEED_RESET_RC_MCTP 23 +#define ASPEED_RESET_JTAG_MASTER 22 +#define ASPEED_RESET_PCIE_DEV_O 21 +#define ASPEED_RESET_PCIE_DEV_OEN 20 +#define ASPEED_RESET_PCIE_RC_O 19 +#define ASPEED_RESET_PCIE_RC_OEN 18 +#define ASPEED_RESET_PCI_DP 5 +#define ASPEED_RESET_AHB 1 +#define ASPEED_RESET_SDRAM 0 + +#endif diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h new file mode 100644 index 0000000..38b5554 --- /dev/null +++ b/include/dt-bindings/clock/at91.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This header provides constants for AT91 pmc status. + * + * The constants defined in this header are being used in dts. 
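+ * Editorial illustration, not upstream text: a peripheral clock consumer
+ * is typically written in a dts as "clocks = <&pmc PMC_TYPE_PERIPHERAL 41>;",
+ * where 41 stands in for the SoC-specific peripheral ID (hypothetical value).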
+ */ + +#ifndef _DT_BINDINGS_CLK_AT91_H +#define _DT_BINDINGS_CLK_AT91_H + +#define PMC_TYPE_CORE 0 +#define PMC_TYPE_SYSTEM 1 +#define PMC_TYPE_PERIPHERAL 2 +#define PMC_TYPE_GCK 3 + +#define PMC_SLOW 0 +#define PMC_MCK 1 +#define PMC_UTMI 2 +#define PMC_MAIN 3 +#define PMC_MCK2 4 +#define PMC_I2S0_MUX 5 +#define PMC_I2S1_MUX 6 + +#ifndef AT91_PMC_MOSCS +#define AT91_PMC_MOSCS 0 /* MOSCS Flag */ +#define AT91_PMC_LOCKA 1 /* PLLA Lock */ +#define AT91_PMC_LOCKB 2 /* PLLB Lock */ +#define AT91_PMC_MCKRDY 3 /* Master Clock */ +#define AT91_PMC_LOCKU 6 /* UPLL Lock */ +#define AT91_PMC_PCKRDY(id) (8 + (id)) /* Programmable Clock */ +#define AT91_PMC_MOSCSELS 16 /* Main Oscillator Selection */ +#define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */ +#define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */ +#define AT91_PMC_GCKRDY 24 /* Generated Clocks */ +#endif + +#endif diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h new file mode 100644 index 0000000..eec8f39 --- /dev/null +++ b/include/dt-bindings/clock/ath79-clk.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014, 2016 Antony Pavlov + */ + +#ifndef __DT_BINDINGS_ATH79_CLK_H +#define __DT_BINDINGS_ATH79_CLK_H + +#define ATH79_CLK_CPU 0 +#define ATH79_CLK_DDR 1 +#define ATH79_CLK_AHB 2 +#define ATH79_CLK_REF 3 +#define ATH79_CLK_MDIO 4 + +#define ATH79_CLK_END 5 + +#endif /* __DT_BINDINGS_ATH79_CLK_H */ diff --git a/include/dt-bindings/clock/axg-aoclkc.h b/include/dt-bindings/clock/axg-aoclkc.h new file mode 100644 index 0000000..8ec4a26 --- /dev/null +++ b/include/dt-bindings/clock/axg-aoclkc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2016 BayLibre, SAS + * Author: Neil Armstrong + * + * Copyright (c) 2018 Amlogic, inc. + * Author: Qiufang Dai + */ + +#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK +#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_AXG_AOCLK + +#define CLKID_AO_REMOTE 0 +#define CLKID_AO_I2C_MASTER 1 +#define CLKID_AO_I2C_SLAVE 2 +#define CLKID_AO_UART1 3 +#define CLKID_AO_UART2 4 +#define CLKID_AO_IR_BLASTER 5 +#define CLKID_AO_SAR_ADC 6 +#define CLKID_AO_CLK81 7 +#define CLKID_AO_SAR_ADC_SEL 8 +#define CLKID_AO_SAR_ADC_DIV 9 +#define CLKID_AO_SAR_ADC_CLK 10 +#define CLKID_AO_CTS_OSCIN 11 +#define CLKID_AO_32K_PRE 12 +#define CLKID_AO_32K_DIV 13 +#define CLKID_AO_32K_SEL 14 +#define CLKID_AO_32K 15 +#define CLKID_AO_CTS_RTC_OSCIN 16 + +#endif diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h new file mode 100644 index 0000000..75901c6 --- /dev/null +++ b/include/dt-bindings/clock/axg-audio-clkc.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2018 Baylibre SAS. 
+ * Author: Jerome Brunet + */ + +#ifndef __AXG_AUDIO_CLKC_BINDINGS_H +#define __AXG_AUDIO_CLKC_BINDINGS_H + +#define AUD_CLKID_DDR_ARB 29 +#define AUD_CLKID_PDM 30 +#define AUD_CLKID_TDMIN_A 31 +#define AUD_CLKID_TDMIN_B 32 +#define AUD_CLKID_TDMIN_C 33 +#define AUD_CLKID_TDMIN_LB 34 +#define AUD_CLKID_TDMOUT_A 35 +#define AUD_CLKID_TDMOUT_B 36 +#define AUD_CLKID_TDMOUT_C 37 +#define AUD_CLKID_FRDDR_A 38 +#define AUD_CLKID_FRDDR_B 39 +#define AUD_CLKID_FRDDR_C 40 +#define AUD_CLKID_TODDR_A 41 +#define AUD_CLKID_TODDR_B 42 +#define AUD_CLKID_TODDR_C 43 +#define AUD_CLKID_LOOPBACK 44 +#define AUD_CLKID_SPDIFIN 45 +#define AUD_CLKID_SPDIFOUT 46 +#define AUD_CLKID_RESAMPLE 47 +#define AUD_CLKID_POWER_DETECT 48 +#define AUD_CLKID_MST_A_MCLK 49 +#define AUD_CLKID_MST_B_MCLK 50 +#define AUD_CLKID_MST_C_MCLK 51 +#define AUD_CLKID_MST_D_MCLK 52 +#define AUD_CLKID_MST_E_MCLK 53 +#define AUD_CLKID_MST_F_MCLK 54 +#define AUD_CLKID_SPDIFOUT_CLK 55 +#define AUD_CLKID_SPDIFIN_CLK 56 +#define AUD_CLKID_PDM_DCLK 57 +#define AUD_CLKID_PDM_SYSCLK 58 +#define AUD_CLKID_MST_A_SCLK 79 +#define AUD_CLKID_MST_B_SCLK 80 +#define AUD_CLKID_MST_C_SCLK 81 +#define AUD_CLKID_MST_D_SCLK 82 +#define AUD_CLKID_MST_E_SCLK 83 +#define AUD_CLKID_MST_F_SCLK 84 +#define AUD_CLKID_MST_A_LRCLK 86 +#define AUD_CLKID_MST_B_LRCLK 87 +#define AUD_CLKID_MST_C_LRCLK 88 +#define AUD_CLKID_MST_D_LRCLK 89 +#define AUD_CLKID_MST_E_LRCLK 90 +#define AUD_CLKID_MST_F_LRCLK 91 +#define AUD_CLKID_TDMIN_A_SCLK_SEL 116 +#define AUD_CLKID_TDMIN_B_SCLK_SEL 117 +#define AUD_CLKID_TDMIN_C_SCLK_SEL 118 +#define AUD_CLKID_TDMIN_LB_SCLK_SEL 119 +#define AUD_CLKID_TDMOUT_A_SCLK_SEL 120 +#define AUD_CLKID_TDMOUT_B_SCLK_SEL 121 +#define AUD_CLKID_TDMOUT_C_SCLK_SEL 122 +#define AUD_CLKID_TDMIN_A_SCLK 123 +#define AUD_CLKID_TDMIN_B_SCLK 124 +#define AUD_CLKID_TDMIN_C_SCLK 125 +#define AUD_CLKID_TDMIN_LB_SCLK 126 +#define AUD_CLKID_TDMOUT_A_SCLK 127 +#define AUD_CLKID_TDMOUT_B_SCLK 128 +#define AUD_CLKID_TDMOUT_C_SCLK 129 +#define AUD_CLKID_TDMIN_A_LRCLK 130 +#define AUD_CLKID_TDMIN_B_LRCLK 131 +#define AUD_CLKID_TDMIN_C_LRCLK 132 +#define AUD_CLKID_TDMIN_LB_LRCLK 133 +#define AUD_CLKID_TDMOUT_A_LRCLK 134 +#define AUD_CLKID_TDMOUT_B_LRCLK 135 +#define AUD_CLKID_TDMOUT_C_LRCLK 136 +#define AUD_CLKID_SPDIFOUT_B 151 +#define AUD_CLKID_SPDIFOUT_B_CLK 152 +#define AUD_CLKID_TDM_MCLK_PAD0 155 +#define AUD_CLKID_TDM_MCLK_PAD1 156 +#define AUD_CLKID_TDM_LRCLK_PAD0 157 +#define AUD_CLKID_TDM_LRCLK_PAD1 158 +#define AUD_CLKID_TDM_LRCLK_PAD2 159 +#define AUD_CLKID_TDM_SCLK_PAD0 160 +#define AUD_CLKID_TDM_SCLK_PAD1 161 +#define AUD_CLKID_TDM_SCLK_PAD2 162 + +#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */ diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h new file mode 100644 index 0000000..fd1f938 --- /dev/null +++ b/include/dt-bindings/clock/axg-clkc.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* + * Meson-AXG clock tree IDs + * + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
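+ *
+ * Editorial note, not upstream text: consumers reference these IDs
+ * through the clock controller phandle, illustratively
+ * "clocks = <&clkc CLKID_SD_EMMC_B_CLK0>;".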
+ */ + +#ifndef __AXG_CLKC_H +#define __AXG_CLKC_H + +#define CLKID_SYS_PLL 0 +#define CLKID_FIXED_PLL 1 +#define CLKID_FCLK_DIV2 2 +#define CLKID_FCLK_DIV3 3 +#define CLKID_FCLK_DIV4 4 +#define CLKID_FCLK_DIV5 5 +#define CLKID_FCLK_DIV7 6 +#define CLKID_GP0_PLL 7 +#define CLKID_CLK81 10 +#define CLKID_MPLL0 11 +#define CLKID_MPLL1 12 +#define CLKID_MPLL2 13 +#define CLKID_MPLL3 14 +#define CLKID_DDR 15 +#define CLKID_AUDIO_LOCKER 16 +#define CLKID_MIPI_DSI_HOST 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC0 21 +#define CLKID_I2C 22 +#define CLKID_RNG0 23 +#define CLKID_UART0 24 +#define CLKID_MIPI_DSI_PHY 25 +#define CLKID_SPICC1 26 +#define CLKID_PCIE_A 27 +#define CLKID_PCIE_B 28 +#define CLKID_HIU_IFACE 29 +#define CLKID_ASSIST_MISC 30 +#define CLKID_SD_EMMC_B 31 +#define CLKID_SD_EMMC_C 32 +#define CLKID_DMA 33 +#define CLKID_SPI 34 +#define CLKID_AUDIO 35 +#define CLKID_ETH 36 +#define CLKID_UART1 37 +#define CLKID_G2D 38 +#define CLKID_USB0 39 +#define CLKID_USB1 40 +#define CLKID_RESET 41 +#define CLKID_USB 42 +#define CLKID_AHB_ARB0 43 +#define CLKID_EFUSE 44 +#define CLKID_BOOT_ROM 45 +#define CLKID_AHB_DATA_BUS 46 +#define CLKID_AHB_CTRL_BUS 47 +#define CLKID_USB1_DDR_BRIDGE 48 +#define CLKID_USB0_DDR_BRIDGE 49 +#define CLKID_MMC_PCLK 50 +#define CLKID_VPU_INTR 51 +#define CLKID_SEC_AHB_AHB3_BRIDGE 52 +#define CLKID_GIC 53 +#define CLKID_AO_MEDIA_CPU 54 +#define CLKID_AO_AHB_SRAM 55 +#define CLKID_AO_AHB_BUS 56 +#define CLKID_AO_IFACE 57 +#define CLKID_AO_I2C 58 +#define CLKID_SD_EMMC_B_CLK0 59 +#define CLKID_SD_EMMC_C_CLK0 60 +#define CLKID_HIFI_PLL 69 +#define CLKID_PCIE_CML_EN0 79 +#define CLKID_PCIE_CML_EN1 80 +#define CLKID_MIPI_ENABLE 81 +#define CLKID_GEN_CLK 84 + +#endif /* __AXG_CLKC_H */ diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h new file mode 100644 index 0000000..b1f4971 --- /dev/null +++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ARTPEC-6 clock controller indexes + * + * Copyright 2016 Axis Communications AB. + */ + +#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H +#define DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H + +#define ARTPEC6_CLK_CPU 0 +#define ARTPEC6_CLK_CPU_PERIPH 1 +#define ARTPEC6_CLK_NAND_CLKA 2 +#define ARTPEC6_CLK_NAND_CLKB 3 +#define ARTPEC6_CLK_ETH_ACLK 4 +#define ARTPEC6_CLK_DMA_ACLK 5 +#define ARTPEC6_CLK_PTP_REF 6 +#define ARTPEC6_CLK_SD_PCLK 7 +#define ARTPEC6_CLK_SD_IMCLK 8 +#define ARTPEC6_CLK_I2S_HST 9 +#define ARTPEC6_CLK_I2S0_CLK 10 +#define ARTPEC6_CLK_I2S1_CLK 11 +#define ARTPEC6_CLK_UART_PCLK 12 +#define ARTPEC6_CLK_UART_REFCLK 13 +#define ARTPEC6_CLK_I2C 14 +#define ARTPEC6_CLK_SPI_PCLK 15 +#define ARTPEC6_CLK_SPI_SSPCLK 16 +#define ARTPEC6_CLK_SYS_TIMER 17 +#define ARTPEC6_CLK_FRACDIV_IN 18 +#define ARTPEC6_CLK_DBG_PCLK 19 + +/* This must be the highest clock index plus one. */ +#define ARTPEC6_CLK_NUMCLOCKS 20 + +#endif diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h new file mode 100644 index 0000000..62ac5d7 --- /dev/null +++ b/include/dt-bindings/clock/bcm-cygnus.h @@ -0,0 +1,74 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2014 Broadcom Corporation. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_CYGNUS_H +#define _CLOCK_BCM_CYGNUS_H + +/* GENPLL clock ID */ +#define BCM_CYGNUS_GENPLL 0 +#define BCM_CYGNUS_GENPLL_AXI21_CLK 1 +#define BCM_CYGNUS_GENPLL_250MHZ_CLK 2 +#define BCM_CYGNUS_GENPLL_IHOST_SYS_CLK 3 +#define BCM_CYGNUS_GENPLL_ENET_SW_CLK 4 +#define BCM_CYGNUS_GENPLL_AUDIO_125_CLK 5 +#define BCM_CYGNUS_GENPLL_CAN_CLK 6 + +/* LCPLL0 clock ID */ +#define BCM_CYGNUS_LCPLL0 0 +#define BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK 1 +#define BCM_CYGNUS_LCPLL0_DDR_PHY_CLK 2 +#define BCM_CYGNUS_LCPLL0_SDIO_CLK 3 +#define BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK 4 +#define BCM_CYGNUS_LCPLL0_SMART_CARD_CLK 5 +#define BCM_CYGNUS_LCPLL0_CH5_UNUSED 6 + +/* MIPI PLL clock ID */ +#define BCM_CYGNUS_MIPIPLL 0 +#define BCM_CYGNUS_MIPIPLL_CH0_UNUSED 1 +#define BCM_CYGNUS_MIPIPLL_CH1_LCD 2 +#define BCM_CYGNUS_MIPIPLL_CH2_V3D 3 +#define BCM_CYGNUS_MIPIPLL_CH3_UNUSED 4 +#define BCM_CYGNUS_MIPIPLL_CH4_UNUSED 5 +#define BCM_CYGNUS_MIPIPLL_CH5_UNUSED 6 + +/* ASIU clock ID */ +#define BCM_CYGNUS_ASIU_KEYPAD_CLK 0 +#define BCM_CYGNUS_ASIU_ADC_CLK 1 +#define BCM_CYGNUS_ASIU_PWM_CLK 2 + +/* AUDIO clock ID */ +#define BCM_CYGNUS_AUDIOPLL 0 +#define BCM_CYGNUS_AUDIOPLL_CH0 1 +#define BCM_CYGNUS_AUDIOPLL_CH1 2 +#define BCM_CYGNUS_AUDIOPLL_CH2 3 + +#endif /* _CLOCK_BCM_CYGNUS_H */ diff --git a/include/dt-bindings/clock/bcm-ns2.h b/include/dt-bindings/clock/bcm-ns2.h new file mode 100644 index 0000000..d99c7a2 --- /dev/null +++ b/include/dt-bindings/clock/bcm-ns2.h @@ -0,0 +1,72 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2015 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_NS2_H +#define _CLOCK_BCM_NS2_H + +/* GENPLL SCR clock channel ID */ +#define BCM_NS2_GENPLL_SCR 0 +#define BCM_NS2_GENPLL_SCR_SCR_CLK 1 +#define BCM_NS2_GENPLL_SCR_FS_CLK 2 +#define BCM_NS2_GENPLL_SCR_AUDIO_CLK 3 +#define BCM_NS2_GENPLL_SCR_CH3_UNUSED 4 +#define BCM_NS2_GENPLL_SCR_CH4_UNUSED 5 +#define BCM_NS2_GENPLL_SCR_CH5_UNUSED 6 + +/* GENPLL SW clock channel ID */ +#define BCM_NS2_GENPLL_SW 0 +#define BCM_NS2_GENPLL_SW_RPE_CLK 1 +#define BCM_NS2_GENPLL_SW_250_CLK 2 +#define BCM_NS2_GENPLL_SW_NIC_CLK 3 +#define BCM_NS2_GENPLL_SW_CHIMP_CLK 4 +#define BCM_NS2_GENPLL_SW_PORT_CLK 5 +#define BCM_NS2_GENPLL_SW_SDIO_CLK 6 + +/* LCPLL DDR clock channel ID */ +#define BCM_NS2_LCPLL_DDR 0 +#define BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK 1 +#define BCM_NS2_LCPLL_DDR_DDR_CLK 2 +#define BCM_NS2_LCPLL_DDR_CH2_UNUSED 3 +#define BCM_NS2_LCPLL_DDR_CH3_UNUSED 4 +#define BCM_NS2_LCPLL_DDR_CH4_UNUSED 5 +#define BCM_NS2_LCPLL_DDR_CH5_UNUSED 6 + +/* LCPLL PORTS clock channel ID */ +#define BCM_NS2_LCPLL_PORTS 0 +#define BCM_NS2_LCPLL_PORTS_WAN_CLK 1 +#define BCM_NS2_LCPLL_PORTS_RGMII_CLK 2 +#define BCM_NS2_LCPLL_PORTS_CH2_UNUSED 3 +#define BCM_NS2_LCPLL_PORTS_CH3_UNUSED 4 +#define BCM_NS2_LCPLL_PORTS_CH4_UNUSED 5 +#define BCM_NS2_LCPLL_PORTS_CH5_UNUSED 6 + +#endif /* _CLOCK_BCM_NS2_H */ diff --git a/include/dt-bindings/clock/bcm-nsp.h b/include/dt-bindings/clock/bcm-nsp.h new file mode 100644 index 0000000..ad5827c --- /dev/null +++ b/include/dt-bindings/clock/bcm-nsp.h @@ -0,0 +1,51 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2015 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_NSP_H +#define _CLOCK_BCM_NSP_H + +/* GENPLL clock channel ID */ +#define BCM_NSP_GENPLL 0 +#define BCM_NSP_GENPLL_PHY_CLK 1 +#define BCM_NSP_GENPLL_ENET_SW_CLK 2 +#define BCM_NSP_GENPLL_USB_PHY_REF_CLK 3 +#define BCM_NSP_GENPLL_IPROCFAST_CLK 4 +#define BCM_NSP_GENPLL_SATA1_CLK 5 +#define BCM_NSP_GENPLL_SATA2_CLK 6 + +/* LCPLL0 clock channel ID */ +#define BCM_NSP_LCPLL0 0 +#define BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK 1 +#define BCM_NSP_LCPLL0_SDIO_CLK 2 +#define BCM_NSP_LCPLL0_DDR_PHY_CLK 3 + +#endif /* _CLOCK_BCM_NSP_H */ diff --git a/include/dt-bindings/clock/bcm-sr.h b/include/dt-bindings/clock/bcm-sr.h new file mode 100644 index 0000000..419011b --- /dev/null +++ b/include/dt-bindings/clock/bcm-sr.h @@ -0,0 +1,111 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2017 Broadcom. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef _CLOCK_BCM_SR_H
+#define _CLOCK_BCM_SR_H
+
+/* GENPLL 0 clock channel ID SCR HSLS FS PCIE */
+#define BCM_SR_GENPLL0 0
+#define BCM_SR_GENPLL0_125M_CLK 1
+#define BCM_SR_GENPLL0_SCR_CLK 2
+#define BCM_SR_GENPLL0_250M_CLK 3
+#define BCM_SR_GENPLL0_PCIE_AXI_CLK 4
+#define BCM_SR_GENPLL0_PAXC_AXI_X2_CLK 5
+#define BCM_SR_GENPLL0_PAXC_AXI_CLK 6
+
+/* GENPLL 1 clock channel ID MHB PCIE NITRO */
+#define BCM_SR_GENPLL1 0
+#define BCM_SR_GENPLL1_PCIE_TL_CLK 1
+#define BCM_SR_GENPLL1_MHB_APB_CLK 2
+
+/* GENPLL 2 clock channel ID NITRO MHB */
+#define BCM_SR_GENPLL2 0
+#define BCM_SR_GENPLL2_NIC_CLK 1
+#define BCM_SR_GENPLL2_TS_500_CLK 2
+#define BCM_SR_GENPLL2_125_NITRO_CLK 3
+#define BCM_SR_GENPLL2_CHIMP_CLK 4
+#define BCM_SR_GENPLL2_NIC_FLASH_CLK 5
+#define BCM_SR_GENPLL2_FS4_CLK 6
+
+/* GENPLL 3 HSLS clock channel ID */
+#define BCM_SR_GENPLL3 0
+#define BCM_SR_GENPLL3_HSLS_CLK 1
+#define BCM_SR_GENPLL3_SDIO_CLK 2
+
+/* GENPLL 4 SCR clock channel ID */
+#define BCM_SR_GENPLL4 0
+#define BCM_SR_GENPLL4_CCN_CLK 1
+#define BCM_SR_GENPLL4_TPIU_PLL_CLK 2
+#define BCM_SR_GENPLL4_NOC_CLK 3
+#define BCM_SR_GENPLL4_CHCLK_FS4_CLK 4
+#define BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK 5
+
+/* GENPLL 5 FS4 clock channel ID */
+#define BCM_SR_GENPLL5 0
+#define BCM_SR_GENPLL5_FS4_HF_CLK 1
+#define BCM_SR_GENPLL5_CRYPTO_AE_CLK 2
+#define BCM_SR_GENPLL5_RAID_AE_CLK 3
+
+/* GENPLL 6 NITRO clock channel ID */
+#define BCM_SR_GENPLL6 0
+#define BCM_SR_GENPLL6_48_USB_CLK 1
+
+/* LCPLL0 clock channel ID */
+#define BCM_SR_LCPLL0 0
+#define BCM_SR_LCPLL0_SATA_REFP_CLK 1
+#define BCM_SR_LCPLL0_SATA_REFN_CLK 2
+#define BCM_SR_LCPLL0_SATA_350_CLK 3
+#define BCM_SR_LCPLL0_SATA_500_CLK 4
+
+/* LCPLL1 clock channel ID */
+#define BCM_SR_LCPLL1 0
+#define BCM_SR_LCPLL1_WAN_CLK 1
+#define BCM_SR_LCPLL1_USB_REF_CLK 2
+#define BCM_SR_LCPLL1_CRMU_TS_CLK 3
+
+/* LCPLL PCIE clock channel ID */
+#define BCM_SR_LCPLL_PCIE 0
+#define BCM_SR_LCPLL_PCIE_PHY_REF_CLK 1
+
+/* GENPLL EMEM0 clock channel ID */
+#define BCM_SR_EMEMPLL0 0
+#define BCM_SR_EMEMPLL0_EMEM_CLK 1
+
+/* GENPLL EMEM1 clock channel ID */
+#define BCM_SR_EMEMPLL1 0
+#define BCM_SR_EMEMPLL1_EMEM_CLK 1
+
+/* GENPLL EMEM2 clock channel ID */
+#define BCM_SR_EMEMPLL2 0
+#define BCM_SR_EMEMPLL2_EMEM_CLK 1
+
+#endif /* _CLOCK_BCM_SR_H */
diff --git a/include/dt-bindings/clock/bcm21664.h b/include/dt-bindings/clock/bcm21664.h
new file mode 100644
index 0000000..5a7f0e4
--- /dev/null
+++ b/include/dt-bindings/clock/bcm21664.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CLOCK_BCM21664_H
+#define _CLOCK_BCM21664_H
+
+/*
+ * This file defines the values used to specify clocks provided by
+ * the clock control units (CCUs) on Broadcom BCM21664 family SoCs.
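+ *
+ * As an illustration (hypothetical node, not taken from a real DTS),
+ * a consumer references a CCU by phandle together with one of these
+ * per-CCU indices:
+ *
+ *	sdio1: mmc@3f190000 {
+ *		clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>;
+ *	};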
+ */ + +/* bcm21664 CCU device tree "compatible" strings */ +#define BCM21664_DT_ROOT_CCU_COMPAT "brcm,bcm21664-root-ccu" +#define BCM21664_DT_AON_CCU_COMPAT "brcm,bcm21664-aon-ccu" +#define BCM21664_DT_MASTER_CCU_COMPAT "brcm,bcm21664-master-ccu" +#define BCM21664_DT_SLAVE_CCU_COMPAT "brcm,bcm21664-slave-ccu" + +/* root CCU clock ids */ + +#define BCM21664_ROOT_CCU_FRAC_1M 0 +#define BCM21664_ROOT_CCU_CLOCK_COUNT 1 + +/* aon CCU clock ids */ + +#define BCM21664_AON_CCU_HUB_TIMER 0 +#define BCM21664_AON_CCU_CLOCK_COUNT 1 + +/* master CCU clock ids */ + +#define BCM21664_MASTER_CCU_SDIO1 0 +#define BCM21664_MASTER_CCU_SDIO2 1 +#define BCM21664_MASTER_CCU_SDIO3 2 +#define BCM21664_MASTER_CCU_SDIO4 3 +#define BCM21664_MASTER_CCU_SDIO1_SLEEP 4 +#define BCM21664_MASTER_CCU_SDIO2_SLEEP 5 +#define BCM21664_MASTER_CCU_SDIO3_SLEEP 6 +#define BCM21664_MASTER_CCU_SDIO4_SLEEP 7 +#define BCM21664_MASTER_CCU_CLOCK_COUNT 8 + +/* slave CCU clock ids */ + +#define BCM21664_SLAVE_CCU_UARTB 0 +#define BCM21664_SLAVE_CCU_UARTB2 1 +#define BCM21664_SLAVE_CCU_UARTB3 2 +#define BCM21664_SLAVE_CCU_BSC1 3 +#define BCM21664_SLAVE_CCU_BSC2 4 +#define BCM21664_SLAVE_CCU_BSC3 5 +#define BCM21664_SLAVE_CCU_BSC4 6 +#define BCM21664_SLAVE_CCU_CLOCK_COUNT 7 + +#endif /* _CLOCK_BCM21664_H */ diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h new file mode 100644 index 0000000..a763460 --- /dev/null +++ b/include/dt-bindings/clock/bcm281xx.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CLOCK_BCM281XX_H +#define _CLOCK_BCM281XX_H + +/* + * This file defines the values used to specify clocks provided by + * the clock control units (CCUs) on Broadcom BCM281XX family SoCs. + */ + +/* + * These are the bcm281xx CCU device tree "compatible" strings. + * We're stuck with using "bcm11351" in the string because wild + * cards aren't allowed, and that name was the first one defined + * in this family of devices. 
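+ *
+ * A BCM281XX CCU node (illustrative fragment only) is therefore
+ * declared with the bcm11351 name even on later parts, e.g.
+ *
+ *	slave_ccu: slave_ccu@3e011000 {
+ *		compatible = "brcm,bcm11351-slave-ccu";
+ *	};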
+ */ +#define BCM281XX_DT_ROOT_CCU_COMPAT "brcm,bcm11351-root-ccu" +#define BCM281XX_DT_AON_CCU_COMPAT "brcm,bcm11351-aon-ccu" +#define BCM281XX_DT_HUB_CCU_COMPAT "brcm,bcm11351-hub-ccu" +#define BCM281XX_DT_MASTER_CCU_COMPAT "brcm,bcm11351-master-ccu" +#define BCM281XX_DT_SLAVE_CCU_COMPAT "brcm,bcm11351-slave-ccu" + +/* root CCU clock ids */ + +#define BCM281XX_ROOT_CCU_FRAC_1M 0 +#define BCM281XX_ROOT_CCU_CLOCK_COUNT 1 + +/* aon CCU clock ids */ + +#define BCM281XX_AON_CCU_HUB_TIMER 0 +#define BCM281XX_AON_CCU_PMU_BSC 1 +#define BCM281XX_AON_CCU_PMU_BSC_VAR 2 +#define BCM281XX_AON_CCU_CLOCK_COUNT 3 + +/* hub CCU clock ids */ + +#define BCM281XX_HUB_CCU_TMON_1M 0 +#define BCM281XX_HUB_CCU_CLOCK_COUNT 1 + +/* master CCU clock ids */ + +#define BCM281XX_MASTER_CCU_SDIO1 0 +#define BCM281XX_MASTER_CCU_SDIO2 1 +#define BCM281XX_MASTER_CCU_SDIO3 2 +#define BCM281XX_MASTER_CCU_SDIO4 3 +#define BCM281XX_MASTER_CCU_USB_IC 4 +#define BCM281XX_MASTER_CCU_HSIC2_48M 5 +#define BCM281XX_MASTER_CCU_HSIC2_12M 6 +#define BCM281XX_MASTER_CCU_CLOCK_COUNT 7 + +/* slave CCU clock ids */ + +#define BCM281XX_SLAVE_CCU_UARTB 0 +#define BCM281XX_SLAVE_CCU_UARTB2 1 +#define BCM281XX_SLAVE_CCU_UARTB3 2 +#define BCM281XX_SLAVE_CCU_UARTB4 3 +#define BCM281XX_SLAVE_CCU_SSP0 4 +#define BCM281XX_SLAVE_CCU_SSP2 5 +#define BCM281XX_SLAVE_CCU_BSC1 6 +#define BCM281XX_SLAVE_CCU_BSC2 7 +#define BCM281XX_SLAVE_CCU_BSC3 8 +#define BCM281XX_SLAVE_CCU_PWM 9 +#define BCM281XX_SLAVE_CCU_CLOCK_COUNT 10 + +#endif /* _CLOCK_BCM281XX_H */ diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h new file mode 100644 index 0000000..bb79de3 --- /dev/null +++ b/include/dt-bindings/clock/bcm2835-aux.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Broadcom Corporation + */ + +#define BCM2835_AUX_CLOCK_UART 0 +#define BCM2835_AUX_CLOCK_SPI1 1 +#define BCM2835_AUX_CLOCK_SPI2 2 +#define BCM2835_AUX_CLOCK_COUNT 3 diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h new file mode 100644 index 0000000..b60c034 --- /dev/null +++ b/include/dt-bindings/clock/bcm2835.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Broadcom Corporation + */ + +#define BCM2835_PLLA 0 +#define BCM2835_PLLB 1 +#define BCM2835_PLLC 2 +#define BCM2835_PLLD 3 +#define BCM2835_PLLH 4 + +#define BCM2835_PLLA_CORE 5 +#define BCM2835_PLLA_PER 6 +#define BCM2835_PLLB_ARM 7 +#define BCM2835_PLLC_CORE0 8 +#define BCM2835_PLLC_CORE1 9 +#define BCM2835_PLLC_CORE2 10 +#define BCM2835_PLLC_PER 11 +#define BCM2835_PLLD_CORE 12 +#define BCM2835_PLLD_PER 13 +#define BCM2835_PLLH_RCAL 14 +#define BCM2835_PLLH_AUX 15 +#define BCM2835_PLLH_PIX 16 + +#define BCM2835_CLOCK_TIMER 17 +#define BCM2835_CLOCK_OTP 18 +#define BCM2835_CLOCK_UART 19 +#define BCM2835_CLOCK_VPU 20 +#define BCM2835_CLOCK_V3D 21 +#define BCM2835_CLOCK_ISP 22 +#define BCM2835_CLOCK_H264 23 +#define BCM2835_CLOCK_VEC 24 +#define BCM2835_CLOCK_HSM 25 +#define BCM2835_CLOCK_SDRAM 26 +#define BCM2835_CLOCK_TSENS 27 +#define BCM2835_CLOCK_EMMC 28 +#define BCM2835_CLOCK_PERI_IMAGE 29 +#define BCM2835_CLOCK_PWM 30 +#define BCM2835_CLOCK_PCM 31 + +#define BCM2835_PLLA_DSI0 32 +#define BCM2835_PLLA_CCP2 33 +#define BCM2835_PLLD_DSI0 34 +#define BCM2835_PLLD_DSI1 35 + +#define BCM2835_CLOCK_AVEO 36 +#define BCM2835_CLOCK_DFT 37 +#define BCM2835_CLOCK_GP0 38 +#define BCM2835_CLOCK_GP1 39 +#define BCM2835_CLOCK_GP2 40 +#define BCM2835_CLOCK_SLIM 41 +#define BCM2835_CLOCK_SMI 42 +#define 
BCM2835_CLOCK_TEC 43 +#define BCM2835_CLOCK_DPI 44 +#define BCM2835_CLOCK_CAM0 45 +#define BCM2835_CLOCK_CAM1 46 +#define BCM2835_CLOCK_DSI0E 47 +#define BCM2835_CLOCK_DSI1E 48 +#define BCM2835_CLOCK_DSI0P 49 +#define BCM2835_CLOCK_DSI1P 50 + +#define BCM2711_CLOCK_EMMC2 51 diff --git a/include/dt-bindings/clock/berlin2.h b/include/dt-bindings/clock/berlin2.h new file mode 100644 index 0000000..b07b8ef --- /dev/null +++ b/include/dt-bindings/clock/berlin2.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Berlin2 BG2/BG2CD clock tree IDs + */ + +#define CLKID_SYS 0 +#define CLKID_CPU 1 +#define CLKID_DRMFIGO 2 +#define CLKID_CFG 3 +#define CLKID_GFX 4 +#define CLKID_ZSP 5 +#define CLKID_PERIF 6 +#define CLKID_PCUBE 7 +#define CLKID_VSCOPE 8 +#define CLKID_NFC_ECC 9 +#define CLKID_VPP 10 +#define CLKID_APP 11 +#define CLKID_AUDIO0 12 +#define CLKID_AUDIO2 13 +#define CLKID_AUDIO3 14 +#define CLKID_AUDIO1 15 +#define CLKID_GFX3D_CORE 16 +#define CLKID_GFX3D_SYS 17 +#define CLKID_ARC 18 +#define CLKID_VIP 19 +#define CLKID_SDIO0XIN 20 +#define CLKID_SDIO1XIN 21 +#define CLKID_GFX3D_EXTRA 22 +#define CLKID_GC360 23 +#define CLKID_SDIO_DLLMST 24 +#define CLKID_GETH0 25 +#define CLKID_GETH1 26 +#define CLKID_SATA 27 +#define CLKID_AHBAPB 28 +#define CLKID_USB0 29 +#define CLKID_USB1 30 +#define CLKID_PBRIDGE 31 +#define CLKID_SDIO0 32 +#define CLKID_SDIO1 33 +#define CLKID_NFC 34 +#define CLKID_SMEMC 35 +#define CLKID_AUDIOHD 36 +#define CLKID_VIDEO0 37 +#define CLKID_VIDEO1 38 +#define CLKID_VIDEO2 39 +#define CLKID_TWD 40 diff --git a/include/dt-bindings/clock/berlin2q.h b/include/dt-bindings/clock/berlin2q.h new file mode 100644 index 0000000..44b4ac3 --- /dev/null +++ b/include/dt-bindings/clock/berlin2q.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Berlin2 BG2Q clock tree IDs + */ + +#define CLKID_SYS 0 +#define CLKID_DRMFIGO 1 +#define CLKID_CFG 2 +#define CLKID_GFX2D 3 +#define CLKID_ZSP 4 +#define CLKID_PERIF 5 +#define CLKID_PCUBE 6 +#define CLKID_VSCOPE 7 +#define CLKID_NFC_ECC 8 +#define CLKID_VPP 9 +#define CLKID_APP 10 +#define CLKID_SDIO0XIN 11 +#define CLKID_SDIO1XIN 12 +#define CLKID_GFX2DAXI 13 +#define CLKID_GETH0 14 +#define CLKID_SATA 15 +#define CLKID_AHBAPB 16 +#define CLKID_USB0 17 +#define CLKID_USB1 18 +#define CLKID_USB2 19 +#define CLKID_USB3 20 +#define CLKID_PBRIDGE 21 +#define CLKID_SDIO 22 +#define CLKID_NFC 23 +#define CLKID_SMEMC 24 +#define CLKID_PCIE 25 +#define CLKID_TWD 26 +#define CLKID_CPU 27 diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h new file mode 100644 index 0000000..a6f0098 --- /dev/null +++ b/include/dt-bindings/clock/boston-clock.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2016 Imagination Technologies + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ +#define __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ + +#define BOSTON_CLK_INPUT 0 +#define BOSTON_CLK_SYS 1 +#define BOSTON_CLK_CPU 2 + +#endif /* __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ */ diff --git a/include/dt-bindings/clock/clps711x-clock.h b/include/dt-bindings/clock/clps711x-clock.h new file mode 100644 index 0000000..55b403d --- /dev/null +++ b/include/dt-bindings/clock/clps711x-clock.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Alexander Shiyan + */ + +#ifndef __DT_BINDINGS_CLOCK_CLPS711X_H +#define __DT_BINDINGS_CLOCK_CLPS711X_H + +#define CLPS711X_CLK_DUMMY 0 +#define CLPS711X_CLK_CPU 1 +#define CLPS711X_CLK_BUS 2 +#define 
CLPS711X_CLK_PLL 3 +#define CLPS711X_CLK_TIMERREF 4 +#define CLPS711X_CLK_TIMER1 5 +#define CLPS711X_CLK_TIMER2 6 +#define CLPS711X_CLK_PWM 7 +#define CLPS711X_CLK_SPIREF 8 +#define CLPS711X_CLK_SPI 9 +#define CLPS711X_CLK_UART 10 +#define CLPS711X_CLK_TICK 11 +#define CLPS711X_CLK_MAX 12 + +#endif diff --git a/include/dt-bindings/clock/cortina,gemini-clock.h b/include/dt-bindings/clock/cortina,gemini-clock.h new file mode 100644 index 0000000..04c3404 --- /dev/null +++ b/include/dt-bindings/clock/cortina,gemini-clock.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_CORTINA_GEMINI_CLOCK_H +#define DT_BINDINGS_CORTINA_GEMINI_CLOCK_H + +/* RTC, AHB, APB, CPU, PCI, TVC, UART clocks and 13 gates */ +#define GEMINI_NUM_CLKS 20 + +#define GEMINI_CLK_RTC 0 +#define GEMINI_CLK_AHB 1 +#define GEMINI_CLK_APB 2 +#define GEMINI_CLK_CPU 3 +#define GEMINI_CLK_PCI 4 +#define GEMINI_CLK_TVC 5 +#define GEMINI_CLK_UART 6 +#define GEMINI_CLK_GATES 7 +#define GEMINI_CLK_GATE_SECURITY 7 +#define GEMINI_CLK_GATE_GMAC0 8 +#define GEMINI_CLK_GATE_GMAC1 9 +#define GEMINI_CLK_GATE_SATA0 10 +#define GEMINI_CLK_GATE_SATA1 11 +#define GEMINI_CLK_GATE_USB0 12 +#define GEMINI_CLK_GATE_USB1 13 +#define GEMINI_CLK_GATE_IDE 14 +#define GEMINI_CLK_GATE_PCI 15 +#define GEMINI_CLK_GATE_DDR 16 +#define GEMINI_CLK_GATE_FLASH 17 +#define GEMINI_CLK_GATE_TVC 18 +#define GEMINI_CLK_GATE_BOOT 19 + +#endif /* DT_BINDINGS_CORTINA_GEMINI_CLOCK_H */ diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h new file mode 100644 index 0000000..f0f04e0 --- /dev/null +++ b/include/dt-bindings/clock/dm814.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. + */ +#ifndef __DT_BINDINGS_CLK_DM814_H +#define __DT_BINDINGS_CLK_DM814_H + +#define DM814_CLKCTRL_OFFSET 0x0 +#define DM814_CLKCTRL_INDEX(offset) ((offset) - DM814_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM814_USB_OTG_HS_CLKCTRL DM814_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM814_UART1_CLKCTRL DM814_CLKCTRL_INDEX(0x150) +#define DM814_UART2_CLKCTRL DM814_CLKCTRL_INDEX(0x154) +#define DM814_UART3_CLKCTRL DM814_CLKCTRL_INDEX(0x158) +#define DM814_GPIO1_CLKCTRL DM814_CLKCTRL_INDEX(0x15c) +#define DM814_GPIO2_CLKCTRL DM814_CLKCTRL_INDEX(0x160) +#define DM814_I2C1_CLKCTRL DM814_CLKCTRL_INDEX(0x164) +#define DM814_I2C2_CLKCTRL DM814_CLKCTRL_INDEX(0x168) +#define DM814_WD_TIMER_CLKCTRL DM814_CLKCTRL_INDEX(0x18c) +#define DM814_MCSPI1_CLKCTRL DM814_CLKCTRL_INDEX(0x190) +#define DM814_GPMC_CLKCTRL DM814_CLKCTRL_INDEX(0x1d0) +#define DM814_CPGMAC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1d4) +#define DM814_MPU_CLKCTRL DM814_CLKCTRL_INDEX(0x1dc) +#define DM814_RTC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f0) +#define DM814_TPCC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f4) +#define DM814_TPTC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1f8) +#define DM814_TPTC1_CLKCTRL DM814_CLKCTRL_INDEX(0x1fc) +#define DM814_TPTC2_CLKCTRL DM814_CLKCTRL_INDEX(0x200) +#define DM814_TPTC3_CLKCTRL DM814_CLKCTRL_INDEX(0x204) +#define DM814_MMC1_CLKCTRL DM814_CLKCTRL_INDEX(0x21c) +#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220) +#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224) + +#endif diff --git a/include/dt-bindings/clock/dm816.h b/include/dt-bindings/clock/dm816.h new file mode 100644 index 0000000..fb0d941 --- /dev/null +++ b/include/dt-bindings/clock/dm816.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. 
+ */ +#ifndef __DT_BINDINGS_CLK_DM816_H +#define __DT_BINDINGS_CLK_DM816_H + +#define DM816_CLKCTRL_OFFSET 0x0 +#define DM816_CLKCTRL_INDEX(offset) ((offset) - DM816_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM816_USB_OTG_HS_CLKCTRL DM816_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM816_UART1_CLKCTRL DM816_CLKCTRL_INDEX(0x150) +#define DM816_UART2_CLKCTRL DM816_CLKCTRL_INDEX(0x154) +#define DM816_UART3_CLKCTRL DM816_CLKCTRL_INDEX(0x158) +#define DM816_GPIO1_CLKCTRL DM816_CLKCTRL_INDEX(0x15c) +#define DM816_GPIO2_CLKCTRL DM816_CLKCTRL_INDEX(0x160) +#define DM816_I2C1_CLKCTRL DM816_CLKCTRL_INDEX(0x164) +#define DM816_I2C2_CLKCTRL DM816_CLKCTRL_INDEX(0x168) +#define DM816_TIMER1_CLKCTRL DM816_CLKCTRL_INDEX(0x170) +#define DM816_TIMER2_CLKCTRL DM816_CLKCTRL_INDEX(0x174) +#define DM816_TIMER3_CLKCTRL DM816_CLKCTRL_INDEX(0x178) +#define DM816_TIMER4_CLKCTRL DM816_CLKCTRL_INDEX(0x17c) +#define DM816_TIMER5_CLKCTRL DM816_CLKCTRL_INDEX(0x180) +#define DM816_TIMER6_CLKCTRL DM816_CLKCTRL_INDEX(0x184) +#define DM816_TIMER7_CLKCTRL DM816_CLKCTRL_INDEX(0x188) +#define DM816_WD_TIMER_CLKCTRL DM816_CLKCTRL_INDEX(0x18c) +#define DM816_MCSPI1_CLKCTRL DM816_CLKCTRL_INDEX(0x190) +#define DM816_MAILBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x194) +#define DM816_SPINBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x198) +#define DM816_MMC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1b0) +#define DM816_GPMC_CLKCTRL DM816_CLKCTRL_INDEX(0x1d0) +#define DM816_DAVINCI_MDIO_CLKCTRL DM816_CLKCTRL_INDEX(0x1d4) +#define DM816_EMAC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1d8) +#define DM816_MPU_CLKCTRL DM816_CLKCTRL_INDEX(0x1dc) +#define DM816_RTC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f0) +#define DM816_TPCC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f4) +#define DM816_TPTC0_CLKCTRL DM816_CLKCTRL_INDEX(0x1f8) +#define DM816_TPTC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1fc) +#define DM816_TPTC2_CLKCTRL DM816_CLKCTRL_INDEX(0x200) +#define DM816_TPTC3_CLKCTRL DM816_CLKCTRL_INDEX(0x204) + +#endif diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h new file mode 100644 index 0000000..72f2e84 --- /dev/null +++ b/include/dt-bindings/clock/dra7.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. 
+ */ +#ifndef __DT_BINDINGS_CLK_DRA7_H +#define __DT_BINDINGS_CLK_DRA7_H + +#define DRA7_CLKCTRL_OFFSET 0x20 +#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET) + +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + +/* mpu clocks */ +#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define _DRA7_IPU_CLKCTRL_OFFSET 0x40 +#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80) + +/* rtc clocks */ +#define DRA7_RTC_CLKCTRL_OFFSET 0x40 +#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET) +#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44) + +/* coreaon clocks */ +#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) + +/* l3main1 clocks */ +#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) + +/* dma clocks */ +#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* atl clocks */ +#define DRA7_ATL_CLKCTRL_OFFSET 0x0 +#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET) +#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0) + +/* l4cfg clocks */ +#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58) +#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60) +#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68) +#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) +#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98) +#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* l3instr clocks */ +#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + +/* dss clocks */ +#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) + +/* l3init clocks */ +#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0) +#define DRA7_PCIE2_CLKCTRL 
DRA7_CLKCTRL_INDEX(0xb8) +#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0) +#define DRA7_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0) +#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8) +#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) + +/* l4per clocks */ +#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0 +#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc) +#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14) +#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_TIMER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90) +#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98) +#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4) +#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8) +#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0) +#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8) +#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130) +#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138) +#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160) +#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168) +#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170) +#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178) +#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190) +#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198) +#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0) +#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8) +#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0) +#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0) +#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8) +#define DRA7_UART7_CLKCTRL 
_DRA7_L4PER_CLKCTRL_INDEX(0x1d0) +#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0) +#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8) +#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0) +#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204) +#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208) + +/* wkupaon clocks */ +#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* XXX: Compatibility part end. */ + +/* mpu clocks */ +#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* dsp1 clocks */ +#define DRA7_DSP1_MMU0_DSP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu1 clocks */ +#define DRA7_IPU1_MMU_IPU1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define DRA7_IPU_CLKCTRL_OFFSET 0x50 +#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_IPU_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_IPU_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_IPU_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_IPU_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_IPU_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_IPU_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_IPU_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80) + +/* dsp2 clocks */ +#define DRA7_DSP2_MMU0_DSP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* rtc clocks */ +#define DRA7_RTC_RTCSS_CLKCTRL DRA7_CLKCTRL_INDEX(0x44) + +/* coreaon clocks */ +#define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) + +/* l3main1 clocks */ +#define DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3MAIN1_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_L3MAIN1_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_L3MAIN1_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_L3MAIN1_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_L3MAIN1_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_L3MAIN1_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) + +/* ipu2 clocks */ +#define DRA7_IPU2_MMU_IPU2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* dma clocks */ +#define DRA7_DMA_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define DRA7_EMIF_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* atl clocks */ +#define DRA7_ATL_CLKCTRL_OFFSET 0x0 +#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET) +#define DRA7_ATL_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0) + +/* l4cfg clocks */ +#define DRA7_L4CFG_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L4CFG_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_L4CFG_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_L4CFG_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_L4CFG_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_L4CFG_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58) +#define DRA7_L4CFG_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60) +#define DRA7_L4CFG_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68) +#define DRA7_L4CFG_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_L4CFG_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) 
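+/*
+ * Illustration (hypothetical, not part of the original header): the
+ * DRA7_CLKCTRL_INDEX() wrappers map a clkctrl register offset to a
+ * zero-based index by subtracting the 0x20 base offset, e.g.
+ *
+ *	DRA7_MPU_MPU_CLKCTRL        = DRA7_CLKCTRL_INDEX(0x20) = 0
+ *	DRA7_L4CFG_MAILBOX1_CLKCTRL = DRA7_CLKCTRL_INDEX(0x30) = 0x10
+ */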
+#define DRA7_L4CFG_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_L4CFG_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_L4CFG_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) +#define DRA7_L4CFG_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98) +#define DRA7_L4CFG_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* l3instr clocks */ +#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + +/* dss clocks */ +#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) + +/* l3init clocks */ +#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_L3INIT_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_L3INIT_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_L3INIT_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_L3INIT_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_L3INIT_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0) +#define DRA7_L3INIT_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8) +#define DRA7_L3INIT_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) + +/* pcie clocks */ +#define DRA7_PCIE_CLKCTRL_OFFSET 0xb0 +#define DRA7_PCIE_CLKCTRL_INDEX(offset) ((offset) - DRA7_PCIE_CLKCTRL_OFFSET) +#define DRA7_PCIE_PCIE1_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb0) +#define DRA7_PCIE_PCIE2_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb8) + +/* gmac clocks */ +#define DRA7_GMAC_CLKCTRL_OFFSET 0xd0 +#define DRA7_GMAC_CLKCTRL_INDEX(offset) ((offset) - DRA7_GMAC_CLKCTRL_OFFSET) +#define DRA7_GMAC_GMAC_CLKCTRL DRA7_GMAC_CLKCTRL_INDEX(0xd0) + +/* l4per clocks */ +#define DRA7_L4PER_CLKCTRL_OFFSET 0x28 +#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4PER_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_L4PER_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_L4PER_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_L4PER_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_L4PER_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_L4PER_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_L4PER_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_L4PER_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_L4PER_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_L4PER_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_L4PER_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_L4PER_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_L4PER_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_L4PER_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_L4PER_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_L4PER_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_L4PER_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4PER_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_L4PER_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_L4PER_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_L4PER_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_L4PER_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_L4PER_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_L4PER_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_L4PER_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_L4PER_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_L4PER_UART1_CLKCTRL 
DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_L4PER_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_L4PER_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_L4PER_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_L4PER_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170) + +/* l4sec clocks */ +#define DRA7_L4SEC_CLKCTRL_OFFSET 0x1a0 +#define DRA7_L4SEC_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4SEC_CLKCTRL_OFFSET) +#define DRA7_L4SEC_AES1_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a0) +#define DRA7_L4SEC_AES2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a8) +#define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0) +#define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0) +#define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8) + +/* l4per2 clocks */ +#define DRA7_L4PER2_CLKCTRL_OFFSET 0xc +#define DRA7_L4PER2_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER2_CLKCTRL_OFFSET) +#define DRA7_L4PER2_L4_PER2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc) +#define DRA7_L4PER2_PRUSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x18) +#define DRA7_L4PER2_PRUSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x20) +#define DRA7_L4PER2_EPWMSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x90) +#define DRA7_L4PER2_EPWMSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x98) +#define DRA7_L4PER2_EPWMSS0_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc4) +#define DRA7_L4PER2_QSPI_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x138) +#define DRA7_L4PER2_MCASP2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x160) +#define DRA7_L4PER2_MCASP3_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x168) +#define DRA7_L4PER2_MCASP5_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x178) +#define DRA7_L4PER2_MCASP8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x190) +#define DRA7_L4PER2_MCASP4_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x198) +#define DRA7_L4PER2_UART7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1d0) +#define DRA7_L4PER2_UART8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e0) +#define DRA7_L4PER2_UART9_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e8) +#define DRA7_L4PER2_DCAN2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1f0) +#define DRA7_L4PER2_MCASP6_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x204) +#define DRA7_L4PER2_MCASP7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x208) + +/* l4per3 clocks */ +#define DRA7_L4PER3_CLKCTRL_OFFSET 0x14 +#define DRA7_L4PER3_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER3_CLKCTRL_OFFSET) +#define DRA7_L4PER3_L4_PER3_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x14) +#define DRA7_L4PER3_TIMER13_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xc8) +#define DRA7_L4PER3_TIMER14_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd0) +#define DRA7_L4PER3_TIMER15_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd8) +#define DRA7_L4PER3_TIMER16_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x130) + +/* wkupaon clocks */ +#define DRA7_WKUPAON_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_WKUPAON_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_WKUPAON_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) +#define DRA7_WKUPAON_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_WKUPAON_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_WKUPAON_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_WKUPAON_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_WKUPAON_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_WKUPAON_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +#endif diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h new file mode 100644 index 0000000..4b48d15 --- /dev/null +++ b/include/dt-bindings/clock/efm32-cmu.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H +#define 
__DT_BINDINGS_CLOCK_EFM32_CMU_H + +#define clk_HFXO 0 +#define clk_HFRCO 1 +#define clk_LFXO 2 +#define clk_LFRCO 3 +#define clk_ULFRCO 4 +#define clk_AUXHFRCO 5 +#define clk_HFCLKNODIV 6 +#define clk_HFCLK 7 +#define clk_HFPERCLK 8 +#define clk_HFCORECLK 9 +#define clk_LFACLK 10 +#define clk_LFBCLK 11 +#define clk_WDOGCLK 12 +#define clk_HFCORECLKDMA 13 +#define clk_HFCORECLKAES 14 +#define clk_HFCORECLKUSBC 15 +#define clk_HFCORECLKUSB 16 +#define clk_HFCORECLKLE 17 +#define clk_HFCORECLKEBI 18 +#define clk_HFPERCLKUSART0 19 +#define clk_HFPERCLKUSART1 20 +#define clk_HFPERCLKUSART2 21 +#define clk_HFPERCLKUART0 22 +#define clk_HFPERCLKUART1 23 +#define clk_HFPERCLKTIMER0 24 +#define clk_HFPERCLKTIMER1 25 +#define clk_HFPERCLKTIMER2 26 +#define clk_HFPERCLKTIMER3 27 +#define clk_HFPERCLKACMP0 28 +#define clk_HFPERCLKACMP1 29 +#define clk_HFPERCLKI2C0 30 +#define clk_HFPERCLKI2C1 31 +#define clk_HFPERCLKGPIO 32 +#define clk_HFPERCLKVCMP 33 +#define clk_HFPERCLKPRS 34 +#define clk_HFPERCLKADC0 35 +#define clk_HFPERCLKDAC0 36 + +#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */ diff --git a/include/dt-bindings/clock/exynos-audss-clk.h b/include/dt-bindings/clock/exynos-audss-clk.h new file mode 100644 index 0000000..eee9fcc --- /dev/null +++ b/include/dt-bindings/clock/exynos-audss-clk.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for Samsung audio subsystem + * clock controller. + * + * The constants defined in this header are being used in dts + * and exynos audss driver. + */ + +#ifndef _DT_BINDINGS_CLK_EXYNOS_AUDSS_H +#define _DT_BINDINGS_CLK_EXYNOS_AUDSS_H + +#define EXYNOS_MOUT_AUDSS 0 +#define EXYNOS_MOUT_I2S 1 +#define EXYNOS_DOUT_SRP 2 +#define EXYNOS_DOUT_AUD_BUS 3 +#define EXYNOS_DOUT_I2S 4 +#define EXYNOS_SRP_CLK 5 +#define EXYNOS_I2S_BUS 6 +#define EXYNOS_SCLK_I2S 7 +#define EXYNOS_PCM_BUS 8 +#define EXYNOS_SCLK_PCM 9 +#define EXYNOS_ADMA 10 + +#define EXYNOS_AUDSS_MAX_CLKS 11 + +#endif diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h new file mode 100644 index 0000000..fe82140 --- /dev/null +++ b/include/dt-bindings/clock/exynos3250.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Tomasz Figa + * + * Device Tree binding constants for Samsung Exynos3250 clock controllers. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to lookup the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. 
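+ *
+ * An illustrative lookup (hypothetical consumer node): the controller
+ * phandle plus one of these indices forms the clock specifier, e.g.
+ *
+ *	serial@13800000 {
+ *		clocks = <&cmu CLK_UART0>, <&cmu CLK_SCLK_UART0>;
+ *	};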
+ */ + + +/* + * Main CMU + */ + +#define CLK_OSCSEL 1 +#define CLK_FIN_PLL 2 +#define CLK_FOUT_APLL 3 +#define CLK_FOUT_VPLL 4 +#define CLK_FOUT_UPLL 5 +#define CLK_FOUT_MPLL 6 +#define CLK_ARM_CLK 7 + +/* Muxes */ +#define CLK_MOUT_MPLL_USER_L 16 +#define CLK_MOUT_GDL 17 +#define CLK_MOUT_MPLL_USER_R 18 +#define CLK_MOUT_GDR 19 +#define CLK_MOUT_EBI 20 +#define CLK_MOUT_ACLK_200 21 +#define CLK_MOUT_ACLK_160 22 +#define CLK_MOUT_ACLK_100 23 +#define CLK_MOUT_ACLK_266_1 24 +#define CLK_MOUT_ACLK_266_0 25 +#define CLK_MOUT_ACLK_266 26 +#define CLK_MOUT_VPLL 27 +#define CLK_MOUT_EPLL_USER 28 +#define CLK_MOUT_EBI_1 29 +#define CLK_MOUT_UPLL 30 +#define CLK_MOUT_ACLK_400_MCUISP_SUB 31 +#define CLK_MOUT_MPLL 32 +#define CLK_MOUT_ACLK_400_MCUISP 33 +#define CLK_MOUT_VPLLSRC 34 +#define CLK_MOUT_CAM1 35 +#define CLK_MOUT_CAM_BLK 36 +#define CLK_MOUT_MFC 37 +#define CLK_MOUT_MFC_1 38 +#define CLK_MOUT_MFC_0 39 +#define CLK_MOUT_G3D 40 +#define CLK_MOUT_G3D_1 41 +#define CLK_MOUT_G3D_0 42 +#define CLK_MOUT_MIPI0 43 +#define CLK_MOUT_FIMD0 44 +#define CLK_MOUT_UART_ISP 45 +#define CLK_MOUT_SPI1_ISP 46 +#define CLK_MOUT_SPI0_ISP 47 +#define CLK_MOUT_TSADC 48 +#define CLK_MOUT_MMC1 49 +#define CLK_MOUT_MMC0 50 +#define CLK_MOUT_UART1 51 +#define CLK_MOUT_UART0 52 +#define CLK_MOUT_SPI1 53 +#define CLK_MOUT_SPI0 54 +#define CLK_MOUT_AUDIO 55 +#define CLK_MOUT_MPLL_USER_C 56 +#define CLK_MOUT_HPM 57 +#define CLK_MOUT_CORE 58 +#define CLK_MOUT_APLL 59 +#define CLK_MOUT_ACLK_266_SUB 60 +#define CLK_MOUT_UART2 61 +#define CLK_MOUT_MMC2 62 + +/* Dividers */ +#define CLK_DIV_GPL 64 +#define CLK_DIV_GDL 65 +#define CLK_DIV_GPR 66 +#define CLK_DIV_GDR 67 +#define CLK_DIV_MPLL_PRE 68 +#define CLK_DIV_ACLK_400_MCUISP 69 +#define CLK_DIV_EBI 70 +#define CLK_DIV_ACLK_200 71 +#define CLK_DIV_ACLK_160 72 +#define CLK_DIV_ACLK_100 73 +#define CLK_DIV_ACLK_266 74 +#define CLK_DIV_CAM1 75 +#define CLK_DIV_CAM_BLK 76 +#define CLK_DIV_MFC 77 +#define CLK_DIV_G3D 78 +#define CLK_DIV_MIPI0_PRE 79 +#define CLK_DIV_MIPI0 80 +#define CLK_DIV_FIMD0 81 +#define CLK_DIV_UART_ISP 82 +#define CLK_DIV_SPI1_ISP_PRE 83 +#define CLK_DIV_SPI1_ISP 84 +#define CLK_DIV_SPI0_ISP_PRE 85 +#define CLK_DIV_SPI0_ISP 86 +#define CLK_DIV_TSADC_PRE 87 +#define CLK_DIV_TSADC 88 +#define CLK_DIV_MMC1_PRE 89 +#define CLK_DIV_MMC1 90 +#define CLK_DIV_MMC0_PRE 91 +#define CLK_DIV_MMC0 92 +#define CLK_DIV_UART1 93 +#define CLK_DIV_UART0 94 +#define CLK_DIV_SPI1_PRE 95 +#define CLK_DIV_SPI1 96 +#define CLK_DIV_SPI0_PRE 97 +#define CLK_DIV_SPI0 98 +#define CLK_DIV_PCM 99 +#define CLK_DIV_AUDIO 100 +#define CLK_DIV_I2S 101 +#define CLK_DIV_CORE2 102 +#define CLK_DIV_APLL 103 +#define CLK_DIV_PCLK_DBG 104 +#define CLK_DIV_ATB 105 +#define CLK_DIV_COREM 106 +#define CLK_DIV_CORE 107 +#define CLK_DIV_HPM 108 +#define CLK_DIV_COPY 109 +#define CLK_DIV_UART2 110 +#define CLK_DIV_MMC2_PRE 111 +#define CLK_DIV_MMC2 112 + +/* Gates */ +#define CLK_ASYNC_G3D 128 +#define CLK_ASYNC_MFCL 129 +#define CLK_PPMULEFT 130 +#define CLK_GPIO_LEFT 131 +#define CLK_ASYNC_ISPMX 132 +#define CLK_ASYNC_FSYSD 133 +#define CLK_ASYNC_LCD0X 134 +#define CLK_ASYNC_CAMX 135 +#define CLK_PPMURIGHT 136 +#define CLK_GPIO_RIGHT 137 +#define CLK_MONOCNT 138 +#define CLK_TZPC6 139 +#define CLK_PROVISIONKEY1 140 +#define CLK_PROVISIONKEY0 141 +#define CLK_CMU_ISPPART 142 +#define CLK_TMU_APBIF 143 +#define CLK_KEYIF 144 +#define CLK_RTC 145 +#define CLK_WDT 146 +#define CLK_MCT 147 +#define CLK_SECKEY 148 +#define CLK_TZPC5 149 +#define CLK_TZPC4 150 +#define CLK_TZPC3 151 +#define 
CLK_TZPC2 152 +#define CLK_TZPC1 153 +#define CLK_TZPC0 154 +#define CLK_CMU_COREPART 155 +#define CLK_CMU_TOPPART 156 +#define CLK_PMU_APBIF 157 +#define CLK_SYSREG 158 +#define CLK_CHIP_ID 159 +#define CLK_QEJPEG 160 +#define CLK_PIXELASYNCM1 161 +#define CLK_PIXELASYNCM0 162 +#define CLK_PPMUCAMIF 163 +#define CLK_QEM2MSCALER 164 +#define CLK_QEGSCALER1 165 +#define CLK_QEGSCALER0 166 +#define CLK_SMMUJPEG 167 +#define CLK_SMMUM2M2SCALER 168 +#define CLK_SMMUGSCALER1 169 +#define CLK_SMMUGSCALER0 170 +#define CLK_JPEG 171 +#define CLK_M2MSCALER 172 +#define CLK_GSCALER1 173 +#define CLK_GSCALER0 174 +#define CLK_QEMFC 175 +#define CLK_PPMUMFC_L 176 +#define CLK_SMMUMFC_L 177 +#define CLK_MFC 178 +#define CLK_SMMUG3D 179 +#define CLK_QEG3D 180 +#define CLK_PPMUG3D 181 +#define CLK_G3D 182 +#define CLK_QE_CH1_LCD 183 +#define CLK_QE_CH0_LCD 184 +#define CLK_PPMULCD0 185 +#define CLK_SMMUFIMD0 186 +#define CLK_DSIM0 187 +#define CLK_FIMD0 188 +#define CLK_CAM1 189 +#define CLK_UART_ISP_TOP 190 +#define CLK_SPI1_ISP_TOP 191 +#define CLK_SPI0_ISP_TOP 192 +#define CLK_TSADC 193 +#define CLK_PPMUFILE 194 +#define CLK_USBOTG 195 +#define CLK_USBHOST 196 +#define CLK_SROMC 197 +#define CLK_SDMMC1 198 +#define CLK_SDMMC0 199 +#define CLK_PDMA1 200 +#define CLK_PDMA0 201 +#define CLK_PWM 202 +#define CLK_PCM 203 +#define CLK_I2S 204 +#define CLK_SPI1 205 +#define CLK_SPI0 206 +#define CLK_I2C7 207 +#define CLK_I2C6 208 +#define CLK_I2C5 209 +#define CLK_I2C4 210 +#define CLK_I2C3 211 +#define CLK_I2C2 212 +#define CLK_I2C1 213 +#define CLK_I2C0 214 +#define CLK_UART1 215 +#define CLK_UART0 216 +#define CLK_BLOCK_LCD 217 +#define CLK_BLOCK_G3D 218 +#define CLK_BLOCK_MFC 219 +#define CLK_BLOCK_CAM 220 +#define CLK_SMIES 221 +#define CLK_UART2 222 +#define CLK_SDMMC2 223 + +/* Special clocks */ +#define CLK_SCLK_JPEG 224 +#define CLK_SCLK_M2MSCALER 225 +#define CLK_SCLK_GSCALER1 226 +#define CLK_SCLK_GSCALER0 227 +#define CLK_SCLK_MFC 228 +#define CLK_SCLK_G3D 229 +#define CLK_SCLK_MIPIDPHY2L 230 +#define CLK_SCLK_MIPI0 231 +#define CLK_SCLK_FIMD0 232 +#define CLK_SCLK_CAM1 233 +#define CLK_SCLK_UART_ISP 234 +#define CLK_SCLK_SPI1_ISP 235 +#define CLK_SCLK_SPI0_ISP 236 +#define CLK_SCLK_UPLL 237 +#define CLK_SCLK_TSADC 238 +#define CLK_SCLK_EBI 239 +#define CLK_SCLK_MMC1 240 +#define CLK_SCLK_MMC0 241 +#define CLK_SCLK_I2S 242 +#define CLK_SCLK_PCM 243 +#define CLK_SCLK_SPI1 244 +#define CLK_SCLK_SPI0 245 +#define CLK_SCLK_UART1 246 +#define CLK_SCLK_UART0 247 +#define CLK_SCLK_UART2 248 +#define CLK_SCLK_MMC2 249 + +/* + * Total number of clocks of main CMU. + * NOTE: Must be equal to last clock ID increased by one. + */ +#define CLK_NR_CLKS 250 + +/* + * CMU DMC + */ + +#define CLK_FOUT_BPLL 1 +#define CLK_FOUT_EPLL 2 + +/* Muxes */ +#define CLK_MOUT_MPLL_MIF 8 +#define CLK_MOUT_BPLL 9 +#define CLK_MOUT_DPHY 10 +#define CLK_MOUT_DMC_BUS 11 +#define CLK_MOUT_EPLL 12 + +/* Dividers */ +#define CLK_DIV_DMC 16 +#define CLK_DIV_DPHY 17 +#define CLK_DIV_DMC_PRE 18 +#define CLK_DIV_DMCP 19 +#define CLK_DIV_DMCD 20 + +/* + * Total number of clocks of main CMU. + * NOTE: Must be equal to last clock ID increased by one. 
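+ * The count below covers CMU DMC; illustratively, a provider driver
+ * would size its clock lookup table from this value:
+ *
+ *	static struct clk *dmc_clk_table[NR_CLKS_DMC];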
+ */ +#define NR_CLKS_DMC 21 + +/* + * CMU ISP + */ + +/* Dividers */ + +#define CLK_DIV_ISP1 1 +#define CLK_DIV_ISP0 2 +#define CLK_DIV_MCUISP1 3 +#define CLK_DIV_MCUISP0 4 +#define CLK_DIV_MPWM 5 + +/* Gates */ + +#define CLK_UART_ISP 8 +#define CLK_WDT_ISP 9 +#define CLK_PWM_ISP 10 +#define CLK_I2C1_ISP 11 +#define CLK_I2C0_ISP 12 +#define CLK_MPWM_ISP 13 +#define CLK_MCUCTL_ISP 14 +#define CLK_PPMUISPX 15 +#define CLK_PPMUISPMX 16 +#define CLK_QE_LITE1 17 +#define CLK_QE_LITE0 18 +#define CLK_QE_FD 19 +#define CLK_QE_DRC 20 +#define CLK_QE_ISP 21 +#define CLK_CSIS1 22 +#define CLK_SMMU_LITE1 23 +#define CLK_SMMU_LITE0 24 +#define CLK_SMMU_FD 25 +#define CLK_SMMU_DRC 26 +#define CLK_SMMU_ISP 27 +#define CLK_GICISP 28 +#define CLK_CSIS0 29 +#define CLK_MCUISP 30 +#define CLK_LITE1 31 +#define CLK_LITE0 32 +#define CLK_FD 33 +#define CLK_DRC 34 +#define CLK_ISP 35 +#define CLK_QE_ISPCX 36 +#define CLK_QE_SCALERP 37 +#define CLK_QE_SCALERC 38 +#define CLK_SMMU_SCALERP 39 +#define CLK_SMMU_SCALERC 40 +#define CLK_SCALERP 41 +#define CLK_SCALERC 42 +#define CLK_SPI1_ISP 43 +#define CLK_SPI0_ISP 44 +#define CLK_SMMU_ISPCX 45 +#define CLK_ASYNCAXIM 46 +#define CLK_SCLK_MPWM_ISP 47 + +/* + * Total number of clocks of CMU_ISP. + * NOTE: Must be equal to last clock ID increased by one. + */ +#define NR_CLKS_ISP 48 + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h new file mode 100644 index 0000000..88ec396 --- /dev/null +++ b/include/dt-bindings/clock/exynos4.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Andrzej Hajda + * + * Device Tree binding constants for Exynos4 clock controller. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_4_H +#define _DT_BINDINGS_CLOCK_EXYNOS_4_H + +/* core clocks */ +#define CLK_XXTI 1 +#define CLK_XUSBXTI 2 +#define CLK_FIN_PLL 3 +#define CLK_FOUT_APLL 4 +#define CLK_FOUT_MPLL 5 +#define CLK_FOUT_EPLL 6 +#define CLK_FOUT_VPLL 7 +#define CLK_SCLK_APLL 8 +#define CLK_SCLK_MPLL 9 +#define CLK_SCLK_EPLL 10 +#define CLK_SCLK_VPLL 11 +#define CLK_ARM_CLK 12 +#define CLK_ACLK200 13 +#define CLK_ACLK100 14 +#define CLK_ACLK160 15 +#define CLK_ACLK133 16 +#define CLK_MOUT_MPLL_USER_T 17 /* Exynos4x12 only */ +#define CLK_MOUT_MPLL_USER_C 18 /* Exynos4x12 only */ +#define CLK_MOUT_CORE 19 +#define CLK_MOUT_APLL 20 +#define CLK_SCLK_HDMIPHY 22 +#define CLK_OUT_DMC 23 +#define CLK_OUT_TOP 24 +#define CLK_OUT_LEFTBUS 25 +#define CLK_OUT_RIGHTBUS 26 +#define CLK_OUT_CPU 27 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_FIMC0 128 +#define CLK_SCLK_FIMC1 129 +#define CLK_SCLK_FIMC2 130 +#define CLK_SCLK_FIMC3 131 +#define CLK_SCLK_CAM0 132 +#define CLK_SCLK_CAM1 133 +#define CLK_SCLK_CSIS0 134 +#define CLK_SCLK_CSIS1 135 +#define CLK_SCLK_HDMI 136 +#define CLK_SCLK_MIXER 137 +#define CLK_SCLK_DAC 138 +#define CLK_SCLK_PIXEL 139 +#define CLK_SCLK_FIMD0 140 +#define CLK_SCLK_MDNIE0 141 /* Exynos4412 only */ +#define CLK_SCLK_MDNIE_PWM0 142 +#define CLK_SCLK_MIPI0 143 +#define CLK_SCLK_AUDIO0 144 +#define CLK_SCLK_MMC0 145 +#define CLK_SCLK_MMC1 146 +#define CLK_SCLK_MMC2 147 +#define CLK_SCLK_MMC3 148 +#define CLK_SCLK_MMC4 149 +#define CLK_SCLK_SATA 150 /* Exynos4210 only */ +#define CLK_SCLK_UART0 151 +#define CLK_SCLK_UART1 152 +#define CLK_SCLK_UART2 153 +#define CLK_SCLK_UART3 154 +#define CLK_SCLK_UART4 155 +#define CLK_SCLK_AUDIO1 156 +#define CLK_SCLK_AUDIO2 157 +#define CLK_SCLK_SPDIF 158 +#define CLK_SCLK_SPI0 159 +#define CLK_SCLK_SPI1 160 +#define CLK_SCLK_SPI2 161 +#define CLK_SCLK_SLIMBUS 162 +#define CLK_SCLK_FIMD1 163 /* Exynos4210 only */ +#define CLK_SCLK_MIPI1 164 /* Exynos4210 only */ +#define CLK_SCLK_PCM1 165 +#define CLK_SCLK_PCM2 166 +#define CLK_SCLK_I2S1 167 +#define CLK_SCLK_I2S2 168 +#define CLK_SCLK_MIPIHSI 169 /* Exynos4412 only */ +#define CLK_SCLK_MFC 170 +#define CLK_SCLK_PCM0 171 +#define CLK_SCLK_G3D 172 +#define CLK_SCLK_PWM_ISP 173 /* Exynos4x12 only */ +#define CLK_SCLK_SPI0_ISP 174 /* Exynos4x12 only */ +#define CLK_SCLK_SPI1_ISP 175 /* Exynos4x12 only */ +#define CLK_SCLK_UART_ISP 176 /* Exynos4x12 only */ +#define CLK_SCLK_FIMG2D 177 + +/* gate clocks */ +#define CLK_SSS 255 +#define CLK_FIMC0 256 +#define CLK_FIMC1 257 +#define CLK_FIMC2 258 +#define CLK_FIMC3 259 +#define CLK_CSIS0 260 +#define CLK_CSIS1 261 +#define CLK_JPEG 262 +#define CLK_SMMU_FIMC0 263 +#define CLK_SMMU_FIMC1 264 +#define CLK_SMMU_FIMC2 265 +#define CLK_SMMU_FIMC3 266 +#define CLK_SMMU_JPEG 267 +#define CLK_VP 268 +#define CLK_MIXER 269 +#define CLK_TVENC 270 /* Exynos4210 only */ +#define CLK_HDMI 271 +#define CLK_SMMU_TV 272 +#define CLK_MFC 273 +#define CLK_SMMU_MFCL 274 +#define CLK_SMMU_MFCR 275 +#define CLK_G3D 276 +#define CLK_G2D 277 +#define CLK_ROTATOR 278 +#define CLK_MDMA 279 +#define CLK_SMMU_G2D 280 +#define CLK_SMMU_ROTATOR 281 +#define CLK_SMMU_MDMA 282 +#define CLK_FIMD0 283 +#define CLK_MIE0 284 +#define CLK_MDNIE0 285 /* Exynos4412 only */ +#define CLK_DSIM0 286 +#define CLK_SMMU_FIMD0 287 +#define CLK_FIMD1 288 /* Exynos4210 only */ +#define CLK_MIE1 289 /* Exynos4210 only */ +#define CLK_DSIM1 290 /* Exynos4210 only */ +#define CLK_SMMU_FIMD1 291 /* Exynos4210 only */ +#define CLK_PDMA0 292 +#define CLK_PDMA1 293 
+#define CLK_PCIE_PHY 294 +#define CLK_SATA_PHY 295 /* Exynos4210 only */ +#define CLK_TSI 296 +#define CLK_SDMMC0 297 +#define CLK_SDMMC1 298 +#define CLK_SDMMC2 299 +#define CLK_SDMMC3 300 +#define CLK_SDMMC4 301 +#define CLK_SATA 302 /* Exynos4210 only */ +#define CLK_SROMC 303 +#define CLK_USB_HOST 304 +#define CLK_USB_DEVICE 305 +#define CLK_PCIE 306 +#define CLK_ONENAND 307 +#define CLK_NFCON 308 +#define CLK_SMMU_PCIE 309 +#define CLK_GPS 310 +#define CLK_SMMU_GPS 311 +#define CLK_UART0 312 +#define CLK_UART1 313 +#define CLK_UART2 314 +#define CLK_UART3 315 +#define CLK_UART4 316 +#define CLK_I2C0 317 +#define CLK_I2C1 318 +#define CLK_I2C2 319 +#define CLK_I2C3 320 +#define CLK_I2C4 321 +#define CLK_I2C5 322 +#define CLK_I2C6 323 +#define CLK_I2C7 324 +#define CLK_I2C_HDMI 325 +#define CLK_TSADC 326 +#define CLK_SPI0 327 +#define CLK_SPI1 328 +#define CLK_SPI2 329 +#define CLK_I2S1 330 +#define CLK_I2S2 331 +#define CLK_PCM0 332 +#define CLK_I2S0 333 +#define CLK_PCM1 334 +#define CLK_PCM2 335 +#define CLK_PWM 336 +#define CLK_SLIMBUS 337 +#define CLK_SPDIF 338 +#define CLK_AC97 339 +#define CLK_MODEMIF 340 +#define CLK_CHIPID 341 +#define CLK_SYSREG 342 +#define CLK_HDMI_CEC 343 +#define CLK_MCT 344 +#define CLK_WDT 345 +#define CLK_RTC 346 +#define CLK_KEYIF 347 +#define CLK_AUDSS 348 +#define CLK_MIPI_HSI 349 /* Exynos4210 only */ +#define CLK_PIXELASYNCM0 351 +#define CLK_PIXELASYNCM1 352 +#define CLK_ASYNC_G3D 353 /* Exynos4x12 only */ +#define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */ +#define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */ +#define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */ +#define CLK_UART_ISP_SCLK 382 /* Exynos4x12 only */ +#define CLK_TMU_APBIF 383 + +/* mux clocks */ +#define CLK_MOUT_FIMC0 384 +#define CLK_MOUT_FIMC1 385 +#define CLK_MOUT_FIMC2 386 +#define CLK_MOUT_FIMC3 387 +#define CLK_MOUT_CAM0 388 +#define CLK_MOUT_CAM1 389 +#define CLK_MOUT_CSIS0 390 +#define CLK_MOUT_CSIS1 391 +#define CLK_MOUT_G3D0 392 +#define CLK_MOUT_G3D1 393 +#define CLK_MOUT_G3D 394 +#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */ +#define CLK_MOUT_HDMI 396 +#define CLK_MOUT_MIXER 397 + +/* gate clocks - ppmu */ +#define CLK_PPMULEFT 400 +#define CLK_PPMURIGHT 401 +#define CLK_PPMUCAMIF 402 +#define CLK_PPMUTV 403 +#define CLK_PPMUMFC_L 404 +#define CLK_PPMUMFC_R 405 +#define CLK_PPMUG3D 406 +#define CLK_PPMUIMAGE 407 +#define CLK_PPMULCD0 408 +#define CLK_PPMULCD1 409 /* Exynos4210 only */ +#define CLK_PPMUFILE 410 +#define CLK_PPMUGPS 411 +#define CLK_PPMUDMC0 412 +#define CLK_PPMUDMC1 413 +#define CLK_PPMUCPU 414 +#define CLK_PPMUACP 415 + +/* div clocks */ +#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */ +#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */ +#define CLK_DIV_ACP 456 +#define CLK_DIV_DMC 457 +#define CLK_DIV_C2C 458 /* Exynos4x12 only */ +#define CLK_DIV_GDL 459 +#define CLK_DIV_GDR 460 + +/* must be greater than maximal clock id */ +#define CLK_NR_CLKS 461 + +/* Exynos4x12 ISP clocks */ +#define CLK_ISP_FIMC_ISP 1 +#define CLK_ISP_FIMC_DRC 2 +#define CLK_ISP_FIMC_FD 3 +#define CLK_ISP_FIMC_LITE0 4 +#define CLK_ISP_FIMC_LITE1 5 +#define CLK_ISP_MCUISP 6 +#define CLK_ISP_GICISP 7 +#define CLK_ISP_SMMU_ISP 8 +#define CLK_ISP_SMMU_DRC 9 +#define CLK_ISP_SMMU_FD 10 +#define CLK_ISP_SMMU_LITE0 11 +#define CLK_ISP_SMMU_LITE1 12 +#define CLK_ISP_PPMUISPMX 13 +#define CLK_ISP_PPMUISPX 14 +#define CLK_ISP_MCUCTL_ISP 15 +#define CLK_ISP_MPWM_ISP 16 +#define CLK_ISP_I2C0_ISP 17 +#define CLK_ISP_I2C1_ISP 18 +#define CLK_ISP_MTCADC_ISP 19 +#define 
CLK_ISP_PWM_ISP 20 +#define CLK_ISP_WDT_ISP 21 +#define CLK_ISP_UART_ISP 22 +#define CLK_ISP_ASYNCAXIM 23 +#define CLK_ISP_SMMU_ISPCX 24 +#define CLK_ISP_SPI0_ISP 25 +#define CLK_ISP_SPI1_ISP 26 + +#define CLK_ISP_DIV_ISP0 27 +#define CLK_ISP_DIV_ISP1 28 +#define CLK_ISP_DIV_MCUISP0 29 +#define CLK_ISP_DIV_MCUISP1 30 + +#define CLK_NR_ISP_CLKS 31 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */ diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h new file mode 100644 index 0000000..bc8a3c5 --- /dev/null +++ b/include/dt-bindings/clock/exynos5250.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Andrzej Hajda + * + * Device Tree binding constants for Exynos5250 clock controller. + */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5250_H +#define _DT_BINDINGS_CLOCK_EXYNOS_5250_H + +/* core clocks */ +#define CLK_FIN_PLL 1 +#define CLK_FOUT_APLL 2 +#define CLK_FOUT_MPLL 3 +#define CLK_FOUT_BPLL 4 +#define CLK_FOUT_GPLL 5 +#define CLK_FOUT_CPLL 6 +#define CLK_FOUT_EPLL 7 +#define CLK_FOUT_VPLL 8 +#define CLK_ARM_CLK 9 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_CAM_BAYER 128 +#define CLK_SCLK_CAM0 129 +#define CLK_SCLK_CAM1 130 +#define CLK_SCLK_GSCL_WA 131 +#define CLK_SCLK_GSCL_WB 132 +#define CLK_SCLK_FIMD1 133 +#define CLK_SCLK_MIPI1 134 +#define CLK_SCLK_DP 135 +#define CLK_SCLK_HDMI 136 +#define CLK_SCLK_PIXEL 137 +#define CLK_SCLK_AUDIO0 138 +#define CLK_SCLK_MMC0 139 +#define CLK_SCLK_MMC1 140 +#define CLK_SCLK_MMC2 141 +#define CLK_SCLK_MMC3 142 +#define CLK_SCLK_SATA 143 +#define CLK_SCLK_USB3 144 +#define CLK_SCLK_JPEG 145 +#define CLK_SCLK_UART0 146 +#define CLK_SCLK_UART1 147 +#define CLK_SCLK_UART2 148 +#define CLK_SCLK_UART3 149 +#define CLK_SCLK_PWM 150 +#define CLK_SCLK_AUDIO1 151 +#define CLK_SCLK_AUDIO2 152 +#define CLK_SCLK_SPDIF 153 +#define CLK_SCLK_SPI0 154 +#define CLK_SCLK_SPI1 155 +#define CLK_SCLK_SPI2 156 +#define CLK_DIV_I2S1 157 +#define CLK_DIV_I2S2 158 +#define CLK_SCLK_HDMIPHY 159 +#define CLK_DIV_PCM0 160 + +/* gate clocks */ +#define CLK_GSCL0 256 +#define CLK_GSCL1 257 +#define CLK_GSCL2 258 +#define CLK_GSCL3 259 +#define CLK_GSCL_WA 260 +#define CLK_GSCL_WB 261 +#define CLK_SMMU_GSCL0 262 +#define CLK_SMMU_GSCL1 263 +#define CLK_SMMU_GSCL2 264 +#define CLK_SMMU_GSCL3 265 +#define CLK_MFC 266 +#define CLK_SMMU_MFCL 267 +#define CLK_SMMU_MFCR 268 +#define CLK_ROTATOR 269 +#define CLK_JPEG 270 +#define CLK_MDMA1 271 +#define CLK_SMMU_ROTATOR 272 +#define CLK_SMMU_JPEG 273 +#define CLK_SMMU_MDMA1 274 +#define CLK_PDMA0 275 +#define CLK_PDMA1 276 +#define CLK_SATA 277 +#define CLK_USBOTG 278 +#define CLK_MIPI_HSI 279 +#define CLK_SDMMC0 280 +#define CLK_SDMMC1 281 +#define CLK_SDMMC2 282 +#define CLK_SDMMC3 283 +#define CLK_SROMC 284 +#define CLK_USB2 285 +#define CLK_USB3 286 +#define CLK_SATA_PHYCTRL 287 +#define CLK_SATA_PHYI2C 288 +#define CLK_UART0 289 +#define CLK_UART1 290 +#define CLK_UART2 291 +#define CLK_UART3 292 +#define CLK_UART4 293 +#define CLK_I2C0 294 +#define CLK_I2C1 295 +#define CLK_I2C2 296 +#define CLK_I2C3 297 +#define CLK_I2C4 298 +#define CLK_I2C5 299 +#define CLK_I2C6 300 +#define CLK_I2C7 301 +#define CLK_I2C_HDMI 302 +#define CLK_ADC 303 +#define CLK_SPI0 304 +#define CLK_SPI1 305 +#define CLK_SPI2 306 +#define CLK_I2S1 307 +#define CLK_I2S2 308 +#define CLK_PCM1 309 +#define CLK_PCM2 310 +#define CLK_PWM 311 +#define CLK_SPDIF 312 +#define CLK_AC97 313 +#define CLK_HSI2C0 314 +#define CLK_HSI2C1 315 +#define 
CLK_HSI2C2 316 +#define CLK_HSI2C3 317 +#define CLK_CHIPID 318 +#define CLK_SYSREG 319 +#define CLK_PMU 320 +#define CLK_CMU_TOP 321 +#define CLK_CMU_CORE 322 +#define CLK_CMU_MEM 323 +#define CLK_TZPC0 324 +#define CLK_TZPC1 325 +#define CLK_TZPC2 326 +#define CLK_TZPC3 327 +#define CLK_TZPC4 328 +#define CLK_TZPC5 329 +#define CLK_TZPC6 330 +#define CLK_TZPC7 331 +#define CLK_TZPC8 332 +#define CLK_TZPC9 333 +#define CLK_HDMI_CEC 334 +#define CLK_MCT 335 +#define CLK_WDT 336 +#define CLK_RTC 337 +#define CLK_TMU 338 +#define CLK_FIMD1 339 +#define CLK_MIE1 340 +#define CLK_DSIM0 341 +#define CLK_DP 342 +#define CLK_MIXER 343 +#define CLK_HDMI 344 +#define CLK_G2D 345 +#define CLK_MDMA0 346 +#define CLK_SMMU_MDMA0 347 +#define CLK_SSS 348 +#define CLK_G3D 349 +#define CLK_SMMU_TV 350 +#define CLK_SMMU_FIMD1 351 +#define CLK_SMMU_2D 352 +#define CLK_SMMU_FIMC_ISP 353 +#define CLK_SMMU_FIMC_DRC 354 +#define CLK_SMMU_FIMC_SCC 355 +#define CLK_SMMU_FIMC_SCP 356 +#define CLK_SMMU_FIMC_FD 357 +#define CLK_SMMU_FIMC_MCU 358 +#define CLK_SMMU_FIMC_ODC 359 +#define CLK_SMMU_FIMC_DIS0 360 +#define CLK_SMMU_FIMC_DIS1 361 +#define CLK_SMMU_FIMC_3DNR 362 +#define CLK_SMMU_FIMC_LITE0 363 +#define CLK_SMMU_FIMC_LITE1 364 +#define CLK_CAMIF_TOP 365 + +/* mux clocks */ +#define CLK_MOUT_HDMI 1024 +#define CLK_MOUT_GPLL 1025 +#define CLK_MOUT_ACLK200_DISP1_SUB 1026 +#define CLK_MOUT_ACLK300_DISP1_SUB 1027 + +/* must be greater than maximal clock id */ +#define CLK_NR_CLKS 1028 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */ diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h new file mode 100644 index 0000000..98a58cb --- /dev/null +++ b/include/dt-bindings/clock/exynos5260-clk.h @@ -0,0 +1,466 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Rahul Sharma + * + * Provides Constants for Exynos5260 clocks. 
+ */ + +#ifndef _DT_BINDINGS_CLK_EXYNOS5260_H +#define _DT_BINDINGS_CLK_EXYNOS5260_H + +/* Clock names: */ + +/* List Of Clocks For CMU_TOP */ + +#define TOP_FOUT_DISP_PLL 1 +#define TOP_FOUT_AUD_PLL 2 +#define TOP_MOUT_AUDTOP_PLL_USER 3 +#define TOP_MOUT_AUD_PLL 4 +#define TOP_MOUT_DISP_PLL 5 +#define TOP_MOUT_BUSTOP_PLL_USER 6 +#define TOP_MOUT_MEMTOP_PLL_USER 7 +#define TOP_MOUT_MEDIATOP_PLL_USER 8 +#define TOP_MOUT_DISP_DISP_333 9 +#define TOP_MOUT_ACLK_DISP_333 10 +#define TOP_MOUT_DISP_DISP_222 11 +#define TOP_MOUT_ACLK_DISP_222 12 +#define TOP_MOUT_DISP_MEDIA_PIXEL 13 +#define TOP_MOUT_FIMD1 14 +#define TOP_MOUT_SCLK_PERI_SPI0_CLK 15 +#define TOP_MOUT_SCLK_PERI_SPI1_CLK 16 +#define TOP_MOUT_SCLK_PERI_SPI2_CLK 17 +#define TOP_MOUT_SCLK_PERI_UART0_UCLK 18 +#define TOP_MOUT_SCLK_PERI_UART2_UCLK 19 +#define TOP_MOUT_SCLK_PERI_UART1_UCLK 20 +#define TOP_MOUT_BUS4_BUSTOP_100 21 +#define TOP_MOUT_BUS4_BUSTOP_400 22 +#define TOP_MOUT_BUS3_BUSTOP_100 23 +#define TOP_MOUT_BUS3_BUSTOP_400 24 +#define TOP_MOUT_BUS2_BUSTOP_400 25 +#define TOP_MOUT_BUS2_BUSTOP_100 26 +#define TOP_MOUT_BUS1_BUSTOP_100 27 +#define TOP_MOUT_BUS1_BUSTOP_400 28 +#define TOP_MOUT_SCLK_FSYS_USB 29 +#define TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_A 30 +#define TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_A 31 +#define TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_A 32 +#define TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_B 33 +#define TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_B 34 +#define TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_B 35 +#define TOP_MOUT_ACLK_ISP1_266 36 +#define TOP_MOUT_ISP1_MEDIA_266 37 +#define TOP_MOUT_ACLK_ISP1_400 38 +#define TOP_MOUT_ISP1_MEDIA_400 39 +#define TOP_MOUT_SCLK_ISP1_SPI0 40 +#define TOP_MOUT_SCLK_ISP1_SPI1 41 +#define TOP_MOUT_SCLK_ISP1_UART 42 +#define TOP_MOUT_SCLK_ISP1_SENSOR2 43 +#define TOP_MOUT_SCLK_ISP1_SENSOR1 44 +#define TOP_MOUT_SCLK_ISP1_SENSOR0 45 +#define TOP_MOUT_ACLK_MFC_333 46 +#define TOP_MOUT_MFC_BUSTOP_333 47 +#define TOP_MOUT_ACLK_G2D_333 48 +#define TOP_MOUT_G2D_BUSTOP_333 49 +#define TOP_MOUT_ACLK_GSCL_FIMC 50 +#define TOP_MOUT_GSCL_BUSTOP_FIMC 51 +#define TOP_MOUT_ACLK_GSCL_333 52 +#define TOP_MOUT_GSCL_BUSTOP_333 53 +#define TOP_MOUT_ACLK_GSCL_400 54 +#define TOP_MOUT_M2M_MEDIATOP_400 55 +#define TOP_DOUT_ACLK_MFC_333 56 +#define TOP_DOUT_ACLK_G2D_333 57 +#define TOP_DOUT_SCLK_ISP1_SENSOR2_A 58 +#define TOP_DOUT_SCLK_ISP1_SENSOR1_A 59 +#define TOP_DOUT_SCLK_ISP1_SENSOR0_A 60 +#define TOP_DOUT_ACLK_GSCL_FIMC 61 +#define TOP_DOUT_ACLK_GSCL_400 62 +#define TOP_DOUT_ACLK_GSCL_333 63 +#define TOP_DOUT_SCLK_ISP1_SPI0_B 64 +#define TOP_DOUT_SCLK_ISP1_SPI0_A 65 +#define TOP_DOUT_ACLK_ISP1_400 66 +#define TOP_DOUT_ACLK_ISP1_266 67 +#define TOP_DOUT_SCLK_ISP1_UART 68 +#define TOP_DOUT_SCLK_ISP1_SPI1_B 69 +#define TOP_DOUT_SCLK_ISP1_SPI1_A 70 +#define TOP_DOUT_SCLK_ISP1_SENSOR2_B 71 +#define TOP_DOUT_SCLK_ISP1_SENSOR1_B 72 +#define TOP_DOUT_SCLK_ISP1_SENSOR0_B 73 +#define TOP_DOUTTOP__SCLK_HPM_TARGETCLK 74 +#define TOP_DOUT_SCLK_DISP_PIXEL 75 +#define TOP_DOUT_ACLK_DISP_222 76 +#define TOP_DOUT_ACLK_DISP_333 77 +#define TOP_DOUT_ACLK_BUS4_100 78 +#define TOP_DOUT_ACLK_BUS4_400 79 +#define TOP_DOUT_ACLK_BUS3_100 80 +#define TOP_DOUT_ACLK_BUS3_400 81 +#define TOP_DOUT_ACLK_BUS2_100 82 +#define TOP_DOUT_ACLK_BUS2_400 83 +#define TOP_DOUT_ACLK_BUS1_100 84 +#define TOP_DOUT_ACLK_BUS1_400 85 +#define TOP_DOUT_SCLK_PERI_SPI1_B 86 +#define TOP_DOUT_SCLK_PERI_SPI1_A 87 +#define TOP_DOUT_SCLK_PERI_SPI0_B 88 +#define TOP_DOUT_SCLK_PERI_SPI0_A 89 +#define TOP_DOUT_SCLK_PERI_UART0 90 +#define TOP_DOUT_SCLK_PERI_UART2 91 +#define 
TOP_DOUT_SCLK_PERI_UART1 92
+#define TOP_DOUT_SCLK_PERI_SPI2_B 93
+#define TOP_DOUT_SCLK_PERI_SPI2_A 94
+#define TOP_DOUT_ACLK_PERI_AUD 95
+#define TOP_DOUT_ACLK_PERI_66 96
+#define TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_B 97
+#define TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_A 98
+#define TOP_DOUT_SCLK_FSYS_USBDRD30_SUSPEND_CLK 99
+#define TOP_DOUT_ACLK_FSYS_200 100
+#define TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_B 101
+#define TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_A 102
+#define TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_B 103
+#define TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_A 104
+#define TOP_SCLK_FIMD1 105
+#define TOP_SCLK_MMC2 106
+#define TOP_SCLK_MMC1 107
+#define TOP_SCLK_MMC0 108
+#define PHYCLK_DPTX_PHY_CH3_TXD_CLK 109
+#define PHYCLK_DPTX_PHY_CH2_TXD_CLK 110
+#define PHYCLK_DPTX_PHY_CH1_TXD_CLK 111
+#define PHYCLK_DPTX_PHY_CH0_TXD_CLK 112
+#define PHYCLK_HDMI_PHY_TMDS_CLKO 113
+#define PHYCLK_HDMI_PHY_PIXEL_CLKO 114
+#define PHYCLK_HDMI_LINK_O_TMDS_CLKHI 115
+#define PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS 116
+#define PHYCLK_DPTX_PHY_O_REF_CLK_24M 117
+#define PHYCLK_DPTX_PHY_CLK_DIV2 118
+#define PHYCLK_MIPI_DPHY_4L_M_RXCLKESC0 119
+#define PHYCLK_USBHOST20_PHY_PHYCLOCK 120
+#define PHYCLK_USBHOST20_PHY_FREECLK 121
+#define PHYCLK_USBHOST20_PHY_CLK48MOHCI 122
+#define PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 123
+#define PHYCLK_USBDRD30_UDRD30_PHYCLOCK 124
+#define TOP_NR_CLK 125
+
+
+/* List Of Clocks For CMU_EGL */
+
+#define EGL_FOUT_EGL_PLL 1
+#define EGL_FOUT_EGL_DPLL 2
+#define EGL_MOUT_EGL_B 3
+#define EGL_MOUT_EGL_PLL 4
+#define EGL_DOUT_EGL_PLL 5
+#define EGL_DOUT_EGL_PCLK_DBG 6
+#define EGL_DOUT_EGL_ATCLK 7
+#define EGL_DOUT_PCLK_EGL 8
+#define EGL_DOUT_ACLK_EGL 9
+#define EGL_DOUT_EGL2 10
+#define EGL_DOUT_EGL1 11
+#define EGL_NR_CLK 12
+
+
+/* List Of Clocks For CMU_KFC */
+
+#define KFC_FOUT_KFC_PLL 1
+#define KFC_MOUT_KFC_PLL 2
+#define KFC_MOUT_KFC 3
+#define KFC_DOUT_KFC_PLL 4
+#define KFC_DOUT_PCLK_KFC 5
+#define KFC_DOUT_ACLK_KFC 6
+#define KFC_DOUT_KFC_PCLK_DBG 7
+#define KFC_DOUT_KFC_ATCLK 8
+#define KFC_DOUT_KFC2 9
+#define KFC_DOUT_KFC1 10
+#define KFC_NR_CLK 11
+
+
+/* List Of Clocks For CMU_MIF */
+
+#define MIF_FOUT_MEM_PLL 1
+#define MIF_FOUT_MEDIA_PLL 2
+#define MIF_FOUT_BUS_PLL 3
+#define MIF_MOUT_CLK2X_PHY 4
+#define MIF_MOUT_MIF_DREX2X 5
+#define MIF_MOUT_CLKM_PHY 6
+#define MIF_MOUT_MIF_DREX 7
+#define MIF_MOUT_MEDIA_PLL 8
+#define MIF_MOUT_BUS_PLL 9
+#define MIF_MOUT_MEM_PLL 10
+#define MIF_DOUT_ACLK_BUS_100 11
+#define MIF_DOUT_ACLK_BUS_200 12
+#define MIF_DOUT_ACLK_MIF_466 13
+#define MIF_DOUT_CLK2X_PHY 14
+#define MIF_DOUT_CLKM_PHY 15
+#define MIF_DOUT_BUS_PLL 16
+#define MIF_DOUT_MEM_PLL 17
+#define MIF_DOUT_MEDIA_PLL 18
+#define MIF_CLK_LPDDR3PHY_WRAP1 19
+#define MIF_CLK_LPDDR3PHY_WRAP0 20
+#define MIF_CLK_MONOCNT 21
+#define MIF_CLK_MIF_RTC 22
+#define MIF_CLK_DREX1 23
+#define MIF_CLK_DREX0 24
+#define MIF_CLK_INTMEM 25
+#define MIF_SCLK_LPDDR3PHY_WRAP_U1 26
+#define MIF_SCLK_LPDDR3PHY_WRAP_U0 27
+#define MIF_NR_CLK 28
+
+
+/* List Of Clocks For CMU_G3D */
+
+#define G3D_FOUT_G3D_PLL 1
+#define G3D_MOUT_G3D_PLL 2
+#define G3D_DOUT_PCLK_G3D 3
+#define G3D_DOUT_ACLK_G3D 4
+#define G3D_CLK_G3D_HPM 5
+#define G3D_CLK_G3D 6
+#define G3D_NR_CLK 7
+
+
+/* List Of Clocks For CMU_AUD */
+
+#define AUD_MOUT_SCLK_AUD_PCM 1
+#define AUD_MOUT_SCLK_AUD_I2S 2
+#define AUD_MOUT_AUD_PLL_USER 3
+#define AUD_DOUT_ACLK_AUD_131 4
+#define AUD_DOUT_SCLK_AUD_UART 5
+#define AUD_DOUT_SCLK_AUD_PCM 6
+#define AUD_DOUT_SCLK_AUD_I2S 7
+#define AUD_CLK_AUD_UART 8
+#define AUD_CLK_PCM 9
+#define AUD_CLK_I2S 10
+#define
AUD_CLK_DMAC 11 +#define AUD_CLK_SRAMC 12 +#define AUD_SCLK_AUD_UART 13 +#define AUD_SCLK_PCM 14 +#define AUD_SCLK_I2S 15 +#define AUD_NR_CLK 16 + + +/* List Of Clocks For CMU_MFC */ + +#define MFC_MOUT_ACLK_MFC_333_USER 1 +#define MFC_DOUT_PCLK_MFC_83 2 +#define MFC_CLK_MFC 3 +#define MFC_CLK_SMMU2_MFCM1 4 +#define MFC_CLK_SMMU2_MFCM0 5 +#define MFC_NR_CLK 6 + + +/* List Of Clocks For CMU_GSCL */ + +#define GSCL_MOUT_ACLK_CSIS 1 +#define GSCL_MOUT_ACLK_GSCL_FIMC_USER 2 +#define GSCL_MOUT_ACLK_M2M_400_USER 3 +#define GSCL_MOUT_ACLK_GSCL_333_USER 4 +#define GSCL_DOUT_ACLK_CSIS_200 5 +#define GSCL_DOUT_PCLK_M2M_100 6 +#define GSCL_CLK_PIXEL_GSCL1 7 +#define GSCL_CLK_PIXEL_GSCL0 8 +#define GSCL_CLK_MSCL1 9 +#define GSCL_CLK_MSCL0 10 +#define GSCL_CLK_GSCL1 11 +#define GSCL_CLK_GSCL0 12 +#define GSCL_CLK_FIMC_LITE_D 13 +#define GSCL_CLK_FIMC_LITE_B 14 +#define GSCL_CLK_FIMC_LITE_A 15 +#define GSCL_CLK_CSIS1 16 +#define GSCL_CLK_CSIS0 17 +#define GSCL_CLK_SMMU3_LITE_D 18 +#define GSCL_CLK_SMMU3_LITE_B 19 +#define GSCL_CLK_SMMU3_LITE_A 20 +#define GSCL_CLK_SMMU3_GSCL0 21 +#define GSCL_CLK_SMMU3_GSCL1 22 +#define GSCL_CLK_SMMU3_MSCL0 23 +#define GSCL_CLK_SMMU3_MSCL1 24 +#define GSCL_SCLK_CSIS1_WRAP 25 +#define GSCL_SCLK_CSIS0_WRAP 26 +#define GSCL_NR_CLK 27 + + +/* List Of Clocks For CMU_FSYS */ + +#define FSYS_MOUT_PHYCLK_USBHOST20_PHYCLK_USER 1 +#define FSYS_MOUT_PHYCLK_USBHOST20_FREECLK_USER 2 +#define FSYS_MOUT_PHYCLK_USBHOST20_CLK48MOHCI_USER 3 +#define FSYS_MOUT_PHYCLK_USBDRD30_PIPE_PCLK_USER 4 +#define FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER 5 +#define FSYS_CLK_TSI 6 +#define FSYS_CLK_USBLINK 7 +#define FSYS_CLK_USBHOST20 8 +#define FSYS_CLK_USBDRD30 9 +#define FSYS_CLK_SROMC 10 +#define FSYS_CLK_PDMA 11 +#define FSYS_CLK_MMC2 12 +#define FSYS_CLK_MMC1 13 +#define FSYS_CLK_MMC0 14 +#define FSYS_CLK_RTIC 15 +#define FSYS_CLK_SMMU_RTIC 16 +#define FSYS_PHYCLK_USBDRD30 17 +#define FSYS_PHYCLK_USBHOST20 18 +#define FSYS_NR_CLK 19 + + +/* List Of Clocks For CMU_PERI */ + +#define PERI_MOUT_SCLK_SPDIF 1 +#define PERI_MOUT_SCLK_I2SCOD 2 +#define PERI_MOUT_SCLK_PCM 3 +#define PERI_DOUT_I2S 4 +#define PERI_DOUT_PCM 5 +#define PERI_CLK_WDT_KFC 6 +#define PERI_CLK_WDT_EGL 7 +#define PERI_CLK_HSIC3 8 +#define PERI_CLK_HSIC2 9 +#define PERI_CLK_HSIC1 10 +#define PERI_CLK_HSIC0 11 +#define PERI_CLK_PCM 12 +#define PERI_CLK_MCT 13 +#define PERI_CLK_I2S 14 +#define PERI_CLK_I2CHDMI 15 +#define PERI_CLK_I2C7 16 +#define PERI_CLK_I2C6 17 +#define PERI_CLK_I2C5 18 +#define PERI_CLK_I2C4 19 +#define PERI_CLK_I2C9 20 +#define PERI_CLK_I2C8 21 +#define PERI_CLK_I2C11 22 +#define PERI_CLK_I2C10 23 +#define PERI_CLK_HDMICEC 24 +#define PERI_CLK_EFUSE_WRITER 25 +#define PERI_CLK_ABB 26 +#define PERI_CLK_UART2 27 +#define PERI_CLK_UART1 28 +#define PERI_CLK_UART0 29 +#define PERI_CLK_ADC 30 +#define PERI_CLK_TMU4 31 +#define PERI_CLK_TMU3 32 +#define PERI_CLK_TMU2 33 +#define PERI_CLK_TMU1 34 +#define PERI_CLK_TMU0 35 +#define PERI_CLK_SPI2 36 +#define PERI_CLK_SPI1 37 +#define PERI_CLK_SPI0 38 +#define PERI_CLK_SPDIF 39 +#define PERI_CLK_PWM 40 +#define PERI_CLK_UART4 41 +#define PERI_CLK_CHIPID 42 +#define PERI_CLK_PROVKEY0 43 +#define PERI_CLK_PROVKEY1 44 +#define PERI_CLK_SECKEY 45 +#define PERI_CLK_TOP_RTC 46 +#define PERI_CLK_TZPC10 47 +#define PERI_CLK_TZPC9 48 +#define PERI_CLK_TZPC8 49 +#define PERI_CLK_TZPC7 50 +#define PERI_CLK_TZPC6 51 +#define PERI_CLK_TZPC5 52 +#define PERI_CLK_TZPC4 53 +#define PERI_CLK_TZPC3 54 +#define PERI_CLK_TZPC2 55 +#define PERI_CLK_TZPC1 56 +#define PERI_CLK_TZPC0 57 +#define 
PERI_SCLK_UART2 58 +#define PERI_SCLK_UART1 59 +#define PERI_SCLK_UART0 60 +#define PERI_SCLK_SPI2 61 +#define PERI_SCLK_SPI1 62 +#define PERI_SCLK_SPI0 63 +#define PERI_SCLK_SPDIF 64 +#define PERI_SCLK_I2S 65 +#define PERI_SCLK_PCM1 66 +#define PERI_NR_CLK 67 + + +/* List Of Clocks For CMU_DISP */ + +#define DISP_MOUT_SCLK_HDMI_SPDIF 1 +#define DISP_MOUT_SCLK_HDMI_PIXEL 2 +#define DISP_MOUT_PHYCLK_MIPI_DPHY_4LMRXCLK_ESC0_USER 3 +#define DISP_MOUT_PHYCLK_HDMI_PHY_TMDS_CLKO_USER 4 +#define DISP_MOUT_PHYCLK_HDMI_PHY_REF_CLKO_USER 5 +#define DISP_MOUT_HDMI_PHY_PIXEL 6 +#define DISP_MOUT_PHYCLK_HDMI_LINK_O_TMDS_CLKHI_USER 7 +#define DISP_MOUT_PHYCLK_MIPI_DPHY_4L_M_TXBYTE_CLKHS 8 +#define DISP_MOUT_PHYCLK_DPTX_PHY_O_REF_CLK_24M_USER 9 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CLK_DIV2_USER 10 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH3_TXD_CLK_USER 11 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH2_TXD_CLK_USER 12 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH1_TXD_CLK_USER 13 +#define DISP_MOUT_PHYCLK_DPTX_PHY_CH0_TXD_CLK_USER 14 +#define DISP_MOUT_ACLK_DISP_222_USER 15 +#define DISP_MOUT_SCLK_DISP_PIXEL_USER 16 +#define DISP_MOUT_ACLK_DISP_333_USER 17 +#define DISP_DOUT_SCLK_HDMI_PHY_PIXEL_CLKI 18 +#define DISP_DOUT_SCLK_FIMD1_EXTCLKPLL 19 +#define DISP_DOUT_PCLK_DISP_111 20 +#define DISP_CLK_SMMU_TV 21 +#define DISP_CLK_SMMU_FIMD1M1 22 +#define DISP_CLK_SMMU_FIMD1M0 23 +#define DISP_CLK_PIXEL_MIXER 24 +#define DISP_CLK_PIXEL_DISP 25 +#define DISP_CLK_MIXER 26 +#define DISP_CLK_MIPIPHY 27 +#define DISP_CLK_HDMIPHY 28 +#define DISP_CLK_HDMI 29 +#define DISP_CLK_FIMD1 30 +#define DISP_CLK_DSIM1 31 +#define DISP_CLK_DPPHY 32 +#define DISP_CLK_DP 33 +#define DISP_SCLK_PIXEL 34 +#define DISP_MOUT_HDMI_PHY_PIXEL_USER 35 +#define DISP_NR_CLK 36 + + +/* List Of Clocks For CMU_G2D */ + +#define G2D_MOUT_ACLK_G2D_333_USER 1 +#define G2D_DOUT_PCLK_G2D_83 2 +#define G2D_CLK_SMMU3_JPEG 3 +#define G2D_CLK_MDMA 4 +#define G2D_CLK_JPEG 5 +#define G2D_CLK_G2D 6 +#define G2D_CLK_SSS 7 +#define G2D_CLK_SLIM_SSS 8 +#define G2D_CLK_SMMU_SLIM_SSS 9 +#define G2D_CLK_SMMU_SSS 10 +#define G2D_CLK_SMMU_MDMA 11 +#define G2D_CLK_SMMU3_G2D 12 +#define G2D_NR_CLK 13 + + +/* List Of Clocks For CMU_ISP */ + +#define ISP_MOUT_ISP_400_USER 1 +#define ISP_MOUT_ISP_266_USER 2 +#define ISP_DOUT_SCLK_MPWM 3 +#define ISP_DOUT_CA5_PCLKDBG 4 +#define ISP_DOUT_CA5_ATCLKIN 5 +#define ISP_DOUT_PCLK_ISP_133 6 +#define ISP_DOUT_PCLK_ISP_66 7 +#define ISP_CLK_GIC 8 +#define ISP_CLK_WDT 9 +#define ISP_CLK_UART 10 +#define ISP_CLK_SPI1 11 +#define ISP_CLK_SPI0 12 +#define ISP_CLK_SMMU_SCALERP 13 +#define ISP_CLK_SMMU_SCALERC 14 +#define ISP_CLK_SMMU_ISPCX 15 +#define ISP_CLK_SMMU_ISP 16 +#define ISP_CLK_SMMU_FD 17 +#define ISP_CLK_SMMU_DRC 18 +#define ISP_CLK_PWM 19 +#define ISP_CLK_MTCADC 20 +#define ISP_CLK_MPWM 21 +#define ISP_CLK_MCUCTL 22 +#define ISP_CLK_I2C1 23 +#define ISP_CLK_I2C0 24 +#define ISP_CLK_FIMC_SCALERP 25 +#define ISP_CLK_FIMC_SCALERC 26 +#define ISP_CLK_FIMC 27 +#define ISP_CLK_FIMC_FD 28 +#define ISP_CLK_FIMC_DRC 29 +#define ISP_CLK_CA5 30 +#define ISP_SCLK_SPI0_EXT 31 +#define ISP_SCLK_SPI1_EXT 32 +#define ISP_SCLK_UART_EXT 33 +#define ISP_NR_CLK 34 + +#endif diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h new file mode 100644 index 0000000..86c2ad5 --- /dev/null +++ b/include/dt-bindings/clock/exynos5410.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. 
+ * Copyright (c) 2016 Krzysztof Kozlowski
+ *
+ * Device Tree binding constants for Exynos5410 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_5410_H
+
+/* core clocks */
+#define CLK_FIN_PLL 1
+#define CLK_FOUT_APLL 2
+#define CLK_FOUT_CPLL 3
+#define CLK_FOUT_MPLL 4
+#define CLK_FOUT_BPLL 5
+#define CLK_FOUT_KPLL 6
+#define CLK_FOUT_EPLL 7
+
+/* gate for special clocks (sclk) */
+#define CLK_SCLK_UART0 128
+#define CLK_SCLK_UART1 129
+#define CLK_SCLK_UART2 130
+#define CLK_SCLK_UART3 131
+#define CLK_SCLK_MMC0 132
+#define CLK_SCLK_MMC1 133
+#define CLK_SCLK_MMC2 134
+#define CLK_SCLK_USBD300 150
+#define CLK_SCLK_USBD301 151
+#define CLK_SCLK_USBPHY300 152
+#define CLK_SCLK_USBPHY301 153
+#define CLK_SCLK_PWM 155
+
+/* gate clocks */
+#define CLK_UART0 257
+#define CLK_UART1 258
+#define CLK_UART2 259
+#define CLK_UART3 260
+#define CLK_I2C0 261
+#define CLK_I2C1 262
+#define CLK_I2C2 263
+#define CLK_I2C3 264
+#define CLK_USI0 265
+#define CLK_USI1 266
+#define CLK_USI2 267
+#define CLK_USI3 268
+#define CLK_TSADC 270
+#define CLK_PWM 279
+#define CLK_MCT 315
+#define CLK_WDT 316
+#define CLK_RTC 317
+#define CLK_TMU 318
+#define CLK_MMC0 351
+#define CLK_MMC1 352
+#define CLK_MMC2 353
+#define CLK_PDMA0 362
+#define CLK_PDMA1 363
+#define CLK_USBH20 365
+#define CLK_USBD300 366
+#define CLK_USBD301 367
+#define CLK_SSS 471
+
+#define CLK_NR_CLKS 512
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5410_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
new file mode 100644
index 0000000..02d5ac4
--- /dev/null
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Author: Andrzej Hajda
+ *
+ * Device Tree binding constants for Exynos5420 clock controller.
+ */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5420_H +#define _DT_BINDINGS_CLOCK_EXYNOS_5420_H + +/* core clocks */ +#define CLK_FIN_PLL 1 +#define CLK_FOUT_APLL 2 +#define CLK_FOUT_CPLL 3 +#define CLK_FOUT_DPLL 4 +#define CLK_FOUT_EPLL 5 +#define CLK_FOUT_RPLL 6 +#define CLK_FOUT_IPLL 7 +#define CLK_FOUT_SPLL 8 +#define CLK_FOUT_VPLL 9 +#define CLK_FOUT_MPLL 10 +#define CLK_FOUT_BPLL 11 +#define CLK_FOUT_KPLL 12 +#define CLK_ARM_CLK 13 +#define CLK_KFC_CLK 14 + +/* gate for special clocks (sclk) */ +#define CLK_SCLK_UART0 128 +#define CLK_SCLK_UART1 129 +#define CLK_SCLK_UART2 130 +#define CLK_SCLK_UART3 131 +#define CLK_SCLK_MMC0 132 +#define CLK_SCLK_MMC1 133 +#define CLK_SCLK_MMC2 134 +#define CLK_SCLK_SPI0 135 +#define CLK_SCLK_SPI1 136 +#define CLK_SCLK_SPI2 137 +#define CLK_SCLK_I2S1 138 +#define CLK_SCLK_I2S2 139 +#define CLK_SCLK_PCM1 140 +#define CLK_SCLK_PCM2 141 +#define CLK_SCLK_SPDIF 142 +#define CLK_SCLK_HDMI 143 +#define CLK_SCLK_PIXEL 144 +#define CLK_SCLK_DP1 145 +#define CLK_SCLK_MIPI1 146 +#define CLK_SCLK_FIMD1 147 +#define CLK_SCLK_MAUDIO0 148 +#define CLK_SCLK_MAUPCM0 149 +#define CLK_SCLK_USBD300 150 +#define CLK_SCLK_USBD301 151 +#define CLK_SCLK_USBPHY300 152 +#define CLK_SCLK_USBPHY301 153 +#define CLK_SCLK_UNIPRO 154 +#define CLK_SCLK_PWM 155 +#define CLK_SCLK_GSCL_WA 156 +#define CLK_SCLK_GSCL_WB 157 +#define CLK_SCLK_HDMIPHY 158 +#define CLK_MAU_EPLL 159 +#define CLK_SCLK_HSIC_12M 160 +#define CLK_SCLK_MPHY_IXTAL24 161 +#define CLK_SCLK_BPLL 162 + +/* gate clocks */ +#define CLK_UART0 257 +#define CLK_UART1 258 +#define CLK_UART2 259 +#define CLK_UART3 260 +#define CLK_I2C0 261 +#define CLK_I2C1 262 +#define CLK_I2C2 263 +#define CLK_I2C3 264 +#define CLK_USI0 265 +#define CLK_USI1 266 +#define CLK_USI2 267 +#define CLK_USI3 268 +#define CLK_I2C_HDMI 269 +#define CLK_TSADC 270 +#define CLK_SPI0 271 +#define CLK_SPI1 272 +#define CLK_SPI2 273 +#define CLK_KEYIF 274 +#define CLK_I2S1 275 +#define CLK_I2S2 276 +#define CLK_PCM1 277 +#define CLK_PCM2 278 +#define CLK_PWM 279 +#define CLK_SPDIF 280 +#define CLK_USI4 281 +#define CLK_USI5 282 +#define CLK_USI6 283 +#define CLK_ACLK66_PSGEN 300 +#define CLK_CHIPID 301 +#define CLK_SYSREG 302 +#define CLK_TZPC0 303 +#define CLK_TZPC1 304 +#define CLK_TZPC2 305 +#define CLK_TZPC3 306 +#define CLK_TZPC4 307 +#define CLK_TZPC5 308 +#define CLK_TZPC6 309 +#define CLK_TZPC7 310 +#define CLK_TZPC8 311 +#define CLK_TZPC9 312 +#define CLK_HDMI_CEC 313 +#define CLK_SECKEY 314 +#define CLK_MCT 315 +#define CLK_WDT 316 +#define CLK_RTC 317 +#define CLK_TMU 318 +#define CLK_TMU_GPU 319 +#define CLK_PCLK66_GPIO 330 +#define CLK_ACLK200_FSYS2 350 +#define CLK_MMC0 351 +#define CLK_MMC1 352 +#define CLK_MMC2 353 +#define CLK_SROMC 354 +#define CLK_UFS 355 +#define CLK_ACLK200_FSYS 360 +#define CLK_TSI 361 +#define CLK_PDMA0 362 +#define CLK_PDMA1 363 +#define CLK_RTIC 364 +#define CLK_USBH20 365 +#define CLK_USBD300 366 +#define CLK_USBD301 367 +#define CLK_ACLK400_MSCL 380 +#define CLK_MSCL0 381 +#define CLK_MSCL1 382 +#define CLK_MSCL2 383 +#define CLK_SMMU_MSCL0 384 +#define CLK_SMMU_MSCL1 385 +#define CLK_SMMU_MSCL2 386 +#define CLK_ACLK333 400 +#define CLK_MFC 401 +#define CLK_SMMU_MFCL 402 +#define CLK_SMMU_MFCR 403 +#define CLK_ACLK200_DISP1 410 +#define CLK_DSIM1 411 +#define CLK_DP1 412 +#define CLK_HDMI 413 +#define CLK_ACLK300_DISP1 420 +#define CLK_FIMD1 421 +#define CLK_SMMU_FIMD1M0 422 +#define CLK_SMMU_FIMD1M1 423 +#define CLK_ACLK166 430 +#define CLK_MIXER 431 +#define CLK_ACLK266 440 +#define CLK_ROTATOR 441 +#define 
CLK_MDMA1 442 +#define CLK_SMMU_ROTATOR 443 +#define CLK_SMMU_MDMA1 444 +#define CLK_ACLK300_JPEG 450 +#define CLK_JPEG 451 +#define CLK_JPEG2 452 +#define CLK_SMMU_JPEG 453 +#define CLK_SMMU_JPEG2 454 +#define CLK_ACLK300_GSCL 460 +#define CLK_SMMU_GSCL0 461 +#define CLK_SMMU_GSCL1 462 +#define CLK_GSCL_WA 463 +#define CLK_GSCL_WB 464 +#define CLK_GSCL0 465 +#define CLK_GSCL1 466 +#define CLK_FIMC_3AA 467 +#define CLK_ACLK266_G2D 470 +#define CLK_SSS 471 +#define CLK_SLIM_SSS 472 +#define CLK_MDMA0 473 +#define CLK_ACLK333_G2D 480 +#define CLK_G2D 481 +#define CLK_ACLK333_432_GSCL 490 +#define CLK_SMMU_3AA 491 +#define CLK_SMMU_FIMCL0 492 +#define CLK_SMMU_FIMCL1 493 +#define CLK_SMMU_FIMCL3 494 +#define CLK_FIMC_LITE3 495 +#define CLK_FIMC_LITE0 496 +#define CLK_FIMC_LITE1 497 +#define CLK_ACLK_G3D 500 +#define CLK_G3D 501 +#define CLK_SMMU_MIXER 502 +#define CLK_SMMU_G2D 503 +#define CLK_SMMU_MDMA0 504 +#define CLK_MC 505 +#define CLK_TOP_RTC 506 +#define CLK_SCLK_UART_ISP 510 +#define CLK_SCLK_SPI0_ISP 511 +#define CLK_SCLK_SPI1_ISP 512 +#define CLK_SCLK_PWM_ISP 513 +#define CLK_SCLK_ISP_SENSOR0 514 +#define CLK_SCLK_ISP_SENSOR1 515 +#define CLK_SCLK_ISP_SENSOR2 516 +#define CLK_ACLK432_SCALER 517 +#define CLK_ACLK432_CAM 518 +#define CLK_ACLK_FL1550_CAM 519 +#define CLK_ACLK550_CAM 520 +#define CLK_CLKM_PHY0 521 +#define CLK_CLKM_PHY1 522 +#define CLK_ACLK_PPMU_DREX0_0 523 +#define CLK_ACLK_PPMU_DREX0_1 524 +#define CLK_ACLK_PPMU_DREX1_0 525 +#define CLK_ACLK_PPMU_DREX1_1 526 +#define CLK_PCLK_PPMU_DREX0_0 527 +#define CLK_PCLK_PPMU_DREX0_1 528 +#define CLK_PCLK_PPMU_DREX1_0 529 +#define CLK_PCLK_PPMU_DREX1_1 530 + +/* mux clocks */ +#define CLK_MOUT_HDMI 640 +#define CLK_MOUT_G3D 641 +#define CLK_MOUT_VPLL 642 +#define CLK_MOUT_MAUDIO0 643 +#define CLK_MOUT_USER_ACLK333 644 +#define CLK_MOUT_SW_ACLK333 645 +#define CLK_MOUT_USER_ACLK200_DISP1 646 +#define CLK_MOUT_SW_ACLK200 647 +#define CLK_MOUT_USER_ACLK300_DISP1 648 +#define CLK_MOUT_SW_ACLK300 649 +#define CLK_MOUT_USER_ACLK400_DISP1 650 +#define CLK_MOUT_SW_ACLK400 651 +#define CLK_MOUT_USER_ACLK300_GSCL 652 +#define CLK_MOUT_SW_ACLK300_GSCL 653 +#define CLK_MOUT_MCLK_CDREX 654 +#define CLK_MOUT_BPLL 655 +#define CLK_MOUT_MX_MSPLL_CCORE 656 +#define CLK_MOUT_EPLL 657 +#define CLK_MOUT_MAU_EPLL 658 +#define CLK_MOUT_USER_MAU_EPLL 659 +#define CLK_MOUT_SCLK_SPLL 660 +#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661 + +/* divider clocks */ +#define CLK_DOUT_PIXEL 768 +#define CLK_DOUT_ACLK400_WCORE 769 +#define CLK_DOUT_ACLK400_ISP 770 +#define CLK_DOUT_ACLK400_MSCL 771 +#define CLK_DOUT_ACLK200 772 +#define CLK_DOUT_ACLK200_FSYS2 773 +#define CLK_DOUT_ACLK100_NOC 774 +#define CLK_DOUT_PCLK200_FSYS 775 +#define CLK_DOUT_ACLK200_FSYS 776 +#define CLK_DOUT_ACLK333_432_GSCL 777 +#define CLK_DOUT_ACLK333_432_ISP 778 +#define CLK_DOUT_ACLK66 779 +#define CLK_DOUT_ACLK333_432_ISP0 780 +#define CLK_DOUT_ACLK266 781 +#define CLK_DOUT_ACLK166 782 +#define CLK_DOUT_ACLK333 783 +#define CLK_DOUT_ACLK333_G2D 784 +#define CLK_DOUT_ACLK266_G2D 785 +#define CLK_DOUT_ACLK_G3D 786 +#define CLK_DOUT_ACLK300_JPEG 787 +#define CLK_DOUT_ACLK300_DISP1 788 +#define CLK_DOUT_ACLK300_GSCL 789 +#define CLK_DOUT_ACLK400_DISP1 790 +#define CLK_DOUT_PCLK_CDREX 791 +#define CLK_DOUT_SCLK_CDREX 792 +#define CLK_DOUT_ACLK_CDREX1 793 +#define CLK_DOUT_CCLK_DREX0 794 +#define CLK_DOUT_CLK2X_PHY0 795 +#define CLK_DOUT_PCLK_CORE_MEM 796 +#define CLK_FF_DOUT_SPLL2 797 +#define CLK_DOUT_PCLK_DREX0 798 +#define CLK_DOUT_PCLK_DREX1 799 + +/* must be greater than maximal clock id 
*/ +#define CLK_NR_CLKS 800 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h new file mode 100644 index 0000000..25ffa53 --- /dev/null +++ b/include/dt-bindings/clock/exynos5433.h @@ -0,0 +1,1415 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Chanwoo Choi + */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H +#define _DT_BINDINGS_CLOCK_EXYNOS5433_H + +/* CMU_TOP */ +#define CLK_FOUT_ISP_PLL 1 +#define CLK_FOUT_AUD_PLL 2 + +#define CLK_MOUT_AUD_PLL 10 +#define CLK_MOUT_ISP_PLL 11 +#define CLK_MOUT_AUD_PLL_USER_T 12 +#define CLK_MOUT_MPHY_PLL_USER 13 +#define CLK_MOUT_MFC_PLL_USER 14 +#define CLK_MOUT_BUS_PLL_USER 15 +#define CLK_MOUT_ACLK_HEVC_400 16 +#define CLK_MOUT_ACLK_CAM1_333 17 +#define CLK_MOUT_ACLK_CAM1_552_B 18 +#define CLK_MOUT_ACLK_CAM1_552_A 19 +#define CLK_MOUT_ACLK_ISP_DIS_400 20 +#define CLK_MOUT_ACLK_ISP_400 21 +#define CLK_MOUT_ACLK_BUS0_400 22 +#define CLK_MOUT_ACLK_MSCL_400_B 23 +#define CLK_MOUT_ACLK_MSCL_400_A 24 +#define CLK_MOUT_ACLK_GSCL_333 25 +#define CLK_MOUT_ACLK_G2D_400_B 26 +#define CLK_MOUT_ACLK_G2D_400_A 27 +#define CLK_MOUT_SCLK_JPEG_C 28 +#define CLK_MOUT_SCLK_JPEG_B 29 +#define CLK_MOUT_SCLK_JPEG_A 30 +#define CLK_MOUT_SCLK_MMC2_B 31 +#define CLK_MOUT_SCLK_MMC2_A 32 +#define CLK_MOUT_SCLK_MMC1_B 33 +#define CLK_MOUT_SCLK_MMC1_A 34 +#define CLK_MOUT_SCLK_MMC0_D 35 +#define CLK_MOUT_SCLK_MMC0_C 36 +#define CLK_MOUT_SCLK_MMC0_B 37 +#define CLK_MOUT_SCLK_MMC0_A 38 +#define CLK_MOUT_SCLK_SPI4 39 +#define CLK_MOUT_SCLK_SPI3 40 +#define CLK_MOUT_SCLK_UART2 41 +#define CLK_MOUT_SCLK_UART1 42 +#define CLK_MOUT_SCLK_UART0 43 +#define CLK_MOUT_SCLK_SPI2 44 +#define CLK_MOUT_SCLK_SPI1 45 +#define CLK_MOUT_SCLK_SPI0 46 +#define CLK_MOUT_ACLK_MFC_400_C 47 +#define CLK_MOUT_ACLK_MFC_400_B 48 +#define CLK_MOUT_ACLK_MFC_400_A 49 +#define CLK_MOUT_SCLK_ISP_SENSOR2 50 +#define CLK_MOUT_SCLK_ISP_SENSOR1 51 +#define CLK_MOUT_SCLK_ISP_SENSOR0 52 +#define CLK_MOUT_SCLK_ISP_UART 53 +#define CLK_MOUT_SCLK_ISP_SPI1 54 +#define CLK_MOUT_SCLK_ISP_SPI0 55 +#define CLK_MOUT_SCLK_PCIE_100 56 +#define CLK_MOUT_SCLK_UFSUNIPRO 57 +#define CLK_MOUT_SCLK_USBHOST30 58 +#define CLK_MOUT_SCLK_USBDRD30 59 +#define CLK_MOUT_SCLK_SLIMBUS 60 +#define CLK_MOUT_SCLK_SPDIF 61 +#define CLK_MOUT_SCLK_AUDIO1 62 +#define CLK_MOUT_SCLK_AUDIO0 63 +#define CLK_MOUT_SCLK_HDMI_SPDIF 64 + +#define CLK_DIV_ACLK_FSYS_200 100 +#define CLK_DIV_ACLK_IMEM_SSSX_266 101 +#define CLK_DIV_ACLK_IMEM_200 102 +#define CLK_DIV_ACLK_IMEM_266 103 +#define CLK_DIV_ACLK_PERIC_66_B 104 +#define CLK_DIV_ACLK_PERIC_66_A 105 +#define CLK_DIV_ACLK_PERIS_66_B 106 +#define CLK_DIV_ACLK_PERIS_66_A 107 +#define CLK_DIV_SCLK_MMC1_B 108 +#define CLK_DIV_SCLK_MMC1_A 109 +#define CLK_DIV_SCLK_MMC0_B 110 +#define CLK_DIV_SCLK_MMC0_A 111 +#define CLK_DIV_SCLK_MMC2_B 112 +#define CLK_DIV_SCLK_MMC2_A 113 +#define CLK_DIV_SCLK_SPI1_B 114 +#define CLK_DIV_SCLK_SPI1_A 115 +#define CLK_DIV_SCLK_SPI0_B 116 +#define CLK_DIV_SCLK_SPI0_A 117 +#define CLK_DIV_SCLK_SPI2_B 118 +#define CLK_DIV_SCLK_SPI2_A 119 +#define CLK_DIV_SCLK_UART2 120 +#define CLK_DIV_SCLK_UART1 121 +#define CLK_DIV_SCLK_UART0 122 +#define CLK_DIV_SCLK_SPI4_B 123 +#define CLK_DIV_SCLK_SPI4_A 124 +#define CLK_DIV_SCLK_SPI3_B 125 +#define CLK_DIV_SCLK_SPI3_A 126 +#define CLK_DIV_SCLK_I2S1 127 +#define CLK_DIV_SCLK_PCM1 128 +#define CLK_DIV_SCLK_AUDIO1 129 +#define CLK_DIV_SCLK_AUDIO0 130 +#define CLK_DIV_ACLK_GSCL_111 131 +#define 
CLK_DIV_ACLK_GSCL_333 132 +#define CLK_DIV_ACLK_HEVC_400 133 +#define CLK_DIV_ACLK_MFC_400 134 +#define CLK_DIV_ACLK_G2D_266 135 +#define CLK_DIV_ACLK_G2D_400 136 +#define CLK_DIV_ACLK_G3D_400 137 +#define CLK_DIV_ACLK_BUS0_400 138 +#define CLK_DIV_ACLK_BUS1_400 139 +#define CLK_DIV_SCLK_PCIE_100 140 +#define CLK_DIV_SCLK_USBHOST30 141 +#define CLK_DIV_SCLK_UFSUNIPRO 142 +#define CLK_DIV_SCLK_USBDRD30 143 +#define CLK_DIV_SCLK_JPEG 144 +#define CLK_DIV_ACLK_MSCL_400 145 +#define CLK_DIV_ACLK_ISP_DIS_400 146 +#define CLK_DIV_ACLK_ISP_400 147 +#define CLK_DIV_ACLK_CAM0_333 148 +#define CLK_DIV_ACLK_CAM0_400 149 +#define CLK_DIV_ACLK_CAM0_552 150 +#define CLK_DIV_ACLK_CAM1_333 151 +#define CLK_DIV_ACLK_CAM1_400 152 +#define CLK_DIV_ACLK_CAM1_552 153 +#define CLK_DIV_SCLK_ISP_UART 154 +#define CLK_DIV_SCLK_ISP_SPI1_B 155 +#define CLK_DIV_SCLK_ISP_SPI1_A 156 +#define CLK_DIV_SCLK_ISP_SPI0_B 157 +#define CLK_DIV_SCLK_ISP_SPI0_A 158 +#define CLK_DIV_SCLK_ISP_SENSOR2_B 159 +#define CLK_DIV_SCLK_ISP_SENSOR2_A 160 +#define CLK_DIV_SCLK_ISP_SENSOR1_B 161 +#define CLK_DIV_SCLK_ISP_SENSOR1_A 162 +#define CLK_DIV_SCLK_ISP_SENSOR0_B 163 +#define CLK_DIV_SCLK_ISP_SENSOR0_A 164 + +#define CLK_ACLK_PERIC_66 200 +#define CLK_ACLK_PERIS_66 201 +#define CLK_ACLK_FSYS_200 202 +#define CLK_SCLK_MMC2_FSYS 203 +#define CLK_SCLK_MMC1_FSYS 204 +#define CLK_SCLK_MMC0_FSYS 205 +#define CLK_SCLK_SPI4_PERIC 206 +#define CLK_SCLK_SPI3_PERIC 207 +#define CLK_SCLK_UART2_PERIC 208 +#define CLK_SCLK_UART1_PERIC 209 +#define CLK_SCLK_UART0_PERIC 210 +#define CLK_SCLK_SPI2_PERIC 211 +#define CLK_SCLK_SPI1_PERIC 212 +#define CLK_SCLK_SPI0_PERIC 213 +#define CLK_SCLK_SPDIF_PERIC 214 +#define CLK_SCLK_I2S1_PERIC 215 +#define CLK_SCLK_PCM1_PERIC 216 +#define CLK_SCLK_SLIMBUS 217 +#define CLK_SCLK_AUDIO1 218 +#define CLK_SCLK_AUDIO0 219 +#define CLK_ACLK_G2D_266 220 +#define CLK_ACLK_G2D_400 221 +#define CLK_ACLK_G3D_400 222 +#define CLK_ACLK_IMEM_SSSX_266 223 +#define CLK_ACLK_BUS0_400 224 +#define CLK_ACLK_BUS1_400 225 +#define CLK_ACLK_IMEM_200 226 +#define CLK_ACLK_IMEM_266 227 +#define CLK_SCLK_PCIE_100_FSYS 228 +#define CLK_SCLK_UFSUNIPRO_FSYS 229 +#define CLK_SCLK_USBHOST30_FSYS 230 +#define CLK_SCLK_USBDRD30_FSYS 231 +#define CLK_ACLK_GSCL_111 232 +#define CLK_ACLK_GSCL_333 233 +#define CLK_SCLK_JPEG_MSCL 234 +#define CLK_ACLK_MSCL_400 235 +#define CLK_ACLK_MFC_400 236 +#define CLK_ACLK_HEVC_400 237 +#define CLK_ACLK_ISP_DIS_400 238 +#define CLK_ACLK_ISP_400 239 +#define CLK_ACLK_CAM0_333 240 +#define CLK_ACLK_CAM0_400 241 +#define CLK_ACLK_CAM0_552 242 +#define CLK_ACLK_CAM1_333 243 +#define CLK_ACLK_CAM1_400 244 +#define CLK_ACLK_CAM1_552 245 +#define CLK_SCLK_ISP_SENSOR2 246 +#define CLK_SCLK_ISP_SENSOR1 247 +#define CLK_SCLK_ISP_SENSOR0 248 +#define CLK_SCLK_ISP_MCTADC_CAM1 249 +#define CLK_SCLK_ISP_UART_CAM1 250 +#define CLK_SCLK_ISP_SPI1_CAM1 251 +#define CLK_SCLK_ISP_SPI0_CAM1 252 +#define CLK_SCLK_HDMI_SPDIF_DISP 253 + +#define TOP_NR_CLK 254 + +/* CMU_CPIF */ +#define CLK_FOUT_MPHY_PLL 1 + +#define CLK_MOUT_MPHY_PLL 2 + +#define CLK_DIV_SCLK_MPHY 10 + +#define CLK_SCLK_MPHY_PLL 11 +#define CLK_SCLK_UFS_MPHY 11 + +#define CPIF_NR_CLK 12 + +/* CMU_MIF */ +#define CLK_FOUT_MEM0_PLL 1 +#define CLK_FOUT_MEM1_PLL 2 +#define CLK_FOUT_BUS_PLL 3 +#define CLK_FOUT_MFC_PLL 4 +#define CLK_DOUT_MFC_PLL 5 +#define CLK_DOUT_BUS_PLL 6 +#define CLK_DOUT_MEM1_PLL 7 +#define CLK_DOUT_MEM0_PLL 8 + +#define CLK_MOUT_MFC_PLL_DIV2 10 +#define CLK_MOUT_BUS_PLL_DIV2 11 +#define CLK_MOUT_MEM1_PLL_DIV2 12 +#define CLK_MOUT_MEM0_PLL_DIV2 13 
+#define CLK_MOUT_MFC_PLL 14 +#define CLK_MOUT_BUS_PLL 15 +#define CLK_MOUT_MEM1_PLL 16 +#define CLK_MOUT_MEM0_PLL 17 +#define CLK_MOUT_CLK2X_PHY_C 18 +#define CLK_MOUT_CLK2X_PHY_B 19 +#define CLK_MOUT_CLK2X_PHY_A 20 +#define CLK_MOUT_CLKM_PHY_C 21 +#define CLK_MOUT_CLKM_PHY_B 22 +#define CLK_MOUT_CLKM_PHY_A 23 +#define CLK_MOUT_ACLK_MIFNM_200 24 +#define CLK_MOUT_ACLK_MIFNM_400 25 +#define CLK_MOUT_ACLK_DISP_333_B 26 +#define CLK_MOUT_ACLK_DISP_333_A 27 +#define CLK_MOUT_SCLK_DECON_VCLK_C 28 +#define CLK_MOUT_SCLK_DECON_VCLK_B 29 +#define CLK_MOUT_SCLK_DECON_VCLK_A 30 +#define CLK_MOUT_SCLK_DECON_ECLK_C 31 +#define CLK_MOUT_SCLK_DECON_ECLK_B 32 +#define CLK_MOUT_SCLK_DECON_ECLK_A 33 +#define CLK_MOUT_SCLK_DECON_TV_ECLK_C 34 +#define CLK_MOUT_SCLK_DECON_TV_ECLK_B 35 +#define CLK_MOUT_SCLK_DECON_TV_ECLK_A 36 +#define CLK_MOUT_SCLK_DSD_C 37 +#define CLK_MOUT_SCLK_DSD_B 38 +#define CLK_MOUT_SCLK_DSD_A 39 +#define CLK_MOUT_SCLK_DSIM0_C 40 +#define CLK_MOUT_SCLK_DSIM0_B 41 +#define CLK_MOUT_SCLK_DSIM0_A 42 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_C 46 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_B 47 +#define CLK_MOUT_SCLK_DECON_TV_VCLK_A 48 +#define CLK_MOUT_SCLK_DSIM1_C 49 +#define CLK_MOUT_SCLK_DSIM1_B 50 +#define CLK_MOUT_SCLK_DSIM1_A 51 + +#define CLK_DIV_SCLK_HPM_MIF 55 +#define CLK_DIV_ACLK_DREX1 56 +#define CLK_DIV_ACLK_DREX0 57 +#define CLK_DIV_CLK2XPHY 58 +#define CLK_DIV_ACLK_MIF_266 59 +#define CLK_DIV_ACLK_MIFND_133 60 +#define CLK_DIV_ACLK_MIF_133 61 +#define CLK_DIV_ACLK_MIFNM_200 62 +#define CLK_DIV_ACLK_MIF_200 63 +#define CLK_DIV_ACLK_MIF_400 64 +#define CLK_DIV_ACLK_BUS2_400 65 +#define CLK_DIV_ACLK_DISP_333 66 +#define CLK_DIV_ACLK_CPIF_200 67 +#define CLK_DIV_SCLK_DSIM1 68 +#define CLK_DIV_SCLK_DECON_TV_VCLK 69 +#define CLK_DIV_SCLK_DSIM0 70 +#define CLK_DIV_SCLK_DSD 71 +#define CLK_DIV_SCLK_DECON_TV_ECLK 72 +#define CLK_DIV_SCLK_DECON_VCLK 73 +#define CLK_DIV_SCLK_DECON_ECLK 74 +#define CLK_DIV_MIF_PRE 75 + +#define CLK_CLK2X_PHY1 80 +#define CLK_CLK2X_PHY0 81 +#define CLK_CLKM_PHY1 82 +#define CLK_CLKM_PHY0 83 +#define CLK_RCLK_DREX1 84 +#define CLK_RCLK_DREX0 85 +#define CLK_ACLK_DREX1_TZ 86 +#define CLK_ACLK_DREX0_TZ 87 +#define CLK_ACLK_DREX1_PEREV 88 +#define CLK_ACLK_DREX0_PEREV 89 +#define CLK_ACLK_DREX1_MEMIF 90 +#define CLK_ACLK_DREX0_MEMIF 91 +#define CLK_ACLK_DREX1_SCH 92 +#define CLK_ACLK_DREX0_SCH 93 +#define CLK_ACLK_DREX1_BUSIF 94 +#define CLK_ACLK_DREX0_BUSIF 95 +#define CLK_ACLK_DREX1_BUSIF_RD 96 +#define CLK_ACLK_DREX0_BUSIF_RD 97 +#define CLK_ACLK_DREX1 98 +#define CLK_ACLK_DREX0 99 +#define CLK_ACLK_ASYNCAXIM_ATLAS_CCIX 100 +#define CLK_ACLK_ASYNCAXIS_ATLAS_MIF 101 +#define CLK_ACLK_ASYNCAXIM_ATLAS_MIF 102 +#define CLK_ACLK_ASYNCAXIS_MIF_IMEM 103 +#define CLK_ACLK_ASYNCAXIS_NOC_P_CCI 104 +#define CLK_ACLK_ASYNCAXIM_NOC_P_CCI 105 +#define CLK_ACLK_ASYNCAXIS_CP1 106 +#define CLK_ACLK_ASYNCAXIM_CP1 107 +#define CLK_ACLK_ASYNCAXIS_CP0 108 +#define CLK_ACLK_ASYNCAXIM_CP0 109 +#define CLK_ACLK_ASYNCAXIS_DREX1_3 110 +#define CLK_ACLK_ASYNCAXIM_DREX1_3 111 +#define CLK_ACLK_ASYNCAXIS_DREX1_1 112 +#define CLK_ACLK_ASYNCAXIM_DREX1_1 113 +#define CLK_ACLK_ASYNCAXIS_DREX1_0 114 +#define CLK_ACLK_ASYNCAXIM_DREX1_0 115 +#define CLK_ACLK_ASYNCAXIS_DREX0_3 116 +#define CLK_ACLK_ASYNCAXIM_DREX0_3 117 +#define CLK_ACLK_ASYNCAXIS_DREX0_1 118 +#define CLK_ACLK_ASYNCAXIM_DREX0_1 119 +#define CLK_ACLK_ASYNCAXIS_DREX0_0 120 +#define CLK_ACLK_ASYNCAXIM_DREX0_0 121 +#define CLK_ACLK_AHB2APB_MIF2P 122 +#define CLK_ACLK_AHB2APB_MIF1P 123 +#define CLK_ACLK_AHB2APB_MIF0P 124 +#define 
CLK_ACLK_IXIU_CCI 125 +#define CLK_ACLK_XIU_MIFSFRX 126 +#define CLK_ACLK_MIFNP_133 127 +#define CLK_ACLK_MIFNM_200 128 +#define CLK_ACLK_MIFND_133 129 +#define CLK_ACLK_MIFND_400 130 +#define CLK_ACLK_CCI 131 +#define CLK_ACLK_MIFND_266 132 +#define CLK_ACLK_PPMU_DREX1S3 133 +#define CLK_ACLK_PPMU_DREX1S1 134 +#define CLK_ACLK_PPMU_DREX1S0 135 +#define CLK_ACLK_PPMU_DREX0S3 136 +#define CLK_ACLK_PPMU_DREX0S1 137 +#define CLK_ACLK_PPMU_DREX0S0 138 +#define CLK_ACLK_BTS_APOLLO 139 +#define CLK_ACLK_BTS_ATLAS 140 +#define CLK_ACLK_ACE_SEL_APOLL 141 +#define CLK_ACLK_ACE_SEL_ATLAS 142 +#define CLK_ACLK_AXIDS_CCI_MIFSFRX 143 +#define CLK_ACLK_AXIUS_ATLAS_CCI 144 +#define CLK_ACLK_AXISYNCDNS_CCI 145 +#define CLK_ACLK_AXISYNCDN_CCI 146 +#define CLK_ACLK_AXISYNCDN_NOC_D 147 +#define CLK_ACLK_ASYNCACEM_APOLLO_CCI 148 +#define CLK_ACLK_ASYNCACEM_ATLAS_CCI 149 +#define CLK_ACLK_ASYNCAPBS_MIF_CSSYS 150 +#define CLK_ACLK_BUS2_400 151 +#define CLK_ACLK_DISP_333 152 +#define CLK_ACLK_CPIF_200 153 +#define CLK_PCLK_PPMU_DREX1S3 154 +#define CLK_PCLK_PPMU_DREX1S1 155 +#define CLK_PCLK_PPMU_DREX1S0 156 +#define CLK_PCLK_PPMU_DREX0S3 157 +#define CLK_PCLK_PPMU_DREX0S1 158 +#define CLK_PCLK_PPMU_DREX0S0 159 +#define CLK_PCLK_BTS_APOLLO 160 +#define CLK_PCLK_BTS_ATLAS 161 +#define CLK_PCLK_ASYNCAXI_NOC_P_CCI 162 +#define CLK_PCLK_ASYNCAXI_CP1 163 +#define CLK_PCLK_ASYNCAXI_CP0 164 +#define CLK_PCLK_ASYNCAXI_DREX1_3 165 +#define CLK_PCLK_ASYNCAXI_DREX1_1 166 +#define CLK_PCLK_ASYNCAXI_DREX1_0 167 +#define CLK_PCLK_ASYNCAXI_DREX0_3 168 +#define CLK_PCLK_ASYNCAXI_DREX0_1 169 +#define CLK_PCLK_ASYNCAXI_DREX0_0 170 +#define CLK_PCLK_MIFSRVND_133 171 +#define CLK_PCLK_PMU_MIF 172 +#define CLK_PCLK_SYSREG_MIF 173 +#define CLK_PCLK_GPIO_ALIVE 174 +#define CLK_PCLK_ABB 175 +#define CLK_PCLK_PMU_APBIF 176 +#define CLK_PCLK_DDR_PHY1 177 +#define CLK_PCLK_DREX1 178 +#define CLK_PCLK_DDR_PHY0 179 +#define CLK_PCLK_DREX0 180 +#define CLK_PCLK_DREX0_TZ 181 +#define CLK_PCLK_DREX1_TZ 182 +#define CLK_PCLK_MONOTONIC_CNT 183 +#define CLK_PCLK_RTC 184 +#define CLK_SCLK_DSIM1_DISP 185 +#define CLK_SCLK_DECON_TV_VCLK_DISP 186 +#define CLK_SCLK_FREQ_DET_BUS_PLL 187 +#define CLK_SCLK_FREQ_DET_MFC_PLL 188 +#define CLK_SCLK_FREQ_DET_MEM0_PLL 189 +#define CLK_SCLK_FREQ_DET_MEM1_PLL 190 +#define CLK_SCLK_DSIM0_DISP 191 +#define CLK_SCLK_DSD_DISP 192 +#define CLK_SCLK_DECON_TV_ECLK_DISP 193 +#define CLK_SCLK_DECON_VCLK_DISP 194 +#define CLK_SCLK_DECON_ECLK_DISP 195 +#define CLK_SCLK_HPM_MIF 196 +#define CLK_SCLK_MFC_PLL 197 +#define CLK_SCLK_BUS_PLL 198 +#define CLK_SCLK_BUS_PLL_APOLLO 199 +#define CLK_SCLK_BUS_PLL_ATLAS 200 + +#define MIF_NR_CLK 201 + +/* CMU_PERIC */ +#define CLK_PCLK_SPI2 1 +#define CLK_PCLK_SPI1 2 +#define CLK_PCLK_SPI0 3 +#define CLK_PCLK_UART2 4 +#define CLK_PCLK_UART1 5 +#define CLK_PCLK_UART0 6 +#define CLK_PCLK_HSI2C3 7 +#define CLK_PCLK_HSI2C2 8 +#define CLK_PCLK_HSI2C1 9 +#define CLK_PCLK_HSI2C0 10 +#define CLK_PCLK_I2C7 11 +#define CLK_PCLK_I2C6 12 +#define CLK_PCLK_I2C5 13 +#define CLK_PCLK_I2C4 14 +#define CLK_PCLK_I2C3 15 +#define CLK_PCLK_I2C2 16 +#define CLK_PCLK_I2C1 17 +#define CLK_PCLK_I2C0 18 +#define CLK_PCLK_SPI4 19 +#define CLK_PCLK_SPI3 20 +#define CLK_PCLK_HSI2C11 21 +#define CLK_PCLK_HSI2C10 22 +#define CLK_PCLK_HSI2C9 23 +#define CLK_PCLK_HSI2C8 24 +#define CLK_PCLK_HSI2C7 25 +#define CLK_PCLK_HSI2C6 26 +#define CLK_PCLK_HSI2C5 27 +#define CLK_PCLK_HSI2C4 28 +#define CLK_SCLK_SPI4 29 +#define CLK_SCLK_SPI3 30 +#define CLK_SCLK_SPI2 31 +#define CLK_SCLK_SPI1 32 +#define CLK_SCLK_SPI0 33 
+#define CLK_SCLK_UART2 34 +#define CLK_SCLK_UART1 35 +#define CLK_SCLK_UART0 36 +#define CLK_ACLK_AHB2APB_PERIC2P 37 +#define CLK_ACLK_AHB2APB_PERIC1P 38 +#define CLK_ACLK_AHB2APB_PERIC0P 39 +#define CLK_ACLK_PERICNP_66 40 +#define CLK_PCLK_SCI 41 +#define CLK_PCLK_GPIO_FINGER 42 +#define CLK_PCLK_GPIO_ESE 43 +#define CLK_PCLK_PWM 44 +#define CLK_PCLK_SPDIF 45 +#define CLK_PCLK_PCM1 46 +#define CLK_PCLK_I2S1 47 +#define CLK_PCLK_ADCIF 48 +#define CLK_PCLK_GPIO_TOUCH 49 +#define CLK_PCLK_GPIO_NFC 50 +#define CLK_PCLK_GPIO_PERIC 51 +#define CLK_PCLK_PMU_PERIC 52 +#define CLK_PCLK_SYSREG_PERIC 53 +#define CLK_SCLK_IOCLK_SPI4 54 +#define CLK_SCLK_IOCLK_SPI3 55 +#define CLK_SCLK_SCI 56 +#define CLK_SCLK_SC_IN 57 +#define CLK_SCLK_PWM 58 +#define CLK_SCLK_IOCLK_SPI2 59 +#define CLK_SCLK_IOCLK_SPI1 60 +#define CLK_SCLK_IOCLK_SPI0 61 +#define CLK_SCLK_IOCLK_I2S1_BCLK 62 +#define CLK_SCLK_SPDIF 63 +#define CLK_SCLK_PCM1 64 +#define CLK_SCLK_I2S1 65 + +#define CLK_DIV_SCLK_SCI 70 +#define CLK_DIV_SCLK_SC_IN 71 + +#define PERIC_NR_CLK 72 + +/* CMU_PERIS */ +#define CLK_PCLK_HPM_APBIF 1 +#define CLK_PCLK_TMU1_APBIF 2 +#define CLK_PCLK_TMU0_APBIF 3 +#define CLK_PCLK_PMU_PERIS 4 +#define CLK_PCLK_SYSREG_PERIS 5 +#define CLK_PCLK_CMU_TOP_APBIF 6 +#define CLK_PCLK_WDT_APOLLO 7 +#define CLK_PCLK_WDT_ATLAS 8 +#define CLK_PCLK_MCT 9 +#define CLK_PCLK_HDMI_CEC 10 +#define CLK_ACLK_AHB2APB_PERIS1P 11 +#define CLK_ACLK_AHB2APB_PERIS0P 12 +#define CLK_ACLK_PERISNP_66 13 +#define CLK_PCLK_TZPC12 14 +#define CLK_PCLK_TZPC11 15 +#define CLK_PCLK_TZPC10 16 +#define CLK_PCLK_TZPC9 17 +#define CLK_PCLK_TZPC8 18 +#define CLK_PCLK_TZPC7 19 +#define CLK_PCLK_TZPC6 20 +#define CLK_PCLK_TZPC5 21 +#define CLK_PCLK_TZPC4 22 +#define CLK_PCLK_TZPC3 23 +#define CLK_PCLK_TZPC2 24 +#define CLK_PCLK_TZPC1 25 +#define CLK_PCLK_TZPC0 26 +#define CLK_PCLK_SECKEY_APBIF 27 +#define CLK_PCLK_CHIPID_APBIF 28 +#define CLK_PCLK_TOPRTC 29 +#define CLK_PCLK_CUSTOM_EFUSE_APBIF 30 +#define CLK_PCLK_ANTIRBK_CNT_APBIF 31 +#define CLK_PCLK_OTP_CON_APBIF 32 +#define CLK_SCLK_ASV_TB 33 +#define CLK_SCLK_TMU1 34 +#define CLK_SCLK_TMU0 35 +#define CLK_SCLK_SECKEY 36 +#define CLK_SCLK_CHIPID 37 +#define CLK_SCLK_TOPRTC 38 +#define CLK_SCLK_CUSTOM_EFUSE 39 +#define CLK_SCLK_ANTIRBK_CNT 40 +#define CLK_SCLK_OTP_CON 41 + +#define PERIS_NR_CLK 42 + +/* CMU_FSYS */ +#define CLK_MOUT_ACLK_FSYS_200_USER 1 +#define CLK_MOUT_SCLK_MMC2_USER 2 +#define CLK_MOUT_SCLK_MMC1_USER 3 +#define CLK_MOUT_SCLK_MMC0_USER 4 +#define CLK_MOUT_SCLK_UFS_MPHY_USER 5 +#define CLK_MOUT_SCLK_PCIE_100_USER 6 +#define CLK_MOUT_SCLK_UFSUNIPRO_USER 7 +#define CLK_MOUT_SCLK_USBHOST30_USER 8 +#define CLK_MOUT_SCLK_USBDRD30_USER 9 +#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER 10 +#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER 11 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_HSIC1_USER 12 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_CLK48MOHCI_USER 13 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHYCLOCK_USER 14 +#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHY_FREECLK_USER 15 +#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER 16 +#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER 17 +#define CLK_MOUT_PHYCLK_UFS_RX1_SYMBOL_USER 18 +#define CLK_MOUT_PHYCLK_UFS_RX0_SYMBOL_USER 19 +#define CLK_MOUT_PHYCLK_UFS_TX1_SYMBOL_USER 20 +#define CLK_MOUT_PHYCLK_UFS_TX0_SYMBOL_USER 21 +#define CLK_MOUT_PHYCLK_LLI_MPHY_TO_UFS_USER 22 +#define CLK_MOUT_SCLK_MPHY 23 + +#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY 25 +#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY 26 +#define 
CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY 27 +#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY 28 +#define CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY 29 +#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY 30 +#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY 31 +#define CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY 32 +#define CLK_PHYCLK_UFS_TX0_SYMBOL_PHY 33 +#define CLK_PHYCLK_UFS_RX0_SYMBOL_PHY 34 +#define CLK_PHYCLK_UFS_TX1_SYMBOL_PHY 35 +#define CLK_PHYCLK_UFS_RX1_SYMBOL_PHY 36 +#define CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY 37 + +#define CLK_ACLK_PCIE 50 +#define CLK_ACLK_PDMA1 51 +#define CLK_ACLK_TSI 52 +#define CLK_ACLK_MMC2 53 +#define CLK_ACLK_MMC1 54 +#define CLK_ACLK_MMC0 55 +#define CLK_ACLK_UFS 56 +#define CLK_ACLK_USBHOST20 57 +#define CLK_ACLK_USBHOST30 58 +#define CLK_ACLK_USBDRD30 59 +#define CLK_ACLK_PDMA0 60 +#define CLK_SCLK_MMC2 61 +#define CLK_SCLK_MMC1 62 +#define CLK_SCLK_MMC0 63 +#define CLK_PDMA1 64 +#define CLK_PDMA0 65 +#define CLK_ACLK_XIU_FSYSPX 66 +#define CLK_ACLK_AHB_USBLINKH1 67 +#define CLK_ACLK_SMMU_PDMA1 68 +#define CLK_ACLK_BTS_PCIE 69 +#define CLK_ACLK_AXIUS_PDMA1 70 +#define CLK_ACLK_SMMU_PDMA0 71 +#define CLK_ACLK_BTS_UFS 72 +#define CLK_ACLK_BTS_USBHOST30 73 +#define CLK_ACLK_BTS_USBDRD30 74 +#define CLK_ACLK_AXIUS_PDMA0 75 +#define CLK_ACLK_AXIUS_USBHS 76 +#define CLK_ACLK_AXIUS_FSYSSX 77 +#define CLK_ACLK_AHB2APB_FSYSP 78 +#define CLK_ACLK_AHB2AXI_USBHS 79 +#define CLK_ACLK_AHB_USBLINKH0 80 +#define CLK_ACLK_AHB_USBHS 81 +#define CLK_ACLK_AHB_FSYSH 82 +#define CLK_ACLK_XIU_FSYSX 83 +#define CLK_ACLK_XIU_FSYSSX 84 +#define CLK_ACLK_FSYSNP_200 85 +#define CLK_ACLK_FSYSND_200 86 +#define CLK_PCLK_PCIE_CTRL 87 +#define CLK_PCLK_SMMU_PDMA1 88 +#define CLK_PCLK_PCIE_PHY 89 +#define CLK_PCLK_BTS_PCIE 90 +#define CLK_PCLK_SMMU_PDMA0 91 +#define CLK_PCLK_BTS_UFS 92 +#define CLK_PCLK_BTS_USBHOST30 93 +#define CLK_PCLK_BTS_USBDRD30 94 +#define CLK_PCLK_GPIO_FSYS 95 +#define CLK_PCLK_PMU_FSYS 96 +#define CLK_PCLK_SYSREG_FSYS 97 +#define CLK_SCLK_PCIE_100 98 +#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK 99 +#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK 100 +#define CLK_PHYCLK_UFS_RX1_SYMBOL 101 +#define CLK_PHYCLK_UFS_RX0_SYMBOL 102 +#define CLK_PHYCLK_UFS_TX1_SYMBOL 103 +#define CLK_PHYCLK_UFS_TX0_SYMBOL 104 +#define CLK_PHYCLK_USBHOST20_PHY_HSIC1 105 +#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI 106 +#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK 107 +#define CLK_PHYCLK_USBHOST20_PHY_FREECLK 108 +#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 109 +#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK 110 +#define CLK_SCLK_MPHY 111 +#define CLK_SCLK_UFSUNIPRO 112 +#define CLK_SCLK_USBHOST30 113 +#define CLK_SCLK_USBDRD30 114 +#define CLK_PCIE 115 + +#define FSYS_NR_CLK 116 + +/* CMU_G2D */ +#define CLK_MUX_ACLK_G2D_266_USER 1 +#define CLK_MUX_ACLK_G2D_400_USER 2 + +#define CLK_DIV_PCLK_G2D 3 + +#define CLK_ACLK_SMMU_MDMA1 4 +#define CLK_ACLK_BTS_MDMA1 5 +#define CLK_ACLK_BTS_G2D 6 +#define CLK_ACLK_ALB_G2D 7 +#define CLK_ACLK_AXIUS_G2DX 8 +#define CLK_ACLK_ASYNCAXI_SYSX 9 +#define CLK_ACLK_AHB2APB_G2D1P 10 +#define CLK_ACLK_AHB2APB_G2D0P 11 +#define CLK_ACLK_XIU_G2DX 12 +#define CLK_ACLK_G2DNP_133 13 +#define CLK_ACLK_G2DND_400 14 +#define CLK_ACLK_MDMA1 15 +#define CLK_ACLK_G2D 16 +#define CLK_ACLK_SMMU_G2D 17 +#define CLK_PCLK_SMMU_MDMA1 18 +#define CLK_PCLK_BTS_MDMA1 19 +#define CLK_PCLK_BTS_G2D 20 +#define CLK_PCLK_ALB_G2D 21 +#define CLK_PCLK_ASYNCAXI_SYSX 22 +#define CLK_PCLK_PMU_G2D 23 +#define CLK_PCLK_SYSREG_G2D 24 +#define CLK_PCLK_G2D 25 +#define CLK_PCLK_SMMU_G2D 26 + +#define G2D_NR_CLK 
+
+/* CMU_DISP */
+#define CLK_FOUT_DISP_PLL 1
+
+#define CLK_MOUT_DISP_PLL 2
+#define CLK_MOUT_SCLK_DSIM1_USER 3
+#define CLK_MOUT_SCLK_DSIM0_USER 4
+#define CLK_MOUT_SCLK_DSD_USER 5
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_USER 6
+#define CLK_MOUT_SCLK_DECON_VCLK_USER 7
+#define CLK_MOUT_SCLK_DECON_ECLK_USER 8
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_USER 9
+#define CLK_MOUT_ACLK_DISP_333_USER 10
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_BITCLKDIV8_USER 11
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_RXCLKESC0_USER 12
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER 13
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER 14
+#define CLK_MOUT_PHYCLK_HDMIPHY_TMDS_CLKO_USER 15
+#define CLK_MOUT_PHYCLK_HDMIPHY_PIXEL_CLKO_USER 16
+#define CLK_MOUT_SCLK_DSIM0 17
+#define CLK_MOUT_SCLK_DECON_TV_ECLK 18
+#define CLK_MOUT_SCLK_DECON_VCLK 19
+#define CLK_MOUT_SCLK_DECON_ECLK 20
+#define CLK_MOUT_SCLK_DSIM1_B_DISP 21
+#define CLK_MOUT_SCLK_DSIM1_A_DISP 22
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C_DISP 23
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B_DISP 24
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A_DISP 25
+
+#define CLK_DIV_SCLK_DSIM1_DISP 30
+#define CLK_DIV_SCLK_DECON_TV_VCLK_DISP 31
+#define CLK_DIV_SCLK_DSIM0_DISP 32
+#define CLK_DIV_SCLK_DECON_TV_ECLK_DISP 33
+#define CLK_DIV_SCLK_DECON_VCLK_DISP 34
+#define CLK_DIV_SCLK_DECON_ECLK_DISP 35
+#define CLK_DIV_PCLK_DISP 36
+
+#define CLK_ACLK_DECON_TV 40
+#define CLK_ACLK_DECON 41
+#define CLK_ACLK_SMMU_TV1X 42
+#define CLK_ACLK_SMMU_TV0X 43
+#define CLK_ACLK_SMMU_DECON1X 44
+#define CLK_ACLK_SMMU_DECON0X 45
+#define CLK_ACLK_BTS_DECON_TV_M3 46
+#define CLK_ACLK_BTS_DECON_TV_M2 47
+#define CLK_ACLK_BTS_DECON_TV_M1 48
+#define CLK_ACLK_BTS_DECON_TV_M0 49
+#define CLK_ACLK_BTS_DECON_NM4 50
+#define CLK_ACLK_BTS_DECON_NM3 51
+#define CLK_ACLK_BTS_DECON_NM2 52
+#define CLK_ACLK_BTS_DECON_NM1 53
+#define CLK_ACLK_BTS_DECON_NM0 54
+#define CLK_ACLK_AHB2APB_DISPSFR2P 55
+#define CLK_ACLK_AHB2APB_DISPSFR1P 56
+#define CLK_ACLK_AHB2APB_DISPSFR0P 57
+#define CLK_ACLK_AHB_DISPH 58
+#define CLK_ACLK_XIU_TV1X 59
+#define CLK_ACLK_XIU_TV0X 60
+#define CLK_ACLK_XIU_DECON1X 61
+#define CLK_ACLK_XIU_DECON0X 62
+#define CLK_ACLK_XIU_DISP1X 63
+#define CLK_ACLK_XIU_DISPNP_100 64
+#define CLK_ACLK_DISP1ND_333 65
+#define CLK_ACLK_DISP0ND_333 66
+#define CLK_PCLK_SMMU_TV1X 67
+#define CLK_PCLK_SMMU_TV0X 68
+#define CLK_PCLK_SMMU_DECON1X 69
+#define CLK_PCLK_SMMU_DECON0X 70
+#define CLK_PCLK_BTS_DECON_TV_M3 71
+#define CLK_PCLK_BTS_DECON_TV_M2 72
+#define CLK_PCLK_BTS_DECON_TV_M1 73
+#define CLK_PCLK_BTS_DECON_TV_M0 74
+#define CLK_PCLK_BTS_DECONM4 75
+#define CLK_PCLK_BTS_DECONM3 76
+#define CLK_PCLK_BTS_DECONM2 77
+#define CLK_PCLK_BTS_DECONM1 78
+#define CLK_PCLK_BTS_DECONM0 79
+#define CLK_PCLK_MIC1 80
+#define CLK_PCLK_PMU_DISP 81
+#define CLK_PCLK_SYSREG_DISP 82
+#define CLK_PCLK_HDMIPHY 83
+#define CLK_PCLK_HDMI 84
+#define CLK_PCLK_MIC0 85
+#define CLK_PCLK_DSIM1 86
+#define CLK_PCLK_DSIM0 87
+#define CLK_PCLK_DECON_TV 88
+#define CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8 89
+#define CLK_PHYCLK_MIPIDPHY1_RXCLKESC0 90
+#define CLK_SCLK_RGB_TV_VCLK_TO_DSIM1 91
+#define CLK_SCLK_RGB_TV_VCLK_TO_MIC1 92
+#define CLK_SCLK_DSIM1 93
+#define CLK_SCLK_DECON_TV_VCLK 94
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8 95
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0 96
+#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO 97
+#define CLK_PHYCLK_HDMI_PIXEL 98
+#define CLK_SCLK_RGB_VCLK_TO_SMIES 99
+#define CLK_SCLK_FREQ_DET_DISP_PLL 100
+#define CLK_SCLK_RGB_VCLK_TO_DSIM0 101
+#define CLK_SCLK_RGB_VCLK_TO_MIC0 102
+#define CLK_SCLK_DSD 103
+#define CLK_SCLK_HDMI_SPDIF 104
+#define CLK_SCLK_DSIM0 105
+#define CLK_SCLK_DECON_TV_ECLK 106
+#define CLK_SCLK_DECON_VCLK 107
+#define CLK_SCLK_DECON_ECLK 108
+#define CLK_SCLK_RGB_VCLK 109
+#define CLK_SCLK_RGB_TV_VCLK 110
+
+#define CLK_PHYCLK_HDMIPHY_PIXEL_CLKO_PHY 111
+#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY 112
+
+#define CLK_PCLK_DECON 113
+
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
+
+#define DISP_NR_CLK 116
+
+/* CMU_AUD */
+#define CLK_MOUT_AUD_PLL_USER 1
+#define CLK_MOUT_SCLK_AUD_PCM 2
+#define CLK_MOUT_SCLK_AUD_I2S 3
+
+#define CLK_DIV_ATCLK_AUD 4
+#define CLK_DIV_PCLK_DBG_AUD 5
+#define CLK_DIV_ACLK_AUD 6
+#define CLK_DIV_AUD_CA5 7
+#define CLK_DIV_SCLK_AUD_SLIMBUS 8
+#define CLK_DIV_SCLK_AUD_UART 9
+#define CLK_DIV_SCLK_AUD_PCM 10
+#define CLK_DIV_SCLK_AUD_I2S 11
+
+#define CLK_ACLK_INTR_CTRL 12
+#define CLK_ACLK_AXIDS2_LPASSP 13
+#define CLK_ACLK_AXIDS1_LPASSP 14
+#define CLK_ACLK_AXI2APB1_LPASSP 15
+#define CLK_ACLK_AXI2APH_LPASSP 16
+#define CLK_ACLK_SMMU_LPASSX 17
+#define CLK_ACLK_AXIDS0_LPASSP 18
+#define CLK_ACLK_AXI2APB0_LPASSP 19
+#define CLK_ACLK_XIU_LPASSX 20
+#define CLK_ACLK_AUDNP_133 21
+#define CLK_ACLK_AUDND_133 22
+#define CLK_ACLK_SRAMC 23
+#define CLK_ACLK_DMAC 24
+#define CLK_PCLK_WDT1 25
+#define CLK_PCLK_WDT0 26
+#define CLK_PCLK_SFR1 27
+#define CLK_PCLK_SMMU_LPASSX 28
+#define CLK_PCLK_GPIO_AUD 29
+#define CLK_PCLK_PMU_AUD 30
+#define CLK_PCLK_SYSREG_AUD 31
+#define CLK_PCLK_AUD_SLIMBUS 32
+#define CLK_PCLK_AUD_UART 33
+#define CLK_PCLK_AUD_PCM 34
+#define CLK_PCLK_AUD_I2S 35
+#define CLK_PCLK_TIMER 36
+#define CLK_PCLK_SFR0_CTRL 37
+#define CLK_ATCLK_AUD 38
+#define CLK_PCLK_DBG_AUD 39
+#define CLK_SCLK_AUD_CA5 40
+#define CLK_SCLK_JTAG_TCK 41
+#define CLK_SCLK_SLIMBUS_CLKIN 42
+#define CLK_SCLK_AUD_SLIMBUS 43
+#define CLK_SCLK_AUD_UART 44
+#define CLK_SCLK_AUD_PCM 45
+#define CLK_SCLK_I2S_BCLK 46
+#define CLK_SCLK_AUD_I2S 47
+
+#define AUD_NR_CLK 48
+
+/* CMU_BUS{0|1|2} */
+#define CLK_DIV_PCLK_BUS_133 1
+
+#define CLK_ACLK_AHB2APB_BUSP 2
+#define CLK_ACLK_BUSNP_133 3
+#define CLK_ACLK_BUSND_400 4
+#define CLK_PCLK_BUSSRVND_133 5
+#define CLK_PCLK_PMU_BUS 6
+#define CLK_PCLK_SYSREG_BUS 7
+
+#define CLK_MOUT_ACLK_BUS2_400_USER 8 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
+
+#define BUSx_NR_CLK 11
+
+/* CMU_G3D */
+#define CLK_FOUT_G3D_PLL 1
+
+#define CLK_MOUT_ACLK_G3D_400 2
+#define CLK_MOUT_G3D_PLL 3
+
+#define CLK_DIV_SCLK_HPM_G3D 4
+#define CLK_DIV_PCLK_G3D 5
+#define CLK_DIV_ACLK_G3D 6
+#define CLK_ACLK_BTS_G3D1 7
+#define CLK_ACLK_BTS_G3D0 8
+#define CLK_ACLK_ASYNCAPBS_G3D 9
+#define CLK_ACLK_ASYNCAPBM_G3D 10
+#define CLK_ACLK_AHB2APB_G3DP 11
+#define CLK_ACLK_G3DNP_150 12
+#define CLK_ACLK_G3DND_600 13
+#define CLK_ACLK_G3D 14
+#define CLK_PCLK_BTS_G3D1 15
+#define CLK_PCLK_BTS_G3D0 16
+#define CLK_PCLK_PMU_G3D 17
+#define CLK_PCLK_SYSREG_G3D 18
+#define CLK_SCLK_HPM_G3D 19
+
+#define G3D_NR_CLK 20
+
+/* CMU_GSCL */
+#define CLK_MOUT_ACLK_GSCL_111_USER 1
+#define CLK_MOUT_ACLK_GSCL_333_USER 2
+
+#define CLK_ACLK_BTS_GSCL2 3
+#define CLK_ACLK_BTS_GSCL1 4
+#define CLK_ACLK_BTS_GSCL0 5
+#define CLK_ACLK_AHB2APB_GSCLP 6
+#define CLK_ACLK_XIU_GSCLX 7
+#define CLK_ACLK_GSCLNP_111 8
+#define CLK_ACLK_GSCLRTND_333 9
+#define CLK_ACLK_GSCLBEND_333 10
+#define CLK_ACLK_GSD 11
+#define CLK_ACLK_GSCL2 12
+#define CLK_ACLK_GSCL1 13
+#define CLK_ACLK_GSCL0 14
+#define CLK_ACLK_SMMU_GSCL0 15
+#define CLK_ACLK_SMMU_GSCL1 16
+#define CLK_ACLK_SMMU_GSCL2 17
+#define CLK_PCLK_BTS_GSCL2 18
+#define CLK_PCLK_BTS_GSCL1 19
+#define CLK_PCLK_BTS_GSCL0 20
+#define CLK_PCLK_PMU_GSCL 21
+#define CLK_PCLK_SYSREG_GSCL 22
+#define CLK_PCLK_GSCL2 23
+#define CLK_PCLK_GSCL1 24
+#define CLK_PCLK_GSCL0 25
+#define CLK_PCLK_SMMU_GSCL0 26
+#define CLK_PCLK_SMMU_GSCL1 27
+#define CLK_PCLK_SMMU_GSCL2 28
+
+#define GSCL_NR_CLK 29
+
+/* CMU_APOLLO */
+#define CLK_FOUT_APOLLO_PLL 1
+
+#define CLK_MOUT_APOLLO_PLL 2
+#define CLK_MOUT_BUS_PLL_APOLLO_USER 3
+#define CLK_MOUT_APOLLO 4
+
+#define CLK_DIV_CNTCLK_APOLLO 5
+#define CLK_DIV_PCLK_DBG_APOLLO 6
+#define CLK_DIV_ATCLK_APOLLO 7
+#define CLK_DIV_PCLK_APOLLO 8
+#define CLK_DIV_ACLK_APOLLO 9
+#define CLK_DIV_APOLLO2 10
+#define CLK_DIV_APOLLO1 11
+#define CLK_DIV_SCLK_HPM_APOLLO 12
+#define CLK_DIV_APOLLO_PLL 13
+
+#define CLK_ACLK_ATBDS_APOLLO_3 14
+#define CLK_ACLK_ATBDS_APOLLO_2 15
+#define CLK_ACLK_ATBDS_APOLLO_1 16
+#define CLK_ACLK_ATBDS_APOLLO_0 17
+#define CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS 18
+#define CLK_ACLK_ASATBSLV_APOLLO_2_CSSYS 19
+#define CLK_ACLK_ASATBSLV_APOLLO_1_CSSYS 20
+#define CLK_ACLK_ASATBSLV_APOLLO_0_CSSYS 21
+#define CLK_ACLK_ASYNCACES_APOLLO_CCI 22
+#define CLK_ACLK_AHB2APB_APOLLOP 23
+#define CLK_ACLK_APOLLONP_200 24
+#define CLK_PCLK_ASAPBMST_CSSYS_APOLLO 25
+#define CLK_PCLK_PMU_APOLLO 26
+#define CLK_PCLK_SYSREG_APOLLO 27
+#define CLK_CNTCLK_APOLLO 28
+#define CLK_SCLK_HPM_APOLLO 29
+#define CLK_SCLK_APOLLO 30
+
+#define APOLLO_NR_CLK 31
+
+/* CMU_ATLAS */
+#define CLK_FOUT_ATLAS_PLL 1
+
+#define CLK_MOUT_ATLAS_PLL 2
+#define CLK_MOUT_BUS_PLL_ATLAS_USER 3
+#define CLK_MOUT_ATLAS 4
+
+#define CLK_DIV_CNTCLK_ATLAS 5
+#define CLK_DIV_PCLK_DBG_ATLAS 6
+#define CLK_DIV_ATCLK_ATLASO 7
+#define CLK_DIV_PCLK_ATLAS 8
+#define CLK_DIV_ACLK_ATLAS 9
+#define CLK_DIV_ATLAS2 10
+#define CLK_DIV_ATLAS1 11
+#define CLK_DIV_SCLK_HPM_ATLAS 12
+#define CLK_DIV_ATLAS_PLL 13
+
+#define CLK_ACLK_ATB_AUD_CSSYS 14
+#define CLK_ACLK_ATB_APOLLO3_CSSYS 15
+#define CLK_ACLK_ATB_APOLLO2_CSSYS 16
+#define CLK_ACLK_ATB_APOLLO1_CSSYS 17
+#define CLK_ACLK_ATB_APOLLO0_CSSYS 18
+#define CLK_ACLK_ASYNCAHBS_CSSYS_SSS 19
+#define CLK_ACLK_ASYNCAXIS_CSSYS_CCIX 20
+#define CLK_ACLK_ASYNCACES_ATLAS_CCI 21
+#define CLK_ACLK_AHB2APB_ATLASP 22
+#define CLK_ACLK_ATLASNP_200 23
+#define CLK_PCLK_ASYNCAPB_AUD_CSSYS 24
+#define CLK_PCLK_ASYNCAPB_ISP_CSSYS 25
+#define CLK_PCLK_ASYNCAPB_APOLLO_CSSYS 26
+#define CLK_PCLK_PMU_ATLAS 27
+#define CLK_PCLK_SYSREG_ATLAS 28
+#define CLK_PCLK_SECJTAG 29
+#define CLK_CNTCLK_ATLAS 30
+#define CLK_SCLK_FREQ_DET_ATLAS_PLL 31
+#define CLK_SCLK_HPM_ATLAS 32
+#define CLK_TRACECLK 33
+#define CLK_CTMCLK 34
+#define CLK_HCLK_CSSYS 35
+#define CLK_PCLK_DBG_CSSYS 36
+#define CLK_PCLK_DBG 37
+#define CLK_ATCLK 38
+#define CLK_SCLK_ATLAS 39
+
+#define ATLAS_NR_CLK 40
+
+/* CMU_MSCL */
+#define CLK_MOUT_SCLK_JPEG_USER 1
+#define CLK_MOUT_ACLK_MSCL_400_USER 2
+#define CLK_MOUT_SCLK_JPEG 3
+
+#define CLK_DIV_PCLK_MSCL 4
+
+#define CLK_ACLK_BTS_JPEG 5
+#define CLK_ACLK_BTS_M2MSCALER1 6
+#define CLK_ACLK_BTS_M2MSCALER0 7
+#define CLK_ACLK_AHB2APB_MSCL0P 8
+#define CLK_ACLK_XIU_MSCLX 9
+#define CLK_ACLK_MSCLNP_100 10
+#define CLK_ACLK_MSCLND_400 11
+#define CLK_ACLK_JPEG 12
+#define CLK_ACLK_M2MSCALER1 13
+#define CLK_ACLK_M2MSCALER0 14
+#define CLK_ACLK_SMMU_M2MSCALER0 15
+#define CLK_ACLK_SMMU_M2MSCALER1 16
+#define CLK_ACLK_SMMU_JPEG 17
+#define CLK_PCLK_BTS_JPEG 18
+#define CLK_PCLK_BTS_M2MSCALER1 19
+#define CLK_PCLK_BTS_M2MSCALER0 20
+#define CLK_PCLK_PMU_MSCL 21
+#define CLK_PCLK_SYSREG_MSCL 22
+#define CLK_PCLK_JPEG 23
+#define CLK_PCLK_M2MSCALER1 24
+#define CLK_PCLK_M2MSCALER0 25
+#define CLK_PCLK_SMMU_M2MSCALER0 26
+#define CLK_PCLK_SMMU_M2MSCALER1 27
+#define CLK_PCLK_SMMU_JPEG 28
+#define CLK_SCLK_JPEG 29
+
+#define MSCL_NR_CLK 30
+
+/* CMU_MFC */
+#define CLK_MOUT_ACLK_MFC_400_USER 1
+
+#define CLK_DIV_PCLK_MFC 2
+
+#define CLK_ACLK_BTS_MFC_1 3
+#define CLK_ACLK_BTS_MFC_0 4
+#define CLK_ACLK_AHB2APB_MFCP 5
+#define CLK_ACLK_XIU_MFCX 6
+#define CLK_ACLK_MFCNP_100 7
+#define CLK_ACLK_MFCND_400 8
+#define CLK_ACLK_MFC 9
+#define CLK_ACLK_SMMU_MFC_1 10
+#define CLK_ACLK_SMMU_MFC_0 11
+#define CLK_PCLK_BTS_MFC_1 12
+#define CLK_PCLK_BTS_MFC_0 13
+#define CLK_PCLK_PMU_MFC 14
+#define CLK_PCLK_SYSREG_MFC 15
+#define CLK_PCLK_MFC 16
+#define CLK_PCLK_SMMU_MFC_1 17
+#define CLK_PCLK_SMMU_MFC_0 18
+
+#define MFC_NR_CLK 19
+
+/* CMU_HEVC */
+#define CLK_MOUT_ACLK_HEVC_400_USER 1
+
+#define CLK_DIV_PCLK_HEVC 2
+
+#define CLK_ACLK_BTS_HEVC_1 3
+#define CLK_ACLK_BTS_HEVC_0 4
+#define CLK_ACLK_AHB2APB_HEVCP 5
+#define CLK_ACLK_XIU_HEVCX 6
+#define CLK_ACLK_HEVCNP_100 7
+#define CLK_ACLK_HEVCND_400 8
+#define CLK_ACLK_HEVC 9
+#define CLK_ACLK_SMMU_HEVC_1 10
+#define CLK_ACLK_SMMU_HEVC_0 11
+#define CLK_PCLK_BTS_HEVC_1 12
+#define CLK_PCLK_BTS_HEVC_0 13
+#define CLK_PCLK_PMU_HEVC 14
+#define CLK_PCLK_SYSREG_HEVC 15
+#define CLK_PCLK_HEVC 16
+#define CLK_PCLK_SMMU_HEVC_1 17
+#define CLK_PCLK_SMMU_HEVC_0 18
+
+#define HEVC_NR_CLK 19
+
+/* CMU_ISP */
+#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
+#define CLK_MOUT_ACLK_ISP_400_USER 2
+
+#define CLK_DIV_PCLK_ISP_DIS 3
+#define CLK_DIV_PCLK_ISP 4
+#define CLK_DIV_ACLK_ISP_D_200 5
+#define CLK_DIV_ACLK_ISP_C_200 6
+
+#define CLK_ACLK_ISP_D_GLUE 7
+#define CLK_ACLK_SCALERP 8
+#define CLK_ACLK_3DNR 9
+#define CLK_ACLK_DIS 10
+#define CLK_ACLK_SCALERC 11
+#define CLK_ACLK_DRC 12
+#define CLK_ACLK_ISP 13
+#define CLK_ACLK_AXIUS_SCALERP 14
+#define CLK_ACLK_AXIUS_SCALERC 15
+#define CLK_ACLK_AXIUS_DRC 16
+#define CLK_ACLK_ASYNCAHBM_ISP2P 17
+#define CLK_ACLK_ASYNCAHBM_ISP1P 18
+#define CLK_ACLK_ASYNCAXIS_DIS1 19
+#define CLK_ACLK_ASYNCAXIS_DIS0 20
+#define CLK_ACLK_ASYNCAXIM_DIS1 21
+#define CLK_ACLK_ASYNCAXIM_DIS0 22
+#define CLK_ACLK_ASYNCAXIM_ISP2P 23
+#define CLK_ACLK_ASYNCAXIM_ISP1P 24
+#define CLK_ACLK_AHB2APB_ISP2P 25
+#define CLK_ACLK_AHB2APB_ISP1P 26
+#define CLK_ACLK_AXI2APB_ISP2P 27
+#define CLK_ACLK_AXI2APB_ISP1P 28
+#define CLK_ACLK_XIU_ISPEX1 29
+#define CLK_ACLK_XIU_ISPEX0 30
+#define CLK_ACLK_ISPND_400 31
+#define CLK_ACLK_SMMU_SCALERP 32
+#define CLK_ACLK_SMMU_3DNR 33
+#define CLK_ACLK_SMMU_DIS1 34
+#define CLK_ACLK_SMMU_DIS0 35
+#define CLK_ACLK_SMMU_SCALERC 36
+#define CLK_ACLK_SMMU_DRC 37
+#define CLK_ACLK_SMMU_ISP 38
+#define CLK_ACLK_BTS_SCALERP 39
+#define CLK_ACLK_BTS_3DR 40
+#define CLK_ACLK_BTS_DIS1 41
+#define CLK_ACLK_BTS_DIS0 42
+#define CLK_ACLK_BTS_SCALERC 43
+#define CLK_ACLK_BTS_DRC 44
+#define CLK_ACLK_BTS_ISP 45
+#define CLK_PCLK_SMMU_SCALERP 46
+#define CLK_PCLK_SMMU_3DNR 47
+#define CLK_PCLK_SMMU_DIS1 48
+#define CLK_PCLK_SMMU_DIS0 49
+#define CLK_PCLK_SMMU_SCALERC 50
+#define CLK_PCLK_SMMU_DRC 51
+#define CLK_PCLK_SMMU_ISP 52
+#define CLK_PCLK_BTS_SCALERP 53
+#define CLK_PCLK_BTS_3DNR 54
+#define CLK_PCLK_BTS_DIS1 55
+#define CLK_PCLK_BTS_DIS0 56
+#define CLK_PCLK_BTS_SCALERC 57
+#define CLK_PCLK_BTS_DRC 58
+#define CLK_PCLK_BTS_ISP 59
+#define CLK_PCLK_ASYNCAXI_DIS1 60
+#define CLK_PCLK_ASYNCAXI_DIS0 61
+#define CLK_PCLK_PMU_ISP 62
+#define CLK_PCLK_SYSREG_ISP 63
+#define CLK_PCLK_CMU_ISP_LOCAL 64
+#define CLK_PCLK_SCALERP 65
+#define CLK_PCLK_3DNR 66
+#define CLK_PCLK_DIS_CORE 67
+#define CLK_PCLK_DIS 68
+#define CLK_PCLK_SCALERC 69
+#define CLK_PCLK_DRC 70
+#define CLK_PCLK_ISP 71
+#define CLK_SCLK_PIXELASYNCS_DIS 72
+#define CLK_SCLK_PIXELASYNCM_DIS 73
+#define CLK_SCLK_PIXELASYNCS_SCALERP 74
+#define CLK_SCLK_PIXELASYNCM_ISPD 75
+#define CLK_SCLK_PIXELASYNCS_ISPC 76
+#define CLK_SCLK_PIXELASYNCM_ISPC 77
+
+#define ISP_NR_CLK 78
+
+/* CMU_CAM0 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
+
+#define CLK_MOUT_ACLK_CAM0_333_USER 3
+#define CLK_MOUT_ACLK_CAM0_400_USER 4
+#define CLK_MOUT_ACLK_CAM0_552_USER 5
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S4_USER 6
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2A_USER 7
+#define CLK_MOUT_ACLK_LITE_D_B 8
+#define CLK_MOUT_ACLK_LITE_D_A 9
+#define CLK_MOUT_ACLK_LITE_B_B 10
+#define CLK_MOUT_ACLK_LITE_B_A 11
+#define CLK_MOUT_ACLK_LITE_A_B 12
+#define CLK_MOUT_ACLK_LITE_A_A 13
+#define CLK_MOUT_ACLK_CAM0_400 14
+#define CLK_MOUT_ACLK_CSIS1_B 15
+#define CLK_MOUT_ACLK_CSIS1_A 16
+#define CLK_MOUT_ACLK_CSIS0_B 17
+#define CLK_MOUT_ACLK_CSIS0_A 18
+#define CLK_MOUT_ACLK_3AA1_B 19
+#define CLK_MOUT_ACLK_3AA1_A 20
+#define CLK_MOUT_ACLK_3AA0_B 21
+#define CLK_MOUT_ACLK_3AA0_A 22
+#define CLK_MOUT_SCLK_LITE_FREECNT_C 23
+#define CLK_MOUT_SCLK_LITE_FREECNT_B 24
+#define CLK_MOUT_SCLK_LITE_FREECNT_A 25
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B 26
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A 27
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B 28
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A 29
+
+#define CLK_DIV_PCLK_CAM0_50 30
+#define CLK_DIV_ACLK_CAM0_200 31
+#define CLK_DIV_ACLK_CAM0_BUS_400 32
+#define CLK_DIV_PCLK_LITE_D 33
+#define CLK_DIV_ACLK_LITE_D 34
+#define CLK_DIV_PCLK_LITE_B 35
+#define CLK_DIV_ACLK_LITE_B 36
+#define CLK_DIV_PCLK_LITE_A 37
+#define CLK_DIV_ACLK_LITE_A 38
+#define CLK_DIV_ACLK_CSIS1 39
+#define CLK_DIV_ACLK_CSIS0 40
+#define CLK_DIV_PCLK_3AA1 41
+#define CLK_DIV_ACLK_3AA1 42
+#define CLK_DIV_PCLK_3AA0 43
+#define CLK_DIV_ACLK_3AA0 44
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C 45
+#define CLK_DIV_PCLK_PIXELASYNC_LITE_C 46
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C_INIT 47
+
+#define CLK_ACLK_CSIS1 50
+#define CLK_ACLK_CSIS0 51
+#define CLK_ACLK_3AA1 52
+#define CLK_ACLK_3AA0 53
+#define CLK_ACLK_LITE_D 54
+#define CLK_ACLK_LITE_B 55
+#define CLK_ACLK_LITE_A 56
+#define CLK_ACLK_AHBSYNCDN 57
+#define CLK_ACLK_AXIUS_LITE_D 58
+#define CLK_ACLK_AXIUS_LITE_B 59
+#define CLK_ACLK_AXIUS_LITE_A 60
+#define CLK_ACLK_ASYNCAPBM_3AA1 61
+#define CLK_ACLK_ASYNCAPBS_3AA1 62
+#define CLK_ACLK_ASYNCAPBM_3AA0 63
+#define CLK_ACLK_ASYNCAPBS_3AA0 64
+#define CLK_ACLK_ASYNCAPBM_LITE_D 65
+#define CLK_ACLK_ASYNCAPBS_LITE_D 66
+#define CLK_ACLK_ASYNCAPBM_LITE_B 67
+#define CLK_ACLK_ASYNCAPBS_LITE_B 68
+#define CLK_ACLK_ASYNCAPBM_LITE_A 69
+#define CLK_ACLK_ASYNCAPBS_LITE_A 70
+#define CLK_ACLK_ASYNCAXIM_ISP0P 71
+#define CLK_ACLK_ASYNCAXIM_3AA1 72
+#define CLK_ACLK_ASYNCAXIS_3AA1 73
+#define CLK_ACLK_ASYNCAXIM_3AA0 74
+#define CLK_ACLK_ASYNCAXIS_3AA0 75
+#define CLK_ACLK_ASYNCAXIM_LITE_D 76
+#define CLK_ACLK_ASYNCAXIS_LITE_D 77
+#define CLK_ACLK_ASYNCAXIM_LITE_B 78
+#define CLK_ACLK_ASYNCAXIS_LITE_B 79
+#define CLK_ACLK_ASYNCAXIM_LITE_A 80
+#define CLK_ACLK_ASYNCAXIS_LITE_A 81
+#define CLK_ACLK_AHB2APB_ISPSFRP 82
+#define CLK_ACLK_AXI2APB_ISP0P 83
+#define CLK_ACLK_AXI2AHB_ISP0P 84
+#define CLK_ACLK_XIU_IS0X 85
+#define CLK_ACLK_XIU_ISP0EX 86
+#define CLK_ACLK_CAM0NP_276 87
+#define CLK_ACLK_CAM0ND_400 88
+#define CLK_ACLK_SMMU_3AA1 89
+#define CLK_ACLK_SMMU_3AA0 90
+#define CLK_ACLK_SMMU_LITE_D 91
+#define CLK_ACLK_SMMU_LITE_B 92
+#define CLK_ACLK_SMMU_LITE_A 93
+#define CLK_ACLK_BTS_3AA1 94
+#define CLK_ACLK_BTS_3AA0 95
+#define CLK_ACLK_BTS_LITE_D 96
+#define CLK_ACLK_BTS_LITE_B 97
+#define CLK_ACLK_BTS_LITE_A 98
+#define CLK_PCLK_SMMU_3AA1 99
+#define CLK_PCLK_SMMU_3AA0 100
+#define CLK_PCLK_SMMU_LITE_D 101
+#define CLK_PCLK_SMMU_LITE_B 102
+#define CLK_PCLK_SMMU_LITE_A 103
+#define CLK_PCLK_BTS_3AA1 104
+#define CLK_PCLK_BTS_3AA0 105
+#define CLK_PCLK_BTS_LITE_D 106
+#define CLK_PCLK_BTS_LITE_B 107
+#define CLK_PCLK_BTS_LITE_A 108
+#define CLK_PCLK_ASYNCAXI_CAM1 109
+#define CLK_PCLK_ASYNCAXI_3AA1 110
+#define CLK_PCLK_ASYNCAXI_3AA0 111
+#define CLK_PCLK_ASYNCAXI_LITE_D 112
+#define CLK_PCLK_ASYNCAXI_LITE_B 113
+#define CLK_PCLK_ASYNCAXI_LITE_A 114
+#define CLK_PCLK_PMU_CAM0 115
+#define CLK_PCLK_SYSREG_CAM0 116
+#define CLK_PCLK_CMU_CAM0_LOCAL 117
+#define CLK_PCLK_CSIS1 118
+#define CLK_PCLK_CSIS0 119
+#define CLK_PCLK_3AA1 120
+#define CLK_PCLK_3AA0 121
+#define CLK_PCLK_LITE_D 122
+#define CLK_PCLK_LITE_B 123
+#define CLK_PCLK_LITE_A 124
+#define CLK_PHYCLK_RXBYTECLKHS0_S4 125
+#define CLK_PHYCLK_RXBYTECLKHS0_S2A 126
+#define CLK_SCLK_LITE_FREECNT 127
+#define CLK_SCLK_PIXELASYNCM_3AA1 128
+#define CLK_SCLK_PIXELASYNCM_3AA0 129
+#define CLK_SCLK_PIXELASYNCS_3AA0 130
+#define CLK_SCLK_PIXELASYNCM_LITE_C 131
+#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
+#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
+
+#define CAM0_NR_CLK 134
+
+/* CMU_CAM1 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
+
+#define CLK_MOUT_SCLK_ISP_UART_USER 2
+#define CLK_MOUT_SCLK_ISP_SPI1_USER 3
+#define CLK_MOUT_SCLK_ISP_SPI0_USER 4
+#define CLK_MOUT_ACLK_CAM1_333_USER 5
+#define CLK_MOUT_ACLK_CAM1_400_USER 6
+#define CLK_MOUT_ACLK_CAM1_552_USER 7
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER 8
+#define CLK_MOUT_ACLK_CSIS2_B 9
+#define CLK_MOUT_ACLK_CSIS2_A 10
+#define CLK_MOUT_ACLK_FD_B 11
+#define CLK_MOUT_ACLK_FD_A 12
+#define CLK_MOUT_ACLK_LITE_C_B 13
+#define CLK_MOUT_ACLK_LITE_C_A 14
+
+#define CLK_DIV_SCLK_ISP_MPWM 15
+#define CLK_DIV_PCLK_CAM1_83 16
+#define CLK_DIV_PCLK_CAM1_166 17
+#define CLK_DIV_PCLK_DBG_CAM1 18
+#define CLK_DIV_ATCLK_CAM1 19
+#define CLK_DIV_ACLK_CSIS2 20
+#define CLK_DIV_PCLK_FD 21
+#define CLK_DIV_ACLK_FD 22
+#define CLK_DIV_PCLK_LITE_C 23
+#define CLK_DIV_ACLK_LITE_C 24
+
+#define CLK_ACLK_ISP_GIC 25
+#define CLK_ACLK_FD 26
+#define CLK_ACLK_LITE_C 27
+#define CLK_ACLK_CSIS2 28
+#define CLK_ACLK_ASYNCAPBM_FD 29
+#define CLK_ACLK_ASYNCAPBS_FD 30
+#define CLK_ACLK_ASYNCAPBM_LITE_C 31
+#define CLK_ACLK_ASYNCAPBS_LITE_C 32
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H2 33
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H1 34
+#define CLK_ACLK_ASYNCAXIM_CA5 35
+#define CLK_ACLK_ASYNCAXIS_CA5 36
+#define CLK_ACLK_ASYNCAXIS_ISPX2 37
+#define CLK_ACLK_ASYNCAXIS_ISPX1 38
+#define CLK_ACLK_ASYNCAXIS_ISPX0 39
+#define CLK_ACLK_ASYNCAXIM_ISPEX 40
+#define CLK_ACLK_ASYNCAXIM_ISP3P 41
+#define CLK_ACLK_ASYNCAXIS_ISP3P 42
+#define CLK_ACLK_ASYNCAXIM_FD 43
+#define CLK_ACLK_ASYNCAXIS_FD 44
+#define CLK_ACLK_ASYNCAXIM_LITE_C 45
+#define CLK_ACLK_ASYNCAXIS_LITE_C 46
+#define CLK_ACLK_AHB2APB_ISP5P 47
+#define CLK_ACLK_AHB2APB_ISP3P 48
+#define CLK_ACLK_AXI2APB_ISP3P 49
+#define CLK_ACLK_AHB_SFRISP2H 50
+#define CLK_ACLK_AXI_ISP_HX_R 51
+#define CLK_ACLK_AXI_ISP_CX_R 52
+#define CLK_ACLK_AXI_ISP_HX 53
+#define CLK_ACLK_AXI_ISP_CX 54
+#define CLK_ACLK_XIU_ISPX 55
+#define CLK_ACLK_XIU_ISPEX 56
+#define CLK_ACLK_CAM1NP_333 57
+#define CLK_ACLK_CAM1ND_400 58
+#define CLK_ACLK_SMMU_ISPCPU 59
+#define CLK_ACLK_SMMU_FD 60
+#define CLK_ACLK_SMMU_LITE_C 61
+#define CLK_ACLK_BTS_ISP3P 62
+#define CLK_ACLK_BTS_FD 63
+#define CLK_ACLK_BTS_LITE_C 64
+#define CLK_ACLK_AHBDN_SFRISP2H 65
+#define CLK_ACLK_AHBDN_ISP5P 66
+#define CLK_ACLK_AXIUS_ISP3P 67
+#define CLK_ACLK_AXIUS_FD 68
+#define CLK_ACLK_AXIUS_LITE_C 69
+#define CLK_PCLK_SMMU_ISPCPU 70
+#define CLK_PCLK_SMMU_FD 71
+#define CLK_PCLK_SMMU_LITE_C 72
+#define CLK_PCLK_BTS_ISP3P 73
+#define CLK_PCLK_BTS_FD 74
+#define CLK_PCLK_BTS_LITE_C 75
+#define CLK_PCLK_ASYNCAXIM_CA5 76
+#define CLK_PCLK_ASYNCAXIM_ISPEX 77
+#define CLK_PCLK_ASYNCAXIM_ISP3P 78
+#define CLK_PCLK_ASYNCAXIM_FD 79
+#define CLK_PCLK_ASYNCAXIM_LITE_C 80
+#define CLK_PCLK_PMU_CAM1 81
+#define CLK_PCLK_SYSREG_CAM1 82
+#define CLK_PCLK_CMU_CAM1_LOCAL 83
+#define CLK_PCLK_ISP_MCTADC 84
+#define CLK_PCLK_ISP_WDT 85
+#define CLK_PCLK_ISP_PWM 86
+#define CLK_PCLK_ISP_UART 87
+#define CLK_PCLK_ISP_MCUCTL 88
+#define CLK_PCLK_ISP_SPI1 89
+#define CLK_PCLK_ISP_SPI0 90
+#define CLK_PCLK_ISP_I2C2 91
+#define CLK_PCLK_ISP_I2C1 92
+#define CLK_PCLK_ISP_I2C0 93
+#define CLK_PCLK_ISP_MPWM 94
+#define CLK_PCLK_FD 95
+#define CLK_PCLK_LITE_C 96
+#define CLK_PCLK_CSIS2 97
+#define CLK_SCLK_ISP_I2C2 98
+#define CLK_SCLK_ISP_I2C1 99
+#define CLK_SCLK_ISP_I2C0 100
+#define CLK_SCLK_ISP_PWM 101
+#define CLK_PHYCLK_RXBYTECLKHS0_S2B 102
+#define CLK_SCLK_LITE_C_FREECNT 103
+#define CLK_SCLK_PIXELASYNCM_FD 104
+#define CLK_SCLK_ISP_MCTADC 105
+#define CLK_SCLK_ISP_UART 106
+#define CLK_SCLK_ISP_SPI1 107
+#define CLK_SCLK_ISP_SPI0 108
+#define CLK_SCLK_ISP_MPWM 109
+#define CLK_PCLK_DBG_ISP 110
+#define CLK_ATCLK_ISP 111
+#define CLK_SCLK_ISP_CA5 112
+
+#define CAM1_NR_CLK 113
+
+/* CMU_IMEM */
+#define CLK_ACLK_SLIMSSS 2
+#define CLK_PCLK_SLIMSSS 35
+
+#define IMEM_NR_CLK 36
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
new file mode 100644
index 0000000..fce33c7
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Naveen Krishna Ch
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H
+#define _DT_BINDINGS_CLOCK_EXYNOS7_H
+
+/* TOPC */
+#define DOUT_ACLK_PERIS 1
+#define DOUT_SCLK_BUS0_PLL 2
+#define DOUT_SCLK_BUS1_PLL 3
+#define DOUT_SCLK_CC_PLL 4
+#define DOUT_SCLK_MFC_PLL 5
+#define DOUT_ACLK_CCORE_133 6
+#define DOUT_ACLK_MSCL_532 7
+#define ACLK_MSCL_532 8
+#define DOUT_SCLK_AUD_PLL 9
+#define FOUT_AUD_PLL 10
+#define SCLK_AUD_PLL 11
+#define SCLK_MFC_PLL_B 12
+#define SCLK_MFC_PLL_A 13
+#define SCLK_BUS1_PLL_B 14
+#define SCLK_BUS1_PLL_A 15
+#define SCLK_BUS0_PLL_B 16
+#define SCLK_BUS0_PLL_A 17
+#define SCLK_CC_PLL_B 18
+#define SCLK_CC_PLL_A 19
+#define ACLK_CCORE_133 20
+#define ACLK_PERIS_66 21
+#define TOPC_NR_CLK 22
+
+/* TOP0 */
+#define DOUT_ACLK_PERIC1 1
+#define DOUT_ACLK_PERIC0 2
+#define CLK_SCLK_UART0 3
+#define CLK_SCLK_UART1 4
+#define CLK_SCLK_UART2 5
+#define CLK_SCLK_UART3 6
+#define CLK_SCLK_SPI0 7
+#define CLK_SCLK_SPI1 8
+#define CLK_SCLK_SPI2 9
+#define CLK_SCLK_SPI3 10
+#define CLK_SCLK_SPI4 11
+#define CLK_SCLK_SPDIF 12
+#define CLK_SCLK_PCM1 13
+#define CLK_SCLK_I2S1 14
+#define CLK_ACLK_PERIC0_66 15
+#define CLK_ACLK_PERIC1_66 16
+#define TOP0_NR_CLK 17
+
+/* TOP1 */
+#define DOUT_ACLK_FSYS1_200 1
+#define DOUT_ACLK_FSYS0_200 2
+#define DOUT_SCLK_MMC2 3
+#define DOUT_SCLK_MMC1 4
+#define DOUT_SCLK_MMC0 5
+#define CLK_SCLK_MMC2 6
+#define CLK_SCLK_MMC1 7
+#define CLK_SCLK_MMC0 8
+#define CLK_ACLK_FSYS0_200 9
+#define CLK_ACLK_FSYS1_200 10
+#define CLK_SCLK_PHY_FSYS1 11
+#define CLK_SCLK_PHY_FSYS1_26M 12
+#define MOUT_SCLK_UFSUNIPRO20 13
+#define DOUT_SCLK_UFSUNIPRO20 14
+#define CLK_SCLK_UFSUNIPRO20 15
+#define DOUT_SCLK_PHY_FSYS1 16
+#define DOUT_SCLK_PHY_FSYS1_26M 17
+#define TOP1_NR_CLK 18
+
+/* CCORE */
+#define PCLK_RTC 1
+#define CCORE_NR_CLK 2
+
+/* PERIC0 */
+#define PCLK_UART0 1
+#define SCLK_UART0 2
+#define PCLK_HSI2C0 3
+#define PCLK_HSI2C1 4
+#define PCLK_HSI2C4 5
+#define PCLK_HSI2C5 6
+#define PCLK_HSI2C9 7
+#define PCLK_HSI2C10 8
+#define PCLK_HSI2C11 9
+#define PCLK_PWM 10
+#define SCLK_PWM 11
+#define PCLK_ADCIF 12
+#define PERIC0_NR_CLK 13
+
+/* PERIC1 */
+#define PCLK_UART1 1
+#define PCLK_UART2 2
+#define PCLK_UART3 3
+#define SCLK_UART1 4
+#define SCLK_UART2 5
+#define SCLK_UART3 6
+#define PCLK_HSI2C2 7
+#define PCLK_HSI2C3 8
+#define PCLK_HSI2C6 9
+#define PCLK_HSI2C7 10
+#define PCLK_HSI2C8 11
+#define PCLK_SPI0 12
+#define PCLK_SPI1 13
+#define PCLK_SPI2 14
+#define PCLK_SPI3 15
+#define PCLK_SPI4 16
+#define SCLK_SPI0 17
+#define SCLK_SPI1 18
+#define SCLK_SPI2 19
+#define SCLK_SPI3 20
+#define SCLK_SPI4 21
+#define PCLK_I2S1 22
+#define PCLK_PCM1 23
+#define PCLK_SPDIF 24
+#define SCLK_I2S1 25
+#define SCLK_PCM1 26
+#define SCLK_SPDIF 27
+#define PERIC1_NR_CLK 28
+
+/* PERIS */
+#define PCLK_CHIPID 1
+#define SCLK_CHIPID 2
+#define PCLK_WDT 3
+#define PCLK_TMU 4
+#define SCLK_TMU 5
+#define PERIS_NR_CLK 6
+
+/* FSYS0 */
+#define ACLK_MMC2 1
+#define ACLK_AXIUS_USBDRD30X_FSYS0X 2
+#define ACLK_USBDRD300 3
+#define SCLK_USBDRD300_SUSPENDCLK 4
+#define SCLK_USBDRD300_REFCLK 5
+#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER 6
+#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER 7
+#define OSCCLK_PHY_CLKOUT_USB30_PHY 8
+#define ACLK_PDMA0 9
+#define ACLK_PDMA1 10
+#define FSYS0_NR_CLK 11
+
+/* FSYS1 */
+#define ACLK_MMC1 1
+#define ACLK_MMC0 2
+#define PHYCLK_UFS20_TX0_SYMBOL 3
+#define PHYCLK_UFS20_RX0_SYMBOL 4
+#define PHYCLK_UFS20_RX1_SYMBOL 5
+#define ACLK_UFS20_LINK 6
+#define SCLK_UFSUNIPRO20_USER 7
+#define PHYCLK_UFS20_RX1_SYMBOL_USER 8
+#define PHYCLK_UFS20_RX0_SYMBOL_USER 9
+#define PHYCLK_UFS20_TX0_SYMBOL_USER 10
+#define OSCCLK_PHY_CLKOUT_EMBEDDED_COMBO_PHY 11
+#define SCLK_COMBO_PHY_EMBEDDED_26M 12
+#define DOUT_PCLK_FSYS1 13
+#define PCLK_GPIO_FSYS1 14
+#define MOUT_FSYS1_PHYCLK_SEL1 15
+#define FSYS1_NR_CLK 16
+
+/* MSCL */
+#define USERMUX_ACLK_MSCL_532 1
+#define DOUT_PCLK_MSCL 2
+#define ACLK_MSCL_0 3
+#define ACLK_MSCL_1 4
+#define ACLK_JPEG 5
+#define ACLK_G2D 6
+#define ACLK_LH_ASYNC_SI_MSCL_0 7
+#define ACLK_LH_ASYNC_SI_MSCL_1 8
+#define ACLK_AXI2ACEL_BRIDGE 9
+#define ACLK_XIU_MSCLX_0 10
+#define ACLK_XIU_MSCLX_1 11
+#define ACLK_QE_MSCL_0 12
+#define ACLK_QE_MSCL_1 13
+#define ACLK_QE_JPEG 14
+#define ACLK_QE_G2D 15
+#define ACLK_PPMU_MSCL_0 16
+#define ACLK_PPMU_MSCL_1 17
+#define ACLK_MSCLNP_133 18
+#define ACLK_AHB2APB_MSCL0P 19
+#define ACLK_AHB2APB_MSCL1P 20
+
+#define PCLK_MSCL_0 21
+#define PCLK_MSCL_1 22
+#define PCLK_JPEG 23
+#define PCLK_G2D 24
+#define PCLK_QE_MSCL_0 25
+#define PCLK_QE_MSCL_1 26
+#define PCLK_QE_JPEG 27
+#define PCLK_QE_G2D 28
+#define PCLK_PPMU_MSCL_0 29
+#define PCLK_PPMU_MSCL_1 30
+#define PCLK_AXI2ACEL_BRIDGE 31
+#define PCLK_PMU_MSCL 32
+#define MSCL_NR_CLK 33
+
+/* AUD */
+#define SCLK_I2S 1
+#define SCLK_PCM 2
+#define PCLK_I2S 3
+#define PCLK_PCM 4
+#define ACLK_ADMA 5
+#define AUD_NR_CLK 6
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
new file mode 100644
index 0000000..e916e49
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2016 BayLibre, SAS
+ * Author: Neil Armstrong
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Qiufang Dai
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_G12A_AOCLK
+
+#define CLKID_AO_AHB 0
+#define CLKID_AO_IR_IN 1
+#define CLKID_AO_I2C_M0 2
+#define CLKID_AO_I2C_S0 3
+#define CLKID_AO_UART 4
+#define CLKID_AO_PROD_I2C 5
+#define CLKID_AO_UART2 6
+#define CLKID_AO_IR_OUT 7
+#define CLKID_AO_SAR_ADC 8
+#define CLKID_AO_MAILBOX 9
+#define CLKID_AO_M3 10
+#define CLKID_AO_AHB_SRAM 11
+#define CLKID_AO_RTI 12
+#define CLKID_AO_M4_FCLK 13
+#define CLKID_AO_M4_HCLK 14
+#define CLKID_AO_CLK81 15
+#define CLKID_AO_SAR_ADC_SEL 16
+#define CLKID_AO_SAR_ADC_CLK 18
+#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K 23
+#define CLKID_AO_CEC 27
+#define CLKID_AO_CTS_RTC_OSCIN 28
+
+#endif
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
new file mode 100644
index 0000000..0837c1a
--- /dev/null
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Meson-G12A clock tree IDs
+ *
+ * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __G12A_CLKC_H
+#define __G12A_CLKC_H
+
+#define CLKID_SYS_PLL 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2 2
+#define CLKID_FCLK_DIV3 3
+#define CLKID_FCLK_DIV4 4
+#define CLKID_FCLK_DIV5 5
+#define CLKID_FCLK_DIV7 6
+#define CLKID_GP0_PLL 7
+#define CLKID_CLK81 10
+#define CLKID_MPLL0 11
+#define CLKID_MPLL1 12
+#define CLKID_MPLL2 13
+#define CLKID_MPLL3 14
+#define CLKID_DDR 15
+#define CLKID_DOS 16
+#define CLKID_AUDIO_LOCKER 17
+#define CLKID_MIPI_DSI_HOST 18
+#define CLKID_ETH_PHY 19
+#define CLKID_ISA 20
+#define CLKID_PL301 21
+#define CLKID_PERIPHS 22
+#define CLKID_SPICC0 23
+#define CLKID_I2C 24
+#define CLKID_SANA 25
+#define CLKID_SD 26
+#define CLKID_RNG0 27
+#define CLKID_UART0 28
+#define CLKID_SPICC1 29
+#define CLKID_HIU_IFACE 30
+#define CLKID_MIPI_DSI_PHY 31
+#define CLKID_ASSIST_MISC 32
+#define CLKID_SD_EMMC_A 33
+#define CLKID_SD_EMMC_B 34
+#define CLKID_SD_EMMC_C 35
+#define CLKID_AUDIO_CODEC 36
+#define CLKID_AUDIO 37
+#define CLKID_ETH 38
+#define CLKID_DEMUX 39
+#define CLKID_AUDIO_IFIFO 40
+#define CLKID_ADC 41
+#define CLKID_UART1 42
+#define CLKID_G2D 43
+#define CLKID_RESET 44
+#define CLKID_PCIE_COMB 45
+#define CLKID_PARSER 46
+#define CLKID_USB 47
+#define CLKID_PCIE_PHY 48
+#define CLKID_AHB_ARB0 49
+#define CLKID_AHB_DATA_BUS 50
+#define CLKID_AHB_CTRL_BUS 51
+#define CLKID_HTX_HDCP22 52
+#define CLKID_HTX_PCLK 53
+#define CLKID_BT656 54
+#define CLKID_USB1_DDR_BRIDGE 55
+#define CLKID_MMC_PCLK 56
+#define CLKID_UART2 57
+#define CLKID_VPU_INTR 58
+#define CLKID_GIC 59
+#define CLKID_SD_EMMC_A_CLK0 60
+#define CLKID_SD_EMMC_B_CLK0 61
+#define CLKID_SD_EMMC_C_CLK0 62
+#define CLKID_HIFI_PLL 74
+#define CLKID_VCLK2_VENCI0 80
+#define CLKID_VCLK2_VENCI1 81
+#define CLKID_VCLK2_VENCP0 82
+#define CLKID_VCLK2_VENCP1 83
+#define CLKID_VCLK2_VENCT0 84
+#define CLKID_VCLK2_VENCT1 85
+#define CLKID_VCLK2_OTHER 86
+#define CLKID_VCLK2_ENCI 87
+#define CLKID_VCLK2_ENCP 88
+#define CLKID_DAC_CLK 89
+#define CLKID_AOCLK 90
+#define CLKID_IEC958 91
+#define CLKID_ENC480P 92
+#define CLKID_RNG1 93
+#define CLKID_VCLK2_ENCT 94
+#define CLKID_VCLK2_ENCL 95
+#define CLKID_VCLK2_VENCLMMC 96
+#define CLKID_VCLK2_VENCL 97
+#define CLKID_VCLK2_OTHER1 98
+#define CLKID_FCLK_DIV2P5 99
+#define CLKID_DMA 105
+#define CLKID_EFUSE 106
+#define CLKID_ROM_BOOT 107
+#define CLKID_RESET_SEC 108
+#define CLKID_SEC_AHB_APB3 109
+#define CLKID_VPU_0_SEL 110
+#define CLKID_VPU_0 112
+#define CLKID_VPU_1_SEL 113
+#define CLKID_VPU_1 115
+#define CLKID_VPU 116
+#define CLKID_VAPB_0_SEL 117
+#define CLKID_VAPB_0 119
+#define CLKID_VAPB_1_SEL 120
+#define CLKID_VAPB_1 122
+#define CLKID_VAPB_SEL 123
+#define CLKID_VAPB 124
+#define CLKID_HDMI_PLL 128
+#define CLKID_VID_PLL 129
+#define CLKID_VCLK 138
+#define CLKID_VCLK2 139
+#define CLKID_VCLK_DIV1 148
+#define CLKID_VCLK_DIV2 149
+#define CLKID_VCLK_DIV4 150
+#define CLKID_VCLK_DIV6 151
+#define CLKID_VCLK_DIV12 152
+#define CLKID_VCLK2_DIV1 153
+#define CLKID_VCLK2_DIV2 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6 156
+#define CLKID_VCLK2_DIV12 157
+#define CLKID_CTS_ENCI 162
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_VDAC 164
+#define CLKID_HDMI_TX 165
+#define CLKID_HDMI 168
+#define CLKID_MALI_0_SEL 169
+#define CLKID_MALI_0 171
+#define CLKID_MALI_1_SEL 172
+#define CLKID_MALI_1 174
+#define CLKID_MALI 175
+#define CLKID_MPLL_50M 177
+#define CLKID_CPU_CLK 187
+#define CLKID_PCIE_PLL 201
+#define CLKID_VDEC_1 204
+#define CLKID_VDEC_HEVC 207
+#define CLKID_VDEC_HEVCF 210
+#define CLKID_TS 212
+#define CLKID_CPUB_CLK 224
+#define CLKID_GP1_PLL 243
+#define CLKID_DSU_CLK 252
+#define CLKID_CPU1_CLK 253
+#define CLKID_CPU2_CLK 254
+#define CLKID_CPU3_CLK 255
+
+#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h
new file mode 100644
index 0000000..ec3b263
--- /dev/null
+++ b/include/dt-bindings/clock/gxbb-aoclkc.h
@@ -0,0 +1,74 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see .
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK
+
+#define CLKID_AO_REMOTE 0
+#define CLKID_AO_I2C_MASTER 1
+#define CLKID_AO_I2C_SLAVE 2
+#define CLKID_AO_UART1 3
+#define CLKID_AO_UART2 4
+#define CLKID_AO_IR_BLASTER 5
+#define CLKID_AO_CEC_32K 6
+#define CLKID_AO_CTS_OSCIN 7
+#define CLKID_AO_32K_PRE 8
+#define CLKID_AO_32K_DIV 9
+#define CLKID_AO_32K_SEL 10
+#define CLKID_AO_32K 11
+#define CLKID_AO_CTS_RTC_OSCIN 12
+#define CLKID_AO_CLK81 13
+
+#endif
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
new file mode 100644
index 0000000..db0763e
--- /dev/null
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXBB clock tree IDs
+ */
+
+#ifndef __GXBB_CLKC_H
+#define __GXBB_CLKC_H
+
+#define CLKID_SYS_PLL 0
+#define CLKID_HDMI_PLL 2
+#define CLKID_FIXED_PLL 3
+#define CLKID_FCLK_DIV2 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4 6
+#define CLKID_FCLK_DIV5 7
+#define CLKID_FCLK_DIV7 8
+#define CLKID_GP0_PLL 9
+#define CLKID_CLK81 12
+#define CLKID_MPLL0 13
+#define CLKID_MPLL1 14
+#define CLKID_MPLL2 15
+#define CLKID_DDR 16
+#define CLKID_DOS 17
+#define CLKID_ISA 18
+#define CLKID_PL301 19
+#define CLKID_PERIPHS 20
+#define CLKID_SPICC 21
+#define CLKID_I2C 22
+#define CLKID_SAR_ADC 23
+#define CLKID_SMART_CARD 24
+#define CLKID_RNG0 25
+#define CLKID_UART0 26
+#define CLKID_SDHC 27
+#define CLKID_STREAM 28
+#define CLKID_ASYNC_FIFO 29
+#define CLKID_SDIO 30
+#define CLKID_ABUF 31
+#define CLKID_HIU_IFACE 32
+#define CLKID_ASSIST_MISC 33
+#define CLKID_SPI 34
+#define CLKID_ETH 36
+#define CLKID_I2S_SPDIF 35
+#define CLKID_DEMUX 37
+#define CLKID_AIU_GLUE 38
+#define CLKID_IEC958 39
+#define CLKID_I2S_OUT 40
+#define CLKID_AMCLK 41
+#define CLKID_AIFIFO2 42
+#define CLKID_MIXER 43
+#define CLKID_MIXER_IFACE 44
+#define CLKID_ADC 45
+#define CLKID_BLKMV 46
+#define CLKID_AIU 47
+#define CLKID_UART1 48
+#define CLKID_G2D 49
+#define CLKID_USB0 50
+#define CLKID_USB1 51
+#define CLKID_RESET 52
+#define CLKID_NAND 53
+#define CLKID_DOS_PARSER 54
+#define CLKID_USB 55
+#define CLKID_VDIN1 56
+#define CLKID_AHB_ARB0 57
+#define CLKID_EFUSE 58
+#define CLKID_BOOT_ROM 59
+#define CLKID_AHB_DATA_BUS 60
+#define CLKID_AHB_CTRL_BUS 61
+#define CLKID_HDMI_INTR_SYNC 62
+#define CLKID_HDMI_PCLK 63
+#define CLKID_USB1_DDR_BRIDGE 64
+#define CLKID_USB0_DDR_BRIDGE 65
+#define CLKID_MMC_PCLK 66
+#define CLKID_DVIN 67
+#define CLKID_UART2 68
+#define CLKID_SANA 69
+#define CLKID_VPU_INTR 70
+#define CLKID_SEC_AHB_AHB3_BRIDGE 71
+#define CLKID_CLK81_A53 72
+#define CLKID_VCLK2_VENCI0 73
+#define CLKID_VCLK2_VENCI1 74
+#define CLKID_VCLK2_VENCP0 75
+#define CLKID_VCLK2_VENCP1 76
+#define CLKID_GCLK_VENCI_INT0 77
+#define CLKID_GCLK_VENCI_INT 78
+#define CLKID_DAC_CLK 79
+#define CLKID_AOCLK_GATE 80
+#define CLKID_IEC958_GATE 81
+#define CLKID_ENC480P 82
+#define CLKID_RNG1 83
+#define CLKID_GCLK_VENCI_INT1 84
+#define CLKID_VCLK2_VENCLMCC 85
+#define CLKID_VCLK2_VENCL 86
+#define CLKID_VCLK_OTHER 87
+#define CLKID_EDP 88
+#define CLKID_AO_MEDIA_CPU 89
+#define CLKID_AO_AHB_SRAM 90
+#define CLKID_AO_AHB_BUS 91
+#define CLKID_AO_IFACE 92
+#define CLKID_AO_I2C 93
+#define CLKID_SD_EMMC_A 94
+#define CLKID_SD_EMMC_B 95
+#define CLKID_SD_EMMC_C 96
+#define CLKID_SAR_ADC_CLK 97
+#define CLKID_SAR_ADC_SEL 98
+#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0 102
+#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1 105
+#define CLKID_MALI 106
+#define CLKID_CTS_AMCLK 107
+#define CLKID_CTS_MCLK_I958 110
+#define CLKID_CTS_I958 113
+#define CLKID_32K_CLK 114
+#define CLKID_SD_EMMC_A_CLK0 119
+#define CLKID_SD_EMMC_B_CLK0 122
+#define CLKID_SD_EMMC_C_CLK0 125
+#define CLKID_VPU_0_SEL 126
+#define CLKID_VPU_0 128
+#define CLKID_VPU_1_SEL 129
+#define CLKID_VPU_1 131
+#define CLKID_VPU 132
+#define CLKID_VAPB_0_SEL 133
+#define CLKID_VAPB_0 135
+#define CLKID_VAPB_1_SEL 136
+#define CLKID_VAPB_1 138
+#define CLKID_VAPB_SEL 139
+#define CLKID_VAPB 140
+#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK 159
+#define CLKID_VID_PLL 166
+#define CLKID_VCLK 175
+#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV1 185
+#define CLKID_VCLK_DIV2 186
+#define CLKID_VCLK_DIV4 187
+#define CLKID_VCLK_DIV6 188
+#define CLKID_VCLK_DIV12 189
+#define CLKID_VCLK2_DIV1 190
+#define CLKID_VCLK2_DIV2 191
+#define CLKID_VCLK2_DIV4 192
+#define CLKID_VCLK2_DIV6 193
+#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI 199
+#define CLKID_CTS_ENCP 200
+#define CLKID_CTS_VDAC 201
+#define CLKID_HDMI_TX 202
+#define CLKID_HDMI 205
+
+#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/hi3516cv300-clock.h b/include/dt-bindings/clock/hi3516cv300-clock.h
new file mode 100644
index 0000000..ccea1ba
--- /dev/null
+++ b/include/dt-bindings/clock/hi3516cv300-clock.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ */
+
+#ifndef __DTS_HI3516CV300_CLOCK_H
+#define __DTS_HI3516CV300_CLOCK_H
+
+/* hi3516CV300 core CRG */
+#define HI3516CV300_APB_CLK 0
+#define HI3516CV300_UART0_CLK 1
+#define HI3516CV300_UART1_CLK 2
+#define HI3516CV300_UART2_CLK 3
+#define HI3516CV300_SPI0_CLK 4
+#define HI3516CV300_SPI1_CLK 5
+#define HI3516CV300_FMC_CLK 6
+#define HI3516CV300_MMC0_CLK 7
+#define HI3516CV300_MMC1_CLK 8
+#define HI3516CV300_MMC2_CLK 9
+#define HI3516CV300_MMC3_CLK 10
+#define HI3516CV300_ETH_CLK 11
+#define HI3516CV300_ETH_MACIF_CLK 12
+#define HI3516CV300_DMAC_CLK 13
+#define HI3516CV300_PWM_CLK 14
+#define HI3516CV300_USB2_BUS_CLK 15
+#define HI3516CV300_USB2_OHCI48M_CLK 16
+#define HI3516CV300_USB2_OHCI12M_CLK 17
+#define HI3516CV300_USB2_OTG_UTMI_CLK 18
+#define HI3516CV300_USB2_HST_PHY_CLK 19
+#define HI3516CV300_USB2_UTMI0_CLK 20
+#define HI3516CV300_USB2_PHY_CLK 21
+
+/* hi3516CV300 sysctrl CRG */
+#define HI3516CV300_WDT_CLK 1
+
+#endif /* __DTS_HI3516CV300_CLOCK_H */
diff --git a/include/dt-bindings/clock/hi3519-clock.h b/include/dt-bindings/clock/hi3519-clock.h
new file mode 100644
index 0000000..4335410
--- /dev/null
+++ b/include/dt-bindings/clock/hi3519-clock.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2015 HiSilicon Technologies Co., Ltd.
+ */
+
+#ifndef __DTS_HI3519_CLOCK_H
+#define __DTS_HI3519_CLOCK_H
+
+#define HI3519_FMC_CLK 1
+#define HI3519_SPI0_CLK 2
+#define HI3519_SPI1_CLK 3
+#define HI3519_SPI2_CLK 4
+#define HI3519_UART0_CLK 5
+#define HI3519_UART1_CLK 6
+#define HI3519_UART2_CLK 7
+#define HI3519_UART3_CLK 8
+#define HI3519_UART4_CLK 9
+#define HI3519_PWM_CLK 10
+#define HI3519_DMA_CLK 11
+#define HI3519_IR_CLK 12
+#define HI3519_ETH_PHY_CLK 13
+#define HI3519_ETH_MAC_CLK 14
+#define HI3519_ETH_MACIF_CLK 15
+#define HI3519_USB2_BUS_CLK 16
+#define HI3519_USB2_PORT_CLK 17
+#define HI3519_USB3_CLK 18
+
+#endif /* __DTS_HI3519_CLOCK_H */
diff --git a/include/dt-bindings/clock/hi3620-clock.h b/include/dt-bindings/clock/hi3620-clock.h
new file mode 100644
index 0000000..f9dc6f6
--- /dev/null
+++ b/include/dt-bindings/clock/hi3620-clock.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2012-2013 Hisilicon Limited.
+ * Copyright (c) 2012-2013 Linaro Limited.
+ *
+ * Author: Haojian Zhuang
+ * Xin Li
+ */
+
+#ifndef __DTS_HI3620_CLOCK_H
+#define __DTS_HI3620_CLOCK_H
+
+#define HI3620_NONE_CLOCK 0
+
+/* fixed rate & fixed factor clocks */
+#define HI3620_OSC32K 1
+#define HI3620_OSC26M 2
+#define HI3620_PCLK 3
+#define HI3620_PLL_ARM0 4
+#define HI3620_PLL_ARM1 5
+#define HI3620_PLL_PERI 6
+#define HI3620_PLL_USB 7
+#define HI3620_PLL_HDMI 8
+#define HI3620_PLL_GPU 9
+#define HI3620_RCLK_TCXO 10
+#define HI3620_RCLK_CFGAXI 11
+#define HI3620_RCLK_PICO 12
+
+/* mux clocks */
+#define HI3620_TIMER0_MUX 32
+#define HI3620_TIMER1_MUX 33
+#define HI3620_TIMER2_MUX 34
+#define HI3620_TIMER3_MUX 35
+#define HI3620_TIMER4_MUX 36
+#define HI3620_TIMER5_MUX 37
+#define HI3620_TIMER6_MUX 38
+#define HI3620_TIMER7_MUX 39
+#define HI3620_TIMER8_MUX 40
+#define HI3620_TIMER9_MUX 41
+#define HI3620_UART0_MUX 42
+#define HI3620_UART1_MUX 43
+#define HI3620_UART2_MUX 44
+#define HI3620_UART3_MUX 45
+#define HI3620_UART4_MUX 46
+#define HI3620_SPI0_MUX 47
+#define HI3620_SPI1_MUX 48
+#define HI3620_SPI2_MUX 49
+#define HI3620_SAXI_MUX 50
+#define HI3620_PWM0_MUX 51
+#define HI3620_PWM1_MUX 52
+#define HI3620_SD_MUX 53
+#define HI3620_MMC1_MUX 54
+#define HI3620_MMC1_MUX2 55
+#define HI3620_G2D_MUX 56
+#define HI3620_VENC_MUX 57
+#define HI3620_VDEC_MUX 58
+#define HI3620_VPP_MUX 59
+#define HI3620_EDC0_MUX 60
+#define HI3620_LDI0_MUX 61
+#define HI3620_EDC1_MUX 62
+#define HI3620_LDI1_MUX 63
+#define HI3620_RCLK_HSIC 64
+#define HI3620_MMC2_MUX 65
+#define HI3620_MMC3_MUX 66
+
+/* divider clocks */
+#define HI3620_SHAREAXI_DIV 128
+#define HI3620_CFGAXI_DIV 129
+#define HI3620_SD_DIV 130
+#define HI3620_MMC1_DIV 131
+#define HI3620_HSIC_DIV 132
+#define HI3620_MMC2_DIV 133
+#define HI3620_MMC3_DIV 134
+
+/* gate clocks */
+#define HI3620_TIMERCLK01 160
+#define HI3620_TIMER_RCLK01 161
+#define HI3620_TIMERCLK23 162
+#define HI3620_TIMER_RCLK23 163
+#define HI3620_TIMERCLK45 164
+#define HI3620_TIMERCLK67 165
+#define HI3620_TIMERCLK89 166
+#define HI3620_RTCCLK 167
+#define HI3620_KPC_CLK 168
+#define HI3620_GPIOCLK0 169
+#define HI3620_GPIOCLK1 170
+#define HI3620_GPIOCLK2 171
+#define HI3620_GPIOCLK3 172
+#define HI3620_GPIOCLK4 173
+#define HI3620_GPIOCLK5 174
+#define HI3620_GPIOCLK6 175
+#define HI3620_GPIOCLK7 176
+#define HI3620_GPIOCLK8 177
+#define HI3620_GPIOCLK9 178
+#define HI3620_GPIOCLK10 179
+#define HI3620_GPIOCLK11 180
+#define HI3620_GPIOCLK12 181
+#define HI3620_GPIOCLK13 182
+#define HI3620_GPIOCLK14 183
+#define HI3620_GPIOCLK15 184
+#define HI3620_GPIOCLK16 185
+#define HI3620_GPIOCLK17 186
+#define HI3620_GPIOCLK18 187
+#define HI3620_GPIOCLK19 188
+#define HI3620_GPIOCLK20 189
+#define HI3620_GPIOCLK21 190
+#define HI3620_DPHY0_CLK 191
+#define HI3620_DPHY1_CLK 192
+#define HI3620_DPHY2_CLK 193
+#define HI3620_USBPHY_CLK 194
+#define HI3620_ACP_CLK 195
+#define HI3620_PWMCLK0 196
+#define HI3620_PWMCLK1 197
+#define HI3620_UARTCLK0 198
+#define HI3620_UARTCLK1 199
+#define HI3620_UARTCLK2 200
+#define HI3620_UARTCLK3 201
+#define HI3620_UARTCLK4 202
+#define HI3620_SPICLK0 203
+#define HI3620_SPICLK1 204
+#define HI3620_SPICLK2 205
+#define HI3620_I2CCLK0 206
+#define HI3620_I2CCLK1 207
+#define HI3620_I2CCLK2 208
+#define HI3620_I2CCLK3 209
+#define HI3620_SCI_CLK 210
+#define HI3620_DDRC_PER_CLK 211
+#define HI3620_DMAC_CLK 212
+#define HI3620_USB2DVC_CLK 213
+#define HI3620_SD_CLK 214
+#define HI3620_MMC_CLK1 215
+#define HI3620_MMC_CLK2 216
+#define HI3620_MMC_CLK3 217
+#define HI3620_MCU_CLK 218
+
+#define HI3620_SD_CIUCLK 0
+#define HI3620_MMC_CIUCLK1 1
+#define HI3620_MMC_CIUCLK2 2
+#define HI3620_MMC_CIUCLK3 3
+
+#define HI3620_NR_CLKS 219
+
+#endif /* __DTS_HI3620_CLOCK_H */
diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h
new file mode 100644
index 0000000..e1374e1
--- /dev/null
+++ b/include/dt-bindings/clock/hi3660-clock.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ */
+
+#ifndef __DTS_HI3660_CLOCK_H
+#define __DTS_HI3660_CLOCK_H
+
+/* fixed rate clocks */
+#define HI3660_CLKIN_SYS 0
+#define HI3660_CLKIN_REF 1
+#define HI3660_CLK_FLL_SRC 2
+#define HI3660_CLK_PPLL0 3
+#define HI3660_CLK_PPLL1 4
+#define HI3660_CLK_PPLL2 5
+#define HI3660_CLK_PPLL3 6
+#define HI3660_CLK_SCPLL 7
+#define HI3660_PCLK 8
+#define HI3660_CLK_UART0_DBG 9
+#define HI3660_CLK_UART6 10
+#define HI3660_OSC32K 11
+#define HI3660_OSC19M 12
+#define HI3660_CLK_480M 13
+#define HI3660_CLK_INV 14
+
+/* clk in crgctrl */
+#define HI3660_FACTOR_UART3 15
+#define HI3660_CLK_FACTOR_MMC 16
+#define HI3660_CLK_GATE_I2C0 17
+#define HI3660_CLK_GATE_I2C1 18
+#define HI3660_CLK_GATE_I2C2 19
+#define HI3660_CLK_GATE_I2C6 20
+#define HI3660_CLK_DIV_SYSBUS 21
+#define HI3660_CLK_DIV_320M 22
+#define HI3660_CLK_DIV_A53 23
+#define HI3660_CLK_GATE_SPI0 24
+#define HI3660_CLK_GATE_SPI2 25
+#define HI3660_PCIEPHY_REF 26
+#define HI3660_CLK_ABB_USB 27
+#define HI3660_HCLK_GATE_SDIO0 28
+#define HI3660_HCLK_GATE_SD 29
+#define HI3660_CLK_GATE_AOMM 30
+#define HI3660_PCLK_GPIO0 31
+#define HI3660_PCLK_GPIO1 32
+#define HI3660_PCLK_GPIO2 33
+#define HI3660_PCLK_GPIO3 34
+#define HI3660_PCLK_GPIO4 35
+#define HI3660_PCLK_GPIO5 36
+#define HI3660_PCLK_GPIO6 37
+#define HI3660_PCLK_GPIO7 38
+#define HI3660_PCLK_GPIO8 39
+#define HI3660_PCLK_GPIO9 40
+#define HI3660_PCLK_GPIO10 41
+#define HI3660_PCLK_GPIO11 42
+#define HI3660_PCLK_GPIO12 43
+#define HI3660_PCLK_GPIO13 44
+#define HI3660_PCLK_GPIO14 45
+#define HI3660_PCLK_GPIO15 46
+#define HI3660_PCLK_GPIO16 47
+#define HI3660_PCLK_GPIO17 48
+#define HI3660_PCLK_GPIO18 49
+#define HI3660_PCLK_GPIO19 50
+#define HI3660_PCLK_GPIO20 51
+#define HI3660_PCLK_GPIO21 52
+#define HI3660_CLK_GATE_SPI3 53
+#define HI3660_CLK_GATE_I2C7 54
+#define HI3660_CLK_GATE_I2C3 55
+#define HI3660_CLK_GATE_SPI1 56
+#define HI3660_CLK_GATE_UART1 57
+#define HI3660_CLK_GATE_UART2 58
+#define HI3660_CLK_GATE_UART4 59
+#define HI3660_CLK_GATE_UART5 60
+#define HI3660_CLK_GATE_I2C4 61
+#define HI3660_CLK_GATE_DMAC 62
+#define HI3660_PCLK_GATE_DSS 63
+#define HI3660_ACLK_GATE_DSS 64
+#define HI3660_CLK_GATE_LDI1 65
+#define HI3660_CLK_GATE_LDI0 66
+#define HI3660_CLK_GATE_VIVOBUS 67
+#define HI3660_CLK_GATE_EDC0 68
+#define HI3660_CLK_GATE_TXDPHY0_CFG 69
+#define HI3660_CLK_GATE_TXDPHY0_REF 70
+#define HI3660_CLK_GATE_TXDPHY1_CFG 71
+#define HI3660_CLK_GATE_TXDPHY1_REF 72
+#define HI3660_ACLK_GATE_USB3OTG 73
+#define HI3660_CLK_GATE_SPI4 74
+#define HI3660_CLK_GATE_SD 75
+#define HI3660_CLK_GATE_SDIO0 76
+#define HI3660_CLK_GATE_UFS_SUBSYS 77
+#define HI3660_PCLK_GATE_DSI0 78
+#define HI3660_PCLK_GATE_DSI1 79
+#define HI3660_ACLK_GATE_PCIE 80
+#define HI3660_PCLK_GATE_PCIE_SYS 81
+#define HI3660_CLK_GATE_PCIEAUX 82
+#define HI3660_PCLK_GATE_PCIE_PHY 83
+#define HI3660_CLK_ANDGT_LDI0 84
+#define HI3660_CLK_ANDGT_LDI1 85
+#define HI3660_CLK_ANDGT_EDC0 86
+#define HI3660_CLK_GATE_UFSPHY_GT 87
+#define HI3660_CLK_ANDGT_MMC 88
+#define HI3660_CLK_ANDGT_SD 89
+#define HI3660_CLK_A53HPM_ANDGT 90
+#define HI3660_CLK_ANDGT_SDIO 91
+#define HI3660_CLK_ANDGT_UART0 92
+#define HI3660_CLK_ANDGT_UART1 93
+#define HI3660_CLK_ANDGT_UARTH 94
+#define HI3660_CLK_ANDGT_SPI 95
+#define HI3660_CLK_VIVOBUS_ANDGT 96
+#define HI3660_CLK_AOMM_ANDGT 97
+#define HI3660_CLK_320M_PLL_GT 98
+#define HI3660_AUTODIV_EMMC0BUS 99
+#define HI3660_AUTODIV_SYSBUS 100
+#define HI3660_CLK_GATE_UFSPHY_CFG 101
+#define HI3660_CLK_GATE_UFSIO_REF 102
+#define HI3660_CLK_MUX_SYSBUS 103
+#define HI3660_CLK_MUX_UART0 104
+#define HI3660_CLK_MUX_UART1 105
+#define HI3660_CLK_MUX_UARTH 106
+#define HI3660_CLK_MUX_SPI 107
+#define HI3660_CLK_MUX_I2C 108
+#define HI3660_CLK_MUX_MMC_PLL 109
+#define HI3660_CLK_MUX_LDI1 110
+#define HI3660_CLK_MUX_LDI0 111
+#define HI3660_CLK_MUX_SD_PLL 112
+#define HI3660_CLK_MUX_SD_SYS 113
+#define HI3660_CLK_MUX_EDC0 114
+#define HI3660_CLK_MUX_SDIO_SYS 115
+#define HI3660_CLK_MUX_SDIO_PLL 116
+#define HI3660_CLK_MUX_VIVOBUS 117
+#define HI3660_CLK_MUX_A53HPM 118
+#define HI3660_CLK_MUX_320M 119
+#define HI3660_CLK_MUX_IOPERI 120
+#define HI3660_CLK_DIV_UART0 121
+#define HI3660_CLK_DIV_UART1 122
+#define HI3660_CLK_DIV_UARTH 123
+#define HI3660_CLK_DIV_MMC 124
+#define HI3660_CLK_DIV_SD 125
+#define HI3660_CLK_DIV_EDC0 126
+#define HI3660_CLK_DIV_LDI0 127
+#define HI3660_CLK_DIV_SDIO 128
+#define HI3660_CLK_DIV_LDI1 129
+#define HI3660_CLK_DIV_SPI 130
+#define HI3660_CLK_DIV_VIVOBUS 131
+#define HI3660_CLK_DIV_I2C 132
+#define HI3660_CLK_DIV_UFSPHY 133
+#define HI3660_CLK_DIV_CFGBUS 134
+#define HI3660_CLK_DIV_MMC0BUS 135
+#define HI3660_CLK_DIV_MMC1BUS 136
+#define HI3660_CLK_DIV_UFSPERI 137
+#define HI3660_CLK_DIV_AOMM 138
+#define HI3660_CLK_DIV_IOPERI 139
+#define HI3660_VENC_VOLT_HOLD 140
+#define HI3660_PERI_VOLT_HOLD 141
+#define HI3660_CLK_GATE_VENC 142
+#define HI3660_CLK_GATE_VDEC 143
+#define HI3660_CLK_ANDGT_VENC 144
+#define HI3660_CLK_ANDGT_VDEC 145
+#define HI3660_CLK_MUX_VENC 146
+#define HI3660_CLK_MUX_VDEC 147
+#define HI3660_CLK_DIV_VENC 148
+#define HI3660_CLK_DIV_VDEC 149
+#define HI3660_CLK_FAC_ISP_SNCLK 150
+#define HI3660_CLK_GATE_ISP_SNCLK0 151
+#define HI3660_CLK_GATE_ISP_SNCLK1 152
+#define HI3660_CLK_GATE_ISP_SNCLK2 153
+#define HI3660_CLK_ANGT_ISP_SNCLK 154
+#define HI3660_CLK_MUX_ISP_SNCLK 155
+#define HI3660_CLK_DIV_ISP_SNCLK 156
+
+/* clk in pmuctrl */
+#define HI3660_GATE_ABB_192 0
+
+/* clk in pctrl */
+#define HI3660_GATE_UFS_TCXO_EN 0
+#define HI3660_GATE_USB_TCXO_EN 1
+
+/* clk in sctrl */
+#define HI3660_PCLK_AO_GPIO0 0
+#define HI3660_PCLK_AO_GPIO1 1
+#define HI3660_PCLK_AO_GPIO2 2
+#define HI3660_PCLK_AO_GPIO3 3
+#define HI3660_PCLK_AO_GPIO4 4
+#define HI3660_PCLK_AO_GPIO5 5
+#define HI3660_PCLK_AO_GPIO6 6
+#define HI3660_PCLK_GATE_MMBUF 7
+#define HI3660_CLK_GATE_DSS_AXI_MM 8
+#define HI3660_PCLK_MMBUF_ANDGT 9
+#define HI3660_CLK_MMBUF_PLL_ANDGT 10
+#define HI3660_CLK_FLL_MMBUF_ANDGT 11
+#define HI3660_CLK_SYS_MMBUF_ANDGT 12
+#define HI3660_CLK_GATE_PCIEPHY_GT 13
+#define HI3660_ACLK_MUX_MMBUF 14
+#define HI3660_CLK_SW_MMBUF 15
+#define HI3660_CLK_DIV_AOBUS 16
+#define HI3660_PCLK_DIV_MMBUF 17
+#define HI3660_ACLK_DIV_MMBUF 18
+#define HI3660_CLK_DIV_PCIEPHY 19
+
+/* clk in iomcu */
+#define HI3660_CLK_I2C0_IOMCU 0
+#define HI3660_CLK_I2C1_IOMCU 1
+#define HI3660_CLK_I2C2_IOMCU 2
+#define HI3660_CLK_I2C6_IOMCU 3
+#define HI3660_CLK_IOMCU_PERI0 4
+
+/* clk in stub clock */
+#define HI3660_CLK_STUB_CLUSTER0 0
+#define HI3660_CLK_STUB_CLUSTER1 1
+#define HI3660_CLK_STUB_GPU 2
+#define HI3660_CLK_STUB_DDR 3
+#define HI3660_CLK_STUB_NUM 4
+
+#endif /* __DTS_HI3660_CLOCK_H */
diff --git a/include/dt-bindings/clock/hi3670-clock.h b/include/dt-bindings/clock/hi3670-clock.h
new file mode 100644
index 0000000..fa48583
--- /dev/null
+++ b/include/dt-bindings/clock/hi3670-clock.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device Tree binding constants for HiSilicon Hi3670 SoC
+ *
+ * Copyright (c) 2001-2021, Huawei Tech. Co., Ltd.
+ * Copyright (c) 2018 Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_HI3670_H
+#define __DT_BINDINGS_CLOCK_HI3670_H
+
+/* clk in stub clock */
+#define HI3670_CLK_STUB_CLUSTER0 0
+#define HI3670_CLK_STUB_CLUSTER1 1
+#define HI3670_CLK_STUB_GPU 2
+#define HI3670_CLK_STUB_DDR 3
+#define HI3670_CLK_STUB_DDR_VOTE 4
+#define HI3670_CLK_STUB_DDR_LIMIT 5
+#define HI3670_CLK_STUB_NUM 6
+
+/* clk in crg clock */
+#define HI3670_CLKIN_SYS 0
+#define HI3670_CLKIN_REF 1
+#define HI3670_CLK_FLL_SRC 2
+#define HI3670_CLK_PPLL0 3
+#define HI3670_CLK_PPLL1 4
+#define HI3670_CLK_PPLL2 5
+#define HI3670_CLK_PPLL3 6
+#define HI3670_CLK_PPLL4 7
+#define HI3670_CLK_PPLL6 8
+#define HI3670_CLK_PPLL7 9
+#define HI3670_CLK_PPLL_PCIE 10
+#define HI3670_CLK_PCIEPLL_REV 11
+#define HI3670_CLK_SCPLL 12
+#define HI3670_PCLK 13
+#define HI3670_CLK_UART0_DBG 14
+#define HI3670_CLK_UART6 15
+#define HI3670_OSC32K 16
+#define HI3670_OSC19M 17
+#define HI3670_CLK_480M 18
+#define HI3670_CLK_INVALID 19
+#define HI3670_CLK_DIV_SYSBUS 20
+#define HI3670_CLK_FACTOR_MMC 21
+#define HI3670_CLK_SD_SYS 22
+#define HI3670_CLK_SDIO_SYS 23
+#define HI3670_CLK_DIV_A53HPM 24
+#define HI3670_CLK_DIV_320M 25
+#define HI3670_PCLK_GATE_UART0 26
+#define HI3670_CLK_FACTOR_UART0 27
+#define HI3670_CLK_FACTOR_USB3PHY_PLL 28
+#define HI3670_CLK_GATE_ABB_USB 29
+#define HI3670_CLK_GATE_UFSPHY_REF 30
+#define HI3670_ICS_VOLT_HIGH 31
+#define HI3670_ICS_VOLT_MIDDLE 32
+#define HI3670_VENC_VOLT_HOLD 33
+#define HI3670_VDEC_VOLT_HOLD 34
+#define HI3670_EDC_VOLT_HOLD 35
+#define HI3670_CLK_ISP_SNCLK_FAC 36
+#define HI3670_CLK_FACTOR_RXDPHY 37
+#define HI3670_AUTODIV_SYSBUS 38
+#define HI3670_AUTODIV_EMMC0BUS 39
+#define HI3670_PCLK_ANDGT_MMC1_PCIE 40
+#define HI3670_CLK_GATE_VCODECBUS_GT 41
+#define HI3670_CLK_ANDGT_SD 42
+#define HI3670_CLK_SD_SYS_GT 43
+#define HI3670_CLK_ANDGT_SDIO 44
+#define HI3670_CLK_SDIO_SYS_GT 45
+#define HI3670_CLK_A53HPM_ANDGT 46
+#define HI3670_CLK_320M_PLL_GT 47
+#define HI3670_CLK_ANDGT_UARTH 48
+#define HI3670_CLK_ANDGT_UARTL 49
+#define HI3670_CLK_ANDGT_UART0 50
+#define HI3670_CLK_ANDGT_SPI 51
HI3670_CLK_ANDGT_PCIEAXI 52 +#define HI3670_CLK_DIV_AO_ASP_GT 53 +#define HI3670_CLK_GATE_CSI_TRANS 54 +#define HI3670_CLK_GATE_DSI_TRANS 55 +#define HI3670_CLK_ANDGT_PTP 56 +#define HI3670_CLK_ANDGT_OUT0 57 +#define HI3670_CLK_ANDGT_OUT1 58 +#define HI3670_CLKGT_DP_AUDIO_PLL_AO 59 +#define HI3670_CLK_ANDGT_VDEC 60 +#define HI3670_CLK_ANDGT_VENC 61 +#define HI3670_CLK_ISP_SNCLK_ANGT 62 +#define HI3670_CLK_ANDGT_RXDPHY 63 +#define HI3670_CLK_ANDGT_ICS 64 +#define HI3670_AUTODIV_DMABUS 65 +#define HI3670_CLK_MUX_SYSBUS 66 +#define HI3670_CLK_MUX_VCODECBUS 67 +#define HI3670_CLK_MUX_SD_SYS 68 +#define HI3670_CLK_MUX_SD_PLL 69 +#define HI3670_CLK_MUX_SDIO_SYS 70 +#define HI3670_CLK_MUX_SDIO_PLL 71 +#define HI3670_CLK_MUX_A53HPM 72 +#define HI3670_CLK_MUX_320M 73 +#define HI3670_CLK_MUX_UARTH 74 +#define HI3670_CLK_MUX_UARTL 75 +#define HI3670_CLK_MUX_UART0 76 +#define HI3670_CLK_MUX_I2C 77 +#define HI3670_CLK_MUX_SPI 78 +#define HI3670_CLK_MUX_PCIEAXI 79 +#define HI3670_CLK_MUX_AO_ASP 80 +#define HI3670_CLK_MUX_VDEC 81 +#define HI3670_CLK_MUX_VENC 82 +#define HI3670_CLK_ISP_SNCLK_MUX0 83 +#define HI3670_CLK_ISP_SNCLK_MUX1 84 +#define HI3670_CLK_ISP_SNCLK_MUX2 85 +#define HI3670_CLK_MUX_RXDPHY_CFG 86 +#define HI3670_CLK_MUX_ICS 87 +#define HI3670_CLK_DIV_CFGBUS 88 +#define HI3670_CLK_DIV_MMC0BUS 89 +#define HI3670_CLK_DIV_MMC1BUS 90 +#define HI3670_PCLK_DIV_MMC1_PCIE 91 +#define HI3670_CLK_DIV_VCODECBUS 92 +#define HI3670_CLK_DIV_SD 93 +#define HI3670_CLK_DIV_SDIO 94 +#define HI3670_CLK_DIV_UARTH 95 +#define HI3670_CLK_DIV_UARTL 96 +#define HI3670_CLK_DIV_UART0 97 +#define HI3670_CLK_DIV_I2C 98 +#define HI3670_CLK_DIV_SPI 99 +#define HI3670_CLK_DIV_PCIEAXI 100 +#define HI3670_CLK_DIV_AO_ASP 101 +#define HI3670_CLK_DIV_CSI_TRANS 102 +#define HI3670_CLK_DIV_DSI_TRANS 103 +#define HI3670_CLK_DIV_PTP 104 +#define HI3670_CLK_DIV_CLKOUT0_PLL 105 +#define HI3670_CLK_DIV_CLKOUT1_PLL 106 +#define HI3670_CLKDIV_DP_AUDIO_PLL_AO 107 +#define HI3670_CLK_DIV_VDEC 108 +#define HI3670_CLK_DIV_VENC 109 +#define HI3670_CLK_ISP_SNCLK_DIV0 110 +#define HI3670_CLK_ISP_SNCLK_DIV1 111 +#define HI3670_CLK_ISP_SNCLK_DIV2 112 +#define HI3670_CLK_DIV_ICS 113 +#define HI3670_PPLL1_EN_ACPU 114 +#define HI3670_PPLL2_EN_ACPU 115 +#define HI3670_PPLL3_EN_ACPU 116 +#define HI3670_PPLL1_GT_CPU 117 +#define HI3670_PPLL2_GT_CPU 118 +#define HI3670_PPLL3_GT_CPU 119 +#define HI3670_CLK_GATE_PPLL2_MEDIA 120 +#define HI3670_CLK_GATE_PPLL3_MEDIA 121 +#define HI3670_CLK_GATE_PPLL4_MEDIA 122 +#define HI3670_CLK_GATE_PPLL6_MEDIA 123 +#define HI3670_CLK_GATE_PPLL7_MEDIA 124 +#define HI3670_PCLK_GPIO0 125 +#define HI3670_PCLK_GPIO1 126 +#define HI3670_PCLK_GPIO2 127 +#define HI3670_PCLK_GPIO3 128 +#define HI3670_PCLK_GPIO4 129 +#define HI3670_PCLK_GPIO5 130 +#define HI3670_PCLK_GPIO6 131 +#define HI3670_PCLK_GPIO7 132 +#define HI3670_PCLK_GPIO8 133 +#define HI3670_PCLK_GPIO9 134 +#define HI3670_PCLK_GPIO10 135 +#define HI3670_PCLK_GPIO11 136 +#define HI3670_PCLK_GPIO12 137 +#define HI3670_PCLK_GPIO13 138 +#define HI3670_PCLK_GPIO14 139 +#define HI3670_PCLK_GPIO15 140 +#define HI3670_PCLK_GPIO16 141 +#define HI3670_PCLK_GPIO17 142 +#define HI3670_PCLK_GPIO20 143 +#define HI3670_PCLK_GPIO21 144 +#define HI3670_PCLK_GATE_DSI0 145 +#define HI3670_PCLK_GATE_DSI1 146 +#define HI3670_HCLK_GATE_USB3OTG 147 +#define HI3670_ACLK_GATE_USB3DVFS 148 +#define HI3670_HCLK_GATE_SDIO 149 +#define HI3670_PCLK_GATE_PCIE_SYS 150 +#define HI3670_PCLK_GATE_PCIE_PHY 151 +#define HI3670_PCLK_GATE_MMC1_PCIE 152 +#define HI3670_PCLK_GATE_MMC0_IOC 153 +#define 
HI3670_PCLK_GATE_MMC1_IOC 154 +#define HI3670_CLK_GATE_DMAC 155 +#define HI3670_CLK_GATE_VCODECBUS2DDR 156 +#define HI3670_CLK_CCI400_BYPASS 157 +#define HI3670_CLK_GATE_CCI400 158 +#define HI3670_CLK_GATE_SD 159 +#define HI3670_HCLK_GATE_SD 160 +#define HI3670_CLK_GATE_SDIO 161 +#define HI3670_CLK_GATE_A57HPM 162 +#define HI3670_CLK_GATE_A53HPM 163 +#define HI3670_CLK_GATE_PA_A53 164 +#define HI3670_CLK_GATE_PA_A57 165 +#define HI3670_CLK_GATE_PA_G3D 166 +#define HI3670_CLK_GATE_GPUHPM 167 +#define HI3670_CLK_GATE_PERIHPM 168 +#define HI3670_CLK_GATE_AOHPM 169 +#define HI3670_CLK_GATE_UART1 170 +#define HI3670_CLK_GATE_UART4 171 +#define HI3670_PCLK_GATE_UART1 172 +#define HI3670_PCLK_GATE_UART4 173 +#define HI3670_CLK_GATE_UART2 174 +#define HI3670_CLK_GATE_UART5 175 +#define HI3670_PCLK_GATE_UART2 176 +#define HI3670_PCLK_GATE_UART5 177 +#define HI3670_CLK_GATE_UART0 178 +#define HI3670_CLK_GATE_I2C3 179 +#define HI3670_CLK_GATE_I2C4 180 +#define HI3670_CLK_GATE_I2C7 181 +#define HI3670_PCLK_GATE_I2C3 182 +#define HI3670_PCLK_GATE_I2C4 183 +#define HI3670_PCLK_GATE_I2C7 184 +#define HI3670_CLK_GATE_SPI1 185 +#define HI3670_CLK_GATE_SPI4 186 +#define HI3670_PCLK_GATE_SPI1 187 +#define HI3670_PCLK_GATE_SPI4 188 +#define HI3670_CLK_GATE_USB3OTG_REF 189 +#define HI3670_CLK_GATE_USB2PHY_REF 190 +#define HI3670_CLK_GATE_PCIEAUX 191 +#define HI3670_ACLK_GATE_PCIE 192 +#define HI3670_CLK_GATE_MMC1_PCIEAXI 193 +#define HI3670_CLK_GATE_PCIEPHY_REF 194 +#define HI3670_CLK_GATE_PCIE_DEBOUNCE 195 +#define HI3670_CLK_GATE_PCIEIO 196 +#define HI3670_CLK_GATE_PCIE_HP 197 +#define HI3670_CLK_GATE_AO_ASP 198 +#define HI3670_PCLK_GATE_PCTRL 199 +#define HI3670_CLK_CSI_TRANS_GT 200 +#define HI3670_CLK_DSI_TRANS_GT 201 +#define HI3670_CLK_GATE_PWM 202 +#define HI3670_ABB_AUDIO_EN0 203 +#define HI3670_ABB_AUDIO_EN1 204 +#define HI3670_ABB_AUDIO_GT_EN0 205 +#define HI3670_ABB_AUDIO_GT_EN1 206 +#define HI3670_CLK_GATE_DP_AUDIO_PLL_AO 207 +#define HI3670_PERI_VOLT_HOLD 208 +#define HI3670_PERI_VOLT_MIDDLE 209 +#define HI3670_CLK_GATE_ISP_SNCLK0 210 +#define HI3670_CLK_GATE_ISP_SNCLK1 211 +#define HI3670_CLK_GATE_ISP_SNCLK2 212 +#define HI3670_CLK_GATE_RXDPHY0_CFG 213 +#define HI3670_CLK_GATE_RXDPHY1_CFG 214 +#define HI3670_CLK_GATE_RXDPHY2_CFG 215 +#define HI3670_CLK_GATE_TXDPHY0_CFG 216 +#define HI3670_CLK_GATE_TXDPHY0_REF 217 +#define HI3670_CLK_GATE_TXDPHY1_CFG 218 +#define HI3670_CLK_GATE_TXDPHY1_REF 219 +#define HI3670_CLK_GATE_MEDIA_TCXO 220 + +/* clk in sctrl */ +#define HI3670_CLK_ANDGT_IOPERI 0 +#define HI3670_CLKANDGT_ASP_SUBSYS_PERI 1 +#define HI3670_CLK_ANGT_ASP_SUBSYS 2 +#define HI3670_CLK_MUX_UFS_SUBSYS 3 +#define HI3670_CLK_MUX_CLKOUT0 4 +#define HI3670_CLK_MUX_CLKOUT1 5 +#define HI3670_CLK_MUX_ASP_SUBSYS_PERI 6 +#define HI3670_CLK_MUX_ASP_PLL 7 +#define HI3670_CLK_DIV_AOBUS 8 +#define HI3670_CLK_DIV_UFS_SUBSYS 9 +#define HI3670_CLK_DIV_IOPERI 10 +#define HI3670_CLK_DIV_CLKOUT0_TCXO 11 +#define HI3670_CLK_DIV_CLKOUT1_TCXO 12 +#define HI3670_CLK_ASP_SUBSYS_PERI_DIV 13 +#define HI3670_CLK_DIV_ASP_SUBSYS 14 +#define HI3670_PPLL0_EN_ACPU 15 +#define HI3670_PPLL0_GT_CPU 16 +#define HI3670_CLK_GATE_PPLL0_MEDIA 17 +#define HI3670_PCLK_GPIO18 18 +#define HI3670_PCLK_GPIO19 19 +#define HI3670_CLK_GATE_SPI 20 +#define HI3670_PCLK_GATE_SPI 21 +#define HI3670_CLK_GATE_UFS_SUBSYS 22 +#define HI3670_CLK_GATE_UFSIO_REF 23 +#define HI3670_PCLK_AO_GPIO0 24 +#define HI3670_PCLK_AO_GPIO1 25 +#define HI3670_PCLK_AO_GPIO2 26 +#define HI3670_PCLK_AO_GPIO3 27 +#define HI3670_PCLK_AO_GPIO4 28 +#define 
HI3670_PCLK_AO_GPIO5 29 +#define HI3670_PCLK_AO_GPIO6 30 +#define HI3670_CLK_GATE_OUT0 31 +#define HI3670_CLK_GATE_OUT1 32 +#define HI3670_PCLK_GATE_SYSCNT 33 +#define HI3670_CLK_GATE_SYSCNT 34 +#define HI3670_CLK_GATE_ASP_SUBSYS_PERI 35 +#define HI3670_CLK_GATE_ASP_SUBSYS 36 +#define HI3670_CLK_GATE_ASP_TCXO 37 +#define HI3670_CLK_GATE_DP_AUDIO_PLL 38 + +/* clk in pmuctrl */ +#define HI3670_GATE_ABB_192 0 + +/* clk in pctrl */ +#define HI3670_GATE_UFS_TCXO_EN 0 +#define HI3670_GATE_USB_TCXO_EN 1 + +/* clk in iomcu */ +#define HI3670_CLK_GATE_I2C0 0 +#define HI3670_CLK_GATE_I2C1 1 +#define HI3670_CLK_GATE_I2C2 2 +#define HI3670_CLK_GATE_SPI0 3 +#define HI3670_CLK_GATE_SPI2 4 +#define HI3670_CLK_GATE_UART3 5 +#define HI3670_CLK_I2C0_GATE_IOMCU 6 +#define HI3670_CLK_I2C1_GATE_IOMCU 7 +#define HI3670_CLK_I2C2_GATE_IOMCU 8 +#define HI3670_CLK_SPI0_GATE_IOMCU 9 +#define HI3670_CLK_SPI2_GATE_IOMCU 10 +#define HI3670_CLK_UART3_GATE_IOMCU 11 +#define HI3670_CLK_GATE_PERI0_IOMCU 12 + +/* clk in media1 */ +#define HI3670_CLK_GATE_VIVOBUS_ANDGT 0 +#define HI3670_CLK_ANDGT_EDC0 1 +#define HI3670_CLK_ANDGT_LDI0 2 +#define HI3670_CLK_ANDGT_LDI1 3 +#define HI3670_CLK_MMBUF_PLL_ANDGT 4 +#define HI3670_PCLK_MMBUF_ANDGT 5 +#define HI3670_CLK_MUX_VIVOBUS 6 +#define HI3670_CLK_MUX_EDC0 7 +#define HI3670_CLK_MUX_LDI0 8 +#define HI3670_CLK_MUX_LDI1 9 +#define HI3670_CLK_SW_MMBUF 10 +#define HI3670_CLK_DIV_VIVOBUS 11 +#define HI3670_CLK_DIV_EDC0 12 +#define HI3670_CLK_DIV_LDI0 13 +#define HI3670_CLK_DIV_LDI1 14 +#define HI3670_ACLK_DIV_MMBUF 15 +#define HI3670_PCLK_DIV_MMBUF 16 +#define HI3670_ACLK_GATE_NOC_DSS 17 +#define HI3670_PCLK_GATE_NOC_DSS_CFG 18 +#define HI3670_PCLK_GATE_MMBUF_CFG 19 +#define HI3670_PCLK_GATE_DISP_NOC_SUBSYS 20 +#define HI3670_ACLK_GATE_DISP_NOC_SUBSYS 21 +#define HI3670_PCLK_GATE_DSS 22 +#define HI3670_ACLK_GATE_DSS 23 +#define HI3670_CLK_GATE_VIVOBUSFREQ 24 +#define HI3670_CLK_GATE_EDC0 25 +#define HI3670_CLK_GATE_LDI0 26 +#define HI3670_CLK_GATE_LDI1FREQ 27 +#define HI3670_CLK_GATE_BRG 28 +#define HI3670_ACLK_GATE_ASC 29 +#define HI3670_CLK_GATE_DSS_AXI_MM 30 +#define HI3670_CLK_GATE_MMBUF 31 +#define HI3670_PCLK_GATE_MMBUF 32 +#define HI3670_CLK_GATE_ATDIV_VIVO 33 + +/* clk in media2 */ +#define HI3670_CLK_GATE_VDECFREQ 0 +#define HI3670_CLK_GATE_VENCFREQ 1 +#define HI3670_CLK_GATE_ICSFREQ 2 + +#endif /* __DT_BINDINGS_CLOCK_HI3670_H */ diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h new file mode 100644 index 0000000..9e40605 --- /dev/null +++ b/include/dt-bindings/clock/hi6220-clock.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 Hisilicon Limited. 
+ * + * Author: Bintian Wang + */ + +#ifndef __DT_BINDINGS_CLOCK_HI6220_H +#define __DT_BINDINGS_CLOCK_HI6220_H + +/* clk in Hi6220 AO (always on) controller */ +#define HI6220_NONE_CLOCK 0 + +/* fixed rate clocks */ +#define HI6220_REF32K 1 +#define HI6220_CLK_TCXO 2 +#define HI6220_MMC1_PAD 3 +#define HI6220_MMC2_PAD 4 +#define HI6220_MMC0_PAD 5 +#define HI6220_PLL_BBP 6 +#define HI6220_PLL_GPU 7 +#define HI6220_PLL1_DDR 8 +#define HI6220_PLL_SYS 9 +#define HI6220_PLL_SYS_MEDIA 10 +#define HI6220_DDR_SRC 11 +#define HI6220_PLL_MEDIA 12 +#define HI6220_PLL_DDR 13 + +/* fixed factor clocks */ +#define HI6220_300M 14 +#define HI6220_150M 15 +#define HI6220_PICOPHY_SRC 16 +#define HI6220_MMC0_SRC_SEL 17 +#define HI6220_MMC1_SRC_SEL 18 +#define HI6220_MMC2_SRC_SEL 19 +#define HI6220_VPU_CODEC 20 +#define HI6220_MMC0_SMP 21 +#define HI6220_MMC1_SMP 22 +#define HI6220_MMC2_SMP 23 + +/* gate clocks */ +#define HI6220_WDT0_PCLK 24 +#define HI6220_WDT1_PCLK 25 +#define HI6220_WDT2_PCLK 26 +#define HI6220_TIMER0_PCLK 27 +#define HI6220_TIMER1_PCLK 28 +#define HI6220_TIMER2_PCLK 29 +#define HI6220_TIMER3_PCLK 30 +#define HI6220_TIMER4_PCLK 31 +#define HI6220_TIMER5_PCLK 32 +#define HI6220_TIMER6_PCLK 33 +#define HI6220_TIMER7_PCLK 34 +#define HI6220_TIMER8_PCLK 35 +#define HI6220_UART0_PCLK 36 +#define HI6220_RTC0_PCLK 37 +#define HI6220_RTC1_PCLK 38 +#define HI6220_AO_NR_CLKS 39 + +/* clk in Hi6220 systrl */ +/* gate clock */ +#define HI6220_MMC0_CLK 1 +#define HI6220_MMC0_CIUCLK 2 +#define HI6220_MMC1_CLK 3 +#define HI6220_MMC1_CIUCLK 4 +#define HI6220_MMC2_CLK 5 +#define HI6220_MMC2_CIUCLK 6 +#define HI6220_USBOTG_HCLK 7 +#define HI6220_CLK_PICOPHY 8 +#define HI6220_HIFI 9 +#define HI6220_DACODEC_PCLK 10 +#define HI6220_EDMAC_ACLK 11 +#define HI6220_CS_ATB 12 +#define HI6220_I2C0_CLK 13 +#define HI6220_I2C1_CLK 14 +#define HI6220_I2C2_CLK 15 +#define HI6220_I2C3_CLK 16 +#define HI6220_UART1_PCLK 17 +#define HI6220_UART2_PCLK 18 +#define HI6220_UART3_PCLK 19 +#define HI6220_UART4_PCLK 20 +#define HI6220_SPI_CLK 21 +#define HI6220_TSENSOR_CLK 22 +#define HI6220_MMU_CLK 23 +#define HI6220_HIFI_SEL 24 +#define HI6220_MMC0_SYSPLL 25 +#define HI6220_MMC1_SYSPLL 26 +#define HI6220_MMC2_SYSPLL 27 +#define HI6220_MMC0_SEL 28 +#define HI6220_MMC1_SEL 29 +#define HI6220_BBPPLL_SEL 30 +#define HI6220_MEDIA_PLL_SRC 31 +#define HI6220_MMC2_SEL 32 +#define HI6220_CS_ATB_SYSPLL 33 + +/* mux clocks */ +#define HI6220_MMC0_SRC 34 +#define HI6220_MMC0_SMP_IN 35 +#define HI6220_MMC1_SRC 36 +#define HI6220_MMC1_SMP_IN 37 +#define HI6220_MMC2_SRC 38 +#define HI6220_MMC2_SMP_IN 39 +#define HI6220_HIFI_SRC 40 +#define HI6220_UART1_SRC 41 +#define HI6220_UART2_SRC 42 +#define HI6220_UART3_SRC 43 +#define HI6220_UART4_SRC 44 +#define HI6220_MMC0_MUX0 45 +#define HI6220_MMC1_MUX0 46 +#define HI6220_MMC2_MUX0 47 +#define HI6220_MMC0_MUX1 48 +#define HI6220_MMC1_MUX1 49 +#define HI6220_MMC2_MUX1 50 + +/* divider clocks */ +#define HI6220_CLK_BUS 51 +#define HI6220_MMC0_DIV 52 +#define HI6220_MMC1_DIV 53 +#define HI6220_MMC2_DIV 54 +#define HI6220_HIFI_DIV 55 +#define HI6220_BBPPLL0_DIV 56 +#define HI6220_CS_DAPB 57 +#define HI6220_CS_ATB_DIV 58 + +/* gate clock */ +#define HI6220_DAPB_CLK 59 + +#define HI6220_SYS_NR_CLKS 60 + +/* clk in Hi6220 media controller */ +/* gate clocks */ +#define HI6220_DSI_PCLK 1 +#define HI6220_G3D_PCLK 2 +#define HI6220_ACLK_CODEC_VPU 3 +#define HI6220_ISP_SCLK 4 +#define HI6220_ADE_CORE 5 +#define HI6220_MED_MMU 6 +#define HI6220_CFG_CSI4PHY 7 +#define HI6220_CFG_CSI2PHY 8 +#define 
HI6220_ISP_SCLK_GATE 9 +#define HI6220_ISP_SCLK_GATE1 10 +#define HI6220_ADE_CORE_GATE 11 +#define HI6220_CODEC_VPU_GATE 12 +#define HI6220_MED_SYSPLL 13 + +/* mux clocks */ +#define HI6220_1440_1200 14 +#define HI6220_1000_1200 15 +#define HI6220_1000_1440 16 + +/* divider clocks */ +#define HI6220_CODEC_JPEG 17 +#define HI6220_ISP_SCLK_SRC 18 +#define HI6220_ISP_SCLK1 19 +#define HI6220_ADE_CORE_SRC 20 +#define HI6220_ADE_PIX_SRC 21 +#define HI6220_G3D_CLK 22 +#define HI6220_CODEC_VPU_SRC 23 + +#define HI6220_MEDIA_NR_CLKS 24 + +/* clk in Hi6220 power controller */ +/* gate clocks */ +#define HI6220_PLL_GPU_GATE 1 +#define HI6220_PLL1_DDR_GATE 2 +#define HI6220_PLL_DDR_GATE 3 +#define HI6220_PLL_MEDIA_GATE 4 +#define HI6220_PLL0_BBP_GATE 5 + +/* divider clocks */ +#define HI6220_DDRC_SRC 6 +#define HI6220_DDRC_AXI1 7 + +#define HI6220_POWER_NR_CLKS 8 + +/* clk in Hi6220 acpu sctrl */ +#define HI6220_ACPU_SFT_AT_S 0 + +#endif diff --git a/include/dt-bindings/clock/hip04-clock.h b/include/dt-bindings/clock/hip04-clock.h new file mode 100644 index 0000000..088d70c --- /dev/null +++ b/include/dt-bindings/clock/hip04-clock.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2013-2014 Hisilicon Limited. + * Copyright (c) 2013-2014 Linaro Limited. + * + * Author: Haojian Zhuang + */ + +#ifndef __DTS_HIP04_CLOCK_H +#define __DTS_HIP04_CLOCK_H + +#define HIP04_NONE_CLOCK 0 + +/* fixed rate & fixed factor clocks */ +#define HIP04_OSC50M 1 +#define HIP04_CLK_50M 2 +#define HIP04_CLK_168M 3 + +#define HIP04_NR_CLKS 64 + +#endif /* __DTS_HIP04_CLOCK_H */ diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h new file mode 100644 index 0000000..e64e577 --- /dev/null +++ b/include/dt-bindings/clock/histb-clock.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. 
+ */ + +#ifndef __DTS_HISTB_CLOCK_H +#define __DTS_HISTB_CLOCK_H + +/* clocks provided by core CRG */ +#define HISTB_OSC_CLK 0 +#define HISTB_APB_CLK 1 +#define HISTB_AHB_CLK 2 +#define HISTB_UART1_CLK 3 +#define HISTB_UART2_CLK 4 +#define HISTB_UART3_CLK 5 +#define HISTB_I2C0_CLK 6 +#define HISTB_I2C1_CLK 7 +#define HISTB_I2C2_CLK 8 +#define HISTB_I2C3_CLK 9 +#define HISTB_I2C4_CLK 10 +#define HISTB_I2C5_CLK 11 +#define HISTB_SPI0_CLK 12 +#define HISTB_SPI1_CLK 13 +#define HISTB_SPI2_CLK 14 +#define HISTB_SCI_CLK 15 +#define HISTB_FMC_CLK 16 +#define HISTB_MMC_BIU_CLK 17 +#define HISTB_MMC_CIU_CLK 18 +#define HISTB_MMC_DRV_CLK 19 +#define HISTB_MMC_SAMPLE_CLK 20 +#define HISTB_SDIO0_BIU_CLK 21 +#define HISTB_SDIO0_CIU_CLK 22 +#define HISTB_SDIO0_DRV_CLK 23 +#define HISTB_SDIO0_SAMPLE_CLK 24 +#define HISTB_PCIE_AUX_CLK 25 +#define HISTB_PCIE_PIPE_CLK 26 +#define HISTB_PCIE_SYS_CLK 27 +#define HISTB_PCIE_BUS_CLK 28 +#define HISTB_ETH0_MAC_CLK 29 +#define HISTB_ETH0_MACIF_CLK 30 +#define HISTB_ETH1_MAC_CLK 31 +#define HISTB_ETH1_MACIF_CLK 32 +#define HISTB_COMBPHY1_CLK 33 +#define HISTB_USB2_BUS_CLK 34 +#define HISTB_USB2_PHY_CLK 35 +#define HISTB_USB2_UTMI_CLK 36 +#define HISTB_USB2_12M_CLK 37 +#define HISTB_USB2_48M_CLK 38 +#define HISTB_USB2_OTG_UTMI_CLK 39 +#define HISTB_USB2_PHY1_REF_CLK 40 +#define HISTB_USB2_PHY2_REF_CLK 41 +#define HISTB_COMBPHY0_CLK 42 +#define HISTB_USB3_BUS_CLK 43 +#define HISTB_USB3_UTMI_CLK 44 +#define HISTB_USB3_PIPE_CLK 45 +#define HISTB_USB3_SUSPEND_CLK 46 +#define HISTB_USB3_BUS_CLK1 47 +#define HISTB_USB3_UTMI_CLK1 48 +#define HISTB_USB3_PIPE_CLK1 49 +#define HISTB_USB3_SUSPEND_CLK1 50 + +/* clocks provided by mcu CRG */ +#define HISTB_MCE_CLK 1 +#define HISTB_IR_CLK 2 +#define HISTB_TIMER01_CLK 3 +#define HISTB_LEDC_CLK 4 +#define HISTB_UART0_CLK 5 +#define HISTB_LSADC_CLK 6 + +#endif /* __DTS_HISTB_CLOCK_H */ diff --git a/include/dt-bindings/clock/hix5hd2-clock.h b/include/dt-bindings/clock/hix5hd2-clock.h new file mode 100644 index 0000000..2b8779f --- /dev/null +++ b/include/dt-bindings/clock/hix5hd2-clock.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 Linaro Ltd. + * Copyright (c) 2014 Hisilicon Limited. 
+ */ + +#ifndef __DTS_HIX5HD2_CLOCK_H +#define __DTS_HIX5HD2_CLOCK_H + +/* fixed rate */ +#define HIX5HD2_FIXED_1200M 1 +#define HIX5HD2_FIXED_400M 2 +#define HIX5HD2_FIXED_48M 3 +#define HIX5HD2_FIXED_24M 4 +#define HIX5HD2_FIXED_600M 5 +#define HIX5HD2_FIXED_300M 6 +#define HIX5HD2_FIXED_75M 7 +#define HIX5HD2_FIXED_200M 8 +#define HIX5HD2_FIXED_100M 9 +#define HIX5HD2_FIXED_40M 10 +#define HIX5HD2_FIXED_150M 11 +#define HIX5HD2_FIXED_1728M 12 +#define HIX5HD2_FIXED_28P8M 13 +#define HIX5HD2_FIXED_432M 14 +#define HIX5HD2_FIXED_345P6M 15 +#define HIX5HD2_FIXED_288M 16 +#define HIX5HD2_FIXED_60M 17 +#define HIX5HD2_FIXED_750M 18 +#define HIX5HD2_FIXED_500M 19 +#define HIX5HD2_FIXED_54M 20 +#define HIX5HD2_FIXED_27M 21 +#define HIX5HD2_FIXED_1500M 22 +#define HIX5HD2_FIXED_375M 23 +#define HIX5HD2_FIXED_187M 24 +#define HIX5HD2_FIXED_250M 25 +#define HIX5HD2_FIXED_125M 26 +#define HIX5HD2_FIXED_2P02M 27 +#define HIX5HD2_FIXED_50M 28 +#define HIX5HD2_FIXED_25M 29 +#define HIX5HD2_FIXED_83M 30 + +/* mux clocks */ +#define HIX5HD2_SFC_MUX 64 +#define HIX5HD2_MMC_MUX 65 +#define HIX5HD2_FEPHY_MUX 66 +#define HIX5HD2_SD_MUX 67 + +/* gate clocks */ +#define HIX5HD2_SFC_RST 128 +#define HIX5HD2_SFC_CLK 129 +#define HIX5HD2_MMC_CIU_CLK 130 +#define HIX5HD2_MMC_BIU_CLK 131 +#define HIX5HD2_MMC_CIU_RST 132 +#define HIX5HD2_FWD_BUS_CLK 133 +#define HIX5HD2_FWD_SYS_CLK 134 +#define HIX5HD2_MAC0_PHY_CLK 135 +#define HIX5HD2_SD_CIU_CLK 136 +#define HIX5HD2_SD_BIU_CLK 137 +#define HIX5HD2_SD_CIU_RST 138 +#define HIX5HD2_WDG0_CLK 139 +#define HIX5HD2_WDG0_RST 140 +#define HIX5HD2_I2C0_CLK 141 +#define HIX5HD2_I2C0_RST 142 +#define HIX5HD2_I2C1_CLK 143 +#define HIX5HD2_I2C1_RST 144 +#define HIX5HD2_I2C2_CLK 145 +#define HIX5HD2_I2C2_RST 146 +#define HIX5HD2_I2C3_CLK 147 +#define HIX5HD2_I2C3_RST 148 +#define HIX5HD2_I2C4_CLK 149 +#define HIX5HD2_I2C4_RST 150 +#define HIX5HD2_I2C5_CLK 151 +#define HIX5HD2_I2C5_RST 152 + +/* complex */ +#define HIX5HD2_MAC0_CLK 192 +#define HIX5HD2_MAC1_CLK 193 +#define HIX5HD2_SATA_CLK 194 +#define HIX5HD2_USB_CLK 195 + +#define HIX5HD2_NR_CLKS 256 +#endif /* __DTS_HIX5HD2_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx1-clock.h b/include/dt-bindings/clock/imx1-clock.h new file mode 100644 index 0000000..3730a46 --- /dev/null +++ b/include/dt-bindings/clock/imx1-clock.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Alexander Shiyan + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX1_H +#define __DT_BINDINGS_CLOCK_IMX1_H + +#define IMX1_CLK_DUMMY 0 +#define IMX1_CLK_CLK32 1 +#define IMX1_CLK_CLK16M_EXT 2 +#define IMX1_CLK_CLK16M 3 +#define IMX1_CLK_CLK32_PREMULT 4 +#define IMX1_CLK_PREM 5 +#define IMX1_CLK_MPLL 6 +#define IMX1_CLK_MPLL_GATE 7 +#define IMX1_CLK_SPLL 8 +#define IMX1_CLK_SPLL_GATE 9 +#define IMX1_CLK_MCU 10 +#define IMX1_CLK_FCLK 11 +#define IMX1_CLK_HCLK 12 +#define IMX1_CLK_CLK48M 13 +#define IMX1_CLK_PER1 14 +#define IMX1_CLK_PER2 15 +#define IMX1_CLK_PER3 16 +#define IMX1_CLK_CLKO 17 +#define IMX1_CLK_UART3_GATE 18 +#define IMX1_CLK_SSI2_GATE 19 +#define IMX1_CLK_BROM_GATE 20 +#define IMX1_CLK_DMA_GATE 21 +#define IMX1_CLK_CSI_GATE 22 +#define IMX1_CLK_MMA_GATE 23 +#define IMX1_CLK_USBD_GATE 24 +#define IMX1_CLK_MAX 25 + +#endif diff --git a/include/dt-bindings/clock/imx21-clock.h b/include/dt-bindings/clock/imx21-clock.h new file mode 100644 index 0000000..66d0ec5 --- /dev/null +++ b/include/dt-bindings/clock/imx21-clock.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 
Alexander Shiyan + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX21_H +#define __DT_BINDINGS_CLOCK_IMX21_H + +#define IMX21_CLK_DUMMY 0 +#define IMX21_CLK_CKIL 1 +#define IMX21_CLK_CKIH 2 +#define IMX21_CLK_FPM 3 +#define IMX21_CLK_CKIH_DIV1P5 4 +#define IMX21_CLK_MPLL_GATE 5 +#define IMX21_CLK_SPLL_GATE 6 +#define IMX21_CLK_FPM_GATE 7 +#define IMX21_CLK_CKIH_GATE 8 +#define IMX21_CLK_MPLL_OSC_SEL 9 +#define IMX21_CLK_IPG 10 +#define IMX21_CLK_HCLK 11 +#define IMX21_CLK_MPLL_SEL 12 +#define IMX21_CLK_SPLL_SEL 13 +#define IMX21_CLK_SSI1_SEL 14 +#define IMX21_CLK_SSI2_SEL 15 +#define IMX21_CLK_USB_DIV 16 +#define IMX21_CLK_FCLK 17 +#define IMX21_CLK_MPLL 18 +#define IMX21_CLK_SPLL 19 +#define IMX21_CLK_NFC_DIV 20 +#define IMX21_CLK_SSI1_DIV 21 +#define IMX21_CLK_SSI2_DIV 22 +#define IMX21_CLK_PER1 23 +#define IMX21_CLK_PER2 24 +#define IMX21_CLK_PER3 25 +#define IMX21_CLK_PER4 26 +#define IMX21_CLK_UART1_IPG_GATE 27 +#define IMX21_CLK_UART2_IPG_GATE 28 +#define IMX21_CLK_UART3_IPG_GATE 29 +#define IMX21_CLK_UART4_IPG_GATE 30 +#define IMX21_CLK_CSPI1_IPG_GATE 31 +#define IMX21_CLK_CSPI2_IPG_GATE 32 +#define IMX21_CLK_SSI1_GATE 33 +#define IMX21_CLK_SSI2_GATE 34 +#define IMX21_CLK_SDHC1_IPG_GATE 35 +#define IMX21_CLK_SDHC2_IPG_GATE 36 +#define IMX21_CLK_GPIO_GATE 37 +#define IMX21_CLK_I2C_GATE 38 +#define IMX21_CLK_DMA_GATE 39 +#define IMX21_CLK_USB_GATE 40 +#define IMX21_CLK_EMMA_GATE 41 +#define IMX21_CLK_SSI2_BAUD_GATE 42 +#define IMX21_CLK_SSI1_BAUD_GATE 43 +#define IMX21_CLK_LCDC_IPG_GATE 44 +#define IMX21_CLK_NFC_GATE 45 +#define IMX21_CLK_LCDC_HCLK_GATE 46 +#define IMX21_CLK_PER4_GATE 47 +#define IMX21_CLK_BMI_GATE 48 +#define IMX21_CLK_USB_HCLK_GATE 49 +#define IMX21_CLK_SLCDC_GATE 50 +#define IMX21_CLK_SLCDC_HCLK_GATE 51 +#define IMX21_CLK_EMMA_HCLK_GATE 52 +#define IMX21_CLK_BROM_GATE 53 +#define IMX21_CLK_DMA_HCLK_GATE 54 +#define IMX21_CLK_CSI_HCLK_GATE 55 +#define IMX21_CLK_CSPI3_IPG_GATE 56 +#define IMX21_CLK_WDOG_GATE 57 +#define IMX21_CLK_GPT1_IPG_GATE 58 +#define IMX21_CLK_GPT2_IPG_GATE 59 +#define IMX21_CLK_GPT3_IPG_GATE 60 +#define IMX21_CLK_PWM_IPG_GATE 61 +#define IMX21_CLK_RTC_GATE 62 +#define IMX21_CLK_KPP_GATE 63 +#define IMX21_CLK_OWIRE_GATE 64 +#define IMX21_CLK_MAX 65 + +#endif diff --git a/include/dt-bindings/clock/imx27-clock.h b/include/dt-bindings/clock/imx27-clock.h new file mode 100644 index 0000000..1ff448b --- /dev/null +++ b/include/dt-bindings/clock/imx27-clock.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Alexander Shiyan + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX27_H +#define __DT_BINDINGS_CLOCK_IMX27_H + +#define IMX27_CLK_DUMMY 0 +#define IMX27_CLK_CKIH 1 +#define IMX27_CLK_CKIL 2 +#define IMX27_CLK_MPLL 3 +#define IMX27_CLK_SPLL 4 +#define IMX27_CLK_MPLL_MAIN2 5 +#define IMX27_CLK_AHB 6 +#define IMX27_CLK_IPG 7 +#define IMX27_CLK_NFC_DIV 8 +#define IMX27_CLK_PER1_DIV 9 +#define IMX27_CLK_PER2_DIV 10 +#define IMX27_CLK_PER3_DIV 11 +#define IMX27_CLK_PER4_DIV 12 +#define IMX27_CLK_VPU_SEL 13 +#define IMX27_CLK_VPU_DIV 14 +#define IMX27_CLK_USB_DIV 15 +#define IMX27_CLK_CPU_SEL 16 +#define IMX27_CLK_CLKO_SEL 17 +#define IMX27_CLK_CPU_DIV 18 +#define IMX27_CLK_CLKO_DIV 19 +#define IMX27_CLK_SSI1_SEL 20 +#define IMX27_CLK_SSI2_SEL 21 +#define IMX27_CLK_SSI1_DIV 22 +#define IMX27_CLK_SSI2_DIV 23 +#define IMX27_CLK_CLKO_EN 24 +#define IMX27_CLK_SSI2_IPG_GATE 25 +#define IMX27_CLK_SSI1_IPG_GATE 26 +#define IMX27_CLK_SLCDC_IPG_GATE 27 +#define IMX27_CLK_SDHC3_IPG_GATE 28 +#define IMX27_CLK_SDHC2_IPG_GATE 29 +#define 
IMX27_CLK_SDHC1_IPG_GATE 30 +#define IMX27_CLK_SCC_IPG_GATE 31 +#define IMX27_CLK_SAHARA_IPG_GATE 32 +#define IMX27_CLK_RTC_IPG_GATE 33 +#define IMX27_CLK_PWM_IPG_GATE 34 +#define IMX27_CLK_OWIRE_IPG_GATE 35 +#define IMX27_CLK_LCDC_IPG_GATE 36 +#define IMX27_CLK_KPP_IPG_GATE 37 +#define IMX27_CLK_IIM_IPG_GATE 38 +#define IMX27_CLK_I2C2_IPG_GATE 39 +#define IMX27_CLK_I2C1_IPG_GATE 40 +#define IMX27_CLK_GPT6_IPG_GATE 41 +#define IMX27_CLK_GPT5_IPG_GATE 42 +#define IMX27_CLK_GPT4_IPG_GATE 43 +#define IMX27_CLK_GPT3_IPG_GATE 44 +#define IMX27_CLK_GPT2_IPG_GATE 45 +#define IMX27_CLK_GPT1_IPG_GATE 46 +#define IMX27_CLK_GPIO_IPG_GATE 47 +#define IMX27_CLK_FEC_IPG_GATE 48 +#define IMX27_CLK_EMMA_IPG_GATE 49 +#define IMX27_CLK_DMA_IPG_GATE 50 +#define IMX27_CLK_CSPI3_IPG_GATE 51 +#define IMX27_CLK_CSPI2_IPG_GATE 52 +#define IMX27_CLK_CSPI1_IPG_GATE 53 +#define IMX27_CLK_NFC_BAUD_GATE 54 +#define IMX27_CLK_SSI2_BAUD_GATE 55 +#define IMX27_CLK_SSI1_BAUD_GATE 56 +#define IMX27_CLK_VPU_BAUD_GATE 57 +#define IMX27_CLK_PER4_GATE 58 +#define IMX27_CLK_PER3_GATE 59 +#define IMX27_CLK_PER2_GATE 60 +#define IMX27_CLK_PER1_GATE 61 +#define IMX27_CLK_USB_AHB_GATE 62 +#define IMX27_CLK_SLCDC_AHB_GATE 63 +#define IMX27_CLK_SAHARA_AHB_GATE 64 +#define IMX27_CLK_LCDC_AHB_GATE 65 +#define IMX27_CLK_VPU_AHB_GATE 66 +#define IMX27_CLK_FEC_AHB_GATE 67 +#define IMX27_CLK_EMMA_AHB_GATE 68 +#define IMX27_CLK_EMI_AHB_GATE 69 +#define IMX27_CLK_DMA_AHB_GATE 70 +#define IMX27_CLK_CSI_AHB_GATE 71 +#define IMX27_CLK_BROM_AHB_GATE 72 +#define IMX27_CLK_ATA_AHB_GATE 73 +#define IMX27_CLK_WDOG_IPG_GATE 74 +#define IMX27_CLK_USB_IPG_GATE 75 +#define IMX27_CLK_UART6_IPG_GATE 76 +#define IMX27_CLK_UART5_IPG_GATE 77 +#define IMX27_CLK_UART4_IPG_GATE 78 +#define IMX27_CLK_UART3_IPG_GATE 79 +#define IMX27_CLK_UART2_IPG_GATE 80 +#define IMX27_CLK_UART1_IPG_GATE 81 +#define IMX27_CLK_CKIH_DIV1P5 82 +#define IMX27_CLK_FPM 83 +#define IMX27_CLK_MPLL_OSC_SEL 84 +#define IMX27_CLK_MPLL_SEL 85 +#define IMX27_CLK_SPLL_GATE 86 +#define IMX27_CLK_MSHC_DIV 87 +#define IMX27_CLK_RTIC_IPG_GATE 88 +#define IMX27_CLK_MSHC_IPG_GATE 89 +#define IMX27_CLK_RTIC_AHB_GATE 90 +#define IMX27_CLK_MSHC_BAUD_GATE 91 +#define IMX27_CLK_CKIH_GATE 92 +#define IMX27_CLK_MAX 93 + +#endif diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h new file mode 100644 index 0000000..bc65e30 --- /dev/null +++ b/include/dt-bindings/clock/imx5-clock.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2013 Lucas Stach, Pengutronix + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX5_H +#define __DT_BINDINGS_CLOCK_IMX5_H + +#define IMX5_CLK_DUMMY 0 +#define IMX5_CLK_CKIL 1 +#define IMX5_CLK_OSC 2 +#define IMX5_CLK_CKIH1 3 +#define IMX5_CLK_CKIH2 4 +#define IMX5_CLK_AHB 5 +#define IMX5_CLK_IPG 6 +#define IMX5_CLK_AXI_A 7 +#define IMX5_CLK_AXI_B 8 +#define IMX5_CLK_UART_PRED 9 +#define IMX5_CLK_UART_ROOT 10 +#define IMX5_CLK_ESDHC_A_PRED 11 +#define IMX5_CLK_ESDHC_B_PRED 12 +#define IMX5_CLK_ESDHC_C_SEL 13 +#define IMX5_CLK_ESDHC_D_SEL 14 +#define IMX5_CLK_EMI_SEL 15 +#define IMX5_CLK_EMI_SLOW_PODF 16 +#define IMX5_CLK_NFC_PODF 17 +#define IMX5_CLK_ECSPI_PRED 18 +#define IMX5_CLK_ECSPI_PODF 19 +#define IMX5_CLK_USBOH3_PRED 20 +#define IMX5_CLK_USBOH3_PODF 21 +#define IMX5_CLK_USB_PHY_PRED 22 +#define IMX5_CLK_USB_PHY_PODF 23 +#define IMX5_CLK_CPU_PODF 24 +#define IMX5_CLK_DI_PRED 25 +#define IMX5_CLK_TVE_SEL 27 +#define IMX5_CLK_UART1_IPG_GATE 28 +#define IMX5_CLK_UART1_PER_GATE 29 +#define IMX5_CLK_UART2_IPG_GATE 30 
+#define IMX5_CLK_UART2_PER_GATE 31 +#define IMX5_CLK_UART3_IPG_GATE 32 +#define IMX5_CLK_UART3_PER_GATE 33 +#define IMX5_CLK_I2C1_GATE 34 +#define IMX5_CLK_I2C2_GATE 35 +#define IMX5_CLK_GPT_IPG_GATE 36 +#define IMX5_CLK_PWM1_IPG_GATE 37 +#define IMX5_CLK_PWM1_HF_GATE 38 +#define IMX5_CLK_PWM2_IPG_GATE 39 +#define IMX5_CLK_PWM2_HF_GATE 40 +#define IMX5_CLK_GPT_HF_GATE 41 +#define IMX5_CLK_FEC_GATE 42 +#define IMX5_CLK_USBOH3_PER_GATE 43 +#define IMX5_CLK_ESDHC1_IPG_GATE 44 +#define IMX5_CLK_ESDHC2_IPG_GATE 45 +#define IMX5_CLK_ESDHC3_IPG_GATE 46 +#define IMX5_CLK_ESDHC4_IPG_GATE 47 +#define IMX5_CLK_SSI1_IPG_GATE 48 +#define IMX5_CLK_SSI2_IPG_GATE 49 +#define IMX5_CLK_SSI3_IPG_GATE 50 +#define IMX5_CLK_ECSPI1_IPG_GATE 51 +#define IMX5_CLK_ECSPI1_PER_GATE 52 +#define IMX5_CLK_ECSPI2_IPG_GATE 53 +#define IMX5_CLK_ECSPI2_PER_GATE 54 +#define IMX5_CLK_CSPI_IPG_GATE 55 +#define IMX5_CLK_SDMA_GATE 56 +#define IMX5_CLK_EMI_SLOW_GATE 57 +#define IMX5_CLK_IPU_SEL 58 +#define IMX5_CLK_IPU_GATE 59 +#define IMX5_CLK_NFC_GATE 60 +#define IMX5_CLK_IPU_DI1_GATE 61 +#define IMX5_CLK_VPU_SEL 62 +#define IMX5_CLK_VPU_GATE 63 +#define IMX5_CLK_VPU_REFERENCE_GATE 64 +#define IMX5_CLK_UART4_IPG_GATE 65 +#define IMX5_CLK_UART4_PER_GATE 66 +#define IMX5_CLK_UART5_IPG_GATE 67 +#define IMX5_CLK_UART5_PER_GATE 68 +#define IMX5_CLK_TVE_GATE 69 +#define IMX5_CLK_TVE_PRED 70 +#define IMX5_CLK_ESDHC1_PER_GATE 71 +#define IMX5_CLK_ESDHC2_PER_GATE 72 +#define IMX5_CLK_ESDHC3_PER_GATE 73 +#define IMX5_CLK_ESDHC4_PER_GATE 74 +#define IMX5_CLK_USB_PHY_GATE 75 +#define IMX5_CLK_HSI2C_GATE 76 +#define IMX5_CLK_MIPI_HSC1_GATE 77 +#define IMX5_CLK_MIPI_HSC2_GATE 78 +#define IMX5_CLK_MIPI_ESC_GATE 79 +#define IMX5_CLK_MIPI_HSP_GATE 80 +#define IMX5_CLK_LDB_DI1_DIV_3_5 81 +#define IMX5_CLK_LDB_DI1_DIV 82 +#define IMX5_CLK_LDB_DI0_DIV_3_5 83 +#define IMX5_CLK_LDB_DI0_DIV 84 +#define IMX5_CLK_LDB_DI1_GATE 85 +#define IMX5_CLK_CAN2_SERIAL_GATE 86 +#define IMX5_CLK_CAN2_IPG_GATE 87 +#define IMX5_CLK_I2C3_GATE 88 +#define IMX5_CLK_LP_APM 89 +#define IMX5_CLK_PERIPH_APM 90 +#define IMX5_CLK_MAIN_BUS 91 +#define IMX5_CLK_AHB_MAX 92 +#define IMX5_CLK_AIPS_TZ1 93 +#define IMX5_CLK_AIPS_TZ2 94 +#define IMX5_CLK_TMAX1 95 +#define IMX5_CLK_TMAX2 96 +#define IMX5_CLK_TMAX3 97 +#define IMX5_CLK_SPBA 98 +#define IMX5_CLK_UART_SEL 99 +#define IMX5_CLK_ESDHC_A_SEL 100 +#define IMX5_CLK_ESDHC_B_SEL 101 +#define IMX5_CLK_ESDHC_A_PODF 102 +#define IMX5_CLK_ESDHC_B_PODF 103 +#define IMX5_CLK_ECSPI_SEL 104 +#define IMX5_CLK_USBOH3_SEL 105 +#define IMX5_CLK_USB_PHY_SEL 106 +#define IMX5_CLK_IIM_GATE 107 +#define IMX5_CLK_USBOH3_GATE 108 +#define IMX5_CLK_EMI_FAST_GATE 109 +#define IMX5_CLK_IPU_DI0_GATE 110 +#define IMX5_CLK_GPC_DVFS 111 +#define IMX5_CLK_PLL1_SW 112 +#define IMX5_CLK_PLL2_SW 113 +#define IMX5_CLK_PLL3_SW 114 +#define IMX5_CLK_IPU_DI0_SEL 115 +#define IMX5_CLK_IPU_DI1_SEL 116 +#define IMX5_CLK_TVE_EXT_SEL 117 +#define IMX5_CLK_MX51_MIPI 118 +#define IMX5_CLK_PLL4_SW 119 +#define IMX5_CLK_LDB_DI1_SEL 120 +#define IMX5_CLK_DI_PLL4_PODF 121 +#define IMX5_CLK_LDB_DI0_SEL 122 +#define IMX5_CLK_LDB_DI0_GATE 123 +#define IMX5_CLK_USB_PHY1_GATE 124 +#define IMX5_CLK_USB_PHY2_GATE 125 +#define IMX5_CLK_PER_LP_APM 126 +#define IMX5_CLK_PER_PRED1 127 +#define IMX5_CLK_PER_PRED2 128 +#define IMX5_CLK_PER_PODF 129 +#define IMX5_CLK_PER_ROOT 130 +#define IMX5_CLK_SSI_APM 131 +#define IMX5_CLK_SSI1_ROOT_SEL 132 +#define IMX5_CLK_SSI2_ROOT_SEL 133 +#define IMX5_CLK_SSI3_ROOT_SEL 134 +#define IMX5_CLK_SSI_EXT1_SEL 135 +#define IMX5_CLK_SSI_EXT2_SEL 136 
+#define IMX5_CLK_SSI_EXT1_COM_SEL 137 +#define IMX5_CLK_SSI_EXT2_COM_SEL 138 +#define IMX5_CLK_SSI1_ROOT_PRED 139 +#define IMX5_CLK_SSI1_ROOT_PODF 140 +#define IMX5_CLK_SSI2_ROOT_PRED 141 +#define IMX5_CLK_SSI2_ROOT_PODF 142 +#define IMX5_CLK_SSI_EXT1_PRED 143 +#define IMX5_CLK_SSI_EXT1_PODF 144 +#define IMX5_CLK_SSI_EXT2_PRED 145 +#define IMX5_CLK_SSI_EXT2_PODF 146 +#define IMX5_CLK_SSI1_ROOT_GATE 147 +#define IMX5_CLK_SSI2_ROOT_GATE 148 +#define IMX5_CLK_SSI3_ROOT_GATE 149 +#define IMX5_CLK_SSI_EXT1_GATE 150 +#define IMX5_CLK_SSI_EXT2_GATE 151 +#define IMX5_CLK_EPIT1_IPG_GATE 152 +#define IMX5_CLK_EPIT1_HF_GATE 153 +#define IMX5_CLK_EPIT2_IPG_GATE 154 +#define IMX5_CLK_EPIT2_HF_GATE 155 +#define IMX5_CLK_CAN_SEL 156 +#define IMX5_CLK_CAN1_SERIAL_GATE 157 +#define IMX5_CLK_CAN1_IPG_GATE 158 +#define IMX5_CLK_OWIRE_GATE 159 +#define IMX5_CLK_GPU3D_SEL 160 +#define IMX5_CLK_GPU2D_SEL 161 +#define IMX5_CLK_GPU3D_GATE 162 +#define IMX5_CLK_GPU2D_GATE 163 +#define IMX5_CLK_GARB_GATE 164 +#define IMX5_CLK_CKO1_SEL 165 +#define IMX5_CLK_CKO1_PODF 166 +#define IMX5_CLK_CKO1 167 +#define IMX5_CLK_CKO2_SEL 168 +#define IMX5_CLK_CKO2_PODF 169 +#define IMX5_CLK_CKO2 170 +#define IMX5_CLK_SRTC_GATE 171 +#define IMX5_CLK_PATA_GATE 172 +#define IMX5_CLK_SATA_GATE 173 +#define IMX5_CLK_SPDIF_XTAL_SEL 174 +#define IMX5_CLK_SPDIF0_SEL 175 +#define IMX5_CLK_SPDIF1_SEL 176 +#define IMX5_CLK_SPDIF0_PRED 177 +#define IMX5_CLK_SPDIF0_PODF 178 +#define IMX5_CLK_SPDIF1_PRED 179 +#define IMX5_CLK_SPDIF1_PODF 180 +#define IMX5_CLK_SPDIF0_COM_SEL 181 +#define IMX5_CLK_SPDIF1_COM_SEL 182 +#define IMX5_CLK_SPDIF0_GATE 183 +#define IMX5_CLK_SPDIF1_GATE 184 +#define IMX5_CLK_SPDIF_IPG_GATE 185 +#define IMX5_CLK_OCRAM 186 +#define IMX5_CLK_SAHARA_IPG_GATE 187 +#define IMX5_CLK_SATA_REF 188 +#define IMX5_CLK_STEP_SEL 189 +#define IMX5_CLK_CPU_PODF_SEL 190 +#define IMX5_CLK_ARM 191 +#define IMX5_CLK_FIRI_PRED 192 +#define IMX5_CLK_FIRI_SEL 193 +#define IMX5_CLK_FIRI_PODF 194 +#define IMX5_CLK_FIRI_SERIAL_GATE 195 +#define IMX5_CLK_FIRI_IPG_GATE 196 +#define IMX5_CLK_CSI0_MCLK1_PRED 197 +#define IMX5_CLK_CSI0_MCLK1_SEL 198 +#define IMX5_CLK_CSI0_MCLK1_PODF 199 +#define IMX5_CLK_CSI0_MCLK1_GATE 200 +#define IMX5_CLK_IEEE1588_PRED 201 +#define IMX5_CLK_IEEE1588_SEL 202 +#define IMX5_CLK_IEEE1588_PODF 203 +#define IMX5_CLK_IEEE1588_GATE 204 +#define IMX5_CLK_SCC2_IPG_GATE 205 +#define IMX5_CLK_END 206 + +#endif /* __DT_BINDINGS_CLOCK_IMX5_H */ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h new file mode 100644 index 0000000..e20c43c --- /dev/null +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2014 Freescale Semiconductor, Inc. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6QDL_H +#define __DT_BINDINGS_CLOCK_IMX6QDL_H + +#define IMX6QDL_CLK_DUMMY 0 +#define IMX6QDL_CLK_CKIL 1 +#define IMX6QDL_CLK_CKIH 2 +#define IMX6QDL_CLK_OSC 3 +#define IMX6QDL_CLK_PLL2_PFD0_352M 4 +#define IMX6QDL_CLK_PLL2_PFD1_594M 5 +#define IMX6QDL_CLK_PLL2_PFD2_396M 6 +#define IMX6QDL_CLK_PLL3_PFD0_720M 7 +#define IMX6QDL_CLK_PLL3_PFD1_540M 8 +#define IMX6QDL_CLK_PLL3_PFD2_508M 9 +#define IMX6QDL_CLK_PLL3_PFD3_454M 10 +#define IMX6QDL_CLK_PLL2_198M 11 +#define IMX6QDL_CLK_PLL3_120M 12 +#define IMX6QDL_CLK_PLL3_80M 13 +#define IMX6QDL_CLK_PLL3_60M 14 +#define IMX6QDL_CLK_TWD 15 +#define IMX6QDL_CLK_STEP 16 +#define IMX6QDL_CLK_PLL1_SW 17 +#define IMX6QDL_CLK_PERIPH_PRE 18 +#define IMX6QDL_CLK_PERIPH2_PRE 19 +#define IMX6QDL_CLK_PERIPH_CLK2_SEL 20 +#define IMX6QDL_CLK_PERIPH2_CLK2_SEL 21 +#define IMX6QDL_CLK_AXI_SEL 22 +#define IMX6QDL_CLK_ESAI_SEL 23 +#define IMX6QDL_CLK_ASRC_SEL 24 +#define IMX6QDL_CLK_SPDIF_SEL 25 +#define IMX6QDL_CLK_GPU2D_AXI 26 +#define IMX6QDL_CLK_GPU3D_AXI 27 +#define IMX6QDL_CLK_GPU2D_CORE_SEL 28 +#define IMX6QDL_CLK_GPU3D_CORE_SEL 29 +#define IMX6QDL_CLK_GPU3D_SHADER_SEL 30 +#define IMX6QDL_CLK_IPU1_SEL 31 +#define IMX6QDL_CLK_IPU2_SEL 32 +#define IMX6QDL_CLK_LDB_DI0_SEL 33 +#define IMX6QDL_CLK_LDB_DI1_SEL 34 +#define IMX6QDL_CLK_IPU1_DI0_PRE_SEL 35 +#define IMX6QDL_CLK_IPU1_DI1_PRE_SEL 36 +#define IMX6QDL_CLK_IPU2_DI0_PRE_SEL 37 +#define IMX6QDL_CLK_IPU2_DI1_PRE_SEL 38 +#define IMX6QDL_CLK_IPU1_DI0_SEL 39 +#define IMX6QDL_CLK_IPU1_DI1_SEL 40 +#define IMX6QDL_CLK_IPU2_DI0_SEL 41 +#define IMX6QDL_CLK_IPU2_DI1_SEL 42 +#define IMX6QDL_CLK_HSI_TX_SEL 43 +#define IMX6QDL_CLK_PCIE_AXI_SEL 44 +#define IMX6QDL_CLK_SSI1_SEL 45 +#define IMX6QDL_CLK_SSI2_SEL 46 +#define IMX6QDL_CLK_SSI3_SEL 47 +#define IMX6QDL_CLK_USDHC1_SEL 48 +#define IMX6QDL_CLK_USDHC2_SEL 49 +#define IMX6QDL_CLK_USDHC3_SEL 50 +#define IMX6QDL_CLK_USDHC4_SEL 51 +#define IMX6QDL_CLK_ENFC_SEL 52 +#define IMX6QDL_CLK_EIM_SEL 53 +#define IMX6QDL_CLK_EIM_SLOW_SEL 54 +#define IMX6QDL_CLK_VDO_AXI_SEL 55 +#define IMX6QDL_CLK_VPU_AXI_SEL 56 +#define IMX6QDL_CLK_CKO1_SEL 57 +#define IMX6QDL_CLK_PERIPH 58 +#define IMX6QDL_CLK_PERIPH2 59 +#define IMX6QDL_CLK_PERIPH_CLK2 60 +#define IMX6QDL_CLK_PERIPH2_CLK2 61 +#define IMX6QDL_CLK_IPG 62 +#define IMX6QDL_CLK_IPG_PER 63 +#define IMX6QDL_CLK_ESAI_PRED 64 +#define IMX6QDL_CLK_ESAI_PODF 65 +#define IMX6QDL_CLK_ASRC_PRED 66 +#define IMX6QDL_CLK_ASRC_PODF 67 +#define IMX6QDL_CLK_SPDIF_PRED 68 +#define IMX6QDL_CLK_SPDIF_PODF 69 +#define IMX6QDL_CLK_CAN_ROOT 70 +#define IMX6QDL_CLK_ECSPI_ROOT 71 +#define IMX6QDL_CLK_GPU2D_CORE_PODF 72 +#define IMX6QDL_CLK_GPU3D_CORE_PODF 73 +#define IMX6QDL_CLK_GPU3D_SHADER 74 +#define IMX6QDL_CLK_IPU1_PODF 75 +#define IMX6QDL_CLK_IPU2_PODF 76 +#define IMX6QDL_CLK_LDB_DI0_PODF 77 +#define IMX6QDL_CLK_LDB_DI1_PODF 78 +#define IMX6QDL_CLK_IPU1_DI0_PRE 79 +#define IMX6QDL_CLK_IPU1_DI1_PRE 80 +#define IMX6QDL_CLK_IPU2_DI0_PRE 81 +#define IMX6QDL_CLK_IPU2_DI1_PRE 82 +#define IMX6QDL_CLK_HSI_TX_PODF 83 +#define IMX6QDL_CLK_SSI1_PRED 84 +#define IMX6QDL_CLK_SSI1_PODF 85 +#define IMX6QDL_CLK_SSI2_PRED 86 +#define IMX6QDL_CLK_SSI2_PODF 87 +#define IMX6QDL_CLK_SSI3_PRED 88 +#define IMX6QDL_CLK_SSI3_PODF 89 +#define IMX6QDL_CLK_UART_SERIAL_PODF 90 +#define IMX6QDL_CLK_USDHC1_PODF 91 +#define IMX6QDL_CLK_USDHC2_PODF 92 +#define IMX6QDL_CLK_USDHC3_PODF 93 +#define IMX6QDL_CLK_USDHC4_PODF 94 +#define IMX6QDL_CLK_ENFC_PRED 95 +#define IMX6QDL_CLK_ENFC_PODF 96 +#define IMX6QDL_CLK_EIM_PODF 97 +#define 
IMX6QDL_CLK_EIM_SLOW_PODF 98 +#define IMX6QDL_CLK_VPU_AXI_PODF 99 +#define IMX6QDL_CLK_CKO1_PODF 100 +#define IMX6QDL_CLK_AXI 101 +#define IMX6QDL_CLK_MMDC_CH0_AXI_PODF 102 +#define IMX6QDL_CLK_MMDC_CH1_AXI_PODF 103 +#define IMX6QDL_CLK_ARM 104 +#define IMX6QDL_CLK_AHB 105 +#define IMX6QDL_CLK_APBH_DMA 106 +#define IMX6QDL_CLK_ASRC 107 +#define IMX6QDL_CLK_CAN1_IPG 108 +#define IMX6QDL_CLK_CAN1_SERIAL 109 +#define IMX6QDL_CLK_CAN2_IPG 110 +#define IMX6QDL_CLK_CAN2_SERIAL 111 +#define IMX6QDL_CLK_ECSPI1 112 +#define IMX6QDL_CLK_ECSPI2 113 +#define IMX6QDL_CLK_ECSPI3 114 +#define IMX6QDL_CLK_ECSPI4 115 +#define IMX6Q_CLK_ECSPI5 116 +#define IMX6DL_CLK_I2C4 116 +#define IMX6QDL_CLK_ENET 117 +#define IMX6QDL_CLK_ESAI_EXTAL 118 +#define IMX6QDL_CLK_GPT_IPG 119 +#define IMX6QDL_CLK_GPT_IPG_PER 120 +#define IMX6QDL_CLK_GPU2D_CORE 121 +#define IMX6QDL_CLK_GPU3D_CORE 122 +#define IMX6QDL_CLK_HDMI_IAHB 123 +#define IMX6QDL_CLK_HDMI_ISFR 124 +#define IMX6QDL_CLK_I2C1 125 +#define IMX6QDL_CLK_I2C2 126 +#define IMX6QDL_CLK_I2C3 127 +#define IMX6QDL_CLK_IIM 128 +#define IMX6QDL_CLK_ENFC 129 +#define IMX6QDL_CLK_IPU1 130 +#define IMX6QDL_CLK_IPU1_DI0 131 +#define IMX6QDL_CLK_IPU1_DI1 132 +#define IMX6QDL_CLK_IPU2 133 +#define IMX6QDL_CLK_IPU2_DI0 134 +#define IMX6QDL_CLK_LDB_DI0 135 +#define IMX6QDL_CLK_LDB_DI1 136 +#define IMX6QDL_CLK_IPU2_DI1 137 +#define IMX6QDL_CLK_HSI_TX 138 +#define IMX6QDL_CLK_MLB 139 +#define IMX6QDL_CLK_MMDC_CH0_AXI 140 +#define IMX6QDL_CLK_MMDC_CH1_AXI 141 +#define IMX6QDL_CLK_OCRAM 142 +#define IMX6QDL_CLK_OPENVG_AXI 143 +#define IMX6QDL_CLK_PCIE_AXI 144 +#define IMX6QDL_CLK_PWM1 145 +#define IMX6QDL_CLK_PWM2 146 +#define IMX6QDL_CLK_PWM3 147 +#define IMX6QDL_CLK_PWM4 148 +#define IMX6QDL_CLK_PER1_BCH 149 +#define IMX6QDL_CLK_GPMI_BCH_APB 150 +#define IMX6QDL_CLK_GPMI_BCH 151 +#define IMX6QDL_CLK_GPMI_IO 152 +#define IMX6QDL_CLK_GPMI_APB 153 +#define IMX6QDL_CLK_SATA 154 +#define IMX6QDL_CLK_SDMA 155 +#define IMX6QDL_CLK_SPBA 156 +#define IMX6QDL_CLK_SSI1 157 +#define IMX6QDL_CLK_SSI2 158 +#define IMX6QDL_CLK_SSI3 159 +#define IMX6QDL_CLK_UART_IPG 160 +#define IMX6QDL_CLK_UART_SERIAL 161 +#define IMX6QDL_CLK_USBOH3 162 +#define IMX6QDL_CLK_USDHC1 163 +#define IMX6QDL_CLK_USDHC2 164 +#define IMX6QDL_CLK_USDHC3 165 +#define IMX6QDL_CLK_USDHC4 166 +#define IMX6QDL_CLK_VDO_AXI 167 +#define IMX6QDL_CLK_VPU_AXI 168 +#define IMX6QDL_CLK_CKO1 169 +#define IMX6QDL_CLK_PLL1_SYS 170 +#define IMX6QDL_CLK_PLL2_BUS 171 +#define IMX6QDL_CLK_PLL3_USB_OTG 172 +#define IMX6QDL_CLK_PLL4_AUDIO 173 +#define IMX6QDL_CLK_PLL5_VIDEO 174 +#define IMX6QDL_CLK_PLL8_MLB 175 +#define IMX6QDL_CLK_PLL7_USB_HOST 176 +#define IMX6QDL_CLK_PLL6_ENET 177 +#define IMX6QDL_CLK_SSI1_IPG 178 +#define IMX6QDL_CLK_SSI2_IPG 179 +#define IMX6QDL_CLK_SSI3_IPG 180 +#define IMX6QDL_CLK_ROM 181 +#define IMX6QDL_CLK_USBPHY1 182 +#define IMX6QDL_CLK_USBPHY2 183 +#define IMX6QDL_CLK_LDB_DI0_DIV_3_5 184 +#define IMX6QDL_CLK_LDB_DI1_DIV_3_5 185 +#define IMX6QDL_CLK_SATA_REF 186 +#define IMX6QDL_CLK_SATA_REF_100M 187 +#define IMX6QDL_CLK_PCIE_REF 188 +#define IMX6QDL_CLK_PCIE_REF_125M 189 +#define IMX6QDL_CLK_ENET_REF 190 +#define IMX6QDL_CLK_USBPHY1_GATE 191 +#define IMX6QDL_CLK_USBPHY2_GATE 192 +#define IMX6QDL_CLK_PLL4_POST_DIV 193 +#define IMX6QDL_CLK_PLL5_POST_DIV 194 +#define IMX6QDL_CLK_PLL5_VIDEO_DIV 195 +#define IMX6QDL_CLK_EIM_SLOW 196 +#define IMX6QDL_CLK_SPDIF 197 +#define IMX6QDL_CLK_CKO2_SEL 198 +#define IMX6QDL_CLK_CKO2_PODF 199 +#define IMX6QDL_CLK_CKO2 200 +#define IMX6QDL_CLK_CKO 201 +#define IMX6QDL_CLK_VDOA 
202 +#define IMX6QDL_CLK_PLL4_AUDIO_DIV 203 +#define IMX6QDL_CLK_LVDS1_SEL 204 +#define IMX6QDL_CLK_LVDS2_SEL 205 +#define IMX6QDL_CLK_LVDS1_GATE 206 +#define IMX6QDL_CLK_LVDS2_GATE 207 +#define IMX6QDL_CLK_ESAI_IPG 208 +#define IMX6QDL_CLK_ESAI_MEM 209 +#define IMX6QDL_CLK_ASRC_IPG 210 +#define IMX6QDL_CLK_ASRC_MEM 211 +#define IMX6QDL_CLK_LVDS1_IN 212 +#define IMX6QDL_CLK_LVDS2_IN 213 +#define IMX6QDL_CLK_ANACLK1 214 +#define IMX6QDL_CLK_ANACLK2 215 +#define IMX6QDL_PLL1_BYPASS_SRC 216 +#define IMX6QDL_PLL2_BYPASS_SRC 217 +#define IMX6QDL_PLL3_BYPASS_SRC 218 +#define IMX6QDL_PLL4_BYPASS_SRC 219 +#define IMX6QDL_PLL5_BYPASS_SRC 220 +#define IMX6QDL_PLL6_BYPASS_SRC 221 +#define IMX6QDL_PLL7_BYPASS_SRC 222 +#define IMX6QDL_CLK_PLL1 223 +#define IMX6QDL_CLK_PLL2 224 +#define IMX6QDL_CLK_PLL3 225 +#define IMX6QDL_CLK_PLL4 226 +#define IMX6QDL_CLK_PLL5 227 +#define IMX6QDL_CLK_PLL6 228 +#define IMX6QDL_CLK_PLL7 229 +#define IMX6QDL_PLL1_BYPASS 230 +#define IMX6QDL_PLL2_BYPASS 231 +#define IMX6QDL_PLL3_BYPASS 232 +#define IMX6QDL_PLL4_BYPASS 233 +#define IMX6QDL_PLL5_BYPASS 234 +#define IMX6QDL_PLL6_BYPASS 235 +#define IMX6QDL_PLL7_BYPASS 236 +#define IMX6QDL_CLK_GPT_3M 237 +#define IMX6QDL_CLK_VIDEO_27M 238 +#define IMX6QDL_CLK_MIPI_CORE_CFG 239 +#define IMX6QDL_CLK_MIPI_IPG 240 +#define IMX6QDL_CLK_CAAM_MEM 241 +#define IMX6QDL_CLK_CAAM_ACLK 242 +#define IMX6QDL_CLK_CAAM_IPG 243 +#define IMX6QDL_CLK_SPDIF_GCLK 244 +#define IMX6QDL_CLK_UART_SEL 245 +#define IMX6QDL_CLK_IPG_PER_SEL 246 +#define IMX6QDL_CLK_ECSPI_SEL 247 +#define IMX6QDL_CLK_CAN_SEL 248 +#define IMX6QDL_CLK_MMDC_CH1_AXI_CG 249 +#define IMX6QDL_CLK_PRE0 250 +#define IMX6QDL_CLK_PRE1 251 +#define IMX6QDL_CLK_PRE2 252 +#define IMX6QDL_CLK_PRE3 253 +#define IMX6QDL_CLK_PRG0_AXI 254 +#define IMX6QDL_CLK_PRG1_AXI 255 +#define IMX6QDL_CLK_PRG0_APB 256 +#define IMX6QDL_CLK_PRG1_APB 257 +#define IMX6QDL_CLK_PRE_AXI 258 +#define IMX6QDL_CLK_MLB_SEL 259 +#define IMX6QDL_CLK_MLB_PODF 260 +#define IMX6QDL_CLK_EPIT1 261 +#define IMX6QDL_CLK_EPIT2 262 +#define IMX6QDL_CLK_MMDC_P0_IPG 263 +#define IMX6QDL_CLK_DCIC1 264 +#define IMX6QDL_CLK_DCIC2 265 +#define IMX6QDL_CLK_END 266 + +#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h new file mode 100644 index 0000000..31364d2 --- /dev/null +++ b/include/dt-bindings/clock/imx6sl-clock.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2013 Freescale Semiconductor, Inc. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6SL_H +#define __DT_BINDINGS_CLOCK_IMX6SL_H + +#define IMX6SL_CLK_DUMMY 0 +#define IMX6SL_CLK_CKIL 1 +#define IMX6SL_CLK_OSC 2 +#define IMX6SL_CLK_PLL1_SYS 3 +#define IMX6SL_CLK_PLL2_BUS 4 +#define IMX6SL_CLK_PLL3_USB_OTG 5 +#define IMX6SL_CLK_PLL4_AUDIO 6 +#define IMX6SL_CLK_PLL5_VIDEO 7 +#define IMX6SL_CLK_PLL6_ENET 8 +#define IMX6SL_CLK_PLL7_USB_HOST 9 +#define IMX6SL_CLK_USBPHY1 10 +#define IMX6SL_CLK_USBPHY2 11 +#define IMX6SL_CLK_USBPHY1_GATE 12 +#define IMX6SL_CLK_USBPHY2_GATE 13 +#define IMX6SL_CLK_PLL4_POST_DIV 14 +#define IMX6SL_CLK_PLL5_POST_DIV 15 +#define IMX6SL_CLK_PLL5_VIDEO_DIV 16 +#define IMX6SL_CLK_ENET_REF 17 +#define IMX6SL_CLK_PLL2_PFD0 18 +#define IMX6SL_CLK_PLL2_PFD1 19 +#define IMX6SL_CLK_PLL2_PFD2 20 +#define IMX6SL_CLK_PLL3_PFD0 21 +#define IMX6SL_CLK_PLL3_PFD1 22 +#define IMX6SL_CLK_PLL3_PFD2 23 +#define IMX6SL_CLK_PLL3_PFD3 24 +#define IMX6SL_CLK_PLL2_198M 25 +#define IMX6SL_CLK_PLL3_120M 26 +#define IMX6SL_CLK_PLL3_80M 27 +#define IMX6SL_CLK_PLL3_60M 28 +#define IMX6SL_CLK_STEP 29 +#define IMX6SL_CLK_PLL1_SW 30 +#define IMX6SL_CLK_OCRAM_ALT_SEL 31 +#define IMX6SL_CLK_OCRAM_SEL 32 +#define IMX6SL_CLK_PRE_PERIPH2_SEL 33 +#define IMX6SL_CLK_PRE_PERIPH_SEL 34 +#define IMX6SL_CLK_PERIPH2_CLK2_SEL 35 +#define IMX6SL_CLK_PERIPH_CLK2_SEL 36 +#define IMX6SL_CLK_CSI_SEL 37 +#define IMX6SL_CLK_LCDIF_AXI_SEL 38 +#define IMX6SL_CLK_USDHC1_SEL 39 +#define IMX6SL_CLK_USDHC2_SEL 40 +#define IMX6SL_CLK_USDHC3_SEL 41 +#define IMX6SL_CLK_USDHC4_SEL 42 +#define IMX6SL_CLK_SSI1_SEL 43 +#define IMX6SL_CLK_SSI2_SEL 44 +#define IMX6SL_CLK_SSI3_SEL 45 +#define IMX6SL_CLK_PERCLK_SEL 46 +#define IMX6SL_CLK_PXP_AXI_SEL 47 +#define IMX6SL_CLK_EPDC_AXI_SEL 48 +#define IMX6SL_CLK_GPU2D_OVG_SEL 49 +#define IMX6SL_CLK_GPU2D_SEL 50 +#define IMX6SL_CLK_LCDIF_PIX_SEL 51 +#define IMX6SL_CLK_EPDC_PIX_SEL 52 +#define IMX6SL_CLK_SPDIF0_SEL 53 +#define IMX6SL_CLK_SPDIF1_SEL 54 +#define IMX6SL_CLK_EXTERN_AUDIO_SEL 55 +#define IMX6SL_CLK_ECSPI_SEL 56 +#define IMX6SL_CLK_UART_SEL 57 +#define IMX6SL_CLK_PERIPH 58 +#define IMX6SL_CLK_PERIPH2 59 +#define IMX6SL_CLK_OCRAM_PODF 60 +#define IMX6SL_CLK_PERIPH_CLK2_PODF 61 +#define IMX6SL_CLK_PERIPH2_CLK2_PODF 62 +#define IMX6SL_CLK_IPG 63 +#define IMX6SL_CLK_CSI_PODF 64 +#define IMX6SL_CLK_LCDIF_AXI_PODF 65 +#define IMX6SL_CLK_USDHC1_PODF 66 +#define IMX6SL_CLK_USDHC2_PODF 67 +#define IMX6SL_CLK_USDHC3_PODF 68 +#define IMX6SL_CLK_USDHC4_PODF 69 +#define IMX6SL_CLK_SSI1_PRED 70 +#define IMX6SL_CLK_SSI1_PODF 71 +#define IMX6SL_CLK_SSI2_PRED 72 +#define IMX6SL_CLK_SSI2_PODF 73 +#define IMX6SL_CLK_SSI3_PRED 74 +#define IMX6SL_CLK_SSI3_PODF 75 +#define IMX6SL_CLK_PERCLK 76 +#define IMX6SL_CLK_PXP_AXI_PODF 77 +#define IMX6SL_CLK_EPDC_AXI_PODF 78 +#define IMX6SL_CLK_GPU2D_OVG_PODF 79 +#define IMX6SL_CLK_GPU2D_PODF 80 +#define IMX6SL_CLK_LCDIF_PIX_PRED 81 +#define IMX6SL_CLK_EPDC_PIX_PRED 82 +#define IMX6SL_CLK_LCDIF_PIX_PODF 83 +#define IMX6SL_CLK_EPDC_PIX_PODF 84 +#define IMX6SL_CLK_SPDIF0_PRED 85 +#define IMX6SL_CLK_SPDIF0_PODF 86 +#define IMX6SL_CLK_SPDIF1_PRED 87 +#define IMX6SL_CLK_SPDIF1_PODF 88 +#define IMX6SL_CLK_EXTERN_AUDIO_PRED 89 +#define IMX6SL_CLK_EXTERN_AUDIO_PODF 90 +#define IMX6SL_CLK_ECSPI_ROOT 91 +#define IMX6SL_CLK_UART_ROOT 92 +#define IMX6SL_CLK_AHB 93 +#define IMX6SL_CLK_MMDC_ROOT 94 +#define IMX6SL_CLK_ARM 95 +#define IMX6SL_CLK_ECSPI1 96 +#define IMX6SL_CLK_ECSPI2 97 +#define IMX6SL_CLK_ECSPI3 98 +#define IMX6SL_CLK_ECSPI4 99 +#define IMX6SL_CLK_EPIT1 100 +#define IMX6SL_CLK_EPIT2 101 +#define 
IMX6SL_CLK_EXTERN_AUDIO 102 +#define IMX6SL_CLK_GPT 103 +#define IMX6SL_CLK_GPT_SERIAL 104 +#define IMX6SL_CLK_GPU2D_OVG 105 +#define IMX6SL_CLK_I2C1 106 +#define IMX6SL_CLK_I2C2 107 +#define IMX6SL_CLK_I2C3 108 +#define IMX6SL_CLK_OCOTP 109 +#define IMX6SL_CLK_CSI 110 +#define IMX6SL_CLK_PXP_AXI 111 +#define IMX6SL_CLK_EPDC_AXI 112 +#define IMX6SL_CLK_LCDIF_AXI 113 +#define IMX6SL_CLK_LCDIF_PIX 114 +#define IMX6SL_CLK_EPDC_PIX 115 +#define IMX6SL_CLK_OCRAM 116 +#define IMX6SL_CLK_PWM1 117 +#define IMX6SL_CLK_PWM2 118 +#define IMX6SL_CLK_PWM3 119 +#define IMX6SL_CLK_PWM4 120 +#define IMX6SL_CLK_SDMA 121 +#define IMX6SL_CLK_SPDIF 122 +#define IMX6SL_CLK_SSI1 123 +#define IMX6SL_CLK_SSI2 124 +#define IMX6SL_CLK_SSI3 125 +#define IMX6SL_CLK_UART 126 +#define IMX6SL_CLK_UART_SERIAL 127 +#define IMX6SL_CLK_USBOH3 128 +#define IMX6SL_CLK_USDHC1 129 +#define IMX6SL_CLK_USDHC2 130 +#define IMX6SL_CLK_USDHC3 131 +#define IMX6SL_CLK_USDHC4 132 +#define IMX6SL_CLK_PLL4_AUDIO_DIV 133 +#define IMX6SL_CLK_SPBA 134 +#define IMX6SL_CLK_ENET 135 +#define IMX6SL_CLK_LVDS1_SEL 136 +#define IMX6SL_CLK_LVDS1_OUT 137 +#define IMX6SL_CLK_LVDS1_IN 138 +#define IMX6SL_CLK_ANACLK1 139 +#define IMX6SL_PLL1_BYPASS_SRC 140 +#define IMX6SL_PLL2_BYPASS_SRC 141 +#define IMX6SL_PLL3_BYPASS_SRC 142 +#define IMX6SL_PLL4_BYPASS_SRC 143 +#define IMX6SL_PLL5_BYPASS_SRC 144 +#define IMX6SL_PLL6_BYPASS_SRC 145 +#define IMX6SL_PLL7_BYPASS_SRC 146 +#define IMX6SL_CLK_PLL1 147 +#define IMX6SL_CLK_PLL2 148 +#define IMX6SL_CLK_PLL3 149 +#define IMX6SL_CLK_PLL4 150 +#define IMX6SL_CLK_PLL5 151 +#define IMX6SL_CLK_PLL6 152 +#define IMX6SL_CLK_PLL7 153 +#define IMX6SL_PLL1_BYPASS 154 +#define IMX6SL_PLL2_BYPASS 155 +#define IMX6SL_PLL3_BYPASS 156 +#define IMX6SL_PLL4_BYPASS 157 +#define IMX6SL_PLL5_BYPASS 158 +#define IMX6SL_PLL6_BYPASS 159 +#define IMX6SL_PLL7_BYPASS 160 +#define IMX6SL_CLK_SSI1_IPG 161 +#define IMX6SL_CLK_SSI2_IPG 162 +#define IMX6SL_CLK_SSI3_IPG 163 +#define IMX6SL_CLK_SPDIF_GCLK 164 +#define IMX6SL_CLK_MMDC_P0_IPG 165 +#define IMX6SL_CLK_MMDC_P1_IPG 166 +#define IMX6SL_CLK_END 167 + +#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h new file mode 100644 index 0000000..f446710 --- /dev/null +++ b/include/dt-bindings/clock/imx6sll-clock.h @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6SLL_H +#define __DT_BINDINGS_CLOCK_IMX6SLL_H + +#define IMX6SLL_CLK_DUMMY 0 +#define IMX6SLL_CLK_CKIL 1 +#define IMX6SLL_CLK_OSC 2 +#define IMX6SLL_PLL1_BYPASS_SRC 3 +#define IMX6SLL_PLL2_BYPASS_SRC 4 +#define IMX6SLL_PLL3_BYPASS_SRC 5 +#define IMX6SLL_PLL4_BYPASS_SRC 6 +#define IMX6SLL_PLL5_BYPASS_SRC 7 +#define IMX6SLL_PLL6_BYPASS_SRC 8 +#define IMX6SLL_PLL7_BYPASS_SRC 9 +#define IMX6SLL_CLK_PLL1 10 +#define IMX6SLL_CLK_PLL2 11 +#define IMX6SLL_CLK_PLL3 12 +#define IMX6SLL_CLK_PLL4 13 +#define IMX6SLL_CLK_PLL5 14 +#define IMX6SLL_CLK_PLL6 15 +#define IMX6SLL_CLK_PLL7 16 +#define IMX6SLL_PLL1_BYPASS 17 +#define IMX6SLL_PLL2_BYPASS 18 +#define IMX6SLL_PLL3_BYPASS 19 +#define IMX6SLL_PLL4_BYPASS 20 +#define IMX6SLL_PLL5_BYPASS 21 +#define IMX6SLL_PLL6_BYPASS 22 +#define IMX6SLL_PLL7_BYPASS 23 +#define IMX6SLL_CLK_PLL1_SYS 24 +#define IMX6SLL_CLK_PLL2_BUS 25 +#define IMX6SLL_CLK_PLL3_USB_OTG 26 +#define IMX6SLL_CLK_PLL4_AUDIO 27 +#define IMX6SLL_CLK_PLL5_VIDEO 28 +#define IMX6SLL_CLK_PLL6_ENET 29 +#define IMX6SLL_CLK_PLL7_USB_HOST 30 +#define IMX6SLL_CLK_USBPHY1 31 +#define IMX6SLL_CLK_USBPHY2 32 +#define IMX6SLL_CLK_USBPHY1_GATE 33 +#define IMX6SLL_CLK_USBPHY2_GATE 34 +#define IMX6SLL_CLK_PLL2_PFD0 35 +#define IMX6SLL_CLK_PLL2_PFD1 36 +#define IMX6SLL_CLK_PLL2_PFD2 37 +#define IMX6SLL_CLK_PLL2_PFD3 38 +#define IMX6SLL_CLK_PLL3_PFD0 39 +#define IMX6SLL_CLK_PLL3_PFD1 40 +#define IMX6SLL_CLK_PLL3_PFD2 41 +#define IMX6SLL_CLK_PLL3_PFD3 42 +#define IMX6SLL_CLK_PLL4_POST_DIV 43 +#define IMX6SLL_CLK_PLL4_AUDIO_DIV 44 +#define IMX6SLL_CLK_PLL5_POST_DIV 45 +#define IMX6SLL_CLK_PLL5_VIDEO_DIV 46 +#define IMX6SLL_CLK_PLL2_198M 47 +#define IMX6SLL_CLK_PLL3_120M 48 +#define IMX6SLL_CLK_PLL3_80M 49 +#define IMX6SLL_CLK_PLL3_60M 50 +#define IMX6SLL_CLK_STEP 51 +#define IMX6SLL_CLK_PLL1_SW 52 +#define IMX6SLL_CLK_AXI_ALT_SEL 53 +#define IMX6SLL_CLK_AXI_SEL 54 +#define IMX6SLL_CLK_PERIPH_PRE 55 +#define IMX6SLL_CLK_PERIPH2_PRE 56 +#define IMX6SLL_CLK_PERIPH_CLK2_SEL 57 +#define IMX6SLL_CLK_PERIPH2_CLK2_SEL 58 +#define IMX6SLL_CLK_PERCLK_SEL 59 +#define IMX6SLL_CLK_USDHC1_SEL 60 +#define IMX6SLL_CLK_USDHC2_SEL 61 +#define IMX6SLL_CLK_USDHC3_SEL 62 +#define IMX6SLL_CLK_SSI1_SEL 63 +#define IMX6SLL_CLK_SSI2_SEL 64 +#define IMX6SLL_CLK_SSI3_SEL 65 +#define IMX6SLL_CLK_PXP_SEL 66 +#define IMX6SLL_CLK_LCDIF_PRE_SEL 67 +#define IMX6SLL_CLK_LCDIF_SEL 68 +#define IMX6SLL_CLK_EPDC_PRE_SEL 69 +#define IMX6SLL_CLK_SPDIF_SEL 70 +#define IMX6SLL_CLK_ECSPI_SEL 71 +#define IMX6SLL_CLK_UART_SEL 72 +#define IMX6SLL_CLK_ARM 73 +#define IMX6SLL_CLK_PERIPH 74 +#define IMX6SLL_CLK_PERIPH2 75 +#define IMX6SLL_CLK_PERIPH2_CLK2 76 +#define IMX6SLL_CLK_PERIPH_CLK2 77 +#define IMX6SLL_CLK_MMDC_PODF 78 +#define IMX6SLL_CLK_AXI_PODF 79 +#define IMX6SLL_CLK_AHB 80 +#define IMX6SLL_CLK_IPG 81 +#define IMX6SLL_CLK_PERCLK 82 +#define IMX6SLL_CLK_USDHC1_PODF 83 +#define IMX6SLL_CLK_USDHC2_PODF 84 +#define IMX6SLL_CLK_USDHC3_PODF 85 +#define IMX6SLL_CLK_SSI1_PRED 86 +#define IMX6SLL_CLK_SSI2_PRED 87 +#define IMX6SLL_CLK_SSI3_PRED 88 +#define IMX6SLL_CLK_SSI1_PODF 89 +#define IMX6SLL_CLK_SSI2_PODF 90 +#define IMX6SLL_CLK_SSI3_PODF 91 +#define IMX6SLL_CLK_PXP_PODF 92 +#define IMX6SLL_CLK_LCDIF_PRED 93 +#define IMX6SLL_CLK_LCDIF_PODF 94 +#define IMX6SLL_CLK_EPDC_SEL 95 +#define IMX6SLL_CLK_EPDC_PODF 96 +#define IMX6SLL_CLK_SPDIF_PRED 97 +#define IMX6SLL_CLK_SPDIF_PODF 98 +#define IMX6SLL_CLK_ECSPI_PODF 99 +#define IMX6SLL_CLK_UART_PODF 100 + +/* CCGR 0 */ +#define IMX6SLL_CLK_AIPSTZ1 101 +#define 
IMX6SLL_CLK_AIPSTZ2 102 +#define IMX6SLL_CLK_DCP 103 +#define IMX6SLL_CLK_UART2_IPG 104 +#define IMX6SLL_CLK_UART2_SERIAL 105 + +/* CCGR 1 */ +#define IMX6SLL_CLK_ECSPI1 106 +#define IMX6SLL_CLK_ECSPI2 107 +#define IMX6SLL_CLK_ECSPI3 108 +#define IMX6SLL_CLK_ECSPI4 109 +#define IMX6SLL_CLK_UART3_IPG 110 +#define IMX6SLL_CLK_UART3_SERIAL 111 +#define IMX6SLL_CLK_UART4_IPG 112 +#define IMX6SLL_CLK_UART4_SERIAL 113 +#define IMX6SLL_CLK_EPIT1 114 +#define IMX6SLL_CLK_EPIT2 115 +#define IMX6SLL_CLK_GPT_BUS 116 +#define IMX6SLL_CLK_GPT_SERIAL 117 + +/* CCGR2 */ +#define IMX6SLL_CLK_CSI 118 +#define IMX6SLL_CLK_I2C1 119 +#define IMX6SLL_CLK_I2C2 120 +#define IMX6SLL_CLK_I2C3 121 +#define IMX6SLL_CLK_OCOTP 122 +#define IMX6SLL_CLK_LCDIF_APB 123 +#define IMX6SLL_CLK_PXP 124 + +/* CCGR3 */ +#define IMX6SLL_CLK_UART5_IPG 125 +#define IMX6SLL_CLK_UART5_SERIAL 126 +#define IMX6SLL_CLK_EPDC_AXI 127 +#define IMX6SLL_CLK_EPDC_PIX 128 +#define IMX6SLL_CLK_LCDIF_PIX 129 +#define IMX6SLL_CLK_WDOG1 130 +#define IMX6SLL_CLK_MMDC_P0_FAST 131 +#define IMX6SLL_CLK_MMDC_P0_IPG 132 +#define IMX6SLL_CLK_OCRAM 133 + +/* CCGR4 */ +#define IMX6SLL_CLK_PWM1 134 +#define IMX6SLL_CLK_PWM2 135 +#define IMX6SLL_CLK_PWM3 136 +#define IMX6SLL_CLK_PWM4 137 + +/* CCGR 5 */ +#define IMX6SLL_CLK_ROM 138 +#define IMX6SLL_CLK_SDMA 139 +#define IMX6SLL_CLK_KPP 140 +#define IMX6SLL_CLK_WDOG2 141 +#define IMX6SLL_CLK_SPBA 142 +#define IMX6SLL_CLK_SPDIF 143 +#define IMX6SLL_CLK_SPDIF_GCLK 144 +#define IMX6SLL_CLK_SSI1 145 +#define IMX6SLL_CLK_SSI1_IPG 146 +#define IMX6SLL_CLK_SSI2 147 +#define IMX6SLL_CLK_SSI2_IPG 148 +#define IMX6SLL_CLK_SSI3 149 +#define IMX6SLL_CLK_SSI3_IPG 150 +#define IMX6SLL_CLK_UART1_IPG 151 +#define IMX6SLL_CLK_UART1_SERIAL 152 + +/* CCGR 6 */ +#define IMX6SLL_CLK_USBOH3 153 +#define IMX6SLL_CLK_USDHC1 154 +#define IMX6SLL_CLK_USDHC2 155 +#define IMX6SLL_CLK_USDHC3 156 + +#define IMX6SLL_CLK_IPP_DI0 157 +#define IMX6SLL_CLK_IPP_DI1 158 +#define IMX6SLL_CLK_LDB_DI0_SEL 159 +#define IMX6SLL_CLK_LDB_DI0_DIV_3_5 160 +#define IMX6SLL_CLK_LDB_DI0_DIV_7 161 +#define IMX6SLL_CLK_LDB_DI0_DIV_SEL 162 +#define IMX6SLL_CLK_LDB_DI0 163 +#define IMX6SLL_CLK_LDB_DI1_SEL 164 +#define IMX6SLL_CLK_LDB_DI1_DIV_3_5 165 +#define IMX6SLL_CLK_LDB_DI1_DIV_7 166 +#define IMX6SLL_CLK_LDB_DI1_DIV_SEL 167 +#define IMX6SLL_CLK_LDB_DI1 168 +#define IMX6SLL_CLK_EXTERN_AUDIO_SEL 169 +#define IMX6SLL_CLK_EXTERN_AUDIO_PRED 170 +#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171 +#define IMX6SLL_CLK_EXTERN_AUDIO 172 + +#define IMX6SLL_CLK_GPIO1 173 +#define IMX6SLL_CLK_GPIO2 174 +#define IMX6SLL_CLK_GPIO3 175 +#define IMX6SLL_CLK_GPIO4 176 +#define IMX6SLL_CLK_GPIO5 177 +#define IMX6SLL_CLK_GPIO6 178 +#define IMX6SLL_CLK_MMDC_P1_IPG 179 + +#define IMX6SLL_CLK_END 180 + +#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */ diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h new file mode 100644 index 0000000..1c64997 --- /dev/null +++ b/include/dt-bindings/clock/imx6sx-clock.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Freescale Semiconductor, Inc. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_IMX6SX_H +#define __DT_BINDINGS_CLOCK_IMX6SX_H + +#define IMX6SX_CLK_DUMMY 0 +#define IMX6SX_CLK_CKIL 1 +#define IMX6SX_CLK_CKIH 2 +#define IMX6SX_CLK_OSC 3 +#define IMX6SX_CLK_PLL1_SYS 4 +#define IMX6SX_CLK_PLL2_BUS 5 +#define IMX6SX_CLK_PLL3_USB_OTG 6 +#define IMX6SX_CLK_PLL4_AUDIO 7 +#define IMX6SX_CLK_PLL5_VIDEO 8 +#define IMX6SX_CLK_PLL6_ENET 9 +#define IMX6SX_CLK_PLL7_USB_HOST 10 +#define IMX6SX_CLK_USBPHY1 11 +#define IMX6SX_CLK_USBPHY2 12 +#define IMX6SX_CLK_USBPHY1_GATE 13 +#define IMX6SX_CLK_USBPHY2_GATE 14 +#define IMX6SX_CLK_PCIE_REF 15 +#define IMX6SX_CLK_PCIE_REF_125M 16 +#define IMX6SX_CLK_ENET_REF 17 +#define IMX6SX_CLK_PLL2_PFD0 18 +#define IMX6SX_CLK_PLL2_PFD1 19 +#define IMX6SX_CLK_PLL2_PFD2 20 +#define IMX6SX_CLK_PLL2_PFD3 21 +#define IMX6SX_CLK_PLL3_PFD0 22 +#define IMX6SX_CLK_PLL3_PFD1 23 +#define IMX6SX_CLK_PLL3_PFD2 24 +#define IMX6SX_CLK_PLL3_PFD3 25 +#define IMX6SX_CLK_PLL2_198M 26 +#define IMX6SX_CLK_PLL3_120M 27 +#define IMX6SX_CLK_PLL3_80M 28 +#define IMX6SX_CLK_PLL3_60M 29 +#define IMX6SX_CLK_TWD 30 +#define IMX6SX_CLK_PLL4_POST_DIV 31 +#define IMX6SX_CLK_PLL4_AUDIO_DIV 32 +#define IMX6SX_CLK_PLL5_POST_DIV 33 +#define IMX6SX_CLK_PLL5_VIDEO_DIV 34 +#define IMX6SX_CLK_STEP 35 +#define IMX6SX_CLK_PLL1_SW 36 +#define IMX6SX_CLK_OCRAM_SEL 37 +#define IMX6SX_CLK_PERIPH_PRE 38 +#define IMX6SX_CLK_PERIPH2_PRE 39 +#define IMX6SX_CLK_PERIPH_CLK2_SEL 40 +#define IMX6SX_CLK_PERIPH2_CLK2_SEL 41 +#define IMX6SX_CLK_PCIE_AXI_SEL 42 +#define IMX6SX_CLK_GPU_AXI_SEL 43 +#define IMX6SX_CLK_GPU_CORE_SEL 44 +#define IMX6SX_CLK_EIM_SLOW_SEL 45 +#define IMX6SX_CLK_USDHC1_SEL 46 +#define IMX6SX_CLK_USDHC2_SEL 47 +#define IMX6SX_CLK_USDHC3_SEL 48 +#define IMX6SX_CLK_USDHC4_SEL 49 +#define IMX6SX_CLK_SSI1_SEL 50 +#define IMX6SX_CLK_SSI2_SEL 51 +#define IMX6SX_CLK_SSI3_SEL 52 +#define IMX6SX_CLK_QSPI1_SEL 53 +#define IMX6SX_CLK_PERCLK_SEL 54 +#define IMX6SX_CLK_VID_SEL 55 +#define IMX6SX_CLK_ESAI_SEL 56 +#define IMX6SX_CLK_LDB_DI0_DIV_SEL 57 +#define IMX6SX_CLK_LDB_DI1_DIV_SEL 58 +#define IMX6SX_CLK_CAN_SEL 59 +#define IMX6SX_CLK_UART_SEL 60 +#define IMX6SX_CLK_QSPI2_SEL 61 +#define IMX6SX_CLK_LDB_DI1_SEL 62 +#define IMX6SX_CLK_LDB_DI0_SEL 63 +#define IMX6SX_CLK_SPDIF_SEL 64 +#define IMX6SX_CLK_AUDIO_SEL 65 +#define IMX6SX_CLK_ENET_PRE_SEL 66 +#define IMX6SX_CLK_ENET_SEL 67 +#define IMX6SX_CLK_M4_PRE_SEL 68 +#define IMX6SX_CLK_M4_SEL 69 +#define IMX6SX_CLK_ECSPI_SEL 70 +#define IMX6SX_CLK_LCDIF1_PRE_SEL 71 +#define IMX6SX_CLK_LCDIF2_PRE_SEL 72 +#define IMX6SX_CLK_LCDIF1_SEL 73 +#define IMX6SX_CLK_LCDIF2_SEL 74 +#define IMX6SX_CLK_DISPLAY_SEL 75 +#define IMX6SX_CLK_CSI_SEL 76 +#define IMX6SX_CLK_CKO1_SEL 77 +#define IMX6SX_CLK_CKO2_SEL 78 +#define IMX6SX_CLK_CKO 79 +#define IMX6SX_CLK_PERIPH_CLK2 80 +#define IMX6SX_CLK_PERIPH2_CLK2 81 +#define IMX6SX_CLK_IPG 82 +#define IMX6SX_CLK_GPU_CORE_PODF 83 +#define IMX6SX_CLK_GPU_AXI_PODF 84 +#define IMX6SX_CLK_LCDIF1_PODF 85 +#define IMX6SX_CLK_QSPI1_PODF 86 +#define IMX6SX_CLK_EIM_SLOW_PODF 87 +#define IMX6SX_CLK_LCDIF2_PODF 88 +#define IMX6SX_CLK_PERCLK 89 +#define IMX6SX_CLK_VID_PODF 90 +#define IMX6SX_CLK_CAN_PODF 91 +#define IMX6SX_CLK_USDHC1_PODF 92 +#define IMX6SX_CLK_USDHC2_PODF 93 +#define IMX6SX_CLK_USDHC3_PODF 94 +#define IMX6SX_CLK_USDHC4_PODF 95 +#define IMX6SX_CLK_UART_PODF 96 +#define IMX6SX_CLK_ESAI_PRED 97 +#define IMX6SX_CLK_ESAI_PODF 98 +#define IMX6SX_CLK_SSI3_PRED 99 +#define IMX6SX_CLK_SSI3_PODF 100 +#define IMX6SX_CLK_SSI1_PRED 101 +#define IMX6SX_CLK_SSI1_PODF 102 +#define 
+#define IMX6SX_CLK_QSPI2_PRED 103
+#define IMX6SX_CLK_QSPI2_PODF 104
+#define IMX6SX_CLK_SSI2_PRED 105
+#define IMX6SX_CLK_SSI2_PODF 106
+#define IMX6SX_CLK_SPDIF_PRED 107
+#define IMX6SX_CLK_SPDIF_PODF 108
+#define IMX6SX_CLK_AUDIO_PRED 109
+#define IMX6SX_CLK_AUDIO_PODF 110
+#define IMX6SX_CLK_ENET_PODF 111
+#define IMX6SX_CLK_M4_PODF 112
+#define IMX6SX_CLK_ECSPI_PODF 113
+#define IMX6SX_CLK_LCDIF1_PRED 114
+#define IMX6SX_CLK_LCDIF2_PRED 115
+#define IMX6SX_CLK_DISPLAY_PODF 116
+#define IMX6SX_CLK_CSI_PODF 117
+#define IMX6SX_CLK_LDB_DI0_DIV_3_5 118
+#define IMX6SX_CLK_LDB_DI0_DIV_7 119
+#define IMX6SX_CLK_LDB_DI1_DIV_3_5 120
+#define IMX6SX_CLK_LDB_DI1_DIV_7 121
+#define IMX6SX_CLK_CKO1_PODF 122
+#define IMX6SX_CLK_CKO2_PODF 123
+#define IMX6SX_CLK_PERIPH 124
+#define IMX6SX_CLK_PERIPH2 125
+#define IMX6SX_CLK_OCRAM 126
+#define IMX6SX_CLK_AHB 127
+#define IMX6SX_CLK_MMDC_PODF 128
+#define IMX6SX_CLK_ARM 129
+#define IMX6SX_CLK_AIPS_TZ1 130
+#define IMX6SX_CLK_AIPS_TZ2 131
+#define IMX6SX_CLK_APBH_DMA 132
+#define IMX6SX_CLK_ASRC_GATE 133
+#define IMX6SX_CLK_CAAM_MEM 134
+#define IMX6SX_CLK_CAAM_ACLK 135
+#define IMX6SX_CLK_CAAM_IPG 136
+#define IMX6SX_CLK_CAN1_IPG 137
+#define IMX6SX_CLK_CAN1_SERIAL 138
+#define IMX6SX_CLK_CAN2_IPG 139
+#define IMX6SX_CLK_CAN2_SERIAL 140
+#define IMX6SX_CLK_CPU_DEBUG 141
+#define IMX6SX_CLK_DCIC1 142
+#define IMX6SX_CLK_DCIC2 143
+#define IMX6SX_CLK_AIPS_TZ3 144
+#define IMX6SX_CLK_ECSPI1 145
+#define IMX6SX_CLK_ECSPI2 146
+#define IMX6SX_CLK_ECSPI3 147
+#define IMX6SX_CLK_ECSPI4 148
+#define IMX6SX_CLK_ECSPI5 149
+#define IMX6SX_CLK_EPIT1 150
+#define IMX6SX_CLK_EPIT2 151
+#define IMX6SX_CLK_ESAI_EXTAL 152
+#define IMX6SX_CLK_WAKEUP 153
+#define IMX6SX_CLK_GPT_BUS 154
+#define IMX6SX_CLK_GPT_SERIAL 155
+#define IMX6SX_CLK_GPU 156
+#define IMX6SX_CLK_OCRAM_S 157
+#define IMX6SX_CLK_CANFD 158
+#define IMX6SX_CLK_CSI 159
+#define IMX6SX_CLK_I2C1 160
+#define IMX6SX_CLK_I2C2 161
+#define IMX6SX_CLK_I2C3 162
+#define IMX6SX_CLK_OCOTP 163
+#define IMX6SX_CLK_IOMUXC 164
+#define IMX6SX_CLK_IPMUX1 165
+#define IMX6SX_CLK_IPMUX2 166
+#define IMX6SX_CLK_IPMUX3 167
+#define IMX6SX_CLK_TZASC1 168
+#define IMX6SX_CLK_LCDIF_APB 169
+#define IMX6SX_CLK_PXP_AXI 170
+#define IMX6SX_CLK_M4 171
+#define IMX6SX_CLK_ENET 172
+#define IMX6SX_CLK_DISPLAY_AXI 173
+#define IMX6SX_CLK_LCDIF2_PIX 174
+#define IMX6SX_CLK_LCDIF1_PIX 175
+#define IMX6SX_CLK_LDB_DI0 176
+#define IMX6SX_CLK_QSPI1 177
+#define IMX6SX_CLK_MLB 178
+#define IMX6SX_CLK_MMDC_P0_FAST 179
+#define IMX6SX_CLK_MMDC_P0_IPG 180
+#define IMX6SX_CLK_AXI 181
+#define IMX6SX_CLK_PCIE_AXI 182
+#define IMX6SX_CLK_QSPI2 183
+#define IMX6SX_CLK_PER1_BCH 184
+#define IMX6SX_CLK_PER2_MAIN 185
+#define IMX6SX_CLK_PWM1 186
+#define IMX6SX_CLK_PWM2 187
+#define IMX6SX_CLK_PWM3 188
+#define IMX6SX_CLK_PWM4 189
+#define IMX6SX_CLK_GPMI_BCH_APB 190
+#define IMX6SX_CLK_GPMI_BCH 191
+#define IMX6SX_CLK_GPMI_IO 192
+#define IMX6SX_CLK_GPMI_APB 193
+#define IMX6SX_CLK_ROM 194
+#define IMX6SX_CLK_SDMA 195
+#define IMX6SX_CLK_SPBA 196
+#define IMX6SX_CLK_SPDIF 197
+#define IMX6SX_CLK_SSI1_IPG 198
+#define IMX6SX_CLK_SSI2_IPG 199
+#define IMX6SX_CLK_SSI3_IPG 200
+#define IMX6SX_CLK_SSI1 201
+#define IMX6SX_CLK_SSI2 202
+#define IMX6SX_CLK_SSI3 203
+#define IMX6SX_CLK_UART_IPG 204
+#define IMX6SX_CLK_UART_SERIAL 205
+#define IMX6SX_CLK_SAI1 206
+#define IMX6SX_CLK_SAI2 207
+#define IMX6SX_CLK_USBOH3 208
+#define IMX6SX_CLK_USDHC1 209
+#define IMX6SX_CLK_USDHC2 210
+#define IMX6SX_CLK_USDHC3 211
+#define IMX6SX_CLK_USDHC4 212
+#define IMX6SX_CLK_EIM_SLOW 213
+#define IMX6SX_CLK_PWM8 214
+#define IMX6SX_CLK_VADC 215
+#define IMX6SX_CLK_GIS 216
+#define IMX6SX_CLK_I2C4 217
+#define IMX6SX_CLK_PWM5 218
+#define IMX6SX_CLK_PWM6 219
+#define IMX6SX_CLK_PWM7 220
+#define IMX6SX_CLK_CKO1 221
+#define IMX6SX_CLK_CKO2 222
+#define IMX6SX_CLK_IPP_DI0 223
+#define IMX6SX_CLK_IPP_DI1 224
+#define IMX6SX_CLK_ENET_AHB 225
+#define IMX6SX_CLK_OCRAM_PODF 226
+#define IMX6SX_CLK_GPT_3M 227
+#define IMX6SX_CLK_ENET_PTP 228
+#define IMX6SX_CLK_ENET_PTP_REF 229
+#define IMX6SX_CLK_ENET2_REF 230
+#define IMX6SX_CLK_ENET2_REF_125M 231
+#define IMX6SX_CLK_AUDIO 232
+#define IMX6SX_CLK_LVDS1_SEL 233
+#define IMX6SX_CLK_LVDS1_OUT 234
+#define IMX6SX_CLK_ASRC_IPG 235
+#define IMX6SX_CLK_ASRC_MEM 236
+#define IMX6SX_CLK_SAI1_IPG 237
+#define IMX6SX_CLK_SAI2_IPG 238
+#define IMX6SX_CLK_ESAI_IPG 239
+#define IMX6SX_CLK_ESAI_MEM 240
+#define IMX6SX_CLK_LVDS1_IN 241
+#define IMX6SX_CLK_ANACLK1 242
+#define IMX6SX_PLL1_BYPASS_SRC 243
+#define IMX6SX_PLL2_BYPASS_SRC 244
+#define IMX6SX_PLL3_BYPASS_SRC 245
+#define IMX6SX_PLL4_BYPASS_SRC 246
+#define IMX6SX_PLL5_BYPASS_SRC 247
+#define IMX6SX_PLL6_BYPASS_SRC 248
+#define IMX6SX_PLL7_BYPASS_SRC 249
+#define IMX6SX_CLK_PLL1 250
+#define IMX6SX_CLK_PLL2 251
+#define IMX6SX_CLK_PLL3 252
+#define IMX6SX_CLK_PLL4 253
+#define IMX6SX_CLK_PLL5 254
+#define IMX6SX_CLK_PLL6 255
+#define IMX6SX_CLK_PLL7 256
+#define IMX6SX_PLL1_BYPASS 257
+#define IMX6SX_PLL2_BYPASS 258
+#define IMX6SX_PLL3_BYPASS 259
+#define IMX6SX_PLL4_BYPASS 260
+#define IMX6SX_PLL5_BYPASS 261
+#define IMX6SX_PLL6_BYPASS 262
+#define IMX6SX_PLL7_BYPASS 263
+#define IMX6SX_CLK_SPDIF_GCLK 264
+#define IMX6SX_CLK_LVDS2_SEL 265
+#define IMX6SX_CLK_LVDS2_OUT 266
+#define IMX6SX_CLK_LVDS2_IN 267
+#define IMX6SX_CLK_ANACLK2 268
+#define IMX6SX_CLK_MMDC_P1_IPG 269
+#define IMX6SX_CLK_CLK_END 270
+
+#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
new file mode 100644
index 0000000..7909433
--- /dev/null
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX6UL_H
+#define __DT_BINDINGS_CLOCK_IMX6UL_H
+
+#define IMX6UL_CLK_DUMMY 0
+#define IMX6UL_CLK_CKIL 1
+#define IMX6UL_CLK_CKIH 2
+#define IMX6UL_CLK_OSC 3
+#define IMX6UL_PLL1_BYPASS_SRC 4
+#define IMX6UL_PLL2_BYPASS_SRC 5
+#define IMX6UL_PLL3_BYPASS_SRC 6
+#define IMX6UL_PLL4_BYPASS_SRC 7
+#define IMX6UL_PLL5_BYPASS_SRC 8
+#define IMX6UL_PLL6_BYPASS_SRC 9
+#define IMX6UL_PLL7_BYPASS_SRC 10
+#define IMX6UL_CLK_PLL1 11
+#define IMX6UL_CLK_PLL2 12
+#define IMX6UL_CLK_PLL3 13
+#define IMX6UL_CLK_PLL4 14
+#define IMX6UL_CLK_PLL5 15
+#define IMX6UL_CLK_PLL6 16
+#define IMX6UL_CLK_PLL7 17
+#define IMX6UL_PLL1_BYPASS 18
+#define IMX6UL_PLL2_BYPASS 19
+#define IMX6UL_PLL3_BYPASS 20
+#define IMX6UL_PLL4_BYPASS 21
+#define IMX6UL_PLL5_BYPASS 22
+#define IMX6UL_PLL6_BYPASS 23
+#define IMX6UL_PLL7_BYPASS 24
+#define IMX6UL_CLK_PLL1_SYS 25
+#define IMX6UL_CLK_PLL2_BUS 26
+#define IMX6UL_CLK_PLL3_USB_OTG 27
+#define IMX6UL_CLK_PLL4_AUDIO 28
+#define IMX6UL_CLK_PLL5_VIDEO 29
+#define IMX6UL_CLK_PLL6_ENET 30
+#define IMX6UL_CLK_PLL7_USB_HOST 31
+#define IMX6UL_CLK_USBPHY1 32
+#define IMX6UL_CLK_USBPHY2 33
+#define IMX6UL_CLK_USBPHY1_GATE 34
+#define IMX6UL_CLK_USBPHY2_GATE 35
+#define IMX6UL_CLK_PLL2_PFD0 36
+#define IMX6UL_CLK_PLL2_PFD1 37
+#define IMX6UL_CLK_PLL2_PFD2 38
+#define IMX6UL_CLK_PLL2_PFD3 39
+#define IMX6UL_CLK_PLL3_PFD0 40
+#define IMX6UL_CLK_PLL3_PFD1 41
+#define IMX6UL_CLK_PLL3_PFD2 42
+#define IMX6UL_CLK_PLL3_PFD3 43
+#define IMX6UL_CLK_ENET_REF 44
+#define IMX6UL_CLK_ENET2_REF 45
+#define IMX6UL_CLK_ENET2_REF_125M 46
+#define IMX6UL_CLK_ENET_PTP_REF 47
+#define IMX6UL_CLK_ENET_PTP 48
+#define IMX6UL_CLK_PLL4_POST_DIV 49
+#define IMX6UL_CLK_PLL4_AUDIO_DIV 50
+#define IMX6UL_CLK_PLL5_POST_DIV 51
+#define IMX6UL_CLK_PLL5_VIDEO_DIV 52
+#define IMX6UL_CLK_PLL2_198M 53
+#define IMX6UL_CLK_PLL3_80M 54
+#define IMX6UL_CLK_PLL3_60M 55
+#define IMX6UL_CLK_STEP 56
+#define IMX6UL_CLK_PLL1_SW 57
+#define IMX6UL_CLK_AXI_ALT_SEL 58
+#define IMX6UL_CLK_AXI_SEL 59
+#define IMX6UL_CLK_PERIPH_PRE 60
+#define IMX6UL_CLK_PERIPH2_PRE 61
+#define IMX6UL_CLK_PERIPH_CLK2_SEL 62
+#define IMX6UL_CLK_PERIPH2_CLK2_SEL 63
+#define IMX6UL_CLK_USDHC1_SEL 64
+#define IMX6UL_CLK_USDHC2_SEL 65
+#define IMX6UL_CLK_BCH_SEL 66
+#define IMX6UL_CLK_GPMI_SEL 67
+#define IMX6UL_CLK_EIM_SLOW_SEL 68
+#define IMX6UL_CLK_SPDIF_SEL 69
+#define IMX6UL_CLK_SAI1_SEL 70
+#define IMX6UL_CLK_SAI2_SEL 71
+#define IMX6UL_CLK_SAI3_SEL 72
+#define IMX6UL_CLK_LCDIF_PRE_SEL 73
+#define IMX6UL_CLK_SIM_PRE_SEL 74
+#define IMX6UL_CLK_LDB_DI0_SEL 75
+#define IMX6UL_CLK_LDB_DI1_SEL 76
+#define IMX6UL_CLK_ENFC_SEL 77
+#define IMX6UL_CLK_CAN_SEL 78
+#define IMX6UL_CLK_ECSPI_SEL 79
+#define IMX6UL_CLK_UART_SEL 80
+#define IMX6UL_CLK_QSPI1_SEL 81
+#define IMX6UL_CLK_PERCLK_SEL 82
+#define IMX6UL_CLK_LCDIF_SEL 83
+#define IMX6UL_CLK_SIM_SEL 84
+#define IMX6UL_CLK_PERIPH 85
+#define IMX6UL_CLK_PERIPH2 86
+#define IMX6UL_CLK_LDB_DI0_DIV_3_5 87
+#define IMX6UL_CLK_LDB_DI0_DIV_7 88
+#define IMX6UL_CLK_LDB_DI1_DIV_3_5 89
+#define IMX6UL_CLK_LDB_DI1_DIV_7 90
+#define IMX6UL_CLK_LDB_DI0_DIV_SEL 91
+#define IMX6UL_CLK_LDB_DI1_DIV_SEL 92
+#define IMX6UL_CLK_ARM 93
+#define IMX6UL_CLK_PERIPH_CLK2 94
+#define IMX6UL_CLK_PERIPH2_CLK2 95
+#define IMX6UL_CLK_AHB 96
+#define IMX6UL_CLK_MMDC_PODF 97
+#define IMX6UL_CLK_AXI_PODF 98
+#define IMX6UL_CLK_PERCLK 99
+#define IMX6UL_CLK_IPG 100
+#define IMX6UL_CLK_USDHC1_PODF 101
+#define IMX6UL_CLK_USDHC2_PODF 102
+#define IMX6UL_CLK_BCH_PODF 103
+#define IMX6UL_CLK_GPMI_PODF 104
+#define IMX6UL_CLK_EIM_SLOW_PODF 105
+#define IMX6UL_CLK_SPDIF_PRED 106
+#define IMX6UL_CLK_SPDIF_PODF 107
+#define IMX6UL_CLK_SAI1_PRED 108
+#define IMX6UL_CLK_SAI1_PODF 109
+#define IMX6UL_CLK_SAI2_PRED 110
+#define IMX6UL_CLK_SAI2_PODF 111
+#define IMX6UL_CLK_SAI3_PRED 112
+#define IMX6UL_CLK_SAI3_PODF 113
+#define IMX6UL_CLK_LCDIF_PRED 114
+#define IMX6UL_CLK_LCDIF_PODF 115
+#define IMX6UL_CLK_SIM_PODF 116
+#define IMX6UL_CLK_QSPI1_PDOF 117
+#define IMX6UL_CLK_ENFC_PRED 118
+#define IMX6UL_CLK_ENFC_PODF 119
+#define IMX6UL_CLK_CAN_PODF 120
+#define IMX6UL_CLK_ECSPI_PODF 121
+#define IMX6UL_CLK_UART_PODF 122
+#define IMX6UL_CLK_ADC1 123
+#define IMX6UL_CLK_ADC2 124
+#define IMX6UL_CLK_AIPSTZ1 125
+#define IMX6UL_CLK_AIPSTZ2 126
+#define IMX6UL_CLK_AIPSTZ3 127
+#define IMX6UL_CLK_APBHDMA 128
+#define IMX6UL_CLK_ASRC_IPG 129
+#define IMX6UL_CLK_ASRC_MEM 130
+#define IMX6UL_CLK_GPMI_BCH_APB 131
+#define IMX6UL_CLK_GPMI_BCH 132
+#define IMX6UL_CLK_GPMI_IO 133
+#define IMX6UL_CLK_GPMI_APB 134
+#define IMX6UL_CLK_CAAM_MEM 135
+#define IMX6UL_CLK_CAAM_ACLK 136
+#define IMX6UL_CLK_CAAM_IPG 137
+#define IMX6UL_CLK_CSI 138
+#define IMX6UL_CLK_ECSPI1 139
+#define IMX6UL_CLK_ECSPI2 140
+#define IMX6UL_CLK_ECSPI3 141
+#define IMX6UL_CLK_ECSPI4 142
+#define IMX6UL_CLK_EIM 143
+#define IMX6UL_CLK_ENET 144
+#define IMX6UL_CLK_ENET_AHB 145
+#define IMX6UL_CLK_EPIT1 146
+#define IMX6UL_CLK_EPIT2 147
+#define IMX6UL_CLK_CAN1_IPG 148
+#define IMX6UL_CLK_CAN1_SERIAL 149
+#define IMX6UL_CLK_CAN2_IPG 150
+#define IMX6UL_CLK_CAN2_SERIAL 151
+#define IMX6UL_CLK_GPT1_BUS 152
+#define IMX6UL_CLK_GPT1_SERIAL 153
+#define IMX6UL_CLK_GPT2_BUS 154
+#define IMX6UL_CLK_GPT2_SERIAL 155
+#define IMX6UL_CLK_I2C1 156
+#define IMX6UL_CLK_I2C2 157
+#define IMX6UL_CLK_I2C3 158
+#define IMX6UL_CLK_I2C4 159
+#define IMX6UL_CLK_IOMUXC 160
+#define IMX6UL_CLK_LCDIF_APB 161
+#define IMX6UL_CLK_LCDIF_PIX 162
+#define IMX6UL_CLK_MMDC_P0_FAST 163
+#define IMX6UL_CLK_MMDC_P0_IPG 164
+#define IMX6UL_CLK_OCOTP 165
+#define IMX6UL_CLK_OCRAM 166
+#define IMX6UL_CLK_PWM1 167
+#define IMX6UL_CLK_PWM2 168
+#define IMX6UL_CLK_PWM3 169
+#define IMX6UL_CLK_PWM4 170
+#define IMX6UL_CLK_PWM5 171
+#define IMX6UL_CLK_PWM6 172
+#define IMX6UL_CLK_PWM7 173
+#define IMX6UL_CLK_PWM8 174
+#define IMX6UL_CLK_PXP 175
+#define IMX6UL_CLK_QSPI 176
+#define IMX6UL_CLK_ROM 177
+#define IMX6UL_CLK_SAI1 178
+#define IMX6UL_CLK_SAI1_IPG 179
+#define IMX6UL_CLK_SAI2 180
+#define IMX6UL_CLK_SAI2_IPG 181
+#define IMX6UL_CLK_SAI3 182
+#define IMX6UL_CLK_SAI3_IPG 183
+#define IMX6UL_CLK_SDMA 184
+#define IMX6UL_CLK_SIM 185
+#define IMX6UL_CLK_SIM_S 186
+#define IMX6UL_CLK_SPBA 187
+#define IMX6UL_CLK_SPDIF 188
+#define IMX6UL_CLK_UART1_IPG 189
+#define IMX6UL_CLK_UART1_SERIAL 190
+#define IMX6UL_CLK_UART2_IPG 191
+#define IMX6UL_CLK_UART2_SERIAL 192
+#define IMX6UL_CLK_UART3_IPG 193
+#define IMX6UL_CLK_UART3_SERIAL 194
+#define IMX6UL_CLK_UART4_IPG 195
+#define IMX6UL_CLK_UART4_SERIAL 196
+#define IMX6UL_CLK_UART5_IPG 197
+#define IMX6UL_CLK_UART5_SERIAL 198
+#define IMX6UL_CLK_UART6_IPG 199
+#define IMX6UL_CLK_UART6_SERIAL 200
+#define IMX6UL_CLK_UART7_IPG 201
+#define IMX6UL_CLK_UART7_SERIAL 202
+#define IMX6UL_CLK_UART8_IPG 203
+#define IMX6UL_CLK_UART8_SERIAL 204
+#define IMX6UL_CLK_USBOH3 205
+#define IMX6UL_CLK_USDHC1 206
+#define IMX6UL_CLK_USDHC2 207
+#define IMX6UL_CLK_WDOG1 208
+#define IMX6UL_CLK_WDOG2 209
+#define IMX6UL_CLK_WDOG3 210
+#define IMX6UL_CLK_LDB_DI0 211
+#define IMX6UL_CLK_AXI 212
+#define IMX6UL_CLK_SPDIF_GCLK 213
+#define IMX6UL_CLK_GPT_3M 214
+#define IMX6UL_CLK_SIM2 215
+#define IMX6UL_CLK_SIM1 216
+#define IMX6UL_CLK_IPP_DI0 217
+#define IMX6UL_CLK_IPP_DI1 218
+#define IMX6UL_CA7_SECONDARY_SEL 219
+#define IMX6UL_CLK_PER_BCH 220
+#define IMX6UL_CLK_CSI_SEL 221
+#define IMX6UL_CLK_CSI_PODF 222
+#define IMX6UL_CLK_PLL3_120M 223
+#define IMX6UL_CLK_KPP 224
+#define IMX6ULL_CLK_ESAI_PRED 225
+#define IMX6ULL_CLK_ESAI_PODF 226
+#define IMX6ULL_CLK_ESAI_EXTAL 227
+#define IMX6ULL_CLK_ESAI_MEM 228
+#define IMX6ULL_CLK_ESAI_IPG 229
+#define IMX6ULL_CLK_DCP_CLK 230
+#define IMX6ULL_CLK_EPDC_PRE_SEL 231
+#define IMX6ULL_CLK_EPDC_SEL 232
+#define IMX6ULL_CLK_EPDC_PODF 233
+#define IMX6ULL_CLK_EPDC_ACLK 234
+#define IMX6ULL_CLK_EPDC_PIX 235
+#define IMX6ULL_CLK_ESAI_SEL 236
+#define IMX6UL_CLK_CKO1_SEL 237
+#define IMX6UL_CLK_CKO1_PODF 238
+#define IMX6UL_CLK_CKO1 239
+#define IMX6UL_CLK_CKO2_SEL 240
+#define IMX6UL_CLK_CKO2_PODF 241
+#define IMX6UL_CLK_CKO2 242
+#define IMX6UL_CLK_CKO 243
+#define IMX6UL_CLK_GPIO1 244
+#define IMX6UL_CLK_GPIO2 245
+#define IMX6UL_CLK_GPIO3 246
+#define IMX6UL_CLK_GPIO4 247
+#define IMX6UL_CLK_GPIO5 248
+#define IMX6UL_CLK_MMDC_P1_IPG 249
+
+#define IMX6UL_CLK_END 250
+
+#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
new file mode 100644
index 0000000..e6a670e
--- /dev/null
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX7D_H
+#define __DT_BINDINGS_CLOCK_IMX7D_H
+
+#define IMX7D_OSC_24M_CLK 0
+#define IMX7D_PLL_ARM_MAIN 1
+#define IMX7D_PLL_ARM_MAIN_CLK 2
+#define IMX7D_PLL_ARM_MAIN_SRC 3
+#define IMX7D_PLL_ARM_MAIN_BYPASS 4
+#define IMX7D_PLL_SYS_MAIN 5
+#define IMX7D_PLL_SYS_MAIN_CLK 6
+#define IMX7D_PLL_SYS_MAIN_SRC 7
+#define IMX7D_PLL_SYS_MAIN_BYPASS 8
+#define IMX7D_PLL_SYS_MAIN_480M 9
+#define IMX7D_PLL_SYS_MAIN_240M 10
+#define IMX7D_PLL_SYS_MAIN_120M 11
+#define IMX7D_PLL_SYS_MAIN_480M_CLK 12
+#define IMX7D_PLL_SYS_MAIN_240M_CLK 13
+#define IMX7D_PLL_SYS_MAIN_120M_CLK 14
+#define IMX7D_PLL_SYS_PFD0_392M_CLK 15
+#define IMX7D_PLL_SYS_PFD0_196M 16
+#define IMX7D_PLL_SYS_PFD0_196M_CLK 17
+#define IMX7D_PLL_SYS_PFD1_332M_CLK 18
+#define IMX7D_PLL_SYS_PFD1_166M 19
+#define IMX7D_PLL_SYS_PFD1_166M_CLK 20
+#define IMX7D_PLL_SYS_PFD2_270M_CLK 21
+#define IMX7D_PLL_SYS_PFD2_135M 22
+#define IMX7D_PLL_SYS_PFD2_135M_CLK 23
+#define IMX7D_PLL_SYS_PFD3_CLK 24
+#define IMX7D_PLL_SYS_PFD4_CLK 25
+#define IMX7D_PLL_SYS_PFD5_CLK 26
+#define IMX7D_PLL_SYS_PFD6_CLK 27
+#define IMX7D_PLL_SYS_PFD7_CLK 28
+#define IMX7D_PLL_ENET_MAIN 29
+#define IMX7D_PLL_ENET_MAIN_CLK 30
+#define IMX7D_PLL_ENET_MAIN_SRC 31
+#define IMX7D_PLL_ENET_MAIN_BYPASS 32
+#define IMX7D_PLL_ENET_MAIN_500M 33
+#define IMX7D_PLL_ENET_MAIN_250M 34
+#define IMX7D_PLL_ENET_MAIN_125M 35
+#define IMX7D_PLL_ENET_MAIN_100M 36
+#define IMX7D_PLL_ENET_MAIN_50M 37
+#define IMX7D_PLL_ENET_MAIN_40M 38
+#define IMX7D_PLL_ENET_MAIN_25M 39
+#define IMX7D_PLL_ENET_MAIN_500M_CLK 40
+#define IMX7D_PLL_ENET_MAIN_250M_CLK 41
+#define IMX7D_PLL_ENET_MAIN_125M_CLK 42
+#define IMX7D_PLL_ENET_MAIN_100M_CLK 43
+#define IMX7D_PLL_ENET_MAIN_50M_CLK 44
+#define IMX7D_PLL_ENET_MAIN_40M_CLK 45
+#define IMX7D_PLL_ENET_MAIN_25M_CLK 46
+#define IMX7D_PLL_DRAM_MAIN 47
+#define IMX7D_PLL_DRAM_MAIN_CLK 48
+#define IMX7D_PLL_DRAM_MAIN_SRC 49
+#define IMX7D_PLL_DRAM_MAIN_BYPASS 50
+#define IMX7D_PLL_DRAM_MAIN_533M 51
+#define IMX7D_PLL_DRAM_MAIN_533M_CLK 52
+#define IMX7D_PLL_AUDIO_MAIN 53
+#define IMX7D_PLL_AUDIO_MAIN_CLK 54
+#define IMX7D_PLL_AUDIO_MAIN_SRC 55
+#define IMX7D_PLL_AUDIO_MAIN_BYPASS 56
+#define IMX7D_PLL_VIDEO_MAIN_CLK 57
+#define IMX7D_PLL_VIDEO_MAIN 58
+#define IMX7D_PLL_VIDEO_MAIN_SRC 59
+#define IMX7D_PLL_VIDEO_MAIN_BYPASS 60
+#define IMX7D_USB_MAIN_480M_CLK 61
+#define IMX7D_ARM_A7_ROOT_CLK 62
+#define IMX7D_ARM_A7_ROOT_SRC 63
+#define IMX7D_ARM_A7_ROOT_CG 64
+#define IMX7D_ARM_A7_ROOT_DIV 65
+#define IMX7D_ARM_M4_ROOT_CLK 66
+#define IMX7D_ARM_M4_ROOT_SRC 67
+#define IMX7D_ARM_M4_ROOT_CG 68
+#define IMX7D_ARM_M4_ROOT_DIV 69
+#define IMX7D_ARM_M0_ROOT_CLK 70 /* unused */
+#define IMX7D_ARM_M0_ROOT_SRC 71 /* unused */
+#define IMX7D_ARM_M0_ROOT_CG 72 /* unused */
+#define IMX7D_ARM_M0_ROOT_DIV 73 /* unused */
+#define IMX7D_MAIN_AXI_ROOT_CLK 74
+#define IMX7D_MAIN_AXI_ROOT_SRC 75
+#define IMX7D_MAIN_AXI_ROOT_CG 76
+#define IMX7D_MAIN_AXI_ROOT_DIV 77
+#define IMX7D_DISP_AXI_ROOT_CLK 78
+#define IMX7D_DISP_AXI_ROOT_SRC 79
+#define IMX7D_DISP_AXI_ROOT_CG 80
+#define IMX7D_DISP_AXI_ROOT_DIV 81
+#define IMX7D_ENET_AXI_ROOT_CLK 82
+#define IMX7D_ENET_AXI_ROOT_SRC 83
+#define IMX7D_ENET_AXI_ROOT_CG 84
+#define IMX7D_ENET_AXI_ROOT_DIV 85
+#define IMX7D_NAND_USDHC_BUS_ROOT_CLK 86
+#define IMX7D_NAND_USDHC_BUS_ROOT_SRC 87
+#define IMX7D_NAND_USDHC_BUS_ROOT_CG 88
+#define IMX7D_NAND_USDHC_BUS_ROOT_DIV 89
+#define IMX7D_AHB_CHANNEL_ROOT_CLK 90
+#define IMX7D_AHB_CHANNEL_ROOT_SRC 91
+#define IMX7D_AHB_CHANNEL_ROOT_CG 92
+#define IMX7D_AHB_CHANNEL_ROOT_DIV 93
+#define IMX7D_DRAM_PHYM_ROOT_CLK 94
+#define IMX7D_DRAM_PHYM_ROOT_SRC 95
+#define IMX7D_DRAM_PHYM_ROOT_CG 96
+#define IMX7D_DRAM_PHYM_ROOT_DIV 97
+#define IMX7D_DRAM_ROOT_CLK 98
+#define IMX7D_DRAM_ROOT_SRC 99
+#define IMX7D_DRAM_ROOT_CG 100
+#define IMX7D_DRAM_ROOT_DIV 101
+#define IMX7D_DRAM_PHYM_ALT_ROOT_CLK 102
+#define IMX7D_DRAM_PHYM_ALT_ROOT_SRC 103
+#define IMX7D_DRAM_PHYM_ALT_ROOT_CG 104
+#define IMX7D_DRAM_PHYM_ALT_ROOT_DIV 105
+#define IMX7D_DRAM_ALT_ROOT_CLK 106
+#define IMX7D_DRAM_ALT_ROOT_SRC 107
+#define IMX7D_DRAM_ALT_ROOT_CG 108
+#define IMX7D_DRAM_ALT_ROOT_DIV 109
+#define IMX7D_USB_HSIC_ROOT_CLK 110
+#define IMX7D_USB_HSIC_ROOT_SRC 111
+#define IMX7D_USB_HSIC_ROOT_CG 112
+#define IMX7D_USB_HSIC_ROOT_DIV 113
+#define IMX7D_PCIE_CTRL_ROOT_CLK 114
+#define IMX7D_PCIE_CTRL_ROOT_SRC 115
+#define IMX7D_PCIE_CTRL_ROOT_CG 116
+#define IMX7D_PCIE_CTRL_ROOT_DIV 117
+#define IMX7D_PCIE_PHY_ROOT_CLK 118
+#define IMX7D_PCIE_PHY_ROOT_SRC 119
+#define IMX7D_PCIE_PHY_ROOT_CG 120
+#define IMX7D_PCIE_PHY_ROOT_DIV 121
+#define IMX7D_EPDC_PIXEL_ROOT_CLK 122
+#define IMX7D_EPDC_PIXEL_ROOT_SRC 123
+#define IMX7D_EPDC_PIXEL_ROOT_CG 124
+#define IMX7D_EPDC_PIXEL_ROOT_DIV 125
+#define IMX7D_LCDIF_PIXEL_ROOT_CLK 126
+#define IMX7D_LCDIF_PIXEL_ROOT_SRC 127
+#define IMX7D_LCDIF_PIXEL_ROOT_CG 128
+#define IMX7D_LCDIF_PIXEL_ROOT_DIV 129
+#define IMX7D_MIPI_DSI_ROOT_CLK 130
+#define IMX7D_MIPI_DSI_ROOT_SRC 131
+#define IMX7D_MIPI_DSI_ROOT_CG 132
+#define IMX7D_MIPI_DSI_ROOT_DIV 133
+#define IMX7D_MIPI_CSI_ROOT_CLK 134
+#define IMX7D_MIPI_CSI_ROOT_SRC 135
+#define IMX7D_MIPI_CSI_ROOT_CG 136
+#define IMX7D_MIPI_CSI_ROOT_DIV 137
+#define IMX7D_MIPI_DPHY_ROOT_CLK 138
+#define IMX7D_MIPI_DPHY_ROOT_SRC 139
+#define IMX7D_MIPI_DPHY_ROOT_CG 140
+#define IMX7D_MIPI_DPHY_ROOT_DIV 141
+#define IMX7D_SAI1_ROOT_CLK 142
+#define IMX7D_SAI1_ROOT_SRC 143
+#define IMX7D_SAI1_ROOT_CG 144
+#define IMX7D_SAI1_ROOT_DIV 145
+#define IMX7D_SAI2_ROOT_CLK 146
+#define IMX7D_SAI2_ROOT_SRC 147
+#define IMX7D_SAI2_ROOT_CG 148
+#define IMX7D_SAI2_ROOT_DIV 149
+#define IMX7D_SAI3_ROOT_CLK 150
+#define IMX7D_SAI3_ROOT_SRC 151
+#define IMX7D_SAI3_ROOT_CG 152
+#define IMX7D_SAI3_ROOT_DIV 153
+#define IMX7D_SPDIF_ROOT_CLK 154
+#define IMX7D_SPDIF_ROOT_SRC 155
+#define IMX7D_SPDIF_ROOT_CG 156
+#define IMX7D_SPDIF_ROOT_DIV 157
+#define IMX7D_ENET1_IPG_ROOT_CLK 158
+#define IMX7D_ENET1_REF_ROOT_SRC 159
+#define IMX7D_ENET1_REF_ROOT_CG 160
+#define IMX7D_ENET1_REF_ROOT_DIV 161
+#define IMX7D_ENET1_TIME_ROOT_CLK 162
+#define IMX7D_ENET1_TIME_ROOT_SRC 163
+#define IMX7D_ENET1_TIME_ROOT_CG 164
+#define IMX7D_ENET1_TIME_ROOT_DIV 165
+#define IMX7D_ENET2_IPG_ROOT_CLK 166
+#define IMX7D_ENET2_REF_ROOT_SRC 167
+#define IMX7D_ENET2_REF_ROOT_CG 168
+#define IMX7D_ENET2_REF_ROOT_DIV 169
+#define IMX7D_ENET2_TIME_ROOT_CLK 170
+#define IMX7D_ENET2_TIME_ROOT_SRC 171
+#define IMX7D_ENET2_TIME_ROOT_CG 172
+#define IMX7D_ENET2_TIME_ROOT_DIV 173
+#define IMX7D_ENET_PHY_REF_ROOT_CLK 174
+#define IMX7D_ENET_PHY_REF_ROOT_SRC 175
+#define IMX7D_ENET_PHY_REF_ROOT_CG 176
+#define IMX7D_ENET_PHY_REF_ROOT_DIV 177
+#define IMX7D_EIM_ROOT_CLK 178
+#define IMX7D_EIM_ROOT_SRC 179
+#define IMX7D_EIM_ROOT_CG 180
+#define IMX7D_EIM_ROOT_DIV 181
+#define IMX7D_NAND_ROOT_CLK 182
+#define IMX7D_NAND_ROOT_SRC 183
+#define IMX7D_NAND_ROOT_CG 184
+#define IMX7D_NAND_ROOT_DIV 185
+#define IMX7D_QSPI_ROOT_CLK 186
+#define IMX7D_QSPI_ROOT_SRC 187
+#define IMX7D_QSPI_ROOT_CG 188
+#define IMX7D_QSPI_ROOT_DIV 189
+#define IMX7D_USDHC1_ROOT_CLK 190
+#define IMX7D_USDHC1_ROOT_SRC 191
+#define IMX7D_USDHC1_ROOT_CG 192
+#define IMX7D_USDHC1_ROOT_DIV 193
+#define IMX7D_USDHC2_ROOT_CLK 194
+#define IMX7D_USDHC2_ROOT_SRC 195
+#define IMX7D_USDHC2_ROOT_CG 196
+#define IMX7D_USDHC2_ROOT_DIV 197
+#define IMX7D_USDHC3_ROOT_CLK 198
+#define IMX7D_USDHC3_ROOT_SRC 199
+#define IMX7D_USDHC3_ROOT_CG 200
+#define IMX7D_USDHC3_ROOT_DIV 201
+#define IMX7D_CAN1_ROOT_CLK 202
+#define IMX7D_CAN1_ROOT_SRC 203
+#define IMX7D_CAN1_ROOT_CG 204
+#define IMX7D_CAN1_ROOT_DIV 205
+#define IMX7D_CAN2_ROOT_CLK 206
+#define IMX7D_CAN2_ROOT_SRC 207
+#define IMX7D_CAN2_ROOT_CG 208
+#define IMX7D_CAN2_ROOT_DIV 209
+#define IMX7D_I2C1_ROOT_CLK 210
+#define IMX7D_I2C1_ROOT_SRC 211
+#define IMX7D_I2C1_ROOT_CG 212
+#define IMX7D_I2C1_ROOT_DIV 213
+#define IMX7D_I2C2_ROOT_CLK 214
+#define IMX7D_I2C2_ROOT_SRC 215
+#define IMX7D_I2C2_ROOT_CG 216
+#define IMX7D_I2C2_ROOT_DIV 217
+#define IMX7D_I2C3_ROOT_CLK 218
+#define IMX7D_I2C3_ROOT_SRC 219
+#define IMX7D_I2C3_ROOT_CG 220
+#define IMX7D_I2C3_ROOT_DIV 221
+#define IMX7D_I2C4_ROOT_CLK 222
+#define IMX7D_I2C4_ROOT_SRC 223
+#define IMX7D_I2C4_ROOT_CG 224
+#define IMX7D_I2C4_ROOT_DIV 225
+#define IMX7D_UART1_ROOT_CLK 226
+#define IMX7D_UART1_ROOT_SRC 227
+#define IMX7D_UART1_ROOT_CG 228
+#define IMX7D_UART1_ROOT_DIV 229
+#define IMX7D_UART2_ROOT_CLK 230
+#define IMX7D_UART2_ROOT_SRC 231
+#define IMX7D_UART2_ROOT_CG 232
+#define IMX7D_UART2_ROOT_DIV 233
+#define IMX7D_UART3_ROOT_CLK 234
+#define IMX7D_UART3_ROOT_SRC 235
+#define IMX7D_UART3_ROOT_CG 236
+#define IMX7D_UART3_ROOT_DIV 237
+#define IMX7D_UART4_ROOT_CLK 238
+#define IMX7D_UART4_ROOT_SRC 239
+#define IMX7D_UART4_ROOT_CG 240
+#define IMX7D_UART4_ROOT_DIV 241
+#define IMX7D_UART5_ROOT_CLK 242
+#define IMX7D_UART5_ROOT_SRC 243
+#define IMX7D_UART5_ROOT_CG 244
+#define IMX7D_UART5_ROOT_DIV 245
+#define IMX7D_UART6_ROOT_CLK 246
+#define IMX7D_UART6_ROOT_SRC 247
+#define IMX7D_UART6_ROOT_CG 248
+#define IMX7D_UART6_ROOT_DIV 249
+#define IMX7D_UART7_ROOT_CLK 250
+#define IMX7D_UART7_ROOT_SRC 251
+#define IMX7D_UART7_ROOT_CG 252
+#define IMX7D_UART7_ROOT_DIV 253
+#define IMX7D_ECSPI1_ROOT_CLK 254
+#define IMX7D_ECSPI1_ROOT_SRC 255
+#define IMX7D_ECSPI1_ROOT_CG 256
+#define IMX7D_ECSPI1_ROOT_DIV 257
+#define IMX7D_ECSPI2_ROOT_CLK 258
+#define IMX7D_ECSPI2_ROOT_SRC 259
+#define IMX7D_ECSPI2_ROOT_CG 260
+#define IMX7D_ECSPI2_ROOT_DIV 261
+#define IMX7D_ECSPI3_ROOT_CLK 262
+#define IMX7D_ECSPI3_ROOT_SRC 263
+#define IMX7D_ECSPI3_ROOT_CG 264
+#define IMX7D_ECSPI3_ROOT_DIV 265
+#define IMX7D_ECSPI4_ROOT_CLK 266
+#define IMX7D_ECSPI4_ROOT_SRC 267
+#define IMX7D_ECSPI4_ROOT_CG 268
+#define IMX7D_ECSPI4_ROOT_DIV 269
+#define IMX7D_PWM1_ROOT_CLK 270
+#define IMX7D_PWM1_ROOT_SRC 271
+#define IMX7D_PWM1_ROOT_CG 272
+#define IMX7D_PWM1_ROOT_DIV 273
+#define IMX7D_PWM2_ROOT_CLK 274
+#define IMX7D_PWM2_ROOT_SRC 275
+#define IMX7D_PWM2_ROOT_CG 276
+#define IMX7D_PWM2_ROOT_DIV 277
+#define IMX7D_PWM3_ROOT_CLK 278
+#define IMX7D_PWM3_ROOT_SRC 279
+#define IMX7D_PWM3_ROOT_CG 280
+#define IMX7D_PWM3_ROOT_DIV 281
+#define IMX7D_PWM4_ROOT_CLK 282
+#define IMX7D_PWM4_ROOT_SRC 283
+#define IMX7D_PWM4_ROOT_CG 284
+#define IMX7D_PWM4_ROOT_DIV 285
+#define IMX7D_FLEXTIMER1_ROOT_CLK 286
+#define IMX7D_FLEXTIMER1_ROOT_SRC 287
+#define IMX7D_FLEXTIMER1_ROOT_CG 288
+#define IMX7D_FLEXTIMER1_ROOT_DIV 289
+#define IMX7D_FLEXTIMER2_ROOT_CLK 290
+#define IMX7D_FLEXTIMER2_ROOT_SRC 291
+#define IMX7D_FLEXTIMER2_ROOT_CG 292
+#define IMX7D_FLEXTIMER2_ROOT_DIV 293
+#define IMX7D_SIM1_ROOT_CLK 294
+#define IMX7D_SIM1_ROOT_SRC 295
+#define IMX7D_SIM1_ROOT_CG 296
+#define IMX7D_SIM1_ROOT_DIV 297
+#define IMX7D_SIM2_ROOT_CLK 298
+#define IMX7D_SIM2_ROOT_SRC 299
+#define IMX7D_SIM2_ROOT_CG 300
+#define IMX7D_SIM2_ROOT_DIV 301
+#define IMX7D_GPT1_ROOT_CLK 302
+#define IMX7D_GPT1_ROOT_SRC 303
+#define IMX7D_GPT1_ROOT_CG 304
+#define IMX7D_GPT1_ROOT_DIV 305
+#define IMX7D_GPT2_ROOT_CLK 306
+#define IMX7D_GPT2_ROOT_SRC 307
+#define IMX7D_GPT2_ROOT_CG 308
+#define IMX7D_GPT2_ROOT_DIV 309
+#define IMX7D_GPT3_ROOT_CLK 310
+#define IMX7D_GPT3_ROOT_SRC 311
+#define IMX7D_GPT3_ROOT_CG 312
+#define IMX7D_GPT3_ROOT_DIV 313
+#define IMX7D_GPT4_ROOT_CLK 314
+#define IMX7D_GPT4_ROOT_SRC 315
+#define IMX7D_GPT4_ROOT_CG 316
+#define IMX7D_GPT4_ROOT_DIV 317
+#define IMX7D_TRACE_ROOT_CLK 318
+#define IMX7D_TRACE_ROOT_SRC 319
+#define IMX7D_TRACE_ROOT_CG 320
+#define IMX7D_TRACE_ROOT_DIV 321
+#define IMX7D_WDOG1_ROOT_CLK 322
+#define IMX7D_WDOG_ROOT_SRC 323
+#define IMX7D_WDOG_ROOT_CG 324
+#define IMX7D_WDOG_ROOT_DIV 325
+#define IMX7D_CSI_MCLK_ROOT_CLK 326
+#define IMX7D_CSI_MCLK_ROOT_SRC 327
+#define IMX7D_CSI_MCLK_ROOT_CG 328
+#define IMX7D_CSI_MCLK_ROOT_DIV 329
+#define IMX7D_AUDIO_MCLK_ROOT_CLK 330
+#define IMX7D_AUDIO_MCLK_ROOT_SRC 331
+#define IMX7D_AUDIO_MCLK_ROOT_CG 332
+#define IMX7D_AUDIO_MCLK_ROOT_DIV 333
+#define IMX7D_WRCLK_ROOT_CLK 334
+#define IMX7D_WRCLK_ROOT_SRC 335
+#define IMX7D_WRCLK_ROOT_CG 336
+#define IMX7D_WRCLK_ROOT_DIV 337
+#define IMX7D_CLKO1_ROOT_SRC 338
+#define IMX7D_CLKO1_ROOT_CG 339
+#define IMX7D_CLKO1_ROOT_DIV 340
+#define IMX7D_CLKO2_ROOT_SRC 341
+#define IMX7D_CLKO2_ROOT_CG 342
+#define IMX7D_CLKO2_ROOT_DIV 343
+#define IMX7D_MAIN_AXI_ROOT_PRE_DIV 344
+#define IMX7D_DISP_AXI_ROOT_PRE_DIV 345
+#define IMX7D_ENET_AXI_ROOT_PRE_DIV 346
+#define IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV 347
+#define IMX7D_AHB_CHANNEL_ROOT_PRE_DIV 348
+#define IMX7D_USB_HSIC_ROOT_PRE_DIV 349
+#define IMX7D_PCIE_CTRL_ROOT_PRE_DIV 350
+#define IMX7D_PCIE_PHY_ROOT_PRE_DIV 351
+#define IMX7D_EPDC_PIXEL_ROOT_PRE_DIV 352
+#define IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV 353
+#define IMX7D_MIPI_DSI_ROOT_PRE_DIV 354
+#define IMX7D_MIPI_CSI_ROOT_PRE_DIV 355
+#define IMX7D_MIPI_DPHY_ROOT_PRE_DIV 356
+#define IMX7D_SAI1_ROOT_PRE_DIV 357
+#define IMX7D_SAI2_ROOT_PRE_DIV 358
+#define IMX7D_SAI3_ROOT_PRE_DIV 359
+#define IMX7D_SPDIF_ROOT_PRE_DIV 360
+#define IMX7D_ENET1_REF_ROOT_PRE_DIV 361
+#define IMX7D_ENET1_TIME_ROOT_PRE_DIV 362
+#define IMX7D_ENET2_REF_ROOT_PRE_DIV 363
+#define IMX7D_ENET2_TIME_ROOT_PRE_DIV 364
+#define IMX7D_ENET_PHY_REF_ROOT_PRE_DIV 365
+#define IMX7D_EIM_ROOT_PRE_DIV 366
+#define IMX7D_NAND_ROOT_PRE_DIV 367
+#define IMX7D_QSPI_ROOT_PRE_DIV 368
+#define IMX7D_USDHC1_ROOT_PRE_DIV 369
+#define IMX7D_USDHC2_ROOT_PRE_DIV 370
+#define IMX7D_USDHC3_ROOT_PRE_DIV 371
+#define IMX7D_CAN1_ROOT_PRE_DIV 372
+#define IMX7D_CAN2_ROOT_PRE_DIV 373
+#define IMX7D_I2C1_ROOT_PRE_DIV 374
+#define IMX7D_I2C2_ROOT_PRE_DIV 375
+#define IMX7D_I2C3_ROOT_PRE_DIV 376
+#define IMX7D_I2C4_ROOT_PRE_DIV 377
+#define IMX7D_UART1_ROOT_PRE_DIV 378
+#define IMX7D_UART2_ROOT_PRE_DIV 379
+#define IMX7D_UART3_ROOT_PRE_DIV 380
+#define IMX7D_UART4_ROOT_PRE_DIV 381
+#define IMX7D_UART5_ROOT_PRE_DIV 382
+#define IMX7D_UART6_ROOT_PRE_DIV 383
+#define IMX7D_UART7_ROOT_PRE_DIV 384
+#define IMX7D_ECSPI1_ROOT_PRE_DIV 385
+#define IMX7D_ECSPI2_ROOT_PRE_DIV 386
+#define IMX7D_ECSPI3_ROOT_PRE_DIV 387
+#define IMX7D_ECSPI4_ROOT_PRE_DIV 388
+#define IMX7D_PWM1_ROOT_PRE_DIV 389
+#define IMX7D_PWM2_ROOT_PRE_DIV 390
+#define IMX7D_PWM3_ROOT_PRE_DIV 391
+#define IMX7D_PWM4_ROOT_PRE_DIV 392
+#define IMX7D_FLEXTIMER1_ROOT_PRE_DIV 393
+#define IMX7D_FLEXTIMER2_ROOT_PRE_DIV 394
+#define IMX7D_SIM1_ROOT_PRE_DIV 395
+#define IMX7D_SIM2_ROOT_PRE_DIV 396
+#define IMX7D_GPT1_ROOT_PRE_DIV 397
+#define IMX7D_GPT2_ROOT_PRE_DIV 398
+#define IMX7D_GPT3_ROOT_PRE_DIV 399
+#define IMX7D_GPT4_ROOT_PRE_DIV 400
+#define IMX7D_TRACE_ROOT_PRE_DIV 401
+#define IMX7D_WDOG_ROOT_PRE_DIV 402
+#define IMX7D_CSI_MCLK_ROOT_PRE_DIV 403
+#define IMX7D_AUDIO_MCLK_ROOT_PRE_DIV 404
+#define IMX7D_WRCLK_ROOT_PRE_DIV 405
+#define IMX7D_CLKO1_ROOT_PRE_DIV 406
+#define IMX7D_CLKO2_ROOT_PRE_DIV 407
+#define IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV 408
+#define IMX7D_DRAM_ALT_ROOT_PRE_DIV 409
+#define IMX7D_LVDS1_IN_CLK 410
+#define IMX7D_LVDS1_OUT_SEL 411
+#define IMX7D_LVDS1_OUT_CLK 412
+#define IMX7D_CLK_DUMMY 413
+#define IMX7D_GPT_3M_CLK 414
+#define IMX7D_OCRAM_CLK 415
+#define IMX7D_OCRAM_S_CLK 416
+#define IMX7D_WDOG2_ROOT_CLK 417
+#define IMX7D_WDOG3_ROOT_CLK 418
+#define IMX7D_WDOG4_ROOT_CLK 419
+#define IMX7D_SDMA_CORE_CLK 420
+#define IMX7D_USB1_MAIN_480M_CLK 421
+#define IMX7D_USB_CTRL_CLK 422
+#define IMX7D_USB_PHY1_CLK 423
+#define IMX7D_USB_PHY2_CLK 424
+#define IMX7D_IPG_ROOT_CLK 425
+#define IMX7D_SAI1_IPG_CLK 426
+#define IMX7D_SAI2_IPG_CLK 427
+#define IMX7D_SAI3_IPG_CLK 428
+#define IMX7D_PLL_AUDIO_TEST_DIV 429
+#define IMX7D_PLL_AUDIO_POST_DIV 430
+#define IMX7D_PLL_VIDEO_TEST_DIV 431
+#define IMX7D_PLL_VIDEO_POST_DIV 432
+#define IMX7D_MU_ROOT_CLK 433
+#define IMX7D_SEMA4_HS_ROOT_CLK 434
+#define IMX7D_PLL_DRAM_TEST_DIV 435
+#define IMX7D_ADC_ROOT_CLK 436
+#define IMX7D_CLK_ARM 437
+#define IMX7D_CKIL 438
+#define IMX7D_OCOTP_CLK 439
+#define IMX7D_NAND_RAWNAND_CLK 440
+#define IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441
+#define IMX7D_SNVS_CLK 442
+#define IMX7D_CAAM_CLK 443
+#define IMX7D_KPP_ROOT_CLK 444
+#define IMX7D_CLK_END 445
+#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
new file mode 100644
index 0000000..6f66f90
--- /dev/null
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX7ULP_H
+#define __DT_BINDINGS_CLOCK_IMX7ULP_H
+
+/* SCG1 */
+
+#define IMX7ULP_CLK_DUMMY 0
+#define IMX7ULP_CLK_ROSC 1
+#define IMX7ULP_CLK_SOSC 2
+#define IMX7ULP_CLK_FIRC 3
+#define IMX7ULP_CLK_SPLL_PRE_SEL 4
+#define IMX7ULP_CLK_SPLL_PRE_DIV 5
+#define IMX7ULP_CLK_SPLL 6
+#define IMX7ULP_CLK_SPLL_POST_DIV1 7
+#define IMX7ULP_CLK_SPLL_POST_DIV2 8
+#define IMX7ULP_CLK_SPLL_PFD0 9
+#define IMX7ULP_CLK_SPLL_PFD1 10
+#define IMX7ULP_CLK_SPLL_PFD2 11
+#define IMX7ULP_CLK_SPLL_PFD3 12
+#define IMX7ULP_CLK_SPLL_PFD_SEL 13
+#define IMX7ULP_CLK_SPLL_SEL 14
+#define IMX7ULP_CLK_APLL_PRE_SEL 15
+#define IMX7ULP_CLK_APLL_PRE_DIV 16
+#define IMX7ULP_CLK_APLL 17
+#define IMX7ULP_CLK_APLL_POST_DIV1 18
+#define IMX7ULP_CLK_APLL_POST_DIV2 19
+#define IMX7ULP_CLK_APLL_PFD0 20
+#define IMX7ULP_CLK_APLL_PFD1 21
+#define IMX7ULP_CLK_APLL_PFD2 22
+#define IMX7ULP_CLK_APLL_PFD3 23
+#define IMX7ULP_CLK_APLL_PFD_SEL 24
+#define IMX7ULP_CLK_APLL_SEL 25
+#define IMX7ULP_CLK_UPLL 26
+#define IMX7ULP_CLK_SYS_SEL 27
+#define IMX7ULP_CLK_CORE_DIV 28
+#define IMX7ULP_CLK_BUS_DIV 29
+#define IMX7ULP_CLK_PLAT_DIV 30
+#define IMX7ULP_CLK_DDR_SEL 31
+#define IMX7ULP_CLK_DDR_DIV 32
+#define IMX7ULP_CLK_NIC_SEL 33
+#define IMX7ULP_CLK_NIC0_DIV 34
+#define IMX7ULP_CLK_GPU_DIV 35
+#define IMX7ULP_CLK_NIC1_DIV 36
+#define IMX7ULP_CLK_NIC1_BUS_DIV 37
+#define IMX7ULP_CLK_NIC1_EXT_DIV 38
+#define IMX7ULP_CLK_MIPI_PLL 39
+#define IMX7ULP_CLK_SIRC 40
+#define IMX7ULP_CLK_SOSC_BUS_CLK 41
+#define IMX7ULP_CLK_FIRC_BUS_CLK 42
+#define IMX7ULP_CLK_SPLL_BUS_CLK 43
+#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
+#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
+
+#define IMX7ULP_CLK_SCG1_END 46
+
+/* PCC2 */
+#define IMX7ULP_CLK_DMA1 0
+#define IMX7ULP_CLK_RGPIO2P1 1
+#define IMX7ULP_CLK_FLEXBUS 2
+#define IMX7ULP_CLK_SEMA42_1 3
+#define IMX7ULP_CLK_DMA_MUX1 4
+#define IMX7ULP_CLK_CAAM 6
+#define IMX7ULP_CLK_LPTPM4 7
+#define IMX7ULP_CLK_LPTPM5 8
+#define IMX7ULP_CLK_LPIT1 9
+#define IMX7ULP_CLK_LPSPI2 10
+#define IMX7ULP_CLK_LPSPI3 11
+#define IMX7ULP_CLK_LPI2C4 12
+#define IMX7ULP_CLK_LPI2C5 13
+#define IMX7ULP_CLK_LPUART4 14
+#define IMX7ULP_CLK_LPUART5 15
+#define IMX7ULP_CLK_FLEXIO1 16
+#define IMX7ULP_CLK_USB0 17
+#define IMX7ULP_CLK_USB1 18
+#define IMX7ULP_CLK_USB_PHY 19
+#define IMX7ULP_CLK_USB_PL301 20
+#define IMX7ULP_CLK_USDHC0 21
+#define IMX7ULP_CLK_USDHC1 22
+#define IMX7ULP_CLK_WDG1 23
+#define IMX7ULP_CLK_WDG2 24
+
+#define IMX7ULP_CLK_PCC2_END 25
+
+/* PCC3 */
+#define IMX7ULP_CLK_LPTPM6 0
+#define IMX7ULP_CLK_LPTPM7 1
+#define IMX7ULP_CLK_LPI2C6 2
+#define IMX7ULP_CLK_LPI2C7 3
+#define IMX7ULP_CLK_LPUART6 4
+#define IMX7ULP_CLK_LPUART7 5
+#define IMX7ULP_CLK_VIU 6
+#define IMX7ULP_CLK_DSI 7
+#define IMX7ULP_CLK_LCDIF 8
+#define IMX7ULP_CLK_MMDC 9
+#define IMX7ULP_CLK_PCTLC 10
+#define IMX7ULP_CLK_PCTLD 11
+#define IMX7ULP_CLK_PCTLE 12
+#define IMX7ULP_CLK_PCTLF 13
+#define IMX7ULP_CLK_GPU3D 14
+#define IMX7ULP_CLK_GPU2D 15
+
+#define IMX7ULP_CLK_PCC3_END 16
+
+/* SMC1 */
+#define IMX7ULP_CLK_ARM 0
+
+#define IMX7ULP_CLK_SMC1_END 1
+
+#endif /* __DT_BINDINGS_CLOCK_IMX7ULP_H */
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
new file mode 100644
index 0000000..673a8c6
--- /dev/null
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX_H
+#define __DT_BINDINGS_CLOCK_IMX_H
+
+/* SCU Clocks */
+
+#define IMX_CLK_DUMMY 0
+
+/* CPU */
+#define IMX_A35_CLK 1
+
+/* LSIO SS */
+#define IMX_LSIO_MEM_CLK 2
+#define IMX_LSIO_BUS_CLK 3
+#define IMX_LSIO_PWM0_CLK 10
+#define IMX_LSIO_PWM1_CLK 11
+#define IMX_LSIO_PWM2_CLK 12
+#define IMX_LSIO_PWM3_CLK 13
+#define IMX_LSIO_PWM4_CLK 14
+#define IMX_LSIO_PWM5_CLK 15
+#define IMX_LSIO_PWM6_CLK 16
+#define IMX_LSIO_PWM7_CLK 17
+#define IMX_LSIO_GPT0_CLK 18
+#define IMX_LSIO_GPT1_CLK 19
+#define IMX_LSIO_GPT2_CLK 20
+#define IMX_LSIO_GPT3_CLK 21
+#define IMX_LSIO_GPT4_CLK 22
+#define IMX_LSIO_FSPI0_CLK 23
+#define IMX_LSIO_FSPI1_CLK 24
+
+/* Connectivity SS */
+#define IMX_CONN_AXI_CLK_ROOT 30
+#define IMX_CONN_AHB_CLK_ROOT 31
+#define IMX_CONN_IPG_CLK_ROOT 32
+#define IMX_CONN_SDHC0_CLK 40
+#define IMX_CONN_SDHC1_CLK 41
+#define IMX_CONN_SDHC2_CLK 42
+#define IMX_CONN_ENET0_ROOT_CLK 43
+#define IMX_CONN_ENET0_BYPASS_CLK 44
+#define IMX_CONN_ENET0_RGMII_CLK 45
+#define IMX_CONN_ENET1_ROOT_CLK 46
+#define IMX_CONN_ENET1_BYPASS_CLK 47
+#define IMX_CONN_ENET1_RGMII_CLK 48
+#define IMX_CONN_GPMI_BCH_IO_CLK 49
+#define IMX_CONN_GPMI_BCH_CLK 50
+#define IMX_CONN_USB2_ACLK 51
+#define IMX_CONN_USB2_BUS_CLK 52
+#define IMX_CONN_USB2_LPM_CLK 53
+
+/* HSIO SS */
+#define IMX_HSIO_AXI_CLK 60
+#define IMX_HSIO_PER_CLK 61
+
+/* Display controller SS */
+#define IMX_DC_AXI_EXT_CLK 70
+#define IMX_DC_AXI_INT_CLK 71
+#define IMX_DC_CFG_CLK 72
+#define IMX_DC0_PLL0_CLK 80
+#define IMX_DC0_PLL1_CLK 81
+#define IMX_DC0_DISP0_CLK 82
+#define IMX_DC0_DISP1_CLK 83
+
+/* MIPI-LVDS SS */
+#define IMX_MIPI_IPG_CLK 90
+#define IMX_MIPI0_PIXEL_CLK 100
+#define IMX_MIPI0_BYPASS_CLK 101
+#define IMX_MIPI0_LVDS_PIXEL_CLK 102
+#define IMX_MIPI0_LVDS_BYPASS_CLK 103
+#define IMX_MIPI0_LVDS_PHY_CLK 104
+#define IMX_MIPI0_I2C0_CLK 105
+#define IMX_MIPI0_I2C1_CLK 106
+#define IMX_MIPI0_PWM0_CLK 107
+#define IMX_MIPI1_PIXEL_CLK 108
+#define IMX_MIPI1_BYPASS_CLK 109
+#define IMX_MIPI1_LVDS_PIXEL_CLK 110
+#define IMX_MIPI1_LVDS_BYPASS_CLK 111
+#define IMX_MIPI1_LVDS_PHY_CLK 112
+#define IMX_MIPI1_I2C0_CLK 113
+#define IMX_MIPI1_I2C1_CLK 114
+#define IMX_MIPI1_PWM0_CLK 115
+
+/* IMG SS */
+#define IMX_IMG_AXI_CLK 120
+#define IMX_IMG_IPG_CLK 121
+#define IMX_IMG_PXL_CLK 122
+
+/* MIPI-CSI SS */
+#define IMX_CSI0_CORE_CLK 130
+#define IMX_CSI0_ESC_CLK 131
+#define IMX_CSI0_PWM0_CLK 132
+#define IMX_CSI0_I2C0_CLK 133
+
+/* PARALLEL CSI SS */
+#define IMX_PARALLEL_CSI_DPLL_CLK 140
+#define IMX_PARALLEL_CSI_PIXEL_CLK 141
+#define IMX_PARALLEL_CSI_MCLK_CLK 142
+
+/* VPU SS */
+#define IMX_VPU_ENC_CLK 150
+#define IMX_VPU_DEC_CLK 151
+
+/* GPU SS */
+#define IMX_GPU0_CORE_CLK 160
+#define IMX_GPU0_SHADER_CLK 161
+
+/* ADMA SS */
+#define IMX_ADMA_IPG_CLK_ROOT 165
+#define IMX_ADMA_UART0_CLK 170
+#define IMX_ADMA_UART1_CLK 171
+#define IMX_ADMA_UART2_CLK 172
+#define IMX_ADMA_UART3_CLK 173
+#define IMX_ADMA_SPI0_CLK 174
+#define IMX_ADMA_SPI1_CLK 175
+#define IMX_ADMA_SPI2_CLK 176
+#define IMX_ADMA_SPI3_CLK 177
+#define IMX_ADMA_CAN0_CLK 178
+#define IMX_ADMA_CAN1_CLK 179
+#define IMX_ADMA_CAN2_CLK 180
+#define IMX_ADMA_I2C0_CLK 181
+#define IMX_ADMA_I2C1_CLK 182
+#define IMX_ADMA_I2C2_CLK 183
+#define IMX_ADMA_I2C3_CLK 184
+#define IMX_ADMA_FTM0_CLK 185
+#define IMX_ADMA_FTM1_CLK 186
+#define IMX_ADMA_ADC0_CLK 187
+#define IMX_ADMA_PWM_CLK 188
+#define IMX_ADMA_LCD_CLK 189
+
+#define IMX_SCU_CLK_END 190
+
+/* LPCG clocks */
+
+/* LSIO SS LPCG */
+#define IMX_LSIO_LPCG_PWM0_IPG_CLK 0
+#define IMX_LSIO_LPCG_PWM0_IPG_S_CLK 1
+#define IMX_LSIO_LPCG_PWM0_IPG_HF_CLK 2
+#define IMX_LSIO_LPCG_PWM0_IPG_SLV_CLK 3
+#define IMX_LSIO_LPCG_PWM0_IPG_MSTR_CLK 4
+#define IMX_LSIO_LPCG_PWM1_IPG_CLK 5
+#define IMX_LSIO_LPCG_PWM1_IPG_S_CLK 6
+#define IMX_LSIO_LPCG_PWM1_IPG_HF_CLK 7
+#define IMX_LSIO_LPCG_PWM1_IPG_SLV_CLK 8
+#define IMX_LSIO_LPCG_PWM1_IPG_MSTR_CLK 9
+#define IMX_LSIO_LPCG_PWM2_IPG_CLK 10
+#define IMX_LSIO_LPCG_PWM2_IPG_S_CLK 11
+#define IMX_LSIO_LPCG_PWM2_IPG_HF_CLK 12
+#define IMX_LSIO_LPCG_PWM2_IPG_SLV_CLK 13
+#define IMX_LSIO_LPCG_PWM2_IPG_MSTR_CLK 14
+#define IMX_LSIO_LPCG_PWM3_IPG_CLK 15
+#define IMX_LSIO_LPCG_PWM3_IPG_S_CLK 16
+#define IMX_LSIO_LPCG_PWM3_IPG_HF_CLK 17
+#define IMX_LSIO_LPCG_PWM3_IPG_SLV_CLK 18
+#define IMX_LSIO_LPCG_PWM3_IPG_MSTR_CLK 19
+#define IMX_LSIO_LPCG_PWM4_IPG_CLK 20
+#define IMX_LSIO_LPCG_PWM4_IPG_S_CLK 21
+#define IMX_LSIO_LPCG_PWM4_IPG_HF_CLK 22
+#define IMX_LSIO_LPCG_PWM4_IPG_SLV_CLK 23
+#define IMX_LSIO_LPCG_PWM4_IPG_MSTR_CLK 24
+#define IMX_LSIO_LPCG_PWM5_IPG_CLK 25
+#define IMX_LSIO_LPCG_PWM5_IPG_S_CLK 26
+#define IMX_LSIO_LPCG_PWM5_IPG_HF_CLK 27
+#define IMX_LSIO_LPCG_PWM5_IPG_SLV_CLK 28
+#define IMX_LSIO_LPCG_PWM5_IPG_MSTR_CLK 29
+#define IMX_LSIO_LPCG_PWM6_IPG_CLK 30
+#define IMX_LSIO_LPCG_PWM6_IPG_S_CLK 31
+#define IMX_LSIO_LPCG_PWM6_IPG_HF_CLK 32
+#define IMX_LSIO_LPCG_PWM6_IPG_SLV_CLK 33
+#define IMX_LSIO_LPCG_PWM6_IPG_MSTR_CLK 34
+#define IMX_LSIO_LPCG_PWM7_IPG_CLK 35
+#define IMX_LSIO_LPCG_PWM7_IPG_S_CLK 36
+#define IMX_LSIO_LPCG_PWM7_IPG_HF_CLK 37
+#define IMX_LSIO_LPCG_PWM7_IPG_SLV_CLK 38
+#define IMX_LSIO_LPCG_PWM7_IPG_MSTR_CLK 39
+#define IMX_LSIO_LPCG_GPT0_IPG_CLK 40
+#define IMX_LSIO_LPCG_GPT0_IPG_S_CLK 41
+#define IMX_LSIO_LPCG_GPT0_IPG_HF_CLK 42
+#define IMX_LSIO_LPCG_GPT0_IPG_SLV_CLK 43
+#define IMX_LSIO_LPCG_GPT0_IPG_MSTR_CLK 44
+#define IMX_LSIO_LPCG_GPT1_IPG_CLK 45
+#define IMX_LSIO_LPCG_GPT1_IPG_S_CLK 46
+#define IMX_LSIO_LPCG_GPT1_IPG_HF_CLK 47
+#define IMX_LSIO_LPCG_GPT1_IPG_SLV_CLK 48
+#define IMX_LSIO_LPCG_GPT1_IPG_MSTR_CLK 49
+#define IMX_LSIO_LPCG_GPT2_IPG_CLK 50
+#define IMX_LSIO_LPCG_GPT2_IPG_S_CLK 51
+#define IMX_LSIO_LPCG_GPT2_IPG_HF_CLK 52
+#define IMX_LSIO_LPCG_GPT2_IPG_SLV_CLK 53
+#define IMX_LSIO_LPCG_GPT2_IPG_MSTR_CLK 54
+#define IMX_LSIO_LPCG_GPT3_IPG_CLK 55
+#define IMX_LSIO_LPCG_GPT3_IPG_S_CLK 56
+#define IMX_LSIO_LPCG_GPT3_IPG_HF_CLK 57
+#define IMX_LSIO_LPCG_GPT3_IPG_SLV_CLK 58
+#define IMX_LSIO_LPCG_GPT3_IPG_MSTR_CLK 59
+#define IMX_LSIO_LPCG_GPT4_IPG_CLK 60
+#define IMX_LSIO_LPCG_GPT4_IPG_S_CLK 61
+#define IMX_LSIO_LPCG_GPT4_IPG_HF_CLK 62
+#define IMX_LSIO_LPCG_GPT4_IPG_SLV_CLK 63
+#define IMX_LSIO_LPCG_GPT4_IPG_MSTR_CLK 64
+#define IMX_LSIO_LPCG_FSPI0_HCLK 65
+#define IMX_LSIO_LPCG_FSPI0_IPG_CLK 66
+#define IMX_LSIO_LPCG_FSPI0_IPG_S_CLK 67
+#define IMX_LSIO_LPCG_FSPI0_IPG_SFCK 68
+#define IMX_LSIO_LPCG_FSPI1_HCLK 69
+#define IMX_LSIO_LPCG_FSPI1_IPG_CLK 70
+#define IMX_LSIO_LPCG_FSPI1_IPG_S_CLK 71
+#define IMX_LSIO_LPCG_FSPI1_IPG_SFCK 72
+
+#define IMX_LSIO_LPCG_CLK_END 73
+
+/* Connectivity SS LPCG */
+#define IMX_CONN_LPCG_SDHC0_IPG_CLK 0
+#define IMX_CONN_LPCG_SDHC0_PER_CLK 1
+#define IMX_CONN_LPCG_SDHC0_HCLK 2
+#define IMX_CONN_LPCG_SDHC1_IPG_CLK 3
+#define IMX_CONN_LPCG_SDHC1_PER_CLK 4
+#define IMX_CONN_LPCG_SDHC1_HCLK 5
+#define IMX_CONN_LPCG_SDHC2_IPG_CLK 6
+#define IMX_CONN_LPCG_SDHC2_PER_CLK 7
+#define IMX_CONN_LPCG_SDHC2_HCLK 8
+#define IMX_CONN_LPCG_GPMI_APB_CLK 9
+#define IMX_CONN_LPCG_GPMI_BCH_APB_CLK 10
+#define IMX_CONN_LPCG_GPMI_BCH_IO_CLK 11
+#define IMX_CONN_LPCG_GPMI_BCH_CLK 12
+#define IMX_CONN_LPCG_APBHDMA_CLK 13
+#define IMX_CONN_LPCG_ENET0_ROOT_CLK 14
+#define IMX_CONN_LPCG_ENET0_TX_CLK 15
+#define IMX_CONN_LPCG_ENET0_AHB_CLK 16
+#define IMX_CONN_LPCG_ENET0_IPG_S_CLK 17
+#define IMX_CONN_LPCG_ENET0_IPG_CLK 18
+
+#define IMX_CONN_LPCG_ENET1_ROOT_CLK 19
+#define IMX_CONN_LPCG_ENET1_TX_CLK 20
+#define IMX_CONN_LPCG_ENET1_AHB_CLK 21
+#define IMX_CONN_LPCG_ENET1_IPG_S_CLK 22
+#define IMX_CONN_LPCG_ENET1_IPG_CLK 23
+
+#define IMX_CONN_LPCG_CLK_END 24
+
+/* ADMA SS LPCG */
+#define IMX_ADMA_LPCG_UART0_IPG_CLK 0
+#define IMX_ADMA_LPCG_UART0_BAUD_CLK 1
+#define IMX_ADMA_LPCG_UART1_IPG_CLK 2
+#define IMX_ADMA_LPCG_UART1_BAUD_CLK 3
+#define IMX_ADMA_LPCG_UART2_IPG_CLK 4
+#define IMX_ADMA_LPCG_UART2_BAUD_CLK 5
+#define IMX_ADMA_LPCG_UART3_IPG_CLK 6
+#define IMX_ADMA_LPCG_UART3_BAUD_CLK 7
+#define IMX_ADMA_LPCG_SPI0_IPG_CLK 8
+#define IMX_ADMA_LPCG_SPI1_IPG_CLK 9
+#define IMX_ADMA_LPCG_SPI2_IPG_CLK 10
+#define IMX_ADMA_LPCG_SPI3_IPG_CLK 11
+#define IMX_ADMA_LPCG_SPI0_CLK 12
+#define IMX_ADMA_LPCG_SPI1_CLK 13
+#define IMX_ADMA_LPCG_SPI2_CLK 14
+#define IMX_ADMA_LPCG_SPI3_CLK 15
+#define IMX_ADMA_LPCG_CAN0_IPG_CLK 16
+#define IMX_ADMA_LPCG_CAN0_IPG_PE_CLK 17
+#define IMX_ADMA_LPCG_CAN0_IPG_CHI_CLK 18
+#define IMX_ADMA_LPCG_CAN1_IPG_CLK 19
+#define IMX_ADMA_LPCG_CAN1_IPG_PE_CLK 20
+#define IMX_ADMA_LPCG_CAN1_IPG_CHI_CLK 21
+#define IMX_ADMA_LPCG_CAN2_IPG_CLK 22
+#define IMX_ADMA_LPCG_CAN2_IPG_PE_CLK 23
+#define IMX_ADMA_LPCG_CAN2_IPG_CHI_CLK 24
+#define IMX_ADMA_LPCG_I2C0_CLK 25
+#define IMX_ADMA_LPCG_I2C1_CLK 26
+#define IMX_ADMA_LPCG_I2C2_CLK 27
+#define IMX_ADMA_LPCG_I2C3_CLK 28
+#define IMX_ADMA_LPCG_I2C0_IPG_CLK 29
+#define IMX_ADMA_LPCG_I2C1_IPG_CLK 30
+#define IMX_ADMA_LPCG_I2C2_IPG_CLK 31
+#define IMX_ADMA_LPCG_I2C3_IPG_CLK 32
+#define IMX_ADMA_LPCG_FTM0_CLK 33
+#define IMX_ADMA_LPCG_FTM1_CLK 34
+#define IMX_ADMA_LPCG_FTM0_IPG_CLK 35
+#define IMX_ADMA_LPCG_FTM1_IPG_CLK 36
+#define IMX_ADMA_LPCG_PWM_HI_CLK 37
+#define IMX_ADMA_LPCG_PWM_IPG_CLK 38
+#define IMX_ADMA_LPCG_LCD_PIX_CLK 39
+#define IMX_ADMA_LPCG_LCD_APB_CLK 40
+#define IMX_ADMA_LPCG_DSP_ADB_CLK 41
+#define IMX_ADMA_LPCG_DSP_IPG_CLK 42
+#define IMX_ADMA_LPCG_DSP_CORE_CLK 43
+#define IMX_ADMA_LPCG_OCRAM_IPG_CLK 44
+
+#define IMX_ADMA_LPCG_CLK_END 45
+
+#endif /* __DT_BINDINGS_CLOCK_IMX_H */
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
new file mode 100644
index 0000000..07e6c68
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MM_H
+#define __DT_BINDINGS_CLOCK_IMX8MM_H
+
+#define IMX8MM_CLK_DUMMY 0
+#define IMX8MM_CLK_32K 1
+#define IMX8MM_CLK_24M 2
+#define IMX8MM_OSC_HDMI_CLK 3
+#define IMX8MM_CLK_EXT1 4
+#define IMX8MM_CLK_EXT2 5
+#define IMX8MM_CLK_EXT3 6
+#define IMX8MM_CLK_EXT4 7
+#define IMX8MM_AUDIO_PLL1_REF_SEL 8
+#define IMX8MM_AUDIO_PLL2_REF_SEL 9
+#define IMX8MM_VIDEO_PLL1_REF_SEL 10
+#define IMX8MM_DRAM_PLL_REF_SEL 11
+#define IMX8MM_GPU_PLL_REF_SEL 12
+#define IMX8MM_VPU_PLL_REF_SEL 13
+#define IMX8MM_ARM_PLL_REF_SEL 14
+#define IMX8MM_SYS_PLL1_REF_SEL 15
+#define IMX8MM_SYS_PLL2_REF_SEL 16
+#define IMX8MM_SYS_PLL3_REF_SEL 17
+#define IMX8MM_AUDIO_PLL1 18
+#define IMX8MM_AUDIO_PLL2 19
+#define IMX8MM_VIDEO_PLL1 20
+#define IMX8MM_DRAM_PLL 21
+#define IMX8MM_GPU_PLL 22
+#define IMX8MM_VPU_PLL 23
+#define IMX8MM_ARM_PLL 24
+#define IMX8MM_SYS_PLL1 25
+#define IMX8MM_SYS_PLL2 26
+#define IMX8MM_SYS_PLL3 27
+#define IMX8MM_AUDIO_PLL1_BYPASS 28
+#define IMX8MM_AUDIO_PLL2_BYPASS 29
+#define IMX8MM_VIDEO_PLL1_BYPASS 30
+#define IMX8MM_DRAM_PLL_BYPASS 31
+#define IMX8MM_GPU_PLL_BYPASS 32
+#define IMX8MM_VPU_PLL_BYPASS 33
+#define IMX8MM_ARM_PLL_BYPASS 34
+#define IMX8MM_SYS_PLL1_BYPASS 35
+#define IMX8MM_SYS_PLL2_BYPASS 36
+#define IMX8MM_SYS_PLL3_BYPASS 37
+#define IMX8MM_AUDIO_PLL1_OUT 38
+#define IMX8MM_AUDIO_PLL2_OUT 39
+#define IMX8MM_VIDEO_PLL1_OUT 40
+#define IMX8MM_DRAM_PLL_OUT 41
+#define IMX8MM_GPU_PLL_OUT 42
+#define IMX8MM_VPU_PLL_OUT 43
+#define IMX8MM_ARM_PLL_OUT 44
+#define IMX8MM_SYS_PLL1_OUT 45
+#define IMX8MM_SYS_PLL2_OUT 46
+#define IMX8MM_SYS_PLL3_OUT 47
+#define IMX8MM_SYS_PLL1_40M 48
+#define IMX8MM_SYS_PLL1_80M 49
+#define IMX8MM_SYS_PLL1_100M 50
+#define IMX8MM_SYS_PLL1_133M 51
+#define IMX8MM_SYS_PLL1_160M 52
+#define IMX8MM_SYS_PLL1_200M 53
+#define IMX8MM_SYS_PLL1_266M 54
+#define IMX8MM_SYS_PLL1_400M 55
+#define IMX8MM_SYS_PLL1_800M 56
+#define IMX8MM_SYS_PLL2_50M 57
+#define IMX8MM_SYS_PLL2_100M 58
+#define IMX8MM_SYS_PLL2_125M 59
+#define IMX8MM_SYS_PLL2_166M 60
+#define IMX8MM_SYS_PLL2_200M 61
+#define IMX8MM_SYS_PLL2_250M 62
+#define IMX8MM_SYS_PLL2_333M 63
+#define IMX8MM_SYS_PLL2_500M 64
+#define IMX8MM_SYS_PLL2_1000M 65
+
+/* core */
+#define IMX8MM_CLK_A53_SRC 66
+#define IMX8MM_CLK_M4_SRC 67
+#define IMX8MM_CLK_VPU_SRC 68
+#define IMX8MM_CLK_GPU3D_SRC 69
+#define IMX8MM_CLK_GPU2D_SRC 70
+#define IMX8MM_CLK_A53_CG 71
+#define IMX8MM_CLK_M4_CG 72
+#define IMX8MM_CLK_VPU_CG 73
+#define IMX8MM_CLK_GPU3D_CG 74
+#define IMX8MM_CLK_GPU2D_CG 75
+#define IMX8MM_CLK_A53_DIV 76
+#define IMX8MM_CLK_M4_DIV 77
+#define IMX8MM_CLK_VPU_DIV 78
+#define IMX8MM_CLK_GPU3D_DIV 79
+#define IMX8MM_CLK_GPU2D_DIV 80
+
+/* bus */
+#define IMX8MM_CLK_MAIN_AXI 81
+#define IMX8MM_CLK_ENET_AXI 82
+#define IMX8MM_CLK_NAND_USDHC_BUS 83
+#define IMX8MM_CLK_VPU_BUS 84
+#define IMX8MM_CLK_DISP_AXI 85
+#define IMX8MM_CLK_DISP_APB 86
+#define IMX8MM_CLK_DISP_RTRM 87
+#define IMX8MM_CLK_USB_BUS 88
+#define IMX8MM_CLK_GPU_AXI 89
+#define IMX8MM_CLK_GPU_AHB 90
+#define IMX8MM_CLK_NOC 91
+#define IMX8MM_CLK_NOC_APB 92
+
+#define IMX8MM_CLK_AHB 93
+#define IMX8MM_CLK_AUDIO_AHB 94
+#define IMX8MM_CLK_IPG_ROOT 95
+#define IMX8MM_CLK_IPG_AUDIO_ROOT 96
+
+#define IMX8MM_CLK_DRAM_ALT 97
+#define IMX8MM_CLK_DRAM_APB 98
+#define IMX8MM_CLK_VPU_G1 99
+#define IMX8MM_CLK_VPU_G2 100
+#define IMX8MM_CLK_DISP_DTRC 101
+#define IMX8MM_CLK_DISP_DC8000 102
+#define IMX8MM_CLK_PCIE1_CTRL 103
+#define IMX8MM_CLK_PCIE1_PHY 104
+#define IMX8MM_CLK_PCIE1_AUX 105
+#define IMX8MM_CLK_DC_PIXEL 106
+#define IMX8MM_CLK_LCDIF_PIXEL 107
+#define IMX8MM_CLK_SAI1 108
+#define IMX8MM_CLK_SAI2 109
+#define IMX8MM_CLK_SAI3 110
+#define IMX8MM_CLK_SAI4 111
+#define IMX8MM_CLK_SAI5 112
+#define IMX8MM_CLK_SAI6 113
+#define IMX8MM_CLK_SPDIF1 114
+#define IMX8MM_CLK_SPDIF2 115
+#define IMX8MM_CLK_ENET_REF 116
+#define IMX8MM_CLK_ENET_TIMER 117
+#define IMX8MM_CLK_ENET_PHY_REF 118
+#define IMX8MM_CLK_NAND 119
+#define IMX8MM_CLK_QSPI 120
+#define IMX8MM_CLK_USDHC1 121
+#define IMX8MM_CLK_USDHC2 122
+#define IMX8MM_CLK_I2C1 123
+#define IMX8MM_CLK_I2C2 124
+#define IMX8MM_CLK_I2C3 125
+#define IMX8MM_CLK_I2C4 126
+#define IMX8MM_CLK_UART1 127
+#define IMX8MM_CLK_UART2 128
+#define IMX8MM_CLK_UART3 129
+#define IMX8MM_CLK_UART4 130
+#define IMX8MM_CLK_USB_CORE_REF 131
+#define IMX8MM_CLK_USB_PHY_REF 132
+#define IMX8MM_CLK_ECSPI1 133
+#define IMX8MM_CLK_ECSPI2 134
+#define IMX8MM_CLK_PWM1 135
+#define IMX8MM_CLK_PWM2 136
+#define IMX8MM_CLK_PWM3 137
+#define IMX8MM_CLK_PWM4 138
+#define IMX8MM_CLK_GPT1 139
+#define IMX8MM_CLK_WDOG 140
+#define IMX8MM_CLK_WRCLK 141
+#define IMX8MM_CLK_DSI_CORE 142
+#define IMX8MM_CLK_DSI_PHY_REF 143
+#define IMX8MM_CLK_DSI_DBI 144
+#define IMX8MM_CLK_USDHC3 145
+#define IMX8MM_CLK_CSI1_CORE 146
+#define IMX8MM_CLK_CSI1_PHY_REF 147
+#define IMX8MM_CLK_CSI1_ESC 148
+#define IMX8MM_CLK_CSI2_CORE 149
+#define IMX8MM_CLK_CSI2_PHY_REF 150
+#define IMX8MM_CLK_CSI2_ESC 151
+#define IMX8MM_CLK_PCIE2_CTRL 152
+#define IMX8MM_CLK_PCIE2_PHY 153
+#define IMX8MM_CLK_PCIE2_AUX 154
+#define IMX8MM_CLK_ECSPI3 155
+#define IMX8MM_CLK_PDM 156
+#define IMX8MM_CLK_VPU_H1 157
+#define IMX8MM_CLK_CLKO1 158
+
+#define IMX8MM_CLK_ECSPI1_ROOT 159
+#define IMX8MM_CLK_ECSPI2_ROOT 160
+#define IMX8MM_CLK_ECSPI3_ROOT 161
+#define IMX8MM_CLK_ENET1_ROOT 162
+#define IMX8MM_CLK_GPT1_ROOT 163
+#define IMX8MM_CLK_I2C1_ROOT 164
+#define IMX8MM_CLK_I2C2_ROOT 165
+#define IMX8MM_CLK_I2C3_ROOT 166
+#define IMX8MM_CLK_I2C4_ROOT 167
+#define IMX8MM_CLK_OCOTP_ROOT 168
+#define IMX8MM_CLK_PCIE1_ROOT 169
+#define IMX8MM_CLK_PWM1_ROOT 170
+#define IMX8MM_CLK_PWM2_ROOT 171
+#define IMX8MM_CLK_PWM3_ROOT 172
+#define IMX8MM_CLK_PWM4_ROOT 173
+#define IMX8MM_CLK_QSPI_ROOT 174
+#define IMX8MM_CLK_NAND_ROOT 175
+#define IMX8MM_CLK_SAI1_ROOT 176
+#define IMX8MM_CLK_SAI1_IPG 177
+#define IMX8MM_CLK_SAI2_ROOT 178
+#define IMX8MM_CLK_SAI2_IPG 179
+#define IMX8MM_CLK_SAI3_ROOT 180
+#define IMX8MM_CLK_SAI3_IPG 181
+#define IMX8MM_CLK_SAI4_ROOT 182
+#define IMX8MM_CLK_SAI4_IPG 183
+#define IMX8MM_CLK_SAI5_ROOT 184
+#define IMX8MM_CLK_SAI5_IPG 185
+#define IMX8MM_CLK_SAI6_ROOT 186
+#define IMX8MM_CLK_SAI6_IPG 187
+#define IMX8MM_CLK_UART1_ROOT 188
+#define IMX8MM_CLK_UART2_ROOT 189
+#define IMX8MM_CLK_UART3_ROOT 190
+#define IMX8MM_CLK_UART4_ROOT 191
+#define IMX8MM_CLK_USB1_CTRL_ROOT 192
+#define IMX8MM_CLK_GPU3D_ROOT 193
+#define IMX8MM_CLK_USDHC1_ROOT 194
+#define IMX8MM_CLK_USDHC2_ROOT 195
+#define IMX8MM_CLK_WDOG1_ROOT 196
+#define IMX8MM_CLK_WDOG2_ROOT 197
+#define IMX8MM_CLK_WDOG3_ROOT 198
+#define IMX8MM_CLK_VPU_G1_ROOT 199
+#define IMX8MM_CLK_GPU_BUS_ROOT 200
+#define IMX8MM_CLK_VPU_H1_ROOT 201
+#define IMX8MM_CLK_VPU_G2_ROOT 202
+#define IMX8MM_CLK_PDM_ROOT 203
+#define IMX8MM_CLK_DISP_ROOT 204
+#define IMX8MM_CLK_DISP_AXI_ROOT 205
+#define IMX8MM_CLK_DISP_APB_ROOT 206
+#define IMX8MM_CLK_DISP_RTRM_ROOT 207
+#define IMX8MM_CLK_USDHC3_ROOT 208
+#define IMX8MM_CLK_TMU_ROOT 209
+#define IMX8MM_CLK_VPU_DEC_ROOT 210
+#define IMX8MM_CLK_SDMA1_ROOT 211
+#define IMX8MM_CLK_SDMA2_ROOT 212
+#define IMX8MM_CLK_SDMA3_ROOT 213
+#define IMX8MM_CLK_GPT_3M 214
+#define IMX8MM_CLK_ARM 215
+#define IMX8MM_CLK_PDM_IPG 216
+#define IMX8MM_CLK_GPU2D_ROOT 217
+#define IMX8MM_CLK_MU_ROOT 218
+#define IMX8MM_CLK_CSI1_ROOT 219
+
+#define IMX8MM_CLK_DRAM_CORE 220
+#define IMX8MM_CLK_DRAM_ALT_ROOT 221
+
+#define IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK 222
+
+#define IMX8MM_CLK_GPIO1_ROOT 223
+#define IMX8MM_CLK_GPIO2_ROOT 224
+#define IMX8MM_CLK_GPIO3_ROOT 225
+#define IMX8MM_CLK_GPIO4_ROOT 226
+#define IMX8MM_CLK_GPIO5_ROOT 227
+
+#define IMX8MM_CLK_SNVS_ROOT 228
+#define IMX8MM_CLK_GIC 229
+
+#define IMX8MM_CLK_END 230
+
+#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
new file mode 100644
index 0000000..0c7c750
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018-2019 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MN_H
+#define __DT_BINDINGS_CLOCK_IMX8MN_H
+
+#define IMX8MN_CLK_DUMMY 0
+#define IMX8MN_CLK_32K 1
+#define IMX8MN_CLK_24M 2
+#define IMX8MN_OSC_HDMI_CLK 3
+#define IMX8MN_CLK_EXT1 4
+#define IMX8MN_CLK_EXT2 5
+#define IMX8MN_CLK_EXT3 6
+#define IMX8MN_CLK_EXT4 7
+#define IMX8MN_AUDIO_PLL1_REF_SEL 8
+#define IMX8MN_AUDIO_PLL2_REF_SEL 9
+#define IMX8MN_VIDEO_PLL1_REF_SEL 10
+#define IMX8MN_DRAM_PLL_REF_SEL 11
+#define IMX8MN_GPU_PLL_REF_SEL 12
+#define IMX8MN_VPU_PLL_REF_SEL 13
+#define IMX8MN_ARM_PLL_REF_SEL 14
+#define IMX8MN_SYS_PLL1_REF_SEL 15
+#define IMX8MN_SYS_PLL2_REF_SEL 16
+#define IMX8MN_SYS_PLL3_REF_SEL 17
+#define IMX8MN_AUDIO_PLL1 18
+#define IMX8MN_AUDIO_PLL2 19
+#define IMX8MN_VIDEO_PLL1 20
+#define IMX8MN_DRAM_PLL 21
+#define IMX8MN_GPU_PLL 22
+#define IMX8MN_VPU_PLL 23
+#define IMX8MN_ARM_PLL 24
+#define IMX8MN_SYS_PLL1 25
+#define IMX8MN_SYS_PLL2 26
+#define IMX8MN_SYS_PLL3 27
+#define IMX8MN_AUDIO_PLL1_BYPASS 28
+#define IMX8MN_AUDIO_PLL2_BYPASS 29
+#define IMX8MN_VIDEO_PLL1_BYPASS 30
+#define IMX8MN_DRAM_PLL_BYPASS 31
+#define IMX8MN_GPU_PLL_BYPASS 32
+#define IMX8MN_VPU_PLL_BYPASS 33
+#define IMX8MN_ARM_PLL_BYPASS 34
+#define IMX8MN_SYS_PLL1_BYPASS 35
+#define IMX8MN_SYS_PLL2_BYPASS 36
+#define IMX8MN_SYS_PLL3_BYPASS 37
+#define IMX8MN_AUDIO_PLL1_OUT 38
+#define IMX8MN_AUDIO_PLL2_OUT 39
+#define IMX8MN_VIDEO_PLL1_OUT 40
+#define IMX8MN_DRAM_PLL_OUT 41
+#define IMX8MN_GPU_PLL_OUT 42
+#define IMX8MN_VPU_PLL_OUT 43
+#define IMX8MN_ARM_PLL_OUT 44
+#define IMX8MN_SYS_PLL1_OUT 45
+#define IMX8MN_SYS_PLL2_OUT 46
+#define IMX8MN_SYS_PLL3_OUT 47
+#define IMX8MN_SYS_PLL1_40M 48
+#define IMX8MN_SYS_PLL1_80M 49
+#define IMX8MN_SYS_PLL1_100M 50
+#define IMX8MN_SYS_PLL1_133M 51
+#define IMX8MN_SYS_PLL1_160M 52
+#define IMX8MN_SYS_PLL1_200M 53
+#define IMX8MN_SYS_PLL1_266M 54
+#define IMX8MN_SYS_PLL1_400M 55
+#define IMX8MN_SYS_PLL1_800M 56
+#define IMX8MN_SYS_PLL2_50M 57
+#define IMX8MN_SYS_PLL2_100M 58
+#define IMX8MN_SYS_PLL2_125M 59
+#define IMX8MN_SYS_PLL2_166M 60
+#define IMX8MN_SYS_PLL2_200M 61
+#define IMX8MN_SYS_PLL2_250M 62
+#define IMX8MN_SYS_PLL2_333M 63
+#define IMX8MN_SYS_PLL2_500M 64
+#define IMX8MN_SYS_PLL2_1000M 65
+
+/* CORE CLOCK ROOT */
+#define IMX8MN_CLK_A53_SRC 66
+#define IMX8MN_CLK_GPU_CORE_SRC 67
+#define IMX8MN_CLK_GPU_SHADER_SRC 68
+#define IMX8MN_CLK_A53_CG 69
+#define IMX8MN_CLK_GPU_CORE_CG 70
+#define IMX8MN_CLK_GPU_SHADER_CG 71
+#define IMX8MN_CLK_A53_DIV 72
+#define IMX8MN_CLK_GPU_CORE_DIV 73
+#define IMX8MN_CLK_GPU_SHADER_DIV 74
+
+/* BUS CLOCK ROOT */
+#define IMX8MN_CLK_MAIN_AXI 75
+#define IMX8MN_CLK_ENET_AXI 76
+#define IMX8MN_CLK_NAND_USDHC_BUS 77
+#define IMX8MN_CLK_DISP_AXI 78
+#define IMX8MN_CLK_DISP_APB 79
+#define IMX8MN_CLK_USB_BUS 80
+#define IMX8MN_CLK_GPU_AXI 81
+#define IMX8MN_CLK_GPU_AHB 82
+#define IMX8MN_CLK_NOC 83
+#define IMX8MN_CLK_AHB 84
+#define IMX8MN_CLK_AUDIO_AHB 85
+
+/* IPG CLOCK ROOT */
+#define IMX8MN_CLK_IPG_ROOT 86
+#define IMX8MN_CLK_IPG_AUDIO_ROOT 87
+
+/* IP */
+#define IMX8MN_CLK_DRAM_CORE 88
+#define IMX8MN_CLK_DRAM_ALT 89
+#define IMX8MN_CLK_DRAM_APB 90
+#define IMX8MN_CLK_DRAM_ALT_ROOT 91
+#define IMX8MN_CLK_DISP_PIXEL 92
+#define IMX8MN_CLK_SAI2 93
+#define IMX8MN_CLK_SAI3 94
+#define IMX8MN_CLK_SAI5 95
+#define IMX8MN_CLK_SAI6 96
+#define IMX8MN_CLK_SPDIF1 97
+#define IMX8MN_CLK_ENET_REF 98
+#define IMX8MN_CLK_ENET_TIMER 99
+#define IMX8MN_CLK_ENET_PHY_REF 100
+#define IMX8MN_CLK_NAND 101
+#define IMX8MN_CLK_QSPI 102
+#define IMX8MN_CLK_USDHC1 103
+#define IMX8MN_CLK_USDHC2 104
+#define IMX8MN_CLK_I2C1 105
+#define IMX8MN_CLK_I2C2 106
+#define IMX8MN_CLK_I2C3 107
+#define IMX8MN_CLK_I2C4 108
+#define IMX8MN_CLK_UART1 109
+#define IMX8MN_CLK_UART2 110
+#define IMX8MN_CLK_UART3 111
+#define IMX8MN_CLK_UART4 112
+#define IMX8MN_CLK_USB_CORE_REF 113
+#define IMX8MN_CLK_USB_PHY_REF 114
+#define IMX8MN_CLK_ECSPI1 115
+#define IMX8MN_CLK_ECSPI2 116
+#define IMX8MN_CLK_PWM1 117
+#define IMX8MN_CLK_PWM2 118
+#define IMX8MN_CLK_PWM3 119
+#define IMX8MN_CLK_PWM4 120
+#define IMX8MN_CLK_WDOG 121
+#define IMX8MN_CLK_WRCLK 122
+#define IMX8MN_CLK_CLKO1 123
+#define IMX8MN_CLK_CLKO2 124
+#define IMX8MN_CLK_DSI_CORE 125
+#define IMX8MN_CLK_DSI_PHY_REF 126
+#define IMX8MN_CLK_DSI_DBI 127
+#define IMX8MN_CLK_USDHC3 128
+#define IMX8MN_CLK_CAMERA_PIXEL 129
+#define IMX8MN_CLK_CSI1_PHY_REF 130
+#define IMX8MN_CLK_CSI2_PHY_REF 131
+#define IMX8MN_CLK_CSI2_ESC 132
+#define IMX8MN_CLK_ECSPI3 133
+#define IMX8MN_CLK_PDM 134
+#define IMX8MN_CLK_SAI7 135
+
+#define IMX8MN_CLK_ECSPI1_ROOT 136
+#define IMX8MN_CLK_ECSPI2_ROOT 137
+#define IMX8MN_CLK_ECSPI3_ROOT 138
+#define IMX8MN_CLK_ENET1_ROOT 139
+#define IMX8MN_CLK_GPIO1_ROOT 140
+#define IMX8MN_CLK_GPIO2_ROOT 141
+#define IMX8MN_CLK_GPIO3_ROOT 142
+#define IMX8MN_CLK_GPIO4_ROOT 143
+#define IMX8MN_CLK_GPIO5_ROOT 144
+#define IMX8MN_CLK_I2C1_ROOT 145
+#define IMX8MN_CLK_I2C2_ROOT 146
+#define IMX8MN_CLK_I2C3_ROOT 147
+#define IMX8MN_CLK_I2C4_ROOT 148
+#define IMX8MN_CLK_MU_ROOT 149
+#define IMX8MN_CLK_OCOTP_ROOT 150
+#define IMX8MN_CLK_PWM1_ROOT 151
+#define IMX8MN_CLK_PWM2_ROOT 152
+#define IMX8MN_CLK_PWM3_ROOT 153
+#define IMX8MN_CLK_PWM4_ROOT 154
+#define IMX8MN_CLK_QSPI_ROOT 155
+#define IMX8MN_CLK_NAND_ROOT 156
+#define IMX8MN_CLK_SAI2_ROOT 157
+#define IMX8MN_CLK_SAI2_IPG 158
+#define IMX8MN_CLK_SAI3_ROOT 159
+#define IMX8MN_CLK_SAI3_IPG 160
+#define IMX8MN_CLK_SAI5_ROOT 161
+#define IMX8MN_CLK_SAI5_IPG 162
+#define IMX8MN_CLK_SAI6_ROOT 163
+#define IMX8MN_CLK_SAI6_IPG 164
+#define IMX8MN_CLK_SAI7_ROOT 165
+#define IMX8MN_CLK_SAI7_IPG 166
+#define IMX8MN_CLK_SDMA1_ROOT 167
+#define IMX8MN_CLK_SDMA2_ROOT 168
+#define IMX8MN_CLK_UART1_ROOT 169
+#define IMX8MN_CLK_UART2_ROOT 170
+#define IMX8MN_CLK_UART3_ROOT 171
+#define IMX8MN_CLK_UART4_ROOT 172
+#define IMX8MN_CLK_USB1_CTRL_ROOT 173
+#define IMX8MN_CLK_USDHC1_ROOT 174
+#define IMX8MN_CLK_USDHC2_ROOT 175
+#define IMX8MN_CLK_WDOG1_ROOT 176
+#define IMX8MN_CLK_WDOG2_ROOT 177
+#define IMX8MN_CLK_WDOG3_ROOT 178
+#define IMX8MN_CLK_GPU_BUS_ROOT 179
+#define IMX8MN_CLK_ASRC_ROOT 180
+#define IMX8MN_CLK_GPU3D_ROOT 181
+#define IMX8MN_CLK_PDM_ROOT 182
+#define IMX8MN_CLK_PDM_IPG 183
+#define IMX8MN_CLK_DISP_AXI_ROOT 184
+#define IMX8MN_CLK_DISP_APB_ROOT 185
+#define IMX8MN_CLK_DISP_PIXEL_ROOT 186
+#define IMX8MN_CLK_CAMERA_PIXEL_ROOT 187
+#define IMX8MN_CLK_USDHC3_ROOT 188
+#define IMX8MN_CLK_SDMA3_ROOT 189
+#define IMX8MN_CLK_TMU_ROOT 190
+#define IMX8MN_CLK_ARM 191
+#define IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK 192
+#define IMX8MN_CLK_GPU_CORE_ROOT 193
+#define IMX8MN_CLK_GIC 194
+
+#define IMX8MN_CLK_END 195
+
+#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
new file mode 100644
index 0000000..6546367
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX8MQ_H +#define __DT_BINDINGS_CLOCK_IMX8MQ_H + +#define IMX8MQ_CLK_DUMMY 0 +#define IMX8MQ_CLK_32K 1 +#define IMX8MQ_CLK_25M 2 +#define IMX8MQ_CLK_27M 3 +#define IMX8MQ_CLK_EXT1 4 +#define IMX8MQ_CLK_EXT2 5 +#define IMX8MQ_CLK_EXT3 6 +#define IMX8MQ_CLK_EXT4 7 + +/* ANAMIX PLL clocks */ +/* FRAC PLLs */ +/* ARM PLL */ +#define IMX8MQ_ARM_PLL_REF_SEL 8 +#define IMX8MQ_ARM_PLL_REF_DIV 9 +#define IMX8MQ_ARM_PLL 10 +#define IMX8MQ_ARM_PLL_BYPASS 11 +#define IMX8MQ_ARM_PLL_OUT 12 + +/* GPU PLL */ +#define IMX8MQ_GPU_PLL_REF_SEL 13 +#define IMX8MQ_GPU_PLL_REF_DIV 14 +#define IMX8MQ_GPU_PLL 15 +#define IMX8MQ_GPU_PLL_BYPASS 16 +#define IMX8MQ_GPU_PLL_OUT 17 + +/* VPU PLL */ +#define IMX8MQ_VPU_PLL_REF_SEL 18 +#define IMX8MQ_VPU_PLL_REF_DIV 19 +#define IMX8MQ_VPU_PLL 20 +#define IMX8MQ_VPU_PLL_BYPASS 21 +#define IMX8MQ_VPU_PLL_OUT 22 + +/* AUDIO PLL1 */ +#define IMX8MQ_AUDIO_PLL1_REF_SEL 23 +#define IMX8MQ_AUDIO_PLL1_REF_DIV 24 +#define IMX8MQ_AUDIO_PLL1 25 +#define IMX8MQ_AUDIO_PLL1_BYPASS 26 +#define IMX8MQ_AUDIO_PLL1_OUT 27 + +/* AUDIO PLL2 */ +#define IMX8MQ_AUDIO_PLL2_REF_SEL 28 +#define IMX8MQ_AUDIO_PLL2_REF_DIV 29 +#define IMX8MQ_AUDIO_PLL2 30 +#define IMX8MQ_AUDIO_PLL2_BYPASS 31 +#define IMX8MQ_AUDIO_PLL2_OUT 32 + +/* VIDEO PLL1 */ +#define IMX8MQ_VIDEO_PLL1_REF_SEL 33 +#define IMX8MQ_VIDEO_PLL1_REF_DIV 34 +#define IMX8MQ_VIDEO_PLL1 35 +#define IMX8MQ_VIDEO_PLL1_BYPASS 36 +#define IMX8MQ_VIDEO_PLL1_OUT 37 + +/* SYS1 PLL */ +#define IMX8MQ_SYS1_PLL1_REF_SEL 38 +#define IMX8MQ_SYS1_PLL1_REF_DIV 39 +#define IMX8MQ_SYS1_PLL1 40 +#define IMX8MQ_SYS1_PLL1_OUT 41 +#define IMX8MQ_SYS1_PLL1_OUT_DIV 42 +#define IMX8MQ_SYS1_PLL2 43 +#define IMX8MQ_SYS1_PLL2_DIV 44 +#define IMX8MQ_SYS1_PLL2_OUT 45 + +/* SYS2 PLL */ +#define IMX8MQ_SYS2_PLL1_REF_SEL 46 +#define IMX8MQ_SYS2_PLL1_REF_DIV 47 +#define IMX8MQ_SYS2_PLL1 48 +#define IMX8MQ_SYS2_PLL1_OUT 49 +#define IMX8MQ_SYS2_PLL1_OUT_DIV 50 +#define IMX8MQ_SYS2_PLL2 51 +#define IMX8MQ_SYS2_PLL2_DIV 52 +#define IMX8MQ_SYS2_PLL2_OUT 53 + +/* SYS3 PLL */ +#define IMX8MQ_SYS3_PLL1_REF_SEL 54 +#define IMX8MQ_SYS3_PLL1_REF_DIV 55 +#define IMX8MQ_SYS3_PLL1 56 +#define IMX8MQ_SYS3_PLL1_OUT 57 +#define IMX8MQ_SYS3_PLL1_OUT_DIV 58 +#define IMX8MQ_SYS3_PLL2 59 +#define IMX8MQ_SYS3_PLL2_DIV 60 +#define IMX8MQ_SYS3_PLL2_OUT 61 + +/* DRAM PLL */ +#define IMX8MQ_DRAM_PLL1_REF_SEL 62 +#define IMX8MQ_DRAM_PLL1_REF_DIV 63 +#define IMX8MQ_DRAM_PLL1 64 +#define IMX8MQ_DRAM_PLL1_OUT 65 +#define IMX8MQ_DRAM_PLL1_OUT_DIV 66 +#define IMX8MQ_DRAM_PLL2 67 +#define IMX8MQ_DRAM_PLL2_DIV 68 +#define IMX8MQ_DRAM_PLL2_OUT 69 + +/* SYS PLL DIV */ +#define IMX8MQ_SYS1_PLL_40M 70 +#define IMX8MQ_SYS1_PLL_80M 71 +#define IMX8MQ_SYS1_PLL_100M 72 +#define IMX8MQ_SYS1_PLL_133M 73 +#define IMX8MQ_SYS1_PLL_160M 74 +#define IMX8MQ_SYS1_PLL_200M 75 +#define IMX8MQ_SYS1_PLL_266M 76 +#define IMX8MQ_SYS1_PLL_400M 77 +#define IMX8MQ_SYS1_PLL_800M 78 + +#define IMX8MQ_SYS2_PLL_50M 79 +#define IMX8MQ_SYS2_PLL_100M 80 +#define IMX8MQ_SYS2_PLL_125M 81 +#define IMX8MQ_SYS2_PLL_166M 82 +#define IMX8MQ_SYS2_PLL_200M 83 +#define IMX8MQ_SYS2_PLL_250M 84 +#define IMX8MQ_SYS2_PLL_333M 85 +#define IMX8MQ_SYS2_PLL_500M 86 +#define IMX8MQ_SYS2_PLL_1000M 87 + +/* CCM ROOT clocks */ +/* A53 */ +#define IMX8MQ_CLK_A53_SRC 88 +#define IMX8MQ_CLK_A53_CG 89 +#define IMX8MQ_CLK_A53_DIV 90 +/* M4 */ +#define IMX8MQ_CLK_M4_SRC 91 +#define IMX8MQ_CLK_M4_CG 92 +#define IMX8MQ_CLK_M4_DIV 93 +/* VPU */ +#define IMX8MQ_CLK_VPU_SRC 94 +#define IMX8MQ_CLK_VPU_CG 95 
+#define IMX8MQ_CLK_VPU_DIV 96
+/* GPU CORE */
+#define IMX8MQ_CLK_GPU_CORE_SRC 97
+#define IMX8MQ_CLK_GPU_CORE_CG 98
+#define IMX8MQ_CLK_GPU_CORE_DIV 99
+/* GPU SHADER */
+#define IMX8MQ_CLK_GPU_SHADER_SRC 100
+#define IMX8MQ_CLK_GPU_SHADER_CG 101
+#define IMX8MQ_CLK_GPU_SHADER_DIV 102
+
+/* BUS TYPE */
+/* MAIN AXI */
+#define IMX8MQ_CLK_MAIN_AXI 103
+/* ENET AXI */
+#define IMX8MQ_CLK_ENET_AXI 104
+/* NAND_USDHC_BUS */
+#define IMX8MQ_CLK_NAND_USDHC_BUS 105
+/* VPU BUS */
+#define IMX8MQ_CLK_VPU_BUS 106
+/* DISP_AXI */
+#define IMX8MQ_CLK_DISP_AXI 107
+/* DISP APB */
+#define IMX8MQ_CLK_DISP_APB 108
+/* DISP RTRM */
+#define IMX8MQ_CLK_DISP_RTRM 109
+/* USB_BUS */
+#define IMX8MQ_CLK_USB_BUS 110
+/* GPU_AXI */
+#define IMX8MQ_CLK_GPU_AXI 111
+/* GPU_AHB */
+#define IMX8MQ_CLK_GPU_AHB 112
+/* NOC */
+#define IMX8MQ_CLK_NOC 113
+/* NOC_APB */
+#define IMX8MQ_CLK_NOC_APB 115
+
+/* AHB */
+#define IMX8MQ_CLK_AHB 116
+/* AUDIO AHB */
+#define IMX8MQ_CLK_AUDIO_AHB 117
+
+/* DRAM_ALT */
+#define IMX8MQ_CLK_DRAM_ALT 118
+/* DRAM APB */
+#define IMX8MQ_CLK_DRAM_APB 119
+/* VPU_G1 */
+#define IMX8MQ_CLK_VPU_G1 120
+/* VPU_G2 */
+#define IMX8MQ_CLK_VPU_G2 121
+/* DISP_DTRC */
+#define IMX8MQ_CLK_DISP_DTRC 122
+/* DISP_DC8000 */
+#define IMX8MQ_CLK_DISP_DC8000 123
+/* PCIE_CTRL */
+#define IMX8MQ_CLK_PCIE1_CTRL 124
+/* PCIE_PHY */
+#define IMX8MQ_CLK_PCIE1_PHY 125
+/* PCIE_AUX */
+#define IMX8MQ_CLK_PCIE1_AUX 126
+/* DC_PIXEL */
+#define IMX8MQ_CLK_DC_PIXEL 127
+/* LCDIF_PIXEL */
+#define IMX8MQ_CLK_LCDIF_PIXEL 128
+/* SAI1~6 */
+#define IMX8MQ_CLK_SAI1 129
+#define IMX8MQ_CLK_SAI2 130
+#define IMX8MQ_CLK_SAI3 131
+#define IMX8MQ_CLK_SAI4 132
+#define IMX8MQ_CLK_SAI5 133
+#define IMX8MQ_CLK_SAI6 134
+/* SPDIF1 */
+#define IMX8MQ_CLK_SPDIF1 135
+/* SPDIF2 */
+#define IMX8MQ_CLK_SPDIF2 136
+/* ENET_REF */
+#define IMX8MQ_CLK_ENET_REF 137
+/* ENET_TIMER */
+#define IMX8MQ_CLK_ENET_TIMER 138
+/* ENET_PHY */
+#define IMX8MQ_CLK_ENET_PHY_REF 139
+/* NAND */
+#define IMX8MQ_CLK_NAND 140
+/* QSPI */
+#define IMX8MQ_CLK_QSPI 141
+/* USDHC1 */
+#define IMX8MQ_CLK_USDHC1 142
+/* USDHC2 */
+#define IMX8MQ_CLK_USDHC2 143
+/* I2C1 */
+#define IMX8MQ_CLK_I2C1 144
+/* I2C2 */
+#define IMX8MQ_CLK_I2C2 145
+/* I2C3 */
+#define IMX8MQ_CLK_I2C3 146
+/* I2C4 */
+#define IMX8MQ_CLK_I2C4 147
+/* UART1 */
+#define IMX8MQ_CLK_UART1 148
+/* UART2 */
+#define IMX8MQ_CLK_UART2 149
+/* UART3 */
+#define IMX8MQ_CLK_UART3 150
+/* UART4 */
+#define IMX8MQ_CLK_UART4 151
+/* USB_CORE_REF */
+#define IMX8MQ_CLK_USB_CORE_REF 152
+/* USB_PHY_REF */
+#define IMX8MQ_CLK_USB_PHY_REF 153
+/* ECSPI1 */
+#define IMX8MQ_CLK_ECSPI1 154
+/* ECSPI2 */
+#define IMX8MQ_CLK_ECSPI2 155
+/* PWM1 */
+#define IMX8MQ_CLK_PWM1 156
+/* PWM2 */
+#define IMX8MQ_CLK_PWM2 157
+/* PWM3 */
+#define IMX8MQ_CLK_PWM3 158
+/* PWM4 */
+#define IMX8MQ_CLK_PWM4 159
+/* GPT1 */
+#define IMX8MQ_CLK_GPT1 160
+/* WDOG */
+#define IMX8MQ_CLK_WDOG 161
+/* WRCLK */
+#define IMX8MQ_CLK_WRCLK 162
+/* DSI_CORE */
+#define IMX8MQ_CLK_DSI_CORE 163
+/* DSI_PHY */
+#define IMX8MQ_CLK_DSI_PHY_REF 164
+/* DSI_DBI */
+#define IMX8MQ_CLK_DSI_DBI 165
+/* DSI_ESC */
+#define IMX8MQ_CLK_DSI_ESC 166
+/* CSI1_CORE */
+#define IMX8MQ_CLK_CSI1_CORE 167
+/* CSI1_PHY */
+#define IMX8MQ_CLK_CSI1_PHY_REF 168
+/* CSI_ESC */
+#define IMX8MQ_CLK_CSI1_ESC 169
+/* CSI2_CORE */
+#define IMX8MQ_CLK_CSI2_CORE 170
+/* CSI2_PHY */
+#define IMX8MQ_CLK_CSI2_PHY_REF 171
+/* CSI2_ESC */
+#define IMX8MQ_CLK_CSI2_ESC 172
+/* PCIE2_CTRL */
+#define IMX8MQ_CLK_PCIE2_CTRL 173
+/* PCIE2_PHY */
+#define
IMX8MQ_CLK_PCIE2_PHY 174 +/* PCIE2_AUX */ +#define IMX8MQ_CLK_PCIE2_AUX 175 +/* ECSPI3 */ +#define IMX8MQ_CLK_ECSPI3 176 + +/* CCGR clocks */ +#define IMX8MQ_CLK_A53_ROOT 177 +#define IMX8MQ_CLK_DRAM_ROOT 178 +#define IMX8MQ_CLK_ECSPI1_ROOT 179 +#define IMX8MQ_CLK_ECSPI2_ROOT 180 +#define IMX8MQ_CLK_ECSPI3_ROOT 181 +#define IMX8MQ_CLK_ENET1_ROOT 182 +#define IMX8MQ_CLK_GPT1_ROOT 183 +#define IMX8MQ_CLK_I2C1_ROOT 184 +#define IMX8MQ_CLK_I2C2_ROOT 185 +#define IMX8MQ_CLK_I2C3_ROOT 186 +#define IMX8MQ_CLK_I2C4_ROOT 187 +#define IMX8MQ_CLK_M4_ROOT 188 +#define IMX8MQ_CLK_PCIE1_ROOT 189 +#define IMX8MQ_CLK_PCIE2_ROOT 190 +#define IMX8MQ_CLK_PWM1_ROOT 191 +#define IMX8MQ_CLK_PWM2_ROOT 192 +#define IMX8MQ_CLK_PWM3_ROOT 193 +#define IMX8MQ_CLK_PWM4_ROOT 194 +#define IMX8MQ_CLK_QSPI_ROOT 195 +#define IMX8MQ_CLK_SAI1_ROOT 196 +#define IMX8MQ_CLK_SAI2_ROOT 197 +#define IMX8MQ_CLK_SAI3_ROOT 198 +#define IMX8MQ_CLK_SAI4_ROOT 199 +#define IMX8MQ_CLK_SAI5_ROOT 200 +#define IMX8MQ_CLK_SAI6_ROOT 201 +#define IMX8MQ_CLK_UART1_ROOT 202 +#define IMX8MQ_CLK_UART2_ROOT 203 +#define IMX8MQ_CLK_UART3_ROOT 204 +#define IMX8MQ_CLK_UART4_ROOT 205 +#define IMX8MQ_CLK_USB1_CTRL_ROOT 206 +#define IMX8MQ_CLK_USB2_CTRL_ROOT 207 +#define IMX8MQ_CLK_USB1_PHY_ROOT 208 +#define IMX8MQ_CLK_USB2_PHY_ROOT 209 +#define IMX8MQ_CLK_USDHC1_ROOT 210 +#define IMX8MQ_CLK_USDHC2_ROOT 211 +#define IMX8MQ_CLK_WDOG1_ROOT 212 +#define IMX8MQ_CLK_WDOG2_ROOT 213 +#define IMX8MQ_CLK_WDOG3_ROOT 214 +#define IMX8MQ_CLK_GPU_ROOT 215 +#define IMX8MQ_CLK_HEVC_ROOT 216 +#define IMX8MQ_CLK_AVC_ROOT 217 +#define IMX8MQ_CLK_VP9_ROOT 218 +#define IMX8MQ_CLK_HEVC_INTER_ROOT 219 +#define IMX8MQ_CLK_DISP_ROOT 220 +#define IMX8MQ_CLK_HDMI_ROOT 221 +#define IMX8MQ_CLK_HDMI_PHY_ROOT 222 +#define IMX8MQ_CLK_VPU_DEC_ROOT 223 +#define IMX8MQ_CLK_CSI1_ROOT 224 +#define IMX8MQ_CLK_CSI2_ROOT 225 +#define IMX8MQ_CLK_RAWNAND_ROOT 226 +#define IMX8MQ_CLK_SDMA1_ROOT 227 +#define IMX8MQ_CLK_SDMA2_ROOT 228 +#define IMX8MQ_CLK_VPU_G1_ROOT 229 +#define IMX8MQ_CLK_VPU_G2_ROOT 230 + +/* SCCG PLL GATE */ +#define IMX8MQ_SYS1_PLL_OUT 231 +#define IMX8MQ_SYS2_PLL_OUT 232 +#define IMX8MQ_SYS3_PLL_OUT 233 +#define IMX8MQ_DRAM_PLL_OUT 234 + +#define IMX8MQ_GPT_3M_CLK 235 + +#define IMX8MQ_CLK_IPG_ROOT 236 +#define IMX8MQ_CLK_IPG_AUDIO_ROOT 237 +#define IMX8MQ_CLK_SAI1_IPG 238 +#define IMX8MQ_CLK_SAI2_IPG 239 +#define IMX8MQ_CLK_SAI3_IPG 240 +#define IMX8MQ_CLK_SAI4_IPG 241 +#define IMX8MQ_CLK_SAI5_IPG 242 +#define IMX8MQ_CLK_SAI6_IPG 243 + +/* DSI AHB/IPG clocks */ +/* rxesc clock */ +#define IMX8MQ_CLK_DSI_AHB 244 +/* txesc clock */ +#define IMX8MQ_CLK_DSI_IPG_DIV 245 + +#define IMX8MQ_CLK_TMU_ROOT 246 + +/* Display root clocks */ +#define IMX8MQ_CLK_DISP_AXI_ROOT 247 +#define IMX8MQ_CLK_DISP_APB_ROOT 248 +#define IMX8MQ_CLK_DISP_RTRM_ROOT 249 + +#define IMX8MQ_CLK_OCOTP_ROOT 250 + +#define IMX8MQ_CLK_DRAM_ALT_ROOT 251 +#define IMX8MQ_CLK_DRAM_CORE 252 + +#define IMX8MQ_CLK_MU_ROOT 253 +#define IMX8MQ_VIDEO2_PLL_OUT 254 + +#define IMX8MQ_CLK_CLKO2 255 + +#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 256 + +#define IMX8MQ_CLK_CLKO1 257 +#define IMX8MQ_CLK_ARM 258 + +#define IMX8MQ_CLK_GPIO1_ROOT 259 +#define IMX8MQ_CLK_GPIO2_ROOT 260 +#define IMX8MQ_CLK_GPIO3_ROOT 261 +#define IMX8MQ_CLK_GPIO4_ROOT 262 +#define IMX8MQ_CLK_GPIO5_ROOT 263 + +#define IMX8MQ_CLK_SNVS_ROOT 264 +#define IMX8MQ_CLK_GIC 265 + +#define IMX8MQ_CLK_END 266 +#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ diff --git a/include/dt-bindings/clock/ingenic,tcu.h b/include/dt-bindings/clock/ingenic,tcu.h new file mode 
100644 index 0000000..d569650 --- /dev/null +++ b/include/dt-bindings/clock/ingenic,tcu.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,tcu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ +#define __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ + +#define TCU_CLK_TIMER0 0 +#define TCU_CLK_TIMER1 1 +#define TCU_CLK_TIMER2 2 +#define TCU_CLK_TIMER3 3 +#define TCU_CLK_TIMER4 4 +#define TCU_CLK_TIMER5 5 +#define TCU_CLK_TIMER6 6 +#define TCU_CLK_TIMER7 7 +#define TCU_CLK_WDT 8 +#define TCU_CLK_OST 9 + +#endif /* __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ */ diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/jz4725b-cgu.h new file mode 100644 index 0000000..31f1ab0 --- /dev/null +++ b/include/dt-bindings/clock/jz4725b-cgu.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4725b-cgu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ + +#define JZ4725B_CLK_EXT 0 +#define JZ4725B_CLK_OSC32K 1 +#define JZ4725B_CLK_PLL 2 +#define JZ4725B_CLK_PLL_HALF 3 +#define JZ4725B_CLK_CCLK 4 +#define JZ4725B_CLK_HCLK 5 +#define JZ4725B_CLK_PCLK 6 +#define JZ4725B_CLK_MCLK 7 +#define JZ4725B_CLK_IPU 8 +#define JZ4725B_CLK_LCD 9 +#define JZ4725B_CLK_I2S 10 +#define JZ4725B_CLK_SPI 11 +#define JZ4725B_CLK_MMC_MUX 12 +#define JZ4725B_CLK_UDC 13 +#define JZ4725B_CLK_UART 14 +#define JZ4725B_CLK_DMA 15 +#define JZ4725B_CLK_ADC 16 +#define JZ4725B_CLK_I2C 17 +#define JZ4725B_CLK_AIC 18 +#define JZ4725B_CLK_MMC0 19 +#define JZ4725B_CLK_MMC1 20 +#define JZ4725B_CLK_BCH 21 +#define JZ4725B_CLK_TCU 22 +#define JZ4725B_CLK_EXT512 23 +#define JZ4725B_CLK_RTC 24 +#define JZ4725B_CLK_UDC_PHY 25 + +#endif /* __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ */ diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h new file mode 100644 index 0000000..e82d770 --- /dev/null +++ b/include/dt-bindings/clock/jz4740-cgu.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4740-cgu DT binding. + * + * They are roughly ordered as: + * - external clocks + * - PLLs + * - muxes/dividers in the order they appear in the jz4740 programmers manual + * - gates in order of their bit in the CLKGR* registers + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ + +#define JZ4740_CLK_EXT 0 +#define JZ4740_CLK_RTC 1 +#define JZ4740_CLK_PLL 2 +#define JZ4740_CLK_PLL_HALF 3 +#define JZ4740_CLK_CCLK 4 +#define JZ4740_CLK_HCLK 5 +#define JZ4740_CLK_PCLK 6 +#define JZ4740_CLK_MCLK 7 +#define JZ4740_CLK_LCD 8 +#define JZ4740_CLK_LCD_PCLK 9 +#define JZ4740_CLK_I2S 10 +#define JZ4740_CLK_SPI 11 +#define JZ4740_CLK_MMC 12 +#define JZ4740_CLK_UHC 13 +#define JZ4740_CLK_UDC 14 +#define JZ4740_CLK_UART0 15 +#define JZ4740_CLK_UART1 16 +#define JZ4740_CLK_DMA 17 +#define JZ4740_CLK_IPU 18 +#define JZ4740_CLK_ADC 19 +#define JZ4740_CLK_I2C 20 +#define JZ4740_CLK_AIC 21 +#define JZ4740_CLK_TCU 22 + +#endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */ diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/jz4770-cgu.h new file mode 100644 index 0000000..d68a769 --- /dev/null +++ b/include/dt-bindings/clock/jz4770-cgu.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4770-cgu DT binding. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ + +#define JZ4770_CLK_EXT 0 +#define JZ4770_CLK_OSC32K 1 +#define JZ4770_CLK_PLL0 2 +#define JZ4770_CLK_PLL1 3 +#define JZ4770_CLK_CCLK 4 +#define JZ4770_CLK_H0CLK 5 +#define JZ4770_CLK_H1CLK 6 +#define JZ4770_CLK_H2CLK 7 +#define JZ4770_CLK_C1CLK 8 +#define JZ4770_CLK_PCLK 9 +#define JZ4770_CLK_MMC0_MUX 10 +#define JZ4770_CLK_MMC0 11 +#define JZ4770_CLK_MMC1_MUX 12 +#define JZ4770_CLK_MMC1 13 +#define JZ4770_CLK_MMC2_MUX 14 +#define JZ4770_CLK_MMC2 15 +#define JZ4770_CLK_CIM 16 +#define JZ4770_CLK_UHC 17 +#define JZ4770_CLK_GPU 18 +#define JZ4770_CLK_BCH 19 +#define JZ4770_CLK_LPCLK_MUX 20 +#define JZ4770_CLK_GPS 21 +#define JZ4770_CLK_SSI_MUX 22 +#define JZ4770_CLK_PCM_MUX 23 +#define JZ4770_CLK_I2S 24 +#define JZ4770_CLK_OTG 25 +#define JZ4770_CLK_SSI0 26 +#define JZ4770_CLK_SSI1 27 +#define JZ4770_CLK_SSI2 28 +#define JZ4770_CLK_PCM0 29 +#define JZ4770_CLK_PCM1 30 +#define JZ4770_CLK_DMA 31 +#define JZ4770_CLK_I2C0 32 +#define JZ4770_CLK_I2C1 33 +#define JZ4770_CLK_I2C2 34 +#define JZ4770_CLK_UART0 35 +#define JZ4770_CLK_UART1 36 +#define JZ4770_CLK_UART2 37 +#define JZ4770_CLK_UART3 38 +#define JZ4770_CLK_IPU 39 +#define JZ4770_CLK_ADC 40 +#define JZ4770_CLK_AIC 41 +#define JZ4770_CLK_AUX 42 +#define JZ4770_CLK_VPU 43 +#define JZ4770_CLK_UHC_PHY 44 +#define JZ4770_CLK_OTG_PHY 45 +#define JZ4770_CLK_EXT512 46 +#define JZ4770_CLK_RTC 47 + +#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */ diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h new file mode 100644 index 0000000..1859ce5 --- /dev/null +++ b/include/dt-bindings/clock/jz4780-cgu.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4780-cgu DT binding. 
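+ *
+ * A consumer node would typically reference one of these numbers through
+ * the CGU's one-cell clock specifier; a minimal sketch, assuming a CGU
+ * node labelled "cgu" with #clock-cells = <1>:
+ *
+ *	clocks = <&cgu JZ4780_CLK_UART0>;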
+ * + * They are roughly ordered as: + * - external clocks + * - PLLs + * - muxes/dividers in the order they appear in the jz4780 programmers manual + * - gates in order of their bit in the CLKGR* registers + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ + +#define JZ4780_CLK_EXCLK 0 +#define JZ4780_CLK_RTCLK 1 +#define JZ4780_CLK_APLL 2 +#define JZ4780_CLK_MPLL 3 +#define JZ4780_CLK_EPLL 4 +#define JZ4780_CLK_VPLL 5 +#define JZ4780_CLK_OTGPHY 6 +#define JZ4780_CLK_SCLKA 7 +#define JZ4780_CLK_CPUMUX 8 +#define JZ4780_CLK_CPU 9 +#define JZ4780_CLK_L2CACHE 10 +#define JZ4780_CLK_AHB0 11 +#define JZ4780_CLK_AHB2PMUX 12 +#define JZ4780_CLK_AHB2 13 +#define JZ4780_CLK_PCLK 14 +#define JZ4780_CLK_DDR 15 +#define JZ4780_CLK_VPU 16 +#define JZ4780_CLK_I2SPLL 17 +#define JZ4780_CLK_I2S 18 +#define JZ4780_CLK_LCD0PIXCLK 19 +#define JZ4780_CLK_LCD1PIXCLK 20 +#define JZ4780_CLK_MSCMUX 21 +#define JZ4780_CLK_MSC0 22 +#define JZ4780_CLK_MSC1 23 +#define JZ4780_CLK_MSC2 24 +#define JZ4780_CLK_UHC 25 +#define JZ4780_CLK_SSIPLL 26 +#define JZ4780_CLK_SSI 27 +#define JZ4780_CLK_CIMMCLK 28 +#define JZ4780_CLK_PCMPLL 29 +#define JZ4780_CLK_PCM 30 +#define JZ4780_CLK_GPU 31 +#define JZ4780_CLK_HDMI 32 +#define JZ4780_CLK_BCH 33 +#define JZ4780_CLK_NEMC 34 +#define JZ4780_CLK_OTG0 35 +#define JZ4780_CLK_SSI0 36 +#define JZ4780_CLK_SMB0 37 +#define JZ4780_CLK_SMB1 38 +#define JZ4780_CLK_SCC 39 +#define JZ4780_CLK_AIC 40 +#define JZ4780_CLK_TSSI0 41 +#define JZ4780_CLK_OWI 42 +#define JZ4780_CLK_KBC 43 +#define JZ4780_CLK_SADC 44 +#define JZ4780_CLK_UART0 45 +#define JZ4780_CLK_UART1 46 +#define JZ4780_CLK_UART2 47 +#define JZ4780_CLK_UART3 48 +#define JZ4780_CLK_SSI1 49 +#define JZ4780_CLK_SSI2 50 +#define JZ4780_CLK_PDMA 51 +#define JZ4780_CLK_GPS 52 +#define JZ4780_CLK_MAC 53 +#define JZ4780_CLK_SMB2 54 +#define JZ4780_CLK_CIM 55 +#define JZ4780_CLK_LCD 56 +#define JZ4780_CLK_TVE 57 +#define JZ4780_CLK_IPU 58 +#define JZ4780_CLK_DDR0 59 +#define JZ4780_CLK_DDR1 60 +#define JZ4780_CLK_SMB3 61 +#define JZ4780_CLK_TSSI1 62 +#define JZ4780_CLK_COMPRESS 63 +#define JZ4780_CLK_AIC1 64 +#define JZ4780_CLK_GPVLC 65 +#define JZ4780_CLK_OTG1 66 +#define JZ4780_CLK_UART4 67 +#define JZ4780_CLK_AHBMON 68 +#define JZ4780_CLK_SMB4 69 +#define JZ4780_CLK_DES 70 +#define JZ4780_CLK_X2D 71 +#define JZ4780_CLK_CORE1 72 + +#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */ diff --git a/include/dt-bindings/clock/lpc18xx-ccu.h b/include/dt-bindings/clock/lpc18xx-ccu.h new file mode 100644 index 0000000..bbfe00b --- /dev/null +++ b/include/dt-bindings/clock/lpc18xx-ccu.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2015 Joachim Eastwood + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. 
+ * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +/* Clock Control Unit 1 (CCU1) clock offsets */ +#define CLK_APB3_BUS 0x100 +#define CLK_APB3_I2C1 0x108 +#define CLK_APB3_DAC 0x110 +#define CLK_APB3_ADC0 0x118 +#define CLK_APB3_ADC1 0x120 +#define CLK_APB3_CAN0 0x128 +#define CLK_APB1_BUS 0x200 +#define CLK_APB1_MOTOCON_PWM 0x208 +#define CLK_APB1_I2C0 0x210 +#define CLK_APB1_I2S 0x218 +#define CLK_APB1_CAN1 0x220 +#define CLK_SPIFI 0x300 +#define CLK_CPU_BUS 0x400 +#define CLK_CPU_SPIFI 0x408 +#define CLK_CPU_GPIO 0x410 +#define CLK_CPU_LCD 0x418 +#define CLK_CPU_ETHERNET 0x420 +#define CLK_CPU_USB0 0x428 +#define CLK_CPU_EMC 0x430 +#define CLK_CPU_SDIO 0x438 +#define CLK_CPU_DMA 0x440 +#define CLK_CPU_CORE 0x448 +#define CLK_CPU_SCT 0x468 +#define CLK_CPU_USB1 0x470 +#define CLK_CPU_EMCDIV 0x478 +#define CLK_CPU_FLASHA 0x480 +#define CLK_CPU_FLASHB 0x488 +#define CLK_CPU_M0APP 0x490 +#define CLK_CPU_ADCHS 0x498 +#define CLK_CPU_EEPROM 0x4a0 +#define CLK_CPU_WWDT 0x500 +#define CLK_CPU_UART0 0x508 +#define CLK_CPU_UART1 0x510 +#define CLK_CPU_SSP0 0x518 +#define CLK_CPU_TIMER0 0x520 +#define CLK_CPU_TIMER1 0x528 +#define CLK_CPU_SCU 0x530 +#define CLK_CPU_CREG 0x538 +#define CLK_CPU_RITIMER 0x600 +#define CLK_CPU_UART2 0x608 +#define CLK_CPU_UART3 0x610 +#define CLK_CPU_TIMER2 0x618 +#define CLK_CPU_TIMER3 0x620 +#define CLK_CPU_SSP1 0x628 +#define CLK_CPU_QEI 0x630 +#define CLK_PERIPH_BUS 0x700 +#define CLK_PERIPH_CORE 0x710 +#define CLK_PERIPH_SGPIO 0x718 +#define CLK_USB0 0x800 +#define CLK_USB1 0x900 +#define CLK_SPI 0xA00 +#define CLK_ADCHS 0xB00 + +/* Clock Control Unit 2 (CCU2) clock offsets */ +#define CLK_AUDIO 0x100 +#define CLK_APB2_UART3 0x200 +#define CLK_APB2_UART2 0x300 +#define CLK_APB0_UART1 0x400 +#define CLK_APB0_UART0 0x500 +#define CLK_APB2_SSP1 0x600 +#define CLK_APB0_SSP0 0x700 +#define CLK_SDIO 0x800 diff --git a/include/dt-bindings/clock/lpc18xx-cgu.h b/include/dt-bindings/clock/lpc18xx-cgu.h new file mode 100644 index 0000000..6e57c6d --- /dev/null +++ b/include/dt-bindings/clock/lpc18xx-cgu.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2015 Joachim Eastwood + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. 
+ * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +/* LPC18xx/43xx base clock ids */ +#define BASE_SAFE_CLK 0 +#define BASE_USB0_CLK 1 +#define BASE_PERIPH_CLK 2 +#define BASE_USB1_CLK 3 +#define BASE_CPU_CLK 4 +#define BASE_SPIFI_CLK 5 +#define BASE_SPI_CLK 6 +#define BASE_PHY_RX_CLK 7 +#define BASE_PHY_TX_CLK 8 +#define BASE_APB1_CLK 9 +#define BASE_APB3_CLK 10 +#define BASE_LCD_CLK 11 +#define BASE_ADCHS_CLK 12 +#define BASE_SDIO_CLK 13 +#define BASE_SSP0_CLK 14 +#define BASE_SSP1_CLK 15 +#define BASE_UART0_CLK 16 +#define BASE_UART1_CLK 17 +#define BASE_UART2_CLK 18 +#define BASE_UART3_CLK 19 +#define BASE_OUT_CLK 20 +#define BASE_RES1_CLK 21 +#define BASE_RES2_CLK 22 +#define BASE_RES3_CLK 23 +#define BASE_RES4_CLK 24 +#define BASE_AUDIO_CLK 25 +#define BASE_CGU_OUT0_CLK 26 +#define BASE_CGU_OUT1_CLK 27 +#define BASE_CLK_MAX (BASE_CGU_OUT1_CLK + 1) diff --git a/include/dt-bindings/clock/lpc32xx-clock.h b/include/dt-bindings/clock/lpc32xx-clock.h new file mode 100644 index 0000000..e624d3a --- /dev/null +++ b/include/dt-bindings/clock/lpc32xx-clock.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2015 Vladimir Zapolskiy + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. + * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +#ifndef __DT_BINDINGS_LPC32XX_CLOCK_H +#define __DT_BINDINGS_LPC32XX_CLOCK_H + +/* LPC32XX System Control Block clocks */ +#define LPC32XX_CLK_RTC 1 +#define LPC32XX_CLK_DMA 2 +#define LPC32XX_CLK_MLC 3 +#define LPC32XX_CLK_SLC 4 +#define LPC32XX_CLK_LCD 5 +#define LPC32XX_CLK_MAC 6 +#define LPC32XX_CLK_SD 7 +#define LPC32XX_CLK_DDRAM 8 +#define LPC32XX_CLK_SSP0 9 +#define LPC32XX_CLK_SSP1 10 +#define LPC32XX_CLK_UART3 11 +#define LPC32XX_CLK_UART4 12 +#define LPC32XX_CLK_UART5 13 +#define LPC32XX_CLK_UART6 14 +#define LPC32XX_CLK_IRDA 15 +#define LPC32XX_CLK_I2C1 16 +#define LPC32XX_CLK_I2C2 17 +#define LPC32XX_CLK_TIMER0 18 +#define LPC32XX_CLK_TIMER1 19 +#define LPC32XX_CLK_TIMER2 20 +#define LPC32XX_CLK_TIMER3 21 +#define LPC32XX_CLK_TIMER4 22 +#define LPC32XX_CLK_TIMER5 23 +#define LPC32XX_CLK_WDOG 24 +#define LPC32XX_CLK_I2S0 25 +#define LPC32XX_CLK_I2S1 26 +#define LPC32XX_CLK_SPI1 27 +#define LPC32XX_CLK_SPI2 28 +#define LPC32XX_CLK_MCPWM 29 +#define LPC32XX_CLK_HSTIMER 30 +#define LPC32XX_CLK_KEY 31 +#define LPC32XX_CLK_PWM1 32 +#define LPC32XX_CLK_PWM2 33 +#define LPC32XX_CLK_ADC 34 +#define LPC32XX_CLK_HCLK_PLL 35 +#define LPC32XX_CLK_PERIPH 36 + +/* LPC32XX USB clocks */ +#define LPC32XX_USB_CLK_I2C 1 +#define LPC32XX_USB_CLK_DEVICE 2 +#define LPC32XX_USB_CLK_HOST 3 + +#endif /* __DT_BINDINGS_LPC32XX_CLOCK_H */ diff --git a/include/dt-bindings/clock/lsi,axm5516-clks.h b/include/dt-bindings/clock/lsi,axm5516-clks.h new file mode 100644 index 0000000..050bbda --- /dev/null +++ b/include/dt-bindings/clock/lsi,axm5516-clks.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 LSI Corporation + */ + +#ifndef _DT_BINDINGS_CLK_AXM5516_H +#define _DT_BINDINGS_CLK_AXM5516_H + +#define AXXIA_CLK_FAB_PLL 0 +#define AXXIA_CLK_CPU_PLL 1 +#define AXXIA_CLK_SYS_PLL 2 +#define AXXIA_CLK_SM0_PLL 3 +#define AXXIA_CLK_SM1_PLL 4 +#define AXXIA_CLK_FAB_DIV 5 +#define AXXIA_CLK_SYS_DIV 6 +#define AXXIA_CLK_NRCP_DIV 7 +#define AXXIA_CLK_CPU0_DIV 8 +#define AXXIA_CLK_CPU1_DIV 9 +#define 
AXXIA_CLK_CPU2_DIV 10
+#define AXXIA_CLK_CPU3_DIV 11
+#define AXXIA_CLK_PER_DIV 12
+#define AXXIA_CLK_MMC_DIV 13
+#define AXXIA_CLK_FAB 14
+#define AXXIA_CLK_SYS 15
+#define AXXIA_CLK_NRCP 16
+#define AXXIA_CLK_CPU0 17
+#define AXXIA_CLK_CPU1 18
+#define AXXIA_CLK_CPU2 19
+#define AXXIA_CLK_CPU3 20
+#define AXXIA_CLK_PER 21
+#define AXXIA_CLK_MMC 22
+
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
new file mode 100644
index 0000000..e785c6e
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_MMP2_CLOCK_H
+#define __DTS_MARVELL_MMP2_CLOCK_H
+
+/* fixed clocks and plls */
+#define MMP2_CLK_CLK32 1
+#define MMP2_CLK_VCTCXO 2
+#define MMP2_CLK_PLL1 3
+#define MMP2_CLK_PLL1_2 8
+#define MMP2_CLK_PLL1_4 9
+#define MMP2_CLK_PLL1_8 10
+#define MMP2_CLK_PLL1_16 11
+#define MMP2_CLK_PLL1_3 12
+#define MMP2_CLK_PLL1_6 13
+#define MMP2_CLK_PLL1_12 14
+#define MMP2_CLK_PLL1_20 15
+#define MMP2_CLK_PLL2 16
+#define MMP2_CLK_PLL2_2 17
+#define MMP2_CLK_PLL2_4 18
+#define MMP2_CLK_PLL2_8 19
+#define MMP2_CLK_PLL2_16 20
+#define MMP2_CLK_PLL2_3 21
+#define MMP2_CLK_PLL2_6 22
+#define MMP2_CLK_PLL2_12 23
+#define MMP2_CLK_VCTCXO_2 24
+#define MMP2_CLK_VCTCXO_4 25
+#define MMP2_CLK_UART_PLL 26
+#define MMP2_CLK_USB_PLL 27
+
+/* apb peripherals */
+#define MMP2_CLK_TWSI0 60
+#define MMP2_CLK_TWSI1 61
+#define MMP2_CLK_TWSI2 62
+#define MMP2_CLK_TWSI3 63
+#define MMP2_CLK_TWSI4 64
+#define MMP2_CLK_TWSI5 65
+#define MMP2_CLK_GPIO 66
+#define MMP2_CLK_KPC 67
+#define MMP2_CLK_RTC 68
+#define MMP2_CLK_PWM0 69
+#define MMP2_CLK_PWM1 70
+#define MMP2_CLK_PWM2 71
+#define MMP2_CLK_PWM3 72
+#define MMP2_CLK_UART0 73
+#define MMP2_CLK_UART1 74
+#define MMP2_CLK_UART2 75
+#define MMP2_CLK_UART3 76
+#define MMP2_CLK_SSP0 77
+#define MMP2_CLK_SSP1 78
+#define MMP2_CLK_SSP2 79
+#define MMP2_CLK_SSP3 80
+#define MMP2_CLK_TIMER 81
+
+/* axi peripherals */
+#define MMP2_CLK_SDH0 101
+#define MMP2_CLK_SDH1 102
+#define MMP2_CLK_SDH2 103
+#define MMP2_CLK_SDH3 104
+#define MMP2_CLK_USB 105
+#define MMP2_CLK_DISP0 106
+#define MMP2_CLK_DISP0_MUX 107
+#define MMP2_CLK_DISP0_SPHY 108
+#define MMP2_CLK_DISP1 109
+#define MMP2_CLK_DISP1_MUX 110
+#define MMP2_CLK_CCIC_ARBITER 111
+#define MMP2_CLK_CCIC0 112
+#define MMP2_CLK_CCIC0_MIX 113
+#define MMP2_CLK_CCIC0_PHY 114
+#define MMP2_CLK_CCIC0_SPHY 115
+#define MMP2_CLK_CCIC1 116
+#define MMP2_CLK_CCIC1_MIX 117
+#define MMP2_CLK_CCIC1_PHY 118
+#define MMP2_CLK_CCIC1_SPHY 119
+#define MMP2_CLK_DISP0_LCDC 120
+
+#define MMP2_NR_CLKS 200
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
new file mode 100644
index 0000000..caf9043
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_PXA168_CLOCK_H
+#define __DTS_MARVELL_PXA168_CLOCK_H
+
+/* fixed clocks and plls */
+#define PXA168_CLK_CLK32 1
+#define PXA168_CLK_VCTCXO 2
+#define PXA168_CLK_PLL1 3
+#define PXA168_CLK_PLL1_2 8
+#define PXA168_CLK_PLL1_4 9
+#define PXA168_CLK_PLL1_8 10
+#define PXA168_CLK_PLL1_16 11
+#define PXA168_CLK_PLL1_6 12
+#define PXA168_CLK_PLL1_12 13
+#define PXA168_CLK_PLL1_24 14
+#define PXA168_CLK_PLL1_48 15
+#define PXA168_CLK_PLL1_96 16
+#define PXA168_CLK_PLL1_13 17
+#define PXA168_CLK_PLL1_13_1_5 18
+#define PXA168_CLK_PLL1_2_1_5 19
+#define PXA168_CLK_PLL1_3_16 20
+#define PXA168_CLK_PLL1_192 21
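+
+/*
+ * The gaps in this list (IDs 4..7 after PXA168_CLK_PLL1, and 22..26
+ * here) look deliberate: these values are device tree ABI, so unused
+ * slots stay reserved rather than being renumbered.
+ */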
+#define PXA168_CLK_UART_PLL 27
+#define PXA168_CLK_USB_PLL 28
+
+/* apb peripherals */
+#define PXA168_CLK_TWSI0 60
+#define PXA168_CLK_TWSI1 61
+#define PXA168_CLK_TWSI2 62
+#define PXA168_CLK_TWSI3 63
+#define PXA168_CLK_GPIO 64
+#define PXA168_CLK_KPC 65
+#define PXA168_CLK_RTC 66
+#define PXA168_CLK_PWM0 67
+#define PXA168_CLK_PWM1 68
+#define PXA168_CLK_PWM2 69
+#define PXA168_CLK_PWM3 70
+#define PXA168_CLK_UART0 71
+#define PXA168_CLK_UART1 72
+#define PXA168_CLK_UART2 73
+#define PXA168_CLK_SSP0 74
+#define PXA168_CLK_SSP1 75
+#define PXA168_CLK_SSP2 76
+#define PXA168_CLK_SSP3 77
+#define PXA168_CLK_SSP4 78
+#define PXA168_CLK_TIMER 79
+
+/* axi peripherals */
+#define PXA168_CLK_DFC 100
+#define PXA168_CLK_SDH0 101
+#define PXA168_CLK_SDH1 102
+#define PXA168_CLK_SDH2 103
+#define PXA168_CLK_USB 104
+#define PXA168_CLK_SPH 105
+#define PXA168_CLK_DISP0 106
+#define PXA168_CLK_CCIC0 107
+#define PXA168_CLK_CCIC0_PHY 108
+#define PXA168_CLK_CCIC0_SPHY 109
+
+#define PXA168_NR_CLKS 200
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
new file mode 100644
index 0000000..5dca482
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_PXA1928_CLOCK_H
+#define __DTS_MARVELL_PXA1928_CLOCK_H
+
+/*
+ * Clock ID values here correspond to the control register offset/4;
+ * PXA1928_CLK_UART0 (0x0b), for example, maps to register offset 0x2c.
+ */
+
+/* apb peripherals */
+#define PXA1928_CLK_RTC 0x00
+#define PXA1928_CLK_TWSI0 0x01
+#define PXA1928_CLK_TWSI1 0x02
+#define PXA1928_CLK_TWSI2 0x03
+#define PXA1928_CLK_TWSI3 0x04
+#define PXA1928_CLK_OWIRE 0x05
+#define PXA1928_CLK_KPC 0x06
+#define PXA1928_CLK_TB_ROTARY 0x07
+#define PXA1928_CLK_SW_JTAG 0x08
+#define PXA1928_CLK_TIMER1 0x09
+#define PXA1928_CLK_UART0 0x0b
+#define PXA1928_CLK_UART1 0x0c
+#define PXA1928_CLK_UART2 0x0d
+#define PXA1928_CLK_GPIO 0x0e
+#define PXA1928_CLK_PWM0 0x0f
+#define PXA1928_CLK_PWM1 0x10
+#define PXA1928_CLK_PWM2 0x11
+#define PXA1928_CLK_PWM3 0x12
+#define PXA1928_CLK_SSP0 0x13
+#define PXA1928_CLK_SSP1 0x14
+#define PXA1928_CLK_SSP2 0x15
+
+#define PXA1928_CLK_TWSI4 0x1f
+#define PXA1928_CLK_TWSI5 0x20
+#define PXA1928_CLK_UART3 0x22
+#define PXA1928_CLK_THSENS_GLOB 0x24
+#define PXA1928_CLK_THSENS_CPU 0x26
+#define PXA1928_CLK_THSENS_VPU 0x27
+#define PXA1928_CLK_THSENS_GC 0x28
+#define PXA1928_APBC_NR_CLKS 0x30
+
+
+/* axi peripherals */
+#define PXA1928_CLK_SDH0 0x15
+#define PXA1928_CLK_SDH1 0x16
+#define PXA1928_CLK_USB 0x17
+#define PXA1928_CLK_NAND 0x18
+#define PXA1928_CLK_DMA 0x19
+
+#define PXA1928_CLK_SDH2 0x3a
+#define PXA1928_CLK_SDH3 0x3b
+#define PXA1928_CLK_HSIC 0x3e
+#define PXA1928_CLK_SDH4 0x57
+#define PXA1928_CLK_GC3D 0x5d
+#define PXA1928_CLK_GC2D 0x5f
+
+#define PXA1928_APMU_NR_CLKS 0x60
+
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
new file mode 100644
index 0000000..7bf4623
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_PXA910_CLOCK_H
+#define __DTS_MARVELL_PXA910_CLOCK_H
+
+/* fixed clocks and plls */
+#define PXA910_CLK_CLK32 1
+#define PXA910_CLK_VCTCXO 2
+#define PXA910_CLK_PLL1 3
+#define PXA910_CLK_PLL1_2 8
+#define PXA910_CLK_PLL1_4 9
+#define PXA910_CLK_PLL1_8 10
+#define PXA910_CLK_PLL1_16 11
+#define PXA910_CLK_PLL1_6 12
+#define PXA910_CLK_PLL1_12 13
+#define PXA910_CLK_PLL1_24 14
+#define PXA910_CLK_PLL1_48 15
+#define PXA910_CLK_PLL1_96 16
+#define PXA910_CLK_PLL1_13 17
+#define PXA910_CLK_PLL1_13_1_5 18
+#define PXA910_CLK_PLL1_2_1_5 19
+#define PXA910_CLK_PLL1_3_16 20
+#define PXA910_CLK_PLL1_192 21
+#define PXA910_CLK_UART_PLL 27
+#define PXA910_CLK_USB_PLL 28
+
+/* apb peripherals */
+#define PXA910_CLK_TWSI0 60
+#define PXA910_CLK_TWSI1 61
+#define PXA910_CLK_TWSI2 62
+#define PXA910_CLK_TWSI3 63
+#define PXA910_CLK_GPIO 64
+#define PXA910_CLK_KPC 65
+#define PXA910_CLK_RTC 66
+#define PXA910_CLK_PWM0 67
+#define PXA910_CLK_PWM1 68
+#define PXA910_CLK_PWM2 69
+#define PXA910_CLK_PWM3 70
+#define PXA910_CLK_UART0 71
+#define PXA910_CLK_UART1 72
+#define PXA910_CLK_UART2 73
+#define PXA910_CLK_SSP0 74
+#define PXA910_CLK_SSP1 75
+#define PXA910_CLK_TIMER0 76
+#define PXA910_CLK_TIMER1 77
+
+/* axi peripherals */
+#define PXA910_CLK_DFC 100
+#define PXA910_CLK_SDH0 101
+#define PXA910_CLK_SDH1 102
+#define PXA910_CLK_SDH2 103
+#define PXA910_CLK_USB 104
+#define PXA910_CLK_SPH 105
+#define PXA910_CLK_DISP0 106
+#define PXA910_CLK_CCIC0 107
+#define PXA910_CLK_CCIC0_PHY 108
+#define PXA910_CLK_CCIC0_SPHY 109
+
+#define PXA910_NR_CLKS 200
+#endif
diff --git a/include/dt-bindings/clock/maxim,max77620.h b/include/dt-bindings/clock/maxim,max77620.h
new file mode 100644
index 0000000..9d6609a
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77620.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Device Tree binding constants for clocks of the Maxim 77620 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77620_CLK_32K_OUT0 0
+
+/* Total number of clocks. */
+#define MAX77620_CLKS_NUM (MAX77620_CLK_32K_OUT0 + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H */
diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h
new file mode 100644
index 0000000..af8261d
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77686.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014 Google, Inc
+ *
+ * Device Tree binding constants for clocks of the Maxim 77686 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77686_CLK_AP 0
+#define MAX77686_CLK_CP 1
+#define MAX77686_CLK_PMIC 2
+
+/* Total number of clocks. */
+#define MAX77686_CLKS_NUM (MAX77686_CLK_PMIC + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77686_CLOCK_H */
diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h
new file mode 100644
index 0000000..51adcba
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77802.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014 Google, Inc
+ *
+ * Device Tree binding constants for clocks of the Maxim 77802 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77802_CLK_32K_AP 0
+#define MAX77802_CLK_32K_CP 1
+
+/* Total number of clocks.
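+ * (One more than the highest clock index; a provider driver would
+ * typically use this to size its clock lookup table.)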
*/ +#define MAX77802_CLKS_NUM (MAX77802_CLK_32K_CP + 1) + +#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77802_CLOCK_H */ diff --git a/include/dt-bindings/clock/maxim,max9485.h b/include/dt-bindings/clock/maxim,max9485.h new file mode 100644 index 0000000..368719a --- /dev/null +++ b/include/dt-bindings/clock/maxim,max9485.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 Daniel Mack + */ + +#ifndef __DT_BINDINGS_MAX9485_CLK_H +#define __DT_BINDINGS_MAX9485_CLK_H + +#define MAX9485_MCLKOUT 0 +#define MAX9485_CLKOUT 1 +#define MAX9485_CLKOUT1 2 +#define MAX9485_CLKOUT2 3 + +#endif /* __DT_BINDINGS_MAX9485_CLK_H */ diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h new file mode 100644 index 0000000..68862aa --- /dev/null +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Meson8b clock tree IDs + */ + +#ifndef __MESON8B_CLKC_H +#define __MESON8B_CLKC_H + +#define CLKID_UNUSED 0 +#define CLKID_XTAL 1 +#define CLKID_PLL_FIXED 2 +#define CLKID_PLL_VID 3 +#define CLKID_PLL_SYS 4 +#define CLKID_FCLK_DIV2 5 +#define CLKID_FCLK_DIV3 6 +#define CLKID_FCLK_DIV4 7 +#define CLKID_FCLK_DIV5 8 +#define CLKID_FCLK_DIV7 9 +#define CLKID_CLK81 10 +#define CLKID_MALI 11 +#define CLKID_CPUCLK 12 +#define CLKID_ZERO 13 +#define CLKID_MPEG_SEL 14 +#define CLKID_MPEG_DIV 15 +#define CLKID_DDR 16 +#define CLKID_DOS 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC 21 +#define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 +#define CLKID_SMART_CARD 24 +#define CLKID_RNG0 25 +#define CLKID_UART0 26 +#define CLKID_SDHC 27 +#define CLKID_STREAM 28 +#define CLKID_ASYNC_FIFO 29 +#define CLKID_SDIO 30 +#define CLKID_ABUF 31 +#define CLKID_HIU_IFACE 32 +#define CLKID_ASSIST_MISC 33 +#define CLKID_SPI 34 +#define CLKID_I2S_SPDIF 35 +#define CLKID_ETH 36 +#define CLKID_DEMUX 37 +#define CLKID_AIU_GLUE 38 +#define CLKID_IEC958 39 +#define CLKID_I2S_OUT 40 +#define CLKID_AMCLK 41 +#define CLKID_AIFIFO2 42 +#define CLKID_MIXER 43 +#define CLKID_MIXER_IFACE 44 +#define CLKID_ADC 45 +#define CLKID_BLKMV 46 +#define CLKID_AIU 47 +#define CLKID_UART1 48 +#define CLKID_G2D 49 +#define CLKID_USB0 50 +#define CLKID_USB1 51 +#define CLKID_RESET 52 +#define CLKID_NAND 53 +#define CLKID_DOS_PARSER 54 +#define CLKID_USB 55 +#define CLKID_VDIN1 56 +#define CLKID_AHB_ARB0 57 +#define CLKID_EFUSE 58 +#define CLKID_BOOT_ROM 59 +#define CLKID_AHB_DATA_BUS 60 +#define CLKID_AHB_CTRL_BUS 61 +#define CLKID_HDMI_INTR_SYNC 62 +#define CLKID_HDMI_PCLK 63 +#define CLKID_USB1_DDR_BRIDGE 64 +#define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_MMC_PCLK 66 +#define CLKID_DVIN 67 +#define CLKID_UART2 68 +#define CLKID_SANA 69 +#define CLKID_VPU_INTR 70 +#define CLKID_SEC_AHB_AHB3_BRIDGE 71 +#define CLKID_CLK81_A9 72 +#define CLKID_VCLK2_VENCI0 73 +#define CLKID_VCLK2_VENCI1 74 +#define CLKID_VCLK2_VENCP0 75 +#define CLKID_VCLK2_VENCP1 76 +#define CLKID_GCLK_VENCI_INT 77 +#define CLKID_GCLK_VENCP_INT 78 +#define CLKID_DAC_CLK 79 +#define CLKID_AOCLK_GATE 80 +#define CLKID_IEC958_GATE 81 +#define CLKID_ENC480P 82 +#define CLKID_RNG1 83 +#define CLKID_GCLK_VENCL_INT 84 +#define CLKID_VCLK2_VENCLMCC 85 +#define CLKID_VCLK2_VENCL 86 +#define CLKID_VCLK2_OTHER 87 +#define CLKID_EDP 88 +#define CLKID_AO_MEDIA_CPU 89 +#define CLKID_AO_AHB_SRAM 90 +#define CLKID_AO_AHB_BUS 91 +#define CLKID_AO_IFACE 92 +#define CLKID_MPLL0 93 +#define CLKID_MPLL1 94 +#define CLKID_MPLL2 95 +#define 
CLKID_NAND_CLK 112 +#define CLKID_APB 124 +#define CLKID_PERIPH 126 +#define CLKID_AXI 128 +#define CLKID_L2_DRAM 130 +#define CLKID_VPU 190 +#define CLKID_VDEC_1 196 +#define CLKID_VDEC_HCODEC 199 +#define CLKID_VDEC_2 202 +#define CLKID_VDEC_HEVC 206 +#define CLKID_CTS_AMCLK 209 +#define CLKID_CTS_MCLK_I958 212 +#define CLKID_CTS_I958 213 + +#endif /* __MESON8B_CLKC_H */ diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h new file mode 100644 index 0000000..371668d --- /dev/null +++ b/include/dt-bindings/clock/microchip,pic32-clock.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Purna Chandra Mandal, + * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ +#define _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ + +/* clock output indices */ +#define POSCCLK 0 +#define FRCCLK 1 +#define BFRCCLK 2 +#define LPRCCLK 3 +#define SOSCCLK 4 +#define FRCDIVCLK 5 +#define PLLCLK 6 +#define SCLK 7 +#define PB1CLK 8 +#define PB2CLK 9 +#define PB3CLK 10 +#define PB4CLK 11 +#define PB5CLK 12 +#define PB6CLK 13 +#define PB7CLK 14 +#define REF1CLK 15 +#define REF2CLK 16 +#define REF3CLK 17 +#define REF4CLK 18 +#define REF5CLK 19 +#define UPLLCLK 20 +#define MAXCLKS 21 + +#endif /* _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ */ diff --git a/include/dt-bindings/clock/mpc512x-clock.h b/include/dt-bindings/clock/mpc512x-clock.h new file mode 100644 index 0000000..13c316b --- /dev/null +++ b/include/dt-bindings/clock/mpc512x-clock.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for MPC512x clock specs in DT bindings. + */ + +#ifndef _DT_BINDINGS_CLOCK_MPC512x_CLOCK_H +#define _DT_BINDINGS_CLOCK_MPC512x_CLOCK_H + +#define MPC512x_CLK_DUMMY 0 +#define MPC512x_CLK_REF 1 +#define MPC512x_CLK_SYS 2 +#define MPC512x_CLK_DIU 3 +#define MPC512x_CLK_VIU 4 +#define MPC512x_CLK_CSB 5 +#define MPC512x_CLK_E300 6 +#define MPC512x_CLK_IPS 7 +#define MPC512x_CLK_FEC 8 +#define MPC512x_CLK_SATA 9 +#define MPC512x_CLK_PATA 10 +#define MPC512x_CLK_NFC 11 +#define MPC512x_CLK_LPC 12 +#define MPC512x_CLK_MBX_BUS 13 +#define MPC512x_CLK_MBX 14 +#define MPC512x_CLK_MBX_3D 15 +#define MPC512x_CLK_AXE 16 +#define MPC512x_CLK_USB1 17 +#define MPC512x_CLK_USB2 18 +#define MPC512x_CLK_I2C 19 +#define MPC512x_CLK_MSCAN0_MCLK 20 +#define MPC512x_CLK_MSCAN1_MCLK 21 +#define MPC512x_CLK_MSCAN2_MCLK 22 +#define MPC512x_CLK_MSCAN3_MCLK 23 +#define MPC512x_CLK_BDLC 24 +#define MPC512x_CLK_SDHC 25 +#define MPC512x_CLK_PCI 26 +#define MPC512x_CLK_PSC_MCLK_IN 27 +#define MPC512x_CLK_SPDIF_TX 28 +#define MPC512x_CLK_SPDIF_RX 29 +#define MPC512x_CLK_SPDIF_MCLK 30 +#define MPC512x_CLK_SPDIF 31 +#define MPC512x_CLK_AC97 32 +#define MPC512x_CLK_PSC0_MCLK 33 +#define MPC512x_CLK_PSC1_MCLK 34 +#define MPC512x_CLK_PSC2_MCLK 35 +#define MPC512x_CLK_PSC3_MCLK 36 +#define MPC512x_CLK_PSC4_MCLK 37 +#define MPC512x_CLK_PSC5_MCLK 38 +#define MPC512x_CLK_PSC6_MCLK 39 +#define MPC512x_CLK_PSC7_MCLK 40 +#define MPC512x_CLK_PSC8_MCLK 41 +#define MPC512x_CLK_PSC9_MCLK 42 +#define MPC512x_CLK_PSC10_MCLK 43 +#define MPC512x_CLK_PSC11_MCLK 44 +#define MPC512x_CLK_PSC_FIFO 45 +#define MPC512x_CLK_PSC0 46 +#define MPC512x_CLK_PSC1 47 +#define MPC512x_CLK_PSC2 48 +#define MPC512x_CLK_PSC3 49 +#define MPC512x_CLK_PSC4 50 +#define MPC512x_CLK_PSC5 51 +#define MPC512x_CLK_PSC6 52 +#define MPC512x_CLK_PSC7 53 +#define MPC512x_CLK_PSC8 54 +#define MPC512x_CLK_PSC9 55 +#define MPC512x_CLK_PSC10 
56 +#define MPC512x_CLK_PSC11 57 +#define MPC512x_CLK_SDHC2 58 +#define MPC512x_CLK_FEC2 59 +#define MPC512x_CLK_OUT0_CLK 60 +#define MPC512x_CLK_OUT1_CLK 61 +#define MPC512x_CLK_OUT2_CLK 62 +#define MPC512x_CLK_OUT3_CLK 63 +#define MPC512x_CLK_CAN_CLK_IN 64 + +#define MPC512x_CLK_LAST_PUBLIC 64 + +#endif diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h new file mode 100644 index 0000000..6d531d5 --- /dev/null +++ b/include/dt-bindings/clock/mt2701-clk.h @@ -0,0 +1,484 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Shunli Wang + */ + +#ifndef _DT_BINDINGS_CLK_MT2701_H +#define _DT_BINDINGS_CLK_MT2701_H + +/* TOPCKGEN */ +#define CLK_TOP_SYSPLL 1 +#define CLK_TOP_SYSPLL_D2 2 +#define CLK_TOP_SYSPLL_D3 3 +#define CLK_TOP_SYSPLL_D5 4 +#define CLK_TOP_SYSPLL_D7 5 +#define CLK_TOP_SYSPLL1_D2 6 +#define CLK_TOP_SYSPLL1_D4 7 +#define CLK_TOP_SYSPLL1_D8 8 +#define CLK_TOP_SYSPLL1_D16 9 +#define CLK_TOP_SYSPLL2_D2 10 +#define CLK_TOP_SYSPLL2_D4 11 +#define CLK_TOP_SYSPLL2_D8 12 +#define CLK_TOP_SYSPLL3_D2 13 +#define CLK_TOP_SYSPLL3_D4 14 +#define CLK_TOP_SYSPLL4_D2 15 +#define CLK_TOP_SYSPLL4_D4 16 +#define CLK_TOP_UNIVPLL 17 +#define CLK_TOP_UNIVPLL_D2 18 +#define CLK_TOP_UNIVPLL_D3 19 +#define CLK_TOP_UNIVPLL_D5 20 +#define CLK_TOP_UNIVPLL_D7 21 +#define CLK_TOP_UNIVPLL_D26 22 +#define CLK_TOP_UNIVPLL_D52 23 +#define CLK_TOP_UNIVPLL_D108 24 +#define CLK_TOP_USB_PHY48M 25 +#define CLK_TOP_UNIVPLL1_D2 26 +#define CLK_TOP_UNIVPLL1_D4 27 +#define CLK_TOP_UNIVPLL1_D8 28 +#define CLK_TOP_UNIVPLL2_D2 29 +#define CLK_TOP_UNIVPLL2_D4 30 +#define CLK_TOP_UNIVPLL2_D8 31 +#define CLK_TOP_UNIVPLL2_D16 32 +#define CLK_TOP_UNIVPLL2_D32 33 +#define CLK_TOP_UNIVPLL3_D2 34 +#define CLK_TOP_UNIVPLL3_D4 35 +#define CLK_TOP_UNIVPLL3_D8 36 +#define CLK_TOP_MSDCPLL 37 +#define CLK_TOP_MSDCPLL_D2 38 +#define CLK_TOP_MSDCPLL_D4 39 +#define CLK_TOP_MSDCPLL_D8 40 +#define CLK_TOP_MMPLL 41 +#define CLK_TOP_MMPLL_D2 42 +#define CLK_TOP_DMPLL 43 +#define CLK_TOP_DMPLL_D2 44 +#define CLK_TOP_DMPLL_D4 45 +#define CLK_TOP_DMPLL_X2 46 +#define CLK_TOP_TVDPLL 47 +#define CLK_TOP_TVDPLL_D2 48 +#define CLK_TOP_TVDPLL_D4 49 +#define CLK_TOP_TVD2PLL 50 +#define CLK_TOP_TVD2PLL_D2 51 +#define CLK_TOP_HADDS2PLL_98M 52 +#define CLK_TOP_HADDS2PLL_294M 53 +#define CLK_TOP_HADDS2_FB 54 +#define CLK_TOP_MIPIPLL_D2 55 +#define CLK_TOP_MIPIPLL_D4 56 +#define CLK_TOP_HDMIPLL 57 +#define CLK_TOP_HDMIPLL_D2 58 +#define CLK_TOP_HDMIPLL_D3 59 +#define CLK_TOP_HDMI_SCL_RX 60 +#define CLK_TOP_HDMI_0_PIX340M 61 +#define CLK_TOP_HDMI_0_DEEP340M 62 +#define CLK_TOP_HDMI_0_PLL340M 63 +#define CLK_TOP_AUD1PLL_98M 64 +#define CLK_TOP_AUD2PLL_90M 65 +#define CLK_TOP_AUDPLL 66 +#define CLK_TOP_AUDPLL_D4 67 +#define CLK_TOP_AUDPLL_D8 68 +#define CLK_TOP_AUDPLL_D16 69 +#define CLK_TOP_AUDPLL_D24 70 +#define CLK_TOP_ETHPLL_500M 71 +#define CLK_TOP_VDECPLL 72 +#define CLK_TOP_VENCPLL 73 +#define CLK_TOP_MIPIPLL 74 +#define CLK_TOP_ARMPLL_1P3G 75 + +#define CLK_TOP_MM_SEL 76 +#define CLK_TOP_DDRPHYCFG_SEL 77 +#define CLK_TOP_MEM_SEL 78 +#define CLK_TOP_AXI_SEL 79 +#define CLK_TOP_CAMTG_SEL 80 +#define CLK_TOP_MFG_SEL 81 +#define CLK_TOP_VDEC_SEL 82 +#define CLK_TOP_PWM_SEL 83 +#define CLK_TOP_MSDC30_0_SEL 84 +#define CLK_TOP_USB20_SEL 85 +#define CLK_TOP_SPI0_SEL 86 +#define CLK_TOP_UART_SEL 87 +#define CLK_TOP_AUDINTBUS_SEL 88 +#define CLK_TOP_AUDIO_SEL 89 +#define CLK_TOP_MSDC30_2_SEL 90 +#define CLK_TOP_MSDC30_1_SEL 91 +#define CLK_TOP_DPI1_SEL 92 +#define 
CLK_TOP_DPI0_SEL 93 +#define CLK_TOP_SCP_SEL 94 +#define CLK_TOP_PMICSPI_SEL 95 +#define CLK_TOP_APLL_SEL 96 +#define CLK_TOP_HDMI_SEL 97 +#define CLK_TOP_TVE_SEL 98 +#define CLK_TOP_EMMC_HCLK_SEL 99 +#define CLK_TOP_NFI2X_SEL 100 +#define CLK_TOP_RTC_SEL 101 +#define CLK_TOP_OSD_SEL 102 +#define CLK_TOP_NR_SEL 103 +#define CLK_TOP_DI_SEL 104 +#define CLK_TOP_FLASH_SEL 105 +#define CLK_TOP_ASM_M_SEL 106 +#define CLK_TOP_ASM_I_SEL 107 +#define CLK_TOP_INTDIR_SEL 108 +#define CLK_TOP_HDMIRX_BIST_SEL 109 +#define CLK_TOP_ETHIF_SEL 110 +#define CLK_TOP_MS_CARD_SEL 111 +#define CLK_TOP_ASM_H_SEL 112 +#define CLK_TOP_SPI1_SEL 113 +#define CLK_TOP_CMSYS_SEL 114 +#define CLK_TOP_MSDC30_3_SEL 115 +#define CLK_TOP_HDMIRX26_24_SEL 116 +#define CLK_TOP_AUD2DVD_SEL 117 +#define CLK_TOP_8BDAC_SEL 118 +#define CLK_TOP_SPI2_SEL 119 +#define CLK_TOP_AUD_MUX1_SEL 120 +#define CLK_TOP_AUD_MUX2_SEL 121 +#define CLK_TOP_AUDPLL_MUX_SEL 122 +#define CLK_TOP_AUD_K1_SRC_SEL 123 +#define CLK_TOP_AUD_K2_SRC_SEL 124 +#define CLK_TOP_AUD_K3_SRC_SEL 125 +#define CLK_TOP_AUD_K4_SRC_SEL 126 +#define CLK_TOP_AUD_K5_SRC_SEL 127 +#define CLK_TOP_AUD_K6_SRC_SEL 128 +#define CLK_TOP_PADMCLK_SEL 129 +#define CLK_TOP_AUD_EXTCK1_DIV 130 +#define CLK_TOP_AUD_EXTCK2_DIV 131 +#define CLK_TOP_AUD_MUX1_DIV 132 +#define CLK_TOP_AUD_MUX2_DIV 133 +#define CLK_TOP_AUD_K1_SRC_DIV 134 +#define CLK_TOP_AUD_K2_SRC_DIV 135 +#define CLK_TOP_AUD_K3_SRC_DIV 136 +#define CLK_TOP_AUD_K4_SRC_DIV 137 +#define CLK_TOP_AUD_K5_SRC_DIV 138 +#define CLK_TOP_AUD_K6_SRC_DIV 139 +#define CLK_TOP_AUD_I2S1_MCLK 140 +#define CLK_TOP_AUD_I2S2_MCLK 141 +#define CLK_TOP_AUD_I2S3_MCLK 142 +#define CLK_TOP_AUD_I2S4_MCLK 143 +#define CLK_TOP_AUD_I2S5_MCLK 144 +#define CLK_TOP_AUD_I2S6_MCLK 145 +#define CLK_TOP_AUD_48K_TIMING 146 +#define CLK_TOP_AUD_44K_TIMING 147 + +#define CLK_TOP_32K_INTERNAL 148 +#define CLK_TOP_32K_EXTERNAL 149 +#define CLK_TOP_CLK26M_D8 150 +#define CLK_TOP_8BDAC 151 +#define CLK_TOP_WBG_DIG_416M 152 +#define CLK_TOP_DPI 153 +#define CLK_TOP_DSI0_LNTC_DSI 154 +#define CLK_TOP_AUD_EXT1 155 +#define CLK_TOP_AUD_EXT2 156 +#define CLK_TOP_NFI1X_PAD 157 +#define CLK_TOP_AXISEL_D4 158 +#define CLK_TOP_NR 159 + +/* APMIXEDSYS */ + +#define CLK_APMIXED_ARMPLL 1 +#define CLK_APMIXED_MAINPLL 2 +#define CLK_APMIXED_UNIVPLL 3 +#define CLK_APMIXED_MMPLL 4 +#define CLK_APMIXED_MSDCPLL 5 +#define CLK_APMIXED_TVDPLL 6 +#define CLK_APMIXED_AUD1PLL 7 +#define CLK_APMIXED_TRGPLL 8 +#define CLK_APMIXED_ETHPLL 9 +#define CLK_APMIXED_VDECPLL 10 +#define CLK_APMIXED_HADDS2PLL 11 +#define CLK_APMIXED_AUD2PLL 12 +#define CLK_APMIXED_TVD2PLL 13 +#define CLK_APMIXED_HDMI_REF 14 +#define CLK_APMIXED_NR 15 + +/* DDRPHY */ + +#define CLK_DDRPHY_VENCPLL 1 +#define CLK_DDRPHY_NR 2 + +/* INFRACFG */ + +#define CLK_INFRA_DBG 1 +#define CLK_INFRA_SMI 2 +#define CLK_INFRA_QAXI_CM4 3 +#define CLK_INFRA_AUD_SPLIN_B 4 +#define CLK_INFRA_AUDIO 5 +#define CLK_INFRA_EFUSE 6 +#define CLK_INFRA_L2C_SRAM 7 +#define CLK_INFRA_M4U 8 +#define CLK_INFRA_CONNMCU 9 +#define CLK_INFRA_TRNG 10 +#define CLK_INFRA_RAMBUFIF 11 +#define CLK_INFRA_CPUM 12 +#define CLK_INFRA_KP 13 +#define CLK_INFRA_CEC 14 +#define CLK_INFRA_IRRX 15 +#define CLK_INFRA_PMICSPI 16 +#define CLK_INFRA_PMICWRAP 17 +#define CLK_INFRA_DDCCI 18 +#define CLK_INFRA_CLK_13M 19 +#define CLK_INFRA_CPUSEL 20 +#define CLK_INFRA_NR 21 + +/* PERICFG */ + +#define CLK_PERI_NFI 1 +#define CLK_PERI_THERM 2 +#define CLK_PERI_PWM1 3 +#define CLK_PERI_PWM2 4 +#define CLK_PERI_PWM3 5 +#define CLK_PERI_PWM4 6 +#define CLK_PERI_PWM5 7 +#define 
CLK_PERI_PWM6 8 +#define CLK_PERI_PWM7 9 +#define CLK_PERI_PWM 10 +#define CLK_PERI_USB0 11 +#define CLK_PERI_USB1 12 +#define CLK_PERI_AP_DMA 13 +#define CLK_PERI_MSDC30_0 14 +#define CLK_PERI_MSDC30_1 15 +#define CLK_PERI_MSDC30_2 16 +#define CLK_PERI_MSDC30_3 17 +#define CLK_PERI_MSDC50_3 18 +#define CLK_PERI_NLI 19 +#define CLK_PERI_UART0 20 +#define CLK_PERI_UART1 21 +#define CLK_PERI_UART2 22 +#define CLK_PERI_UART3 23 +#define CLK_PERI_BTIF 24 +#define CLK_PERI_I2C0 25 +#define CLK_PERI_I2C1 26 +#define CLK_PERI_I2C2 27 +#define CLK_PERI_I2C3 28 +#define CLK_PERI_AUXADC 29 +#define CLK_PERI_SPI0 30 +#define CLK_PERI_ETH 31 +#define CLK_PERI_USB0_MCU 32 + +#define CLK_PERI_USB1_MCU 33 +#define CLK_PERI_USB_SLV 34 +#define CLK_PERI_GCPU 35 +#define CLK_PERI_NFI_ECC 36 +#define CLK_PERI_NFI_PAD 37 +#define CLK_PERI_FLASH 38 +#define CLK_PERI_HOST89_INT 39 +#define CLK_PERI_HOST89_SPI 40 +#define CLK_PERI_HOST89_DVD 41 +#define CLK_PERI_SPI1 42 +#define CLK_PERI_SPI2 43 +#define CLK_PERI_FCI 44 + +#define CLK_PERI_UART0_SEL 45 +#define CLK_PERI_UART1_SEL 46 +#define CLK_PERI_UART2_SEL 47 +#define CLK_PERI_UART3_SEL 48 +#define CLK_PERI_NR 49 + +/* AUDIO */ + +#define CLK_AUD_AFE 1 +#define CLK_AUD_LRCK_DETECT 2 +#define CLK_AUD_I2S 3 +#define CLK_AUD_APLL_TUNER 4 +#define CLK_AUD_HDMI 5 +#define CLK_AUD_SPDF 6 +#define CLK_AUD_SPDF2 7 +#define CLK_AUD_APLL 8 +#define CLK_AUD_TML 9 +#define CLK_AUD_AHB_IDLE_EXT 10 +#define CLK_AUD_AHB_IDLE_INT 11 + +#define CLK_AUD_I2SIN1 12 +#define CLK_AUD_I2SIN2 13 +#define CLK_AUD_I2SIN3 14 +#define CLK_AUD_I2SIN4 15 +#define CLK_AUD_I2SIN5 16 +#define CLK_AUD_I2SIN6 17 +#define CLK_AUD_I2SO1 18 +#define CLK_AUD_I2SO2 19 +#define CLK_AUD_I2SO3 20 +#define CLK_AUD_I2SO4 21 +#define CLK_AUD_I2SO5 22 +#define CLK_AUD_I2SO6 23 +#define CLK_AUD_ASRCI1 24 +#define CLK_AUD_ASRCI2 25 +#define CLK_AUD_ASRCO1 26 +#define CLK_AUD_ASRCO2 27 +#define CLK_AUD_ASRC11 28 +#define CLK_AUD_ASRC12 29 +#define CLK_AUD_HDMIRX 30 +#define CLK_AUD_INTDIR 31 +#define CLK_AUD_A1SYS 32 +#define CLK_AUD_A2SYS 33 +#define CLK_AUD_AFE_CONN 34 +#define CLK_AUD_AFE_PCMIF 35 +#define CLK_AUD_AFE_MRGIF 36 + +#define CLK_AUD_MMIF_UL1 37 +#define CLK_AUD_MMIF_UL2 38 +#define CLK_AUD_MMIF_UL3 39 +#define CLK_AUD_MMIF_UL4 40 +#define CLK_AUD_MMIF_UL5 41 +#define CLK_AUD_MMIF_UL6 42 +#define CLK_AUD_MMIF_DL1 43 +#define CLK_AUD_MMIF_DL2 44 +#define CLK_AUD_MMIF_DL3 45 +#define CLK_AUD_MMIF_DL4 46 +#define CLK_AUD_MMIF_DL5 47 +#define CLK_AUD_MMIF_DL6 48 +#define CLK_AUD_MMIF_DLMCH 49 +#define CLK_AUD_MMIF_ARB1 50 +#define CLK_AUD_MMIF_AWB1 51 +#define CLK_AUD_MMIF_AWB2 52 +#define CLK_AUD_MMIF_DAI 53 + +#define CLK_AUD_DMIC1 54 +#define CLK_AUD_DMIC2 55 +#define CLK_AUD_ASRCI3 56 +#define CLK_AUD_ASRCI4 57 +#define CLK_AUD_ASRCI5 58 +#define CLK_AUD_ASRCI6 59 +#define CLK_AUD_ASRCO3 60 +#define CLK_AUD_ASRCO4 61 +#define CLK_AUD_ASRCO5 62 +#define CLK_AUD_ASRCO6 63 +#define CLK_AUD_MEM_ASRC1 64 +#define CLK_AUD_MEM_ASRC2 65 +#define CLK_AUD_MEM_ASRC3 66 +#define CLK_AUD_MEM_ASRC4 67 +#define CLK_AUD_MEM_ASRC5 68 +#define CLK_AUD_DSD_ENC 69 +#define CLK_AUD_ASRC_BRG 70 +#define CLK_AUD_NR 71 + +/* MMSYS */ + +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_CMDQ 3 +#define CLK_MM_MUTEX 4 +#define CLK_MM_DISP_COLOR 5 +#define CLK_MM_DISP_BLS 6 +#define CLK_MM_DISP_WDMA 7 +#define CLK_MM_DISP_RDMA 8 +#define CLK_MM_DISP_OVL 9 +#define CLK_MM_MDP_TDSHP 10 +#define CLK_MM_MDP_WROT 11 +#define CLK_MM_MDP_WDMA 12 +#define CLK_MM_MDP_RSZ1 13 +#define CLK_MM_MDP_RSZ0 14 
+#define CLK_MM_MDP_RDMA 15
+#define CLK_MM_MDP_BLS_26M 16
+#define CLK_MM_CAM_MDP 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_MUTEX_32K 19
+#define CLK_MM_DISP_RDMA1 20
+#define CLK_MM_DISP_UFOE 21
+
+#define CLK_MM_DSI_ENGINE 22
+#define CLK_MM_DSI_DIG 23
+#define CLK_MM_DPI_DIGL 24
+#define CLK_MM_DPI_ENGINE 25
+#define CLK_MM_DPI1_DIGL 26
+#define CLK_MM_DPI1_ENGINE 27
+#define CLK_MM_TVE_OUTPUT 28
+#define CLK_MM_TVE_INPUT 29
+#define CLK_MM_HDMI_PIXEL 30
+#define CLK_MM_HDMI_PLL 31
+#define CLK_MM_HDMI_AUDIO 32
+#define CLK_MM_HDMI_SPDIF 33
+#define CLK_MM_TVE_FMM 34
+#define CLK_MM_NR 35
+
+/* IMGSYS */
+
+#define CLK_IMG_SMI_COMM 1
+#define CLK_IMG_RESZ 2
+#define CLK_IMG_JPGDEC_SMI 3
+#define CLK_IMG_JPGDEC 4
+#define CLK_IMG_VENC_LT 5
+#define CLK_IMG_VENC 6
+#define CLK_IMG_NR 7
+
+/* VDEC */
+
+#define CLK_VDEC_CKGEN 1
+#define CLK_VDEC_LARB 2
+#define CLK_VDEC_NR 3
+
+/* HIFSYS */
+
+#define CLK_HIFSYS_USB0PHY 1
+#define CLK_HIFSYS_USB1PHY 2
+#define CLK_HIFSYS_PCIE0 3
+#define CLK_HIFSYS_PCIE1 4
+#define CLK_HIFSYS_PCIE2 5
+#define CLK_HIFSYS_NR 6
+
+/* ETHSYS */
+#define CLK_ETHSYS_HSDMA 1
+#define CLK_ETHSYS_ESW 2
+#define CLK_ETHSYS_GP2 3
+#define CLK_ETHSYS_GP1 4
+#define CLK_ETHSYS_PCM 5
+#define CLK_ETHSYS_GDMA 6
+#define CLK_ETHSYS_I2S 7
+#define CLK_ETHSYS_CRYPTO 8
+#define CLK_ETHSYS_NR 9
+
+/* G3DSYS */
+#define CLK_G3DSYS_CORE 1
+#define CLK_G3DSYS_NR 2
+
+/* BDP */
+
+#define CLK_BDP_BRG_BA 1
+#define CLK_BDP_BRG_DRAM 2
+#define CLK_BDP_LARB_DRAM 3
+#define CLK_BDP_WR_VDI_PXL 4
+#define CLK_BDP_WR_VDI_DRAM 5
+#define CLK_BDP_WR_B 6
+#define CLK_BDP_DGI_IN 7
+#define CLK_BDP_DGI_OUT 8
+#define CLK_BDP_FMT_MAST_27 9
+#define CLK_BDP_FMT_B 10
+#define CLK_BDP_OSD_B 11
+#define CLK_BDP_OSD_DRAM 12
+#define CLK_BDP_OSD_AGENT 13
+#define CLK_BDP_OSD_PXL 14
+#define CLK_BDP_RLE_B 15
+#define CLK_BDP_RLE_AGENT 16
+#define CLK_BDP_RLE_DRAM 17
+#define CLK_BDP_F27M 18
+#define CLK_BDP_F27M_VDOUT 19
+#define CLK_BDP_F27_74_74 20
+#define CLK_BDP_F2FS 21
+#define CLK_BDP_F2FS74_148 22
+#define CLK_BDP_FB 23
+#define CLK_BDP_VDO_DRAM 24
+#define CLK_BDP_VDO_2FS 25
+#define CLK_BDP_VDO_B 26
+#define CLK_BDP_WR_DI_PXL 27
+#define CLK_BDP_WR_DI_DRAM 28
+#define CLK_BDP_WR_DI_B 29
+#define CLK_BDP_NR_PXL 30
+#define CLK_BDP_NR_DRAM 31
+#define CLK_BDP_NR_B 32
+
+#define CLK_BDP_RX_F 33
+#define CLK_BDP_RX_X 34
+#define CLK_BDP_RXPDT 35
+#define CLK_BDP_RX_CSCL_N 36
+#define CLK_BDP_RX_CSCL 37
+#define CLK_BDP_RX_DDCSCL_N 38
+#define CLK_BDP_RX_DDCSCL 39
+#define CLK_BDP_RX_VCO 40
+#define CLK_BDP_RX_DP 41
+#define CLK_BDP_RX_P 42
+#define CLK_BDP_RX_M 43
+#define CLK_BDP_RX_PLL 44
+#define CLK_BDP_BRG_RT_B 45
+#define CLK_BDP_BRG_RT_DRAM 46
+#define CLK_BDP_LARBRT_DRAM 47
+#define CLK_BDP_TMDS_SYN 48
+#define CLK_BDP_HDMI_MON 49
+#define CLK_BDP_NR 50
+
+#endif /* _DT_BINDINGS_CLK_MT2701_H */
diff --git a/include/dt-bindings/clock/mt2712-clk.h b/include/dt-bindings/clock/mt2712-clk.h
new file mode 100644
index 0000000..0800d9c
--- /dev/null
+++ b/include/dt-bindings/clock/mt2712-clk.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT2712_H
+#define _DT_BINDINGS_CLK_MT2712_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_MAINPLL 0
+#define CLK_APMIXED_UNIVPLL 1
+#define CLK_APMIXED_VCODECPLL 2
+#define CLK_APMIXED_VENCPLL 3
+#define CLK_APMIXED_APLL1 4
+#define CLK_APMIXED_APLL2 5
+#define CLK_APMIXED_LVDSPLL 6
+#define CLK_APMIXED_LVDSPLL2 7
+#define CLK_APMIXED_MSDCPLL 8
+#define CLK_APMIXED_MSDCPLL2 9
+#define CLK_APMIXED_TVDPLL 10
+#define CLK_APMIXED_MMPLL 11
+#define CLK_APMIXED_ARMCA35PLL 12
+#define CLK_APMIXED_ARMCA72PLL 13
+#define CLK_APMIXED_ETHERPLL 14
+#define CLK_APMIXED_NR_CLK 15
+
+/* TOPCKGEN */
+
+#define CLK_TOP_ARMCA35PLL 0
+#define CLK_TOP_ARMCA35PLL_600M 1
+#define CLK_TOP_ARMCA35PLL_400M 2
+#define CLK_TOP_ARMCA72PLL 3
+#define CLK_TOP_SYSPLL 4
+#define CLK_TOP_SYSPLL_D2 5
+#define CLK_TOP_SYSPLL1_D2 6
+#define CLK_TOP_SYSPLL1_D4 7
+#define CLK_TOP_SYSPLL1_D8 8
+#define CLK_TOP_SYSPLL1_D16 9
+#define CLK_TOP_SYSPLL_D3 10
+#define CLK_TOP_SYSPLL2_D2 11
+#define CLK_TOP_SYSPLL2_D4 12
+#define CLK_TOP_SYSPLL_D5 13
+#define CLK_TOP_SYSPLL3_D2 14
+#define CLK_TOP_SYSPLL3_D4 15
+#define CLK_TOP_SYSPLL_D7 16
+#define CLK_TOP_SYSPLL4_D2 17
+#define CLK_TOP_SYSPLL4_D4 18
+#define CLK_TOP_UNIVPLL 19
+#define CLK_TOP_UNIVPLL_D7 20
+#define CLK_TOP_UNIVPLL_D26 21
+#define CLK_TOP_UNIVPLL_D52 22
+#define CLK_TOP_UNIVPLL_D104 23
+#define CLK_TOP_UNIVPLL_D208 24
+#define CLK_TOP_UNIVPLL_D2 25
+#define CLK_TOP_UNIVPLL1_D2 26
+#define CLK_TOP_UNIVPLL1_D4 27
+#define CLK_TOP_UNIVPLL1_D8 28
+#define CLK_TOP_UNIVPLL_D3 29
+#define CLK_TOP_UNIVPLL2_D2 30
+#define CLK_TOP_UNIVPLL2_D4 31
+#define CLK_TOP_UNIVPLL2_D8 32
+#define CLK_TOP_UNIVPLL_D5 33
+#define CLK_TOP_UNIVPLL3_D2 34
+#define CLK_TOP_UNIVPLL3_D4 35
+#define CLK_TOP_UNIVPLL3_D8 36
+#define CLK_TOP_F_MP0_PLL1 37
+#define CLK_TOP_F_MP0_PLL2 38
+#define CLK_TOP_F_BIG_PLL1 39
+#define CLK_TOP_F_BIG_PLL2 40
+#define CLK_TOP_F_BUS_PLL1 41
+#define CLK_TOP_F_BUS_PLL2 42
+#define CLK_TOP_APLL1 43
+#define CLK_TOP_APLL1_D2 44
+#define CLK_TOP_APLL1_D4 45
+#define CLK_TOP_APLL1_D8 46
+#define CLK_TOP_APLL1_D16 47
+#define CLK_TOP_APLL2 48
+#define CLK_TOP_APLL2_D2 49
+#define CLK_TOP_APLL2_D4 50
+#define CLK_TOP_APLL2_D8 51
+#define CLK_TOP_APLL2_D16 52
+#define CLK_TOP_LVDSPLL 53
+#define CLK_TOP_LVDSPLL_D2 54
+#define CLK_TOP_LVDSPLL_D4 55
+#define CLK_TOP_LVDSPLL_D8 56
+#define CLK_TOP_LVDSPLL2 57
+#define CLK_TOP_LVDSPLL2_D2 58
+#define CLK_TOP_LVDSPLL2_D4 59
+#define CLK_TOP_LVDSPLL2_D8 60
+#define CLK_TOP_ETHERPLL_125M 61
+#define CLK_TOP_ETHERPLL_50M 62
+#define CLK_TOP_CVBS 63
+#define CLK_TOP_CVBS_D2 64
+#define CLK_TOP_SYS_26M 65
+#define CLK_TOP_MMPLL 66
+#define CLK_TOP_MMPLL_D2 67
+#define CLK_TOP_VENCPLL 68
+#define CLK_TOP_VENCPLL_D2 69
+#define CLK_TOP_VCODECPLL 70
+#define CLK_TOP_VCODECPLL_D2 71
+#define CLK_TOP_TVDPLL 72
+#define CLK_TOP_TVDPLL_D2 73
+#define CLK_TOP_TVDPLL_D4 74
+#define CLK_TOP_TVDPLL_D8 75
+#define CLK_TOP_TVDPLL_429M 76
+#define CLK_TOP_TVDPLL_429M_D2 77
+#define CLK_TOP_TVDPLL_429M_D4 78
+#define CLK_TOP_MSDCPLL 79
+#define CLK_TOP_MSDCPLL_D2 80
+#define CLK_TOP_MSDCPLL_D4 81
+#define CLK_TOP_MSDCPLL2 82
+#define CLK_TOP_MSDCPLL2_D2 83
+#define CLK_TOP_MSDCPLL2_D4 84
+#define CLK_TOP_CLK26M_D2 85
+#define CLK_TOP_D2A_ULCLK_6P5M 86
+#define CLK_TOP_VPLL3_DPIX 87
+#define CLK_TOP_VPLL_DPIX 88
+#define CLK_TOP_LTEPLL_FS26M 89
+#define CLK_TOP_DMPLL 90
+#define CLK_TOP_DSI0_LNTC 91
+#define CLK_TOP_DSI1_LNTC 92
+#define CLK_TOP_LVDSTX3_CLKDIG_CTS 93
+#define CLK_TOP_LVDSTX_CLKDIG_CTS 94
+#define CLK_TOP_CLKRTC_EXT 95
+#define CLK_TOP_CLKRTC_INT 96
+#define CLK_TOP_CSI0 97
+#define CLK_TOP_CVBSPLL 98
+#define CLK_TOP_AXI_SEL 99
+#define CLK_TOP_MEM_SEL 100
+#define CLK_TOP_MM_SEL 101
+#define CLK_TOP_PWM_SEL 102
+#define CLK_TOP_VDEC_SEL 103
+#define CLK_TOP_VENC_SEL 104
+#define CLK_TOP_MFG_SEL 105
+#define CLK_TOP_CAMTG_SEL 106
+#define CLK_TOP_UART_SEL 107
+#define CLK_TOP_SPI_SEL 108
+#define CLK_TOP_USB20_SEL 109
+#define CLK_TOP_USB30_SEL 110
+#define CLK_TOP_MSDC50_0_HCLK_SEL 111
+#define CLK_TOP_MSDC50_0_SEL 112
+#define CLK_TOP_MSDC30_1_SEL 113
+#define CLK_TOP_MSDC30_2_SEL 114
+#define CLK_TOP_MSDC30_3_SEL 115
+#define CLK_TOP_AUDIO_SEL 116
+#define CLK_TOP_AUD_INTBUS_SEL 117
+#define CLK_TOP_PMICSPI_SEL 118
+#define CLK_TOP_DPILVDS1_SEL 119
+#define CLK_TOP_ATB_SEL 120
+#define CLK_TOP_NR_SEL 121
+#define CLK_TOP_NFI2X_SEL 122
+#define CLK_TOP_IRDA_SEL 123
+#define CLK_TOP_CCI400_SEL 124
+#define CLK_TOP_AUD_1_SEL 125
+#define CLK_TOP_AUD_2_SEL 126
+#define CLK_TOP_MEM_MFG_IN_AS_SEL 127
+#define CLK_TOP_AXI_MFG_IN_AS_SEL 128
+#define CLK_TOP_SCAM_SEL 129
+#define CLK_TOP_NFIECC_SEL 130
+#define CLK_TOP_PE2_MAC_P0_SEL 131
+#define CLK_TOP_PE2_MAC_P1_SEL 132
+#define CLK_TOP_DPILVDS_SEL 133
+#define CLK_TOP_MSDC50_3_HCLK_SEL 134
+#define CLK_TOP_HDCP_SEL 135
+#define CLK_TOP_HDCP_24M_SEL 136
+#define CLK_TOP_RTC_SEL 137
+#define CLK_TOP_SPINOR_SEL 138
+#define CLK_TOP_APLL_SEL 139
+#define CLK_TOP_APLL2_SEL 140
+#define CLK_TOP_A1SYS_HP_SEL 141
+#define CLK_TOP_A2SYS_HP_SEL 142
+#define CLK_TOP_ASM_L_SEL 143
+#define CLK_TOP_ASM_M_SEL 144
+#define CLK_TOP_ASM_H_SEL 145
+#define CLK_TOP_I2SO1_SEL 146
+#define CLK_TOP_I2SO2_SEL 147
+#define CLK_TOP_I2SO3_SEL 148
+#define CLK_TOP_TDMO0_SEL 149
+#define CLK_TOP_TDMO1_SEL 150
+#define CLK_TOP_I2SI1_SEL 151
+#define CLK_TOP_I2SI2_SEL 152
+#define CLK_TOP_I2SI3_SEL 153
+#define CLK_TOP_ETHER_125M_SEL 154
+#define CLK_TOP_ETHER_50M_SEL 155
+#define CLK_TOP_JPGDEC_SEL 156
+#define CLK_TOP_SPISLV_SEL 157
+#define CLK_TOP_ETHER_50M_RMII_SEL 158
+#define CLK_TOP_CAM2TG_SEL 159
+#define CLK_TOP_DI_SEL 160
+#define CLK_TOP_TVD_SEL 161
+#define CLK_TOP_I2C_SEL 162
+#define CLK_TOP_PWM_INFRA_SEL 163
+#define CLK_TOP_MSDC0P_AES_SEL 164
+#define CLK_TOP_CMSYS_SEL 165
+#define CLK_TOP_GCPU_SEL 166
+#define CLK_TOP_AUD_APLL1_SEL 167
+#define CLK_TOP_AUD_APLL2_SEL 168
+#define CLK_TOP_DA_AUDULL_VTX_6P5M_SEL 169
+#define CLK_TOP_APLL_DIV0 170
+#define CLK_TOP_APLL_DIV1 171
+#define CLK_TOP_APLL_DIV2 172
+#define CLK_TOP_APLL_DIV3 173
+#define CLK_TOP_APLL_DIV4 174
+#define CLK_TOP_APLL_DIV5 175
+#define CLK_TOP_APLL_DIV6 176
+#define CLK_TOP_APLL_DIV7 177
+#define CLK_TOP_APLL_DIV_PDN0 178
+#define CLK_TOP_APLL_DIV_PDN1 179
+#define CLK_TOP_APLL_DIV_PDN2 180
+#define CLK_TOP_APLL_DIV_PDN3 181
+#define CLK_TOP_APLL_DIV_PDN4 182
+#define CLK_TOP_APLL_DIV_PDN5 183
+#define CLK_TOP_APLL_DIV_PDN6 184
+#define CLK_TOP_APLL_DIV_PDN7 185
+#define CLK_TOP_APLL1_D3 186
+#define CLK_TOP_APLL1_REF_SEL 187
+#define CLK_TOP_APLL2_REF_SEL 188
+#define CLK_TOP_NFI2X_EN 189
+#define CLK_TOP_NFIECC_EN 190
+#define CLK_TOP_NFI1X_CK_EN 191
+#define CLK_TOP_APLL2_D3 192
+#define CLK_TOP_NR_CLK 193
+
+/* INFRACFG */
+
+#define CLK_INFRA_DBGCLK 0
+#define CLK_INFRA_GCE 1
+#define CLK_INFRA_M4U 2
+#define CLK_INFRA_KP 3
+#define CLK_INFRA_AO_SPI0 4
+#define CLK_INFRA_AO_SPI1 5
+#define CLK_INFRA_AO_UART5 6
+#define CLK_INFRA_NR_CLK 7
+
+/* PERICFG */
+
+#define CLK_PERI_NFI 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM0 2
+#define CLK_PERI_PWM1 3
+#define CLK_PERI_PWM2 4
+#define CLK_PERI_PWM3 5
+#define CLK_PERI_PWM4 6
+#define CLK_PERI_PWM5 7
+#define CLK_PERI_PWM6 8
+#define CLK_PERI_PWM7 9
+#define CLK_PERI_PWM 10
+#define CLK_PERI_AP_DMA 11
+#define CLK_PERI_MSDC30_0 12
+#define CLK_PERI_MSDC30_1 13
+#define CLK_PERI_MSDC30_2 14
+#define CLK_PERI_MSDC30_3 15
+#define CLK_PERI_UART0 16
+#define CLK_PERI_UART1 17
+#define CLK_PERI_UART2 18
+#define CLK_PERI_UART3 19
+#define CLK_PERI_I2C0 20
+#define CLK_PERI_I2C1 21
+#define CLK_PERI_I2C2 22
+#define CLK_PERI_I2C3 23
+#define CLK_PERI_I2C4 24
+#define CLK_PERI_AUXADC 25
+#define CLK_PERI_SPI0 26
+#define CLK_PERI_SPI 27
+#define CLK_PERI_I2C5 28
+#define CLK_PERI_SPI2 29
+#define CLK_PERI_SPI3 30
+#define CLK_PERI_SPI5 31
+#define CLK_PERI_UART4 32
+#define CLK_PERI_SFLASH 33
+#define CLK_PERI_GMAC 34
+#define CLK_PERI_PCIE0 35
+#define CLK_PERI_PCIE1 36
+#define CLK_PERI_GMAC_PCLK 37
+#define CLK_PERI_MSDC50_0_EN 38
+#define CLK_PERI_MSDC30_1_EN 39
+#define CLK_PERI_MSDC30_2_EN 40
+#define CLK_PERI_MSDC30_3_EN 41
+#define CLK_PERI_MSDC50_0_HCLK_EN 42
+#define CLK_PERI_MSDC50_3_HCLK_EN 43
+#define CLK_PERI_MSDC30_0_QTR_EN 44
+#define CLK_PERI_MSDC30_3_QTR_EN 45
+#define CLK_PERI_NR_CLK 46
+
+/* MCUCFG */
+
+#define CLK_MCU_MP0_SEL 0
+#define CLK_MCU_MP2_SEL 1
+#define CLK_MCU_BUS_SEL 2
+#define CLK_MCU_NR_CLK 3
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA0 3
+#define CLK_MM_MDP_RDMA1 4
+#define CLK_MM_MDP_RSZ0 5
+#define CLK_MM_MDP_RSZ1 6
+#define CLK_MM_MDP_RSZ2 7
+#define CLK_MM_MDP_TDSHP0 8
+#define CLK_MM_MDP_TDSHP1 9
+#define CLK_MM_MDP_CROP 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_OD 29
+#define CLK_MM_DISP_PWM0_MM 30
+#define CLK_MM_DISP_PWM0_26M 31
+#define CLK_MM_DISP_PWM1_MM 32
+#define CLK_MM_DISP_PWM1_26M 33
+#define CLK_MM_DSI0_ENGINE 34
+#define CLK_MM_DSI0_DIGITAL 35
+#define CLK_MM_DSI1_ENGINE 36
+#define CLK_MM_DSI1_DIGITAL 37
+#define CLK_MM_DPI_PIXEL 38
+#define CLK_MM_DPI_ENGINE 39
+#define CLK_MM_DPI1_PIXEL 40
+#define CLK_MM_DPI1_ENGINE 41
+#define CLK_MM_LVDS_PIXEL 42
+#define CLK_MM_LVDS_CTS 43
+#define CLK_MM_SMI_LARB4 44
+#define CLK_MM_SMI_COMMON1 45
+#define CLK_MM_SMI_LARB5 46
+#define CLK_MM_MDP_RDMA2 47
+#define CLK_MM_MDP_TDSHP2 48
+#define CLK_MM_DISP_OVL2 49
+#define CLK_MM_DISP_WDMA2 50
+#define CLK_MM_DISP_COLOR2 51
+#define CLK_MM_DISP_AAL1 52
+#define CLK_MM_DISP_OD1 53
+#define CLK_MM_LVDS1_PIXEL 54
+#define CLK_MM_LVDS1_CTS 55
+#define CLK_MM_SMI_LARB7 56
+#define CLK_MM_MDP_RDMA3 57
+#define CLK_MM_MDP_WROT2 58
+#define CLK_MM_DSI2 59
+#define CLK_MM_DSI2_DIGITAL 60
+#define CLK_MM_DSI3 61
+#define CLK_MM_DSI3_DIGITAL 62
+#define CLK_MM_NR_CLK 63
+
+/* IMGSYS */
+
+#define CLK_IMG_SMI_LARB2 0
+#define CLK_IMG_SENINF_SCAM_EN 1
+#define CLK_IMG_SENINF_CAM_EN 2
+#define CLK_IMG_CAM_SV_EN 3
+#define CLK_IMG_CAM_SV1_EN 4
+#define CLK_IMG_CAM_SV2_EN 5
+#define CLK_IMG_NR_CLK 6
+
+/* BDPSYS */
+
+#define CLK_BDP_BRIDGE_B 0
+#define CLK_BDP_BRIDGE_DRAM 1
+#define CLK_BDP_LARB_DRAM 2
+#define CLK_BDP_WR_CHANNEL_VDI_PXL 3
+#define CLK_BDP_WR_CHANNEL_VDI_DRAM 4
+#define CLK_BDP_WR_CHANNEL_VDI_B 5
+#define CLK_BDP_MT_B 6
+#define CLK_BDP_DISPFMT_27M 7
+#define CLK_BDP_DISPFMT_27M_VDOUT 8
+#define CLK_BDP_DISPFMT_27_74_74 9
+#define CLK_BDP_DISPFMT_2FS 10
+#define CLK_BDP_DISPFMT_2FS_2FS74_148 11
+#define CLK_BDP_DISPFMT_B 12
+#define CLK_BDP_VDO_DRAM 13
+#define CLK_BDP_VDO_2FS 14
+#define CLK_BDP_VDO_B 15
+#define CLK_BDP_WR_CHANNEL_DI_PXL 16
+#define CLK_BDP_WR_CHANNEL_DI_DRAM 17
+#define CLK_BDP_WR_CHANNEL_DI_B 18
+#define CLK_BDP_NR_AGENT 19
+#define CLK_BDP_NR_DRAM 20
+#define CLK_BDP_NR_B 21
+#define CLK_BDP_BRIDGE_RT_B 22
+#define CLK_BDP_BRIDGE_RT_DRAM 23
+#define CLK_BDP_LARB_RT_DRAM 24
+#define CLK_BDP_TVD_TDC 25
+#define CLK_BDP_TVD_54 26
+#define CLK_BDP_TVD_CBUS 27
+#define CLK_BDP_NR_CLK 28
+
+/* VDECSYS */
+
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB1_CKEN 1
+#define CLK_VDEC_IMGRZ_CKEN 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VENCSYS */
+
+#define CLK_VENC_SMI_COMMON_CON 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_SMI_LARB6 2
+#define CLK_VENC_NR_CLK 3
+
+/* JPGDECSYS */
+
+#define CLK_JPGDEC_JPGDEC1 0
+#define CLK_JPGDEC_JPGDEC 1
+#define CLK_JPGDEC_NR_CLK 2
+
+#endif /* _DT_BINDINGS_CLK_MT2712_H */
diff --git a/include/dt-bindings/clock/mt6779-clk.h b/include/dt-bindings/clock/mt6779-clk.h
new file mode 100644
index 0000000..b083139
--- /dev/null
+++ b/include/dt-bindings/clock/mt6779-clk.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Wendell Lin
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6779_H
+#define _DT_BINDINGS_CLK_MT6779_H
+
+/* TOPCKGEN */
+#define CLK_TOP_AXI 1
+#define CLK_TOP_MM 2
+#define CLK_TOP_CAM 3
+#define CLK_TOP_MFG 4
+#define CLK_TOP_CAMTG 5
+#define CLK_TOP_UART 6
+#define CLK_TOP_SPI 7
+#define CLK_TOP_MSDC50_0_HCLK 8
+#define CLK_TOP_MSDC50_0 9
+#define CLK_TOP_MSDC30_1 10
+#define CLK_TOP_MSDC30_2 11
+#define CLK_TOP_AUD 12
+#define CLK_TOP_AUD_INTBUS 13
+#define CLK_TOP_FPWRAP_ULPOSC 14
+#define CLK_TOP_SCP 15
+#define CLK_TOP_ATB 16
+#define CLK_TOP_SSPM 17
+#define CLK_TOP_DPI0 18
+#define CLK_TOP_SCAM 19
+#define CLK_TOP_AUD_1 20
+#define CLK_TOP_AUD_2 21
+#define CLK_TOP_DISP_PWM 22
+#define CLK_TOP_SSUSB_TOP_XHCI 23
+#define CLK_TOP_USB_TOP 24
+#define CLK_TOP_SPM 25
+#define CLK_TOP_I2C 26
+#define CLK_TOP_F52M_MFG 27
+#define CLK_TOP_SENINF 28
+#define CLK_TOP_DXCC 29
+#define CLK_TOP_CAMTG2 30
+#define CLK_TOP_AUD_ENG1 31
+#define CLK_TOP_AUD_ENG2 32
+#define CLK_TOP_FAES_UFSFDE 33
+#define CLK_TOP_FUFS 34
+#define CLK_TOP_IMG 35
+#define CLK_TOP_DSP 36
+#define CLK_TOP_DSP1 37
+#define CLK_TOP_DSP2 38
+#define CLK_TOP_IPU_IF 39
+#define CLK_TOP_CAMTG3 40
+#define CLK_TOP_CAMTG4 41
+#define CLK_TOP_PMICSPI 42
+#define CLK_TOP_MAINPLL_CK 43
+#define CLK_TOP_MAINPLL_D2 44
+#define CLK_TOP_MAINPLL_D3 45
+#define CLK_TOP_MAINPLL_D5 46
+#define CLK_TOP_MAINPLL_D7 47
+#define CLK_TOP_MAINPLL_D2_D2 48
+#define CLK_TOP_MAINPLL_D2_D4 49
+#define CLK_TOP_MAINPLL_D2_D8 50
+#define CLK_TOP_MAINPLL_D2_D16 51
+#define CLK_TOP_MAINPLL_D3_D2 52
+#define CLK_TOP_MAINPLL_D3_D4 53
+#define CLK_TOP_MAINPLL_D3_D8 54
+#define CLK_TOP_MAINPLL_D5_D2 55
+#define CLK_TOP_MAINPLL_D5_D4 56
+#define CLK_TOP_MAINPLL_D7_D2 57
+#define CLK_TOP_MAINPLL_D7_D4 58
+#define CLK_TOP_UNIVPLL_CK 59
+#define CLK_TOP_UNIVPLL_D2 60
+#define CLK_TOP_UNIVPLL_D3 61
+#define CLK_TOP_UNIVPLL_D5 62
+#define CLK_TOP_UNIVPLL_D7 63
+#define CLK_TOP_UNIVPLL_D2_D2 64
+#define CLK_TOP_UNIVPLL_D2_D4 65
+#define CLK_TOP_UNIVPLL_D2_D8 66
+#define CLK_TOP_UNIVPLL_D3_D2 67
+#define CLK_TOP_UNIVPLL_D3_D4 68
+#define CLK_TOP_UNIVPLL_D3_D8 69
+#define CLK_TOP_UNIVPLL_D5_D2 70
+#define CLK_TOP_UNIVPLL_D5_D4 71
+#define CLK_TOP_UNIVPLL_D5_D8 72
+#define CLK_TOP_APLL1_CK 73
+#define CLK_TOP_APLL1_D2 74
+#define CLK_TOP_APLL1_D4 75
+#define CLK_TOP_APLL1_D8 76
+#define CLK_TOP_APLL2_CK 77
+#define CLK_TOP_APLL2_D2 78
+#define CLK_TOP_APLL2_D4 79
+#define CLK_TOP_APLL2_D8 80
+#define CLK_TOP_TVDPLL_CK 81
+#define CLK_TOP_TVDPLL_D2 82
+#define CLK_TOP_TVDPLL_D4 83
+#define CLK_TOP_TVDPLL_D8 84
+#define CLK_TOP_TVDPLL_D16 85
+#define CLK_TOP_MSDCPLL_CK 86
+#define CLK_TOP_MSDCPLL_D2 87
+#define CLK_TOP_MSDCPLL_D4 88
+#define CLK_TOP_MSDCPLL_D8 89
+#define CLK_TOP_MSDCPLL_D16 90
+#define CLK_TOP_AD_OSC_CK 91
+#define CLK_TOP_OSC_D2 92
+#define CLK_TOP_OSC_D4 93
+#define CLK_TOP_OSC_D8 94
+#define CLK_TOP_OSC_D16 95
+#define CLK_TOP_F26M_CK_D2 96
+#define CLK_TOP_MFGPLL_CK 97
+#define CLK_TOP_UNIVP_192M_CK 98
+#define CLK_TOP_UNIVP_192M_D2 99
+#define CLK_TOP_UNIVP_192M_D4 100
+#define CLK_TOP_UNIVP_192M_D8 101
+#define CLK_TOP_UNIVP_192M_D16 102
+#define CLK_TOP_UNIVP_192M_D32 103
+#define CLK_TOP_MMPLL_CK 104
+#define CLK_TOP_MMPLL_D4 105
+#define CLK_TOP_MMPLL_D4_D2 106
+#define CLK_TOP_MMPLL_D4_D4 107
+#define CLK_TOP_MMPLL_D5 108
+#define CLK_TOP_MMPLL_D5_D2 109
+#define CLK_TOP_MMPLL_D5_D4 110
+#define CLK_TOP_MMPLL_D6 111
+#define CLK_TOP_MMPLL_D7 112
+#define CLK_TOP_CLK26M 113
+#define CLK_TOP_CLK13M 114
+#define CLK_TOP_ADSP 115
+#define CLK_TOP_DPMAIF 116
+#define CLK_TOP_VENC 117
+#define CLK_TOP_VDEC 118
+#define CLK_TOP_CAMTM 119
+#define CLK_TOP_PWM 120
+#define CLK_TOP_ADSPPLL_CK 121
+#define CLK_TOP_I2S0_M_SEL 122
+#define CLK_TOP_I2S1_M_SEL 123
+#define CLK_TOP_I2S2_M_SEL 124
+#define CLK_TOP_I2S3_M_SEL 125
+#define CLK_TOP_I2S4_M_SEL 126
+#define CLK_TOP_I2S5_M_SEL 127
+#define CLK_TOP_APLL12_DIV0 128
+#define CLK_TOP_APLL12_DIV1 129
+#define CLK_TOP_APLL12_DIV2 130
+#define CLK_TOP_APLL12_DIV3 131
+#define CLK_TOP_APLL12_DIV4 132
+#define CLK_TOP_APLL12_DIVB 133
+#define CLK_TOP_APLL12_DIV5 134
+#define CLK_TOP_IPE 135
+#define CLK_TOP_DPE 136
+#define CLK_TOP_CCU 137
+#define CLK_TOP_DSP3 138
+#define CLK_TOP_SENINF1 139
+#define CLK_TOP_SENINF2 140
+#define CLK_TOP_AUD_H 141
+#define CLK_TOP_CAMTG5 142
+#define CLK_TOP_TVDPLL_MAINPLL_D2_CK 143
+#define CLK_TOP_AD_OSC2_CK 144
+#define CLK_TOP_OSC2_D2 145
+#define CLK_TOP_OSC2_D3 146
+#define CLK_TOP_FMEM_466M_CK 147
+#define CLK_TOP_ADSPPLL_D4 148
+#define CLK_TOP_ADSPPLL_D5 149
+#define CLK_TOP_ADSPPLL_D6 150
+#define CLK_TOP_OSC_D10 151
+#define CLK_TOP_UNIVPLL_D3_D16 152
+#define CLK_TOP_NR_CLK 153
+
+/* APMIXED */
+#define CLK_APMIXED_ARMPLL_LL 1
+#define CLK_APMIXED_ARMPLL_BL 2
+#define CLK_APMIXED_ARMPLL_BB 3
+#define CLK_APMIXED_CCIPLL 4
+#define CLK_APMIXED_MAINPLL 5
+#define CLK_APMIXED_UNIV2PLL 6
+#define CLK_APMIXED_MSDCPLL 7
+#define CLK_APMIXED_ADSPPLL 8
+#define CLK_APMIXED_MMPLL 9
+#define CLK_APMIXED_MFGPLL 10
+#define CLK_APMIXED_TVDPLL 11
+#define CLK_APMIXED_APLL1 12
+#define CLK_APMIXED_APLL2 13
+#define CLK_APMIXED_SSUSB26M 14
+#define CLK_APMIXED_APPLL26M 15
+#define CLK_APMIXED_MIPIC0_26M 16
+#define CLK_APMIXED_MDPLLGP26M 17
+#define CLK_APMIXED_MM_F26M 18
+#define CLK_APMIXED_UFS26M 19
+#define CLK_APMIXED_MIPIC1_26M 20
+#define CLK_APMIXED_MEMPLL26M 21
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 22
+#define CLK_APMIXED_MIPID0_26M 23
+#define CLK_APMIXED_MIPID1_26M 24
+#define CLK_APMIXED_NR_CLK 25
+
+/* CAMSYS */
+#define CLK_CAM_LARB10 1
+#define CLK_CAM_DFP_VAD 2
+#define CLK_CAM_LARB11 3
+#define CLK_CAM_LARB9 4
+#define CLK_CAM_CAM 5
+#define CLK_CAM_CAMTG 6
+#define CLK_CAM_SENINF 7
+#define CLK_CAM_CAMSV0 8
+#define CLK_CAM_CAMSV1 9
+#define CLK_CAM_CAMSV2 10
+#define CLK_CAM_CAMSV3 11
+#define CLK_CAM_CCU 12
+#define CLK_CAM_FAKE_ENG 13
+#define CLK_CAM_NR_CLK 14
+
+/* INFRA */
+#define CLK_INFRA_PMIC_TMR 1
+#define CLK_INFRA_PMIC_AP 2
+#define CLK_INFRA_PMIC_MD 3
+#define CLK_INFRA_PMIC_CONN 4
+#define CLK_INFRA_SCPSYS 5
+#define CLK_INFRA_SEJ 6
+#define CLK_INFRA_APXGPT 7
+#define CLK_INFRA_ICUSB 8
+#define CLK_INFRA_GCE 9
+#define CLK_INFRA_THERM 10
+#define CLK_INFRA_I2C0 11
+#define CLK_INFRA_I2C1 12
+#define CLK_INFRA_I2C2 13
+#define CLK_INFRA_I2C3 14
+#define CLK_INFRA_PWM_HCLK 15
+#define CLK_INFRA_PWM1 16
+#define CLK_INFRA_PWM2 17
+#define CLK_INFRA_PWM3 18
+#define CLK_INFRA_PWM4 19
+#define CLK_INFRA_PWM 20
+#define CLK_INFRA_UART0 21
+#define CLK_INFRA_UART1 22
+#define CLK_INFRA_UART2 23
+#define CLK_INFRA_UART3 24
+#define CLK_INFRA_GCE_26M 25
+#define CLK_INFRA_CQ_DMA_FPC 26
+#define CLK_INFRA_BTIF 27
+#define CLK_INFRA_SPI0 28
+#define CLK_INFRA_MSDC0 29
+#define CLK_INFRA_MSDC1 30
+#define CLK_INFRA_MSDC2 31
+#define CLK_INFRA_MSDC0_SCK 32
+#define CLK_INFRA_DVFSRC 33
+#define CLK_INFRA_GCPU 34
+#define CLK_INFRA_TRNG 35
+#define CLK_INFRA_AUXADC 36
+#define CLK_INFRA_CPUM 37
+#define CLK_INFRA_CCIF1_AP 38
+#define CLK_INFRA_CCIF1_MD 39
+#define CLK_INFRA_AUXADC_MD 40
+#define CLK_INFRA_MSDC1_SCK 41
+#define CLK_INFRA_MSDC2_SCK 42
+#define CLK_INFRA_AP_DMA 43
+#define CLK_INFRA_XIU 44
+#define CLK_INFRA_DEVICE_APC 45
+#define CLK_INFRA_CCIF_AP 46
+#define CLK_INFRA_DEBUGSYS 47
+#define CLK_INFRA_AUD 48
+#define CLK_INFRA_CCIF_MD 49
+#define CLK_INFRA_DXCC_SEC_CORE 50
+#define CLK_INFRA_DXCC_AO 51
+#define CLK_INFRA_DRAMC_F26M 52
+#define CLK_INFRA_IRTX 53
+#define CLK_INFRA_DISP_PWM 54
+#define CLK_INFRA_DPMAIF_CK 55
+#define CLK_INFRA_AUD_26M_BCLK 56
+#define CLK_INFRA_SPI1 57
+#define CLK_INFRA_I2C4 58
+#define CLK_INFRA_MODEM_TEMP_SHARE 59
+#define CLK_INFRA_SPI2 60
+#define CLK_INFRA_SPI3 61
+#define CLK_INFRA_UNIPRO_SCK 62
+#define CLK_INFRA_UNIPRO_TICK 63
+#define CLK_INFRA_UFS_MP_SAP_BCLK 64
+#define CLK_INFRA_MD32_BCLK 65
+#define CLK_INFRA_SSPM 66
+#define CLK_INFRA_UNIPRO_MBIST 67
+#define CLK_INFRA_SSPM_BUS_HCLK 68
+#define CLK_INFRA_I2C5 69
+#define CLK_INFRA_I2C5_ARBITER 70
+#define CLK_INFRA_I2C5_IMM 71
+#define CLK_INFRA_I2C1_ARBITER 72
+#define CLK_INFRA_I2C1_IMM 73
+#define CLK_INFRA_I2C2_ARBITER 74
+#define CLK_INFRA_I2C2_IMM 75
+#define CLK_INFRA_SPI4 76
+#define CLK_INFRA_SPI5 77
+#define CLK_INFRA_CQ_DMA 78
+#define CLK_INFRA_UFS 79
+#define CLK_INFRA_AES_UFSFDE 80
+#define CLK_INFRA_UFS_TICK 81
+#define CLK_INFRA_MSDC0_SELF 82
+#define CLK_INFRA_MSDC1_SELF 83
+#define CLK_INFRA_MSDC2_SELF 84
+#define CLK_INFRA_SSPM_26M_SELF 85
+#define CLK_INFRA_SSPM_32K_SELF 86
+#define CLK_INFRA_UFS_AXI 87
+#define CLK_INFRA_I2C6 88
+#define CLK_INFRA_AP_MSDC0 89
+#define CLK_INFRA_MD_MSDC0 90
+#define CLK_INFRA_USB 91
+#define CLK_INFRA_DEVMPU_BCLK 92
+#define CLK_INFRA_CCIF2_AP 93
+#define CLK_INFRA_CCIF2_MD 94
+#define CLK_INFRA_CCIF3_AP 95
+#define CLK_INFRA_CCIF3_MD 96
+#define CLK_INFRA_SEJ_F13M 97
+#define CLK_INFRA_AES_BCLK 98
+#define CLK_INFRA_I2C7 99
+#define CLK_INFRA_I2C8 100
+#define CLK_INFRA_FBIST2FPC 101
+#define CLK_INFRA_CCIF4_AP 102
+#define CLK_INFRA_CCIF4_MD 103
+#define CLK_INFRA_FADSP 104
+#define CLK_INFRA_SSUSB_XHCI 105
+#define CLK_INFRA_SPI6 106
+#define CLK_INFRA_SPI7 107
+#define CLK_INFRA_NR_CLK 108
+
+/* MFGCFG */
+#define CLK_MFGCFG_BG3D 1
+#define CLK_MFGCFG_NR_CLK 2
+
+/* IMG */
+#define CLK_IMG_WPE_A 1
+#define CLK_IMG_MFB 2
+#define CLK_IMG_DIP 3
+#define CLK_IMG_LARB6 4
+#define CLK_IMG_LARB5 5
+#define CLK_IMG_NR_CLK 6
+
+/* IPE */
+#define CLK_IPE_LARB7 1
+#define CLK_IPE_LARB8 2
+#define CLK_IPE_SMI_SUBCOM 3
+#define CLK_IPE_FD 4
+#define CLK_IPE_FE 5
+#define CLK_IPE_RSC 6
+#define CLK_IPE_DPE 7
+#define CLK_IPE_NR_CLK 8
+
+/* MM_CONFIG */
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_SMI_LARB1 3
+#define CLK_MM_GALS_COMM0 4
+#define CLK_MM_GALS_COMM1 5
+#define CLK_MM_GALS_CCU2MM 6
+#define CLK_MM_GALS_IPU12MM 7
+#define CLK_MM_GALS_IMG2MM 8
+#define CLK_MM_GALS_CAM2MM 9
+#define CLK_MM_GALS_IPU2MM 10
+#define CLK_MM_MDP_DL_TXCK 11
+#define CLK_MM_IPU_DL_TXCK 12
+#define CLK_MM_MDP_RDMA0 13
+#define CLK_MM_MDP_RDMA1 14
+#define CLK_MM_MDP_RSZ0 15
+#define CLK_MM_MDP_RSZ1 16
+#define CLK_MM_MDP_TDSHP 17
+#define CLK_MM_MDP_WROT0 18
+#define CLK_MM_FAKE_ENG 19
+#define CLK_MM_DISP_OVL0 20
+#define CLK_MM_DISP_OVL0_2L 21
+#define CLK_MM_DISP_OVL1_2L 22
+#define CLK_MM_DISP_RDMA0 23
+#define CLK_MM_DISP_RDMA1 24
+#define CLK_MM_DISP_WDMA0 25
+#define CLK_MM_DISP_COLOR0 26
+#define CLK_MM_DISP_CCORR0 27
+#define CLK_MM_DISP_AAL0 28
+#define CLK_MM_DISP_GAMMA0 29
+#define CLK_MM_DISP_DITHER0 30
+#define CLK_MM_DISP_SPLIT 31
+#define CLK_MM_DSI0_MM_CK 32
+#define CLK_MM_DSI0_IF_CK 33
+#define CLK_MM_DPI_MM_CK 34
+#define CLK_MM_DPI_IF_CK 35
+#define CLK_MM_FAKE_ENG2 36
+#define CLK_MM_MDP_DL_RX_CK 37
+#define CLK_MM_IPU_DL_RX_CK 38
+#define CLK_MM_26M 39
+#define CLK_MM_MM_R2Y 40
+#define CLK_MM_DISP_RSZ 41
+#define CLK_MM_MDP_WDMA0 42
+#define CLK_MM_MDP_AAL 43
+#define CLK_MM_MDP_HDR 44
+#define CLK_MM_DBI_MM_CK 45
+#define CLK_MM_DBI_IF_CK 46
+#define CLK_MM_MDP_WROT1 47
+#define CLK_MM_DISP_POSTMASK0 48
+#define CLK_MM_DISP_HRT_BW 49
+#define CLK_MM_DISP_OVL_FBDC 50
+#define CLK_MM_NR_CLK 51
+
+/* VDEC_GCON */
+#define CLK_VDEC_VDEC 1
+#define CLK_VDEC_LARB1 2
+#define CLK_VDEC_GCON_NR_CLK 3
+
+/* VENC_GCON */
+#define CLK_VENC_GCON_LARB 1
+#define CLK_VENC_GCON_VENC 2
+#define CLK_VENC_GCON_JPGENC 3
+#define CLK_VENC_GCON_GALS 4
+#define CLK_VENC_GCON_NR_CLK 5
+
+/* AUD */
+#define CLK_AUD_AFE 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_APLL2_TUNER 4
+#define CLK_AUD_APLL_TUNER 5
+#define CLK_AUD_TDM 6
+#define CLK_AUD_ADC 7
+#define CLK_AUD_DAC 8
+#define CLK_AUD_DAC_PREDIS 9
+#define CLK_AUD_TML 10
+#define CLK_AUD_NLE 11
+#define CLK_AUD_I2S1_BCLK_SW 12
+#define CLK_AUD_I2S2_BCLK_SW 13
+#define CLK_AUD_I2S3_BCLK_SW 14
+#define CLK_AUD_I2S4_BCLK_SW 15
+#define CLK_AUD_I2S5_BCLK_SW 16
+#define CLK_AUD_CONN_I2S_ASRC 17
+#define CLK_AUD_GENERAL1_ASRC 18
+#define CLK_AUD_GENERAL2_ASRC 19
+#define CLK_AUD_DAC_HIRES 20
+#define CLK_AUD_PDN_ADDA6_ADC 21
+#define CLK_AUD_ADC_HIRES 22
+#define CLK_AUD_ADC_HIRES_TML 23
+#define CLK_AUD_ADDA6_ADC_HIRES 24
+#define CLK_AUD_3RD_DAC 25
+#define CLK_AUD_3RD_DAC_PREDIS 26
+#define CLK_AUD_3RD_DAC_TML 27
+#define CLK_AUD_3RD_DAC_HIRES 28
+#define CLK_AUD_NR_CLK 29
+
+#endif /* _DT_BINDINGS_CLK_MT6779_H */
diff --git a/include/dt-bindings/clock/mt6797-clk.h b/include/dt-bindings/clock/mt6797-clk.h
new file mode 100644
index 0000000..dc23ddb
--- /dev/null
+++ b/include/dt-bindings/clock/mt6797-clk.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Kevin Chen
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6797_H
+#define _DT_BINDINGS_CLK_MT6797_H
+
+/* TOPCKGEN */
+#define CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE 1
+#define CLK_TOP_MUX_ULPOSC_AXI_CK_MUX 2
+#define CLK_TOP_MUX_AXI 3
+#define CLK_TOP_MUX_MEM 4
+#define CLK_TOP_MUX_DDRPHYCFG 5
+#define CLK_TOP_MUX_MM 6
+#define CLK_TOP_MUX_PWM 7
+#define CLK_TOP_MUX_VDEC 8
+#define CLK_TOP_MUX_VENC 9
+#define CLK_TOP_MUX_MFG 10
+#define CLK_TOP_MUX_CAMTG 11
+#define CLK_TOP_MUX_UART 12
+#define CLK_TOP_MUX_SPI 13
+#define CLK_TOP_MUX_ULPOSC_SPI_CK_MUX 14
+#define CLK_TOP_MUX_USB20 15
+#define CLK_TOP_MUX_MSDC50_0_HCLK 16
+#define CLK_TOP_MUX_MSDC50_0 17
+#define CLK_TOP_MUX_MSDC30_1 18
+#define CLK_TOP_MUX_MSDC30_2 19
+#define CLK_TOP_MUX_AUDIO 20
+#define CLK_TOP_MUX_AUD_INTBUS 21
+#define CLK_TOP_MUX_PMICSPI 22
+#define CLK_TOP_MUX_SCP 23
+#define CLK_TOP_MUX_ATB 24
+#define CLK_TOP_MUX_MJC 25
+#define CLK_TOP_MUX_DPI0 26
+#define CLK_TOP_MUX_AUD_1 27
+#define CLK_TOP_MUX_AUD_2 28
+#define CLK_TOP_MUX_SSUSB_TOP_SYS 29
+#define CLK_TOP_MUX_SPM 30
+#define CLK_TOP_MUX_BSI_SPI 31
+#define CLK_TOP_MUX_AUDIO_H 32
+#define CLK_TOP_MUX_ANC_MD32 33
+#define CLK_TOP_MUX_MFG_52M 34
+#define CLK_TOP_SYSPLL_CK 35
+#define CLK_TOP_SYSPLL_D2 36
+#define CLK_TOP_SYSPLL1_D2 37
+#define CLK_TOP_SYSPLL1_D4 38
+#define CLK_TOP_SYSPLL1_D8 39
+#define CLK_TOP_SYSPLL1_D16 40
+#define CLK_TOP_SYSPLL_D3 41
+#define CLK_TOP_SYSPLL_D3_D3 42
+#define CLK_TOP_SYSPLL2_D2 43
+#define CLK_TOP_SYSPLL2_D4 44
+#define CLK_TOP_SYSPLL2_D8 45
+#define CLK_TOP_SYSPLL_D5 46
+#define CLK_TOP_SYSPLL3_D2 47
+#define CLK_TOP_SYSPLL3_D4 48
+#define CLK_TOP_SYSPLL_D7 49
+#define CLK_TOP_SYSPLL4_D2 50
+#define CLK_TOP_SYSPLL4_D4 51
+#define CLK_TOP_UNIVPLL_CK 52
+#define CLK_TOP_UNIVPLL_D7 53
+#define CLK_TOP_UNIVPLL_D26 54
+#define CLK_TOP_SSUSB_PHY_48M_CK 55
+#define CLK_TOP_USB_PHY48M_CK 56
+#define CLK_TOP_UNIVPLL_D2 57
+#define CLK_TOP_UNIVPLL1_D2 58
+#define CLK_TOP_UNIVPLL1_D4 59
+#define CLK_TOP_UNIVPLL1_D8 60
+#define CLK_TOP_UNIVPLL_D3 61
+#define CLK_TOP_UNIVPLL2_D2 62
+#define CLK_TOP_UNIVPLL2_D4 63
+#define CLK_TOP_UNIVPLL2_D8 64
+#define CLK_TOP_UNIVPLL_D5 65
+#define CLK_TOP_UNIVPLL3_D2 66
+#define CLK_TOP_UNIVPLL3_D4 67
+#define CLK_TOP_UNIVPLL3_D8 68
+#define CLK_TOP_ULPOSC_CK_ORG 69
+#define CLK_TOP_ULPOSC_CK 70
+#define CLK_TOP_ULPOSC_D2 71
+#define CLK_TOP_ULPOSC_D3 72
+#define CLK_TOP_ULPOSC_D4 73
+#define CLK_TOP_ULPOSC_D8 74
+#define CLK_TOP_ULPOSC_D10 75
+#define CLK_TOP_APLL1_CK 76
+#define CLK_TOP_APLL2_CK 77
+#define CLK_TOP_MFGPLL_CK 78
+#define CLK_TOP_MFGPLL_D2 79
+#define CLK_TOP_IMGPLL_CK 80
+#define CLK_TOP_IMGPLL_D2 81
+#define CLK_TOP_IMGPLL_D4 82
+#define CLK_TOP_CODECPLL_CK 83
+#define CLK_TOP_CODECPLL_D2 84
+#define CLK_TOP_VDECPLL_CK 85
+#define CLK_TOP_TVDPLL_CK 86
+#define CLK_TOP_TVDPLL_D2 87
+#define CLK_TOP_TVDPLL_D4 88
+#define CLK_TOP_TVDPLL_D8 89
+#define CLK_TOP_TVDPLL_D16 90
+#define CLK_TOP_MSDCPLL_CK 91
+#define CLK_TOP_MSDCPLL_D2 92
+#define CLK_TOP_MSDCPLL_D4 93
+#define CLK_TOP_MSDCPLL_D8 94
+#define CLK_TOP_NR 95
+
+/* APMIXED_SYS */
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MFGPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_IMGPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_CODECPLL 7
+#define CLK_APMIXED_VDECPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_NR 11
+
+/* INFRA_SYS */
+#define CLK_INFRA_PMIC_TMR 1
+#define CLK_INFRA_PMIC_AP 2
+#define CLK_INFRA_PMIC_MD 3
+#define CLK_INFRA_PMIC_CONN 4
+#define CLK_INFRA_SCP 5
+#define CLK_INFRA_SEJ 6
+#define CLK_INFRA_APXGPT 7
+#define CLK_INFRA_SEJ_13M 8
+#define CLK_INFRA_ICUSB 9
+#define CLK_INFRA_GCE 10
+#define CLK_INFRA_THERM 11
+#define CLK_INFRA_I2C0 12
+#define CLK_INFRA_I2C1 13
+#define CLK_INFRA_I2C2 14
+#define CLK_INFRA_I2C3 15
+#define CLK_INFRA_PWM_HCLK 16
+#define CLK_INFRA_PWM1 17
+#define CLK_INFRA_PWM2 18
+#define CLK_INFRA_PWM3 19
+#define CLK_INFRA_PWM4 20
+#define CLK_INFRA_PWM 21
+#define CLK_INFRA_UART0 22
+#define CLK_INFRA_UART1 23
+#define CLK_INFRA_UART2 24
+#define CLK_INFRA_UART3 25
+#define CLK_INFRA_MD2MD_CCIF_0 26
+#define CLK_INFRA_MD2MD_CCIF_1 27
+#define CLK_INFRA_MD2MD_CCIF_2 28
+#define CLK_INFRA_FHCTL 29
+#define CLK_INFRA_BTIF 30
+#define CLK_INFRA_MD2MD_CCIF_3 31
+#define CLK_INFRA_SPI 32
+#define CLK_INFRA_MSDC0 33
+#define CLK_INFRA_MD2MD_CCIF_4 34
+#define CLK_INFRA_MSDC1 35
+#define CLK_INFRA_MSDC2 36
+#define CLK_INFRA_MD2MD_CCIF_5 37
+#define CLK_INFRA_GCPU 38
+#define CLK_INFRA_TRNG 39
+#define CLK_INFRA_AUXADC 40
+#define CLK_INFRA_CPUM 41
+#define CLK_INFRA_AP_C2K_CCIF_0 42
+#define CLK_INFRA_AP_C2K_CCIF_1 43
+#define CLK_INFRA_CLDMA 44
+#define CLK_INFRA_DISP_PWM 45
+#define CLK_INFRA_AP_DMA 46
+#define CLK_INFRA_DEVICE_APC 47
+#define CLK_INFRA_L2C_SRAM 48
+#define CLK_INFRA_CCIF_AP 49
+#define CLK_INFRA_AUDIO 50
+#define CLK_INFRA_CCIF_MD 51
+#define CLK_INFRA_DRAMC_F26M 52
+#define CLK_INFRA_I2C4 53
+#define CLK_INFRA_I2C_APPM 54
+#define CLK_INFRA_I2C_GPUPM 55
+#define CLK_INFRA_I2C2_IMM 56
+#define CLK_INFRA_I2C2_ARB 57
+#define CLK_INFRA_I2C3_IMM 58
+#define CLK_INFRA_I2C3_ARB 59
+#define CLK_INFRA_I2C5 60
+#define CLK_INFRA_SYS_CIRQ 61
+#define CLK_INFRA_SPI1 62
+#define CLK_INFRA_DRAMC_B_F26M 63
+#define CLK_INFRA_ANC_MD32 64
+#define CLK_INFRA_ANC_MD32_32K 65
+#define CLK_INFRA_DVFS_SPM1 66
+#define CLK_INFRA_AES_TOP0 67
+#define CLK_INFRA_AES_TOP1 68
+#define CLK_INFRA_SSUSB_BUS 69
+#define CLK_INFRA_SPI2 70
+#define CLK_INFRA_SPI3 71
+#define CLK_INFRA_SPI4 72
+#define CLK_INFRA_SPI5 73
+#define CLK_INFRA_IRTX 74
+#define CLK_INFRA_SSUSB_SYS 75
+#define CLK_INFRA_SSUSB_REF 76
+#define CLK_INFRA_AUDIO_26M 77
+#define CLK_INFRA_AUDIO_26M_PAD_TOP 78
+#define CLK_INFRA_MODEM_TEMP_SHARE 79
+#define CLK_INFRA_VAD_WRAP_SOC 80
+#define CLK_INFRA_DRAMC_CONF 81
+#define CLK_INFRA_DRAMC_B_CONF 82
+#define CLK_INFRA_MFG_VCG 83
+#define CLK_INFRA_13M 84
+#define CLK_INFRA_NR 85
+
+/* IMG_SYS */
+#define CLK_IMG_FDVT 1
+#define CLK_IMG_DPE 2
+#define CLK_IMG_DIP 3
+#define CLK_IMG_LARB6 4
+#define CLK_IMG_NR 5
+
+/* MM_SYS */
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_SMI_LARB5 3
+#define CLK_MM_CAM_MDP 4
+#define CLK_MM_MDP_RDMA0 5
+#define CLK_MM_MDP_RDMA1 6
+#define CLK_MM_MDP_RSZ0 7
+#define CLK_MM_MDP_RSZ1 8
+#define CLK_MM_MDP_RSZ2 9
+#define CLK_MM_MDP_TDSHP 10
+#define CLK_MM_MDP_COLOR 11
+#define CLK_MM_MDP_WDMA 12
+#define CLK_MM_MDP_WROT0 13
+#define CLK_MM_MDP_WROT1 14
+#define CLK_MM_FAKE_ENG 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_OVL0_2L 18
+#define CLK_MM_DISP_OVL1_2L 19
+#define CLK_MM_DISP_RDMA0 20
+#define CLK_MM_DISP_RDMA1 21
+#define CLK_MM_DISP_WDMA0 22
+#define CLK_MM_DISP_WDMA1 23
+#define CLK_MM_DISP_COLOR 24
+#define CLK_MM_DISP_CCORR 25
+#define CLK_MM_DISP_AAL 26
+#define CLK_MM_DISP_GAMMA 27
+#define CLK_MM_DISP_OD 28
+#define CLK_MM_DISP_DITHER 29
+#define CLK_MM_DISP_UFOE 30
+#define CLK_MM_DISP_DSC 31
+#define CLK_MM_DISP_SPLIT 32
+#define CLK_MM_DSI0_MM_CLOCK 33
+#define CLK_MM_DSI1_MM_CLOCK 34
+#define CLK_MM_DPI_MM_CLOCK 35
+#define CLK_MM_DPI_INTERFACE_CLOCK 36
+#define CLK_MM_LARB4_AXI_ASIF_MM_CLOCK 37
+#define CLK_MM_LARB4_AXI_ASIF_MJC_CLOCK 38
+#define CLK_MM_DISP_OVL0_MOUT_CLOCK 39
+#define CLK_MM_FAKE_ENG2 40
+#define CLK_MM_DSI0_INTERFACE_CLOCK 41
+#define CLK_MM_DSI1_INTERFACE_CLOCK 42
+#define CLK_MM_NR 43
+
+/* VDEC_SYS */
+#define CLK_VDEC_CKEN_ENG 1
+#define CLK_VDEC_ACTIVE 2
+#define CLK_VDEC_CKEN 3
+#define CLK_VDEC_LARB1_CKEN 4
+#define CLK_VDEC_NR 5
+
+/* VENC_SYS */
+#define CLK_VENC_0 1
+#define CLK_VENC_1 2
+#define CLK_VENC_2 3
+#define CLK_VENC_3 4
+#define CLK_VENC_NR 5
+
+#endif /* _DT_BINDINGS_CLK_MT6797_H */
diff --git a/include/dt-bindings/clock/mt7622-clk.h b/include/dt-bindings/clock/mt7622-clk.h
new file mode 100644
index 0000000..c12e7ea
--- /dev/null
+++ b/include/dt-bindings/clock/mt7622-clk.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7622_H
+#define _DT_BINDINGS_CLK_MT7622_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_TO_U2_PHY 0
+#define CLK_TOP_TO_U2_PHY_1P 1
+#define CLK_TOP_PCIE0_PIPE_EN 2
+#define CLK_TOP_PCIE1_PIPE_EN 3
+#define CLK_TOP_SSUSB_TX250M 4
+#define CLK_TOP_SSUSB_EQ_RX250M 5
+#define CLK_TOP_SSUSB_CDR_REF 6
+#define CLK_TOP_SSUSB_CDR_FB 7
+#define CLK_TOP_SATA_ASIC 8
+#define CLK_TOP_SATA_RBC 9
+#define CLK_TOP_TO_USB3_SYS 10
+#define CLK_TOP_P1_1MHZ 11
+#define CLK_TOP_4MHZ 12
+#define CLK_TOP_P0_1MHZ 13
+#define CLK_TOP_TXCLK_SRC_PRE 14
+#define CLK_TOP_RTC 15
+#define CLK_TOP_MEMPLL 16
+#define CLK_TOP_DMPLL 17
+#define CLK_TOP_SYSPLL_D2 18
+#define CLK_TOP_SYSPLL1_D2 19
+#define CLK_TOP_SYSPLL1_D4 20
+#define CLK_TOP_SYSPLL1_D8 21
+#define CLK_TOP_SYSPLL2_D4 22
+#define CLK_TOP_SYSPLL2_D8 23
+#define CLK_TOP_SYSPLL_D5 24
+#define CLK_TOP_SYSPLL3_D2 25
+#define CLK_TOP_SYSPLL3_D4 26
+#define CLK_TOP_SYSPLL4_D2 27
+#define CLK_TOP_SYSPLL4_D4 28
+#define CLK_TOP_SYSPLL4_D16 29
+#define CLK_TOP_UNIVPLL 30
+#define CLK_TOP_UNIVPLL_D2 31
+#define CLK_TOP_UNIVPLL1_D2 32
+#define CLK_TOP_UNIVPLL1_D4 33
+#define CLK_TOP_UNIVPLL1_D8 34
+#define CLK_TOP_UNIVPLL1_D16 35
+#define CLK_TOP_UNIVPLL2_D2 36
+#define CLK_TOP_UNIVPLL2_D4 37
+#define CLK_TOP_UNIVPLL2_D8 38
+#define CLK_TOP_UNIVPLL2_D16 39
+#define CLK_TOP_UNIVPLL_D5 40
+#define CLK_TOP_UNIVPLL3_D2 41
+#define CLK_TOP_UNIVPLL3_D4 42
+#define CLK_TOP_UNIVPLL3_D16 43
+#define CLK_TOP_UNIVPLL_D7 44
+#define CLK_TOP_UNIVPLL_D80_D4 45
+#define CLK_TOP_UNIV48M 46
+#define CLK_TOP_SGMIIPLL 47
+#define CLK_TOP_SGMIIPLL_D2 48
+#define CLK_TOP_AUD1PLL 49
+#define CLK_TOP_AUD2PLL 50
+#define CLK_TOP_AUD_I2S2_MCK 51
+#define CLK_TOP_TO_USB3_REF 52
+#define CLK_TOP_PCIE1_MAC_EN 53
+#define CLK_TOP_PCIE0_MAC_EN 54
+#define CLK_TOP_ETH_500M 55
+#define CLK_TOP_AXI_SEL 56
+#define CLK_TOP_MEM_SEL 57
+#define CLK_TOP_DDRPHYCFG_SEL 58
+#define CLK_TOP_ETH_SEL 59
+#define CLK_TOP_PWM_SEL 60
+#define CLK_TOP_F10M_REF_SEL 61
+#define CLK_TOP_NFI_INFRA_SEL 62
+#define CLK_TOP_FLASH_SEL 63
+#define CLK_TOP_UART_SEL 64
+#define CLK_TOP_SPI0_SEL 65
+#define CLK_TOP_SPI1_SEL 66
+#define CLK_TOP_MSDC50_0_SEL 67
+#define CLK_TOP_MSDC30_0_SEL 68
+#define CLK_TOP_MSDC30_1_SEL 69
+#define CLK_TOP_A1SYS_HP_SEL 70
+#define CLK_TOP_A2SYS_HP_SEL 71
+#define CLK_TOP_INTDIR_SEL 72
+#define CLK_TOP_AUD_INTBUS_SEL 73
+#define CLK_TOP_PMICSPI_SEL 74
+#define CLK_TOP_SCP_SEL 75
+#define CLK_TOP_ATB_SEL 76
+#define CLK_TOP_HIF_SEL 77
+#define CLK_TOP_AUDIO_SEL 78
+#define CLK_TOP_U2_SEL 79
+#define CLK_TOP_AUD1_SEL 80
+#define CLK_TOP_AUD2_SEL 81
+#define CLK_TOP_IRRX_SEL 82
+#define CLK_TOP_IRTX_SEL 83
+#define CLK_TOP_ASM_L_SEL 84
+#define CLK_TOP_ASM_M_SEL 85
+#define CLK_TOP_ASM_H_SEL 86
+#define CLK_TOP_APLL1_SEL 87
+#define CLK_TOP_APLL2_SEL 88
+#define CLK_TOP_I2S0_MCK_SEL 89
+#define CLK_TOP_I2S1_MCK_SEL 90
+#define CLK_TOP_I2S2_MCK_SEL 91
+#define CLK_TOP_I2S3_MCK_SEL 92
+#define CLK_TOP_APLL1_DIV 93
+#define CLK_TOP_APLL2_DIV 94
+#define CLK_TOP_I2S0_MCK_DIV 95
+#define CLK_TOP_I2S1_MCK_DIV 96
+#define CLK_TOP_I2S2_MCK_DIV 97
+#define CLK_TOP_I2S3_MCK_DIV 98
+#define CLK_TOP_A1SYS_HP_DIV 99
+#define CLK_TOP_A2SYS_HP_DIV 100
+#define CLK_TOP_APLL1_DIV_PD 101
+#define CLK_TOP_APLL2_DIV_PD 102
+#define CLK_TOP_I2S0_MCK_DIV_PD 103
+#define CLK_TOP_I2S1_MCK_DIV_PD 104
+#define CLK_TOP_I2S2_MCK_DIV_PD 105
+#define CLK_TOP_I2S3_MCK_DIV_PD 106
+#define CLK_TOP_A1SYS_HP_DIV_PD 107
+#define CLK_TOP_A2SYS_HP_DIV_PD 108
+#define CLK_TOP_NR_CLK 109
+
+/* INFRACFG */
+
+#define CLK_INFRA_MUX1_SEL 0
+#define CLK_INFRA_DBGCLK_PD 1
+#define CLK_INFRA_AUDIO_PD 2
+#define CLK_INFRA_IRRX_PD 3
+#define CLK_INFRA_APXGPT_PD 4
+#define CLK_INFRA_PMIC_PD 5
+#define CLK_INFRA_TRNG 6
+#define CLK_INFRA_NR_CLK 7
+
+/* PERICFG */
+
+#define CLK_PERIBUS_SEL 0
+#define CLK_PERI_THERM_PD 1
+#define CLK_PERI_PWM1_PD 2
+#define CLK_PERI_PWM2_PD 3
+#define CLK_PERI_PWM3_PD 4
+#define CLK_PERI_PWM4_PD 5
+#define CLK_PERI_PWM5_PD 6
+#define CLK_PERI_PWM6_PD 7
+#define CLK_PERI_PWM7_PD 8
+#define CLK_PERI_PWM_PD 9
+#define CLK_PERI_AP_DMA_PD 10
+#define CLK_PERI_MSDC30_0_PD 11
+#define CLK_PERI_MSDC30_1_PD 12
+#define CLK_PERI_UART0_PD 13
+#define CLK_PERI_UART1_PD 14
+#define CLK_PERI_UART2_PD 15
+#define CLK_PERI_UART3_PD 16
+#define CLK_PERI_UART4_PD 17
+#define CLK_PERI_BTIF_PD 18
+#define CLK_PERI_I2C0_PD 19
+#define CLK_PERI_I2C1_PD 20
+#define CLK_PERI_I2C2_PD 21
+#define CLK_PERI_SPI1_PD 22
+#define CLK_PERI_AUXADC_PD 23
+#define CLK_PERI_SPI0_PD 24
+#define CLK_PERI_SNFI_PD 25
+#define CLK_PERI_NFI_PD 26
+#define CLK_PERI_NFIECC_PD 27
+#define CLK_PERI_FLASH_PD 28
+#define CLK_PERI_IRTX_PD 29
+#define CLK_PERI_NR_CLK 30
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIV2PLL 2
+#define CLK_APMIXED_ETH1PLL 3
+#define CLK_APMIXED_ETH2PLL 4
+#define CLK_APMIXED_AUD1PLL 5
+#define CLK_APMIXED_AUD2PLL 6
+#define CLK_APMIXED_TRGPLL 7
+#define CLK_APMIXED_SGMIPLL 8
+#define CLK_APMIXED_MAIN_CORE_EN 9
+#define CLK_APMIXED_NR_CLK 10
+
+/* AUDIOSYS */
+
+#define CLK_AUDIO_AFE 0
+#define CLK_AUDIO_HDMI 1
+#define CLK_AUDIO_SPDF 2
+#define CLK_AUDIO_APLL 3
+#define CLK_AUDIO_I2SIN1 4
+#define CLK_AUDIO_I2SIN2 5
+#define CLK_AUDIO_I2SIN3 6
+#define CLK_AUDIO_I2SIN4 7
+#define CLK_AUDIO_I2SO1 8
+#define CLK_AUDIO_I2SO2 9
+#define CLK_AUDIO_I2SO3 10
+#define CLK_AUDIO_I2SO4 11
+#define CLK_AUDIO_ASRCI1 12
+#define CLK_AUDIO_ASRCI2 13
+#define CLK_AUDIO_ASRCO1 14
+#define CLK_AUDIO_ASRCO2 15
+#define CLK_AUDIO_INTDIR 16
+#define CLK_AUDIO_A1SYS 17
+#define CLK_AUDIO_A2SYS 18
+#define CLK_AUDIO_UL1 19
+#define CLK_AUDIO_UL2 20
+#define CLK_AUDIO_UL3 21
+#define CLK_AUDIO_UL4 22
+#define CLK_AUDIO_UL5 23
+#define CLK_AUDIO_UL6 24
+#define CLK_AUDIO_DL1 25
+#define CLK_AUDIO_DL2 26
+#define CLK_AUDIO_DL3 27
+#define CLK_AUDIO_DL4 28
+#define CLK_AUDIO_DL5 29
+#define CLK_AUDIO_DL6 30
+#define CLK_AUDIO_DLMCH 31
+#define CLK_AUDIO_ARB1 32
+#define CLK_AUDIO_AWB 33
+#define CLK_AUDIO_AWB2 34
+#define CLK_AUDIO_DAI 35
+#define CLK_AUDIO_MOD 36
+#define CLK_AUDIO_ASRCI3 37
+#define CLK_AUDIO_ASRCI4 38
+#define CLK_AUDIO_ASRCO3 39
+#define CLK_AUDIO_ASRCO4 40
+#define CLK_AUDIO_MEM_ASRC1 41
+#define CLK_AUDIO_MEM_ASRC2 42
+#define CLK_AUDIO_MEM_ASRC3 43
+#define CLK_AUDIO_MEM_ASRC4 44
+#define CLK_AUDIO_MEM_ASRC5 45
+#define CLK_AUDIO_AFE_CONN 46
+#define CLK_AUDIO_NR_CLK 47
+
+/* SSUSBSYS */
+
+#define CLK_SSUSB_U2_PHY_1P_EN 0
+#define CLK_SSUSB_U2_PHY_EN 1
+#define CLK_SSUSB_REF_EN 2
+#define CLK_SSUSB_SYS_EN 3
+#define CLK_SSUSB_MCU_EN 4
+#define CLK_SSUSB_DMA_EN 5
+#define CLK_SSUSB_NR_CLK 6
+
+/* PCIESYS */
+
+#define CLK_PCIE_P1_AUX_EN 0
+#define CLK_PCIE_P1_OBFF_EN 1
+#define CLK_PCIE_P1_AHB_EN 2
+#define CLK_PCIE_P1_AXI_EN 3
+#define CLK_PCIE_P1_MAC_EN 4
+#define CLK_PCIE_P1_PIPE_EN 5
+#define CLK_PCIE_P0_AUX_EN 6
+#define CLK_PCIE_P0_OBFF_EN 7
+#define CLK_PCIE_P0_AHB_EN 8
+#define CLK_PCIE_P0_AXI_EN 9
+#define CLK_PCIE_P0_MAC_EN 10
+#define CLK_PCIE_P0_PIPE_EN 11
+#define CLK_SATA_AHB_EN 12
+#define CLK_SATA_AXI_EN 13
+#define CLK_SATA_ASIC_EN 14
+#define CLK_SATA_RBC_EN 15
+#define CLK_SATA_PM_EN 16
+#define CLK_PCIE_NR_CLK 17
+
+/* ETHSYS */
+
+#define CLK_ETH_HSDMA_EN 0
+#define CLK_ETH_ESW_EN 1
+#define CLK_ETH_GP2_EN 2
+#define CLK_ETH_GP1_EN 3
+#define CLK_ETH_GP0_EN 4
+#define CLK_ETH_NR_CLK 5
+
+/* SGMIISYS */
+
+#define CLK_SGMII_TX250M_EN 0
+#define CLK_SGMII_RX250M_EN 1
+#define CLK_SGMII_CDR_REF 2
+#define CLK_SGMII_CDR_FB 3
+#define CLK_SGMII_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT7622_H */
+
diff --git a/include/dt-bindings/clock/mt7629-clk.h b/include/dt-bindings/clock/mt7629-clk.h
new file mode 100644
index 0000000..ad8e6d7
--- /dev/null
+++ b/include/dt-bindings/clock/mt7629-clk.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7629_H
+#define _DT_BINDINGS_CLK_MT7629_H
+
+/* TOPCKGEN */
+#define CLK_TOP_TO_U2_PHY 0
+#define CLK_TOP_TO_U2_PHY_1P 1
+#define CLK_TOP_PCIE0_PIPE_EN 2
+#define CLK_TOP_PCIE1_PIPE_EN 3
+#define CLK_TOP_SSUSB_TX250M 4
+#define CLK_TOP_SSUSB_EQ_RX250M 5
+#define CLK_TOP_SSUSB_CDR_REF 6
+#define CLK_TOP_SSUSB_CDR_FB 7
+#define CLK_TOP_SATA_ASIC 8
+#define CLK_TOP_SATA_RBC 9
+#define CLK_TOP_TO_USB3_SYS 10
+#define CLK_TOP_P1_1MHZ 11
+#define CLK_TOP_4MHZ 12
+#define CLK_TOP_P0_1MHZ 13
+#define CLK_TOP_ETH_500M 14
+#define CLK_TOP_TXCLK_SRC_PRE 15
+#define CLK_TOP_RTC 16
+#define CLK_TOP_PWM_QTR_26M 17
+#define CLK_TOP_CPUM_TCK_IN 18
+#define CLK_TOP_TO_USB3_DA_TOP 19
+#define CLK_TOP_MEMPLL 20
+#define CLK_TOP_DMPLL 21
+#define CLK_TOP_DMPLL_D4 22
+#define CLK_TOP_DMPLL_D8 23
+#define CLK_TOP_SYSPLL_D2 24
+#define CLK_TOP_SYSPLL1_D2 25
+#define CLK_TOP_SYSPLL1_D4 26
+#define CLK_TOP_SYSPLL1_D8 27
+#define CLK_TOP_SYSPLL1_D16 28
+#define CLK_TOP_SYSPLL2_D2 29
+#define CLK_TOP_SYSPLL2_D4 30
+#define CLK_TOP_SYSPLL2_D8 31
+#define CLK_TOP_SYSPLL_D5 32
+#define CLK_TOP_SYSPLL3_D2 33
+#define CLK_TOP_SYSPLL3_D4 34
+#define CLK_TOP_SYSPLL_D7 35
+#define CLK_TOP_SYSPLL4_D2 36
+#define CLK_TOP_SYSPLL4_D4 37
+#define CLK_TOP_SYSPLL4_D16 38
+#define CLK_TOP_UNIVPLL 39
+#define CLK_TOP_UNIVPLL1_D2 40
+#define CLK_TOP_UNIVPLL1_D4 41
+#define CLK_TOP_UNIVPLL1_D8 42
+#define CLK_TOP_UNIVPLL_D3 43
+#define CLK_TOP_UNIVPLL2_D2 44
+#define CLK_TOP_UNIVPLL2_D4 45
+#define CLK_TOP_UNIVPLL2_D8 46
+#define CLK_TOP_UNIVPLL2_D16 47
+#define CLK_TOP_UNIVPLL_D5 48
+#define CLK_TOP_UNIVPLL3_D2 49
+#define CLK_TOP_UNIVPLL3_D4 50
+#define CLK_TOP_UNIVPLL3_D16 51
+#define CLK_TOP_UNIVPLL_D7 52
+#define CLK_TOP_UNIVPLL_D80_D4 53
+#define CLK_TOP_UNIV48M 54
+#define CLK_TOP_SGMIIPLL_D2 55
+#define CLK_TOP_CLKXTAL_D4 56
+#define CLK_TOP_HD_FAXI 57
+#define CLK_TOP_FAXI 58
+#define CLK_TOP_F_FAUD_INTBUS 59
+#define CLK_TOP_AP2WBHIF_HCLK 60
+#define CLK_TOP_10M_INFRAO 61
+#define CLK_TOP_MSDC30_1 62
+#define CLK_TOP_SPI 63
+#define CLK_TOP_SF 64
+#define CLK_TOP_FLASH 65
+#define CLK_TOP_TO_USB3_REF 66
+#define CLK_TOP_TO_USB3_MCU 67
+#define CLK_TOP_TO_USB3_DMA 68
+#define CLK_TOP_FROM_TOP_AHB 69
+#define CLK_TOP_FROM_TOP_AXI 70
+#define CLK_TOP_PCIE1_MAC_EN 71
+#define CLK_TOP_PCIE0_MAC_EN 72
+#define CLK_TOP_AXI_SEL 73
+#define CLK_TOP_MEM_SEL 74
+#define CLK_TOP_DDRPHYCFG_SEL 75
+#define CLK_TOP_ETH_SEL 76
+#define CLK_TOP_PWM_SEL 77
+#define CLK_TOP_F10M_REF_SEL 78
+#define CLK_TOP_NFI_INFRA_SEL 79
+#define CLK_TOP_FLASH_SEL 80
+#define CLK_TOP_UART_SEL 81
+#define CLK_TOP_SPI0_SEL 82
+#define CLK_TOP_SPI1_SEL 83
+#define CLK_TOP_MSDC50_0_SEL 84
+#define CLK_TOP_MSDC30_0_SEL 85
+#define CLK_TOP_MSDC30_1_SEL 86
+#define CLK_TOP_AP2WBMCU_SEL 87
+#define CLK_TOP_AP2WBHIF_SEL 88
+#define CLK_TOP_AUDIO_SEL 89
+#define CLK_TOP_AUD_INTBUS_SEL 90
+#define CLK_TOP_PMICSPI_SEL 91
+#define CLK_TOP_SCP_SEL 92
+#define CLK_TOP_ATB_SEL 93
+#define CLK_TOP_HIF_SEL 94
+#define CLK_TOP_SATA_SEL 95
+#define CLK_TOP_U2_SEL 96
+#define CLK_TOP_AUD1_SEL 97
+#define CLK_TOP_AUD2_SEL 98
+#define CLK_TOP_IRRX_SEL 99
+#define CLK_TOP_IRTX_SEL 100
+#define CLK_TOP_SATA_MCU_SEL 101
+#define CLK_TOP_PCIE0_MCU_SEL 102
+#define CLK_TOP_PCIE1_MCU_SEL 103
+#define CLK_TOP_SSUSB_MCU_SEL 104
+#define CLK_TOP_CRYPTO_SEL 105
+#define CLK_TOP_SGMII_REF_1_SEL 106
+#define CLK_TOP_10M_SEL 107
+#define CLK_TOP_NR_CLK 108
+
+/* INFRACFG */
+#define CLK_INFRA_MUX1_SEL 0
+#define CLK_INFRA_DBGCLK_PD 1
+#define CLK_INFRA_TRNG_PD 2
+#define CLK_INFRA_DEVAPC_PD 3
+#define CLK_INFRA_APXGPT_PD 4
+#define CLK_INFRA_SEJ_PD 5
+#define CLK_INFRA_NR_CLK 6
+
+/* PERICFG */
+#define CLK_PERIBUS_SEL 0
+#define CLK_PERI_PWM1_PD 1
+#define CLK_PERI_PWM2_PD 2
+#define CLK_PERI_PWM3_PD 3
+#define CLK_PERI_PWM4_PD 4
+#define CLK_PERI_PWM5_PD 5
+#define CLK_PERI_PWM6_PD 6
+#define CLK_PERI_PWM7_PD 7
+#define CLK_PERI_PWM_PD 8
+#define CLK_PERI_AP_DMA_PD 9
+#define CLK_PERI_MSDC30_1_PD 10
+#define CLK_PERI_UART0_PD 11
+#define CLK_PERI_UART1_PD 12
+#define CLK_PERI_UART2_PD 13
+#define CLK_PERI_UART3_PD 14
+#define CLK_PERI_BTIF_PD 15
+#define CLK_PERI_I2C0_PD 16
+#define CLK_PERI_SPI0_PD 17
+#define CLK_PERI_SNFI_PD 18
+#define CLK_PERI_NFI_PD 19
+#define CLK_PERI_NFIECC_PD 20
+#define CLK_PERI_FLASH_PD 21
+#define CLK_PERI_NR_CLK 22
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIV2PLL 2
+#define CLK_APMIXED_ETH1PLL 3
+#define CLK_APMIXED_ETH2PLL 4
+#define CLK_APMIXED_SGMIPLL 5
+#define CLK_APMIXED_MAIN_CORE_EN 6
+#define CLK_APMIXED_NR_CLK 7
+
+/* SSUSBSYS */
+#define CLK_SSUSB_U2_PHY_1P_EN 0
+#define CLK_SSUSB_U2_PHY_EN 1
+#define CLK_SSUSB_REF_EN 2
+#define CLK_SSUSB_SYS_EN 3
+#define CLK_SSUSB_MCU_EN 4
+#define CLK_SSUSB_DMA_EN 5
+#define CLK_SSUSB_NR_CLK 6
+
+/* PCIESYS */
+#define CLK_PCIE_P1_AUX_EN 0
+#define CLK_PCIE_P1_OBFF_EN 1
+#define CLK_PCIE_P1_AHB_EN 2
+#define CLK_PCIE_P1_AXI_EN 3
+#define CLK_PCIE_P1_MAC_EN 4
+#define CLK_PCIE_P1_PIPE_EN 5
+#define CLK_PCIE_P0_AUX_EN 6
+#define CLK_PCIE_P0_OBFF_EN 7
+#define CLK_PCIE_P0_AHB_EN 8
+#define CLK_PCIE_P0_AXI_EN 9
+#define CLK_PCIE_P0_MAC_EN 10
+#define CLK_PCIE_P0_PIPE_EN 11
+#define CLK_PCIE_NR_CLK 12
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_GP0_EN 3
+#define CLK_ETH_ESW_EN 4
+#define CLK_ETH_NR_CLK 5
+
+/* SGMIISYS */
+#define CLK_SGMII_TX_EN 0
+#define CLK_SGMII_RX_EN 1
+#define CLK_SGMII_CDR_REF 2
+#define CLK_SGMII_CDR_FB 3
+#define CLK_SGMII_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT7629_H */
diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h
new file mode 100644
index 0000000..dad8365
--- /dev/null
+++ b/include/dt-bindings/clock/mt8135-clk.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8135_H
+#define _DT_BINDINGS_CLK_MT8135_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_DSI0_LNTC_DSICLK 1
+#define CLK_TOP_HDMITX_CLKDIG_CTS 2
+#define CLK_TOP_CLKPH_MCK 3
+#define CLK_TOP_CPUM_TCK_IN 4
+#define CLK_TOP_MAINPLL_806M 5
+#define CLK_TOP_MAINPLL_537P3M 6
+#define CLK_TOP_MAINPLL_322P4M 7
+#define CLK_TOP_MAINPLL_230P3M 8
+#define CLK_TOP_UNIVPLL_624M 9
+#define CLK_TOP_UNIVPLL_416M 10
+#define CLK_TOP_UNIVPLL_249P6M 11
+#define CLK_TOP_UNIVPLL_178P3M 12
+#define CLK_TOP_UNIVPLL_48M 13
+#define CLK_TOP_MMPLL_D2 14
+#define CLK_TOP_MMPLL_D3 15
+#define CLK_TOP_MMPLL_D5 16
+#define CLK_TOP_MMPLL_D7 17
+#define CLK_TOP_MMPLL_D4 18
+#define CLK_TOP_MMPLL_D6 19
+#define CLK_TOP_SYSPLL_D2 20
+#define CLK_TOP_SYSPLL_D4 21
+#define CLK_TOP_SYSPLL_D6 22
+#define CLK_TOP_SYSPLL_D8 23
+#define CLK_TOP_SYSPLL_D10 24
+#define CLK_TOP_SYSPLL_D12 25
+#define CLK_TOP_SYSPLL_D16 26
+#define CLK_TOP_SYSPLL_D24 27
+#define CLK_TOP_SYSPLL_D3 28
+#define CLK_TOP_SYSPLL_D2P5 29
+#define CLK_TOP_SYSPLL_D5 30
+#define CLK_TOP_SYSPLL_D3P5 31
+#define CLK_TOP_UNIVPLL1_D2 32
+#define CLK_TOP_UNIVPLL1_D4 33
+#define CLK_TOP_UNIVPLL1_D6 34
+#define CLK_TOP_UNIVPLL1_D8 35
+#define CLK_TOP_UNIVPLL1_D10 36
+#define CLK_TOP_UNIVPLL2_D2 37
+#define CLK_TOP_UNIVPLL2_D4 38
+#define CLK_TOP_UNIVPLL2_D6 39
+#define CLK_TOP_UNIVPLL2_D8 40
+#define CLK_TOP_UNIVPLL_D3 41
+#define CLK_TOP_UNIVPLL_D5 42
+#define CLK_TOP_UNIVPLL_D7 43
+#define CLK_TOP_UNIVPLL_D10 44
+#define CLK_TOP_UNIVPLL_D26 45
+#define CLK_TOP_APLL 46
+#define CLK_TOP_APLL_D4 47
+#define CLK_TOP_APLL_D8 48
+#define CLK_TOP_APLL_D16 49
+#define CLK_TOP_APLL_D24 50
+#define CLK_TOP_LVDSPLL_D2 51
+#define CLK_TOP_LVDSPLL_D4 52
+#define CLK_TOP_LVDSPLL_D8 53
+#define CLK_TOP_LVDSTX_CLKDIG_CT 54
+#define CLK_TOP_VPLL_DPIX 55
+#define CLK_TOP_TVHDMI_H 56
+#define CLK_TOP_HDMITX_CLKDIG_D2 57
+#define CLK_TOP_HDMITX_CLKDIG_D3 58
+#define CLK_TOP_TVHDMI_D2 59
+#define CLK_TOP_TVHDMI_D4 60
+#define CLK_TOP_MEMPLL_MCK_D4 61
+#define CLK_TOP_AXI_SEL 62
+#define CLK_TOP_SMI_SEL 63
+#define CLK_TOP_MFG_SEL 64
+#define CLK_TOP_IRDA_SEL 65
+#define CLK_TOP_CAM_SEL 66
+#define CLK_TOP_AUD_INTBUS_SEL 67
+#define CLK_TOP_JPG_SEL 68
+#define CLK_TOP_DISP_SEL 69
+#define CLK_TOP_MSDC30_1_SEL 70
+#define CLK_TOP_MSDC30_2_SEL 71
+#define CLK_TOP_MSDC30_3_SEL 72
+#define CLK_TOP_MSDC30_4_SEL 73
+#define CLK_TOP_USB20_SEL 74
+#define CLK_TOP_VENC_SEL 75
+#define CLK_TOP_SPI_SEL 76
+#define CLK_TOP_UART_SEL 77
+#define CLK_TOP_MEM_SEL 78
+#define CLK_TOP_CAMTG_SEL 79
+#define CLK_TOP_AUDIO_SEL 80
+#define CLK_TOP_FIX_SEL 81
+#define CLK_TOP_VDEC_SEL 82
+#define CLK_TOP_DDRPHYCFG_SEL 83
+#define CLK_TOP_DPILVDS_SEL 84
+#define CLK_TOP_PMICSPI_SEL 85
+#define CLK_TOP_MSDC30_0_SEL 86
+#define CLK_TOP_SMI_MFG_AS_SEL 87
+#define CLK_TOP_GCPU_SEL 88
+#define CLK_TOP_DPI1_SEL 89
+#define CLK_TOP_CCI_SEL 90
+#define CLK_TOP_APLL_SEL 91
+#define CLK_TOP_HDMIPLL_SEL 92
+#define CLK_TOP_NR_CLK 93
+
+/* APMIXED_SYS */
+
+#define CLK_APMIXED_ARMPLL1 1
+#define CLK_APMIXED_ARMPLL2 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIVPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_MSDCPLL 6
+#define CLK_APMIXED_TVDPLL 7
+#define CLK_APMIXED_LVDSPLL 8
+#define CLK_APMIXED_AUDPLL 9
+#define CLK_APMIXED_VDECPLL 10
+#define CLK_APMIXED_NR_CLK 11
+
+/* INFRA_SYS */
+
+#define CLK_INFRA_PMIC_WRAP 1
+#define CLK_INFRA_PMICSPI 2
+#define CLK_INFRA_CCIF1_AP_CTRL 3
+#define CLK_INFRA_CCIF0_AP_CTRL 4
+#define CLK_INFRA_KP 5
+#define CLK_INFRA_CPUM 6
+#define CLK_INFRA_M4U 7
+#define CLK_INFRA_MFGAXI 8
+#define CLK_INFRA_DEVAPC 9
+#define CLK_INFRA_AUDIO 10
+#define CLK_INFRA_MFG_BUS 11
+#define CLK_INFRA_SMI 12
+#define CLK_INFRA_DBGCLK 13
+#define CLK_INFRA_NR_CLK 14
+
+/* PERI_SYS */
+
+#define CLK_PERI_I2C5 1
+#define CLK_PERI_I2C4 2
+#define CLK_PERI_I2C3 3
+#define CLK_PERI_I2C2 4
+#define CLK_PERI_I2C1 5
+#define CLK_PERI_I2C0 6
+#define CLK_PERI_UART3 7
+#define CLK_PERI_UART2 8
+#define CLK_PERI_UART1 9
+#define CLK_PERI_UART0 10
+#define CLK_PERI_IRDA 11
+#define CLK_PERI_NLI 12
+#define CLK_PERI_MD_HIF 13
+#define CLK_PERI_AP_HIF 14
+#define CLK_PERI_MSDC30_3 15
+#define CLK_PERI_MSDC30_2 16
+#define CLK_PERI_MSDC30_1 17
+#define CLK_PERI_MSDC20_2 18
+#define CLK_PERI_MSDC20_1 19
+#define CLK_PERI_AP_DMA 20
+#define CLK_PERI_USB1 21
+#define CLK_PERI_USB0 22
+#define CLK_PERI_PWM 23
+#define CLK_PERI_PWM7 24
+#define CLK_PERI_PWM6 25
+#define CLK_PERI_PWM5 26
+#define CLK_PERI_PWM4 27
+#define CLK_PERI_PWM3 28
+#define CLK_PERI_PWM2 29
+#define CLK_PERI_PWM1 30
+#define CLK_PERI_THERM 31
+#define CLK_PERI_NFI 32
+#define CLK_PERI_USBSLV 33
+#define CLK_PERI_USB1_MCU 34
+#define CLK_PERI_USB0_MCU 35
+#define CLK_PERI_GCPU 36
+#define CLK_PERI_FHCTL 37
+#define CLK_PERI_SPI1 38
+#define CLK_PERI_AUXADC 39
+#define CLK_PERI_PERI_PWRAP 40
+#define CLK_PERI_I2C6 41
+#define CLK_PERI_UART0_SEL 42
+#define CLK_PERI_UART1_SEL 43
+#define CLK_PERI_UART2_SEL 44
+#define CLK_PERI_UART3_SEL 45
+#define CLK_PERI_NR_CLK 46
+
+#endif /* _DT_BINDINGS_CLK_MT8135_H */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
new file mode 100644
index 0000000..3acebe9
--- /dev/null
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8173_H
+#define _DT_BINDINGS_CLK_MT8173_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_USB_SYSPLL_125M 3
+#define CLK_TOP_HDMITX_DIG_CTS 4
+#define CLK_TOP_ARMCA7PLL_754M 5
+#define CLK_TOP_ARMCA7PLL_502M 6
+#define CLK_TOP_MAIN_H546M 7
+#define CLK_TOP_MAIN_H364M 8
+#define CLK_TOP_MAIN_H218P4M 9
+#define CLK_TOP_MAIN_H156M 10
+#define CLK_TOP_TVDPLL_445P5M 11
+#define CLK_TOP_TVDPLL_594M 12
+#define CLK_TOP_UNIV_624M 13
+#define CLK_TOP_UNIV_416M 14
+#define CLK_TOP_UNIV_249P6M 15
+#define CLK_TOP_UNIV_178P3M 16
+#define CLK_TOP_UNIV_48M 17
+#define CLK_TOP_CLKRTC_EXT 18
+#define CLK_TOP_CLKRTC_INT 19
+#define CLK_TOP_FPC 20
+#define CLK_TOP_HDMITXPLL_D2 21
+#define CLK_TOP_HDMITXPLL_D3 22
+#define CLK_TOP_ARMCA7PLL_D2 23
+#define CLK_TOP_ARMCA7PLL_D3 24
+#define CLK_TOP_APLL1 25
+#define CLK_TOP_APLL2 26
+#define CLK_TOP_DMPLL 27
+#define CLK_TOP_DMPLL_D2 28
+#define CLK_TOP_DMPLL_D4 29
+#define CLK_TOP_DMPLL_D8 30
+#define CLK_TOP_DMPLL_D16 31
+#define CLK_TOP_LVDSPLL_D2 32
+#define CLK_TOP_LVDSPLL_D4 33
+#define CLK_TOP_LVDSPLL_D8 34
+#define CLK_TOP_MMPLL 35
+#define CLK_TOP_MMPLL_D2 36
+#define CLK_TOP_MSDCPLL 37
+#define CLK_TOP_MSDCPLL_D2 38
+#define CLK_TOP_MSDCPLL_D4 39
+#define CLK_TOP_MSDCPLL2 40
+#define CLK_TOP_MSDCPLL2_D2 41
+#define CLK_TOP_MSDCPLL2_D4 42
+#define CLK_TOP_SYSPLL_D2 43
+#define CLK_TOP_SYSPLL1_D2 44
+#define CLK_TOP_SYSPLL1_D4 45
+#define CLK_TOP_SYSPLL1_D8 46
+#define CLK_TOP_SYSPLL1_D16 47
+#define CLK_TOP_SYSPLL_D3 48
+#define CLK_TOP_SYSPLL2_D2 49
+#define CLK_TOP_SYSPLL2_D4 50
+#define CLK_TOP_SYSPLL_D5 51
+#define CLK_TOP_SYSPLL3_D2 52
+#define CLK_TOP_SYSPLL3_D4 53
+#define CLK_TOP_SYSPLL_D7 54
+#define CLK_TOP_SYSPLL4_D2 55
+#define CLK_TOP_SYSPLL4_D4 56
+#define CLK_TOP_TVDPLL 57
+#define CLK_TOP_TVDPLL_D2 58
+#define CLK_TOP_TVDPLL_D4 59
+#define CLK_TOP_TVDPLL_D8 60
+#define CLK_TOP_TVDPLL_D16 61
+#define CLK_TOP_UNIVPLL_D2 62
+#define CLK_TOP_UNIVPLL1_D2 63
+#define CLK_TOP_UNIVPLL1_D4 64
+#define CLK_TOP_UNIVPLL1_D8 65
+#define CLK_TOP_UNIVPLL_D3 66
+#define CLK_TOP_UNIVPLL2_D2 67
+#define CLK_TOP_UNIVPLL2_D4 68
+#define CLK_TOP_UNIVPLL2_D8 69
+#define CLK_TOP_UNIVPLL_D5 70
+#define CLK_TOP_UNIVPLL3_D2 71
+#define CLK_TOP_UNIVPLL3_D4 72
+#define CLK_TOP_UNIVPLL3_D8 73
+#define CLK_TOP_UNIVPLL_D7 74
+#define CLK_TOP_UNIVPLL_D26 75
+#define CLK_TOP_UNIVPLL_D52 76
+#define CLK_TOP_VCODECPLL 77
+#define CLK_TOP_VCODECPLL_370P5 78
+#define CLK_TOP_VENCPLL 79
+#define CLK_TOP_VENCPLL_D2 80
+#define CLK_TOP_VENCPLL_D4 81
+#define CLK_TOP_AXI_SEL 82
+#define CLK_TOP_MEM_SEL 83
+#define CLK_TOP_DDRPHYCFG_SEL 84
+#define CLK_TOP_MM_SEL 85
+#define CLK_TOP_PWM_SEL 86
+#define CLK_TOP_VDEC_SEL 87
+#define CLK_TOP_VENC_SEL 88
+#define CLK_TOP_MFG_SEL 89
+#define CLK_TOP_CAMTG_SEL 90
+#define CLK_TOP_UART_SEL 91
+#define CLK_TOP_SPI_SEL 92
+#define CLK_TOP_USB20_SEL 93
+#define CLK_TOP_USB30_SEL 94
+#define CLK_TOP_MSDC50_0_H_SEL 95
+#define CLK_TOP_MSDC50_0_SEL 96
+#define CLK_TOP_MSDC30_1_SEL 97
+#define CLK_TOP_MSDC30_2_SEL 98
+#define CLK_TOP_MSDC30_3_SEL 99
+#define CLK_TOP_AUDIO_SEL 100
+#define CLK_TOP_AUD_INTBUS_SEL 101
+#define CLK_TOP_PMICSPI_SEL 102
+#define CLK_TOP_SCP_SEL 103
+#define CLK_TOP_ATB_SEL 104
+#define CLK_TOP_VENC_LT_SEL 105
+#define CLK_TOP_DPI0_SEL 106
+#define CLK_TOP_IRDA_SEL 107
+#define CLK_TOP_CCI400_SEL 108
+#define CLK_TOP_AUD_1_SEL 109
+#define CLK_TOP_AUD_2_SEL 110
+#define CLK_TOP_MEM_MFG_IN_SEL 111
+#define CLK_TOP_AXI_MFG_IN_SEL 112
+#define CLK_TOP_SCAM_SEL 113
+#define CLK_TOP_SPINFI_IFR_SEL 114
+#define CLK_TOP_HDMI_SEL 115
+#define CLK_TOP_DPILVDS_SEL 116
+#define CLK_TOP_MSDC50_2_H_SEL 117
+#define CLK_TOP_HDCP_SEL 118
+#define CLK_TOP_HDCP_24M_SEL 119
+#define CLK_TOP_RTC_SEL 120
+#define CLK_TOP_APLL1_DIV0 121
+#define CLK_TOP_APLL1_DIV1 122
+#define CLK_TOP_APLL1_DIV2 123
+#define CLK_TOP_APLL1_DIV3 124
+#define CLK_TOP_APLL1_DIV4 125
+#define CLK_TOP_APLL1_DIV5 126
+#define CLK_TOP_APLL2_DIV0 127
+#define CLK_TOP_APLL2_DIV1 128
+#define CLK_TOP_APLL2_DIV2 129
+#define CLK_TOP_APLL2_DIV3 130
+#define CLK_TOP_APLL2_DIV4 131
+#define CLK_TOP_APLL2_DIV5 132
+#define CLK_TOP_I2S0_M_SEL 133
+#define CLK_TOP_I2S1_M_SEL 134
+#define CLK_TOP_I2S2_M_SEL 135
+#define CLK_TOP_I2S3_M_SEL 136
+#define CLK_TOP_I2S3_B_SEL 137
+#define CLK_TOP_DSI0_DIG 138
+#define CLK_TOP_DSI1_DIG 139
+#define CLK_TOP_LVDS_PXL 140
+#define CLK_TOP_LVDS_CTS 141
+#define CLK_TOP_NR_CLK 142
+
+/* APMIXED_SYS */
+
+#define CLK_APMIXED_ARMCA15PLL 1
+#define CLK_APMIXED_ARMCA7PLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIVPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_MSDCPLL 6
+#define CLK_APMIXED_VENCPLL 7
+#define CLK_APMIXED_TVDPLL 8
+#define CLK_APMIXED_MPLL 9
+#define CLK_APMIXED_VCODECPLL 10
+#define CLK_APMIXED_APLL1 11
+#define CLK_APMIXED_APLL2 12
+#define CLK_APMIXED_LVDSPLL 13
+#define CLK_APMIXED_MSDCPLL2 14
+#define CLK_APMIXED_REF2USB_TX 15
+#define CLK_APMIXED_HDMI_REF 16
+#define CLK_APMIXED_NR_CLK 17
+
+/* INFRA_SYS */
+
+#define CLK_INFRA_DBGCLK 1
+#define CLK_INFRA_SMI 2
+#define CLK_INFRA_AUDIO 3
+#define CLK_INFRA_GCE 4
+#define CLK_INFRA_L2C_SRAM 5
+#define CLK_INFRA_M4U 6
+#define CLK_INFRA_CPUM 7
+#define CLK_INFRA_KP 8
+#define CLK_INFRA_CEC 9
+#define CLK_INFRA_PMICSPI 10
+#define CLK_INFRA_PMICWRAP 11
+#define CLK_INFRA_CLK_13M 12
+#define CLK_INFRA_CA53SEL 13
+#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */
+#define CLK_INFRA_CA72SEL 14
+#define CLK_INFRA_NR_CLK 15
+
+/* PERI_SYS */
+
+#define CLK_PERI_NFI 1
+#define CLK_PERI_THERM 2
+#define CLK_PERI_PWM1 3
+#define CLK_PERI_PWM2 4
+#define CLK_PERI_PWM3 5
+#define CLK_PERI_PWM4 6
+#define CLK_PERI_PWM5 7
+#define CLK_PERI_PWM6 8
+#define CLK_PERI_PWM7 9
+#define CLK_PERI_PWM 10
+#define CLK_PERI_USB0 11
+#define CLK_PERI_USB1 12
+#define CLK_PERI_AP_DMA 13
+#define CLK_PERI_MSDC30_0 14
+#define CLK_PERI_MSDC30_1 15
+#define CLK_PERI_MSDC30_2 16
+#define CLK_PERI_MSDC30_3 17
+#define CLK_PERI_NLI_ARB 18
+#define CLK_PERI_IRDA 19
+#define CLK_PERI_UART0 20
+#define CLK_PERI_UART1 21
+#define CLK_PERI_UART2 22
+#define CLK_PERI_UART3 23
+#define CLK_PERI_I2C0 24
+#define CLK_PERI_I2C1 25
+#define CLK_PERI_I2C2 26
+#define CLK_PERI_I2C3 27
+#define CLK_PERI_I2C4 28
+#define CLK_PERI_AUXADC 29
+#define CLK_PERI_SPI0 30
+#define CLK_PERI_I2C5 31
+#define CLK_PERI_NFIECC 32
+#define CLK_PERI_SPI 33
+#define CLK_PERI_IRRX 34
+#define CLK_PERI_I2C6 35
+#define CLK_PERI_UART0_SEL 36
+#define CLK_PERI_UART1_SEL 37
+#define CLK_PERI_UART2_SEL 38
+#define CLK_PERI_UART3_SEL 39
+#define CLK_PERI_NR_CLK 40
+
+/* IMG_SYS */
+
+#define CLK_IMG_LARB2_SMI 1
+#define CLK_IMG_CAM_SMI 2
+#define CLK_IMG_CAM_CAM 3
+#define CLK_IMG_SEN_TG 4
+#define CLK_IMG_SEN_CAM 5
+#define CLK_IMG_CAM_SV 6
+#define CLK_IMG_FD 7
+#define CLK_IMG_NR_CLK 8
+
+/* MM_SYS */
+
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_CAM_MDP 3
+#define CLK_MM_MDP_RDMA0 4
+#define CLK_MM_MDP_RDMA1 5
+#define CLK_MM_MDP_RSZ0 6
+#define CLK_MM_MDP_RSZ1 7
+#define CLK_MM_MDP_RSZ2 8
+#define CLK_MM_MDP_TDSHP0 9
+#define CLK_MM_MDP_TDSHP1 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_SPLIT1 29
+#define CLK_MM_DISP_MERGE 30
+#define CLK_MM_DISP_OD 31
+#define CLK_MM_DISP_PWM0MM 32
+#define CLK_MM_DISP_PWM026M 33
+#define CLK_MM_DISP_PWM1MM 34
+#define CLK_MM_DISP_PWM126M 35
+#define CLK_MM_DSI0_ENGINE 36
+#define CLK_MM_DSI0_DIGITAL 37
+#define CLK_MM_DSI1_ENGINE 38
+#define CLK_MM_DSI1_DIGITAL 39
+#define CLK_MM_DPI_PIXEL 40
+#define CLK_MM_DPI_ENGINE 41
+#define CLK_MM_DPI1_PIXEL 42
+#define CLK_MM_DPI1_ENGINE 43
+#define CLK_MM_HDMI_PIXEL 44
+#define CLK_MM_HDMI_PLLCK 45
+#define CLK_MM_HDMI_AUDIO 46
+#define CLK_MM_HDMI_SPDIF 47
+#define CLK_MM_LVDS_PIXEL 48
+#define CLK_MM_LVDS_CTS 49
+#define CLK_MM_SMI_LARB4 50
+#define CLK_MM_HDMI_HDCP 51
+#define CLK_MM_HDMI_HDCP24M 52
+#define CLK_MM_NR_CLK 53
+
+/* VDEC_SYS */
+
+#define CLK_VDEC_CKEN 1
+#define CLK_VDEC_LARB_CKEN 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VENC_SYS */
+
+#define CLK_VENC_CKE0 1
+#define CLK_VENC_CKE1 2
+#define CLK_VENC_CKE2 3
+#define CLK_VENC_CKE3 4
+#define CLK_VENC_NR_CLK 5
+
+/* VENCLT_SYS */
+
+#define CLK_VENCLT_CKE0 1
+#define CLK_VENCLT_CKE1 2
+#define CLK_VENCLT_NR_CLK 3
+
+#endif /* _DT_BINDINGS_CLK_MT8173_H */
diff --git a/include/dt-bindings/clock/mt8183-clk.h b/include/dt-bindings/clock/mt8183-clk.h
new file mode 100644
index 0000000..a7b470b
--- /dev/null
+++ b/include/dt-bindings/clock/mt8183-clk.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weiyi Lu
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8183_H
+#define _DT_BINDINGS_CLK_MT8183_H
+
+/* APMIXED */
+#define CLK_APMIXED_ARMPLL_LL 0
+#define CLK_APMIXED_ARMPLL_L 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIV2PLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_MFGPLL 7
+#define CLK_APMIXED_TVDPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_SSUSB_26M 11
+#define CLK_APMIXED_APPLL_26M 12
+#define CLK_APMIXED_MIPIC0_26M 13
+#define CLK_APMIXED_MDPLLGP_26M 14
+#define CLK_APMIXED_MMSYS_26M 15
+#define CLK_APMIXED_UFS_26M 16
+#define CLK_APMIXED_MIPIC1_26M 17
+#define CLK_APMIXED_MEMPLL_26M 18
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 19
+#define CLK_APMIXED_MIPID0_26M 20
+#define CLK_APMIXED_MIPID1_26M 21
+#define CLK_APMIXED_NR_CLK 22
+
+/* TOPCKGEN */
+#define CLK_TOP_MUX_AXI 0
+#define CLK_TOP_MUX_MM 1
+#define CLK_TOP_MUX_CAM 2
+#define CLK_TOP_MUX_MFG 3
+#define CLK_TOP_MUX_CAMTG 4
+#define CLK_TOP_MUX_UART 5
+#define CLK_TOP_MUX_SPI 6
+#define CLK_TOP_MUX_MSDC50_0_HCLK 7
+#define CLK_TOP_MUX_MSDC50_0 8
+#define CLK_TOP_MUX_MSDC30_1 9
+#define CLK_TOP_MUX_MSDC30_2 10
+#define CLK_TOP_MUX_AUDIO 11
+#define CLK_TOP_MUX_AUD_INTBUS 12
+#define CLK_TOP_MUX_FPWRAP_ULPOSC 13
+#define CLK_TOP_MUX_SCP 14
+#define CLK_TOP_MUX_ATB 15
+#define CLK_TOP_MUX_SSPM 16
+#define CLK_TOP_MUX_DPI0 17
+#define CLK_TOP_MUX_SCAM 18
+#define CLK_TOP_MUX_AUD_1 19
+#define CLK_TOP_MUX_AUD_2 20
+#define CLK_TOP_MUX_DISP_PWM 21
+#define CLK_TOP_MUX_SSUSB_TOP_XHCI 22
+#define CLK_TOP_MUX_USB_TOP 23
+#define CLK_TOP_MUX_SPM 24
+#define CLK_TOP_MUX_I2C 25
+#define CLK_TOP_MUX_F52M_MFG 26
+#define CLK_TOP_MUX_SENINF 27
+#define CLK_TOP_MUX_DXCC 28
+#define CLK_TOP_MUX_CAMTG2 29
+#define CLK_TOP_MUX_AUD_ENG1 30
+#define CLK_TOP_MUX_AUD_ENG2 31
+#define CLK_TOP_MUX_FAES_UFSFDE 32
+#define CLK_TOP_MUX_FUFS 33
+#define CLK_TOP_MUX_IMG 34
+#define CLK_TOP_MUX_DSP 35
+#define CLK_TOP_MUX_DSP1 36
+#define CLK_TOP_MUX_DSP2 37
+#define CLK_TOP_MUX_IPU_IF 38
+#define CLK_TOP_MUX_CAMTG3 39
+#define CLK_TOP_MUX_CAMTG4 40
+#define CLK_TOP_MUX_PMICSPI 41
+#define CLK_TOP_SYSPLL_CK 42
+#define CLK_TOP_SYSPLL_D2 43
+#define CLK_TOP_SYSPLL_D3 44
+#define CLK_TOP_SYSPLL_D5 45
+#define CLK_TOP_SYSPLL_D7 46
+#define CLK_TOP_SYSPLL_D2_D2 47
+#define CLK_TOP_SYSPLL_D2_D4 48
+#define CLK_TOP_SYSPLL_D2_D8 49
+#define CLK_TOP_SYSPLL_D2_D16 50
+#define CLK_TOP_SYSPLL_D3_D2 51
+#define CLK_TOP_SYSPLL_D3_D4 52
+#define CLK_TOP_SYSPLL_D3_D8 53
+#define CLK_TOP_SYSPLL_D5_D2 54
+#define CLK_TOP_SYSPLL_D5_D4 55
+#define CLK_TOP_SYSPLL_D7_D2 56
+#define CLK_TOP_SYSPLL_D7_D4 57
+#define CLK_TOP_UNIVPLL_CK 58
+#define CLK_TOP_UNIVPLL_D2 59
+#define CLK_TOP_UNIVPLL_D3 60
+#define CLK_TOP_UNIVPLL_D5 61
+#define CLK_TOP_UNIVPLL_D7 62
+#define CLK_TOP_UNIVPLL_D2_D2 63
+#define CLK_TOP_UNIVPLL_D2_D4 64
+#define CLK_TOP_UNIVPLL_D2_D8 65
+#define CLK_TOP_UNIVPLL_D3_D2 66
+#define CLK_TOP_UNIVPLL_D3_D4 67
+#define CLK_TOP_UNIVPLL_D3_D8 68
+#define CLK_TOP_UNIVPLL_D5_D2 69
+#define CLK_TOP_UNIVPLL_D5_D4 70
+#define CLK_TOP_UNIVPLL_D5_D8 71
+#define CLK_TOP_APLL1_CK 72
+#define CLK_TOP_APLL1_D2 73
+#define CLK_TOP_APLL1_D4 74
+#define CLK_TOP_APLL1_D8 75
+#define CLK_TOP_APLL2_CK 76
+#define CLK_TOP_APLL2_D2 77
+#define CLK_TOP_APLL2_D4 78
+#define CLK_TOP_APLL2_D8 79
+#define CLK_TOP_TVDPLL_CK 80
+#define CLK_TOP_TVDPLL_D2 81
+#define CLK_TOP_TVDPLL_D4 82
+#define CLK_TOP_TVDPLL_D8 83
+#define CLK_TOP_TVDPLL_D16 84
+#define CLK_TOP_MSDCPLL_CK 85
+#define CLK_TOP_MSDCPLL_D2 86
+#define CLK_TOP_MSDCPLL_D4 87
+#define CLK_TOP_MSDCPLL_D8 88
+#define CLK_TOP_MSDCPLL_D16 89
+#define CLK_TOP_AD_OSC_CK 90
+#define CLK_TOP_OSC_D2 91
+#define CLK_TOP_OSC_D4 92
+#define CLK_TOP_OSC_D8 93
+#define CLK_TOP_OSC_D16 94
+#define CLK_TOP_F26M_CK_D2 95
+#define CLK_TOP_MFGPLL_CK 96
+#define CLK_TOP_UNIVP_192M_CK 97
+#define CLK_TOP_UNIVP_192M_D2 98
+#define CLK_TOP_UNIVP_192M_D4 99
+#define CLK_TOP_UNIVP_192M_D8 100
+#define CLK_TOP_UNIVP_192M_D16 101
+#define CLK_TOP_UNIVP_192M_D32 102
+#define CLK_TOP_MMPLL_CK 103
+#define CLK_TOP_MMPLL_D4 104
+#define CLK_TOP_MMPLL_D4_D2 105
+#define CLK_TOP_MMPLL_D4_D4 106
+#define CLK_TOP_MMPLL_D5 107
+#define CLK_TOP_MMPLL_D5_D2 108
+#define CLK_TOP_MMPLL_D5_D4 109
+#define CLK_TOP_MMPLL_D6 110
+#define CLK_TOP_MMPLL_D7 111
+#define CLK_TOP_CLK26M 112
+#define CLK_TOP_CLK13M 113
+#define CLK_TOP_ULPOSC 114
+#define CLK_TOP_UNIVP_192M 115
+#define CLK_TOP_MUX_APLL_I2S0 116
+#define CLK_TOP_MUX_APLL_I2S1 117
+#define CLK_TOP_MUX_APLL_I2S2 118
+#define CLK_TOP_MUX_APLL_I2S3 119
+#define CLK_TOP_MUX_APLL_I2S4 120
+#define CLK_TOP_MUX_APLL_I2S5 121
+#define CLK_TOP_APLL12_DIV0 122
+#define CLK_TOP_APLL12_DIV1 123
+#define CLK_TOP_APLL12_DIV2 124
+#define CLK_TOP_APLL12_DIV3 125
+#define CLK_TOP_APLL12_DIV4 126
+#define CLK_TOP_APLL12_DIVB 127
+#define CLK_TOP_UNIVPLL 128
+#define CLK_TOP_ARMPLL_DIV_PLL1 129
+#define CLK_TOP_ARMPLL_DIV_PLL2 130
+#define CLK_TOP_UNIVPLL_D3_D16 131
+#define CLK_TOP_NR_CLK 132
+
+/* CAMSYS */
+#define CLK_CAM_LARB6 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_CAM 2
+#define CLK_CAM_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAM_CAMSV0 5
+#define CLK_CAM_CAMSV1 6
+#define CLK_CAM_CAMSV2 7
+#define CLK_CAM_CCU 8
+#define CLK_CAM_LARB3 9
+#define CLK_CAM_NR_CLK 10
+
+/* INFRACFG_AO */
+#define CLK_INFRA_PMIC_TMR 0
+#define CLK_INFRA_PMIC_AP 1
+#define CLK_INFRA_PMIC_MD 2
+#define CLK_INFRA_PMIC_CONN 3
+#define CLK_INFRA_SCPSYS 4
+#define CLK_INFRA_SEJ 5
+#define CLK_INFRA_APXGPT 6
+#define CLK_INFRA_ICUSB 7
+#define CLK_INFRA_GCE 8
+#define CLK_INFRA_THERM 9
+#define CLK_INFRA_I2C0 10
+#define CLK_INFRA_I2C1 11
+#define CLK_INFRA_I2C2 12
+#define CLK_INFRA_I2C3 13
+#define CLK_INFRA_PWM_HCLK 14
+#define CLK_INFRA_PWM1 15
+#define CLK_INFRA_PWM2 16
+#define CLK_INFRA_PWM3 17
+#define CLK_INFRA_PWM4 18
+#define CLK_INFRA_PWM 19
+#define CLK_INFRA_UART0 20
+#define CLK_INFRA_UART1 21
+#define CLK_INFRA_UART2 22
+#define CLK_INFRA_UART3 23
+#define CLK_INFRA_GCE_26M 24
+#define CLK_INFRA_CQ_DMA_FPC 25
+#define CLK_INFRA_BTIF 26
+#define CLK_INFRA_SPI0 27
+#define CLK_INFRA_MSDC0 28
+#define CLK_INFRA_MSDC1 29
+#define CLK_INFRA_MSDC2 30
+#define CLK_INFRA_MSDC0_SCK 31
+#define CLK_INFRA_DVFSRC 32
+#define CLK_INFRA_GCPU 33
+#define CLK_INFRA_TRNG 34
+#define CLK_INFRA_AUXADC 35
+#define CLK_INFRA_CPUM 36
+#define CLK_INFRA_CCIF1_AP 37
+#define CLK_INFRA_CCIF1_MD 38
+#define CLK_INFRA_AUXADC_MD 39
+#define CLK_INFRA_MSDC1_SCK 40
+#define CLK_INFRA_MSDC2_SCK 41
+#define CLK_INFRA_AP_DMA 42
+#define CLK_INFRA_XIU 43
+#define CLK_INFRA_DEVICE_APC 44
+#define CLK_INFRA_CCIF_AP 45
+#define CLK_INFRA_DEBUGSYS 46
+#define CLK_INFRA_AUDIO 47
+#define CLK_INFRA_CCIF_MD 48
+#define CLK_INFRA_DXCC_SEC_CORE 49
+#define CLK_INFRA_DXCC_AO 50
+#define CLK_INFRA_DRAMC_F26M 51
+#define CLK_INFRA_IRTX 52
+#define CLK_INFRA_DISP_PWM 53
+#define 
CLK_INFRA_CLDMA_BCLK 54 +#define CLK_INFRA_AUDIO_26M_BCLK 55 +#define CLK_INFRA_SPI1 56 +#define CLK_INFRA_I2C4 57 +#define CLK_INFRA_MODEM_TEMP_SHARE 58 +#define CLK_INFRA_SPI2 59 +#define CLK_INFRA_SPI3 60 +#define CLK_INFRA_UNIPRO_SCK 61 +#define CLK_INFRA_UNIPRO_TICK 62 +#define CLK_INFRA_UFS_MP_SAP_BCLK 63 +#define CLK_INFRA_MD32_BCLK 64 +#define CLK_INFRA_SSPM 65 +#define CLK_INFRA_UNIPRO_MBIST 66 +#define CLK_INFRA_SSPM_BUS_HCLK 67 +#define CLK_INFRA_I2C5 68 +#define CLK_INFRA_I2C5_ARBITER 69 +#define CLK_INFRA_I2C5_IMM 70 +#define CLK_INFRA_I2C1_ARBITER 71 +#define CLK_INFRA_I2C1_IMM 72 +#define CLK_INFRA_I2C2_ARBITER 73 +#define CLK_INFRA_I2C2_IMM 74 +#define CLK_INFRA_SPI4 75 +#define CLK_INFRA_SPI5 76 +#define CLK_INFRA_CQ_DMA 77 +#define CLK_INFRA_UFS 78 +#define CLK_INFRA_AES_UFSFDE 79 +#define CLK_INFRA_UFS_TICK 80 +#define CLK_INFRA_MSDC0_SELF 81 +#define CLK_INFRA_MSDC1_SELF 82 +#define CLK_INFRA_MSDC2_SELF 83 +#define CLK_INFRA_SSPM_26M_SELF 84 +#define CLK_INFRA_SSPM_32K_SELF 85 +#define CLK_INFRA_UFS_AXI 86 +#define CLK_INFRA_I2C6 87 +#define CLK_INFRA_AP_MSDC0 88 +#define CLK_INFRA_MD_MSDC0 89 +#define CLK_INFRA_USB 90 +#define CLK_INFRA_DEVMPU_BCLK 91 +#define CLK_INFRA_CCIF2_AP 92 +#define CLK_INFRA_CCIF2_MD 93 +#define CLK_INFRA_CCIF3_AP 94 +#define CLK_INFRA_CCIF3_MD 95 +#define CLK_INFRA_SEJ_F13M 96 +#define CLK_INFRA_AES_BCLK 97 +#define CLK_INFRA_I2C7 98 +#define CLK_INFRA_I2C8 99 +#define CLK_INFRA_FBIST2FPC 100 +#define CLK_INFRA_NR_CLK 101 + +/* PERICFG */ +#define CLK_PERI_AXI 0 +#define CLK_PERI_NR_CLK 1 + +/* MFGCFG */ +#define CLK_MFG_BG3D 0 +#define CLK_MFG_NR_CLK 1 + +/* IMG */ +#define CLK_IMG_OWE 0 +#define CLK_IMG_WPE_B 1 +#define CLK_IMG_WPE_A 2 +#define CLK_IMG_MFB 3 +#define CLK_IMG_RSC 4 +#define CLK_IMG_DPE 5 +#define CLK_IMG_FDVT 6 +#define CLK_IMG_DIP 7 +#define CLK_IMG_LARB2 8 +#define CLK_IMG_LARB5 9 +#define CLK_IMG_NR_CLK 10 + +/* MMSYS_CONFIG */ +#define CLK_MM_SMI_COMMON 0 +#define CLK_MM_SMI_LARB0 1 +#define CLK_MM_SMI_LARB1 2 +#define CLK_MM_GALS_COMM0 3 +#define CLK_MM_GALS_COMM1 4 +#define CLK_MM_GALS_CCU2MM 5 +#define CLK_MM_GALS_IPU12MM 6 +#define CLK_MM_GALS_IMG2MM 7 +#define CLK_MM_GALS_CAM2MM 8 +#define CLK_MM_GALS_IPU2MM 9 +#define CLK_MM_MDP_DL_TXCK 10 +#define CLK_MM_IPU_DL_TXCK 11 +#define CLK_MM_MDP_RDMA0 12 +#define CLK_MM_MDP_RDMA1 13 +#define CLK_MM_MDP_RSZ0 14 +#define CLK_MM_MDP_RSZ1 15 +#define CLK_MM_MDP_TDSHP 16 +#define CLK_MM_MDP_WROT0 17 +#define CLK_MM_FAKE_ENG 18 +#define CLK_MM_DISP_OVL0 19 +#define CLK_MM_DISP_OVL0_2L 20 +#define CLK_MM_DISP_OVL1_2L 21 +#define CLK_MM_DISP_RDMA0 22 +#define CLK_MM_DISP_RDMA1 23 +#define CLK_MM_DISP_WDMA0 24 +#define CLK_MM_DISP_COLOR0 25 +#define CLK_MM_DISP_CCORR0 26 +#define CLK_MM_DISP_AAL0 27 +#define CLK_MM_DISP_GAMMA0 28 +#define CLK_MM_DISP_DITHER0 29 +#define CLK_MM_DISP_SPLIT 30 +#define CLK_MM_DSI0_MM 31 +#define CLK_MM_DSI0_IF 32 +#define CLK_MM_DPI_MM 33 +#define CLK_MM_DPI_IF 34 +#define CLK_MM_FAKE_ENG2 35 +#define CLK_MM_MDP_DL_RX 36 +#define CLK_MM_IPU_DL_RX 37 +#define CLK_MM_26M 38 +#define CLK_MM_MMSYS_R2Y 39 +#define CLK_MM_DISP_RSZ 40 +#define CLK_MM_MDP_WDMA0 41 +#define CLK_MM_MDP_AAL 42 +#define CLK_MM_MDP_CCORR 43 +#define CLK_MM_DBI_MM 44 +#define CLK_MM_DBI_IF 45 +#define CLK_MM_NR_CLK 46 + +/* VDEC_GCON */ +#define CLK_VDEC_VDEC 0 +#define CLK_VDEC_LARB1 1 +#define CLK_VDEC_NR_CLK 2 + +/* VENC_GCON */ +#define CLK_VENC_LARB 0 +#define CLK_VENC_VENC 1 +#define CLK_VENC_JPGENC 2 +#define CLK_VENC_NR_CLK 3 + +/* AUDIO */ +#define CLK_AUDIO_TML 0 
+#define CLK_AUDIO_DAC_PREDIS 1 +#define CLK_AUDIO_DAC 2 +#define CLK_AUDIO_ADC 3 +#define CLK_AUDIO_APLL_TUNER 4 +#define CLK_AUDIO_APLL2_TUNER 5 +#define CLK_AUDIO_24M 6 +#define CLK_AUDIO_22M 7 +#define CLK_AUDIO_AFE 8 +#define CLK_AUDIO_I2S4 9 +#define CLK_AUDIO_I2S3 10 +#define CLK_AUDIO_I2S2 11 +#define CLK_AUDIO_I2S1 12 +#define CLK_AUDIO_PDN_ADDA6_ADC 13 +#define CLK_AUDIO_TDM 14 +#define CLK_AUDIO_NR_CLK 15 + +/* IPU_CONN */ +#define CLK_IPU_CONN_IPU 0 +#define CLK_IPU_CONN_AHB 1 +#define CLK_IPU_CONN_AXI 2 +#define CLK_IPU_CONN_ISP 3 +#define CLK_IPU_CONN_CAM_ADL 4 +#define CLK_IPU_CONN_IMG_ADL 5 +#define CLK_IPU_CONN_DAP_RX 6 +#define CLK_IPU_CONN_APB2AXI 7 +#define CLK_IPU_CONN_APB2AHB 8 +#define CLK_IPU_CONN_IPU_CAB1TO2 9 +#define CLK_IPU_CONN_IPU1_CAB1TO2 10 +#define CLK_IPU_CONN_IPU2_CAB1TO2 11 +#define CLK_IPU_CONN_CAB3TO3 12 +#define CLK_IPU_CONN_CAB2TO1 13 +#define CLK_IPU_CONN_CAB3TO1_SLICE 14 +#define CLK_IPU_CONN_NR_CLK 15 + +/* IPU_ADL */ +#define CLK_IPU_ADL_CABGEN 0 +#define CLK_IPU_ADL_NR_CLK 1 + +/* IPU_CORE0 */ +#define CLK_IPU_CORE0_JTAG 0 +#define CLK_IPU_CORE0_AXI 1 +#define CLK_IPU_CORE0_IPU 2 +#define CLK_IPU_CORE0_NR_CLK 3 + +/* IPU_CORE1 */ +#define CLK_IPU_CORE1_JTAG 0 +#define CLK_IPU_CORE1_AXI 1 +#define CLK_IPU_CORE1_IPU 2 +#define CLK_IPU_CORE1_NR_CLK 3 + +/* MCUCFG */ +#define CLK_MCU_MP0_SEL 0 +#define CLK_MCU_MP2_SEL 1 +#define CLK_MCU_BUS_SEL 2 +#define CLK_MCU_NR_CLK 3 + +#endif /* _DT_BINDINGS_CLK_MT8183_H */ diff --git a/include/dt-bindings/clock/mt8516-clk.h b/include/dt-bindings/clock/mt8516-clk.h new file mode 100644 index 0000000..816447b --- /dev/null +++ b/include/dt-bindings/clock/mt8516-clk.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + * Copyright (c) 2019 BayLibre, SAS. 
+ * Author: James Liao + */ + +#ifndef _DT_BINDINGS_CLK_MT8516_H +#define _DT_BINDINGS_CLK_MT8516_H + +/* APMIXEDSYS */ + +#define CLK_APMIXED_ARMPLL 0 +#define CLK_APMIXED_MAINPLL 1 +#define CLK_APMIXED_UNIVPLL 2 +#define CLK_APMIXED_MMPLL 3 +#define CLK_APMIXED_APLL1 4 +#define CLK_APMIXED_APLL2 5 +#define CLK_APMIXED_NR_CLK 6 + +/* INFRACFG */ + +#define CLK_IFR_MUX1_SEL 0 +#define CLK_IFR_ETH_25M_SEL 1 +#define CLK_IFR_I2C0_SEL 2 +#define CLK_IFR_I2C1_SEL 3 +#define CLK_IFR_I2C2_SEL 4 +#define CLK_IFR_NR_CLK 5 + +/* TOPCKGEN */ + +#define CLK_TOP_CLK_NULL 0 +#define CLK_TOP_I2S_INFRA_BCK 1 +#define CLK_TOP_MEMPLL 2 +#define CLK_TOP_DMPLL 3 +#define CLK_TOP_MAINPLL_D2 4 +#define CLK_TOP_MAINPLL_D4 5 +#define CLK_TOP_MAINPLL_D8 6 +#define CLK_TOP_MAINPLL_D16 7 +#define CLK_TOP_MAINPLL_D11 8 +#define CLK_TOP_MAINPLL_D22 9 +#define CLK_TOP_MAINPLL_D3 10 +#define CLK_TOP_MAINPLL_D6 11 +#define CLK_TOP_MAINPLL_D12 12 +#define CLK_TOP_MAINPLL_D5 13 +#define CLK_TOP_MAINPLL_D10 14 +#define CLK_TOP_MAINPLL_D20 15 +#define CLK_TOP_MAINPLL_D40 16 +#define CLK_TOP_MAINPLL_D7 17 +#define CLK_TOP_MAINPLL_D14 18 +#define CLK_TOP_UNIVPLL_D2 19 +#define CLK_TOP_UNIVPLL_D4 20 +#define CLK_TOP_UNIVPLL_D8 21 +#define CLK_TOP_UNIVPLL_D16 22 +#define CLK_TOP_UNIVPLL_D3 23 +#define CLK_TOP_UNIVPLL_D6 24 +#define CLK_TOP_UNIVPLL_D12 25 +#define CLK_TOP_UNIVPLL_D24 26 +#define CLK_TOP_UNIVPLL_D5 27 +#define CLK_TOP_UNIVPLL_D20 28 +#define CLK_TOP_MMPLL380M 29 +#define CLK_TOP_MMPLL_D2 30 +#define CLK_TOP_MMPLL_200M 31 +#define CLK_TOP_USB_PHY48M 32 +#define CLK_TOP_APLL1 33 +#define CLK_TOP_APLL1_D2 34 +#define CLK_TOP_APLL1_D4 35 +#define CLK_TOP_APLL1_D8 36 +#define CLK_TOP_APLL2 37 +#define CLK_TOP_APLL2_D2 38 +#define CLK_TOP_APLL2_D4 39 +#define CLK_TOP_APLL2_D8 40 +#define CLK_TOP_CLK26M 41 +#define CLK_TOP_CLK26M_D2 42 +#define CLK_TOP_AHB_INFRA_D2 43 +#define CLK_TOP_NFI1X 44 +#define CLK_TOP_ETH_D2 45 +#define CLK_TOP_THEM 46 +#define CLK_TOP_APDMA 47 +#define CLK_TOP_I2C0 48 +#define CLK_TOP_I2C1 49 +#define CLK_TOP_AUXADC1 50 +#define CLK_TOP_NFI 51 +#define CLK_TOP_NFIECC 52 +#define CLK_TOP_DEBUGSYS 53 +#define CLK_TOP_PWM 54 +#define CLK_TOP_UART0 55 +#define CLK_TOP_UART1 56 +#define CLK_TOP_BTIF 57 +#define CLK_TOP_USB 58 +#define CLK_TOP_FLASHIF_26M 59 +#define CLK_TOP_AUXADC2 60 +#define CLK_TOP_I2C2 61 +#define CLK_TOP_MSDC0 62 +#define CLK_TOP_MSDC1 63 +#define CLK_TOP_NFI2X 64 +#define CLK_TOP_PMICWRAP_AP 65 +#define CLK_TOP_SEJ 66 +#define CLK_TOP_MEMSLP_DLYER 67 +#define CLK_TOP_SPI 68 +#define CLK_TOP_APXGPT 69 +#define CLK_TOP_AUDIO 70 +#define CLK_TOP_PMICWRAP_MD 71 +#define CLK_TOP_PMICWRAP_CONN 72 +#define CLK_TOP_PMICWRAP_26M 73 +#define CLK_TOP_AUX_ADC 74 +#define CLK_TOP_AUX_TP 75 +#define CLK_TOP_MSDC2 76 +#define CLK_TOP_RBIST 77 +#define CLK_TOP_NFI_BUS 78 +#define CLK_TOP_GCE 79 +#define CLK_TOP_TRNG 80 +#define CLK_TOP_SEJ_13M 81 +#define CLK_TOP_AES 82 +#define CLK_TOP_PWM_B 83 +#define CLK_TOP_PWM1_FB 84 +#define CLK_TOP_PWM2_FB 85 +#define CLK_TOP_PWM3_FB 86 +#define CLK_TOP_PWM4_FB 87 +#define CLK_TOP_PWM5_FB 88 +#define CLK_TOP_USB_1P 89 +#define CLK_TOP_FLASHIF_FREERUN 90 +#define CLK_TOP_66M_ETH 91 +#define CLK_TOP_133M_ETH 92 +#define CLK_TOP_FETH_25M 93 +#define CLK_TOP_FETH_50M 94 +#define CLK_TOP_FLASHIF_AXI 95 +#define CLK_TOP_USBIF 96 +#define CLK_TOP_UART2 97 +#define CLK_TOP_BSI 98 +#define CLK_TOP_RG_SPINOR 99 +#define CLK_TOP_RG_MSDC2 100 +#define CLK_TOP_RG_ETH 101 +#define CLK_TOP_RG_AUD1 102 +#define CLK_TOP_RG_AUD2 103 +#define CLK_TOP_RG_AUD_ENGEN1 104 
+#define CLK_TOP_RG_AUD_ENGEN2 105 +#define CLK_TOP_RG_I2C 106 +#define CLK_TOP_RG_PWM_INFRA 107 +#define CLK_TOP_RG_AUD_SPDIF_IN 108 +#define CLK_TOP_RG_UART2 109 +#define CLK_TOP_RG_BSI 110 +#define CLK_TOP_RG_DBG_ATCLK 111 +#define CLK_TOP_RG_NFIECC 112 +#define CLK_TOP_RG_APLL1_D2_EN 113 +#define CLK_TOP_RG_APLL1_D4_EN 114 +#define CLK_TOP_RG_APLL1_D8_EN 115 +#define CLK_TOP_RG_APLL2_D2_EN 116 +#define CLK_TOP_RG_APLL2_D4_EN 117 +#define CLK_TOP_RG_APLL2_D8_EN 118 +#define CLK_TOP_APLL12_DIV0 119 +#define CLK_TOP_APLL12_DIV1 120 +#define CLK_TOP_APLL12_DIV2 121 +#define CLK_TOP_APLL12_DIV3 122 +#define CLK_TOP_APLL12_DIV4 123 +#define CLK_TOP_APLL12_DIV4B 124 +#define CLK_TOP_APLL12_DIV5 125 +#define CLK_TOP_APLL12_DIV5B 126 +#define CLK_TOP_APLL12_DIV6 127 +#define CLK_TOP_UART0_SEL 128 +#define CLK_TOP_EMI_DDRPHY_SEL 129 +#define CLK_TOP_AHB_INFRA_SEL 130 +#define CLK_TOP_MSDC0_SEL 131 +#define CLK_TOP_UART1_SEL 132 +#define CLK_TOP_MSDC1_SEL 133 +#define CLK_TOP_PMICSPI_SEL 134 +#define CLK_TOP_QAXI_AUD26M_SEL 135 +#define CLK_TOP_AUD_INTBUS_SEL 136 +#define CLK_TOP_NFI2X_PAD_SEL 137 +#define CLK_TOP_NFI1X_PAD_SEL 138 +#define CLK_TOP_DDRPHYCFG_SEL 139 +#define CLK_TOP_USB_78M_SEL 140 +#define CLK_TOP_SPINOR_SEL 141 +#define CLK_TOP_MSDC2_SEL 142 +#define CLK_TOP_ETH_SEL 143 +#define CLK_TOP_AUD1_SEL 144 +#define CLK_TOP_AUD2_SEL 145 +#define CLK_TOP_AUD_ENGEN1_SEL 146 +#define CLK_TOP_AUD_ENGEN2_SEL 147 +#define CLK_TOP_I2C_SEL 148 +#define CLK_TOP_AUD_I2S0_M_SEL 149 +#define CLK_TOP_AUD_I2S1_M_SEL 150 +#define CLK_TOP_AUD_I2S2_M_SEL 151 +#define CLK_TOP_AUD_I2S3_M_SEL 152 +#define CLK_TOP_AUD_I2S4_M_SEL 153 +#define CLK_TOP_AUD_I2S5_M_SEL 154 +#define CLK_TOP_AUD_SPDIF_B_SEL 155 +#define CLK_TOP_PWM_SEL 156 +#define CLK_TOP_SPI_SEL 157 +#define CLK_TOP_AUD_SPDIFIN_SEL 158 +#define CLK_TOP_UART2_SEL 159 +#define CLK_TOP_BSI_SEL 160 +#define CLK_TOP_DBG_ATCLK_SEL 161 +#define CLK_TOP_CSW_NFIECC_SEL 162 +#define CLK_TOP_NFIECC_SEL 163 +#define CLK_TOP_APLL12_CK_DIV0 164 +#define CLK_TOP_APLL12_CK_DIV1 165 +#define CLK_TOP_APLL12_CK_DIV2 166 +#define CLK_TOP_APLL12_CK_DIV3 167 +#define CLK_TOP_APLL12_CK_DIV4 168 +#define CLK_TOP_APLL12_CK_DIV4B 169 +#define CLK_TOP_APLL12_CK_DIV5 170 +#define CLK_TOP_APLL12_CK_DIV5B 171 +#define CLK_TOP_APLL12_CK_DIV6 172 +#define CLK_TOP_USB_78M 173 +#define CLK_TOP_MSDC0_INFRA 174 +#define CLK_TOP_MSDC1_INFRA 175 +#define CLK_TOP_MSDC2_INFRA 176 +#define CLK_TOP_NR_CLK 177 + +/* AUDSYS */ + +#define CLK_AUD_AFE 0 +#define CLK_AUD_I2S 1 +#define CLK_AUD_22M 2 +#define CLK_AUD_24M 3 +#define CLK_AUD_INTDIR 4 +#define CLK_AUD_APLL2_TUNER 5 +#define CLK_AUD_APLL_TUNER 6 +#define CLK_AUD_HDMI 7 +#define CLK_AUD_SPDF 8 +#define CLK_AUD_ADC 9 +#define CLK_AUD_DAC 10 +#define CLK_AUD_DAC_PREDIS 11 +#define CLK_AUD_TML 12 +#define CLK_AUD_NR_CLK 13 + +#endif /* _DT_BINDINGS_CLK_MT8516_H */ diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h new file mode 100644 index 0000000..f215226 --- /dev/null +++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Nuvoton NPCM7xx Clock Generator binding + * Clock binding numbers for all clocks supported by nuvoton,npcm7xx-clk + * + * Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com + * + */ + +#ifndef __DT_BINDINGS_CLOCK_NPCM7XX_H +#define __DT_BINDINGS_CLOCK_NPCM7XX_H + + +#define NPCM7XX_CLK_CPU 0 +#define NPCM7XX_CLK_GFX_PIXEL 1 +#define NPCM7XX_CLK_MC 2 +#define 
NPCM7XX_CLK_ADC 3 +#define NPCM7XX_CLK_AHB 4 +#define NPCM7XX_CLK_TIMER 5 +#define NPCM7XX_CLK_UART 6 +#define NPCM7XX_CLK_MMC 7 +#define NPCM7XX_CLK_SPI3 8 +#define NPCM7XX_CLK_PCI 9 +#define NPCM7XX_CLK_AXI 10 +#define NPCM7XX_CLK_APB4 11 +#define NPCM7XX_CLK_APB3 12 +#define NPCM7XX_CLK_APB2 13 +#define NPCM7XX_CLK_APB1 14 +#define NPCM7XX_CLK_APB5 15 +#define NPCM7XX_CLK_CLKOUT 16 +#define NPCM7XX_CLK_GFX 17 +#define NPCM7XX_CLK_SU 18 +#define NPCM7XX_CLK_SU48 19 +#define NPCM7XX_CLK_SDHC 20 +#define NPCM7XX_CLK_SPI0 21 +#define NPCM7XX_CLK_SPIX 22 + +#define NPCM7XX_CLK_REFCLK 23 +#define NPCM7XX_CLK_SYSBYPCK 24 +#define NPCM7XX_CLK_MCBYPCK 25 + +#define NPCM7XX_NUM_CLOCKS (NPCM7XX_CLK_MCBYPCK+1) + +#endif diff --git a/include/dt-bindings/clock/omap4.h b/include/dt-bindings/clock/omap4.h new file mode 100644 index 0000000..5167b2d --- /dev/null +++ b/include/dt-bindings/clock/omap4.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. + */ +#ifndef __DT_BINDINGS_CLK_OMAP4_H +#define __DT_BINDINGS_CLK_OMAP4_H + +#define OMAP4_CLKCTRL_OFFSET 0x20 +#define OMAP4_CLKCTRL_INDEX(offset) ((offset) - OMAP4_CLKCTRL_OFFSET) + +/* mpuss clocks */ +#define OMAP4_MPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* tesla clocks */ +#define OMAP4_DSP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* abe clocks */ +#define OMAP4_L4_ABE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_AESS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_MCPDM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_DMIC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_MCASP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) +#define OMAP4_MCBSP1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x48) +#define OMAP4_MCBSP2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50) +#define OMAP4_MCBSP3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58) +#define OMAP4_SLIMBUS1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60) +#define OMAP4_TIMER5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68) +#define OMAP4_TIMER6_CLKCTRL OMAP4_CLKCTRL_INDEX(0x70) +#define OMAP4_TIMER7_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78) +#define OMAP4_TIMER8_CLKCTRL OMAP4_CLKCTRL_INDEX(0x80) +#define OMAP4_WD_TIMER3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x88) + +/* l4_ao clocks */ +#define OMAP4_SMARTREFLEX_MPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_SMARTREFLEX_IVA_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_SMARTREFLEX_CORE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) + +/* l3_1 clocks */ +#define OMAP4_L3_MAIN_1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_2 clocks */ +#define OMAP4_L3_MAIN_2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_GPMC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_OCMC_RAM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) + +/* ducati clocks */ +#define OMAP4_IPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_dma clocks */ +#define OMAP4_DMA_SYSTEM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_emif clocks */ +#define OMAP4_DMM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_EMIF1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_EMIF2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) + +/* d2d clocks */ +#define OMAP4_C2C_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l4_cfg clocks */ +#define OMAP4_L4_CFG_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_SPINLOCK_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_MAILBOX_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) + +/* l3_instr clocks */ +#define OMAP4_L3_MAIN_3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_L3_INSTR_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_OCP_WP_NOC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) + +/* ivahd clocks */ +#define OMAP4_IVA_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define 
OMAP4_SL2IF_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) + +/* iss clocks */ +#define OMAP4_ISS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_FDIF_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) + +/* l3_dss clocks */ +#define OMAP4_DSS_CORE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_gfx clocks */ +#define OMAP4_GPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +/* l3_init clocks */ +#define OMAP4_MMC1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_MMC2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_HSI_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_USB_HOST_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58) +#define OMAP4_USB_OTG_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60) +#define OMAP4_USB_TLL_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68) +#define OMAP4_USB_HOST_FS_CLKCTRL OMAP4_CLKCTRL_INDEX(0xd0) +#define OMAP4_OCP2SCP_USB_PHY_CLKCTRL OMAP4_CLKCTRL_INDEX(0xe0) + +/* l4_per clocks */ +#define OMAP4_TIMER10_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28) +#define OMAP4_TIMER11_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_TIMER3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) +#define OMAP4_TIMER4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x48) +#define OMAP4_TIMER9_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50) +#define OMAP4_ELM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58) +#define OMAP4_GPIO2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60) +#define OMAP4_GPIO3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68) +#define OMAP4_GPIO4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x70) +#define OMAP4_GPIO5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78) +#define OMAP4_GPIO6_CLKCTRL OMAP4_CLKCTRL_INDEX(0x80) +#define OMAP4_HDQ1W_CLKCTRL OMAP4_CLKCTRL_INDEX(0x88) +#define OMAP4_I2C1_CLKCTRL OMAP4_CLKCTRL_INDEX(0xa0) +#define OMAP4_I2C2_CLKCTRL OMAP4_CLKCTRL_INDEX(0xa8) +#define OMAP4_I2C3_CLKCTRL OMAP4_CLKCTRL_INDEX(0xb0) +#define OMAP4_I2C4_CLKCTRL OMAP4_CLKCTRL_INDEX(0xb8) +#define OMAP4_L4_PER_CLKCTRL OMAP4_CLKCTRL_INDEX(0xc0) +#define OMAP4_MCBSP4_CLKCTRL OMAP4_CLKCTRL_INDEX(0xe0) +#define OMAP4_MCSPI1_CLKCTRL OMAP4_CLKCTRL_INDEX(0xf0) +#define OMAP4_MCSPI2_CLKCTRL OMAP4_CLKCTRL_INDEX(0xf8) +#define OMAP4_MCSPI3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x100) +#define OMAP4_MCSPI4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x108) +#define OMAP4_MMC3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x120) +#define OMAP4_MMC4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x128) +#define OMAP4_SLIMBUS2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x138) +#define OMAP4_UART1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x140) +#define OMAP4_UART2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x148) +#define OMAP4_UART3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x150) +#define OMAP4_UART4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x158) +#define OMAP4_MMC5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x160) + +/* l4_wkup clocks */ +#define OMAP4_L4_WKUP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) +#define OMAP4_WD_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30) +#define OMAP4_GPIO1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38) +#define OMAP4_TIMER1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40) +#define OMAP4_COUNTER_32K_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50) +#define OMAP4_KBD_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78) + +/* emu_sys clocks */ +#define OMAP4_DEBUGSS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h new file mode 100644 index 0000000..e541193 --- /dev/null +++ b/include/dt-bindings/clock/omap5.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2017 Texas Instruments, Inc. 
+ */ +#ifndef __DT_BINDINGS_CLK_OMAP5_H +#define __DT_BINDINGS_CLK_OMAP5_H + +#define OMAP5_CLKCTRL_OFFSET 0x20 +#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET) + +/* mpu clocks */ +#define OMAP5_MPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* dsp clocks */ +#define OMAP5_MMU_DSP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* abe clocks */ +#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48) +#define OMAP5_MCBSP2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_MCBSP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_TIMER5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_TIMER6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70) +#define OMAP5_TIMER7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) +#define OMAP5_TIMER8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80) + +/* l3main1 clocks */ +#define OMAP5_L3_MAIN_1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3main2 clocks */ +#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* dma clocks */ +#define OMAP5_DMA_SYSTEM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define OMAP5_DMM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_EMIF1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_EMIF2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) + +/* l4cfg clocks */ +#define OMAP5_L4_CFG_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_SPINLOCK_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MAILBOX_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) + +/* l3instr clocks */ +#define OMAP5_L3_MAIN_3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_L3_INSTR_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) + +/* l4per clocks */ +#define OMAP5_TIMER10_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_TIMER11_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_TIMER4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48) +#define OMAP5_TIMER9_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_GPIO2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x60) +#define OMAP5_GPIO3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_GPIO4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70) +#define OMAP5_GPIO5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) +#define OMAP5_GPIO6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80) +#define OMAP5_I2C1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa0) +#define OMAP5_I2C2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa8) +#define OMAP5_I2C3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb0) +#define OMAP5_I2C4_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb8) +#define OMAP5_L4_PER_CLKCTRL OMAP5_CLKCTRL_INDEX(0xc0) +#define OMAP5_MCSPI1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) +#define OMAP5_MCSPI2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf8) +#define OMAP5_MCSPI3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x100) +#define OMAP5_MCSPI4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x108) +#define OMAP5_GPIO7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x110) +#define OMAP5_GPIO8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x118) +#define OMAP5_MMC3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x120) +#define OMAP5_MMC4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x128) +#define OMAP5_UART1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x140) +#define OMAP5_UART2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x148) +#define OMAP5_UART3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x150) +#define OMAP5_UART4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x158) +#define OMAP5_MMC5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x160) +#define OMAP5_I2C5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x168) +#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170) +#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178) + 
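(A note on the two TI headers above: unlike the other bindings in this series, omap4.h and omap5.h encode each clkctrl clock as its control-register offset within the clock domain, rebased by 0x20 so that the domain's first register maps to index 0. A minimal self-contained sketch of that arithmetic, repeating the two macros from the header so it compiles on its own; the static asserts are illustrative and not part of the patch:

/* Illustrative only, not part of omap5.h: what OMAP5_CLKCTRL_INDEX()
 * evaluates to. Index = clkctrl register offset - 0x20. C11 or later.
 */
#define OMAP5_CLKCTRL_OFFSET 0x20
#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET)

_Static_assert(OMAP5_CLKCTRL_INDEX(0x20) == 0x000, "MPU: first register -> 0");
_Static_assert(OMAP5_CLKCTRL_INDEX(0x28) == 0x008, "TIMER10 in l4per");
_Static_assert(OMAP5_CLKCTRL_INDEX(0x178) == 0x158, "UART6 in l4per");

A device-tree consumer would typically hand such an index to a clkctrl provider node along with a bit position, e.g. clocks = <&l4per_clkctrl OMAP5_TIMER10_CLKCTRL 24>; here the provider label and the trailing 24 are illustrative values, not taken from this patch.)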
+/* dss clocks */ +#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* gpu clocks */ +#define OMAP5_GPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3init clocks */ +#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_USB_HOST_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_USB_TLL_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_SATA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x88) +#define OMAP5_OCP2SCP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe0) +#define OMAP5_OCP2SCP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe8) +#define OMAP5_USB_OTG_SS_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) + +/* wkupaon clocks */ +#define OMAP5_L4_WKUP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_WD_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_GPIO1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_COUNTER_32K_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_KBD_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) + +#endif diff --git a/include/dt-bindings/clock/oxsemi,ox810se.h b/include/dt-bindings/clock/oxsemi,ox810se.h new file mode 100644 index 0000000..7256365 --- /dev/null +++ b/include/dt-bindings/clock/oxsemi,ox810se.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Neil Armstrong + */ + +#ifndef DT_CLOCK_OXSEMI_OX810SE_H +#define DT_CLOCK_OXSEMI_OX810SE_H + +#define CLK_810_LEON 0 +#define CLK_810_DMA_SGDMA 1 +#define CLK_810_CIPHER 2 +#define CLK_810_SATA 3 +#define CLK_810_AUDIO 4 +#define CLK_810_USBMPH 5 +#define CLK_810_ETHA 6 +#define CLK_810_PCIEA 7 +#define CLK_810_NAND 8 + +#endif /* DT_CLOCK_OXSEMI_OX810SE_H */ diff --git a/include/dt-bindings/clock/oxsemi,ox820.h b/include/dt-bindings/clock/oxsemi,ox820.h new file mode 100644 index 0000000..55f4226 --- /dev/null +++ b/include/dt-bindings/clock/oxsemi,ox820.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Neil Armstrong + */ + +#ifndef DT_CLOCK_OXSEMI_OX820_H +#define DT_CLOCK_OXSEMI_OX820_H + +/* PLLs */ +#define CLK_820_PLLA 0 +#define CLK_820_PLLB 1 + +/* Gate Clocks */ +#define CLK_820_LEON 2 +#define CLK_820_DMA_SGDMA 3 +#define CLK_820_CIPHER 4 +#define CLK_820_SD 5 +#define CLK_820_SATA 6 +#define CLK_820_AUDIO 7 +#define CLK_820_USBMPH 8 +#define CLK_820_ETHA 9 +#define CLK_820_PCIEA 10 +#define CLK_820_NAND 11 +#define CLK_820_PCIEB 12 +#define CLK_820_ETHB 13 +#define CLK_820_REF600 14 +#define CLK_820_USBDEV 15 + +#endif /* DT_CLOCK_OXSEMI_OX820_H */ diff --git a/include/dt-bindings/clock/pistachio-clk.h b/include/dt-bindings/clock/pistachio-clk.h new file mode 100644 index 0000000..ec7a868 --- /dev/null +++ b/include/dt-bindings/clock/pistachio-clk.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Google, Inc. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_PISTACHIO_H +#define _DT_BINDINGS_CLOCK_PISTACHIO_H + +/* PLLs */ +#define CLK_MIPS_PLL 0 +#define CLK_AUDIO_PLL 1 +#define CLK_RPU_V_PLL 2 +#define CLK_RPU_L_PLL 3 +#define CLK_SYS_PLL 4 +#define CLK_WIFI_PLL 5 +#define CLK_BT_PLL 6 + +/* Fixed-factor clocks */ +#define CLK_WIFI_DIV4 16 +#define CLK_WIFI_DIV8 17 + +/* Gate clocks */ +#define CLK_MIPS 32 +#define CLK_AUDIO_IN 33 +#define CLK_AUDIO 34 +#define CLK_I2S 35 +#define CLK_SPDIF 36 +#define CLK_AUDIO_DAC 37 +#define CLK_RPU_V 38 +#define CLK_RPU_L 39 +#define CLK_RPU_SLEEP 40 +#define CLK_WIFI_PLL_GATE 41 +#define CLK_RPU_CORE 42 +#define CLK_WIFI_ADC 43 +#define CLK_WIFI_DAC 44 +#define CLK_USB_PHY 45 +#define CLK_ENET_IN 46 +#define CLK_ENET 47 +#define CLK_UART0 48 +#define CLK_UART1 49 +#define CLK_PERIPH_SYS 50 +#define CLK_SPI0 51 +#define CLK_SPI1 52 +#define CLK_EVENT_TIMER 53 +#define CLK_AUX_ADC_INTERNAL 54 +#define CLK_AUX_ADC 55 +#define CLK_SD_HOST 56 +#define CLK_BT 57 +#define CLK_BT_DIV4 58 +#define CLK_BT_DIV8 59 +#define CLK_BT_1MHZ 60 + +/* Divider clocks */ +#define CLK_MIPS_INTERNAL_DIV 64 +#define CLK_MIPS_DIV 65 +#define CLK_AUDIO_DIV 66 +#define CLK_I2S_DIV 67 +#define CLK_SPDIF_DIV 68 +#define CLK_AUDIO_DAC_DIV 69 +#define CLK_RPU_V_DIV 70 +#define CLK_RPU_L_DIV 71 +#define CLK_RPU_SLEEP_DIV 72 +#define CLK_RPU_CORE_DIV 73 +#define CLK_USB_PHY_DIV 74 +#define CLK_ENET_DIV 75 +#define CLK_UART0_INTERNAL_DIV 76 +#define CLK_UART0_DIV 77 +#define CLK_UART1_INTERNAL_DIV 78 +#define CLK_UART1_DIV 79 +#define CLK_SYS_INTERNAL_DIV 80 +#define CLK_SPI0_INTERNAL_DIV 81 +#define CLK_SPI0_DIV 82 +#define CLK_SPI1_INTERNAL_DIV 83 +#define CLK_SPI1_DIV 84 +#define CLK_EVENT_TIMER_INTERNAL_DIV 85 +#define CLK_EVENT_TIMER_DIV 86 +#define CLK_AUX_ADC_INTERNAL_DIV 87 +#define CLK_AUX_ADC_DIV 88 +#define CLK_SD_HOST_DIV 89 +#define CLK_BT_DIV 90 +#define CLK_BT_DIV4_DIV 91 +#define CLK_BT_DIV8_DIV 92 +#define CLK_BT_1MHZ_INTERNAL_DIV 93 +#define CLK_BT_1MHZ_DIV 94 + +/* Mux clocks */ +#define CLK_AUDIO_REF_MUX 96 +#define CLK_MIPS_PLL_MUX 97 +#define CLK_AUDIO_PLL_MUX 98 +#define CLK_AUDIO_MUX 99 +#define CLK_RPU_V_PLL_MUX 100 +#define CLK_RPU_L_PLL_MUX 101 +#define CLK_RPU_L_MUX 102 +#define CLK_WIFI_PLL_MUX 103 +#define CLK_WIFI_DIV4_MUX 104 +#define CLK_WIFI_DIV8_MUX 105 +#define CLK_RPU_CORE_MUX 106 +#define CLK_SYS_PLL_MUX 107 +#define CLK_ENET_MUX 108 +#define CLK_EVENT_TIMER_MUX 109 +#define CLK_SD_HOST_MUX 110 +#define CLK_BT_PLL_MUX 111 +#define CLK_DEBUG_MUX 112 + +#define CLK_NR_CLKS 113 + +/* Peripheral gate clocks */ +#define PERIPH_CLK_SYS 0 +#define PERIPH_CLK_SYS_BUS 1 +#define PERIPH_CLK_DDR 2 +#define PERIPH_CLK_ROM 3 +#define PERIPH_CLK_COUNTER_FAST 4 +#define PERIPH_CLK_COUNTER_SLOW 5 +#define PERIPH_CLK_IR 6 +#define PERIPH_CLK_WD 7 +#define PERIPH_CLK_PDM 8 +#define PERIPH_CLK_PWM 9 +#define PERIPH_CLK_I2C0 10 +#define PERIPH_CLK_I2C1 11 +#define PERIPH_CLK_I2C2 12 +#define PERIPH_CLK_I2C3 13 + +/* Peripheral divider clocks */ +#define PERIPH_CLK_ROM_DIV 32 +#define PERIPH_CLK_COUNTER_FAST_DIV 33 +#define PERIPH_CLK_COUNTER_SLOW_PRE_DIV 34 +#define PERIPH_CLK_COUNTER_SLOW_DIV 35 +#define PERIPH_CLK_IR_PRE_DIV 36 +#define PERIPH_CLK_IR_DIV 37 +#define PERIPH_CLK_WD_PRE_DIV 38 +#define PERIPH_CLK_WD_DIV 39 +#define PERIPH_CLK_PDM_PRE_DIV 40 +#define PERIPH_CLK_PDM_DIV 41 +#define PERIPH_CLK_PWM_PRE_DIV 42 +#define PERIPH_CLK_PWM_DIV 43 +#define PERIPH_CLK_I2C0_PRE_DIV 44 +#define PERIPH_CLK_I2C0_DIV 45 +#define PERIPH_CLK_I2C1_PRE_DIV 46 +#define PERIPH_CLK_I2C1_DIV 47 
+#define PERIPH_CLK_I2C2_PRE_DIV 48 +#define PERIPH_CLK_I2C2_DIV 49 +#define PERIPH_CLK_I2C3_PRE_DIV 50 +#define PERIPH_CLK_I2C3_DIV 51 + +#define PERIPH_CLK_NR_CLKS 52 + +/* System gate clocks */ +#define SYS_CLK_I2C0 0 +#define SYS_CLK_I2C1 1 +#define SYS_CLK_I2C2 2 +#define SYS_CLK_I2C3 3 +#define SYS_CLK_I2S_IN 4 +#define SYS_CLK_PAUD_OUT 5 +#define SYS_CLK_SPDIF_OUT 6 +#define SYS_CLK_SPI0_MASTER 7 +#define SYS_CLK_SPI0_SLAVE 8 +#define SYS_CLK_PWM 9 +#define SYS_CLK_UART0 10 +#define SYS_CLK_UART1 11 +#define SYS_CLK_SPI1 12 +#define SYS_CLK_MDC 13 +#define SYS_CLK_SD_HOST 14 +#define SYS_CLK_ENET 15 +#define SYS_CLK_IR 16 +#define SYS_CLK_WD 17 +#define SYS_CLK_TIMER 18 +#define SYS_CLK_I2S_OUT 24 +#define SYS_CLK_SPDIF_IN 25 +#define SYS_CLK_EVENT_TIMER 26 +#define SYS_CLK_HASH 27 + +#define SYS_CLK_NR_CLKS 28 + +/* Gates for external input clocks */ +#define EXT_CLK_AUDIO_IN 0 +#define EXT_CLK_ENET_IN 1 + +#define EXT_CLK_NR_CLKS 2 + +#endif /* _DT_BINDINGS_CLOCK_PISTACHIO_H */ diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h new file mode 100644 index 0000000..0010147 --- /dev/null +++ b/include/dt-bindings/clock/px30-cru.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_PX30_H +#define _DT_BINDINGS_CLK_ROCKCHIP_PX30_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_NPLL 4 +#define APLL_BOOST_H 5 +#define APLL_BOOST_L 6 +#define ARMCLK 7 + +/* sclk gates (special clocks) */ +#define USB480M 14 +#define SCLK_PDM 15 +#define SCLK_I2S0_TX 16 +#define SCLK_I2S0_TX_OUT 17 +#define SCLK_I2S0_RX 18 +#define SCLK_I2S0_RX_OUT 19 +#define SCLK_I2S1 20 +#define SCLK_I2S1_OUT 21 +#define SCLK_I2S2 22 +#define SCLK_I2S2_OUT 23 +#define SCLK_UART1 24 +#define SCLK_UART2 25 +#define SCLK_UART3 26 +#define SCLK_UART4 27 +#define SCLK_UART5 28 +#define SCLK_I2C0 29 +#define SCLK_I2C1 30 +#define SCLK_I2C2 31 +#define SCLK_I2C3 32 +#define SCLK_I2C4 33 +#define SCLK_PWM0 34 +#define SCLK_PWM1 35 +#define SCLK_SPI0 36 +#define SCLK_SPI1 37 +#define SCLK_TIMER0 38 +#define SCLK_TIMER1 39 +#define SCLK_TIMER2 40 +#define SCLK_TIMER3 41 +#define SCLK_TIMER4 42 +#define SCLK_TIMER5 43 +#define SCLK_TSADC 44 +#define SCLK_SARADC 45 +#define SCLK_OTP 46 +#define SCLK_OTP_USR 47 +#define SCLK_CRYPTO 48 +#define SCLK_CRYPTO_APK 49 +#define SCLK_DDRC 50 +#define SCLK_ISP 51 +#define SCLK_CIF_OUT 52 +#define SCLK_RGA_CORE 53 +#define SCLK_VOPB_PWM 54 +#define SCLK_NANDC 55 +#define SCLK_SDIO 56 +#define SCLK_EMMC 57 +#define SCLK_SFC 58 +#define SCLK_SDMMC 59 +#define SCLK_OTG_ADP 60 +#define SCLK_GMAC_SRC 61 +#define SCLK_GMAC 62 +#define SCLK_GMAC_RX_TX 63 +#define SCLK_MAC_REF 64 +#define SCLK_MAC_REFOUT 65 +#define SCLK_MAC_OUT 66 +#define SCLK_SDMMC_DRV 67 +#define SCLK_SDMMC_SAMPLE 68 +#define SCLK_SDIO_DRV 69 +#define SCLK_SDIO_SAMPLE 70 +#define SCLK_EMMC_DRV 71 +#define SCLK_EMMC_SAMPLE 72 +#define SCLK_GPU 73 +#define SCLK_PVTM 74 +#define SCLK_CORE_VPU 75 +#define SCLK_GMAC_RMII 76 +#define SCLK_UART2_SRC 77 +#define SCLK_NANDC_DIV 78 +#define SCLK_NANDC_DIV50 79 +#define SCLK_SDIO_DIV 80 +#define SCLK_SDIO_DIV50 81 +#define SCLK_EMMC_DIV 82 +#define SCLK_EMMC_DIV50 83 +#define SCLK_DDRCLK 84 +#define SCLK_UART1_SRC 85 + +/* dclk gates */ +#define DCLK_VOPB 150 +#define DCLK_VOPL 151 + +/* aclk gates */ +#define ACLK_GPU 170 +#define ACLK_BUS_PRE 171 +#define ACLK_CRYPTO 172 +#define ACLK_VI_PRE 173 +#define ACLK_VO_PRE 174 +#define ACLK_VPU 175 +#define 
ACLK_PERI_PRE 176 +#define ACLK_GMAC 178 +#define ACLK_CIF 179 +#define ACLK_ISP 180 +#define ACLK_VOPB 181 +#define ACLK_VOPL 182 +#define ACLK_RGA 183 +#define ACLK_GIC 184 +#define ACLK_DCF 186 +#define ACLK_DMAC 187 +#define ACLK_BUS_SRC 188 +#define ACLK_PERI_SRC 189 + +/* hclk gates */ +#define HCLK_BUS_PRE 240 +#define HCLK_CRYPTO 241 +#define HCLK_VI_PRE 242 +#define HCLK_VO_PRE 243 +#define HCLK_VPU 244 +#define HCLK_PERI_PRE 245 +#define HCLK_MMC_NAND 246 +#define HCLK_SDMMC 247 +#define HCLK_USB 248 +#define HCLK_CIF 249 +#define HCLK_ISP 250 +#define HCLK_VOPB 251 +#define HCLK_VOPL 252 +#define HCLK_RGA 253 +#define HCLK_NANDC 254 +#define HCLK_SDIO 255 +#define HCLK_EMMC 256 +#define HCLK_SFC 257 +#define HCLK_OTG 258 +#define HCLK_HOST 259 +#define HCLK_HOST_ARB 260 +#define HCLK_PDM 261 +#define HCLK_I2S0 262 +#define HCLK_I2S1 263 +#define HCLK_I2S2 264 + +/* pclk gates */ +#define PCLK_BUS_PRE 320 +#define PCLK_DDR 321 +#define PCLK_VO_PRE 322 +#define PCLK_GMAC 323 +#define PCLK_MIPI_DSI 324 +#define PCLK_MIPIDSIPHY 325 +#define PCLK_MIPICSIPHY 326 +#define PCLK_USB_GRF 327 +#define PCLK_DCF 328 +#define PCLK_UART1 329 +#define PCLK_UART2 330 +#define PCLK_UART3 331 +#define PCLK_UART4 332 +#define PCLK_UART5 333 +#define PCLK_I2C0 334 +#define PCLK_I2C1 335 +#define PCLK_I2C2 336 +#define PCLK_I2C3 337 +#define PCLK_I2C4 338 +#define PCLK_PWM0 339 +#define PCLK_PWM1 340 +#define PCLK_SPI0 341 +#define PCLK_SPI1 342 +#define PCLK_SARADC 343 +#define PCLK_TSADC 344 +#define PCLK_TIMER 345 +#define PCLK_OTP_NS 346 +#define PCLK_WDT_NS 347 +#define PCLK_GPIO1 348 +#define PCLK_GPIO2 349 +#define PCLK_GPIO3 350 +#define PCLK_ISP 351 +#define PCLK_CIF 352 +#define PCLK_OTP_PHY 353 + +#define CLK_NR_CLKS (PCLK_OTP_PHY + 1) + +/* pmu-clocks indices */ + +#define PLL_GPLL 1 + +#define SCLK_RTC32K_PMU 4 +#define SCLK_WIFI_PMU 5 +#define SCLK_UART0_PMU 6 +#define SCLK_PVTM_PMU 7 +#define PCLK_PMU_PRE 8 +#define SCLK_REF24M_PMU 9 +#define SCLK_USBPHY_REF 10 +#define SCLK_MIPIDSIPHY_REF 11 + +#define XIN24M_DIV 12 + +#define PCLK_GPIO0_PMU 20 +#define PCLK_UART0_PMU 21 + +#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NOC 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_DAP 16 +#define SRST_CORE_PVTM 17 +#define SRST_GPU 18 +#define SRST_GPU_NIU 19 +#define SRST_UPCTL2 20 +#define SRST_UPCTL2_A 21 +#define SRST_UPCTL2_P 22 +#define SRST_MSCH 23 +#define SRST_MSCH_P 24 +#define SRST_DDRMON_P 25 +#define SRST_DDRSTDBY_P 26 +#define SRST_DDRSTDBY 27 +#define SRST_DDRGRF_p 28 +#define SRST_AXI_SPLIT_A 29 +#define SRST_AXI_CMD_A 30 +#define SRST_AXI_CMD_P 31 + +#define SRST_DDRPHY 32 +#define SRST_DDRPHYDIV 33 +#define SRST_DDRPHY_P 34 +#define SRST_VPU_A 36 +#define SRST_VPU_NIU_A 37 +#define SRST_VPU_H 38 +#define SRST_VPU_NIU_H 39 +#define SRST_VI_NIU_A 40 +#define SRST_VI_NIU_H 41 +#define SRST_ISP_H 42 +#define SRST_ISP 43 +#define SRST_CIF_A 44 +#define SRST_CIF_H 45 +#define SRST_CIF_PCLKIN 46 +#define SRST_MIPICSIPHY_P 47 + +#define SRST_VO_NIU_A 48 +#define SRST_VO_NIU_H 49 +#define SRST_VO_NIU_P 50 +#define SRST_VOPB_A 51 +#define SRST_VOPB_H 52 +#define SRST_VOPB 53 +#define SRST_PWM_VOPB 54 +#define 
SRST_VOPL_A 55 +#define SRST_VOPL_H 56 +#define SRST_VOPL 57 +#define SRST_RGA_A 58 +#define SRST_RGA_H 59 +#define SRST_RGA 60 +#define SRST_MIPIDSI_HOST_P 61 +#define SRST_MIPIDSIPHY_P 62 +#define SRST_VPU_CORE 63 + +#define SRST_PERI_NIU_A 64 +#define SRST_USB_NIU_H 65 +#define SRST_USB2OTG_H 66 +#define SRST_USB2OTG 67 +#define SRST_USB2OTG_ADP 68 +#define SRST_USB2HOST_H 69 +#define SRST_USB2HOST_ARB_H 70 +#define SRST_USB2HOST_AUX_H 71 +#define SRST_USB2HOST_EHCI 72 +#define SRST_USB2HOST 73 +#define SRST_USBPHYPOR 74 +#define SRST_USBPHY_OTG_PORT 75 +#define SRST_USBPHY_HOST_PORT 76 +#define SRST_USBPHY_GRF 77 +#define SRST_CPU_BOOST_P 78 +#define SRST_CPU_BOOST 79 + +#define SRST_MMC_NAND_NIU_H 80 +#define SRST_SDIO_H 81 +#define SRST_EMMC_H 82 +#define SRST_SFC_H 83 +#define SRST_SFC 84 +#define SRST_SDCARD_NIU_H 85 +#define SRST_SDMMC_H 86 +#define SRST_NANDC_H 89 +#define SRST_NANDC 90 +#define SRST_GMAC_NIU_A 92 +#define SRST_GMAC_NIU_P 93 +#define SRST_GMAC_A 94 + +#define SRST_PMU_NIU_P 96 +#define SRST_PMU_SGRF_P 97 +#define SRST_PMU_GRF_P 98 +#define SRST_PMU 99 +#define SRST_PMU_MEM_P 100 +#define SRST_PMU_GPIO0_P 101 +#define SRST_PMU_UART0_P 102 +#define SRST_PMU_CRU_P 103 +#define SRST_PMU_PVTM 104 +#define SRST_PMU_UART 105 +#define SRST_PMU_NIU_H 106 +#define SRST_PMU_DDR_FAIL_SAVE 107 +#define SRST_PMU_CORE_PERF_A 108 +#define SRST_PMU_CORE_GRF_P 109 +#define SRST_PMU_GPU_PERF_A 110 +#define SRST_PMU_GPU_GRF_P 111 + +#define SRST_CRYPTO_NIU_A 112 +#define SRST_CRYPTO_NIU_H 113 +#define SRST_CRYPTO_A 114 +#define SRST_CRYPTO_H 115 +#define SRST_CRYPTO 116 +#define SRST_CRYPTO_APK 117 +#define SRST_BUS_NIU_H 120 +#define SRST_USB_NIU_P 121 +#define SRST_BUS_TOP_NIU_P 122 +#define SRST_INTMEM_A 123 +#define SRST_GIC_A 124 +#define SRST_ROM_H 126 +#define SRST_DCF_A 127 + +#define SRST_DCF_P 128 +#define SRST_PDM_H 129 +#define SRST_PDM 130 +#define SRST_I2S0_H 131 +#define SRST_I2S0_TX 132 +#define SRST_I2S1_H 133 +#define SRST_I2S1 134 +#define SRST_I2S2_H 135 +#define SRST_I2S2 136 +#define SRST_UART1_P 137 +#define SRST_UART1 138 +#define SRST_UART2_P 139 +#define SRST_UART2 140 +#define SRST_UART3_P 141 +#define SRST_UART3 142 +#define SRST_UART4_P 143 + +#define SRST_UART4 144 +#define SRST_UART5_P 145 +#define SRST_UART5 146 +#define SRST_I2C0_P 147 +#define SRST_I2C0 148 +#define SRST_I2C1_P 149 +#define SRST_I2C1 150 +#define SRST_I2C2_P 151 +#define SRST_I2C2 152 +#define SRST_I2C3_P 153 +#define SRST_I2C3 154 +#define SRST_PWM0_P 157 +#define SRST_PWM0 158 +#define SRST_PWM1_P 159 + +#define SRST_PWM1 160 +#define SRST_SPI0_P 161 +#define SRST_SPI0 162 +#define SRST_SPI1_P 163 +#define SRST_SPI1 164 +#define SRST_SARADC_P 165 +#define SRST_SARADC 166 +#define SRST_TSADC_P 167 +#define SRST_TSADC 168 +#define SRST_TIMER_P 169 +#define SRST_TIMER0 170 +#define SRST_TIMER1 171 +#define SRST_TIMER2 172 +#define SRST_TIMER3 173 +#define SRST_TIMER4 174 +#define SRST_TIMER5 175 + +#define SRST_OTP_NS_P 176 +#define SRST_OTP_NS_SBPI 177 +#define SRST_OTP_NS_USR 178 +#define SRST_OTP_PHY_P 179 +#define SRST_OTP_PHY 180 +#define SRST_WDT_NS_P 181 +#define SRST_GPIO1_P 182 +#define SRST_GPIO2_P 183 +#define SRST_GPIO3_P 184 +#define SRST_SGRF_P 185 +#define SRST_GRF_P 186 +#define SRST_I2S0_RX 191 + +#endif diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h new file mode 100644 index 0000000..ce3d6b6 --- /dev/null +++ b/include/dt-bindings/clock/pxa-clock.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * 
Inspired by original work from pxa2xx-regs.h by Nicolas Pitre + * Copyright (C) 2014 Robert Jarzmik + */ + +#ifndef __DT_BINDINGS_CLOCK_PXA2XX_H__ +#define __DT_BINDINGS_CLOCK_PXA2XX_H__ + +#define CLK_NONE 0 +#define CLK_1WIRE 1 +#define CLK_AC97 2 +#define CLK_AC97CONF 3 +#define CLK_ASSP 4 +#define CLK_BOOT 5 +#define CLK_BTUART 6 +#define CLK_CAMERA 7 +#define CLK_CIR 8 +#define CLK_CORE 9 +#define CLK_DMC 10 +#define CLK_FFUART 11 +#define CLK_FICP 12 +#define CLK_GPIO 13 +#define CLK_HSIO2 14 +#define CLK_HWUART 15 +#define CLK_I2C 16 +#define CLK_I2S 17 +#define CLK_IM 18 +#define CLK_INC 19 +#define CLK_ISC 20 +#define CLK_KEYPAD 21 +#define CLK_LCD 22 +#define CLK_MEMC 23 +#define CLK_MEMSTK 24 +#define CLK_MINI_IM 25 +#define CLK_MINI_LCD 26 +#define CLK_MMC 27 +#define CLK_MMC1 28 +#define CLK_MMC2 29 +#define CLK_MMC3 30 +#define CLK_MSL 31 +#define CLK_MSL0 32 +#define CLK_MVED 33 +#define CLK_NAND 34 +#define CLK_NSSP 35 +#define CLK_OSTIMER 36 +#define CLK_PWM0 37 +#define CLK_PWM1 38 +#define CLK_PWM2 39 +#define CLK_PWM3 40 +#define CLK_PWRI2C 41 +#define CLK_PXA300_GCU 42 +#define CLK_PXA320_GCU 43 +#define CLK_SMC 44 +#define CLK_SSP 45 +#define CLK_SSP1 46 +#define CLK_SSP2 47 +#define CLK_SSP3 48 +#define CLK_SSP4 49 +#define CLK_STUART 50 +#define CLK_TOUCH 51 +#define CLK_TPM 52 +#define CLK_UDC 53 +#define CLK_USB 54 +#define CLK_USB2 55 +#define CLK_USBH 56 +#define CLK_USBHOST 57 +#define CLK_USIM 58 +#define CLK_USIM1 59 +#define CLK_USMI0 60 +#define CLK_OSC32k768 61 +#define CLK_MAX 62 + +#endif diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h new file mode 100644 index 0000000..4f7a2d2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H + +/* CAM_CC clock registers */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_AREG_CLK 1 +#define CAM_CC_BPS_AXI_CLK 2 +#define CAM_CC_BPS_CLK 3 +#define CAM_CC_BPS_CLK_SRC 4 +#define CAM_CC_CAMNOC_ATB_CLK 5 +#define CAM_CC_CAMNOC_AXI_CLK 6 +#define CAM_CC_CCI_CLK 7 +#define CAM_CC_CCI_CLK_SRC 8 +#define CAM_CC_CPAS_AHB_CLK 9 +#define CAM_CC_CPHY_RX_CLK_SRC 10 +#define CAM_CC_CSI0PHYTIMER_CLK 11 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 12 +#define CAM_CC_CSI1PHYTIMER_CLK 13 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 14 +#define CAM_CC_CSI2PHYTIMER_CLK 15 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 16 +#define CAM_CC_CSI3PHYTIMER_CLK 17 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 18 +#define CAM_CC_CSIPHY0_CLK 19 +#define CAM_CC_CSIPHY1_CLK 20 +#define CAM_CC_CSIPHY2_CLK 21 +#define CAM_CC_CSIPHY3_CLK 22 +#define CAM_CC_FAST_AHB_CLK_SRC 23 +#define CAM_CC_FD_CORE_CLK 24 +#define CAM_CC_FD_CORE_CLK_SRC 25 +#define CAM_CC_FD_CORE_UAR_CLK 26 +#define CAM_CC_ICP_APB_CLK 27 +#define CAM_CC_ICP_ATB_CLK 28 +#define CAM_CC_ICP_CLK 29 +#define CAM_CC_ICP_CLK_SRC 30 +#define CAM_CC_ICP_CTI_CLK 31 +#define CAM_CC_ICP_TS_CLK 32 +#define CAM_CC_IFE_0_AXI_CLK 33 +#define CAM_CC_IFE_0_CLK 34 +#define CAM_CC_IFE_0_CLK_SRC 35 +#define CAM_CC_IFE_0_CPHY_RX_CLK 36 +#define CAM_CC_IFE_0_CSID_CLK 37 +#define CAM_CC_IFE_0_CSID_CLK_SRC 38 +#define CAM_CC_IFE_0_DSP_CLK 39 +#define CAM_CC_IFE_1_AXI_CLK 40 +#define CAM_CC_IFE_1_CLK 41 +#define CAM_CC_IFE_1_CLK_SRC 42 +#define CAM_CC_IFE_1_CPHY_RX_CLK 43 +#define CAM_CC_IFE_1_CSID_CLK 44 +#define CAM_CC_IFE_1_CSID_CLK_SRC 45 +#define CAM_CC_IFE_1_DSP_CLK 46 +#define CAM_CC_IFE_LITE_CLK 47 +#define CAM_CC_IFE_LITE_CLK_SRC 48 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49 +#define CAM_CC_IFE_LITE_CSID_CLK 50 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51 +#define CAM_CC_IPE_0_AHB_CLK 52 +#define CAM_CC_IPE_0_AREG_CLK 53 +#define CAM_CC_IPE_0_AXI_CLK 54 +#define CAM_CC_IPE_0_CLK 55 +#define CAM_CC_IPE_0_CLK_SRC 56 +#define CAM_CC_IPE_1_AHB_CLK 57 +#define CAM_CC_IPE_1_AREG_CLK 58 +#define CAM_CC_IPE_1_AXI_CLK 59 +#define CAM_CC_IPE_1_CLK 60 +#define CAM_CC_IPE_1_CLK_SRC 61 +#define CAM_CC_JPEG_CLK 62 +#define CAM_CC_JPEG_CLK_SRC 63 +#define CAM_CC_LRME_CLK 64 +#define CAM_CC_LRME_CLK_SRC 65 +#define CAM_CC_MCLK0_CLK 66 +#define CAM_CC_MCLK0_CLK_SRC 67 +#define CAM_CC_MCLK1_CLK 68 +#define CAM_CC_MCLK1_CLK_SRC 69 +#define CAM_CC_MCLK2_CLK 70 +#define CAM_CC_MCLK2_CLK_SRC 71 +#define CAM_CC_MCLK3_CLK 72 +#define CAM_CC_MCLK3_CLK_SRC 73 +#define CAM_CC_PLL0 74 +#define CAM_CC_PLL0_OUT_EVEN 75 +#define CAM_CC_PLL1 76 +#define CAM_CC_PLL1_OUT_EVEN 77 +#define CAM_CC_PLL2 78 +#define CAM_CC_PLL2_OUT_EVEN 79 +#define CAM_CC_PLL3 80 +#define CAM_CC_PLL3_OUT_EVEN 81 +#define CAM_CC_SLOW_AHB_CLK_SRC 82 +#define CAM_CC_SOC_AHB_CLK 83 +#define CAM_CC_SYS_TMR_CLK 84 + +/* CAM_CC Resets */ +#define TITAN_CAM_CC_CCI_BCR 0 +#define TITAN_CAM_CC_CPAS_BCR 1 +#define TITAN_CAM_CC_CSI0PHY_BCR 2 +#define TITAN_CAM_CC_CSI1PHY_BCR 3 +#define TITAN_CAM_CC_CSI2PHY_BCR 4 +#define TITAN_CAM_CC_MCLK0_BCR 5 +#define TITAN_CAM_CC_MCLK1_BCR 6 +#define TITAN_CAM_CC_MCLK2_BCR 7 +#define TITAN_CAM_CC_MCLK3_BCR 8 +#define TITAN_CAM_CC_TITAN_TOP_BCR 9 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define IPE_1_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define TITAN_TOP_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h new file mode 
100644 index 0000000..11eed4b --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H + +/* DISP_CC clock registers */ +#define DISP_CC_MDSS_AHB_CLK 0 +#define DISP_CC_MDSS_AXI_CLK 1 +#define DISP_CC_MDSS_BYTE0_CLK 2 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 3 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 4 +#define DISP_CC_MDSS_BYTE1_CLK 5 +#define DISP_CC_MDSS_BYTE1_CLK_SRC 6 +#define DISP_CC_MDSS_BYTE1_INTF_CLK 7 +#define DISP_CC_MDSS_ESC0_CLK 8 +#define DISP_CC_MDSS_ESC0_CLK_SRC 9 +#define DISP_CC_MDSS_ESC1_CLK 10 +#define DISP_CC_MDSS_ESC1_CLK_SRC 11 +#define DISP_CC_MDSS_MDP_CLK 12 +#define DISP_CC_MDSS_MDP_CLK_SRC 13 +#define DISP_CC_MDSS_MDP_LUT_CLK 14 +#define DISP_CC_MDSS_PCLK0_CLK 15 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 16 +#define DISP_CC_MDSS_PCLK1_CLK 17 +#define DISP_CC_MDSS_PCLK1_CLK_SRC 18 +#define DISP_CC_MDSS_ROT_CLK 19 +#define DISP_CC_MDSS_ROT_CLK_SRC 20 +#define DISP_CC_MDSS_RSCC_AHB_CLK 21 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 22 +#define DISP_CC_MDSS_VSYNC_CLK 23 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 24 +#define DISP_CC_PLL0 25 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26 +#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27 + +/* DISP_CC Reset */ +#define DISP_CC_MDSS_RSCC_BCR 0 + +/* DISP_CC GDSCR */ +#define MDSS_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h new file mode 100644 index 0000000..7f657cf --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_APQ_GCC_8084_H +#define _DT_BINDINGS_CLK_APQ_GCC_8084_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define GPLL1 2 +#define GPLL1_VOTE 3 +#define GPLL2 4 +#define GPLL2_VOTE 5 +#define GPLL3 6 +#define GPLL3_VOTE 7 +#define GPLL4 8 +#define GPLL4_VOTE 9 +#define CONFIG_NOC_CLK_SRC 10 +#define PERIPH_NOC_CLK_SRC 11 +#define SYSTEM_NOC_CLK_SRC 12 +#define BLSP_UART_SIM_CLK_SRC 13 +#define QDSS_TSCTR_CLK_SRC 14 +#define UFS_AXI_CLK_SRC 15 +#define RPM_CLK_SRC 16 +#define KPSS_AHB_CLK_SRC 17 +#define QDSS_AT_CLK_SRC 18 +#define BIMC_DDR_CLK_SRC 19 +#define USB30_MASTER_CLK_SRC 20 +#define USB30_SEC_MASTER_CLK_SRC 21 +#define USB_HSIC_AHB_CLK_SRC 22 +#define MMSS_BIMC_GFX_CLK_SRC 23 +#define QDSS_STM_CLK_SRC 24 +#define ACC_CLK_SRC 25 +#define SEC_CTRL_CLK_SRC 26 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 27 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 28 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 29 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 30 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 31 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 32 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 33 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 34 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 35 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 36 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 37 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 38 +#define BLSP1_UART1_APPS_CLK_SRC 39 +#define BLSP1_UART2_APPS_CLK_SRC 40 +#define BLSP1_UART3_APPS_CLK_SRC 41 +#define BLSP1_UART4_APPS_CLK_SRC 42 +#define BLSP1_UART5_APPS_CLK_SRC 43 +#define BLSP1_UART6_APPS_CLK_SRC 44 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 45 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 46 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 47 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 48 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 49 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 50 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 51 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 52 +#define BLSP2_QUP5_I2C_APPS_CLK_SRC 53 +#define BLSP2_QUP5_SPI_APPS_CLK_SRC 54 +#define BLSP2_QUP6_I2C_APPS_CLK_SRC 55 +#define BLSP2_QUP6_SPI_APPS_CLK_SRC 56 +#define BLSP2_UART1_APPS_CLK_SRC 57 +#define BLSP2_UART2_APPS_CLK_SRC 58 +#define BLSP2_UART3_APPS_CLK_SRC 59 +#define BLSP2_UART4_APPS_CLK_SRC 60 +#define BLSP2_UART5_APPS_CLK_SRC 61 +#define BLSP2_UART6_APPS_CLK_SRC 62 +#define CE1_CLK_SRC 63 +#define CE2_CLK_SRC 64 +#define CE3_CLK_SRC 65 +#define GP1_CLK_SRC 66 +#define GP2_CLK_SRC 67 +#define GP3_CLK_SRC 68 +#define PDM2_CLK_SRC 69 +#define QDSS_TRACECLKIN_CLK_SRC 70 +#define RBCPR_CLK_SRC 71 +#define SATA_ASIC0_CLK_SRC 72 +#define SATA_PMALIVE_CLK_SRC 73 +#define SATA_RX_CLK_SRC 74 +#define SATA_RX_OOB_CLK_SRC 75 +#define SDCC1_APPS_CLK_SRC 76 +#define SDCC2_APPS_CLK_SRC 77 +#define SDCC3_APPS_CLK_SRC 78 +#define SDCC4_APPS_CLK_SRC 79 +#define GCC_SNOC_BUS_TIMEOUT0_AHB_CLK 80 +#define SPMI_AHB_CLK_SRC 81 +#define SPMI_SER_CLK_SRC 82 +#define TSIF_REF_CLK_SRC 83 +#define USB30_MOCK_UTMI_CLK_SRC 84 +#define USB30_SEC_MOCK_UTMI_CLK_SRC 85 +#define USB_HS_SYSTEM_CLK_SRC 86 +#define USB_HSIC_CLK_SRC 87 +#define USB_HSIC_IO_CAL_CLK_SRC 88 +#define USB_HSIC_MOCK_UTMI_CLK_SRC 89 +#define USB_HSIC_SYSTEM_CLK_SRC 90 +#define GCC_BAM_DMA_AHB_CLK 91 +#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK 92 +#define DDR_CLK_SRC 93 +#define GCC_BIMC_CFG_AHB_CLK 94 +#define GCC_BIMC_CLK 95 +#define GCC_BIMC_KPSS_AXI_CLK 96 +#define GCC_BIMC_SLEEP_CLK 97 +#define GCC_BIMC_SYSNOC_AXI_CLK 98 +#define GCC_BIMC_XO_CLK 99 +#define GCC_BLSP1_AHB_CLK 100 +#define GCC_BLSP1_SLEEP_CLK 101 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 102 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 103 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 104 +#define 
GCC_BLSP1_QUP2_SPI_APPS_CLK 105 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 106 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 107 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 108 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 109 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 110 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 111 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 112 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 113 +#define GCC_BLSP1_UART1_APPS_CLK 114 +#define GCC_BLSP1_UART1_SIM_CLK 115 +#define GCC_BLSP1_UART2_APPS_CLK 116 +#define GCC_BLSP1_UART2_SIM_CLK 117 +#define GCC_BLSP1_UART3_APPS_CLK 118 +#define GCC_BLSP1_UART3_SIM_CLK 119 +#define GCC_BLSP1_UART4_APPS_CLK 120 +#define GCC_BLSP1_UART4_SIM_CLK 121 +#define GCC_BLSP1_UART5_APPS_CLK 122 +#define GCC_BLSP1_UART5_SIM_CLK 123 +#define GCC_BLSP1_UART6_APPS_CLK 124 +#define GCC_BLSP1_UART6_SIM_CLK 125 +#define GCC_BLSP2_AHB_CLK 126 +#define GCC_BLSP2_SLEEP_CLK 127 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 128 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 129 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 130 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 131 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 132 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 133 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 134 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 135 +#define GCC_BLSP2_QUP5_I2C_APPS_CLK 136 +#define GCC_BLSP2_QUP5_SPI_APPS_CLK 137 +#define GCC_BLSP2_QUP6_I2C_APPS_CLK 138 +#define GCC_BLSP2_QUP6_SPI_APPS_CLK 139 +#define GCC_BLSP2_UART1_APPS_CLK 140 +#define GCC_BLSP2_UART1_SIM_CLK 141 +#define GCC_BLSP2_UART2_APPS_CLK 142 +#define GCC_BLSP2_UART2_SIM_CLK 143 +#define GCC_BLSP2_UART3_APPS_CLK 144 +#define GCC_BLSP2_UART3_SIM_CLK 145 +#define GCC_BLSP2_UART4_APPS_CLK 146 +#define GCC_BLSP2_UART4_SIM_CLK 147 +#define GCC_BLSP2_UART5_APPS_CLK 148 +#define GCC_BLSP2_UART5_SIM_CLK 149 +#define GCC_BLSP2_UART6_APPS_CLK 150 +#define GCC_BLSP2_UART6_SIM_CLK 151 +#define GCC_BOOT_ROM_AHB_CLK 152 +#define GCC_CE1_AHB_CLK 153 +#define GCC_CE1_AXI_CLK 154 +#define GCC_CE1_CLK 155 +#define GCC_CE2_AHB_CLK 156 +#define GCC_CE2_AXI_CLK 157 +#define GCC_CE2_CLK 158 +#define GCC_CE3_AHB_CLK 159 +#define GCC_CE3_AXI_CLK 160 +#define GCC_CE3_CLK 161 +#define GCC_CNOC_BUS_TIMEOUT0_AHB_CLK 162 +#define GCC_CNOC_BUS_TIMEOUT1_AHB_CLK 163 +#define GCC_CNOC_BUS_TIMEOUT2_AHB_CLK 164 +#define GCC_CNOC_BUS_TIMEOUT3_AHB_CLK 165 +#define GCC_CNOC_BUS_TIMEOUT4_AHB_CLK 166 +#define GCC_CNOC_BUS_TIMEOUT5_AHB_CLK 167 +#define GCC_CNOC_BUS_TIMEOUT6_AHB_CLK 168 +#define GCC_CNOC_BUS_TIMEOUT7_AHB_CLK 169 +#define GCC_CFG_NOC_AHB_CLK 170 +#define GCC_CFG_NOC_DDR_CFG_CLK 171 +#define GCC_CFG_NOC_RPM_AHB_CLK 172 +#define GCC_COPSS_SMMU_AHB_CLK 173 +#define GCC_COPSS_SMMU_AXI_CLK 174 +#define GCC_DCD_XO_CLK 175 +#define GCC_BIMC_DDR_CH0_CLK 176 +#define GCC_BIMC_DDR_CH1_CLK 177 +#define GCC_BIMC_DDR_CPLL0_CLK 178 +#define GCC_BIMC_DDR_CPLL1_CLK 179 +#define GCC_BIMC_GFX_CLK 180 +#define GCC_DDR_DIM_CFG_CLK 181 +#define GCC_DDR_DIM_SLEEP_CLK 182 +#define GCC_DEHR_CLK 183 +#define GCC_AHB_CLK 184 +#define GCC_IM_SLEEP_CLK 185 +#define GCC_XO_CLK 186 +#define GCC_XO_DIV4_CLK 187 +#define GCC_GP1_CLK 188 +#define GCC_GP2_CLK 189 +#define GCC_GP3_CLK 190 +#define GCC_IMEM_AXI_CLK 191 +#define GCC_IMEM_CFG_AHB_CLK 192 +#define GCC_KPSS_AHB_CLK 193 +#define GCC_KPSS_AXI_CLK 194 +#define GCC_LPASS_MPORT_AXI_CLK 195 +#define GCC_LPASS_Q6_AXI_CLK 196 +#define GCC_LPASS_SWAY_CLK 197 +#define GCC_MMSS_BIMC_GFX_CLK 198 +#define GCC_MMSS_NOC_AT_CLK 199 +#define GCC_MMSS_NOC_CFG_AHB_CLK 200 +#define GCC_MMSS_VPU_MAPLE_SYS_NOC_AXI_CLK 201 +#define GCC_OCMEM_NOC_CFG_AHB_CLK 202 +#define GCC_OCMEM_SYS_NOC_AXI_CLK 
203 +#define GCC_MPM_AHB_CLK 204 +#define GCC_MSG_RAM_AHB_CLK 205 +#define GCC_NOC_CONF_XPU_AHB_CLK 206 +#define GCC_PDM2_CLK 207 +#define GCC_PDM_AHB_CLK 208 +#define GCC_PDM_XO4_CLK 209 +#define GCC_PERIPH_NOC_AHB_CLK 210 +#define GCC_PERIPH_NOC_AT_CLK 211 +#define GCC_PERIPH_NOC_CFG_AHB_CLK 212 +#define GCC_PERIPH_NOC_USB_HSIC_AHB_CLK 213 +#define GCC_PERIPH_NOC_MPU_CFG_AHB_CLK 214 +#define GCC_PERIPH_XPU_AHB_CLK 215 +#define GCC_PNOC_BUS_TIMEOUT0_AHB_CLK 216 +#define GCC_PNOC_BUS_TIMEOUT1_AHB_CLK 217 +#define GCC_PNOC_BUS_TIMEOUT2_AHB_CLK 218 +#define GCC_PNOC_BUS_TIMEOUT3_AHB_CLK 219 +#define GCC_PNOC_BUS_TIMEOUT4_AHB_CLK 220 +#define GCC_PRNG_AHB_CLK 221 +#define GCC_QDSS_AT_CLK 222 +#define GCC_QDSS_CFG_AHB_CLK 223 +#define GCC_QDSS_DAP_AHB_CLK 224 +#define GCC_QDSS_DAP_CLK 225 +#define GCC_QDSS_ETR_USB_CLK 226 +#define GCC_QDSS_STM_CLK 227 +#define GCC_QDSS_TRACECLKIN_CLK 228 +#define GCC_QDSS_TSCTR_DIV16_CLK 229 +#define GCC_QDSS_TSCTR_DIV2_CLK 230 +#define GCC_QDSS_TSCTR_DIV3_CLK 231 +#define GCC_QDSS_TSCTR_DIV4_CLK 232 +#define GCC_QDSS_TSCTR_DIV8_CLK 233 +#define GCC_QDSS_RBCPR_XPU_AHB_CLK 234 +#define GCC_RBCPR_AHB_CLK 235 +#define GCC_RBCPR_CLK 236 +#define GCC_RPM_BUS_AHB_CLK 237 +#define GCC_RPM_PROC_HCLK 238 +#define GCC_RPM_SLEEP_CLK 239 +#define GCC_RPM_TIMER_CLK 240 +#define GCC_SATA_ASIC0_CLK 241 +#define GCC_SATA_AXI_CLK 242 +#define GCC_SATA_CFG_AHB_CLK 243 +#define GCC_SATA_PMALIVE_CLK 244 +#define GCC_SATA_RX_CLK 245 +#define GCC_SATA_RX_OOB_CLK 246 +#define GCC_SDCC1_AHB_CLK 247 +#define GCC_SDCC1_APPS_CLK 248 +#define GCC_SDCC1_CDCCAL_FF_CLK 249 +#define GCC_SDCC1_CDCCAL_SLEEP_CLK 250 +#define GCC_SDCC2_AHB_CLK 251 +#define GCC_SDCC2_APPS_CLK 252 +#define GCC_SDCC2_INACTIVITY_TIMERS_CLK 253 +#define GCC_SDCC3_AHB_CLK 254 +#define GCC_SDCC3_APPS_CLK 255 +#define GCC_SDCC3_INACTIVITY_TIMERS_CLK 256 +#define GCC_SDCC4_AHB_CLK 257 +#define GCC_SDCC4_APPS_CLK 258 +#define GCC_SDCC4_INACTIVITY_TIMERS_CLK 259 +#define GCC_SEC_CTRL_ACC_CLK 260 +#define GCC_SEC_CTRL_AHB_CLK 261 +#define GCC_SEC_CTRL_BOOT_ROM_PATCH_CLK 262 +#define GCC_SEC_CTRL_CLK 263 +#define GCC_SEC_CTRL_SENSE_CLK 264 +#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 265 +#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK 266 +#define GCC_SPDM_BIMC_CY_CLK 267 +#define GCC_SPDM_CFG_AHB_CLK 268 +#define GCC_SPDM_DEBUG_CY_CLK 269 +#define GCC_SPDM_FF_CLK 270 +#define GCC_SPDM_MSTR_AHB_CLK 271 +#define GCC_SPDM_PNOC_CY_CLK 272 +#define GCC_SPDM_RPM_CY_CLK 273 +#define GCC_SPDM_SNOC_CY_CLK 274 +#define GCC_SPMI_AHB_CLK 275 +#define GCC_SPMI_CNOC_AHB_CLK 276 +#define GCC_SPMI_SER_CLK 277 +#define GCC_SPSS_AHB_CLK 278 +#define GCC_SNOC_CNOC_AHB_CLK 279 +#define GCC_SNOC_PNOC_AHB_CLK 280 +#define GCC_SYS_NOC_AT_CLK 281 +#define GCC_SYS_NOC_AXI_CLK 282 +#define GCC_SYS_NOC_KPSS_AHB_CLK 283 +#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 284 +#define GCC_SYS_NOC_UFS_AXI_CLK 285 +#define GCC_SYS_NOC_USB3_AXI_CLK 286 +#define GCC_SYS_NOC_USB3_SEC_AXI_CLK 287 +#define GCC_TCSR_AHB_CLK 288 +#define GCC_TLMM_AHB_CLK 289 +#define GCC_TLMM_CLK 290 +#define GCC_TSIF_AHB_CLK 291 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 292 +#define GCC_TSIF_REF_CLK 293 +#define GCC_UFS_AHB_CLK 294 +#define GCC_UFS_AXI_CLK 295 +#define GCC_UFS_RX_CFG_CLK 296 +#define GCC_UFS_RX_SYMBOL_0_CLK 297 +#define GCC_UFS_RX_SYMBOL_1_CLK 298 +#define GCC_UFS_TX_CFG_CLK 299 +#define GCC_UFS_TX_SYMBOL_0_CLK 300 +#define GCC_UFS_TX_SYMBOL_1_CLK 301 +#define GCC_USB2A_PHY_SLEEP_CLK 302 +#define GCC_USB2B_PHY_SLEEP_CLK 303 +#define GCC_USB30_MASTER_CLK 304 +#define GCC_USB30_MOCK_UTMI_CLK 
305 +#define GCC_USB30_SLEEP_CLK 306 +#define GCC_USB30_SEC_MASTER_CLK 307 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 308 +#define GCC_USB30_SEC_SLEEP_CLK 309 +#define GCC_USB_HS_AHB_CLK 310 +#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 311 +#define GCC_USB_HS_SYSTEM_CLK 312 +#define GCC_USB_HSIC_AHB_CLK 313 +#define GCC_USB_HSIC_CLK 314 +#define GCC_USB_HSIC_IO_CAL_CLK 315 +#define GCC_USB_HSIC_IO_CAL_SLEEP_CLK 316 +#define GCC_USB_HSIC_MOCK_UTMI_CLK 317 +#define GCC_USB_HSIC_SYSTEM_CLK 318 +#define PCIE_0_AUX_CLK_SRC 319 +#define PCIE_0_PIPE_CLK_SRC 320 +#define PCIE_1_AUX_CLK_SRC 321 +#define PCIE_1_PIPE_CLK_SRC 322 +#define GCC_PCIE_0_AUX_CLK 323 +#define GCC_PCIE_0_CFG_AHB_CLK 324 +#define GCC_PCIE_0_MSTR_AXI_CLK 325 +#define GCC_PCIE_0_PIPE_CLK 326 +#define GCC_PCIE_0_SLV_AXI_CLK 327 +#define GCC_PCIE_1_AUX_CLK 328 +#define GCC_PCIE_1_CFG_AHB_CLK 329 +#define GCC_PCIE_1_MSTR_AXI_CLK 330 +#define GCC_PCIE_1_PIPE_CLK 331 +#define GCC_PCIE_1_SLV_AXI_CLK 332 + +/* gdscs */ +#define USB_HS_HSIC_GDSC 0 +#define PCIE0_GDSC 1 +#define PCIE1_GDSC 2 +#define USB30_GDSC 3 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h new file mode 100644 index 0000000..7e8a7be --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -0,0 +1,169 @@ +/* Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ +#ifndef __QCOM_CLK_IPQ4019_H__ +#define __QCOM_CLK_IPQ4019_H__ + +#define GCC_DUMMY_CLK 0 +#define AUDIO_CLK_SRC 1 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 2 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 3 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 4 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 5 +#define BLSP1_UART1_APPS_CLK_SRC 6 +#define BLSP1_UART2_APPS_CLK_SRC 7 +#define GCC_USB3_MOCK_UTMI_CLK_SRC 8 +#define GCC_APPS_CLK_SRC 9 +#define GCC_APPS_AHB_CLK_SRC 10 +#define GP1_CLK_SRC 11 +#define GP2_CLK_SRC 12 +#define GP3_CLK_SRC 13 +#define SDCC1_APPS_CLK_SRC 14 +#define FEPHY_125M_DLY_CLK_SRC 15 +#define WCSS2G_CLK_SRC 16 +#define WCSS5G_CLK_SRC 17 +#define GCC_APSS_AHB_CLK 18 +#define GCC_AUDIO_AHB_CLK 19 +#define GCC_AUDIO_PWM_CLK 20 +#define GCC_BLSP1_AHB_CLK 21 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 22 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 23 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 24 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 25 +#define GCC_BLSP1_UART1_APPS_CLK 26 +#define GCC_BLSP1_UART2_APPS_CLK 27 +#define GCC_DCD_XO_CLK 28 +#define GCC_GP1_CLK 29 +#define GCC_GP2_CLK 30 +#define GCC_GP3_CLK 31 +#define GCC_BOOT_ROM_AHB_CLK 32 +#define GCC_CRYPTO_AHB_CLK 33 +#define GCC_CRYPTO_AXI_CLK 34 +#define GCC_CRYPTO_CLK 35 +#define GCC_ESS_CLK 36 +#define GCC_IMEM_AXI_CLK 37 +#define GCC_IMEM_CFG_AHB_CLK 38 +#define GCC_PCIE_AHB_CLK 39 +#define GCC_PCIE_AXI_M_CLK 40 +#define GCC_PCIE_AXI_S_CLK 41 +#define GCC_PCNOC_AHB_CLK 42 +#define GCC_PRNG_AHB_CLK 43 +#define GCC_QPIC_AHB_CLK 44 +#define GCC_QPIC_CLK 45 +#define GCC_SDCC1_AHB_CLK 46 +#define GCC_SDCC1_APPS_CLK 47 +#define GCC_SNOC_PCNOC_AHB_CLK 48 +#define GCC_SYS_NOC_125M_CLK 49 +#define GCC_SYS_NOC_AXI_CLK 50 +#define GCC_TCSR_AHB_CLK 51 +#define GCC_TLMM_AHB_CLK 52 +#define GCC_USB2_MASTER_CLK 53 +#define GCC_USB2_SLEEP_CLK 54 +#define GCC_USB2_MOCK_UTMI_CLK 55 +#define GCC_USB3_MASTER_CLK 56 +#define GCC_USB3_SLEEP_CLK 57 +#define GCC_USB3_MOCK_UTMI_CLK 58 +#define GCC_WCSS2G_CLK 59 +#define GCC_WCSS2G_REF_CLK 60 +#define GCC_WCSS2G_RTC_CLK 61 +#define GCC_WCSS5G_CLK 62 +#define GCC_WCSS5G_REF_CLK 63 +#define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 + +#define WIFI0_CPU_INIT_RESET 0 +#define WIFI0_RADIO_SRIF_RESET 1 +#define WIFI0_RADIO_WARM_RESET 2 +#define WIFI0_RADIO_COLD_RESET 3 +#define WIFI0_CORE_WARM_RESET 4 +#define WIFI0_CORE_COLD_RESET 5 +#define WIFI1_CPU_INIT_RESET 6 +#define WIFI1_RADIO_SRIF_RESET 7 +#define WIFI1_RADIO_WARM_RESET 8 +#define WIFI1_RADIO_COLD_RESET 9 +#define WIFI1_CORE_WARM_RESET 10 +#define WIFI1_CORE_COLD_RESET 11 +#define USB3_UNIPHY_PHY_ARES 12 +#define USB3_HSPHY_POR_ARES 13 +#define USB3_HSPHY_S_ARES 14 +#define USB2_HSPHY_POR_ARES 15 +#define USB2_HSPHY_S_ARES 16 +#define PCIE_PHY_AHB_ARES 17 +#define PCIE_AHB_ARES 18 +#define PCIE_PWR_ARES 19 +#define PCIE_PIPE_STICKY_ARES 20 +#define PCIE_AXI_M_STICKY_ARES 21 +#define PCIE_PHY_ARES 22 +#define PCIE_PARF_XPU_ARES 23 +#define PCIE_AXI_S_XPU_ARES 24 +#define PCIE_AXI_M_VMIDMT_ARES 25 +#define PCIE_PIPE_ARES 26 +#define PCIE_AXI_S_ARES 27 +#define PCIE_AXI_M_ARES 28 +#define ESS_RESET 29 +#define GCC_BLSP1_BCR 30 +#define GCC_BLSP1_QUP1_BCR 31 +#define GCC_BLSP1_UART1_BCR 32 +#define GCC_BLSP1_QUP2_BCR 33 +#define GCC_BLSP1_UART2_BCR 34 +#define 
GCC_BIMC_BCR 35 +#define GCC_TLMM_BCR 36 +#define GCC_IMEM_BCR 37 +#define GCC_ESS_BCR 38 +#define GCC_PRNG_BCR 39 +#define GCC_BOOT_ROM_BCR 40 +#define GCC_CRYPTO_BCR 41 +#define GCC_SDCC1_BCR 42 +#define GCC_SEC_CTRL_BCR 43 +#define GCC_AUDIO_BCR 44 +#define GCC_QPIC_BCR 45 +#define GCC_PCIE_BCR 46 +#define GCC_USB2_BCR 47 +#define GCC_USB2_PHY_BCR 48 +#define GCC_USB3_BCR 49 +#define GCC_USB3_PHY_BCR 50 +#define GCC_SYSTEM_NOC_BCR 51 +#define GCC_PCNOC_BCR 52 +#define GCC_DCD_BCR 53 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 54 +#define GCC_SNOC_BUS_TIMEOUT1_BCR 55 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 56 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 57 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67 +#define GCC_TCSR_BCR 68 +#define GCC_QDSS_BCR 69 +#define GCC_MPM_BCR 70 +#define GCC_SPDM_BCR 71 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h new file mode 100644 index 0000000..7deec14 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_GCC_IPQ806X_H +#define _DT_BINDINGS_CLK_GCC_IPQ806X_H + +#define AFAB_CLK_SRC 0 +#define QDSS_STM_CLK 1 +#define SCSS_A_CLK 2 +#define SCSS_H_CLK 3 +#define AFAB_CORE_CLK 4 +#define SCSS_XO_SRC_CLK 5 +#define AFAB_EBI1_CH0_A_CLK 6 +#define AFAB_EBI1_CH1_A_CLK 7 +#define AFAB_AXI_S0_FCLK 8 +#define AFAB_AXI_S1_FCLK 9 +#define AFAB_AXI_S2_FCLK 10 +#define AFAB_AXI_S3_FCLK 11 +#define AFAB_AXI_S4_FCLK 12 +#define SFAB_CORE_CLK 13 +#define SFAB_AXI_S0_FCLK 14 +#define SFAB_AXI_S1_FCLK 15 +#define SFAB_AXI_S2_FCLK 16 +#define SFAB_AXI_S3_FCLK 17 +#define SFAB_AXI_S4_FCLK 18 +#define SFAB_AXI_S5_FCLK 19 +#define SFAB_AHB_S0_FCLK 20 +#define SFAB_AHB_S1_FCLK 21 +#define SFAB_AHB_S2_FCLK 22 +#define SFAB_AHB_S3_FCLK 23 +#define SFAB_AHB_S4_FCLK 24 +#define SFAB_AHB_S5_FCLK 25 +#define SFAB_AHB_S6_FCLK 26 +#define SFAB_AHB_S7_FCLK 27 +#define QDSS_AT_CLK_SRC 28 +#define QDSS_AT_CLK 29 +#define QDSS_TRACECLKIN_CLK_SRC 30 +#define QDSS_TRACECLKIN_CLK 31 +#define QDSS_TSCTR_CLK_SRC 32 +#define QDSS_TSCTR_CLK 33 +#define SFAB_ADM0_M0_A_CLK 34 +#define SFAB_ADM0_M1_A_CLK 35 +#define SFAB_ADM0_M2_H_CLK 36 +#define ADM0_CLK 37 +#define ADM0_PBUS_CLK 38 +#define IMEM0_A_CLK 39 +#define QDSS_H_CLK 40 +#define PCIE_A_CLK 41 +#define PCIE_AUX_CLK 42 +#define PCIE_H_CLK 43 +#define PCIE_PHY_CLK 44 +#define SFAB_CLK_SRC 45 +#define SFAB_LPASS_Q6_A_CLK 46 +#define SFAB_AFAB_M_A_CLK 47 +#define AFAB_SFAB_M0_A_CLK 48 +#define AFAB_SFAB_M1_A_CLK 49 +#define SFAB_SATA_S_H_CLK 50 +#define DFAB_CLK_SRC 51 +#define DFAB_CLK 52 +#define SFAB_DFAB_M_A_CLK 53 +#define DFAB_SFAB_M_A_CLK 54 +#define DFAB_SWAY0_H_CLK 55 +#define DFAB_SWAY1_H_CLK 56 +#define DFAB_ARB0_H_CLK 57 +#define DFAB_ARB1_H_CLK 58 +#define PPSS_H_CLK 59 +#define PPSS_PROC_CLK 60 +#define PPSS_TIMER0_CLK 61 +#define PPSS_TIMER1_CLK 62 +#define PMEM_A_CLK 63 +#define DMA_BAM_H_CLK 64 +#define SIC_H_CLK 65 +#define SPS_TIC_H_CLK 66 +#define CFPB_2X_CLK_SRC 67 +#define CFPB_CLK 68 +#define CFPB0_H_CLK 69 +#define CFPB1_H_CLK 70 +#define CFPB2_H_CLK 71 +#define 
SFAB_CFPB_M_H_CLK 72 +#define CFPB_MASTER_H_CLK 73 +#define SFAB_CFPB_S_H_CLK 74 +#define CFPB_SPLITTER_H_CLK 75 +#define TSIF_H_CLK 76 +#define TSIF_INACTIVITY_TIMERS_CLK 77 +#define TSIF_REF_SRC 78 +#define TSIF_REF_CLK 79 +#define CE1_H_CLK 80 +#define CE1_CORE_CLK 81 +#define CE1_SLEEP_CLK 82 +#define CE2_H_CLK 83 +#define CE2_CORE_CLK 84 +#define SFPB_H_CLK_SRC 85 +#define SFPB_H_CLK 86 +#define SFAB_SFPB_M_H_CLK 87 +#define SFAB_SFPB_S_H_CLK 88 +#define RPM_PROC_CLK 89 +#define RPM_BUS_H_CLK 90 +#define RPM_SLEEP_CLK 91 +#define RPM_TIMER_CLK 92 +#define RPM_MSG_RAM_H_CLK 93 +#define PMIC_ARB0_H_CLK 94 +#define PMIC_ARB1_H_CLK 95 +#define PMIC_SSBI2_SRC 96 +#define PMIC_SSBI2_CLK 97 +#define SDC1_H_CLK 98 +#define SDC2_H_CLK 99 +#define SDC3_H_CLK 100 +#define SDC4_H_CLK 101 +#define SDC1_SRC 102 +#define SDC1_CLK 103 +#define SDC2_SRC 104 +#define SDC2_CLK 105 +#define SDC3_SRC 106 +#define SDC3_CLK 107 +#define SDC4_SRC 108 +#define SDC4_CLK 109 +#define USB_HS1_H_CLK 110 +#define USB_HS1_XCVR_SRC 111 +#define USB_HS1_XCVR_CLK 112 +#define USB_HSIC_H_CLK 113 +#define USB_HSIC_XCVR_SRC 114 +#define USB_HSIC_XCVR_CLK 115 +#define USB_HSIC_SYSTEM_CLK_SRC 116 +#define USB_HSIC_SYSTEM_CLK 117 +#define CFPB0_C0_H_CLK 118 +#define CFPB0_D0_H_CLK 119 +#define CFPB0_C1_H_CLK 120 +#define CFPB0_D1_H_CLK 121 +#define USB_FS1_H_CLK 122 +#define USB_FS1_XCVR_SRC 123 +#define USB_FS1_XCVR_CLK 124 +#define USB_FS1_SYSTEM_CLK 125 +#define GSBI_COMMON_SIM_SRC 126 +#define GSBI1_H_CLK 127 +#define GSBI2_H_CLK 128 +#define GSBI3_H_CLK 129 +#define GSBI4_H_CLK 130 +#define GSBI5_H_CLK 131 +#define GSBI6_H_CLK 132 +#define GSBI7_H_CLK 133 +#define GSBI1_QUP_SRC 134 +#define GSBI1_QUP_CLK 135 +#define GSBI2_QUP_SRC 136 +#define GSBI2_QUP_CLK 137 +#define GSBI3_QUP_SRC 138 +#define GSBI3_QUP_CLK 139 +#define GSBI4_QUP_SRC 140 +#define GSBI4_QUP_CLK 141 +#define GSBI5_QUP_SRC 142 +#define GSBI5_QUP_CLK 143 +#define GSBI6_QUP_SRC 144 +#define GSBI6_QUP_CLK 145 +#define GSBI7_QUP_SRC 146 +#define GSBI7_QUP_CLK 147 +#define GSBI1_UART_SRC 148 +#define GSBI1_UART_CLK 149 +#define GSBI2_UART_SRC 150 +#define GSBI2_UART_CLK 151 +#define GSBI3_UART_SRC 152 +#define GSBI3_UART_CLK 153 +#define GSBI4_UART_SRC 154 +#define GSBI4_UART_CLK 155 +#define GSBI5_UART_SRC 156 +#define GSBI5_UART_CLK 157 +#define GSBI6_UART_SRC 158 +#define GSBI6_UART_CLK 159 +#define GSBI7_UART_SRC 160 +#define GSBI7_UART_CLK 161 +#define GSBI1_SIM_CLK 162 +#define GSBI2_SIM_CLK 163 +#define GSBI3_SIM_CLK 164 +#define GSBI4_SIM_CLK 165 +#define GSBI5_SIM_CLK 166 +#define GSBI6_SIM_CLK 167 +#define GSBI7_SIM_CLK 168 +#define USB_HSIC_HSIC_CLK_SRC 169 +#define USB_HSIC_HSIC_CLK 170 +#define USB_HSIC_HSIO_CAL_CLK 171 +#define SPDM_CFG_H_CLK 172 +#define SPDM_MSTR_H_CLK 173 +#define SPDM_FF_CLK_SRC 174 +#define SPDM_FF_CLK 175 +#define SEC_CTRL_CLK 176 +#define SEC_CTRL_ACC_CLK_SRC 177 +#define SEC_CTRL_ACC_CLK 178 +#define TLMM_H_CLK 179 +#define TLMM_CLK 180 +#define SATA_H_CLK 181 +#define SATA_CLK_SRC 182 +#define SATA_RXOOB_CLK 183 +#define SATA_PMALIVE_CLK 184 +#define SATA_PHY_REF_CLK 185 +#define SATA_A_CLK 186 +#define SATA_PHY_CFG_CLK 187 +#define TSSC_CLK_SRC 188 +#define TSSC_CLK 189 +#define PDM_SRC 190 +#define PDM_CLK 191 +#define GP0_SRC 192 +#define GP0_CLK 193 +#define GP1_SRC 194 +#define GP1_CLK 195 +#define GP2_SRC 196 +#define GP2_CLK 197 +#define MPM_CLK 198 +#define EBI1_CLK_SRC 199 +#define EBI1_CH0_CLK 200 +#define EBI1_CH1_CLK 201 +#define EBI1_2X_CLK 202 +#define EBI1_CH0_DQ_CLK 203 +#define EBI1_CH1_DQ_CLK 204 
+#define EBI1_CH0_CA_CLK 205 +#define EBI1_CH1_CA_CLK 206 +#define EBI1_XO_CLK 207 +#define SFAB_SMPSS_S_H_CLK 208 +#define PRNG_SRC 209 +#define PRNG_CLK 210 +#define PXO_SRC 211 +#define SPDM_CY_PORT0_CLK 212 +#define SPDM_CY_PORT1_CLK 213 +#define SPDM_CY_PORT2_CLK 214 +#define SPDM_CY_PORT3_CLK 215 +#define SPDM_CY_PORT4_CLK 216 +#define SPDM_CY_PORT5_CLK 217 +#define SPDM_CY_PORT6_CLK 218 +#define SPDM_CY_PORT7_CLK 219 +#define PLL0 220 +#define PLL0_VOTE 221 +#define PLL3 222 +#define PLL3_VOTE 223 +#define PLL4_VOTE 225 +#define PLL8 226 +#define PLL8_VOTE 227 +#define PLL9 228 +#define PLL10 229 +#define PLL11 230 +#define PLL12 231 +#define PLL14 232 +#define PLL14_VOTE 233 +#define PLL18 234 +#define CE5_SRC 235 +#define CE5_H_CLK 236 +#define CE5_CORE_CLK 237 +#define CE3_SLEEP_CLK 238 +#define SFAB_AHB_S8_FCLK 239 +#define SPDM_CY_PORT8_CLK 246 +#define PCIE_ALT_REF_SRC 247 +#define PCIE_ALT_REF_CLK 248 +#define PCIE_1_A_CLK 249 +#define PCIE_1_AUX_CLK 250 +#define PCIE_1_H_CLK 251 +#define PCIE_1_PHY_CLK 252 +#define PCIE_1_ALT_REF_SRC 253 +#define PCIE_1_ALT_REF_CLK 254 +#define PCIE_2_A_CLK 255 +#define PCIE_2_AUX_CLK 256 +#define PCIE_2_H_CLK 257 +#define PCIE_2_PHY_CLK 258 +#define PCIE_2_ALT_REF_SRC 259 +#define PCIE_2_ALT_REF_CLK 260 +#define EBI2_CLK 261 +#define USB30_SLEEP_CLK 262 +#define USB30_UTMI_SRC 263 +#define USB30_0_UTMI_CLK 264 +#define USB30_1_UTMI_CLK 265 +#define USB30_MASTER_SRC 266 +#define USB30_0_MASTER_CLK 267 +#define USB30_1_MASTER_CLK 268 +#define GMAC_CORE1_CLK_SRC 269 +#define GMAC_CORE2_CLK_SRC 270 +#define GMAC_CORE3_CLK_SRC 271 +#define GMAC_CORE4_CLK_SRC 272 +#define GMAC_CORE1_CLK 273 +#define GMAC_CORE2_CLK 274 +#define GMAC_CORE3_CLK 275 +#define GMAC_CORE4_CLK 276 +#define UBI32_CORE1_CLK_SRC 277 +#define UBI32_CORE2_CLK_SRC 278 +#define UBI32_CORE1_CLK 279 +#define UBI32_CORE2_CLK 280 +#define EBI2_AON_CLK 281 +#define NSSTCM_CLK_SRC 282 +#define NSSTCM_CLK 283 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h new file mode 100644 index 0000000..4de4811 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_8074_H +#define _DT_BINDINGS_CLOCK_IPQ_GCC_8074_H + +#define GPLL0 0 +#define GPLL0_MAIN 1 +#define GCC_SLEEP_CLK_SRC 2 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 11 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 12 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 13 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 14 +#define BLSP1_UART1_APPS_CLK_SRC 15 +#define BLSP1_UART2_APPS_CLK_SRC 16 +#define BLSP1_UART3_APPS_CLK_SRC 17 +#define BLSP1_UART4_APPS_CLK_SRC 18 +#define BLSP1_UART5_APPS_CLK_SRC 19 +#define BLSP1_UART6_APPS_CLK_SRC 20 +#define GCC_BLSP1_AHB_CLK 21 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 22 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 23 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 24 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 25 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 26 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 27 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 28 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 29 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 30 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 31 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 32 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 33 +#define GCC_BLSP1_UART1_APPS_CLK 34 +#define GCC_BLSP1_UART2_APPS_CLK 35 +#define GCC_BLSP1_UART3_APPS_CLK 36 +#define GCC_BLSP1_UART4_APPS_CLK 37 +#define GCC_BLSP1_UART5_APPS_CLK 38 +#define GCC_BLSP1_UART6_APPS_CLK 39 +#define GCC_PRNG_AHB_CLK 40 +#define GCC_QPIC_AHB_CLK 41 +#define GCC_QPIC_CLK 42 +#define PCNOC_BFDCD_CLK_SRC 43 +#define GPLL2_MAIN 44 +#define GPLL2 45 +#define GPLL4_MAIN 46 +#define GPLL4 47 +#define GPLL6_MAIN 48 +#define GPLL6 49 +#define UBI32_PLL_MAIN 50 +#define UBI32_PLL 51 +#define NSS_CRYPTO_PLL_MAIN 52 +#define NSS_CRYPTO_PLL 53 +#define PCIE0_AXI_CLK_SRC 54 +#define PCIE0_AUX_CLK_SRC 55 +#define PCIE0_PIPE_CLK_SRC 56 +#define PCIE1_AXI_CLK_SRC 57 +#define PCIE1_AUX_CLK_SRC 58 +#define PCIE1_PIPE_CLK_SRC 59 +#define SDCC1_APPS_CLK_SRC 60 +#define SDCC1_ICE_CORE_CLK_SRC 61 +#define SDCC2_APPS_CLK_SRC 62 +#define USB0_MASTER_CLK_SRC 63 +#define USB0_AUX_CLK_SRC 64 +#define USB0_MOCK_UTMI_CLK_SRC 65 +#define USB0_PIPE_CLK_SRC 66 +#define USB1_MASTER_CLK_SRC 67 +#define USB1_AUX_CLK_SRC 68 +#define USB1_MOCK_UTMI_CLK_SRC 69 +#define USB1_PIPE_CLK_SRC 70 +#define GCC_XO_CLK_SRC 71 +#define SYSTEM_NOC_BFDCD_CLK_SRC 72 +#define NSS_CE_CLK_SRC 73 +#define NSS_NOC_BFDCD_CLK_SRC 74 +#define NSS_CRYPTO_CLK_SRC 75 +#define NSS_UBI0_CLK_SRC 76 +#define NSS_UBI0_DIV_CLK_SRC 77 +#define NSS_UBI1_CLK_SRC 78 +#define NSS_UBI1_DIV_CLK_SRC 79 +#define UBI_MPT_CLK_SRC 80 +#define NSS_IMEM_CLK_SRC 81 +#define NSS_PPE_CLK_SRC 82 +#define NSS_PORT1_RX_CLK_SRC 83 +#define NSS_PORT1_RX_DIV_CLK_SRC 84 +#define NSS_PORT1_TX_CLK_SRC 85 +#define NSS_PORT1_TX_DIV_CLK_SRC 86 +#define NSS_PORT2_RX_CLK_SRC 87 +#define NSS_PORT2_RX_DIV_CLK_SRC 88 +#define NSS_PORT2_TX_CLK_SRC 89 +#define NSS_PORT2_TX_DIV_CLK_SRC 90 +#define NSS_PORT3_RX_CLK_SRC 91 +#define NSS_PORT3_RX_DIV_CLK_SRC 92 +#define NSS_PORT3_TX_CLK_SRC 93 +#define NSS_PORT3_TX_DIV_CLK_SRC 94 +#define NSS_PORT4_RX_CLK_SRC 95 +#define NSS_PORT4_RX_DIV_CLK_SRC 96 +#define NSS_PORT4_TX_CLK_SRC 97 +#define NSS_PORT4_TX_DIV_CLK_SRC 98 +#define NSS_PORT5_RX_CLK_SRC 99 +#define NSS_PORT5_RX_DIV_CLK_SRC 100 +#define NSS_PORT5_TX_CLK_SRC 101 +#define NSS_PORT5_TX_DIV_CLK_SRC 102 +#define 
NSS_PORT6_RX_CLK_SRC 103 +#define NSS_PORT6_RX_DIV_CLK_SRC 104 +#define NSS_PORT6_TX_CLK_SRC 105 +#define NSS_PORT6_TX_DIV_CLK_SRC 106 +#define CRYPTO_CLK_SRC 107 +#define GP1_CLK_SRC 108 +#define GP2_CLK_SRC 109 +#define GP3_CLK_SRC 110 +#define GCC_PCIE0_AHB_CLK 111 +#define GCC_PCIE0_AUX_CLK 112 +#define GCC_PCIE0_AXI_M_CLK 113 +#define GCC_PCIE0_AXI_S_CLK 114 +#define GCC_PCIE0_PIPE_CLK 115 +#define GCC_SYS_NOC_PCIE0_AXI_CLK 116 +#define GCC_PCIE1_AHB_CLK 117 +#define GCC_PCIE1_AUX_CLK 118 +#define GCC_PCIE1_AXI_M_CLK 119 +#define GCC_PCIE1_AXI_S_CLK 120 +#define GCC_PCIE1_PIPE_CLK 121 +#define GCC_SYS_NOC_PCIE1_AXI_CLK 122 +#define GCC_USB0_AUX_CLK 123 +#define GCC_SYS_NOC_USB0_AXI_CLK 124 +#define GCC_USB0_MASTER_CLK 125 +#define GCC_USB0_MOCK_UTMI_CLK 126 +#define GCC_USB0_PHY_CFG_AHB_CLK 127 +#define GCC_USB0_PIPE_CLK 128 +#define GCC_USB0_SLEEP_CLK 129 +#define GCC_USB1_AUX_CLK 130 +#define GCC_SYS_NOC_USB1_AXI_CLK 131 +#define GCC_USB1_MASTER_CLK 132 +#define GCC_USB1_MOCK_UTMI_CLK 133 +#define GCC_USB1_PHY_CFG_AHB_CLK 134 +#define GCC_USB1_PIPE_CLK 135 +#define GCC_USB1_SLEEP_CLK 136 +#define GCC_SDCC1_AHB_CLK 137 +#define GCC_SDCC1_APPS_CLK 138 +#define GCC_SDCC1_ICE_CORE_CLK 139 +#define GCC_SDCC2_AHB_CLK 140 +#define GCC_SDCC2_APPS_CLK 141 +#define GCC_MEM_NOC_NSS_AXI_CLK 142 +#define GCC_NSS_CE_APB_CLK 143 +#define GCC_NSS_CE_AXI_CLK 144 +#define GCC_NSS_CFG_CLK 145 +#define GCC_NSS_CRYPTO_CLK 146 +#define GCC_NSS_CSR_CLK 147 +#define GCC_NSS_EDMA_CFG_CLK 148 +#define GCC_NSS_EDMA_CLK 149 +#define GCC_NSS_IMEM_CLK 150 +#define GCC_NSS_NOC_CLK 151 +#define GCC_NSS_PPE_BTQ_CLK 152 +#define GCC_NSS_PPE_CFG_CLK 153 +#define GCC_NSS_PPE_CLK 154 +#define GCC_NSS_PPE_IPE_CLK 155 +#define GCC_NSS_PTP_REF_CLK 156 +#define GCC_NSSNOC_CE_APB_CLK 157 +#define GCC_NSSNOC_CE_AXI_CLK 158 +#define GCC_NSSNOC_CRYPTO_CLK 159 +#define GCC_NSSNOC_PPE_CFG_CLK 160 +#define GCC_NSSNOC_PPE_CLK 161 +#define GCC_NSSNOC_QOSGEN_REF_CLK 162 +#define GCC_NSSNOC_SNOC_CLK 163 +#define GCC_NSSNOC_TIMEOUT_REF_CLK 164 +#define GCC_NSSNOC_UBI0_AHB_CLK 165 +#define GCC_NSSNOC_UBI1_AHB_CLK 166 +#define GCC_UBI0_AHB_CLK 167 +#define GCC_UBI0_AXI_CLK 168 +#define GCC_UBI0_NC_AXI_CLK 169 +#define GCC_UBI0_CORE_CLK 170 +#define GCC_UBI0_MPT_CLK 171 +#define GCC_UBI1_AHB_CLK 172 +#define GCC_UBI1_AXI_CLK 173 +#define GCC_UBI1_NC_AXI_CLK 174 +#define GCC_UBI1_CORE_CLK 175 +#define GCC_UBI1_MPT_CLK 176 +#define GCC_CMN_12GPLL_AHB_CLK 177 +#define GCC_CMN_12GPLL_SYS_CLK 178 +#define GCC_MDIO_AHB_CLK 179 +#define GCC_UNIPHY0_AHB_CLK 180 +#define GCC_UNIPHY0_SYS_CLK 181 +#define GCC_UNIPHY1_AHB_CLK 182 +#define GCC_UNIPHY1_SYS_CLK 183 +#define GCC_UNIPHY2_AHB_CLK 184 +#define GCC_UNIPHY2_SYS_CLK 185 +#define GCC_NSS_PORT1_RX_CLK 186 +#define GCC_NSS_PORT1_TX_CLK 187 +#define GCC_NSS_PORT2_RX_CLK 188 +#define GCC_NSS_PORT2_TX_CLK 189 +#define GCC_NSS_PORT3_RX_CLK 190 +#define GCC_NSS_PORT3_TX_CLK 191 +#define GCC_NSS_PORT4_RX_CLK 192 +#define GCC_NSS_PORT4_TX_CLK 193 +#define GCC_NSS_PORT5_RX_CLK 194 +#define GCC_NSS_PORT5_TX_CLK 195 +#define GCC_NSS_PORT6_RX_CLK 196 +#define GCC_NSS_PORT6_TX_CLK 197 +#define GCC_PORT1_MAC_CLK 198 +#define GCC_PORT2_MAC_CLK 199 +#define GCC_PORT3_MAC_CLK 200 +#define GCC_PORT4_MAC_CLK 201 +#define GCC_PORT5_MAC_CLK 202 +#define GCC_PORT6_MAC_CLK 203 +#define GCC_UNIPHY0_PORT1_RX_CLK 204 +#define GCC_UNIPHY0_PORT1_TX_CLK 205 +#define GCC_UNIPHY0_PORT2_RX_CLK 206 +#define GCC_UNIPHY0_PORT2_TX_CLK 207 +#define GCC_UNIPHY0_PORT3_RX_CLK 208 +#define GCC_UNIPHY0_PORT3_TX_CLK 209 +#define 
GCC_UNIPHY0_PORT4_RX_CLK 210 +#define GCC_UNIPHY0_PORT4_TX_CLK 211 +#define GCC_UNIPHY0_PORT5_RX_CLK 212 +#define GCC_UNIPHY0_PORT5_TX_CLK 213 +#define GCC_UNIPHY1_PORT5_RX_CLK 214 +#define GCC_UNIPHY1_PORT5_TX_CLK 215 +#define GCC_UNIPHY2_PORT6_RX_CLK 216 +#define GCC_UNIPHY2_PORT6_TX_CLK 217 +#define GCC_CRYPTO_AHB_CLK 218 +#define GCC_CRYPTO_AXI_CLK 219 +#define GCC_CRYPTO_CLK 220 +#define GCC_GP1_CLK 221 +#define GCC_GP2_CLK 222 +#define GCC_GP3_CLK 223 + +#define GCC_BLSP1_BCR 0 +#define GCC_BLSP1_QUP1_BCR 1 +#define GCC_BLSP1_UART1_BCR 2 +#define GCC_BLSP1_QUP2_BCR 3 +#define GCC_BLSP1_UART2_BCR 4 +#define GCC_BLSP1_QUP3_BCR 5 +#define GCC_BLSP1_UART3_BCR 6 +#define GCC_BLSP1_QUP4_BCR 7 +#define GCC_BLSP1_UART4_BCR 8 +#define GCC_BLSP1_QUP5_BCR 9 +#define GCC_BLSP1_UART5_BCR 10 +#define GCC_BLSP1_QUP6_BCR 11 +#define GCC_BLSP1_UART6_BCR 12 +#define GCC_IMEM_BCR 13 +#define GCC_SMMU_BCR 14 +#define GCC_APSS_TCU_BCR 15 +#define GCC_SMMU_XPU_BCR 16 +#define GCC_PCNOC_TBU_BCR 17 +#define GCC_SMMU_CFG_BCR 18 +#define GCC_PRNG_BCR 19 +#define GCC_BOOT_ROM_BCR 20 +#define GCC_CRYPTO_BCR 21 +#define GCC_WCSS_BCR 22 +#define GCC_WCSS_Q6_BCR 23 +#define GCC_NSS_BCR 24 +#define GCC_SEC_CTRL_BCR 25 +#define GCC_ADSS_BCR 26 +#define GCC_DDRSS_BCR 27 +#define GCC_SYSTEM_NOC_BCR 28 +#define GCC_PCNOC_BCR 29 +#define GCC_TCSR_BCR 30 +#define GCC_QDSS_BCR 31 +#define GCC_DCD_BCR 32 +#define GCC_MSG_RAM_BCR 33 +#define GCC_MPM_BCR 34 +#define GCC_SPMI_BCR 35 +#define GCC_SPDM_BCR 36 +#define GCC_RBCPR_BCR 37 +#define GCC_RBCPR_MX_BCR 38 +#define GCC_TLMM_BCR 39 +#define GCC_RBCPR_WCSS_BCR 40 +#define GCC_USB0_PHY_BCR 41 +#define GCC_USB3PHY_0_PHY_BCR 42 +#define GCC_USB0_BCR 43 +#define GCC_USB1_PHY_BCR 44 +#define GCC_USB3PHY_1_PHY_BCR 45 +#define GCC_USB1_BCR 46 +#define GCC_QUSB2_0_PHY_BCR 47 +#define GCC_QUSB2_1_PHY_BCR 48 +#define GCC_SDCC1_BCR 49 +#define GCC_SDCC2_BCR 50 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 51 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 52 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 53 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 54 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 55 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 56 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 57 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 58 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 59 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 60 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 61 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 62 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 63 +#define GCC_UNIPHY0_BCR 64 +#define GCC_UNIPHY1_BCR 65 +#define GCC_UNIPHY2_BCR 66 +#define GCC_CMN_12GPLL_BCR 67 +#define GCC_QPIC_BCR 68 +#define GCC_MDIO_BCR 69 +#define GCC_PCIE1_TBU_BCR 70 +#define GCC_WCSS_CORE_TBU_BCR 71 +#define GCC_WCSS_Q6_TBU_BCR 72 +#define GCC_USB0_TBU_BCR 73 +#define GCC_USB1_TBU_BCR 74 +#define GCC_PCIE0_TBU_BCR 75 +#define GCC_NSS_NOC_TBU_BCR 76 +#define GCC_PCIE0_BCR 77 +#define GCC_PCIE0_PHY_BCR 78 +#define GCC_PCIE0PHY_PHY_BCR 79 +#define GCC_PCIE0_LINK_DOWN_BCR 80 +#define GCC_PCIE1_BCR 81 +#define GCC_PCIE1_PHY_BCR 82 +#define GCC_PCIE1PHY_PHY_BCR 83 +#define GCC_PCIE1_LINK_DOWN_BCR 84 +#define GCC_DCC_BCR 85 +#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 86 +#define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR 87 +#define GCC_SMMU_CATS_BCR 88 +#define GCC_UBI0_AXI_ARES 89 +#define GCC_UBI0_AHB_ARES 90 +#define GCC_UBI0_NC_AXI_ARES 91 +#define GCC_UBI0_DBG_ARES 92 +#define GCC_UBI0_CORE_CLAMP_ENABLE 93 +#define GCC_UBI0_CLKRST_CLAMP_ENABLE 94 +#define GCC_UBI1_AXI_ARES 95 +#define GCC_UBI1_AHB_ARES 96 +#define GCC_UBI1_NC_AXI_ARES 97 +#define GCC_UBI1_DBG_ARES 98 +#define GCC_UBI1_CORE_CLAMP_ENABLE 99 
+#define GCC_UBI1_CLKRST_CLAMP_ENABLE 100 +#define GCC_NSS_CFG_ARES 101 +#define GCC_NSS_IMEM_ARES 102 +#define GCC_NSS_NOC_ARES 103 +#define GCC_NSS_CRYPTO_ARES 104 +#define GCC_NSS_CSR_ARES 105 +#define GCC_NSS_CE_APB_ARES 106 +#define GCC_NSS_CE_AXI_ARES 107 +#define GCC_NSSNOC_CE_APB_ARES 108 +#define GCC_NSSNOC_CE_AXI_ARES 109 +#define GCC_NSSNOC_UBI0_AHB_ARES 110 +#define GCC_NSSNOC_UBI1_AHB_ARES 111 +#define GCC_NSSNOC_SNOC_ARES 112 +#define GCC_NSSNOC_CRYPTO_ARES 113 +#define GCC_NSSNOC_ATB_ARES 114 +#define GCC_NSSNOC_QOSGEN_REF_ARES 115 +#define GCC_NSSNOC_TIMEOUT_REF_ARES 116 +#define GCC_PCIE0_PIPE_ARES 117 +#define GCC_PCIE0_SLEEP_ARES 118 +#define GCC_PCIE0_CORE_STICKY_ARES 119 +#define GCC_PCIE0_AXI_MASTER_ARES 120 +#define GCC_PCIE0_AXI_SLAVE_ARES 121 +#define GCC_PCIE0_AHB_ARES 122 +#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 123 +#define GCC_PCIE1_PIPE_ARES 124 +#define GCC_PCIE1_SLEEP_ARES 125 +#define GCC_PCIE1_CORE_STICKY_ARES 126 +#define GCC_PCIE1_AXI_MASTER_ARES 127 +#define GCC_PCIE1_AXI_SLAVE_ARES 128 +#define GCC_PCIE1_AHB_ARES 129 +#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h b/include/dt-bindings/clock/qcom,gcc-mdm9615.h new file mode 100644 index 0000000..9e4c348 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) BayLibre, SAS. + * Author : Neil Armstrong + */ + +#ifndef _DT_BINDINGS_CLK_MDM_GCC_9615_H +#define _DT_BINDINGS_CLK_MDM_GCC_9615_H + +#define AFAB_CLK_SRC 0 +#define AFAB_CORE_CLK 1 +#define SFAB_MSS_Q6_SW_A_CLK 2 +#define SFAB_MSS_Q6_FW_A_CLK 3 +#define QDSS_STM_CLK 4 +#define SCSS_A_CLK 5 +#define SCSS_H_CLK 6 +#define SCSS_XO_SRC_CLK 7 +#define AFAB_EBI1_CH0_A_CLK 8 +#define AFAB_EBI1_CH1_A_CLK 9 +#define AFAB_AXI_S0_FCLK 10 +#define AFAB_AXI_S1_FCLK 11 +#define AFAB_AXI_S2_FCLK 12 +#define AFAB_AXI_S3_FCLK 13 +#define AFAB_AXI_S4_FCLK 14 +#define SFAB_CORE_CLK 15 +#define SFAB_AXI_S0_FCLK 16 +#define SFAB_AXI_S1_FCLK 17 +#define SFAB_AXI_S2_FCLK 18 +#define SFAB_AXI_S3_FCLK 19 +#define SFAB_AXI_S4_FCLK 20 +#define SFAB_AHB_S0_FCLK 21 +#define SFAB_AHB_S1_FCLK 22 +#define SFAB_AHB_S2_FCLK 23 +#define SFAB_AHB_S3_FCLK 24 +#define SFAB_AHB_S4_FCLK 25 +#define SFAB_AHB_S5_FCLK 26 +#define SFAB_AHB_S6_FCLK 27 +#define SFAB_AHB_S7_FCLK 28 +#define QDSS_AT_CLK_SRC 29 +#define QDSS_AT_CLK 30 +#define QDSS_TRACECLKIN_CLK_SRC 31 +#define QDSS_TRACECLKIN_CLK 32 +#define QDSS_TSCTR_CLK_SRC 33 +#define QDSS_TSCTR_CLK 34 +#define SFAB_ADM0_M0_A_CLK 35 +#define SFAB_ADM0_M1_A_CLK 36 +#define SFAB_ADM0_M2_H_CLK 37 +#define ADM0_CLK 38 +#define ADM0_PBUS_CLK 39 +#define MSS_XPU_CLK 40 +#define IMEM0_A_CLK 41 +#define QDSS_H_CLK 42 +#define PCIE_A_CLK 43 +#define PCIE_AUX_CLK 44 +#define PCIE_PHY_REF_CLK 45 +#define PCIE_H_CLK 46 +#define SFAB_CLK_SRC 47 +#define MAHB0_CLK 48 +#define Q6SW_CLK_SRC 49 +#define Q6SW_CLK 50 +#define Q6FW_CLK_SRC 51 +#define Q6FW_CLK 52 +#define SFAB_MSS_M_A_CLK 53 +#define SFAB_USB3_M_A_CLK 54 +#define SFAB_LPASS_Q6_A_CLK 55 +#define SFAB_AFAB_M_A_CLK 56 +#define AFAB_SFAB_M0_A_CLK 57 +#define AFAB_SFAB_M1_A_CLK 58 +#define SFAB_SATA_S_H_CLK 59 +#define DFAB_CLK_SRC 60 +#define DFAB_CLK 61 +#define SFAB_DFAB_M_A_CLK 62 +#define DFAB_SFAB_M_A_CLK 63 +#define DFAB_SWAY0_H_CLK 64 +#define DFAB_SWAY1_H_CLK 65 +#define DFAB_ARB0_H_CLK 66 +#define DFAB_ARB1_H_CLK 67 +#define PPSS_H_CLK 68 +#define PPSS_PROC_CLK 
69 +#define PPSS_TIMER0_CLK 70 +#define PPSS_TIMER1_CLK 71 +#define PMEM_A_CLK 72 +#define DMA_BAM_H_CLK 73 +#define SIC_H_CLK 74 +#define SPS_TIC_H_CLK 75 +#define SLIMBUS_H_CLK 76 +#define SLIMBUS_XO_SRC_CLK 77 +#define CFPB_2X_CLK_SRC 78 +#define CFPB_CLK 79 +#define CFPB0_H_CLK 80 +#define CFPB1_H_CLK 81 +#define CFPB2_H_CLK 82 +#define SFAB_CFPB_M_H_CLK 83 +#define CFPB_MASTER_H_CLK 84 +#define SFAB_CFPB_S_H_CLK 85 +#define CFPB_SPLITTER_H_CLK 86 +#define TSIF_H_CLK 87 +#define TSIF_INACTIVITY_TIMERS_CLK 88 +#define TSIF_REF_SRC 89 +#define TSIF_REF_CLK 90 +#define CE1_H_CLK 91 +#define CE1_CORE_CLK 92 +#define CE1_SLEEP_CLK 93 +#define CE2_H_CLK 94 +#define CE2_CORE_CLK 95 +#define SFPB_H_CLK_SRC 97 +#define SFPB_H_CLK 98 +#define SFAB_SFPB_M_H_CLK 99 +#define SFAB_SFPB_S_H_CLK 100 +#define RPM_PROC_CLK 101 +#define RPM_BUS_H_CLK 102 +#define RPM_SLEEP_CLK 103 +#define RPM_TIMER_CLK 104 +#define RPM_MSG_RAM_H_CLK 105 +#define PMIC_ARB0_H_CLK 106 +#define PMIC_ARB1_H_CLK 107 +#define PMIC_SSBI2_SRC 108 +#define PMIC_SSBI2_CLK 109 +#define SDC1_H_CLK 110 +#define SDC2_H_CLK 111 +#define SDC3_H_CLK 112 +#define SDC4_H_CLK 113 +#define SDC5_H_CLK 114 +#define SDC1_SRC 115 +#define SDC2_SRC 116 +#define SDC3_SRC 117 +#define SDC4_SRC 118 +#define SDC5_SRC 119 +#define SDC1_CLK 120 +#define SDC2_CLK 121 +#define SDC3_CLK 122 +#define SDC4_CLK 123 +#define SDC5_CLK 124 +#define DFAB_A2_H_CLK 125 +#define USB_HS1_H_CLK 126 +#define USB_HS1_XCVR_SRC 127 +#define USB_HS1_XCVR_CLK 128 +#define USB_HSIC_H_CLK 129 +#define USB_HSIC_XCVR_FS_SRC 130 +#define USB_HSIC_XCVR_FS_CLK 131 +#define USB_HSIC_SYSTEM_CLK_SRC 132 +#define USB_HSIC_SYSTEM_CLK 133 +#define CFPB0_C0_H_CLK 134 +#define CFPB0_C1_H_CLK 135 +#define CFPB0_D0_H_CLK 136 +#define CFPB0_D1_H_CLK 137 +#define USB_FS1_H_CLK 138 +#define USB_FS1_XCVR_FS_SRC 139 +#define USB_FS1_XCVR_FS_CLK 140 +#define USB_FS1_SYSTEM_CLK 141 +#define USB_FS2_H_CLK 142 +#define USB_FS2_XCVR_FS_SRC 143 +#define USB_FS2_XCVR_FS_CLK 144 +#define USB_FS2_SYSTEM_CLK 145 +#define GSBI_COMMON_SIM_SRC 146 +#define GSBI1_H_CLK 147 +#define GSBI2_H_CLK 148 +#define GSBI3_H_CLK 149 +#define GSBI4_H_CLK 150 +#define GSBI5_H_CLK 151 +#define GSBI6_H_CLK 152 +#define GSBI7_H_CLK 153 +#define GSBI8_H_CLK 154 +#define GSBI9_H_CLK 155 +#define GSBI10_H_CLK 156 +#define GSBI11_H_CLK 157 +#define GSBI12_H_CLK 158 +#define GSBI1_UART_SRC 159 +#define GSBI1_UART_CLK 160 +#define GSBI2_UART_SRC 161 +#define GSBI2_UART_CLK 162 +#define GSBI3_UART_SRC 163 +#define GSBI3_UART_CLK 164 +#define GSBI4_UART_SRC 165 +#define GSBI4_UART_CLK 166 +#define GSBI5_UART_SRC 167 +#define GSBI5_UART_CLK 168 +#define GSBI6_UART_SRC 169 +#define GSBI6_UART_CLK 170 +#define GSBI7_UART_SRC 171 +#define GSBI7_UART_CLK 172 +#define GSBI8_UART_SRC 173 +#define GSBI8_UART_CLK 174 +#define GSBI9_UART_SRC 175 +#define GSBI9_UART_CLK 176 +#define GSBI10_UART_SRC 177 +#define GSBI10_UART_CLK 178 +#define GSBI11_UART_SRC 179 +#define GSBI11_UART_CLK 180 +#define GSBI12_UART_SRC 181 +#define GSBI12_UART_CLK 182 +#define GSBI1_QUP_SRC 183 +#define GSBI1_QUP_CLK 184 +#define GSBI2_QUP_SRC 185 +#define GSBI2_QUP_CLK 186 +#define GSBI3_QUP_SRC 187 +#define GSBI3_QUP_CLK 188 +#define GSBI4_QUP_SRC 189 +#define GSBI4_QUP_CLK 190 +#define GSBI5_QUP_SRC 191 +#define GSBI5_QUP_CLK 192 +#define GSBI6_QUP_SRC 193 +#define GSBI6_QUP_CLK 194 +#define GSBI7_QUP_SRC 195 +#define GSBI7_QUP_CLK 196 +#define GSBI8_QUP_SRC 197 +#define GSBI8_QUP_CLK 198 +#define GSBI9_QUP_SRC 199 +#define GSBI9_QUP_CLK 200 +#define 
GSBI10_QUP_SRC 201 +#define GSBI10_QUP_CLK 202 +#define GSBI11_QUP_SRC 203 +#define GSBI11_QUP_CLK 204 +#define GSBI12_QUP_SRC 205 +#define GSBI12_QUP_CLK 206 +#define GSBI1_SIM_CLK 207 +#define GSBI2_SIM_CLK 208 +#define GSBI3_SIM_CLK 209 +#define GSBI4_SIM_CLK 210 +#define GSBI5_SIM_CLK 211 +#define GSBI6_SIM_CLK 212 +#define GSBI7_SIM_CLK 213 +#define GSBI8_SIM_CLK 214 +#define GSBI9_SIM_CLK 215 +#define GSBI10_SIM_CLK 216 +#define GSBI11_SIM_CLK 217 +#define GSBI12_SIM_CLK 218 +#define USB_HSIC_HSIC_CLK_SRC 219 +#define USB_HSIC_HSIC_CLK 220 +#define USB_HSIC_HSIO_CAL_CLK 221 +#define SPDM_CFG_H_CLK 222 +#define SPDM_MSTR_H_CLK 223 +#define SPDM_FF_CLK_SRC 224 +#define SPDM_FF_CLK 225 +#define SEC_CTRL_CLK 226 +#define SEC_CTRL_ACC_CLK_SRC 227 +#define SEC_CTRL_ACC_CLK 228 +#define TLMM_H_CLK 229 +#define TLMM_CLK 230 +#define SFAB_MSS_S_H_CLK 231 +#define MSS_SLP_CLK 232 +#define MSS_Q6SW_JTAG_CLK 233 +#define MSS_Q6FW_JTAG_CLK 234 +#define MSS_S_H_CLK 235 +#define MSS_CXO_SRC_CLK 236 +#define SATA_H_CLK 237 +#define SATA_CLK_SRC 238 +#define SATA_RXOOB_CLK 239 +#define SATA_PMALIVE_CLK 240 +#define SATA_PHY_REF_CLK 241 +#define TSSC_CLK_SRC 242 +#define TSSC_CLK 243 +#define PDM_SRC 244 +#define PDM_CLK 245 +#define GP0_SRC 246 +#define GP0_CLK 247 +#define GP1_SRC 248 +#define GP1_CLK 249 +#define GP2_SRC 250 +#define GP2_CLK 251 +#define MPM_CLK 252 +#define EBI1_CLK_SRC 253 +#define EBI1_CH0_CLK 254 +#define EBI1_CH1_CLK 255 +#define EBI1_2X_CLK 256 +#define EBI1_CH0_DQ_CLK 257 +#define EBI1_CH1_DQ_CLK 258 +#define EBI1_CH0_CA_CLK 259 +#define EBI1_CH1_CA_CLK 260 +#define EBI1_XO_CLK 261 +#define SFAB_SMPSS_S_H_CLK 262 +#define PRNG_SRC 263 +#define PRNG_CLK 264 +#define PXO_SRC 265 +#define LPASS_CXO_CLK 266 +#define LPASS_PXO_CLK 267 +#define SPDM_CY_PORT0_CLK 268 +#define SPDM_CY_PORT1_CLK 269 +#define SPDM_CY_PORT2_CLK 270 +#define SPDM_CY_PORT3_CLK 271 +#define SPDM_CY_PORT4_CLK 272 +#define SPDM_CY_PORT5_CLK 273 +#define SPDM_CY_PORT6_CLK 274 +#define SPDM_CY_PORT7_CLK 275 +#define PLL0 276 +#define PLL0_VOTE 277 +#define PLL3 278 +#define PLL3_VOTE 279 +#define PLL4_VOTE 280 +#define PLL5 281 +#define PLL5_VOTE 282 +#define PLL6 283 +#define PLL6_VOTE 284 +#define PLL7_VOTE 285 +#define PLL8 286 +#define PLL8_VOTE 287 +#define PLL9 288 +#define PLL10 289 +#define PLL11 290 +#define PLL12 291 +#define PLL13 292 +#define PLL14 293 +#define PLL14_VOTE 294 +#define USB_HS3_H_CLK 295 +#define USB_HS3_XCVR_SRC 296 +#define USB_HS3_XCVR_CLK 297 +#define USB_HS4_H_CLK 298 +#define USB_HS4_XCVR_SRC 299 +#define USB_HS4_XCVR_CLK 300 +#define SATA_PHY_CFG_CLK 301 +#define SATA_A_CLK 302 +#define CE3_SRC 303 +#define CE3_CORE_CLK 304 +#define CE3_H_CLK 305 +#define USB_HS1_SYSTEM_CLK_SRC 306 +#define USB_HS1_SYSTEM_CLK 307 +#define EBI2_CLK 308 +#define EBI2_AON_CLK 309 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8660.h b/include/dt-bindings/clock/qcom,gcc-msm8660.h new file mode 100644 index 0000000..4777c00 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8660.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8660_H +#define _DT_BINDINGS_CLK_MSM_GCC_8660_H + +#define AFAB_CLK_SRC 0 +#define AFAB_CORE_CLK 1 +#define SCSS_A_CLK 2 +#define SCSS_H_CLK 3 +#define SCSS_XO_SRC_CLK 4 +#define AFAB_EBI1_CH0_A_CLK 5 +#define AFAB_EBI1_CH1_A_CLK 6 +#define AFAB_AXI_S0_FCLK 7 +#define AFAB_AXI_S1_FCLK 8 +#define AFAB_AXI_S2_FCLK 9 +#define AFAB_AXI_S3_FCLK 10 +#define AFAB_AXI_S4_FCLK 11 +#define SFAB_CORE_CLK 12 +#define SFAB_AXI_S0_FCLK 13 +#define SFAB_AXI_S1_FCLK 14 +#define SFAB_AXI_S2_FCLK 15 +#define SFAB_AXI_S3_FCLK 16 +#define SFAB_AXI_S4_FCLK 17 +#define SFAB_AHB_S0_FCLK 18 +#define SFAB_AHB_S1_FCLK 19 +#define SFAB_AHB_S2_FCLK 20 +#define SFAB_AHB_S3_FCLK 21 +#define SFAB_AHB_S4_FCLK 22 +#define SFAB_AHB_S5_FCLK 23 +#define SFAB_AHB_S6_FCLK 24 +#define SFAB_ADM0_M0_A_CLK 25 +#define SFAB_ADM0_M1_A_CLK 26 +#define SFAB_ADM0_M2_A_CLK 27 +#define ADM0_CLK 28 +#define ADM0_PBUS_CLK 29 +#define SFAB_ADM1_M0_A_CLK 30 +#define SFAB_ADM1_M1_A_CLK 31 +#define SFAB_ADM1_M2_A_CLK 32 +#define MMFAB_ADM1_M3_A_CLK 33 +#define ADM1_CLK 34 +#define ADM1_PBUS_CLK 35 +#define IMEM0_A_CLK 36 +#define MAHB0_CLK 37 +#define SFAB_LPASS_Q6_A_CLK 38 +#define SFAB_AFAB_M_A_CLK 39 +#define AFAB_SFAB_M0_A_CLK 40 +#define AFAB_SFAB_M1_A_CLK 41 +#define DFAB_CLK_SRC 42 +#define DFAB_CLK 43 +#define DFAB_CORE_CLK 44 +#define SFAB_DFAB_M_A_CLK 45 +#define DFAB_SFAB_M_A_CLK 46 +#define DFAB_SWAY0_H_CLK 47 +#define DFAB_SWAY1_H_CLK 48 +#define DFAB_ARB0_H_CLK 49 +#define DFAB_ARB1_H_CLK 50 +#define PPSS_H_CLK 51 +#define PPSS_PROC_CLK 52 +#define PPSS_TIMER0_CLK 53 +#define PPSS_TIMER1_CLK 54 +#define PMEM_A_CLK 55 +#define DMA_BAM_H_CLK 56 +#define SIC_H_CLK 57 +#define SPS_TIC_H_CLK 58 +#define SLIMBUS_H_CLK 59 +#define SLIMBUS_XO_SRC_CLK 60 +#define CFPB_2X_CLK_SRC 61 +#define CFPB_CLK 62 +#define CFPB0_H_CLK 63 +#define CFPB1_H_CLK 64 +#define CFPB2_H_CLK 65 +#define EBI2_2X_CLK 66 +#define EBI2_CLK 67 +#define SFAB_CFPB_M_H_CLK 68 +#define CFPB_MASTER_H_CLK 69 +#define SFAB_CFPB_S_HCLK 70 +#define CFPB_SPLITTER_H_CLK 71 +#define TSIF_H_CLK 72 +#define TSIF_INACTIVITY_TIMERS_CLK 73 +#define TSIF_REF_SRC 74 +#define TSIF_REF_CLK 75 +#define CE1_H_CLK 76 +#define CE2_H_CLK 77 +#define SFPB_H_CLK_SRC 78 +#define SFPB_H_CLK 79 +#define SFAB_SFPB_M_H_CLK 80 +#define SFAB_SFPB_S_H_CLK 81 +#define RPM_PROC_CLK 82 +#define RPM_BUS_H_CLK 83 +#define RPM_SLEEP_CLK 84 +#define RPM_TIMER_CLK 85 +#define MODEM_AHB1_H_CLK 86 +#define MODEM_AHB2_H_CLK 87 +#define RPM_MSG_RAM_H_CLK 88 +#define SC_H_CLK 89 +#define SC_A_CLK 90 +#define PMIC_ARB0_H_CLK 91 +#define PMIC_ARB1_H_CLK 92 +#define PMIC_SSBI2_SRC 93 +#define PMIC_SSBI2_CLK 94 +#define SDC1_H_CLK 95 +#define SDC2_H_CLK 96 +#define SDC3_H_CLK 97 +#define SDC4_H_CLK 98 +#define SDC5_H_CLK 99 +#define SDC1_SRC 100 +#define SDC2_SRC 101 +#define SDC3_SRC 102 +#define SDC4_SRC 103 +#define SDC5_SRC 104 +#define SDC1_CLK 105 +#define SDC2_CLK 106 +#define SDC3_CLK 107 +#define SDC4_CLK 108 +#define SDC5_CLK 109 +#define USB_HS1_H_CLK 110 +#define USB_HS1_XCVR_SRC 111 +#define USB_HS1_XCVR_CLK 112 +#define USB_HS2_H_CLK 113 +#define USB_HS2_XCVR_SRC 114 +#define USB_HS2_XCVR_CLK 115 +#define USB_FS1_H_CLK 116 +#define USB_FS1_XCVR_FS_SRC 117 +#define USB_FS1_XCVR_FS_CLK 118 +#define USB_FS1_SYSTEM_CLK 119 +#define USB_FS2_H_CLK 120 +#define USB_FS2_XCVR_FS_SRC 121 +#define USB_FS2_XCVR_FS_CLK 122 +#define USB_FS2_SYSTEM_CLK 123 +#define GSBI_COMMON_SIM_SRC 124 +#define GSBI1_H_CLK 125 +#define GSBI2_H_CLK 126 +#define GSBI3_H_CLK 127 +#define 
GSBI4_H_CLK 128 +#define GSBI5_H_CLK 129 +#define GSBI6_H_CLK 130 +#define GSBI7_H_CLK 131 +#define GSBI8_H_CLK 132 +#define GSBI9_H_CLK 133 +#define GSBI10_H_CLK 134 +#define GSBI11_H_CLK 135 +#define GSBI12_H_CLK 136 +#define GSBI1_UART_SRC 137 +#define GSBI1_UART_CLK 138 +#define GSBI2_UART_SRC 139 +#define GSBI2_UART_CLK 140 +#define GSBI3_UART_SRC 141 +#define GSBI3_UART_CLK 142 +#define GSBI4_UART_SRC 143 +#define GSBI4_UART_CLK 144 +#define GSBI5_UART_SRC 145 +#define GSBI5_UART_CLK 146 +#define GSBI6_UART_SRC 147 +#define GSBI6_UART_CLK 148 +#define GSBI7_UART_SRC 149 +#define GSBI7_UART_CLK 150 +#define GSBI8_UART_SRC 151 +#define GSBI8_UART_CLK 152 +#define GSBI9_UART_SRC 153 +#define GSBI9_UART_CLK 154 +#define GSBI10_UART_SRC 155 +#define GSBI10_UART_CLK 156 +#define GSBI11_UART_SRC 157 +#define GSBI11_UART_CLK 158 +#define GSBI12_UART_SRC 159 +#define GSBI12_UART_CLK 160 +#define GSBI1_QUP_SRC 161 +#define GSBI1_QUP_CLK 162 +#define GSBI2_QUP_SRC 163 +#define GSBI2_QUP_CLK 164 +#define GSBI3_QUP_SRC 165 +#define GSBI3_QUP_CLK 166 +#define GSBI4_QUP_SRC 167 +#define GSBI4_QUP_CLK 168 +#define GSBI5_QUP_SRC 169 +#define GSBI5_QUP_CLK 170 +#define GSBI6_QUP_SRC 171 +#define GSBI6_QUP_CLK 172 +#define GSBI7_QUP_SRC 173 +#define GSBI7_QUP_CLK 174 +#define GSBI8_QUP_SRC 175 +#define GSBI8_QUP_CLK 176 +#define GSBI9_QUP_SRC 177 +#define GSBI9_QUP_CLK 178 +#define GSBI10_QUP_SRC 179 +#define GSBI10_QUP_CLK 180 +#define GSBI11_QUP_SRC 181 +#define GSBI11_QUP_CLK 182 +#define GSBI12_QUP_SRC 183 +#define GSBI12_QUP_CLK 184 +#define GSBI1_SIM_CLK 185 +#define GSBI2_SIM_CLK 186 +#define GSBI3_SIM_CLK 187 +#define GSBI4_SIM_CLK 188 +#define GSBI5_SIM_CLK 189 +#define GSBI6_SIM_CLK 190 +#define GSBI7_SIM_CLK 191 +#define GSBI8_SIM_CLK 192 +#define GSBI9_SIM_CLK 193 +#define GSBI10_SIM_CLK 194 +#define GSBI11_SIM_CLK 195 +#define GSBI12_SIM_CLK 196 +#define SPDM_CFG_H_CLK 197 +#define SPDM_MSTR_H_CLK 198 +#define SPDM_FF_CLK_SRC 199 +#define SPDM_FF_CLK 200 +#define SEC_CTRL_CLK 201 +#define SEC_CTRL_ACC_CLK_SRC 202 +#define SEC_CTRL_ACC_CLK 203 +#define TLMM_H_CLK 204 +#define TLMM_CLK 205 +#define MARM_CLK_SRC 206 +#define MARM_CLK 207 +#define MAHB1_SRC 208 +#define MAHB1_CLK 209 +#define SFAB_MSS_S_H_CLK 210 +#define MAHB2_SRC 211 +#define MAHB2_CLK 212 +#define MSS_MODEM_CLK_SRC 213 +#define MSS_MODEM_CXO_CLK 214 +#define MSS_SLP_CLK 215 +#define MSS_SYS_REF_CLK 216 +#define TSSC_CLK_SRC 217 +#define TSSC_CLK 218 +#define PDM_SRC 219 +#define PDM_CLK 220 +#define GP0_SRC 221 +#define GP0_CLK 222 +#define GP1_SRC 223 +#define GP1_CLK 224 +#define GP2_SRC 225 +#define GP2_CLK 226 +#define PMEM_CLK 227 +#define MPM_CLK 228 +#define EBI1_ASFAB_SRC 229 +#define EBI1_CLK_SRC 230 +#define EBI1_CH0_CLK 231 +#define EBI1_CH1_CLK 232 +#define SFAB_SMPSS_S_H_CLK 233 +#define PRNG_SRC 234 +#define PRNG_CLK 235 +#define PXO_SRC 236 +#define LPASS_CXO_CLK 237 +#define LPASS_PXO_CLK 238 +#define SPDM_CY_PORT0_CLK 239 +#define SPDM_CY_PORT1_CLK 240 +#define SPDM_CY_PORT2_CLK 241 +#define SPDM_CY_PORT3_CLK 242 +#define SPDM_CY_PORT4_CLK 243 +#define SPDM_CY_PORT5_CLK 244 +#define SPDM_CY_PORT6_CLK 245 +#define SPDM_CY_PORT7_CLK 246 +#define PLL0 247 +#define PLL0_VOTE 248 +#define PLL5 249 +#define PLL6 250 +#define PLL6_VOTE 251 +#define PLL8 252 +#define PLL8_VOTE 253 +#define PLL9 254 +#define PLL10 255 +#define PLL11 256 +#define PLL12 257 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h new file mode 100644 index 0000000..5630344 --- 
/dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2015 Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8916_H +#define _DT_BINDINGS_CLK_MSM_GCC_8916_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define BIMC_PLL 2 +#define BIMC_PLL_VOTE 3 +#define GPLL1 4 +#define GPLL1_VOTE 5 +#define GPLL2 6 +#define GPLL2_VOTE 7 +#define PCNOC_BFDCD_CLK_SRC 8 +#define SYSTEM_NOC_BFDCD_CLK_SRC 9 +#define CAMSS_AHB_CLK_SRC 10 +#define APSS_AHB_CLK_SRC 11 +#define CSI0_CLK_SRC 12 +#define CSI1_CLK_SRC 13 +#define GFX3D_CLK_SRC 14 +#define VFE0_CLK_SRC 15 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27 +#define BLSP1_UART1_APPS_CLK_SRC 28 +#define BLSP1_UART2_APPS_CLK_SRC 29 +#define CCI_CLK_SRC 30 +#define CAMSS_GP0_CLK_SRC 31 +#define CAMSS_GP1_CLK_SRC 32 +#define JPEG0_CLK_SRC 33 +#define MCLK0_CLK_SRC 34 +#define MCLK1_CLK_SRC 35 +#define CSI0PHYTIMER_CLK_SRC 36 +#define CSI1PHYTIMER_CLK_SRC 37 +#define CPP_CLK_SRC 38 +#define CRYPTO_CLK_SRC 39 +#define GP1_CLK_SRC 40 +#define GP2_CLK_SRC 41 +#define GP3_CLK_SRC 42 +#define BYTE0_CLK_SRC 43 +#define ESC0_CLK_SRC 44 +#define MDP_CLK_SRC 45 +#define PCLK0_CLK_SRC 46 +#define VSYNC_CLK_SRC 47 +#define PDM2_CLK_SRC 48 +#define SDCC1_APPS_CLK_SRC 49 +#define SDCC2_APPS_CLK_SRC 50 +#define APSS_TCU_CLK_SRC 51 +#define USB_HS_SYSTEM_CLK_SRC 52 +#define VCODEC0_CLK_SRC 53 +#define GCC_BLSP1_AHB_CLK 54 +#define GCC_BLSP1_SLEEP_CLK 55 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67 +#define GCC_BLSP1_UART1_APPS_CLK 68 +#define GCC_BLSP1_UART2_APPS_CLK 69 +#define GCC_BOOT_ROM_AHB_CLK 70 +#define GCC_CAMSS_CCI_AHB_CLK 71 +#define GCC_CAMSS_CCI_CLK 72 +#define GCC_CAMSS_CSI0_AHB_CLK 73 +#define GCC_CAMSS_CSI0_CLK 74 +#define GCC_CAMSS_CSI0PHY_CLK 75 +#define GCC_CAMSS_CSI0PIX_CLK 76 +#define GCC_CAMSS_CSI0RDI_CLK 77 +#define GCC_CAMSS_CSI1_AHB_CLK 78 +#define GCC_CAMSS_CSI1_CLK 79 +#define GCC_CAMSS_CSI1PHY_CLK 80 +#define GCC_CAMSS_CSI1PIX_CLK 81 +#define GCC_CAMSS_CSI1RDI_CLK 82 +#define GCC_CAMSS_CSI_VFE0_CLK 83 +#define GCC_CAMSS_GP0_CLK 84 +#define GCC_CAMSS_GP1_CLK 85 +#define GCC_CAMSS_ISPIF_AHB_CLK 86 +#define GCC_CAMSS_JPEG0_CLK 87 +#define GCC_CAMSS_JPEG_AHB_CLK 88 +#define GCC_CAMSS_JPEG_AXI_CLK 89 +#define GCC_CAMSS_MCLK0_CLK 90 +#define GCC_CAMSS_MCLK1_CLK 91 +#define GCC_CAMSS_MICRO_AHB_CLK 92 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 93 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 94 +#define GCC_CAMSS_AHB_CLK 95 +#define GCC_CAMSS_TOP_AHB_CLK 96 +#define GCC_CAMSS_CPP_AHB_CLK 97 +#define GCC_CAMSS_CPP_CLK 98 +#define GCC_CAMSS_VFE0_CLK 99 +#define GCC_CAMSS_VFE_AHB_CLK 100 +#define GCC_CAMSS_VFE_AXI_CLK 101 +#define 
GCC_CRYPTO_AHB_CLK 102 +#define GCC_CRYPTO_AXI_CLK 103 +#define GCC_CRYPTO_CLK 104 +#define GCC_OXILI_GMEM_CLK 105 +#define GCC_GP1_CLK 106 +#define GCC_GP2_CLK 107 +#define GCC_GP3_CLK 108 +#define GCC_MDSS_AHB_CLK 109 +#define GCC_MDSS_AXI_CLK 110 +#define GCC_MDSS_BYTE0_CLK 111 +#define GCC_MDSS_ESC0_CLK 112 +#define GCC_MDSS_MDP_CLK 113 +#define GCC_MDSS_PCLK0_CLK 114 +#define GCC_MDSS_VSYNC_CLK 115 +#define GCC_MSS_CFG_AHB_CLK 116 +#define GCC_OXILI_AHB_CLK 117 +#define GCC_OXILI_GFX3D_CLK 118 +#define GCC_PDM2_CLK 119 +#define GCC_PDM_AHB_CLK 120 +#define GCC_PRNG_AHB_CLK 121 +#define GCC_SDCC1_AHB_CLK 122 +#define GCC_SDCC1_APPS_CLK 123 +#define GCC_SDCC2_AHB_CLK 124 +#define GCC_SDCC2_APPS_CLK 125 +#define GCC_GTCU_AHB_CLK 126 +#define GCC_JPEG_TBU_CLK 127 +#define GCC_MDP_TBU_CLK 128 +#define GCC_SMMU_CFG_CLK 129 +#define GCC_VENUS_TBU_CLK 130 +#define GCC_VFE_TBU_CLK 131 +#define GCC_USB2A_PHY_SLEEP_CLK 132 +#define GCC_USB_HS_AHB_CLK 133 +#define GCC_USB_HS_SYSTEM_CLK 134 +#define GCC_VENUS0_AHB_CLK 135 +#define GCC_VENUS0_AXI_CLK 136 +#define GCC_VENUS0_VCODEC0_CLK 137 +#define BIMC_DDR_CLK_SRC 138 +#define GCC_APSS_TCU_CLK 139 +#define GCC_GFX_TCU_CLK 140 +#define BIMC_GPU_CLK_SRC 141 +#define GCC_BIMC_GFX_CLK 142 +#define GCC_BIMC_GPU_CLK 143 +#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144 +#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145 +#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146 +#define ULTAUDIO_XO_CLK_SRC 147 +#define ULTAUDIO_AHBFABRIC_CLK_SRC 148 +#define CODEC_DIGCODEC_CLK_SRC 149 +#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150 +#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151 +#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152 +#define GCC_ULTAUDIO_STC_XO_CLK 153 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155 +#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156 +#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157 +#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158 +#define GCC_CODEC_DIGCODEC_CLK 159 +#define GCC_MSS_Q6_BIMC_AXI_CLK 160 + +/* Indexes for GDSCs */ +#define BIMC_GDSC 0 +#define VENUS_GDSC 1 +#define MDSS_GDSC 2 +#define JPEG_GDSC 3 +#define VFE_GDSC 4 +#define OXILI_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h new file mode 100644 index 0000000..950b828 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8960_H +#define _DT_BINDINGS_CLK_MSM_GCC_8960_H + +#define AFAB_CLK_SRC 0 +#define AFAB_CORE_CLK 1 +#define SFAB_MSS_Q6_SW_A_CLK 2 +#define SFAB_MSS_Q6_FW_A_CLK 3 +#define QDSS_STM_CLK 4 +#define SCSS_A_CLK 5 +#define SCSS_H_CLK 6 +#define SCSS_XO_SRC_CLK 7 +#define AFAB_EBI1_CH0_A_CLK 8 +#define AFAB_EBI1_CH1_A_CLK 9 +#define AFAB_AXI_S0_FCLK 10 +#define AFAB_AXI_S1_FCLK 11 +#define AFAB_AXI_S2_FCLK 12 +#define AFAB_AXI_S3_FCLK 13 +#define AFAB_AXI_S4_FCLK 14 +#define SFAB_CORE_CLK 15 +#define SFAB_AXI_S0_FCLK 16 +#define SFAB_AXI_S1_FCLK 17 +#define SFAB_AXI_S2_FCLK 18 +#define SFAB_AXI_S3_FCLK 19 +#define SFAB_AXI_S4_FCLK 20 +#define SFAB_AHB_S0_FCLK 21 +#define SFAB_AHB_S1_FCLK 22 +#define SFAB_AHB_S2_FCLK 23 +#define SFAB_AHB_S3_FCLK 24 +#define SFAB_AHB_S4_FCLK 25 +#define SFAB_AHB_S5_FCLK 26 +#define SFAB_AHB_S6_FCLK 27 +#define SFAB_AHB_S7_FCLK 28 +#define QDSS_AT_CLK_SRC 29 +#define QDSS_AT_CLK 30 +#define QDSS_TRACECLKIN_CLK_SRC 31 +#define QDSS_TRACECLKIN_CLK 32 +#define QDSS_TSCTR_CLK_SRC 33 +#define QDSS_TSCTR_CLK 34 +#define SFAB_ADM0_M0_A_CLK 35 +#define SFAB_ADM0_M1_A_CLK 36 +#define SFAB_ADM0_M2_H_CLK 37 +#define ADM0_CLK 38 +#define ADM0_PBUS_CLK 39 +#define MSS_XPU_CLK 40 +#define IMEM0_A_CLK 41 +#define QDSS_H_CLK 42 +#define PCIE_A_CLK 43 +#define PCIE_AUX_CLK 44 +#define PCIE_PHY_REF_CLK 45 +#define PCIE_H_CLK 46 +#define SFAB_CLK_SRC 47 +#define MAHB0_CLK 48 +#define Q6SW_CLK_SRC 49 +#define Q6SW_CLK 50 +#define Q6FW_CLK_SRC 51 +#define Q6FW_CLK 52 +#define SFAB_MSS_M_A_CLK 53 +#define SFAB_USB3_M_A_CLK 54 +#define SFAB_LPASS_Q6_A_CLK 55 +#define SFAB_AFAB_M_A_CLK 56 +#define AFAB_SFAB_M0_A_CLK 57 +#define AFAB_SFAB_M1_A_CLK 58 +#define SFAB_SATA_S_H_CLK 59 +#define DFAB_CLK_SRC 60 +#define DFAB_CLK 61 +#define SFAB_DFAB_M_A_CLK 62 +#define DFAB_SFAB_M_A_CLK 63 +#define DFAB_SWAY0_H_CLK 64 +#define DFAB_SWAY1_H_CLK 65 +#define DFAB_ARB0_H_CLK 66 +#define DFAB_ARB1_H_CLK 67 +#define PPSS_H_CLK 68 +#define PPSS_PROC_CLK 69 +#define PPSS_TIMER0_CLK 70 +#define PPSS_TIMER1_CLK 71 +#define PMEM_A_CLK 72 +#define DMA_BAM_H_CLK 73 +#define SIC_H_CLK 74 +#define SPS_TIC_H_CLK 75 +#define SLIMBUS_H_CLK 76 +#define SLIMBUS_XO_SRC_CLK 77 +#define CFPB_2X_CLK_SRC 78 +#define CFPB_CLK 79 +#define CFPB0_H_CLK 80 +#define CFPB1_H_CLK 81 +#define CFPB2_H_CLK 82 +#define SFAB_CFPB_M_H_CLK 83 +#define CFPB_MASTER_H_CLK 84 +#define SFAB_CFPB_S_H_CLK 85 +#define CFPB_SPLITTER_H_CLK 86 +#define TSIF_H_CLK 87 +#define TSIF_INACTIVITY_TIMERS_CLK 88 +#define TSIF_REF_SRC 89 +#define TSIF_REF_CLK 90 +#define CE1_H_CLK 91 +#define CE1_CORE_CLK 92 +#define CE1_SLEEP_CLK 93 +#define CE2_H_CLK 94 +#define CE2_CORE_CLK 95 +#define SFPB_H_CLK_SRC 97 +#define SFPB_H_CLK 98 +#define SFAB_SFPB_M_H_CLK 99 +#define SFAB_SFPB_S_H_CLK 100 +#define RPM_PROC_CLK 101 +#define RPM_BUS_H_CLK 102 +#define RPM_SLEEP_CLK 103 +#define RPM_TIMER_CLK 104 +#define RPM_MSG_RAM_H_CLK 105 +#define PMIC_ARB0_H_CLK 106 +#define PMIC_ARB1_H_CLK 107 +#define PMIC_SSBI2_SRC 108 +#define PMIC_SSBI2_CLK 109 +#define SDC1_H_CLK 110 +#define SDC2_H_CLK 111 +#define SDC3_H_CLK 112 +#define SDC4_H_CLK 113 +#define SDC5_H_CLK 114 +#define SDC1_SRC 115 +#define SDC2_SRC 116 +#define SDC3_SRC 117 +#define SDC4_SRC 118 +#define SDC5_SRC 119 +#define SDC1_CLK 120 +#define SDC2_CLK 121 +#define SDC3_CLK 122 +#define SDC4_CLK 123 +#define SDC5_CLK 124 +#define DFAB_A2_H_CLK 125 +#define USB_HS1_H_CLK 126 +#define USB_HS1_XCVR_SRC 127 +#define USB_HS1_XCVR_CLK 128 +#define USB_HSIC_H_CLK 129 
+#define USB_HSIC_XCVR_FS_SRC 130
+#define USB_HSIC_XCVR_FS_CLK 131
+#define USB_HSIC_SYSTEM_CLK_SRC 132
+#define USB_HSIC_SYSTEM_CLK 133
+#define CFPB0_C0_H_CLK 134
+#define CFPB0_C1_H_CLK 135
+#define CFPB0_D0_H_CLK 136
+#define CFPB0_D1_H_CLK 137
+#define USB_FS1_H_CLK 138
+#define USB_FS1_XCVR_FS_SRC 139
+#define USB_FS1_XCVR_FS_CLK 140
+#define USB_FS1_SYSTEM_CLK 141
+#define USB_FS2_H_CLK 142
+#define USB_FS2_XCVR_FS_SRC 143
+#define USB_FS2_XCVR_FS_CLK 144
+#define USB_FS2_SYSTEM_CLK 145
+#define GSBI_COMMON_SIM_SRC 146
+#define GSBI1_H_CLK 147
+#define GSBI2_H_CLK 148
+#define GSBI3_H_CLK 149
+#define GSBI4_H_CLK 150
+#define GSBI5_H_CLK 151
+#define GSBI6_H_CLK 152
+#define GSBI7_H_CLK 153
+#define GSBI8_H_CLK 154
+#define GSBI9_H_CLK 155
+#define GSBI10_H_CLK 156
+#define GSBI11_H_CLK 157
+#define GSBI12_H_CLK 158
+#define GSBI1_UART_SRC 159
+#define GSBI1_UART_CLK 160
+#define GSBI2_UART_SRC 161
+#define GSBI2_UART_CLK 162
+#define GSBI3_UART_SRC 163
+#define GSBI3_UART_CLK 164
+#define GSBI4_UART_SRC 165
+#define GSBI4_UART_CLK 166
+#define GSBI5_UART_SRC 167
+#define GSBI5_UART_CLK 168
+#define GSBI6_UART_SRC 169
+#define GSBI6_UART_CLK 170
+#define GSBI7_UART_SRC 171
+#define GSBI7_UART_CLK 172
+#define GSBI8_UART_SRC 173
+#define GSBI8_UART_CLK 174
+#define GSBI9_UART_SRC 175
+#define GSBI9_UART_CLK 176
+#define GSBI10_UART_SRC 177
+#define GSBI10_UART_CLK 178
+#define GSBI11_UART_SRC 179
+#define GSBI11_UART_CLK 180
+#define GSBI12_UART_SRC 181
+#define GSBI12_UART_CLK 182
+#define GSBI1_QUP_SRC 183
+#define GSBI1_QUP_CLK 184
+#define GSBI2_QUP_SRC 185
+#define GSBI2_QUP_CLK 186
+#define GSBI3_QUP_SRC 187
+#define GSBI3_QUP_CLK 188
+#define GSBI4_QUP_SRC 189
+#define GSBI4_QUP_CLK 190
+#define GSBI5_QUP_SRC 191
+#define GSBI5_QUP_CLK 192
+#define GSBI6_QUP_SRC 193
+#define GSBI6_QUP_CLK 194
+#define GSBI7_QUP_SRC 195
+#define GSBI7_QUP_CLK 196
+#define GSBI8_QUP_SRC 197
+#define GSBI8_QUP_CLK 198
+#define GSBI9_QUP_SRC 199
+#define GSBI9_QUP_CLK 200
+#define GSBI10_QUP_SRC 201
+#define GSBI10_QUP_CLK 202
+#define GSBI11_QUP_SRC 203
+#define GSBI11_QUP_CLK 204
+#define GSBI12_QUP_SRC 205
+#define GSBI12_QUP_CLK 206
+#define GSBI1_SIM_CLK 207
+#define GSBI2_SIM_CLK 208
+#define GSBI3_SIM_CLK 209
+#define GSBI4_SIM_CLK 210
+#define GSBI5_SIM_CLK 211
+#define GSBI6_SIM_CLK 212
+#define GSBI7_SIM_CLK 213
+#define GSBI8_SIM_CLK 214
+#define GSBI9_SIM_CLK 215
+#define GSBI10_SIM_CLK 216
+#define GSBI11_SIM_CLK 217
+#define GSBI12_SIM_CLK 218
+#define USB_HSIC_HSIC_CLK_SRC 219
+#define USB_HSIC_HSIC_CLK 220
+#define USB_HSIC_HSIO_CAL_CLK 221
+#define SPDM_CFG_H_CLK 222
+#define SPDM_MSTR_H_CLK 223
+#define SPDM_FF_CLK_SRC 224
+#define SPDM_FF_CLK 225
+#define SEC_CTRL_CLK 226
+#define SEC_CTRL_ACC_CLK_SRC 227
+#define SEC_CTRL_ACC_CLK 228
+#define TLMM_H_CLK 229
+#define TLMM_CLK 230
+#define SFAB_MSS_S_H_CLK 231
+#define MSS_SLP_CLK 232
+#define MSS_Q6SW_JTAG_CLK 233
+#define MSS_Q6FW_JTAG_CLK 234
+#define MSS_S_H_CLK 235
+#define MSS_CXO_SRC_CLK 236
+#define SATA_H_CLK 237
+#define SATA_CLK_SRC 238
+#define SATA_RXOOB_CLK 239
+#define SATA_PMALIVE_CLK 240
+#define SATA_PHY_REF_CLK 241
+#define TSSC_CLK_SRC 242
+#define TSSC_CLK 243
+#define PDM_SRC 244
+#define PDM_CLK 245
+#define GP0_SRC 246
+#define GP0_CLK 247
+#define GP1_SRC 248
+#define GP1_CLK 249
+#define GP2_SRC 250
+#define GP2_CLK 251
+#define MPM_CLK 252
+#define EBI1_CLK_SRC 253
+#define EBI1_CH0_CLK 254
+#define EBI1_CH1_CLK 255
+#define EBI1_2X_CLK 256
+#define EBI1_CH0_DQ_CLK 257
+#define EBI1_CH1_DQ_CLK 258
+#define EBI1_CH0_CA_CLK 259
+#define EBI1_CH1_CA_CLK 260
+#define EBI1_XO_CLK 261
+#define SFAB_SMPSS_S_H_CLK 262
+#define PRNG_SRC 263
+#define PRNG_CLK 264
+#define PXO_SRC 265
+#define LPASS_CXO_CLK 266
+#define LPASS_PXO_CLK 267
+#define SPDM_CY_PORT0_CLK 268
+#define SPDM_CY_PORT1_CLK 269
+#define SPDM_CY_PORT2_CLK 270
+#define SPDM_CY_PORT3_CLK 271
+#define SPDM_CY_PORT4_CLK 272
+#define SPDM_CY_PORT5_CLK 273
+#define SPDM_CY_PORT6_CLK 274
+#define SPDM_CY_PORT7_CLK 275
+#define PLL0 276
+#define PLL0_VOTE 277
+#define PLL3 278
+#define PLL3_VOTE 279
+#define PLL4_VOTE 280
+#define PLL5 281
+#define PLL5_VOTE 282
+#define PLL6 283
+#define PLL6_VOTE 284
+#define PLL7_VOTE 285
+#define PLL8 286
+#define PLL8_VOTE 287
+#define PLL9 288
+#define PLL10 289
+#define PLL11 290
+#define PLL12 291
+#define PLL13 292
+#define PLL14 293
+#define PLL14_VOTE 294
+#define USB_HS3_H_CLK 295
+#define USB_HS3_XCVR_SRC 296
+#define USB_HS3_XCVR_CLK 297
+#define USB_HS4_H_CLK 298
+#define USB_HS4_XCVR_SRC 299
+#define USB_HS4_XCVR_CLK 300
+#define SATA_PHY_CFG_CLK 301
+#define SATA_A_CLK 302
+#define CE3_SRC 303
+#define CE3_CORE_CLK 304
+#define CE3_H_CLK 305
+#define PLL16 306
+#define PLL17 307
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8974.h b/include/dt-bindings/clock/qcom,gcc-msm8974.h
new file mode 100644
index 0000000..5c10570
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8974.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8974_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8974_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define CONFIG_NOC_CLK_SRC 2
+#define GPLL2 3
+#define GPLL2_VOTE 4
+#define GPLL3 5
+#define GPLL3_VOTE 6
+#define PERIPH_NOC_CLK_SRC 7
+#define BLSP_UART_SIM_CLK_SRC 8
+#define QDSS_TSCTR_CLK_SRC 9
+#define BIMC_DDR_CLK_SRC 10
+#define SYSTEM_NOC_CLK_SRC 11
+#define GPLL1 12
+#define GPLL1_VOTE 13
+#define RPM_CLK_SRC 14
+#define GCC_BIMC_CLK 15
+#define BIMC_DDR_CPLL0_ROOT_CLK_SRC 16
+#define KPSS_AHB_CLK_SRC 17
+#define QDSS_AT_CLK_SRC 18
+#define USB30_MASTER_CLK_SRC 19
+#define BIMC_DDR_CPLL1_ROOT_CLK_SRC 20
+#define QDSS_STM_CLK_SRC 21
+#define ACC_CLK_SRC 22
+#define SEC_CTRL_CLK_SRC 23
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 27
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 28
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 29
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 30
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 31
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 32
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 33
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 34
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 35
+#define BLSP1_UART1_APPS_CLK_SRC 36
+#define BLSP1_UART2_APPS_CLK_SRC 37
+#define BLSP1_UART3_APPS_CLK_SRC 38
+#define BLSP1_UART4_APPS_CLK_SRC 39
+#define BLSP1_UART5_APPS_CLK_SRC 40
+#define BLSP1_UART6_APPS_CLK_SRC 41
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 42
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 43
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 44
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 45
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 46
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 47
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 48
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 49
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 50
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 51
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 52
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 53
+#define BLSP2_UART1_APPS_CLK_SRC 54
+#define BLSP2_UART2_APPS_CLK_SRC 55
+#define BLSP2_UART3_APPS_CLK_SRC 56
+#define BLSP2_UART4_APPS_CLK_SRC 57
+#define BLSP2_UART5_APPS_CLK_SRC 58
+#define BLSP2_UART6_APPS_CLK_SRC 59
+#define CE1_CLK_SRC 60
+#define CE2_CLK_SRC 61
+#define GP1_CLK_SRC 62
+#define GP2_CLK_SRC 63
+#define GP3_CLK_SRC 64
+#define PDM2_CLK_SRC 65
+#define QDSS_TRACECLKIN_CLK_SRC 66
+#define RBCPR_CLK_SRC 67
+#define SDCC1_APPS_CLK_SRC 68
+#define SDCC2_APPS_CLK_SRC 69
+#define SDCC3_APPS_CLK_SRC 70
+#define SDCC4_APPS_CLK_SRC 71
+#define SPMI_AHB_CLK_SRC 72
+#define SPMI_SER_CLK_SRC 73
+#define TSIF_REF_CLK_SRC 74
+#define USB30_MOCK_UTMI_CLK_SRC 75
+#define USB_HS_SYSTEM_CLK_SRC 76
+#define USB_HSIC_CLK_SRC 77
+#define USB_HSIC_IO_CAL_CLK_SRC 78
+#define USB_HSIC_SYSTEM_CLK_SRC 79
+#define GCC_BAM_DMA_AHB_CLK 80
+#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK 81
+#define GCC_BIMC_CFG_AHB_CLK 82
+#define GCC_BIMC_KPSS_AXI_CLK 83
+#define GCC_BIMC_SLEEP_CLK 84
+#define GCC_BIMC_SYSNOC_AXI_CLK 85
+#define GCC_BIMC_XO_CLK 86
+#define GCC_BLSP1_AHB_CLK 87
+#define GCC_BLSP1_SLEEP_CLK 88
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 89
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 90
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 91
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 92
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 93
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 94
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 95
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 96
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 97
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 98
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 99
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 100
+#define GCC_BLSP1_UART1_APPS_CLK 101
+#define GCC_BLSP1_UART1_SIM_CLK 102
+#define GCC_BLSP1_UART2_APPS_CLK 103
+#define GCC_BLSP1_UART2_SIM_CLK 104
+#define GCC_BLSP1_UART3_APPS_CLK 105
+#define GCC_BLSP1_UART3_SIM_CLK 106
+#define GCC_BLSP1_UART4_APPS_CLK 107
+#define GCC_BLSP1_UART4_SIM_CLK 108
+#define GCC_BLSP1_UART5_APPS_CLK 109
+#define GCC_BLSP1_UART5_SIM_CLK 110
+#define GCC_BLSP1_UART6_APPS_CLK 111
+#define GCC_BLSP1_UART6_SIM_CLK 112
+#define GCC_BLSP2_AHB_CLK 113
+#define GCC_BLSP2_SLEEP_CLK 114
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 115
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 116
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 117
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 118
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 119
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 120
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 121
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 122
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 123
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 124
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 125
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 126
+#define GCC_BLSP2_UART1_APPS_CLK 127
+#define GCC_BLSP2_UART1_SIM_CLK 128
+#define GCC_BLSP2_UART2_APPS_CLK 129
+#define GCC_BLSP2_UART2_SIM_CLK 130
+#define GCC_BLSP2_UART3_APPS_CLK 131
+#define GCC_BLSP2_UART3_SIM_CLK 132
+#define GCC_BLSP2_UART4_APPS_CLK 133
+#define GCC_BLSP2_UART4_SIM_CLK 134
+#define GCC_BLSP2_UART5_APPS_CLK 135
+#define GCC_BLSP2_UART5_SIM_CLK 136
+#define GCC_BLSP2_UART6_APPS_CLK 137
+#define GCC_BLSP2_UART6_SIM_CLK 138
+#define GCC_BOOT_ROM_AHB_CLK 139
+#define GCC_CE1_AHB_CLK 140
+#define GCC_CE1_AXI_CLK 141
+#define GCC_CE1_CLK 142
+#define GCC_CE2_AHB_CLK 143
+#define GCC_CE2_AXI_CLK 144
+#define GCC_CE2_CLK 145
+#define GCC_CNOC_BUS_TIMEOUT0_AHB_CLK 146
+#define GCC_CNOC_BUS_TIMEOUT1_AHB_CLK 147
+#define GCC_CNOC_BUS_TIMEOUT2_AHB_CLK 148
+#define GCC_CNOC_BUS_TIMEOUT3_AHB_CLK 149
+#define GCC_CNOC_BUS_TIMEOUT4_AHB_CLK 150
+#define GCC_CNOC_BUS_TIMEOUT5_AHB_CLK 151
+#define GCC_CNOC_BUS_TIMEOUT6_AHB_CLK 152
+#define GCC_CFG_NOC_AHB_CLK 153
+#define GCC_CFG_NOC_DDR_CFG_CLK 154
+#define GCC_CFG_NOC_RPM_AHB_CLK 155
+#define GCC_BIMC_DDR_CPLL0_CLK 156
+#define GCC_BIMC_DDR_CPLL1_CLK 157
+#define GCC_DDR_DIM_CFG_CLK 158
+#define GCC_DDR_DIM_SLEEP_CLK 159
+#define GCC_DEHR_CLK 160
+#define GCC_AHB_CLK 161
+#define GCC_IM_SLEEP_CLK 162
+#define GCC_XO_CLK 163
+#define GCC_XO_DIV4_CLK 164
+#define GCC_GP1_CLK 165
+#define GCC_GP2_CLK 166
+#define GCC_GP3_CLK 167
+#define GCC_IMEM_AXI_CLK 168
+#define GCC_IMEM_CFG_AHB_CLK 169
+#define GCC_KPSS_AHB_CLK 170
+#define GCC_KPSS_AXI_CLK 171
+#define GCC_LPASS_Q6_AXI_CLK 172
+#define GCC_MMSS_NOC_AT_CLK 173
+#define GCC_MMSS_NOC_CFG_AHB_CLK 174
+#define GCC_OCMEM_NOC_CFG_AHB_CLK 175
+#define GCC_OCMEM_SYS_NOC_AXI_CLK 176
+#define GCC_MPM_AHB_CLK 177
+#define GCC_MSG_RAM_AHB_CLK 178
+#define GCC_MSS_CFG_AHB_CLK 179
+#define GCC_MSS_Q6_BIMC_AXI_CLK 180
+#define GCC_NOC_CONF_XPU_AHB_CLK 181
+#define GCC_PDM2_CLK 182
+#define GCC_PDM_AHB_CLK 183
+#define GCC_PDM_XO4_CLK 184
+#define GCC_PERIPH_NOC_AHB_CLK 185
+#define GCC_PERIPH_NOC_AT_CLK 186
+#define GCC_PERIPH_NOC_CFG_AHB_CLK 187
+#define GCC_PERIPH_NOC_MPU_CFG_AHB_CLK 188
+#define GCC_PERIPH_XPU_AHB_CLK 189
+#define GCC_PNOC_BUS_TIMEOUT0_AHB_CLK 190
+#define GCC_PNOC_BUS_TIMEOUT1_AHB_CLK 191
+#define GCC_PNOC_BUS_TIMEOUT2_AHB_CLK 192
+#define GCC_PNOC_BUS_TIMEOUT3_AHB_CLK 193
+#define GCC_PNOC_BUS_TIMEOUT4_AHB_CLK 194
+#define GCC_PRNG_AHB_CLK 195
+#define GCC_QDSS_AT_CLK 196
+#define GCC_QDSS_CFG_AHB_CLK 197
+#define GCC_QDSS_DAP_AHB_CLK 198
+#define GCC_QDSS_DAP_CLK 199
+#define GCC_QDSS_ETR_USB_CLK 200
+#define GCC_QDSS_STM_CLK 201
+#define GCC_QDSS_TRACECLKIN_CLK 202
+#define GCC_QDSS_TSCTR_DIV16_CLK 203
+#define GCC_QDSS_TSCTR_DIV2_CLK 204
+#define GCC_QDSS_TSCTR_DIV3_CLK 205
+#define GCC_QDSS_TSCTR_DIV4_CLK 206
+#define GCC_QDSS_TSCTR_DIV8_CLK 207
+#define GCC_QDSS_RBCPR_XPU_AHB_CLK 208
+#define GCC_RBCPR_AHB_CLK 209
+#define GCC_RBCPR_CLK 210
+#define GCC_RPM_BUS_AHB_CLK 211
+#define GCC_RPM_PROC_HCLK 212
+#define GCC_RPM_SLEEP_CLK 213
+#define GCC_RPM_TIMER_CLK 214
+#define GCC_SDCC1_AHB_CLK 215
+#define GCC_SDCC1_APPS_CLK 216
+#define GCC_SDCC1_INACTIVITY_TIMERS_CLK 217
+#define GCC_SDCC2_AHB_CLK 218
+#define GCC_SDCC2_APPS_CLK 219
+#define GCC_SDCC2_INACTIVITY_TIMERS_CLK 220
+#define GCC_SDCC3_AHB_CLK 221
+#define GCC_SDCC3_APPS_CLK 222
+#define GCC_SDCC3_INACTIVITY_TIMERS_CLK 223
+#define GCC_SDCC4_AHB_CLK 224
+#define GCC_SDCC4_APPS_CLK 225
+#define GCC_SDCC4_INACTIVITY_TIMERS_CLK 226
+#define GCC_SEC_CTRL_ACC_CLK 227
+#define GCC_SEC_CTRL_AHB_CLK 228
+#define GCC_SEC_CTRL_BOOT_ROM_PATCH_CLK 229
+#define GCC_SEC_CTRL_CLK 230
+#define GCC_SEC_CTRL_SENSE_CLK 231
+#define GCC_SNOC_BUS_TIMEOUT0_AHB_CLK 232
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 233
+#define GCC_SPDM_BIMC_CY_CLK 234
+#define GCC_SPDM_CFG_AHB_CLK 235
+#define GCC_SPDM_DEBUG_CY_CLK 236
+#define GCC_SPDM_FF_CLK 237
+#define GCC_SPDM_MSTR_AHB_CLK 238
+#define GCC_SPDM_PNOC_CY_CLK 239
+#define GCC_SPDM_RPM_CY_CLK 240
+#define GCC_SPDM_SNOC_CY_CLK 241
+#define GCC_SPMI_AHB_CLK 242
+#define GCC_SPMI_CNOC_AHB_CLK 243
+#define GCC_SPMI_SER_CLK 244
+#define GCC_SNOC_CNOC_AHB_CLK 245
+#define GCC_SNOC_PNOC_AHB_CLK 246
+#define GCC_SYS_NOC_AT_CLK 247
+#define GCC_SYS_NOC_AXI_CLK 248
+#define GCC_SYS_NOC_KPSS_AHB_CLK 249
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 250
+#define GCC_SYS_NOC_USB3_AXI_CLK 251
+#define GCC_TCSR_AHB_CLK 252
+#define GCC_TLMM_AHB_CLK 253
+#define GCC_TLMM_CLK 254
+#define GCC_TSIF_AHB_CLK 255
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 256
+#define GCC_TSIF_REF_CLK 257
+#define GCC_USB2A_PHY_SLEEP_CLK 258
+#define GCC_USB2B_PHY_SLEEP_CLK 259
+#define GCC_USB30_MASTER_CLK 260
+#define GCC_USB30_MOCK_UTMI_CLK 261
+#define GCC_USB30_SLEEP_CLK 262
+#define GCC_USB_HS_AHB_CLK 263
+#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 264
+#define GCC_USB_HS_SYSTEM_CLK 265
+#define GCC_USB_HSIC_AHB_CLK 266
+#define GCC_USB_HSIC_CLK 267
+#define GCC_USB_HSIC_IO_CAL_CLK 268
+#define GCC_USB_HSIC_IO_CAL_SLEEP_CLK 269
+#define GCC_USB_HSIC_SYSTEM_CLK 270
+#define GCC_WCSS_GPLL1_CLK_SRC 271
+#define GCC_MMSS_GPLL0_CLK_SRC 272
+#define GCC_LPASS_GPLL0_CLK_SRC 273
+#define GCC_WCSS_GPLL1_CLK_SRC_SLEEP_ENA 274
+#define GCC_MMSS_GPLL0_CLK_SRC_SLEEP_ENA 275
+#define GCC_LPASS_GPLL0_CLK_SRC_SLEEP_ENA 276
+#define GCC_IMEM_AXI_CLK_SLEEP_ENA 277
+#define GCC_SYS_NOC_KPSS_AHB_CLK_SLEEP_ENA 278
+#define GCC_BIMC_KPSS_AXI_CLK_SLEEP_ENA 279
+#define GCC_KPSS_AHB_CLK_SLEEP_ENA 280
+#define GCC_KPSS_AXI_CLK_SLEEP_ENA 281
+#define GCC_MPM_AHB_CLK_SLEEP_ENA 282
+#define GCC_OCMEM_SYS_NOC_AXI_CLK_SLEEP_ENA 283
+#define GCC_BLSP1_AHB_CLK_SLEEP_ENA 284
+#define GCC_BLSP1_SLEEP_CLK_SLEEP_ENA 285
+#define GCC_BLSP2_AHB_CLK_SLEEP_ENA 286
+#define GCC_BLSP2_SLEEP_CLK_SLEEP_ENA 287
+#define GCC_PRNG_AHB_CLK_SLEEP_ENA 288
+#define GCC_BAM_DMA_AHB_CLK_SLEEP_ENA 289
+#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK_SLEEP_ENA 290
+#define GCC_BOOT_ROM_AHB_CLK_SLEEP_ENA 291
+#define GCC_MSG_RAM_AHB_CLK_SLEEP_ENA 292
+#define GCC_TLMM_AHB_CLK_SLEEP_ENA 293
+#define GCC_TLMM_CLK_SLEEP_ENA 294
+#define GCC_SPMI_CNOC_AHB_CLK_SLEEP_ENA 295
+#define GCC_CE1_CLK_SLEEP_ENA 296
+#define GCC_CE1_AXI_CLK_SLEEP_ENA 297
+#define GCC_CE1_AHB_CLK_SLEEP_ENA 298
+#define GCC_CE2_CLK_SLEEP_ENA 299
+#define GCC_CE2_AXI_CLK_SLEEP_ENA 300
+#define GCC_CE2_AHB_CLK_SLEEP_ENA 301
+#define GPLL4 302
+#define GPLL4_VOTE 303
+#define GCC_SDCC1_CDCCAL_SLEEP_CLK 304
+#define GCC_SDCC1_CDCCAL_FF_CLK 305
+
+/* gdscs */
+#define USB_HS_HSIC_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h
new file mode 100644
index 0000000..9389693
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8994_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8994_H
+
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL4_EARLY 2
+#define GPLL4 3
+#define UFS_AXI_CLK_SRC 4
+#define USB30_MASTER_CLK_SRC 5
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 7
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 8
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 9
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 10
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 11
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 12
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 13
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 14
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 15
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 17
+#define BLSP1_UART1_APPS_CLK_SRC 18
+#define BLSP1_UART2_APPS_CLK_SRC 19
+#define BLSP1_UART3_APPS_CLK_SRC 20
+#define BLSP1_UART4_APPS_CLK_SRC 21
+#define BLSP1_UART5_APPS_CLK_SRC 22
+#define BLSP1_UART6_APPS_CLK_SRC 23
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 24
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 25
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 26
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 27
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 28
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 29
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 30
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 31
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 32
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 33
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 34
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 35
+#define BLSP2_UART1_APPS_CLK_SRC 36
+#define BLSP2_UART2_APPS_CLK_SRC 37
+#define BLSP2_UART3_APPS_CLK_SRC 38
+#define BLSP2_UART4_APPS_CLK_SRC 39
+#define BLSP2_UART5_APPS_CLK_SRC 40
+#define BLSP2_UART6_APPS_CLK_SRC 41
+#define GP1_CLK_SRC 42
+#define GP2_CLK_SRC 43
+#define GP3_CLK_SRC 44
+#define PCIE_0_AUX_CLK_SRC 45
+#define PCIE_0_PIPE_CLK_SRC 46
+#define PCIE_1_AUX_CLK_SRC 47
+#define PCIE_1_PIPE_CLK_SRC 48
+#define PDM2_CLK_SRC 49
+#define SDCC1_APPS_CLK_SRC 50
+#define SDCC2_APPS_CLK_SRC 51
+#define SDCC3_APPS_CLK_SRC 52
+#define SDCC4_APPS_CLK_SRC 53
+#define TSIF_REF_CLK_SRC 54
+#define USB30_MOCK_UTMI_CLK_SRC 55
+#define USB3_PHY_AUX_CLK_SRC 56
+#define USB_HS_SYSTEM_CLK_SRC 57
+#define GCC_BLSP1_AHB_CLK 58
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 59
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 60
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 61
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 62
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 63
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 64
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 65
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 66
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 67
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 68
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 69
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 70
+#define GCC_BLSP1_UART1_APPS_CLK 71
+#define GCC_BLSP1_UART2_APPS_CLK 72
+#define GCC_BLSP1_UART3_APPS_CLK 73
+#define GCC_BLSP1_UART4_APPS_CLK 74
+#define GCC_BLSP1_UART5_APPS_CLK 75
+#define GCC_BLSP1_UART6_APPS_CLK 76
+#define GCC_BLSP2_AHB_CLK 77
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 78
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 79
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 80
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 81
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 82
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 83
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 84
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 85
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 86
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 87
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 88
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 89
+#define GCC_BLSP2_UART1_APPS_CLK 90
+#define GCC_BLSP2_UART2_APPS_CLK 91
+#define GCC_BLSP2_UART3_APPS_CLK 92
+#define GCC_BLSP2_UART4_APPS_CLK 93
+#define GCC_BLSP2_UART5_APPS_CLK 94
+#define GCC_BLSP2_UART6_APPS_CLK 95
+#define GCC_GP1_CLK 96
+#define GCC_GP2_CLK 97
+#define GCC_GP3_CLK 98
+#define GCC_PCIE_0_AUX_CLK 99
+#define GCC_PCIE_0_PIPE_CLK 100
+#define GCC_PCIE_1_AUX_CLK 101
+#define GCC_PCIE_1_PIPE_CLK 102
+#define GCC_PDM2_CLK 103
+#define GCC_SDCC1_APPS_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC3_APPS_CLK 106
+#define GCC_SDCC4_APPS_CLK 107
+#define GCC_SYS_NOC_UFS_AXI_CLK 108
+#define GCC_SYS_NOC_USB3_AXI_CLK 109
+#define GCC_TSIF_REF_CLK 110
+#define GCC_UFS_AXI_CLK 111
+#define GCC_UFS_RX_CFG_CLK 112
+#define GCC_UFS_TX_CFG_CLK 113
+#define GCC_USB30_MASTER_CLK 114
+#define GCC_USB30_MOCK_UTMI_CLK 115
+#define GCC_USB3_PHY_AUX_CLK 116
+#define GCC_USB_HS_SYSTEM_CLK 117
+#define GCC_SDCC1_AHB_CLK 118
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h
new file mode 100644
index 0000000..03bf49d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8996_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8996_H
+
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL1_EARLY 2
+#define GPLL1 3
+#define GPLL2_EARLY 4
+#define GPLL2 5
+#define GPLL3_EARLY 6
+#define GPLL3 7
+#define GPLL4_EARLY 8
+#define GPLL4 9
+#define SYSTEM_NOC_CLK_SRC 10
+#define CONFIG_NOC_CLK_SRC 11
+#define PERIPH_NOC_CLK_SRC 12
+#define MMSS_BIMC_GFX_CLK_SRC 13
+#define USB30_MASTER_CLK_SRC 14
+#define USB30_MOCK_UTMI_CLK_SRC 15
+#define USB3_PHY_AUX_CLK_SRC 16
+#define USB20_MASTER_CLK_SRC 17
+#define USB20_MOCK_UTMI_CLK_SRC 18
+#define SDCC1_APPS_CLK_SRC 19
+#define SDCC1_ICE_CORE_CLK_SRC 20
+#define SDCC2_APPS_CLK_SRC 21
+#define SDCC3_APPS_CLK_SRC 22
+#define SDCC4_APPS_CLK_SRC 23
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 24
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 25
+#define BLSP1_UART1_APPS_CLK_SRC 26
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 27
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 30
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 31
+#define BLSP1_UART3_APPS_CLK_SRC 32
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 33
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 34
+#define BLSP1_UART4_APPS_CLK_SRC 35
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 36
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 37
+#define BLSP1_UART5_APPS_CLK_SRC 38
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 39
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 40
+#define BLSP1_UART6_APPS_CLK_SRC 41
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 42
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 43
+#define BLSP2_UART1_APPS_CLK_SRC 44
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 45
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 46
+#define BLSP2_UART2_APPS_CLK_SRC 47
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 48
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 49
+#define BLSP2_UART3_APPS_CLK_SRC 50
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 51
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 52
+#define BLSP2_UART4_APPS_CLK_SRC 53
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 54
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 55
+#define BLSP2_UART5_APPS_CLK_SRC 56
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 57
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 58
+#define BLSP2_UART6_APPS_CLK_SRC 59
+#define PDM2_CLK_SRC 60
+#define TSIF_REF_CLK_SRC 61
+#define CE1_CLK_SRC 62
+#define GCC_SLEEP_CLK_SRC 63
+#define BIMC_CLK_SRC 64
+#define HMSS_AHB_CLK_SRC 65
+#define BIMC_HMSS_AXI_CLK_SRC 66
+#define HMSS_RBCPR_CLK_SRC 67
+#define HMSS_GPLL0_CLK_SRC 68
+#define GP1_CLK_SRC 69
+#define GP2_CLK_SRC 70
+#define GP3_CLK_SRC 71
+#define PCIE_AUX_CLK_SRC 72
+#define UFS_AXI_CLK_SRC 73
+#define UFS_ICE_CORE_CLK_SRC 74
+#define QSPI_SER_CLK_SRC 75
+#define GCC_SYS_NOC_AXI_CLK 76
+#define GCC_SYS_NOC_HMSS_AHB_CLK 77
+#define GCC_SNOC_CNOC_AHB_CLK 78
+#define GCC_SNOC_PNOC_AHB_CLK 79
+#define GCC_SYS_NOC_AT_CLK 80
+#define GCC_SYS_NOC_USB3_AXI_CLK 81
+#define GCC_SYS_NOC_UFS_AXI_CLK 82
+#define GCC_CFG_NOC_AHB_CLK 83
+#define GCC_PERIPH_NOC_AHB_CLK 84
+#define GCC_PERIPH_NOC_USB20_AHB_CLK 85
+#define GCC_TIC_CLK 86
+#define GCC_IMEM_AXI_CLK 87
+#define GCC_MMSS_SYS_NOC_AXI_CLK 88
+#define GCC_MMSS_NOC_CFG_AHB_CLK 89
+#define GCC_MMSS_BIMC_GFX_CLK 90
+#define GCC_USB30_MASTER_CLK 91
+#define GCC_USB30_SLEEP_CLK 92
+#define GCC_USB30_MOCK_UTMI_CLK 93
+#define GCC_USB3_PHY_AUX_CLK 94
+#define GCC_USB3_PHY_PIPE_CLK 95
+#define GCC_USB20_MASTER_CLK 96
+#define GCC_USB20_SLEEP_CLK 97
+#define GCC_USB20_MOCK_UTMI_CLK 98
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 99
+#define GCC_SDCC1_APPS_CLK 100
+#define GCC_SDCC1_AHB_CLK 101
+#define GCC_SDCC1_ICE_CORE_CLK 102
+#define GCC_SDCC2_APPS_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC3_APPS_CLK 105
+#define GCC_SDCC3_AHB_CLK 106
+#define GCC_SDCC4_APPS_CLK 107
+#define GCC_SDCC4_AHB_CLK 108
+#define GCC_BLSP1_AHB_CLK 109
+#define GCC_BLSP1_SLEEP_CLK 110
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 111
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 112
+#define GCC_BLSP1_UART1_APPS_CLK 113
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 114
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 115
+#define GCC_BLSP1_UART2_APPS_CLK 116
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 117
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 118
+#define GCC_BLSP1_UART3_APPS_CLK 119
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 120
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 121
+#define GCC_BLSP1_UART4_APPS_CLK 122
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 123
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 124
+#define GCC_BLSP1_UART5_APPS_CLK 125
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 126
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 127
+#define GCC_BLSP1_UART6_APPS_CLK 128
+#define GCC_BLSP2_AHB_CLK 129
+#define GCC_BLSP2_SLEEP_CLK 130
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 131
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 132
+#define GCC_BLSP2_UART1_APPS_CLK 133
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 134
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 135
+#define GCC_BLSP2_UART2_APPS_CLK 136
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 137
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 138
+#define GCC_BLSP2_UART3_APPS_CLK 139
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 140
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 141
+#define GCC_BLSP2_UART4_APPS_CLK 142
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 143
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 144
+#define GCC_BLSP2_UART5_APPS_CLK 145
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 146
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 147
+#define GCC_BLSP2_UART6_APPS_CLK 148
+#define GCC_PDM_AHB_CLK 149
+#define GCC_PDM_XO4_CLK 150
+#define GCC_PDM2_CLK 151
+#define GCC_PRNG_AHB_CLK 152
+#define GCC_TSIF_AHB_CLK 153
+#define GCC_TSIF_REF_CLK 154
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 155
+#define GCC_TCSR_AHB_CLK 156
+#define GCC_BOOT_ROM_AHB_CLK 157
+#define GCC_MSG_RAM_AHB_CLK 158
+#define GCC_TLMM_AHB_CLK 159
+#define GCC_TLMM_CLK 160
+#define GCC_MPM_AHB_CLK 161
+#define GCC_SPMI_SER_CLK 162
+#define GCC_SPMI_CNOC_AHB_CLK 163
+#define GCC_CE1_CLK 164
+#define GCC_CE1_AXI_CLK 165
+#define GCC_CE1_AHB_CLK 166
+#define GCC_BIMC_HMSS_AXI_CLK 167
+#define GCC_BIMC_GFX_CLK 168
+#define GCC_HMSS_AHB_CLK 169
+#define GCC_HMSS_SLV_AXI_CLK 170
+#define GCC_HMSS_MSTR_AXI_CLK 171
+#define GCC_HMSS_RBCPR_CLK 172
+#define GCC_GP1_CLK 173
+#define GCC_GP2_CLK 174
+#define GCC_GP3_CLK 175
+#define GCC_PCIE_0_SLV_AXI_CLK 176
+#define GCC_PCIE_0_MSTR_AXI_CLK 177
+#define GCC_PCIE_0_CFG_AHB_CLK 178
+#define GCC_PCIE_0_AUX_CLK 179
+#define GCC_PCIE_0_PIPE_CLK 180
+#define GCC_PCIE_1_SLV_AXI_CLK 181
+#define GCC_PCIE_1_MSTR_AXI_CLK 182
+#define GCC_PCIE_1_CFG_AHB_CLK 183
+#define GCC_PCIE_1_AUX_CLK 184
+#define GCC_PCIE_1_PIPE_CLK 185
+#define GCC_PCIE_2_SLV_AXI_CLK 186
+#define GCC_PCIE_2_MSTR_AXI_CLK 187
+#define GCC_PCIE_2_CFG_AHB_CLK 188
+#define GCC_PCIE_2_AUX_CLK 189
+#define GCC_PCIE_2_PIPE_CLK 190
+#define GCC_PCIE_PHY_CFG_AHB_CLK 191
+#define GCC_PCIE_PHY_AUX_CLK 192
+#define GCC_UFS_AXI_CLK 193
+#define GCC_UFS_AHB_CLK 194
+#define GCC_UFS_TX_CFG_CLK 195
+#define GCC_UFS_RX_CFG_CLK 196
+#define GCC_UFS_TX_SYMBOL_0_CLK 197
+#define GCC_UFS_RX_SYMBOL_0_CLK 198
+#define GCC_UFS_RX_SYMBOL_1_CLK 199
+#define GCC_UFS_UNIPRO_CORE_CLK 200
+#define GCC_UFS_ICE_CORE_CLK 201
+#define GCC_UFS_SYS_CLK_CORE_CLK 202
+#define GCC_UFS_TX_SYMBOL_CLK_CORE_CLK 203
+#define GCC_AGGRE0_SNOC_AXI_CLK 204
+#define GCC_AGGRE0_CNOC_AHB_CLK 205
+#define GCC_SMMU_AGGRE0_AXI_CLK 206
+#define GCC_SMMU_AGGRE0_AHB_CLK 207
+#define GCC_AGGRE1_PNOC_AHB_CLK 208
+#define GCC_AGGRE2_UFS_AXI_CLK 209
+#define GCC_AGGRE2_USB3_AXI_CLK 210
+#define GCC_QSPI_AHB_CLK 211
+#define GCC_QSPI_SER_CLK 212
+#define GCC_USB3_CLKREF_CLK 213
+#define GCC_HDMI_CLKREF_CLK 214
+#define GCC_UFS_CLKREF_CLK 215
+#define GCC_PCIE_CLKREF_CLK 216
+#define GCC_RX2_USB2_CLKREF_CLK 217
+#define GCC_RX1_USB2_CLKREF_CLK 218
+#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK 219
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 220
+#define GCC_EDP_CLKREF_CLK 221
+#define GCC_MSS_CFG_AHB_CLK 222
+#define GCC_MSS_Q6_BIMC_AXI_CLK 223
+#define GCC_MSS_SNOC_AXI_CLK 224
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 225
+#define GCC_DCC_AHB_CLK 226
+#define GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK 227
+#define GCC_MMSS_GPLL0_DIV_CLK 228
+#define GCC_MSS_GPLL0_DIV_CLK 229
+
+#define GCC_SYSTEM_NOC_BCR 0
+#define GCC_CONFIG_NOC_BCR 1
+#define GCC_PERIPH_NOC_BCR 2
+#define GCC_IMEM_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PIMEM_BCR 5
+#define GCC_QDSS_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB_20_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_QUSB2PHY_SEC_BCR 10
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 11
+#define GCC_SDCC1_BCR 12
+#define GCC_SDCC2_BCR 13
+#define GCC_SDCC3_BCR 14
+#define GCC_SDCC4_BCR 15
+#define GCC_BLSP1_BCR 16
+#define GCC_BLSP1_QUP1_BCR 17
+#define GCC_BLSP1_UART1_BCR 18
+#define GCC_BLSP1_QUP2_BCR 19
+#define GCC_BLSP1_UART2_BCR 20
+#define GCC_BLSP1_QUP3_BCR 21
+#define GCC_BLSP1_UART3_BCR 22
+#define GCC_BLSP1_QUP4_BCR 23
+#define GCC_BLSP1_UART4_BCR 24
+#define GCC_BLSP1_QUP5_BCR 25
+#define GCC_BLSP1_UART5_BCR 26
+#define GCC_BLSP1_QUP6_BCR 27
+#define GCC_BLSP1_UART6_BCR 28
+#define GCC_BLSP2_BCR 29
+#define GCC_BLSP2_QUP1_BCR 30
+#define GCC_BLSP2_UART1_BCR 31
+#define GCC_BLSP2_QUP2_BCR 32
+#define GCC_BLSP2_UART2_BCR 33
+#define GCC_BLSP2_QUP3_BCR 34
+#define GCC_BLSP2_UART3_BCR 35
+#define GCC_BLSP2_QUP4_BCR 36
+#define GCC_BLSP2_UART4_BCR 37
+#define GCC_BLSP2_QUP5_BCR 38
+#define GCC_BLSP2_UART5_BCR 39
+#define GCC_BLSP2_QUP6_BCR 40
+#define GCC_BLSP2_UART6_BCR 41
+#define GCC_PDM_BCR 42
+#define GCC_PRNG_BCR 43
+#define GCC_TSIF_BCR 44
+#define GCC_TCSR_BCR 45
+#define GCC_BOOT_ROM_BCR 46
+#define GCC_MSG_RAM_BCR 47
+#define GCC_TLMM_BCR 48
+#define GCC_MPM_BCR 49
+#define GCC_SEC_CTRL_BCR 50
+#define GCC_SPMI_BCR 51
+#define GCC_SPDM_BCR 52
+#define GCC_CE1_BCR 53
+#define GCC_BIMC_BCR 54
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 55
+#define GCC_SNOC_BUS_TIMEOUT2_BCR 56
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 57
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 58
+#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 59
+#define GCC_PNOC_BUS_TIMEOUT0_BCR 60
+#define GCC_PNOC_BUS_TIMEOUT1_BCR 61
+#define GCC_PNOC_BUS_TIMEOUT2_BCR 62
+#define GCC_PNOC_BUS_TIMEOUT3_BCR 63
+#define GCC_PNOC_BUS_TIMEOUT4_BCR 64
+#define GCC_CNOC_BUS_TIMEOUT0_BCR 65
+#define GCC_CNOC_BUS_TIMEOUT1_BCR 66
+#define GCC_CNOC_BUS_TIMEOUT2_BCR 67
+#define GCC_CNOC_BUS_TIMEOUT3_BCR 68
+#define GCC_CNOC_BUS_TIMEOUT4_BCR 69
+#define GCC_CNOC_BUS_TIMEOUT5_BCR 70
+#define GCC_CNOC_BUS_TIMEOUT6_BCR 71
+#define GCC_CNOC_BUS_TIMEOUT7_BCR 72
+#define GCC_CNOC_BUS_TIMEOUT8_BCR 73
+#define GCC_CNOC_BUS_TIMEOUT9_BCR 74
+#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 75
+#define GCC_APB2JTAG_BCR 76
+#define GCC_RBCPR_CX_BCR 77
+#define GCC_RBCPR_MX_BCR 78
+#define GCC_PCIE_0_BCR 79
+#define GCC_PCIE_0_PHY_BCR 80
+#define GCC_PCIE_1_BCR 81
+#define GCC_PCIE_1_PHY_BCR 82
+#define GCC_PCIE_2_BCR 83
+#define GCC_PCIE_2_PHY_BCR 84
+#define GCC_PCIE_PHY_BCR 85
+#define GCC_DCD_BCR 86
+#define GCC_OBT_ODT_BCR 87
+#define GCC_UFS_BCR 88
+#define GCC_SSC_BCR 89
+#define GCC_VS_BCR 90
+#define GCC_AGGRE0_NOC_BCR 91
+#define GCC_AGGRE1_NOC_BCR 92
+#define GCC_AGGRE2_NOC_BCR 93
+#define GCC_DCC_BCR 94
+#define GCC_IPA_BCR 95
+#define GCC_QSPI_BCR 96
+#define GCC_SKL_BCR 97
+#define GCC_MSMPU_BCR 98
+#define GCC_MSS_Q6_BCR 99
+#define GCC_QREFS_VBG_CAL_BCR 100
+#define GCC_PCIE_PHY_COM_BCR 101
+#define GCC_PCIE_PHY_COM_NOCSR_BCR 102
+#define GCC_USB3_PHY_BCR 103
+#define GCC_USB3PHY_PHY_BCR 104
+#define GCC_MSS_RESTART 105
+
+
+/* Indexes for GDSCs */
+#define AGGRE0_NOC_GDSC 0
+#define HLOS1_VOTE_AGGRE0_NOC_GDSC 1
+#define HLOS1_VOTE_LPASS_ADSP_GDSC 2
+#define HLOS1_VOTE_LPASS_CORE_GDSC 3
+#define USB30_GDSC 4
+#define PCIE0_GDSC 5
+#define PCIE1_GDSC 6
+#define PCIE2_GDSC 7
+#define UFS_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
new file mode 100644
index 0000000..ab37626
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_COBALT_H
+#define _DT_BINDINGS_CLK_MSM_GCC_COBALT_H
+
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 8
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 9
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 10
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 11
+#define BLSP1_UART1_APPS_CLK_SRC 12
+#define BLSP1_UART2_APPS_CLK_SRC 13
+#define BLSP1_UART3_APPS_CLK_SRC 14
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 15
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 16
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 17
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 18
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 19
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 20
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 21
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 22
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC 23
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC 24
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC 25
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC 26
+#define BLSP2_UART1_APPS_CLK_SRC 27
+#define BLSP2_UART2_APPS_CLK_SRC 28
+#define BLSP2_UART3_APPS_CLK_SRC 29
+#define GCC_AGGRE1_NOC_XO_CLK 30
+#define GCC_AGGRE1_UFS_AXI_CLK 31
+#define GCC_AGGRE1_USB3_AXI_CLK 32
+#define GCC_APSS_QDSS_TSCTR_DIV2_CLK 33
+#define GCC_APSS_QDSS_TSCTR_DIV8_CLK 34
+#define GCC_BIMC_HMSS_AXI_CLK 35
+#define GCC_BIMC_MSS_Q6_AXI_CLK 36
+#define GCC_BLSP1_AHB_CLK 37
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 43
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 44
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 45
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 46
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 47
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 48
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 49
+#define GCC_BLSP1_SLEEP_CLK 50
+#define GCC_BLSP1_UART1_APPS_CLK 51
+#define GCC_BLSP1_UART2_APPS_CLK 52
+#define GCC_BLSP1_UART3_APPS_CLK 53
+#define GCC_BLSP2_AHB_CLK 54
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 55
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 56
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 57
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 58
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 59
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 60
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 61
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 62
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK 63
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK 64
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK 65
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK 66
+#define GCC_BLSP2_SLEEP_CLK 67
+#define GCC_BLSP2_UART1_APPS_CLK 68
+#define GCC_BLSP2_UART2_APPS_CLK 69
+#define GCC_BLSP2_UART3_APPS_CLK 70
+#define GCC_CFG_NOC_USB3_AXI_CLK 71
+#define GCC_GP1_CLK 72
+#define GCC_GP2_CLK 73
+#define GCC_GP3_CLK 74
+#define GCC_GPU_BIMC_GFX_CLK 75
+#define GCC_GPU_BIMC_GFX_SRC_CLK 76
+#define GCC_GPU_CFG_AHB_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_HMSS_AHB_CLK 79
+#define GCC_HMSS_AT_CLK 80
+#define GCC_HMSS_DVM_BUS_CLK 81
+#define GCC_HMSS_RBCPR_CLK 82
+#define GCC_HMSS_TRIG_CLK 83
+#define GCC_LPASS_AT_CLK 84
+#define GCC_LPASS_TRIG_CLK 85
+#define GCC_MMSS_NOC_CFG_AHB_CLK 86
+#define GCC_MMSS_QM_AHB_CLK 87
+#define GCC_MMSS_QM_CORE_CLK 88
+#define GCC_MMSS_SYS_NOC_AXI_CLK 89
+#define GCC_MSS_AT_CLK 90
+#define GCC_PCIE_0_AUX_CLK 91
+#define GCC_PCIE_0_CFG_AHB_CLK 92
+#define GCC_PCIE_0_MSTR_AXI_CLK 93
+#define GCC_PCIE_0_PIPE_CLK 94
+#define GCC_PCIE_0_SLV_AXI_CLK 95
+#define GCC_PCIE_PHY_AUX_CLK 96
+#define GCC_PDM2_CLK 97
+#define GCC_PDM_AHB_CLK 98
+#define GCC_PDM_XO4_CLK 99
+#define GCC_PRNG_AHB_CLK 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC4_AHB_CLK 103
+#define GCC_SDCC4_APPS_CLK 104
+#define GCC_TSIF_AHB_CLK 105
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 106
+#define GCC_TSIF_REF_CLK 107
+#define GCC_UFS_AHB_CLK 108
+#define GCC_UFS_AXI_CLK 109
+#define GCC_UFS_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_AUX_CLK 111
+#define GCC_UFS_RX_SYMBOL_0_CLK 112
+#define GCC_UFS_RX_SYMBOL_1_CLK 113
+#define GCC_UFS_TX_SYMBOL_0_CLK 114
+#define GCC_UFS_UNIPRO_CORE_CLK 115
+#define GCC_USB30_MASTER_CLK 116
+#define GCC_USB30_MOCK_UTMI_CLK 117
+#define GCC_USB30_SLEEP_CLK 118
+#define GCC_USB3_PHY_AUX_CLK 119
+#define GCC_USB3_PHY_PIPE_CLK 120
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 121
+#define GP1_CLK_SRC 122
+#define GP2_CLK_SRC 123
+#define GP3_CLK_SRC 124
+#define GPLL0 125
+#define GPLL0_OUT_EVEN 126
+#define GPLL0_OUT_MAIN 127
+#define GPLL0_OUT_ODD 128
+#define GPLL0_OUT_TEST 129
+#define GPLL1 130
+#define GPLL1_OUT_EVEN 131
+#define GPLL1_OUT_MAIN 132
+#define GPLL1_OUT_ODD 133
+#define GPLL1_OUT_TEST 134
+#define GPLL2 135
+#define GPLL2_OUT_EVEN 136
+#define GPLL2_OUT_MAIN 137
+#define GPLL2_OUT_ODD 138
+#define GPLL2_OUT_TEST 139
+#define GPLL3 140
+#define GPLL3_OUT_EVEN 141
+#define GPLL3_OUT_MAIN 142
+#define GPLL3_OUT_ODD 143
+#define GPLL3_OUT_TEST 144
+#define GPLL4 145
+#define GPLL4_OUT_EVEN 146
+#define GPLL4_OUT_MAIN 147
+#define GPLL4_OUT_ODD 148
+#define GPLL4_OUT_TEST 149
+#define GPLL6 150
+#define GPLL6_OUT_EVEN 151
+#define GPLL6_OUT_MAIN 152
+#define GPLL6_OUT_ODD 153
+#define GPLL6_OUT_TEST 154
+#define HMSS_AHB_CLK_SRC 155
+#define HMSS_RBCPR_CLK_SRC 156
+#define PCIE_AUX_CLK_SRC 157
+#define PDM2_CLK_SRC 158
+#define SDCC2_APPS_CLK_SRC 159
+#define SDCC4_APPS_CLK_SRC 160
+#define TSIF_REF_CLK_SRC 161
+#define UFS_AXI_CLK_SRC 162
+#define USB30_MASTER_CLK_SRC 163
+#define USB30_MOCK_UTMI_CLK_SRC 164
+#define USB3_PHY_AUX_CLK_SRC 165
+#define GCC_USB3_CLKREF_CLK 166
+#define GCC_HDMI_CLKREF_CLK 167
+#define GCC_UFS_CLKREF_CLK 168
+#define GCC_PCIE_CLKREF_CLK 169
+#define GCC_RX1_USB2_CLKREF_CLK 170
+
+#define PCIE_0_GDSC 0
+#define UFS_GDSC 1
+#define USB_30_GDSC 2
+
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_QUP5_BCR 4
+#define GCC_BLSP1_QUP6_BCR 5
+#define GCC_BLSP2_QUP1_BCR 6
+#define GCC_BLSP2_QUP2_BCR 7
+#define GCC_BLSP2_QUP3_BCR 8
+#define GCC_BLSP2_QUP4_BCR 9
+#define GCC_BLSP2_QUP5_BCR 10
+#define GCC_BLSP2_QUP6_BCR 11
+#define GCC_PCIE_0_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_SDCC2_BCR 14
+#define GCC_SDCC4_BCR 15
+#define GCC_TSIF_BCR 16
+#define GCC_UFS_BCR 17
+#define GCC_USB_30_BCR 18
+#define GCC_SYSTEM_NOC_BCR 19
+#define GCC_CONFIG_NOC_BCR 20
+#define GCC_AHB2PHY_EAST_BCR 21
+#define GCC_IMEM_BCR 22
+#define GCC_PIMEM_BCR 23
+#define GCC_MMSS_BCR 24
+#define GCC_QDSS_BCR 25
+#define GCC_WCSS_BCR 26
+#define GCC_BLSP1_BCR 27
+#define GCC_BLSP1_UART1_BCR 28
+#define GCC_BLSP1_UART2_BCR 29
+#define GCC_BLSP1_UART3_BCR 30
+#define GCC_CM_PHY_REFGEN1_BCR 31
+#define GCC_CM_PHY_REFGEN2_BCR 32
+#define GCC_BLSP2_BCR 33
+#define GCC_BLSP2_UART1_BCR 34
+#define GCC_BLSP2_UART2_BCR 35
+#define GCC_BLSP2_UART3_BCR 36
+#define GCC_SRAM_SENSOR_BCR 37
+#define GCC_PRNG_BCR 38
+#define GCC_TSIF_0_RESET 39
+#define GCC_TSIF_1_RESET 40
+#define GCC_TCSR_BCR 41
+#define GCC_BOOT_ROM_BCR 42
+#define GCC_MSG_RAM_BCR 43
+#define GCC_TLMM_BCR 44
+#define GCC_MPM_BCR 45
+#define GCC_SEC_CTRL_BCR 46
+#define GCC_SPMI_BCR 47
+#define GCC_SPDM_BCR 48
+#define GCC_CE1_BCR 49
+#define GCC_BIMC_BCR 50
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 51
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 52
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 53
+#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 54
+#define GCC_PNOC_BUS_TIMEOUT0_BCR 55
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT1_BCR 56
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT2_BCR 57
+#define GCC_CNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_CNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_CNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_CNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_CNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_CNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_CNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_CNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_APB2JTAG_BCR 66
+#define GCC_RBCPR_CX_BCR 67
+#define GCC_RBCPR_MX_BCR 68
+#define GCC_USB3_PHY_BCR 69
+#define GCC_USB3PHY_PHY_BCR 70
+#define GCC_USB3_DP_PHY_BCR 71
+#define GCC_SSC_BCR 72
+#define GCC_SSC_RESET 73
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 74
+#define GCC_PCIE_0_LINK_DOWN_BCR 75
+#define GCC_PCIE_0_PHY_BCR 76
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 77
+#define GCC_PCIE_PHY_BCR 78
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 79
+#define GCC_PCIE_PHY_CFG_AHB_BCR 80
+#define GCC_PCIE_PHY_COM_BCR 81
+#define GCC_GPU_BCR 82
+#define GCC_SPSS_BCR 83
+#define GCC_OBT_ODT_BCR 84
+#define GCC_VS_BCR 85
+#define GCC_MSS_VS_RESET 86
+#define GCC_GPU_VS_RESET 87
+#define GCC_APC0_VS_RESET 88
+#define GCC_APC1_VS_RESET 89
+#define GCC_CNOC_BUS_TIMEOUT8_BCR 90
+#define GCC_CNOC_BUS_TIMEOUT9_BCR 91
+#define GCC_CNOC_BUS_TIMEOUT10_BCR 92
+#define GCC_CNOC_BUS_TIMEOUT11_BCR 93
+#define GCC_CNOC_BUS_TIMEOUT12_BCR 94
+#define GCC_CNOC_BUS_TIMEOUT13_BCR 95
+#define GCC_CNOC_BUS_TIMEOUT14_BCR 96
+#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 97
+#define GCC_AGGRE1_NOC_BCR 98
+#define GCC_AGGRE2_NOC_BCR 99
+#define GCC_DCC_BCR 100
+#define GCC_QREFS_VBG_CAL_BCR 101
+#define GCC_IPA_BCR 102
+#define GCC_GLM_BCR 103
+#define GCC_SKL_BCR 104
+#define GCC_MSMPU_BCR 105
+#define GCC_QUSB2PHY_PRIM_BCR 106
+#define GCC_QUSB2PHY_SEC_BCR 107
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
new file mode 100644
index 0000000..bc30515
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H
+
+#define GCC_APSS_AHB_CLK_SRC 0
+#define GCC_BLSP1_QUP0_I2C_APPS_CLK_SRC 1
+#define GCC_BLSP1_QUP0_SPI_APPS_CLK_SRC 2
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define GCC_BLSP1_UART0_APPS_CLK_SRC 11
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 12
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 13
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 14
+#define GCC_BLSP2_QUP0_I2C_APPS_CLK_SRC 15
+#define GCC_BLSP2_QUP0_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP2_UART0_APPS_CLK_SRC 17
+#define GCC_BYTE0_CLK_SRC 18
+#define GCC_EMAC_CLK_SRC 19
+#define GCC_EMAC_PTP_CLK_SRC 20
+#define GCC_ESC0_CLK_SRC 21
+#define GCC_APSS_AHB_CLK 22
+#define GCC_APSS_AXI_CLK 23
+#define GCC_BIMC_APSS_AXI_CLK 24
+#define GCC_BIMC_GFX_CLK 25
+#define GCC_BIMC_MDSS_CLK 26
+#define GCC_BLSP1_AHB_CLK 27
+#define GCC_BLSP1_QUP0_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP0_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 33
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 34
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 35
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 37
+#define GCC_BLSP1_UART0_APPS_CLK 38
+#define GCC_BLSP1_UART1_APPS_CLK 39
+#define GCC_BLSP1_UART2_APPS_CLK 40
+#define GCC_BLSP1_UART3_APPS_CLK 41
+#define GCC_BLSP2_AHB_CLK 42
+#define GCC_BLSP2_QUP0_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP0_SPI_APPS_CLK 44
+#define GCC_BLSP2_UART0_APPS_CLK 45
+#define GCC_BOOT_ROM_AHB_CLK 46
+#define GCC_DCC_CLK 47
+#define GCC_GENI_IR_H_CLK 48
+#define GCC_ETH_AXI_CLK 49
+#define GCC_ETH_PTP_CLK 50
+#define GCC_ETH_RGMII_CLK 51
+#define GCC_ETH_SLAVE_AHB_CLK 52
+#define GCC_GENI_IR_S_CLK 53
+#define GCC_GP1_CLK 54
+#define GCC_GP2_CLK 55
+#define GCC_GP3_CLK 56
+#define GCC_MDSS_AHB_CLK 57
+#define GCC_MDSS_AXI_CLK 58
+#define GCC_MDSS_BYTE0_CLK 59
+#define GCC_MDSS_ESC0_CLK 60
+#define GCC_MDSS_HDMI_APP_CLK 61
+#define GCC_MDSS_HDMI_PCLK_CLK 62
+#define GCC_MDSS_MDP_CLK 63
+#define GCC_MDSS_PCLK0_CLK 64
+#define GCC_MDSS_VSYNC_CLK 65
+#define GCC_OXILI_AHB_CLK 66
+#define GCC_OXILI_GFX3D_CLK 67
+#define GCC_PCIE_0_AUX_CLK 68
+#define GCC_PCIE_0_CFG_AHB_CLK 69
+#define GCC_PCIE_0_MSTR_AXI_CLK 70
+#define GCC_PCIE_0_PIPE_CLK 71
+#define GCC_PCIE_0_SLV_AXI_CLK 72
+#define GCC_PCNOC_USB2_CLK 73
+#define GCC_PCNOC_USB3_CLK 74
+#define GCC_PDM2_CLK 75
+#define GCC_PDM_AHB_CLK 76
+#define GCC_VSYNC_CLK_SRC 77
+#define GCC_PRNG_AHB_CLK 78
+#define GCC_PWM0_XO512_CLK 79
+#define GCC_PWM1_XO512_CLK 80
+#define GCC_PWM2_XO512_CLK 81
+#define GCC_SDCC1_AHB_CLK 82
+#define GCC_SDCC1_APPS_CLK 83
+#define GCC_SDCC1_ICE_CORE_CLK 84
+#define GCC_SDCC2_AHB_CLK 85
+#define GCC_SDCC2_APPS_CLK 86
+#define GCC_SYS_NOC_USB3_CLK 87
+#define GCC_USB20_MOCK_UTMI_CLK 88
+#define GCC_USB2A_PHY_SLEEP_CLK 89
+#define GCC_USB30_MASTER_CLK 90
+#define GCC_USB30_MOCK_UTMI_CLK 91
+#define GCC_USB30_SLEEP_CLK 92
+#define GCC_USB3_PHY_AUX_CLK 93
+#define GCC_USB3_PHY_PIPE_CLK 94
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 95
+#define GCC_USB_HS_SYSTEM_CLK 96
+#define GCC_GFX3D_CLK_SRC 97
+#define GCC_GP1_CLK_SRC 98
+#define GCC_GP2_CLK_SRC 99
+#define GCC_GP3_CLK_SRC 100
+#define GCC_GPLL0_OUT_MAIN 101
+#define GCC_GPLL1_OUT_MAIN 102
+#define GCC_GPLL3_OUT_MAIN 103
+#define GCC_GPLL4_OUT_MAIN 104
+#define GCC_HDMI_APP_CLK_SRC 105
+#define GCC_HDMI_PCLK_CLK_SRC 106
+#define GCC_MDP_CLK_SRC 107
+#define GCC_PCIE_0_AUX_CLK_SRC 108
+#define GCC_PCIE_0_PIPE_CLK_SRC 109
+#define GCC_PCLK0_CLK_SRC 110
+#define GCC_PDM2_CLK_SRC 111
+#define GCC_SDCC1_APPS_CLK_SRC 112
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 113
+#define GCC_SDCC2_APPS_CLK_SRC 114
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 115
+#define GCC_USB30_MASTER_CLK_SRC 116
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 117
+#define GCC_USB3_PHY_AUX_CLK_SRC 118
+#define GCC_USB_HS_SYSTEM_CLK_SRC 119
+#define GCC_GPLL0_AO_CLK_SRC 120
+#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 122
+#define GCC_GPLL0_AO_OUT_MAIN 123
+#define GCC_GPLL0_SLEEP_CLK_SRC 124
+#define GCC_GPLL6 125
+#define GCC_GPLL6_OUT_AUX 126
+#define GCC_MDSS_MDP_VOTE_CLK 127
+#define GCC_MDSS_ROTATOR_VOTE_CLK 128
+#define GCC_BIMC_GPU_CLK 129
+#define GCC_GTCU_AHB_CLK 130
+#define GCC_GFX_TCU_CLK 131
+#define GCC_GFX_TBU_CLK 132
+#define GCC_SMMU_CFG_CLK 133
+#define GCC_APSS_TCU_CLK 134
+#define GCC_CRYPTO_AHB_CLK 135
+#define GCC_CRYPTO_AXI_CLK 136
+#define GCC_CRYPTO_CLK 137
+#define GCC_MDP_TBU_CLK 138
+#define GCC_QDSS_DAP_CLK 139
+#define GCC_DCC_XO_CLK 140
+#define GCC_WCSS_Q6_AHB_CLK 141
+#define GCC_WCSS_Q6_AXIM_CLK 142
+#define GCC_CDSP_CFG_AHB_CLK 143
+#define GCC_BIMC_CDSP_CLK 144
+#define GCC_CDSP_TBU_CLK 145
+#define GCC_CDSP_BIMC_CLK_SRC 146
+
+#define GCC_GENI_IR_BCR 0
+#define GCC_USB_HS_BCR 1
+#define GCC_USB2_HS_PHY_ONLY_BCR 2
+#define GCC_QUSB2_PHY_BCR 3
+#define GCC_USB_HS_PHY_CFG_AHB_BCR 4
+#define GCC_USB2A_PHY_BCR 5
+#define GCC_USB3_PHY_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB3PHY_PHY_BCR 8
+#define GCC_PCIE_0_BCR 9
+#define GCC_PCIE_0_PHY_BCR 10
+#define GCC_PCIE_0_LINK_DOWN_BCR 11
+#define GCC_PCIEPHY_0_PHY_BCR 12
+#define GCC_EMAC_BCR 13
+#define GCC_CDSP_RESTART 14
+#define GCC_PCIE_0_AXI_MASTER_STICKY_ARES 15
+#define GCC_PCIE_0_AHB_ARES 16
+#define GCC_PCIE_0_AXI_SLAVE_ARES 17
+#define GCC_PCIE_0_AXI_MASTER_ARES 18
+#define GCC_PCIE_0_CORE_STICKY_ARES 19
+#define GCC_PCIE_0_SLEEP_ARES 20
+#define GCC_PCIE_0_PIPE_ARES 21
+#define GCC_WDSP_RESTART 22
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
new file mode 100644
index 0000000..4683022
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Craig Tatlor.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_660_H
+#define _DT_BINDINGS_CLK_MSM_GCC_660_H
+
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7
+#define BLSP1_UART1_APPS_CLK_SRC 8
+#define BLSP1_UART2_APPS_CLK_SRC 9
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 10
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 11
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 12
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 13
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 14
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 15
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 16
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 17
+#define BLSP2_UART1_APPS_CLK_SRC 18
+#define BLSP2_UART2_APPS_CLK_SRC 19
+#define GCC_AGGRE2_UFS_AXI_CLK 20
+#define GCC_AGGRE2_USB3_AXI_CLK 21
+#define GCC_BIMC_GFX_CLK 22
+#define GCC_BIMC_HMSS_AXI_CLK 23
+#define GCC_BIMC_MSS_Q6_AXI_CLK 24
+#define GCC_BLSP1_AHB_CLK 25
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 26
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 27
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 33
+#define GCC_BLSP1_UART1_APPS_CLK 34
+#define GCC_BLSP1_UART2_APPS_CLK 35
+#define GCC_BLSP2_AHB_CLK 36
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 37
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 38
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 39
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 40
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 41
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 42
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 44
+#define GCC_BLSP2_UART1_APPS_CLK 45
+#define GCC_BLSP2_UART2_APPS_CLK 46
+#define GCC_BOOT_ROM_AHB_CLK 47
+#define GCC_CFG_NOC_USB2_AXI_CLK 48
+#define GCC_CFG_NOC_USB3_AXI_CLK 49
+#define GCC_DCC_AHB_CLK 50
+#define GCC_GP1_CLK 51
+#define GCC_GP2_CLK 52
+#define GCC_GP3_CLK 53
+#define GCC_GPU_BIMC_GFX_CLK 54
+#define GCC_GPU_CFG_AHB_CLK 55
+#define GCC_GPU_GPLL0_CLK 56
+#define GCC_GPU_GPLL0_DIV_CLK 57
+#define GCC_HMSS_DVM_BUS_CLK 58
+#define GCC_HMSS_RBCPR_CLK 59
+#define GCC_MMSS_GPLL0_CLK 60
+#define GCC_MMSS_GPLL0_DIV_CLK 61
+#define GCC_MMSS_NOC_CFG_AHB_CLK 62
+#define GCC_MMSS_SYS_NOC_AXI_CLK 63
+#define GCC_MSS_CFG_AHB_CLK 64
+#define GCC_MSS_GPLL0_DIV_CLK 65
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 66
+#define GCC_MSS_Q6_BIMC_AXI_CLK 67
+#define GCC_MSS_SNOC_AXI_CLK 68
+#define GCC_PDM2_CLK 69
+#define GCC_PDM_AHB_CLK 70
+#define GCC_PRNG_AHB_CLK 71
+#define GCC_QSPI_AHB_CLK 72
+#define GCC_QSPI_SER_CLK 73
+#define GCC_SDCC1_AHB_CLK 74
+#define GCC_SDCC1_APPS_CLK 75
+#define GCC_SDCC1_ICE_CORE_CLK 76
+#define GCC_SDCC2_AHB_CLK 77
+#define GCC_SDCC2_APPS_CLK 78
+#define GCC_UFS_AHB_CLK 79
+#define GCC_UFS_AXI_CLK 80
+#define GCC_UFS_CLKREF_CLK 81
+#define GCC_UFS_ICE_CORE_CLK 82
+#define GCC_UFS_PHY_AUX_CLK 83
+#define GCC_UFS_RX_SYMBOL_0_CLK 84
+#define GCC_UFS_RX_SYMBOL_1_CLK 85
+#define GCC_UFS_TX_SYMBOL_0_CLK 86
+#define GCC_UFS_UNIPRO_CORE_CLK 87
+#define GCC_USB20_MASTER_CLK 88
+#define GCC_USB20_MOCK_UTMI_CLK 89
+#define GCC_USB20_SLEEP_CLK 90
+#define GCC_USB30_MASTER_CLK 91
+#define GCC_USB30_MOCK_UTMI_CLK 92
+#define GCC_USB30_SLEEP_CLK 93
+#define GCC_USB3_CLKREF_CLK 94
+#define GCC_USB3_PHY_AUX_CLK 95
+#define GCC_USB3_PHY_PIPE_CLK 96
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 97
+#define GP1_CLK_SRC 98
+#define GP2_CLK_SRC 99
+#define GP3_CLK_SRC 100
+#define GPLL0 101
+#define GPLL0_EARLY 102
+#define GPLL1 103
+#define GPLL1_EARLY 104
+#define GPLL4 105
+#define GPLL4_EARLY 106
+#define HMSS_GPLL0_CLK_SRC 107
+#define HMSS_GPLL4_CLK_SRC 108
+#define HMSS_RBCPR_CLK_SRC 109
+#define PDM2_CLK_SRC 110
+#define QSPI_SER_CLK_SRC 111
+#define SDCC1_APPS_CLK_SRC 112
+#define SDCC1_ICE_CORE_CLK_SRC 113
+#define SDCC2_APPS_CLK_SRC 114
+#define UFS_AXI_CLK_SRC 115
+#define UFS_ICE_CORE_CLK_SRC 116
+#define UFS_PHY_AUX_CLK_SRC 117
+#define UFS_UNIPRO_CORE_CLK_SRC 118
+#define USB20_MASTER_CLK_SRC 119
+#define USB20_MOCK_UTMI_CLK_SRC 120
+#define USB30_MASTER_CLK_SRC 121
+#define USB30_MOCK_UTMI_CLK_SRC 122
+#define USB3_PHY_AUX_CLK_SRC 123
+#define GPLL0_OUT_MSSCC 124
+#define GCC_UFS_AXI_HW_CTL_CLK 125
+#define GCC_UFS_ICE_CORE_HW_CTL_CLK 126
+#define GCC_UFS_PHY_AUX_HW_CTL_CLK 127
+#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 128
+#define GCC_RX0_USB2_CLKREF_CLK 129
+#define GCC_RX1_USB2_CLKREF_CLK 130
+
+#define PCIE_0_GDSC 0
+#define UFS_GDSC 1
+#define USB_30_GDSC 2
+
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_BCR 2
+#define GCC_USB3_DP_PHY_BCR 3
+#define GCC_USB3_PHY_BCR 4
+#define GCC_USB3PHY_PHY_BCR 5
+#define GCC_USB_20_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
new file mode 100644
index 0000000..968fa65
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GCC_SDM845_H
+
+/* GCC clock registers */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CE1_AHB_CLK 9
+#define GCC_CE1_AXI_CLK 10
+#define GCC_CE1_CLK 11
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
+#define GCC_CPUSS_AHB_CLK 14
+#define GCC_CPUSS_AHB_CLK_SRC 15
+#define GCC_CPUSS_RBCPR_CLK 16
+#define GCC_CPUSS_RBCPR_CLK_SRC 17
+#define GCC_DDRSS_GPU_AXI_CLK 18
+#define GCC_DISP_AHB_CLK 19
+#define GCC_DISP_AXI_CLK 20
+#define GCC_DISP_GPLL0_CLK_SRC 21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 22
+#define GCC_DISP_XO_CLK 23
+#define GCC_GP1_CLK 24
+#define GCC_GP1_CLK_SRC 25
+#define GCC_GP2_CLK 26
+#define GCC_GP2_CLK_SRC 27
+#define GCC_GP3_CLK 28
+#define GCC_GP3_CLK_SRC 29
+#define GCC_GPU_CFG_AHB_CLK 30
+#define GCC_GPU_GPLL0_CLK_SRC 31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 32
+#define GCC_GPU_MEMNOC_GFX_CLK 33
+#define GCC_GPU_SNOC_DVM_GFX_CLK 34
+#define GCC_MSS_AXIS2_CLK 35
+#define GCC_MSS_CFG_AHB_CLK 36
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 37
+#define GCC_MSS_MFAB_AXIS_CLK 38
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 39
+#define GCC_MSS_SNOC_AXI_CLK 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_CLKREF_CLK 52
+#define GCC_PCIE_1_MSTR_AXI_CLK 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_SLV_AXI_CLK 55
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_PHY_AUX_CLK 57
+#define GCC_PCIE_PHY_REFGEN_CLK 58
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_QMIP_CAMERA_AHB_CLK 65
+#define GCC_QMIP_DISP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_S0_CLK 68
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S1_CLK 70
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S2_CLK 72
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S3_CLK 74
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S4_CLK 76
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S5_CLK 78
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S6_CLK 80
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S7_CLK 82
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S0_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S1_CLK 86
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S2_CLK 88
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S3_CLK 90
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S4_CLK 92
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S5_CLK 94
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S6_CLK 96
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S7_CLK 98
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 99
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103
+#define GCC_SDCC2_AHB_CLK 104
+#define GCC_SDCC2_APPS_CLK 105
+#define GCC_SDCC2_APPS_CLK_SRC 106
+#define GCC_SDCC4_AHB_CLK 107
+#define GCC_SDCC4_APPS_CLK 108
+#define GCC_SDCC4_APPS_CLK_SRC 109
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 110
+#define GCC_TSIF_AHB_CLK 111
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 112
+#define GCC_TSIF_REF_CLK 113
+#define GCC_TSIF_REF_CLK_SRC 114
+#define GCC_UFS_CARD_AHB_CLK 115
+#define GCC_UFS_CARD_AXI_CLK 116
+#define GCC_UFS_CARD_AXI_CLK_SRC 117
+#define GCC_UFS_CARD_CLKREF_CLK 118
+#define GCC_UFS_CARD_ICE_CORE_CLK 119
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 120
+#define GCC_UFS_CARD_PHY_AUX_CLK 121
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 123
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 124
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 126
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_MEM_CLKREF_CLK 128
+#define GCC_UFS_PHY_AHB_CLK 129
+#define GCC_UFS_PHY_AXI_CLK 130
+#define GCC_UFS_PHY_AXI_CLK_SRC 131
+#define GCC_UFS_PHY_ICE_CORE_CLK 132
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 133
+#define GCC_UFS_PHY_PHY_AUX_CLK 134
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 140
+#define GCC_USB30_PRIM_MASTER_CLK 141
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 142
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 143
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 144
+#define GCC_USB30_PRIM_SLEEP_CLK 145
+#define GCC_USB30_SEC_MASTER_CLK 146
+#define GCC_USB30_SEC_MASTER_CLK_SRC 147
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 148 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 149 +#define GCC_USB30_SEC_SLEEP_CLK 150 +#define GCC_USB3_PRIM_CLKREF_CLK 151 +#define GCC_USB3_PRIM_PHY_AUX_CLK 152 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 153 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 154 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 155 +#define GCC_USB3_SEC_CLKREF_CLK 156 +#define GCC_USB3_SEC_PHY_AUX_CLK 157 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 158 +#define GCC_USB3_SEC_PHY_PIPE_CLK 159 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 160 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 161 +#define GCC_VIDEO_AHB_CLK 162 +#define GCC_VIDEO_AXI_CLK 163 +#define GCC_VIDEO_XO_CLK 164 +#define GPLL0 165 +#define GPLL0_OUT_EVEN 166 +#define GPLL0_OUT_MAIN 167 +#define GCC_GPU_IREF_CLK 168 +#define GCC_SDCC1_AHB_CLK 169 +#define GCC_SDCC1_APPS_CLK 170 +#define GCC_SDCC1_ICE_CORE_CLK 171 +#define GCC_SDCC1_APPS_CLK_SRC 172 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 173 +#define GCC_APC_VS_CLK 174 +#define GCC_GPU_VS_CLK 175 +#define GCC_MSS_VS_CLK 176 +#define GCC_VDDA_VS_CLK 177 +#define GCC_VDDCX_VS_CLK 178 +#define GCC_VDDMX_VS_CLK 179 +#define GCC_VS_CTRL_AHB_CLK 180 +#define GCC_VS_CTRL_CLK 181 +#define GCC_VS_CTRL_CLK_SRC 182 +#define GCC_VSENSOR_CLK_SRC 183 +#define GPLL4 184 +#define GCC_CPUSS_DVM_BUS_CLK 185 +#define GCC_CPUSS_GNOC_CLK 186 +#define GCC_QSPI_CORE_CLK_SRC 187 +#define GCC_QSPI_CORE_CLK 188 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189 +#define GCC_LPASS_Q6_AXI_CLK 190 +#define GCC_LPASS_SWAY_CLK 191 + +/* GCC Resets */ +#define GCC_MMSS_BCR 0 +#define GCC_PCIE_0_BCR 1 +#define GCC_PCIE_1_BCR 2 +#define GCC_PCIE_PHY_BCR 3 +#define GCC_PDM_BCR 4 +#define GCC_PRNG_BCR 5 +#define GCC_QUPV3_WRAPPER_0_BCR 6 +#define GCC_QUPV3_WRAPPER_1_BCR 7 +#define GCC_QUSB2PHY_PRIM_BCR 8 +#define GCC_QUSB2PHY_SEC_BCR 9 +#define GCC_SDCC2_BCR 10 +#define GCC_SDCC4_BCR 11 +#define GCC_TSIF_BCR 12 +#define GCC_UFS_CARD_BCR 13 +#define GCC_UFS_PHY_BCR 14 +#define GCC_USB30_PRIM_BCR 15 +#define GCC_USB30_SEC_BCR 16 +#define GCC_USB3_PHY_PRIM_BCR 17 +#define GCC_USB3PHY_PHY_PRIM_BCR 18 +#define GCC_USB3_DP_PHY_PRIM_BCR 19 +#define GCC_USB3_PHY_SEC_BCR 20 +#define GCC_USB3PHY_PHY_SEC_BCR 21 +#define GCC_USB3_DP_PHY_SEC_BCR 22 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23 +#define GCC_PCIE_0_PHY_BCR 24 +#define GCC_PCIE_1_PHY_BCR 25 + +/* GCC GDSCRs */ +#define PCIE_0_GDSC 0 +#define PCIE_1_GDSC 1 +#define UFS_CARD_GDSC 2 +#define UFS_PHY_GDSC 3 +#define USB30_PRIM_GDSC 4 +#define USB30_SEC_GDSC 5 +#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 6 +#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 7 +#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 8 +#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 9 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 10 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11 +#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 12 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h new file mode 100644 index 0000000..90d60ef --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H + +/* GCC clocks */ +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 1 +#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 3 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 6 +#define GCC_BOOT_ROM_AHB_CLK 7 +#define GCC_CAMERA_AHB_CLK 8 +#define GCC_CAMERA_HF_AXI_CLK 9 +#define GCC_CAMERA_SF_AXI_CLK 10 +#define GCC_CAMERA_XO_CLK 11 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13 +#define GCC_CPUSS_AHB_CLK 14 +#define GCC_CPUSS_AHB_CLK_SRC 15 +#define GCC_CPUSS_DVM_BUS_CLK 16 +#define GCC_CPUSS_GNOC_CLK 17 +#define GCC_CPUSS_RBCPR_CLK 18 +#define GCC_DDRSS_GPU_AXI_CLK 19 +#define GCC_DISP_AHB_CLK 20 +#define GCC_DISP_HF_AXI_CLK 21 +#define GCC_DISP_SF_AXI_CLK 22 +#define GCC_DISP_XO_CLK 23 +#define GCC_EMAC_AXI_CLK 24 +#define GCC_EMAC_PTP_CLK 25 +#define GCC_EMAC_PTP_CLK_SRC 26 +#define GCC_EMAC_RGMII_CLK 27 +#define GCC_EMAC_RGMII_CLK_SRC 28 +#define GCC_EMAC_SLV_AHB_CLK 29 +#define GCC_GP1_CLK 30 +#define GCC_GP1_CLK_SRC 31 +#define GCC_GP2_CLK 32 +#define GCC_GP2_CLK_SRC 33 +#define GCC_GP3_CLK 34 +#define GCC_GP3_CLK_SRC 35 +#define GCC_GPU_CFG_AHB_CLK 36 +#define GCC_GPU_GPLL0_CLK_SRC 37 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 38 +#define GCC_GPU_IREF_CLK 39 +#define GCC_GPU_MEMNOC_GFX_CLK 40 +#define GCC_GPU_SNOC_DVM_GFX_CLK 41 +#define GCC_NPU_AT_CLK 42 +#define GCC_NPU_AXI_CLK 43 +#define GCC_NPU_CFG_AHB_CLK 44 +#define GCC_NPU_GPLL0_CLK_SRC 45 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 46 +#define GCC_NPU_TRIG_CLK 47 +#define GCC_PCIE0_PHY_REFGEN_CLK 48 +#define GCC_PCIE1_PHY_REFGEN_CLK 49 +#define GCC_PCIE_0_AUX_CLK 50 +#define GCC_PCIE_0_AUX_CLK_SRC 51 +#define GCC_PCIE_0_CFG_AHB_CLK 52 +#define GCC_PCIE_0_CLKREF_CLK 53 +#define GCC_PCIE_0_MSTR_AXI_CLK 54 +#define GCC_PCIE_0_PIPE_CLK 55 +#define GCC_PCIE_0_SLV_AXI_CLK 56 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57 +#define GCC_PCIE_1_AUX_CLK 58 +#define GCC_PCIE_1_AUX_CLK_SRC 59 +#define GCC_PCIE_1_CFG_AHB_CLK 60 +#define GCC_PCIE_1_CLKREF_CLK 61 +#define GCC_PCIE_1_MSTR_AXI_CLK 62 +#define GCC_PCIE_1_PIPE_CLK 63 +#define GCC_PCIE_1_SLV_AXI_CLK 64 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65 +#define GCC_PCIE_PHY_AUX_CLK 66 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 67 +#define GCC_PDM2_CLK 68 +#define GCC_PDM2_CLK_SRC 69 +#define GCC_PDM_AHB_CLK 70 +#define GCC_PDM_XO4_CLK 71 +#define GCC_PRNG_AHB_CLK 72 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 73 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 74 +#define GCC_QMIP_DISP_AHB_CLK 75 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 76 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 77 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 78 +#define GCC_QSPI_CORE_CLK 79 +#define GCC_QSPI_CORE_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S0_CLK 81 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 82 +#define GCC_QUPV3_WRAP0_S1_CLK 83 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 84 +#define GCC_QUPV3_WRAP0_S2_CLK 85 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 86 +#define GCC_QUPV3_WRAP0_S3_CLK 87 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 88 +#define GCC_QUPV3_WRAP0_S4_CLK 89 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 90 +#define GCC_QUPV3_WRAP0_S5_CLK 91 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 92 +#define GCC_QUPV3_WRAP0_S6_CLK 93 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 94 +#define GCC_QUPV3_WRAP0_S7_CLK 95 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 96 +#define GCC_QUPV3_WRAP1_S0_CLK 97 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98 +#define GCC_QUPV3_WRAP1_S1_CLK 99 +#define 
GCC_QUPV3_WRAP1_S1_CLK_SRC 100 +#define GCC_QUPV3_WRAP1_S2_CLK 101 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102 +#define GCC_QUPV3_WRAP1_S3_CLK 103 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104 +#define GCC_QUPV3_WRAP1_S4_CLK 105 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106 +#define GCC_QUPV3_WRAP1_S5_CLK 107 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108 +#define GCC_QUPV3_WRAP2_S0_CLK 109 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110 +#define GCC_QUPV3_WRAP2_S1_CLK 111 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112 +#define GCC_QUPV3_WRAP2_S2_CLK 113 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114 +#define GCC_QUPV3_WRAP2_S3_CLK 115 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116 +#define GCC_QUPV3_WRAP2_S4_CLK 117 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118 +#define GCC_QUPV3_WRAP2_S5_CLK 119 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126 +#define GCC_SDCC2_AHB_CLK 127 +#define GCC_SDCC2_APPS_CLK 128 +#define GCC_SDCC2_APPS_CLK_SRC 129 +#define GCC_SDCC4_AHB_CLK 130 +#define GCC_SDCC4_APPS_CLK 131 +#define GCC_SDCC4_APPS_CLK_SRC 132 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 133 +#define GCC_TSIF_AHB_CLK 134 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 135 +#define GCC_TSIF_REF_CLK 136 +#define GCC_TSIF_REF_CLK_SRC 137 +#define GCC_UFS_CARD_AHB_CLK 138 +#define GCC_UFS_CARD_AXI_CLK 139 +#define GCC_UFS_CARD_AXI_CLK_SRC 140 +#define GCC_UFS_CARD_AXI_HW_CTL_CLK 141 +#define GCC_UFS_CARD_CLKREF_CLK 142 +#define GCC_UFS_CARD_ICE_CORE_CLK 143 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 144 +#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 145 +#define GCC_UFS_CARD_PHY_AUX_CLK 146 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 147 +#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 148 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 149 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 150 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 151 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 152 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 153 +#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 154 +#define GCC_UFS_MEM_CLKREF_CLK 155 +#define GCC_UFS_PHY_AHB_CLK 156 +#define GCC_UFS_PHY_AXI_CLK 157 +#define GCC_UFS_PHY_AXI_CLK_SRC 158 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 159 +#define GCC_UFS_PHY_ICE_CORE_CLK 160 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 162 +#define GCC_UFS_PHY_PHY_AUX_CLK 163 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 164 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 166 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 167 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 169 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 170 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 171 +#define GCC_USB30_PRIM_MASTER_CLK 172 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 173 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 174 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 175 +#define GCC_USB30_PRIM_SLEEP_CLK 176 +#define GCC_USB30_SEC_MASTER_CLK 177 +#define GCC_USB30_SEC_MASTER_CLK_SRC 178 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 179 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 180 +#define GCC_USB30_SEC_SLEEP_CLK 181 +#define GCC_USB3_PRIM_CLKREF_CLK 182 +#define GCC_USB3_PRIM_PHY_AUX_CLK 183 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 184 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 185 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 186 +#define GCC_USB3_SEC_CLKREF_CLK 187 +#define GCC_USB3_SEC_PHY_AUX_CLK 188 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 189 +#define 
GCC_USB3_SEC_PHY_COM_AUX_CLK 190 +#define GCC_USB3_SEC_PHY_PIPE_CLK 191 +#define GCC_VIDEO_AHB_CLK 192 +#define GCC_VIDEO_AXI0_CLK 193 +#define GCC_VIDEO_AXI1_CLK 194 +#define GCC_VIDEO_AXIC_CLK 195 +#define GCC_VIDEO_XO_CLK 196 +#define GPLL0 197 +#define GPLL0_OUT_EVEN 198 +#define GPLL7 199 +#define GPLL9 200 + +/* Reset clocks */ +#define GCC_EMAC_BCR 0 +#define GCC_GPU_BCR 1 +#define GCC_MMSS_BCR 2 +#define GCC_NPU_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_PHY_BCR 5 +#define GCC_PCIE_1_BCR 6 +#define GCC_PCIE_1_PHY_BCR 7 +#define GCC_PCIE_PHY_BCR 8 +#define GCC_PDM_BCR 9 +#define GCC_PRNG_BCR 10 +#define GCC_QSPI_BCR 11 +#define GCC_QUPV3_WRAPPER_0_BCR 12 +#define GCC_QUPV3_WRAPPER_1_BCR 13 +#define GCC_QUPV3_WRAPPER_2_BCR 14 +#define GCC_QUSB2PHY_PRIM_BCR 15 +#define GCC_QUSB2PHY_SEC_BCR 16 +#define GCC_USB3_PHY_PRIM_BCR 17 +#define GCC_USB3_DP_PHY_PRIM_BCR 18 +#define GCC_USB3_PHY_SEC_BCR 19 +#define GCC_USB3PHY_PHY_SEC_BCR 20 +#define GCC_SDCC2_BCR 21 +#define GCC_SDCC4_BCR 22 +#define GCC_TSIF_BCR 23 +#define GCC_UFS_CARD_BCR 24 +#define GCC_UFS_PHY_BCR 25 +#define GCC_USB30_PRIM_BCR 26 +#define GCC_USB30_SEC_BCR 27 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-msm8998.h b/include/dt-bindings/clock/qcom,gpucc-msm8998.h new file mode 100644 index 0000000..2623570 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-msm8998.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, Jeffrey Hugo + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GPUCC_8998_H +#define _DT_BINDINGS_CLK_MSM_GPUCC_8998_H + +#define GPUPLL0 0 +#define GPUPLL0_OUT_EVEN 1 +#define RBCPR_CLK_SRC 2 +#define GFX3D_CLK_SRC 3 +#define RBBMTIMER_CLK_SRC 4 +#define GFX3D_ISENSE_CLK_SRC 5 +#define RBCPR_CLK 6 +#define GFX3D_CLK 7 +#define RBBMTIMER_CLK 8 +#define GFX3D_ISENSE_CLK 9 +#define GPUCC_CXO_CLK 10 + +#define GPU_CX_BCR 0 +#define RBCPR_BCR 1 +#define GPU_GX_BCR 2 +#define GPU_ISENSE_BCR 3 + +#define GPU_CX_GDSC 1 +#define GPU_GX_GDSC 2 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h new file mode 100644 index 0000000..9690d90 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H + +/* GPU_CC clock registers */ +#define GPU_CC_CX_GMU_CLK 0 +#define GPU_CC_CXO_CLK 1 +#define GPU_CC_GMU_CLK_SRC 2 +#define GPU_CC_PLL1 3 + +/* GPU_CC Resets */ +#define GPUCC_GPU_CC_CX_BCR 0 +#define GPUCC_GPU_CC_GMU_BCR 1 +#define GPUCC_GPU_CC_XO_BCR 2 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h new file mode 100644 index 0000000..25b92bb --- /dev/null +++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H +#define _DT_BINDINGS_CLK_LCC_IPQ806X_H + +#define PLL4 0 +#define MI2S_OSR_SRC 1 +#define MI2S_OSR_CLK 2 +#define MI2S_DIV_CLK 3 +#define MI2S_BIT_DIV_CLK 4 +#define MI2S_BIT_CLK 5 +#define PCM_SRC 6 +#define PCM_CLK_OUT 7 +#define PCM_CLK 8 +#define SPDIF_SRC 9 +#define SPDIF_CLK 10 +#define AHBIX_CLK 11 + +#endif diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h new file mode 100644 index 0000000..299338e --- /dev/null +++ b/include/dt-bindings/clock/qcom,lcc-mdm9615.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (c) BayLibre, SAS. + * Author : Neil Armstrong + */ + +#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H +#define _DT_BINDINGS_CLK_LCC_MDM9615_H + +#define PLL4 0 +#define MI2S_OSR_SRC 1 +#define MI2S_OSR_CLK 2 +#define MI2S_DIV_CLK 3 +#define MI2S_BIT_DIV_CLK 4 +#define MI2S_BIT_CLK 5 +#define PCM_SRC 6 +#define PCM_CLK_OUT 7 +#define PCM_CLK 8 +#define SLIMBUS_SRC 9 +#define AUDIO_SLIMBUS_CLK 10 +#define SPS_SLIMBUS_CLK 11 +#define CODEC_I2S_MIC_OSR_SRC 12 +#define CODEC_I2S_MIC_OSR_CLK 13 +#define CODEC_I2S_MIC_DIV_CLK 14 +#define CODEC_I2S_MIC_BIT_DIV_CLK 15 +#define CODEC_I2S_MIC_BIT_CLK 16 +#define SPARE_I2S_MIC_OSR_SRC 17 +#define SPARE_I2S_MIC_OSR_CLK 18 +#define SPARE_I2S_MIC_DIV_CLK 19 +#define SPARE_I2S_MIC_BIT_DIV_CLK 20 +#define SPARE_I2S_MIC_BIT_CLK 21 +#define CODEC_I2S_SPKR_OSR_SRC 22 +#define CODEC_I2S_SPKR_OSR_CLK 23 +#define CODEC_I2S_SPKR_DIV_CLK 24 +#define CODEC_I2S_SPKR_BIT_DIV_CLK 25 +#define CODEC_I2S_SPKR_BIT_CLK 26 +#define SPARE_I2S_SPKR_OSR_SRC 27 +#define SPARE_I2S_SPKR_OSR_CLK 28 +#define SPARE_I2S_SPKR_DIV_CLK 29 +#define SPARE_I2S_SPKR_BIT_DIV_CLK 30 +#define SPARE_I2S_SPKR_BIT_CLK 31 + +#endif diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h new file mode 100644 index 0000000..d115a49 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H +#define _DT_BINDINGS_CLK_LCC_MSM8960_H + +#define PLL4 0 +#define MI2S_OSR_SRC 1 +#define MI2S_OSR_CLK 2 +#define MI2S_DIV_CLK 3 +#define MI2S_BIT_DIV_CLK 4 +#define MI2S_BIT_CLK 5 +#define PCM_SRC 6 +#define PCM_CLK_OUT 7 +#define PCM_CLK 8 +#define SLIMBUS_SRC 9 +#define AUDIO_SLIMBUS_CLK 10 +#define SPS_SLIMBUS_CLK 11 +#define CODEC_I2S_MIC_OSR_SRC 12 +#define CODEC_I2S_MIC_OSR_CLK 13 +#define CODEC_I2S_MIC_DIV_CLK 14 +#define CODEC_I2S_MIC_BIT_DIV_CLK 15 +#define CODEC_I2S_MIC_BIT_CLK 16 +#define SPARE_I2S_MIC_OSR_SRC 17 +#define SPARE_I2S_MIC_OSR_CLK 18 +#define SPARE_I2S_MIC_DIV_CLK 19 +#define SPARE_I2S_MIC_BIT_DIV_CLK 20 +#define SPARE_I2S_MIC_BIT_CLK 21 +#define CODEC_I2S_SPKR_OSR_SRC 22 +#define CODEC_I2S_SPKR_OSR_CLK 23 +#define CODEC_I2S_SPKR_DIV_CLK 24 +#define CODEC_I2S_SPKR_BIT_DIV_CLK 25 +#define CODEC_I2S_SPKR_BIT_CLK 26 +#define SPARE_I2S_SPKR_OSR_SRC 27 +#define SPARE_I2S_SPKR_OSR_CLK 28 +#define SPARE_I2S_SPKR_DIV_CLK 29 +#define SPARE_I2S_SPKR_BIT_DIV_CLK 30 +#define SPARE_I2S_SPKR_BIT_CLK 31 + +#endif diff --git a/include/dt-bindings/clock/qcom,lpass-sdm845.h b/include/dt-bindings/clock/qcom,lpass-sdm845.h new file mode 100644 index 0000000..6590508 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lpass-sdm845.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H +#define _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H + +#define LPASS_Q6SS_AHBM_AON_CLK 0 +#define LPASS_Q6SS_AHBS_AON_CLK 1 +#define LPASS_QDSP6SS_XO_CLK 2 +#define LPASS_QDSP6SS_SLEEP_CLK 3 +#define LPASS_QDSP6SS_CORE_CLK 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h new file mode 100644 index 0000000..9d42b1b --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_APQ_MMCC_8084_H +#define _DT_BINDINGS_CLK_APQ_MMCC_8084_H + +#define MMSS_AHB_CLK_SRC 0 +#define MMSS_AXI_CLK_SRC 1 +#define MMPLL0 2 +#define MMPLL0_VOTE 3 +#define MMPLL1 4 +#define MMPLL1_VOTE 5 +#define MMPLL2 6 +#define MMPLL3 7 +#define MMPLL4 8 +#define CSI0_CLK_SRC 9 +#define CSI1_CLK_SRC 10 +#define CSI2_CLK_SRC 11 +#define CSI3_CLK_SRC 12 +#define VCODEC0_CLK_SRC 13 +#define VFE0_CLK_SRC 14 +#define VFE1_CLK_SRC 15 +#define MDP_CLK_SRC 16 +#define PCLK0_CLK_SRC 17 +#define PCLK1_CLK_SRC 18 +#define OCMEMNOC_CLK_SRC 19 +#define GFX3D_CLK_SRC 20 +#define JPEG0_CLK_SRC 21 +#define JPEG1_CLK_SRC 22 +#define JPEG2_CLK_SRC 23 +#define EDPPIXEL_CLK_SRC 24 +#define EXTPCLK_CLK_SRC 25 +#define VP_CLK_SRC 26 +#define CCI_CLK_SRC 27 +#define CAMSS_GP0_CLK_SRC 28 +#define CAMSS_GP1_CLK_SRC 29 +#define MCLK0_CLK_SRC 30 +#define MCLK1_CLK_SRC 31 +#define MCLK2_CLK_SRC 32 +#define MCLK3_CLK_SRC 33 +#define CSI0PHYTIMER_CLK_SRC 34 +#define CSI1PHYTIMER_CLK_SRC 35 +#define CSI2PHYTIMER_CLK_SRC 36 +#define CPP_CLK_SRC 37 +#define BYTE0_CLK_SRC 38 +#define BYTE1_CLK_SRC 39 +#define EDPAUX_CLK_SRC 40 +#define EDPLINK_CLK_SRC 41 +#define ESC0_CLK_SRC 42 +#define ESC1_CLK_SRC 43 +#define HDMI_CLK_SRC 44 +#define VSYNC_CLK_SRC 45 +#define MMSS_RBCPR_CLK_SRC 46 +#define RBBMTIMER_CLK_SRC 47 +#define MAPLE_CLK_SRC 48 +#define VDP_CLK_SRC 49 +#define VPU_BUS_CLK_SRC 50 +#define MMSS_CXO_CLK 51 +#define MMSS_SLEEPCLK_CLK 52 +#define AVSYNC_AHB_CLK 53 +#define AVSYNC_EDPPIXEL_CLK 54 +#define AVSYNC_EXTPCLK_CLK 55 +#define AVSYNC_PCLK0_CLK 56 +#define AVSYNC_PCLK1_CLK 57 +#define AVSYNC_VP_CLK 58 +#define CAMSS_AHB_CLK 59 +#define CAMSS_CCI_CCI_AHB_CLK 60 +#define CAMSS_CCI_CCI_CLK 61 +#define CAMSS_CSI0_AHB_CLK 62 +#define CAMSS_CSI0_CLK 63 +#define CAMSS_CSI0PHY_CLK 64 +#define CAMSS_CSI0PIX_CLK 65 +#define CAMSS_CSI0RDI_CLK 66 +#define CAMSS_CSI1_AHB_CLK 67 +#define CAMSS_CSI1_CLK 68 +#define CAMSS_CSI1PHY_CLK 69 +#define CAMSS_CSI1PIX_CLK 70 +#define CAMSS_CSI1RDI_CLK 71 +#define CAMSS_CSI2_AHB_CLK 72 +#define CAMSS_CSI2_CLK 73 +#define CAMSS_CSI2PHY_CLK 74 +#define CAMSS_CSI2PIX_CLK 75 +#define CAMSS_CSI2RDI_CLK 76 +#define CAMSS_CSI3_AHB_CLK 77 +#define CAMSS_CSI3_CLK 78 +#define CAMSS_CSI3PHY_CLK 79 +#define CAMSS_CSI3PIX_CLK 80 +#define CAMSS_CSI3RDI_CLK 81 +#define CAMSS_CSI_VFE0_CLK 82 +#define CAMSS_CSI_VFE1_CLK 83 +#define CAMSS_GP0_CLK 84 +#define CAMSS_GP1_CLK 85 +#define CAMSS_ISPIF_AHB_CLK 86 +#define CAMSS_JPEG_JPEG0_CLK 87 +#define CAMSS_JPEG_JPEG1_CLK 88 +#define CAMSS_JPEG_JPEG2_CLK 89 +#define CAMSS_JPEG_JPEG_AHB_CLK 90 +#define CAMSS_JPEG_JPEG_AXI_CLK 91 +#define CAMSS_MCLK0_CLK 92 +#define CAMSS_MCLK1_CLK 93 +#define CAMSS_MCLK2_CLK 94 +#define CAMSS_MCLK3_CLK 95 +#define CAMSS_MICRO_AHB_CLK 96 +#define CAMSS_PHY0_CSI0PHYTIMER_CLK 97 +#define CAMSS_PHY1_CSI1PHYTIMER_CLK 98 +#define CAMSS_PHY2_CSI2PHYTIMER_CLK 99 +#define CAMSS_TOP_AHB_CLK 100 +#define CAMSS_VFE_CPP_AHB_CLK 101 +#define CAMSS_VFE_CPP_CLK 102 +#define CAMSS_VFE_VFE0_CLK 103 +#define CAMSS_VFE_VFE1_CLK 104 +#define CAMSS_VFE_VFE_AHB_CLK 105 +#define CAMSS_VFE_VFE_AXI_CLK 106 +#define MDSS_AHB_CLK 107 +#define MDSS_AXI_CLK 108 +#define MDSS_BYTE0_CLK 109 +#define MDSS_BYTE1_CLK 110 +#define MDSS_EDPAUX_CLK 111 +#define MDSS_EDPLINK_CLK 112 +#define MDSS_EDPPIXEL_CLK 113 +#define MDSS_ESC0_CLK 114 +#define MDSS_ESC1_CLK 115 +#define MDSS_EXTPCLK_CLK 116 +#define MDSS_HDMI_AHB_CLK 117 +#define MDSS_HDMI_CLK 118 +#define MDSS_MDP_CLK 119 +#define MDSS_MDP_LUT_CLK 120 +#define MDSS_PCLK0_CLK 
121 +#define MDSS_PCLK1_CLK 122 +#define MDSS_VSYNC_CLK 123 +#define MMSS_RBCPR_AHB_CLK 124 +#define MMSS_RBCPR_CLK 125 +#define MMSS_SPDM_AHB_CLK 126 +#define MMSS_SPDM_AXI_CLK 127 +#define MMSS_SPDM_CSI0_CLK 128 +#define MMSS_SPDM_GFX3D_CLK 129 +#define MMSS_SPDM_JPEG0_CLK 130 +#define MMSS_SPDM_JPEG1_CLK 131 +#define MMSS_SPDM_JPEG2_CLK 132 +#define MMSS_SPDM_MDP_CLK 133 +#define MMSS_SPDM_PCLK0_CLK 134 +#define MMSS_SPDM_PCLK1_CLK 135 +#define MMSS_SPDM_VCODEC0_CLK 136 +#define MMSS_SPDM_VFE0_CLK 137 +#define MMSS_SPDM_VFE1_CLK 138 +#define MMSS_SPDM_RM_AXI_CLK 139 +#define MMSS_SPDM_RM_OCMEMNOC_CLK 140 +#define MMSS_MISC_AHB_CLK 141 +#define MMSS_MMSSNOC_AHB_CLK 142 +#define MMSS_MMSSNOC_BTO_AHB_CLK 143 +#define MMSS_MMSSNOC_AXI_CLK 144 +#define MMSS_S0_AXI_CLK 145 +#define OCMEMCX_AHB_CLK 146 +#define OCMEMCX_OCMEMNOC_CLK 147 +#define OXILI_OCMEMGX_CLK 148 +#define OXILI_GFX3D_CLK 149 +#define OXILI_RBBMTIMER_CLK 150 +#define OXILICX_AHB_CLK 151 +#define VENUS0_AHB_CLK 152 +#define VENUS0_AXI_CLK 153 +#define VENUS0_CORE0_VCODEC_CLK 154 +#define VENUS0_CORE1_VCODEC_CLK 155 +#define VENUS0_OCMEMNOC_CLK 156 +#define VENUS0_VCODEC0_CLK 157 +#define VPU_AHB_CLK 158 +#define VPU_AXI_CLK 159 +#define VPU_BUS_CLK 160 +#define VPU_CXO_CLK 161 +#define VPU_MAPLE_CLK 162 +#define VPU_SLEEP_CLK 163 +#define VPU_VDP_CLK 164 + +/* GDSCs */ +#define VENUS0_GDSC 0 +#define VENUS0_CORE0_GDSC 1 +#define VENUS0_CORE1_GDSC 2 +#define MDSS_GDSC 3 +#define CAMSS_JPEG_GDSC 4 +#define CAMSS_VFE_GDSC 5 +#define OXILI_GDSC 6 +#define OXILICX_GDSC 7 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h new file mode 100644 index 0000000..81714fc --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8960_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8960_H + +#define MMSS_AHB_SRC 0 +#define FAB_AHB_CLK 1 +#define APU_AHB_CLK 2 +#define TV_ENC_AHB_CLK 3 +#define AMP_AHB_CLK 4 +#define DSI2_S_AHB_CLK 5 +#define JPEGD_AHB_CLK 6 +#define GFX2D0_AHB_CLK 7 +#define DSI_S_AHB_CLK 8 +#define DSI2_M_AHB_CLK 9 +#define VPE_AHB_CLK 10 +#define SMMU_AHB_CLK 11 +#define HDMI_M_AHB_CLK 12 +#define VFE_AHB_CLK 13 +#define ROT_AHB_CLK 14 +#define VCODEC_AHB_CLK 15 +#define MDP_AHB_CLK 16 +#define DSI_M_AHB_CLK 17 +#define CSI_AHB_CLK 18 +#define MMSS_IMEM_AHB_CLK 19 +#define IJPEG_AHB_CLK 20 +#define HDMI_S_AHB_CLK 21 +#define GFX3D_AHB_CLK 22 +#define GFX2D1_AHB_CLK 23 +#define MMSS_FPB_CLK 24 +#define MMSS_AXI_SRC 25 +#define MMSS_FAB_CORE 26 +#define FAB_MSP_AXI_CLK 27 +#define JPEGD_AXI_CLK 28 +#define GMEM_AXI_CLK 29 +#define MDP_AXI_CLK 30 +#define MMSS_IMEM_AXI_CLK 31 +#define IJPEG_AXI_CLK 32 +#define GFX3D_AXI_CLK 33 +#define VCODEC_AXI_CLK 34 +#define VFE_AXI_CLK 35 +#define VPE_AXI_CLK 36 +#define ROT_AXI_CLK 37 +#define VCODEC_AXI_A_CLK 38 +#define VCODEC_AXI_B_CLK 39 +#define MM_AXI_S3_FCLK 40 +#define MM_AXI_S2_FCLK 41 +#define MM_AXI_S1_FCLK 42 +#define MM_AXI_S0_FCLK 43 +#define MM_AXI_S2_CLK 44 +#define MM_AXI_S1_CLK 45 +#define MM_AXI_S0_CLK 46 +#define CSI0_SRC 47 +#define CSI0_CLK 48 +#define CSI0_PHY_CLK 49 +#define CSI1_SRC 50 +#define CSI1_CLK 51 +#define CSI1_PHY_CLK 52 +#define CSI2_SRC 53 +#define CSI2_CLK 54 +#define CSI2_PHY_CLK 55 +#define DSI_SRC 56 +#define DSI_CLK 57 +#define CSI_PIX_CLK 58 +#define CSI_RDI_CLK 59 +#define MDP_VSYNC_CLK 60 +#define HDMI_DIV_CLK 61 +#define HDMI_APP_CLK 62 +#define CSI_PIX1_CLK 63 +#define CSI_RDI2_CLK 64 +#define CSI_RDI1_CLK 65 +#define GFX2D0_SRC 66 +#define GFX2D0_CLK 67 +#define GFX2D1_SRC 68 +#define GFX2D1_CLK 69 +#define GFX3D_SRC 70 +#define GFX3D_CLK 71 +#define IJPEG_SRC 72 +#define IJPEG_CLK 73 +#define JPEGD_SRC 74 +#define JPEGD_CLK 75 +#define MDP_SRC 76 +#define MDP_CLK 77 +#define MDP_LUT_CLK 78 +#define DSI2_PIXEL_SRC 79 +#define DSI2_PIXEL_CLK 80 +#define DSI2_SRC 81 +#define DSI2_CLK 82 +#define DSI1_BYTE_SRC 83 +#define DSI1_BYTE_CLK 84 +#define DSI2_BYTE_SRC 85 +#define DSI2_BYTE_CLK 86 +#define DSI1_ESC_SRC 87 +#define DSI1_ESC_CLK 88 +#define DSI2_ESC_SRC 89 +#define DSI2_ESC_CLK 90 +#define ROT_SRC 91 +#define ROT_CLK 92 +#define TV_ENC_CLK 93 +#define TV_DAC_CLK 94 +#define HDMI_TV_CLK 95 +#define MDP_TV_CLK 96 +#define TV_SRC 97 +#define VCODEC_SRC 98 +#define VCODEC_CLK 99 +#define VFE_SRC 100 +#define VFE_CLK 101 +#define VFE_CSI_CLK 102 +#define VPE_SRC 103 +#define VPE_CLK 104 +#define DSI_PIXEL_SRC 105 +#define DSI_PIXEL_CLK 106 +#define CAMCLK0_SRC 107 +#define CAMCLK0_CLK 108 +#define CAMCLK1_SRC 109 +#define CAMCLK1_CLK 110 +#define CAMCLK2_SRC 111 +#define CAMCLK2_CLK 112 +#define CSIPHYTIMER_SRC 113 +#define CSIPHY2_TIMER_CLK 114 +#define CSIPHY1_TIMER_CLK 115 +#define CSIPHY0_TIMER_CLK 116 +#define PLL1 117 +#define PLL2 118 +#define RGB_TV_CLK 119 +#define NPL_TV_CLK 120 +#define VCAP_AHB_CLK 121 +#define VCAP_AXI_CLK 122 +#define VCAP_SRC 123 +#define VCAP_CLK 124 +#define VCAP_NPL_CLK 125 +#define PLL15 126 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h new file mode 100644 index 0000000..a62cb06 --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8974_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8974_H + +#define MMSS_AHB_CLK_SRC 0 +#define MMSS_AXI_CLK_SRC 1 +#define MMPLL0 2 +#define MMPLL0_VOTE 3 +#define MMPLL1 4 +#define MMPLL1_VOTE 5 +#define MMPLL2 6 +#define MMPLL3 7 +#define CSI0_CLK_SRC 8 +#define CSI1_CLK_SRC 9 +#define CSI2_CLK_SRC 10 +#define CSI3_CLK_SRC 11 +#define VFE0_CLK_SRC 12 +#define VFE1_CLK_SRC 13 +#define MDP_CLK_SRC 14 +#define GFX3D_CLK_SRC 15 +#define JPEG0_CLK_SRC 16 +#define JPEG1_CLK_SRC 17 +#define JPEG2_CLK_SRC 18 +#define PCLK0_CLK_SRC 19 +#define PCLK1_CLK_SRC 20 +#define VCODEC0_CLK_SRC 21 +#define CCI_CLK_SRC 22 +#define CAMSS_GP0_CLK_SRC 23 +#define CAMSS_GP1_CLK_SRC 24 +#define MCLK0_CLK_SRC 25 +#define MCLK1_CLK_SRC 26 +#define MCLK2_CLK_SRC 27 +#define MCLK3_CLK_SRC 28 +#define CSI0PHYTIMER_CLK_SRC 29 +#define CSI1PHYTIMER_CLK_SRC 30 +#define CSI2PHYTIMER_CLK_SRC 31 +#define CPP_CLK_SRC 32 +#define BYTE0_CLK_SRC 33 +#define BYTE1_CLK_SRC 34 +#define EDPAUX_CLK_SRC 35 +#define EDPLINK_CLK_SRC 36 +#define EDPPIXEL_CLK_SRC 37 +#define ESC0_CLK_SRC 38 +#define ESC1_CLK_SRC 39 +#define EXTPCLK_CLK_SRC 40 +#define HDMI_CLK_SRC 41 +#define VSYNC_CLK_SRC 42 +#define MMSS_RBCPR_CLK_SRC 43 +#define CAMSS_CCI_CCI_AHB_CLK 44 +#define CAMSS_CCI_CCI_CLK 45 +#define CAMSS_CSI0_AHB_CLK 46 +#define CAMSS_CSI0_CLK 47 +#define CAMSS_CSI0PHY_CLK 48 +#define CAMSS_CSI0PIX_CLK 49 +#define CAMSS_CSI0RDI_CLK 50 +#define CAMSS_CSI1_AHB_CLK 51 +#define CAMSS_CSI1_CLK 52 +#define CAMSS_CSI1PHY_CLK 53 +#define CAMSS_CSI1PIX_CLK 54 +#define CAMSS_CSI1RDI_CLK 55 +#define CAMSS_CSI2_AHB_CLK 56 +#define CAMSS_CSI2_CLK 57 +#define CAMSS_CSI2PHY_CLK 58 +#define CAMSS_CSI2PIX_CLK 59 +#define CAMSS_CSI2RDI_CLK 60 +#define CAMSS_CSI3_AHB_CLK 61 +#define CAMSS_CSI3_CLK 62 +#define CAMSS_CSI3PHY_CLK 63 +#define CAMSS_CSI3PIX_CLK 64 +#define CAMSS_CSI3RDI_CLK 65 +#define CAMSS_CSI_VFE0_CLK 66 +#define CAMSS_CSI_VFE1_CLK 67 +#define CAMSS_GP0_CLK 68 +#define CAMSS_GP1_CLK 69 +#define CAMSS_ISPIF_AHB_CLK 70 +#define CAMSS_JPEG_JPEG0_CLK 71 +#define CAMSS_JPEG_JPEG1_CLK 72 +#define CAMSS_JPEG_JPEG2_CLK 73 +#define CAMSS_JPEG_JPEG_AHB_CLK 74 +#define CAMSS_JPEG_JPEG_AXI_CLK 75 +#define CAMSS_JPEG_JPEG_OCMEMNOC_CLK 76 +#define CAMSS_MCLK0_CLK 77 +#define CAMSS_MCLK1_CLK 78 +#define CAMSS_MCLK2_CLK 79 +#define CAMSS_MCLK3_CLK 80 +#define CAMSS_MICRO_AHB_CLK 81 +#define CAMSS_PHY0_CSI0PHYTIMER_CLK 82 +#define CAMSS_PHY1_CSI1PHYTIMER_CLK 83 +#define CAMSS_PHY2_CSI2PHYTIMER_CLK 84 +#define CAMSS_TOP_AHB_CLK 85 +#define CAMSS_VFE_CPP_AHB_CLK 86 +#define CAMSS_VFE_CPP_CLK 87 +#define CAMSS_VFE_VFE0_CLK 88 +#define CAMSS_VFE_VFE1_CLK 89 +#define CAMSS_VFE_VFE_AHB_CLK 90 +#define CAMSS_VFE_VFE_AXI_CLK 91 +#define CAMSS_VFE_VFE_OCMEMNOC_CLK 92 +#define MDSS_AHB_CLK 93 +#define MDSS_AXI_CLK 94 +#define MDSS_BYTE0_CLK 95 +#define MDSS_BYTE1_CLK 96 +#define MDSS_EDPAUX_CLK 97 +#define MDSS_EDPLINK_CLK 98 +#define MDSS_EDPPIXEL_CLK 99 +#define MDSS_ESC0_CLK 100 +#define MDSS_ESC1_CLK 101 +#define MDSS_EXTPCLK_CLK 102 +#define MDSS_HDMI_AHB_CLK 103 +#define MDSS_HDMI_CLK 104 +#define MDSS_MDP_CLK 105 +#define MDSS_MDP_LUT_CLK 106 +#define MDSS_PCLK0_CLK 107 +#define MDSS_PCLK1_CLK 108 +#define MDSS_VSYNC_CLK 109 +#define MMSS_MISC_AHB_CLK 110 +#define MMSS_MMSSNOC_AHB_CLK 111 +#define MMSS_MMSSNOC_BTO_AHB_CLK 112 +#define MMSS_MMSSNOC_AXI_CLK 113 +#define MMSS_S0_AXI_CLK 114 +#define OCMEMCX_AHB_CLK 115 +#define OCMEMCX_OCMEMNOC_CLK 116 +#define OXILI_OCMEMGX_CLK 117 +#define OCMEMNOC_CLK 118 +#define OXILI_GFX3D_CLK 
119 +#define OXILICX_AHB_CLK 120 +#define OXILICX_AXI_CLK 121 +#define VENUS0_AHB_CLK 122 +#define VENUS0_AXI_CLK 123 +#define VENUS0_OCMEMNOC_CLK 124 +#define VENUS0_VCODEC0_CLK 125 +#define OCMEMNOC_CLK_SRC 126 +#define SPDM_JPEG0 127 +#define SPDM_JPEG1 128 +#define SPDM_MDP 129 +#define SPDM_AXI 130 +#define SPDM_VCODEC0 131 +#define SPDM_VFE0 132 +#define SPDM_VFE1 133 +#define SPDM_JPEG2 134 +#define SPDM_PCLK1 135 +#define SPDM_GFX3D 136 +#define SPDM_AHB 137 +#define SPDM_PCLK0 138 +#define SPDM_OCMEMNOC 139 +#define SPDM_CSI0 140 +#define SPDM_RM_AXI 141 +#define SPDM_RM_OCMEMNOC 142 + +/* gdscs */ +#define VENUS0_GDSC 0 +#define MDSS_GDSC 1 +#define CAMSS_JPEG_GDSC 2 +#define CAMSS_VFE_GDSC 3 +#define OXILI_GDSC 4 +#define OXILICX_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h new file mode 100644 index 0000000..d51f9ac --- /dev/null +++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8996_H +#define _DT_BINDINGS_CLK_MSM_MMCC_8996_H + +#define MMPLL0_EARLY 0 +#define MMPLL0_PLL 1 +#define MMPLL1_EARLY 2 +#define MMPLL1_PLL 3 +#define MMPLL2_EARLY 4 +#define MMPLL2_PLL 5 +#define MMPLL3_EARLY 6 +#define MMPLL3_PLL 7 +#define MMPLL4_EARLY 8 +#define MMPLL4_PLL 9 +#define MMPLL5_EARLY 10 +#define MMPLL5_PLL 11 +#define MMPLL8_EARLY 12 +#define MMPLL8_PLL 13 +#define MMPLL9_EARLY 14 +#define MMPLL9_PLL 15 +#define AHB_CLK_SRC 16 +#define AXI_CLK_SRC 17 +#define MAXI_CLK_SRC 18 +#define DSA_CORE_CLK_SRC 19 +#define GFX3D_CLK_SRC 20 +#define RBBMTIMER_CLK_SRC 21 +#define ISENSE_CLK_SRC 22 +#define RBCPR_CLK_SRC 23 +#define VIDEO_CORE_CLK_SRC 24 +#define VIDEO_SUBCORE0_CLK_SRC 25 +#define VIDEO_SUBCORE1_CLK_SRC 26 +#define PCLK0_CLK_SRC 27 +#define PCLK1_CLK_SRC 28 +#define MDP_CLK_SRC 29 +#define EXTPCLK_CLK_SRC 30 +#define VSYNC_CLK_SRC 31 +#define HDMI_CLK_SRC 32 +#define BYTE0_CLK_SRC 33 +#define BYTE1_CLK_SRC 34 +#define ESC0_CLK_SRC 35 +#define ESC1_CLK_SRC 36 +#define CAMSS_GP0_CLK_SRC 37 +#define CAMSS_GP1_CLK_SRC 38 +#define MCLK0_CLK_SRC 39 +#define MCLK1_CLK_SRC 40 +#define MCLK2_CLK_SRC 41 +#define MCLK3_CLK_SRC 42 +#define CCI_CLK_SRC 43 +#define CSI0PHYTIMER_CLK_SRC 44 +#define CSI1PHYTIMER_CLK_SRC 45 +#define CSI2PHYTIMER_CLK_SRC 46 +#define CSIPHY0_3P_CLK_SRC 47 +#define CSIPHY1_3P_CLK_SRC 48 +#define CSIPHY2_3P_CLK_SRC 49 +#define JPEG0_CLK_SRC 50 +#define JPEG2_CLK_SRC 51 +#define JPEG_DMA_CLK_SRC 52 +#define VFE0_CLK_SRC 53 +#define VFE1_CLK_SRC 54 +#define CPP_CLK_SRC 55 +#define CSI0_CLK_SRC 56 +#define CSI1_CLK_SRC 57 +#define CSI2_CLK_SRC 58 +#define CSI3_CLK_SRC 59 +#define FD_CORE_CLK_SRC 60 +#define MMSS_CXO_CLK 61 +#define MMSS_SLEEPCLK_CLK 62 +#define MMSS_MMAGIC_AHB_CLK 63 +#define MMSS_MMAGIC_CFG_AHB_CLK 64 +#define MMSS_MISC_AHB_CLK 65 +#define MMSS_MISC_CXO_CLK 66 +#define MMSS_BTO_AHB_CLK 67 +#define MMSS_MMAGIC_AXI_CLK 68 +#define MMSS_S0_AXI_CLK 69 +#define MMSS_MMAGIC_MAXI_CLK 70 +#define DSA_CORE_CLK 71 +#define DSA_NOC_CFG_AHB_CLK 72 +#define MMAGIC_CAMSS_AXI_CLK 73 +#define MMAGIC_CAMSS_NOC_CFG_AHB_CLK 74 +#define THROTTLE_CAMSS_CXO_CLK 75 +#define THROTTLE_CAMSS_AHB_CLK 76 +#define THROTTLE_CAMSS_AXI_CLK 77 +#define SMMU_VFE_AHB_CLK 78 +#define SMMU_VFE_AXI_CLK 79 +#define SMMU_CPP_AHB_CLK 80 +#define SMMU_CPP_AXI_CLK 81 +#define SMMU_JPEG_AHB_CLK 82 +#define SMMU_JPEG_AXI_CLK 83 +#define 
MMAGIC_MDSS_AXI_CLK 84 +#define MMAGIC_MDSS_NOC_CFG_AHB_CLK 85 +#define THROTTLE_MDSS_CXO_CLK 86 +#define THROTTLE_MDSS_AHB_CLK 87 +#define THROTTLE_MDSS_AXI_CLK 88 +#define SMMU_ROT_AHB_CLK 89 +#define SMMU_ROT_AXI_CLK 90 +#define SMMU_MDP_AHB_CLK 91 +#define SMMU_MDP_AXI_CLK 92 +#define MMAGIC_VIDEO_AXI_CLK 93 +#define MMAGIC_VIDEO_NOC_CFG_AHB_CLK 94 +#define THROTTLE_VIDEO_CXO_CLK 95 +#define THROTTLE_VIDEO_AHB_CLK 96 +#define THROTTLE_VIDEO_AXI_CLK 97 +#define SMMU_VIDEO_AHB_CLK 98 +#define SMMU_VIDEO_AXI_CLK 99 +#define MMAGIC_BIMC_AXI_CLK 100 +#define MMAGIC_BIMC_NOC_CFG_AHB_CLK 101 +#define GPU_GX_GFX3D_CLK 102 +#define GPU_GX_RBBMTIMER_CLK 103 +#define GPU_AHB_CLK 104 +#define GPU_AON_ISENSE_CLK 105 +#define VMEM_MAXI_CLK 106 +#define VMEM_AHB_CLK 107 +#define MMSS_RBCPR_CLK 108 +#define MMSS_RBCPR_AHB_CLK 109 +#define VIDEO_CORE_CLK 110 +#define VIDEO_AXI_CLK 111 +#define VIDEO_MAXI_CLK 112 +#define VIDEO_AHB_CLK 113 +#define VIDEO_SUBCORE0_CLK 114 +#define VIDEO_SUBCORE1_CLK 115 +#define MDSS_AHB_CLK 116 +#define MDSS_HDMI_AHB_CLK 117 +#define MDSS_AXI_CLK 118 +#define MDSS_PCLK0_CLK 119 +#define MDSS_PCLK1_CLK 120 +#define MDSS_MDP_CLK 121 +#define MDSS_EXTPCLK_CLK 122 +#define MDSS_VSYNC_CLK 123 +#define MDSS_HDMI_CLK 124 +#define MDSS_BYTE0_CLK 125 +#define MDSS_BYTE1_CLK 126 +#define MDSS_ESC0_CLK 127 +#define MDSS_ESC1_CLK 128 +#define CAMSS_TOP_AHB_CLK 129 +#define CAMSS_AHB_CLK 130 +#define CAMSS_MICRO_AHB_CLK 131 +#define CAMSS_GP0_CLK 132 +#define CAMSS_GP1_CLK 133 +#define CAMSS_MCLK0_CLK 134 +#define CAMSS_MCLK1_CLK 135 +#define CAMSS_MCLK2_CLK 136 +#define CAMSS_MCLK3_CLK 137 +#define CAMSS_CCI_CLK 138 +#define CAMSS_CCI_AHB_CLK 139 +#define CAMSS_CSI0PHYTIMER_CLK 140 +#define CAMSS_CSI1PHYTIMER_CLK 141 +#define CAMSS_CSI2PHYTIMER_CLK 142 +#define CAMSS_CSIPHY0_3P_CLK 143 +#define CAMSS_CSIPHY1_3P_CLK 144 +#define CAMSS_CSIPHY2_3P_CLK 145 +#define CAMSS_JPEG0_CLK 146 +#define CAMSS_JPEG2_CLK 147 +#define CAMSS_JPEG_DMA_CLK 148 +#define CAMSS_JPEG_AHB_CLK 149 +#define CAMSS_JPEG_AXI_CLK 150 +#define CAMSS_VFE_AHB_CLK 151 +#define CAMSS_VFE_AXI_CLK 152 +#define CAMSS_VFE0_CLK 153 +#define CAMSS_VFE0_STREAM_CLK 154 +#define CAMSS_VFE0_AHB_CLK 155 +#define CAMSS_VFE1_CLK 156 +#define CAMSS_VFE1_STREAM_CLK 157 +#define CAMSS_VFE1_AHB_CLK 158 +#define CAMSS_CSI_VFE0_CLK 159 +#define CAMSS_CSI_VFE1_CLK 160 +#define CAMSS_CPP_VBIF_AHB_CLK 161 +#define CAMSS_CPP_AXI_CLK 162 +#define CAMSS_CPP_CLK 163 +#define CAMSS_CPP_AHB_CLK 164 +#define CAMSS_CSI0_CLK 165 +#define CAMSS_CSI0_AHB_CLK 166 +#define CAMSS_CSI0PHY_CLK 167 +#define CAMSS_CSI0RDI_CLK 168 +#define CAMSS_CSI0PIX_CLK 169 +#define CAMSS_CSI1_CLK 170 +#define CAMSS_CSI1_AHB_CLK 171 +#define CAMSS_CSI1PHY_CLK 172 +#define CAMSS_CSI1RDI_CLK 173 +#define CAMSS_CSI1PIX_CLK 174 +#define CAMSS_CSI2_CLK 175 +#define CAMSS_CSI2_AHB_CLK 176 +#define CAMSS_CSI2PHY_CLK 177 +#define CAMSS_CSI2RDI_CLK 178 +#define CAMSS_CSI2PIX_CLK 179 +#define CAMSS_CSI3_CLK 180 +#define CAMSS_CSI3_AHB_CLK 181 +#define CAMSS_CSI3PHY_CLK 182 +#define CAMSS_CSI3RDI_CLK 183 +#define CAMSS_CSI3PIX_CLK 184 +#define CAMSS_ISPIF_AHB_CLK 185 +#define FD_CORE_CLK 186 +#define FD_CORE_UAR_CLK 187 +#define FD_AHB_CLK 188 +#define MMSS_SPDM_CSI0_CLK 189 +#define MMSS_SPDM_JPEG_DMA_CLK 190 +#define MMSS_SPDM_CPP_CLK 191 +#define MMSS_SPDM_PCLK0_CLK 192 +#define MMSS_SPDM_AHB_CLK 193 +#define MMSS_SPDM_GFX3D_CLK 194 +#define MMSS_SPDM_PCLK1_CLK 195 +#define MMSS_SPDM_JPEG2_CLK 196 +#define MMSS_SPDM_DEBUG_CLK 197 +#define MMSS_SPDM_VFE1_CLK 198 +#define 
MMSS_SPDM_VFE0_CLK 199 +#define MMSS_SPDM_VIDEO_CORE_CLK 200 +#define MMSS_SPDM_AXI_CLK 201 +#define MMSS_SPDM_MDP_CLK 202 +#define MMSS_SPDM_JPEG0_CLK 203 +#define MMSS_SPDM_RM_AXI_CLK 204 +#define MMSS_SPDM_RM_MAXI_CLK 205 + +#define MMAGICAHB_BCR 0 +#define MMAGIC_CFG_BCR 1 +#define MISC_BCR 2 +#define BTO_BCR 3 +#define MMAGICAXI_BCR 4 +#define MMAGICMAXI_BCR 5 +#define DSA_BCR 6 +#define MMAGIC_CAMSS_BCR 7 +#define THROTTLE_CAMSS_BCR 8 +#define SMMU_VFE_BCR 9 +#define SMMU_CPP_BCR 10 +#define SMMU_JPEG_BCR 11 +#define MMAGIC_MDSS_BCR 12 +#define THROTTLE_MDSS_BCR 13 +#define SMMU_ROT_BCR 14 +#define SMMU_MDP_BCR 15 +#define MMAGIC_VIDEO_BCR 16 +#define THROTTLE_VIDEO_BCR 17 +#define SMMU_VIDEO_BCR 18 +#define MMAGIC_BIMC_BCR 19 +#define GPU_GX_BCR 20 +#define GPU_BCR 21 +#define GPU_AON_BCR 22 +#define VMEM_BCR 23 +#define MMSS_RBCPR_BCR 24 +#define VIDEO_BCR 25 +#define MDSS_BCR 26 +#define CAMSS_TOP_BCR 27 +#define CAMSS_AHB_BCR 28 +#define CAMSS_MICRO_BCR 29 +#define CAMSS_CCI_BCR 30 +#define CAMSS_PHY0_BCR 31 +#define CAMSS_PHY1_BCR 32 +#define CAMSS_PHY2_BCR 33 +#define CAMSS_CSIPHY0_3P_BCR 34 +#define CAMSS_CSIPHY1_3P_BCR 35 +#define CAMSS_CSIPHY2_3P_BCR 36 +#define CAMSS_JPEG_BCR 37 +#define CAMSS_VFE_BCR 38 +#define CAMSS_VFE0_BCR 39 +#define CAMSS_VFE1_BCR 40 +#define CAMSS_CSI_VFE0_BCR 41 +#define CAMSS_CSI_VFE1_BCR 42 +#define CAMSS_CPP_TOP_BCR 43 +#define CAMSS_CPP_BCR 44 +#define CAMSS_CSI0_BCR 45 +#define CAMSS_CSI0RDI_BCR 46 +#define CAMSS_CSI0PIX_BCR 47 +#define CAMSS_CSI1_BCR 48 +#define CAMSS_CSI1RDI_BCR 49 +#define CAMSS_CSI1PIX_BCR 50 +#define CAMSS_CSI2_BCR 51 +#define CAMSS_CSI2RDI_BCR 52 +#define CAMSS_CSI2PIX_BCR 53 +#define CAMSS_CSI3_BCR 54 +#define CAMSS_CSI3RDI_BCR 55 +#define CAMSS_CSI3PIX_BCR 56 +#define CAMSS_ISPIF_BCR 57 +#define FD_BCR 58 +#define MMSS_SPDM_RM_BCR 59 + +/* Indexes for GDSCs */ +#define MMAGIC_VIDEO_GDSC 0 +#define MMAGIC_MDSS_GDSC 1 +#define MMAGIC_CAMSS_GDSC 2 +#define GPU_GDSC 3 +#define VENUS_GDSC 4 +#define VENUS_CORE0_GDSC 5 +#define VENUS_CORE1_GDSC 6 +#define CAMSS_GDSC 7 +#define VFE0_GDSC 8 +#define VFE1_GDSC 9 +#define JPEG_GDSC 10 +#define CPP_GDSC 11 +#define FD_GDSC 12 +#define MDSS_GDSC 13 +#define GPU_GX_GDSC 14 +#define MMAGIC_BIMC_GDSC 15 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h new file mode 100644 index 0000000..8e30957 --- /dev/null +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2015 Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H +#define _DT_BINDINGS_CLK_MSM_RPMCC_H + +/* RPM clocks */ +#define RPM_PXO_CLK 0 +#define RPM_PXO_A_CLK 1 +#define RPM_CXO_CLK 2 +#define RPM_CXO_A_CLK 3 +#define RPM_APPS_FABRIC_CLK 4 +#define RPM_APPS_FABRIC_A_CLK 5 +#define RPM_CFPB_CLK 6 +#define RPM_CFPB_A_CLK 7 +#define RPM_QDSS_CLK 8 +#define RPM_QDSS_A_CLK 9 +#define RPM_DAYTONA_FABRIC_CLK 10 +#define RPM_DAYTONA_FABRIC_A_CLK 11 +#define RPM_EBI1_CLK 12 +#define RPM_EBI1_A_CLK 13 +#define RPM_MM_FABRIC_CLK 14 +#define RPM_MM_FABRIC_A_CLK 15 +#define RPM_MMFPB_CLK 16 +#define RPM_MMFPB_A_CLK 17 +#define RPM_SYS_FABRIC_CLK 18 +#define RPM_SYS_FABRIC_A_CLK 19 +#define RPM_SFPB_CLK 20 +#define RPM_SFPB_A_CLK 21 +#define RPM_SMI_CLK 22 +#define RPM_SMI_A_CLK 23 +#define RPM_PLL4_CLK 24 +#define RPM_XO_D0 25 +#define RPM_XO_D1 26 +#define RPM_XO_A0 27 +#define RPM_XO_A1 28 +#define RPM_XO_A2 29 + +/* SMD RPM clocks */ +#define RPM_SMD_XO_CLK_SRC 0 +#define RPM_SMD_XO_A_CLK_SRC 1 +#define 
RPM_SMD_PCNOC_CLK 2 +#define RPM_SMD_PCNOC_A_CLK 3 +#define RPM_SMD_SNOC_CLK 4 +#define RPM_SMD_SNOC_A_CLK 5 +#define RPM_SMD_BIMC_CLK 6 +#define RPM_SMD_BIMC_A_CLK 7 +#define RPM_SMD_QDSS_CLK 8 +#define RPM_SMD_QDSS_A_CLK 9 +#define RPM_SMD_BB_CLK1 10 +#define RPM_SMD_BB_CLK1_A 11 +#define RPM_SMD_BB_CLK2 12 +#define RPM_SMD_BB_CLK2_A 13 +#define RPM_SMD_RF_CLK1 14 +#define RPM_SMD_RF_CLK1_A 15 +#define RPM_SMD_RF_CLK2 16 +#define RPM_SMD_RF_CLK2_A 17 +#define RPM_SMD_BB_CLK1_PIN 18 +#define RPM_SMD_BB_CLK1_A_PIN 19 +#define RPM_SMD_BB_CLK2_PIN 20 +#define RPM_SMD_BB_CLK2_A_PIN 21 +#define RPM_SMD_RF_CLK1_PIN 22 +#define RPM_SMD_RF_CLK1_A_PIN 23 +#define RPM_SMD_RF_CLK2_PIN 24 +#define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 +#define RPM_SMD_AGGR1_NOC_CLK 62 +#define RPM_SMD_AGGR1_NOC_A_CLK 63 +#define RPM_SMD_AGGR2_NOC_CLK 64 +#define RPM_SMD_AGGR2_NOC_A_CLK 65 +#define RPM_SMD_MMAXI_CLK 66 +#define RPM_SMD_MMAXI_A_CLK 67 +#define RPM_SMD_IPA_CLK 68 +#define RPM_SMD_IPA_A_CLK 69 +#define RPM_SMD_CE1_CLK 70 +#define RPM_SMD_CE1_A_CLK 71 +#define RPM_SMD_DIV_CLK3 72 +#define RPM_SMD_DIV_A_CLK3 73 +#define RPM_SMD_LN_BB_CLK 74 +#define RPM_SMD_LN_BB_A_CLK 75 +#define RPM_SMD_BIMC_GPU_CLK 76 +#define RPM_SMD_BIMC_GPU_A_CLK 77 +#define RPM_SMD_QPIC_CLK 78 +#define RPM_SMD_QPIC_CLK_A 79 +#define RPM_SMD_LN_BB_CLK1 80 +#define RPM_SMD_LN_BB_CLK1_A 81 +#define RPM_SMD_LN_BB_CLK2 82 +#define RPM_SMD_LN_BB_CLK2_A 83 +#define RPM_SMD_LN_BB_CLK3_PIN 84 +#define RPM_SMD_LN_BB_CLK3_A_PIN 85 +#define RPM_SMD_RF_CLK3 86 +#define RPM_SMD_RF_CLK3_A 87 +#define RPM_SMD_RF_CLK3_PIN 88 +#define RPM_SMD_RF_CLK3_A_PIN 89 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h new file mode 100644 index 0000000..edcab3f --- /dev/null +++ b/include/dt-bindings/clock/qcom,rpmh.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
*/ + + +#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H +#define _DT_BINDINGS_CLK_MSM_RPMH_H + +/* RPMh controlled clocks */ +#define RPMH_CXO_CLK 0 +#define RPMH_CXO_CLK_A 1 +#define RPMH_LN_BB_CLK2 2 +#define RPMH_LN_BB_CLK2_A 3 +#define RPMH_LN_BB_CLK3 4 +#define RPMH_LN_BB_CLK3_A 5 +#define RPMH_RF_CLK1 6 +#define RPMH_RF_CLK1_A 7 +#define RPMH_RF_CLK2 8 +#define RPMH_RF_CLK2_A 9 +#define RPMH_RF_CLK3 10 +#define RPMH_RF_CLK3_A 11 +#define RPMH_IPA_CLK 12 + +#endif diff --git a/include/dt-bindings/clock/qcom,turingcc-qcs404.h b/include/dt-bindings/clock/qcom,turingcc-qcs404.h new file mode 100644 index 0000000..838faef --- /dev/null +++ b/include/dt-bindings/clock/qcom,turingcc-qcs404.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, Linaro Ltd + */ + +#ifndef _DT_BINDINGS_CLK_TURING_QCS404_H +#define _DT_BINDINGS_CLK_TURING_QCS404_H + +#define TURING_Q6SS_Q6_AXIM_CLK 0 +#define TURING_Q6SS_AHBM_AON_CLK 1 +#define TURING_WRAPPER_AON_CLK 2 +#define TURING_Q6SS_AHBS_AON_CLK 3 +#define TURING_WRAPPER_QOS_AHBS_AON_CLK 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h new file mode 100644 index 0000000..1b86816 --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_VIDEO_CC_SDM845_H + +/* VIDEO_CC clock registers */ +#define VIDEO_CC_APB_CLK 0 +#define VIDEO_CC_AT_CLK 1 +#define VIDEO_CC_QDSS_TRIG_CLK 2 +#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3 +#define VIDEO_CC_VCODEC0_AXI_CLK 4 +#define VIDEO_CC_VCODEC0_CORE_CLK 5 +#define VIDEO_CC_VCODEC1_AXI_CLK 6 +#define VIDEO_CC_VCODEC1_CORE_CLK 7 +#define VIDEO_CC_VENUS_AHB_CLK 8 +#define VIDEO_CC_VENUS_CLK_SRC 9 +#define VIDEO_CC_VENUS_CTL_AXI_CLK 10 +#define VIDEO_CC_VENUS_CTL_CORE_CLK 11 +#define VIDEO_PLL0 12 + +/* VIDEO_CC Resets */ +#define VIDEO_CC_VENUS_BCR 0 +#define VIDEO_CC_VCODEC0_BCR 1 +#define VIDEO_CC_VCODEC1_BCR 2 +#define VIDEO_CC_INTERFACE_BCR 3 + +/* VIDEO_CC GDSCRs */ +#define VENUS_GDSC 0 +#define VCODEC0_GDSC 1 +#define VCODEC1_GDSC 2 + +#endif diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h new file mode 100644 index 0000000..a267ac2 --- /dev/null +++ b/include/dt-bindings/clock/r7s72100-clock.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2014 Renesas Solutions Corp. 
+ * Copyright (C) 2014 Wolfram Sang, Sang Engineering + */ + +#ifndef __DT_BINDINGS_CLOCK_R7S72100_H__ +#define __DT_BINDINGS_CLOCK_R7S72100_H__ + +#define R7S72100_CLK_PLL 0 +#define R7S72100_CLK_I 1 +#define R7S72100_CLK_G 2 + +/* MSTP2 */ +#define R7S72100_CLK_CORESIGHT 0 + +/* MSTP3 */ +#define R7S72100_CLK_IEBUS 7 +#define R7S72100_CLK_IRDA 6 +#define R7S72100_CLK_LIN0 5 +#define R7S72100_CLK_LIN1 4 +#define R7S72100_CLK_MTU2 3 +#define R7S72100_CLK_CAN 2 +#define R7S72100_CLK_ADCPWR 1 +#define R7S72100_CLK_PWM 0 + +/* MSTP4 */ +#define R7S72100_CLK_SCIF0 7 +#define R7S72100_CLK_SCIF1 6 +#define R7S72100_CLK_SCIF2 5 +#define R7S72100_CLK_SCIF3 4 +#define R7S72100_CLK_SCIF4 3 +#define R7S72100_CLK_SCIF5 2 +#define R7S72100_CLK_SCIF6 1 +#define R7S72100_CLK_SCIF7 0 + +/* MSTP5 */ +#define R7S72100_CLK_SCI0 7 +#define R7S72100_CLK_SCI1 6 +#define R7S72100_CLK_SG0 5 +#define R7S72100_CLK_SG1 4 +#define R7S72100_CLK_SG2 3 +#define R7S72100_CLK_SG3 2 +#define R7S72100_CLK_OSTM0 1 +#define R7S72100_CLK_OSTM1 0 + +/* MSTP6 */ +#define R7S72100_CLK_ADC 7 +#define R7S72100_CLK_CEU 6 +#define R7S72100_CLK_DOC0 5 +#define R7S72100_CLK_DOC1 4 +#define R7S72100_CLK_DRC0 3 +#define R7S72100_CLK_DRC1 2 +#define R7S72100_CLK_JCU 1 +#define R7S72100_CLK_RTC 0 + +/* MSTP7 */ +#define R7S72100_CLK_VDEC0 7 +#define R7S72100_CLK_VDEC1 6 +#define R7S72100_CLK_ETHER 4 +#define R7S72100_CLK_NAND 3 +#define R7S72100_CLK_USB0 1 +#define R7S72100_CLK_USB1 0 + +/* MSTP8 */ +#define R7S72100_CLK_IMR0 7 +#define R7S72100_CLK_IMR1 6 +#define R7S72100_CLK_IMRDISP 5 +#define R7S72100_CLK_MMCIF 4 +#define R7S72100_CLK_MLB 3 +#define R7S72100_CLK_ETHAVB 2 +#define R7S72100_CLK_SCUX 1 + +/* MSTP9 */ +#define R7S72100_CLK_I2C0 7 +#define R7S72100_CLK_I2C1 6 +#define R7S72100_CLK_I2C2 5 +#define R7S72100_CLK_I2C3 4 +#define R7S72100_CLK_SPIBSC0 3 +#define R7S72100_CLK_SPIBSC1 2 +#define R7S72100_CLK_VDC50 1 /* and LVDS */ +#define R7S72100_CLK_VDC51 0 + +/* MSTP10 */ +#define R7S72100_CLK_SPI0 7 +#define R7S72100_CLK_SPI1 6 +#define R7S72100_CLK_SPI2 5 +#define R7S72100_CLK_SPI3 4 +#define R7S72100_CLK_SPI4 3 +#define R7S72100_CLK_CDROM 2 +#define R7S72100_CLK_SPDIF 1 +#define R7S72100_CLK_RGPVG2 0 + +/* MSTP11 */ +#define R7S72100_CLK_SSI0 5 +#define R7S72100_CLK_SSI1 4 +#define R7S72100_CLK_SSI2 3 +#define R7S72100_CLK_SSI3 2 +#define R7S72100_CLK_SSI4 1 +#define R7S72100_CLK_SSI5 0 + +/* MSTP12 */ +#define R7S72100_CLK_SDHI00 3 +#define R7S72100_CLK_SDHI01 2 +#define R7S72100_CLK_SDHI10 1 +#define R7S72100_CLK_SDHI11 0 + +/* MSTP13 */ +#define R7S72100_CLK_PIX1 2 +#define R7S72100_CLK_PIX0 1 + +#endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */ diff --git a/include/dt-bindings/clock/r7s9210-cpg-mssr.h b/include/dt-bindings/clock/r7s9210-cpg-mssr.h new file mode 100644 index 0000000..b6f85ca --- /dev/null +++ b/include/dt-bindings/clock/r7s9210-cpg-mssr.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. 
+ * + */ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +#ifndef __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ + +/* R7S9210 CPG Core Clocks */ +#define R7S9210_CLK_I 0 +#define R7S9210_CLK_G 1 +#define R7S9210_CLK_B 2 +#define R7S9210_CLK_P1 3 +#define R7S9210_CLK_P1C 4 +#define R7S9210_CLK_P0 5 + +#endif /* __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h new file mode 100644 index 0000000..1ec4827 --- /dev/null +++ b/include/dt-bindings/clock/r8a73a4-clock.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2014 Ulrich Hecht + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A73A4_H__ +#define __DT_BINDINGS_CLOCK_R8A73A4_H__ + +/* CPG */ +#define R8A73A4_CLK_MAIN 0 +#define R8A73A4_CLK_PLL0 1 +#define R8A73A4_CLK_PLL1 2 +#define R8A73A4_CLK_PLL2 3 +#define R8A73A4_CLK_PLL2S 4 +#define R8A73A4_CLK_PLL2H 5 +#define R8A73A4_CLK_Z 6 +#define R8A73A4_CLK_Z2 7 +#define R8A73A4_CLK_I 8 +#define R8A73A4_CLK_M3 9 +#define R8A73A4_CLK_B 10 +#define R8A73A4_CLK_M1 11 +#define R8A73A4_CLK_M2 12 +#define R8A73A4_CLK_ZX 13 +#define R8A73A4_CLK_ZS 14 +#define R8A73A4_CLK_HP 15 + +/* MSTP2 */ +#define R8A73A4_CLK_DMAC 18 +#define R8A73A4_CLK_SCIFB3 17 +#define R8A73A4_CLK_SCIFB2 16 +#define R8A73A4_CLK_SCIFB1 7 +#define R8A73A4_CLK_SCIFB0 6 +#define R8A73A4_CLK_SCIFA0 4 +#define R8A73A4_CLK_SCIFA1 3 + +/* MSTP3 */ +#define R8A73A4_CLK_CMT1 29 +#define R8A73A4_CLK_IIC1 23 +#define R8A73A4_CLK_IIC0 18 +#define R8A73A4_CLK_IIC7 17 +#define R8A73A4_CLK_IIC6 16 +#define R8A73A4_CLK_MMCIF0 15 +#define R8A73A4_CLK_SDHI0 14 +#define R8A73A4_CLK_SDHI1 13 +#define R8A73A4_CLK_SDHI2 12 +#define R8A73A4_CLK_MMCIF1 5 +#define R8A73A4_CLK_IIC2 0 + +/* MSTP4 */ +#define R8A73A4_CLK_IIC3 11 +#define R8A73A4_CLK_IIC4 10 +#define R8A73A4_CLK_IIC5 9 +#define R8A73A4_CLK_INTC_SYS 8 +#define R8A73A4_CLK_IRQC 7 + +/* MSTP5 */ +#define R8A73A4_CLK_THERMAL 22 +#define R8A73A4_CLK_IIC8 15 + +#endif /* __DT_BINDINGS_CLOCK_R8A73A4_H__ */ diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h new file mode 100644 index 0000000..1b3fdb3 --- /dev/null +++ b/include/dt-bindings/clock/r8a7740-clock.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2014 Ulrich Hecht + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7740_H__ +#define __DT_BINDINGS_CLOCK_R8A7740_H__ + +/* CPG */ +#define R8A7740_CLK_SYSTEM 0 +#define R8A7740_CLK_PLLC0 1 +#define R8A7740_CLK_PLLC1 2 +#define R8A7740_CLK_PLLC2 3 +#define R8A7740_CLK_R 4 +#define R8A7740_CLK_USB24S 5 +#define R8A7740_CLK_I 6 +#define R8A7740_CLK_ZG 7 +#define R8A7740_CLK_B 8 +#define R8A7740_CLK_M1 9 +#define R8A7740_CLK_HP 10 +#define R8A7740_CLK_HPP 11 +#define R8A7740_CLK_USBP 12 +#define R8A7740_CLK_S 13 +#define R8A7740_CLK_ZB 14 +#define R8A7740_CLK_M3 15 +#define R8A7740_CLK_CP 16 + +/* MSTP1 */ +#define R8A7740_CLK_CEU21 28 +#define R8A7740_CLK_CEU20 27 +#define R8A7740_CLK_TMU0 25 +#define R8A7740_CLK_LCDC1 17 +#define R8A7740_CLK_IIC0 16 +#define R8A7740_CLK_TMU1 11 +#define R8A7740_CLK_LCDC0 0 + +/* MSTP2 */ +#define R8A7740_CLK_SCIFA6 30 +#define R8A7740_CLK_INTCA 29 +#define R8A7740_CLK_SCIFA7 22 +#define R8A7740_CLK_DMAC1 18 +#define R8A7740_CLK_DMAC2 17 +#define R8A7740_CLK_DMAC3 16 +#define R8A7740_CLK_USBDMAC 14 +#define R8A7740_CLK_SCIFA5 7 +#define R8A7740_CLK_SCIFB 6 +#define R8A7740_CLK_SCIFA0 4 +#define R8A7740_CLK_SCIFA1 3 +#define R8A7740_CLK_SCIFA2 2 +#define
R8A7740_CLK_SCIFA3 1 +#define R8A7740_CLK_SCIFA4 0 + +/* MSTP3 */ +#define R8A7740_CLK_CMT1 29 +#define R8A7740_CLK_FSI 28 +#define R8A7740_CLK_IIC1 23 +#define R8A7740_CLK_USBF 20 +#define R8A7740_CLK_SDHI0 14 +#define R8A7740_CLK_SDHI1 13 +#define R8A7740_CLK_MMC 12 +#define R8A7740_CLK_GETHER 9 +#define R8A7740_CLK_TPU0 4 + +/* MSTP4 */ +#define R8A7740_CLK_USBH 16 +#define R8A7740_CLK_SDHI2 15 +#define R8A7740_CLK_USBFUNC 7 +#define R8A7740_CLK_USBPHY 6 + +/* SUBCK* */ +#define R8A7740_CLK_SUBCK 9 +#define R8A7740_CLK_SUBCK2 10 + +#endif /* __DT_BINDINGS_CLOCK_R8A7740_H__ */ diff --git a/include/dt-bindings/clock/r8a7743-cpg-mssr.h b/include/dt-bindings/clock/r8a7743-cpg-mssr.h new file mode 100644 index 0000000..3ba9360 --- /dev/null +++ b/include/dt-bindings/clock/r8a7743-cpg-mssr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2016 Cogent Embedded Inc. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a7743 CPG Core Clocks */ +#define R8A7743_CLK_Z 0 +#define R8A7743_CLK_ZG 1 +#define R8A7743_CLK_ZTR 2 +#define R8A7743_CLK_ZTRD2 3 +#define R8A7743_CLK_ZT 4 +#define R8A7743_CLK_ZX 5 +#define R8A7743_CLK_ZS 6 +#define R8A7743_CLK_HP 7 +#define R8A7743_CLK_B 9 +#define R8A7743_CLK_LB 10 +#define R8A7743_CLK_P 11 +#define R8A7743_CLK_CL 12 +#define R8A7743_CLK_M2 13 +#define R8A7743_CLK_ZB3 15 +#define R8A7743_CLK_ZB3D2 16 +#define R8A7743_CLK_DDR 17 +#define R8A7743_CLK_SDH 18 +#define R8A7743_CLK_SD0 19 +#define R8A7743_CLK_SD2 20 +#define R8A7743_CLK_SD3 21 +#define R8A7743_CLK_MMC0 22 +#define R8A7743_CLK_MP 23 +#define R8A7743_CLK_QSPI 26 +#define R8A7743_CLK_CP 27 +#define R8A7743_CLK_RCAN 28 +#define R8A7743_CLK_R 29 +#define R8A7743_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7744-cpg-mssr.h b/include/dt-bindings/clock/r8a7744-cpg-mssr.h new file mode 100644 index 0000000..2690be0 --- /dev/null +++ b/include/dt-bindings/clock/r8a7744-cpg-mssr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a7744 CPG Core Clocks */ +#define R8A7744_CLK_Z 0 +#define R8A7744_CLK_ZG 1 +#define R8A7744_CLK_ZTR 2 +#define R8A7744_CLK_ZTRD2 3 +#define R8A7744_CLK_ZT 4 +#define R8A7744_CLK_ZX 5 +#define R8A7744_CLK_ZS 6 +#define R8A7744_CLK_HP 7 +#define R8A7744_CLK_B 9 +#define R8A7744_CLK_LB 10 +#define R8A7744_CLK_P 11 +#define R8A7744_CLK_CL 12 +#define R8A7744_CLK_M2 13 +#define R8A7744_CLK_ZB3 15 +#define R8A7744_CLK_ZB3D2 16 +#define R8A7744_CLK_DDR 17 +#define R8A7744_CLK_SDH 18 +#define R8A7744_CLK_SD0 19 +#define R8A7744_CLK_SD2 20 +#define R8A7744_CLK_SD3 21 +#define R8A7744_CLK_MMC0 22 +#define R8A7744_CLK_MP 23 +#define R8A7744_CLK_QSPI 26 +#define R8A7744_CLK_CP 27 +#define R8A7744_CLK_RCAN 28 +#define R8A7744_CLK_R 29 +#define R8A7744_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7745-cpg-mssr.h b/include/dt-bindings/clock/r8a7745-cpg-mssr.h new file mode 100644 index 0000000..f81066c --- /dev/null +++ b/include/dt-bindings/clock/r8a7745-cpg-mssr.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2016 Cogent Embedded Inc.
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a7745 CPG Core Clocks */ +#define R8A7745_CLK_Z2 0 +#define R8A7745_CLK_ZG 1 +#define R8A7745_CLK_ZTR 2 +#define R8A7745_CLK_ZTRD2 3 +#define R8A7745_CLK_ZT 4 +#define R8A7745_CLK_ZX 5 +#define R8A7745_CLK_ZS 6 +#define R8A7745_CLK_HP 7 +#define R8A7745_CLK_B 9 +#define R8A7745_CLK_LB 10 +#define R8A7745_CLK_P 11 +#define R8A7745_CLK_CL 12 +#define R8A7745_CLK_CP 13 +#define R8A7745_CLK_M2 14 +#define R8A7745_CLK_ZB3 16 +#define R8A7745_CLK_ZB3D2 17 +#define R8A7745_CLK_DDR 18 +#define R8A7745_CLK_SDH 19 +#define R8A7745_CLK_SD0 20 +#define R8A7745_CLK_SD2 21 +#define R8A7745_CLK_SD3 22 +#define R8A7745_CLK_MMC0 23 +#define R8A7745_CLK_MP 24 +#define R8A7745_CLK_QSPI 25 +#define R8A7745_CLK_CPEX 26 +#define R8A7745_CLK_RCAN 27 +#define R8A7745_CLK_R 28 +#define R8A7745_CLK_OSC 29 + +#endif /* __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77470-cpg-mssr.h b/include/dt-bindings/clock/r8a77470-cpg-mssr.h new file mode 100644 index 0000000..34cba49 --- /dev/null +++ b/include/dt-bindings/clock/r8a77470-cpg-mssr.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a77470 CPG Core Clocks */ +#define R8A77470_CLK_Z2 0 +#define R8A77470_CLK_ZTR 1 +#define R8A77470_CLK_ZTRD2 2 +#define R8A77470_CLK_ZT 3 +#define R8A77470_CLK_ZX 4 +#define R8A77470_CLK_ZS 5 +#define R8A77470_CLK_HP 6 +#define R8A77470_CLK_B 7 +#define R8A77470_CLK_LB 8 +#define R8A77470_CLK_P 9 +#define R8A77470_CLK_CL 10 +#define R8A77470_CLK_CP 11 +#define R8A77470_CLK_M2 12 +#define R8A77470_CLK_ZB3 13 +#define R8A77470_CLK_SDH 14 +#define R8A77470_CLK_SD0 15 +#define R8A77470_CLK_SD1 16 +#define R8A77470_CLK_SD2 17 +#define R8A77470_CLK_MP 18 +#define R8A77470_CLK_QSPI 19 +#define R8A77470_CLK_CPEX 20 +#define R8A77470_CLK_RCAN 21 +#define R8A77470_CLK_R 22 +#define R8A77470_CLK_OSC 23 + +#endif /* __DT_BINDINGS_CLOCK_R8A77470_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h new file mode 100644 index 0000000..e355363 --- /dev/null +++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp.
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ + +#include + +/* r8a774a1 CPG Core Clocks */ +#define R8A774A1_CLK_Z 0 +#define R8A774A1_CLK_Z2 1 +#define R8A774A1_CLK_ZG 2 +#define R8A774A1_CLK_ZTR 3 +#define R8A774A1_CLK_ZTRD2 4 +#define R8A774A1_CLK_ZT 5 +#define R8A774A1_CLK_ZX 6 +#define R8A774A1_CLK_S0D1 7 +#define R8A774A1_CLK_S0D2 8 +#define R8A774A1_CLK_S0D3 9 +#define R8A774A1_CLK_S0D4 10 +#define R8A774A1_CLK_S0D6 11 +#define R8A774A1_CLK_S0D8 12 +#define R8A774A1_CLK_S0D12 13 +#define R8A774A1_CLK_S1D2 14 +#define R8A774A1_CLK_S1D4 15 +#define R8A774A1_CLK_S2D1 16 +#define R8A774A1_CLK_S2D2 17 +#define R8A774A1_CLK_S2D4 18 +#define R8A774A1_CLK_S3D1 19 +#define R8A774A1_CLK_S3D2 20 +#define R8A774A1_CLK_S3D4 21 +#define R8A774A1_CLK_LB 22 +#define R8A774A1_CLK_CL 23 +#define R8A774A1_CLK_ZB3 24 +#define R8A774A1_CLK_ZB3D2 25 +#define R8A774A1_CLK_ZB3D4 26 +#define R8A774A1_CLK_CR 27 +#define R8A774A1_CLK_CRD2 28 +#define R8A774A1_CLK_SD0H 29 +#define R8A774A1_CLK_SD0 30 +#define R8A774A1_CLK_SD1H 31 +#define R8A774A1_CLK_SD1 32 +#define R8A774A1_CLK_SD2H 33 +#define R8A774A1_CLK_SD2 34 +#define R8A774A1_CLK_SD3H 35 +#define R8A774A1_CLK_SD3 36 +#define R8A774A1_CLK_RPC 37 +#define R8A774A1_CLK_RPCD2 38 +#define R8A774A1_CLK_MSO 39 +#define R8A774A1_CLK_HDMI 40 +#define R8A774A1_CLK_CSI0 41 +#define R8A774A1_CLK_CP 42 +#define R8A774A1_CLK_CPEX 43 +#define R8A774A1_CLK_R 44 +#define R8A774A1_CLK_OSC 45 +#define R8A774A1_CLK_CANFD 46 + +#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h new file mode 100644 index 0000000..8ad9cd6 --- /dev/null +++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ + +#include + +/* r8a774c0 CPG Core Clocks */ +#define R8A774C0_CLK_Z2 0 +#define R8A774C0_CLK_ZG 1 +#define R8A774C0_CLK_ZTR 2 +#define R8A774C0_CLK_ZT 3 +#define R8A774C0_CLK_ZX 4 +#define R8A774C0_CLK_S0D1 5 +#define R8A774C0_CLK_S0D3 6 +#define R8A774C0_CLK_S0D6 7 +#define R8A774C0_CLK_S0D12 8 +#define R8A774C0_CLK_S0D24 9 +#define R8A774C0_CLK_S1D1 10 +#define R8A774C0_CLK_S1D2 11 +#define R8A774C0_CLK_S1D4 12 +#define R8A774C0_CLK_S2D1 13 +#define R8A774C0_CLK_S2D2 14 +#define R8A774C0_CLK_S2D4 15 +#define R8A774C0_CLK_S3D1 16 +#define R8A774C0_CLK_S3D2 17 +#define R8A774C0_CLK_S3D4 18 +#define R8A774C0_CLK_S0D6C 19 +#define R8A774C0_CLK_S3D1C 20 +#define R8A774C0_CLK_S3D2C 21 +#define R8A774C0_CLK_S3D4C 22 +#define R8A774C0_CLK_LB 23 +#define R8A774C0_CLK_CL 24 +#define R8A774C0_CLK_ZB3 25 +#define R8A774C0_CLK_ZB3D2 26 +#define R8A774C0_CLK_CR 27 +#define R8A774C0_CLK_CRD2 28 +#define R8A774C0_CLK_SD0H 29 +#define R8A774C0_CLK_SD0 30 +#define R8A774C0_CLK_SD1H 31 +#define R8A774C0_CLK_SD1 32 +#define R8A774C0_CLK_SD3H 33 +#define R8A774C0_CLK_SD3 34 +#define R8A774C0_CLK_RPC 35 +#define R8A774C0_CLK_RPCD2 36 +#define R8A774C0_CLK_ZA2 37 +#define R8A774C0_CLK_ZA8 38 +#define R8A774C0_CLK_Z2D 39 +#define R8A774C0_CLK_MSO 40 +#define R8A774C0_CLK_R 41 +#define R8A774C0_CLK_OSC 42 +#define R8A774C0_CLK_LV0 43 +#define R8A774C0_CLK_LV1 44 +#define R8A774C0_CLK_CSI0 45 +#define R8A774C0_CLK_CP 46 +#define R8A774C0_CLK_CPEX 47 +#define R8A774C0_CLK_CANFD 48 + +#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h new file mode 100644 index 0000000..4a32b36 --- /dev/null +++ b/include/dt-bindings/clock/r8a7778-clock.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2014 Ulrich Hecht + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7778_H__ +#define __DT_BINDINGS_CLOCK_R8A7778_H__ + +/* CPG */ +#define R8A7778_CLK_PLLA 0 +#define R8A7778_CLK_PLLB 1 +#define R8A7778_CLK_B 2 +#define R8A7778_CLK_OUT 3 +#define R8A7778_CLK_P 4 +#define R8A7778_CLK_S 5 +#define R8A7778_CLK_S1 6 + +/* MSTP0 */ +#define R8A7778_CLK_I2C0 30 +#define R8A7778_CLK_I2C1 29 +#define R8A7778_CLK_I2C2 28 +#define R8A7778_CLK_I2C3 27 +#define R8A7778_CLK_SCIF0 26 +#define R8A7778_CLK_SCIF1 25 +#define R8A7778_CLK_SCIF2 24 +#define R8A7778_CLK_SCIF3 23 +#define R8A7778_CLK_SCIF4 22 +#define R8A7778_CLK_SCIF5 21 +#define R8A7778_CLK_HSCIF0 19 +#define R8A7778_CLK_HSCIF1 18 +#define R8A7778_CLK_TMU0 16 +#define R8A7778_CLK_TMU1 15 +#define R8A7778_CLK_TMU2 14 +#define R8A7778_CLK_SSI0 12 +#define R8A7778_CLK_SSI1 11 +#define R8A7778_CLK_SSI2 10 +#define R8A7778_CLK_SSI3 9 +#define R8A7778_CLK_SRU 8 +#define R8A7778_CLK_HSPI 7 + +/* MSTP1 */ +#define R8A7778_CLK_ETHER 14 +#define R8A7778_CLK_VIN0 10 +#define R8A7778_CLK_VIN1 9 +#define R8A7778_CLK_USB 0 + +/* MSTP3 */ +#define R8A7778_CLK_MMC 31 +#define R8A7778_CLK_SDHI0 23 +#define R8A7778_CLK_SDHI1 22 +#define R8A7778_CLK_SDHI2 21 +#define R8A7778_CLK_SSI4 11 +#define R8A7778_CLK_SSI5 10 +#define R8A7778_CLK_SSI6 9 +#define R8A7778_CLK_SSI7 8 +#define R8A7778_CLK_SSI8 7 + +/* MSTP5 */ +#define R8A7778_CLK_SRU_SRC0 31 +#define R8A7778_CLK_SRU_SRC1 30 +#define R8A7778_CLK_SRU_SRC2 29 +#define R8A7778_CLK_SRU_SRC3 28 +#define R8A7778_CLK_SRU_SRC4 27 +#define R8A7778_CLK_SRU_SRC5 26 +#define R8A7778_CLK_SRU_SRC6 25 +#define R8A7778_CLK_SRU_SRC7 
24 +#define R8A7778_CLK_SRU_SRC8 23 + +#endif /* __DT_BINDINGS_CLOCK_R8A7778_H__ */ diff --git a/include/dt-bindings/clock/r8a7779-clock.h b/include/dt-bindings/clock/r8a7779-clock.h new file mode 100644 index 0000000..f054923 --- /dev/null +++ b/include/dt-bindings/clock/r8a7779-clock.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2013 Horms Solutions Ltd. + * + * Contact: Simon Horman + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7779_H__ +#define __DT_BINDINGS_CLOCK_R8A7779_H__ + +/* CPG */ +#define R8A7779_CLK_PLLA 0 +#define R8A7779_CLK_Z 1 +#define R8A7779_CLK_ZS 2 +#define R8A7779_CLK_S 3 +#define R8A7779_CLK_S1 4 +#define R8A7779_CLK_P 5 +#define R8A7779_CLK_B 6 +#define R8A7779_CLK_OUT 7 + +/* MSTP 0 */ +#define R8A7779_CLK_HSPI 7 +#define R8A7779_CLK_TMU2 14 +#define R8A7779_CLK_TMU1 15 +#define R8A7779_CLK_TMU0 16 +#define R8A7779_CLK_HSCIF1 18 +#define R8A7779_CLK_HSCIF0 19 +#define R8A7779_CLK_SCIF5 21 +#define R8A7779_CLK_SCIF4 22 +#define R8A7779_CLK_SCIF3 23 +#define R8A7779_CLK_SCIF2 24 +#define R8A7779_CLK_SCIF1 25 +#define R8A7779_CLK_SCIF0 26 +#define R8A7779_CLK_I2C3 27 +#define R8A7779_CLK_I2C2 28 +#define R8A7779_CLK_I2C1 29 +#define R8A7779_CLK_I2C0 30 + +/* MSTP 1 */ +#define R8A7779_CLK_USB01 0 +#define R8A7779_CLK_USB2 1 +#define R8A7779_CLK_DU 3 +#define R8A7779_CLK_VIN2 8 +#define R8A7779_CLK_VIN1 9 +#define R8A7779_CLK_VIN0 10 +#define R8A7779_CLK_ETHER 14 +#define R8A7779_CLK_SATA 15 +#define R8A7779_CLK_PCIE 16 +#define R8A7779_CLK_VIN3 20 + +/* MSTP 3 */ +#define R8A7779_CLK_SDHI3 20 +#define R8A7779_CLK_SDHI2 21 +#define R8A7779_CLK_SDHI1 22 +#define R8A7779_CLK_SDHI0 23 +#define R8A7779_CLK_MMC1 30 +#define R8A7779_CLK_MMC0 31 + + +#endif /* __DT_BINDINGS_CLOCK_R8A7779_H__ */ diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h new file mode 100644 index 0000000..c92ff1e --- /dev/null +++ b/include/dt-bindings/clock/r8a7790-clock.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2013 Ideas On Board SPRL + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7790_H__ +#define __DT_BINDINGS_CLOCK_R8A7790_H__ + +/* CPG */ +#define R8A7790_CLK_MAIN 0 +#define R8A7790_CLK_PLL0 1 +#define R8A7790_CLK_PLL1 2 +#define R8A7790_CLK_PLL3 3 +#define R8A7790_CLK_LB 4 +#define R8A7790_CLK_QSPI 5 +#define R8A7790_CLK_SDH 6 +#define R8A7790_CLK_SD0 7 +#define R8A7790_CLK_SD1 8 +#define R8A7790_CLK_Z 9 +#define R8A7790_CLK_RCAN 10 +#define R8A7790_CLK_ADSP 11 + +/* MSTP0 */ +#define R8A7790_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7790_CLK_VCP1 0 +#define R8A7790_CLK_VCP0 1 +#define R8A7790_CLK_VPC1 2 +#define R8A7790_CLK_VPC0 3 +#define R8A7790_CLK_JPU 6 +#define R8A7790_CLK_SSP1 9 +#define R8A7790_CLK_TMU1 11 +#define R8A7790_CLK_3DG 12 +#define R8A7790_CLK_2DDMAC 15 +#define R8A7790_CLK_FDP1_2 17 +#define R8A7790_CLK_FDP1_1 18 +#define R8A7790_CLK_FDP1_0 19 +#define R8A7790_CLK_TMU3 21 +#define R8A7790_CLK_TMU2 22 +#define R8A7790_CLK_CMT0 24 +#define R8A7790_CLK_TMU0 25 +#define R8A7790_CLK_VSP1_DU1 27 +#define R8A7790_CLK_VSP1_DU0 28 +#define R8A7790_CLK_VSP1_R 30 +#define R8A7790_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7790_CLK_SCIFA2 2 +#define R8A7790_CLK_SCIFA1 3 +#define R8A7790_CLK_SCIFA0 4 +#define R8A7790_CLK_MSIOF2 5 +#define R8A7790_CLK_SCIFB0 6 +#define R8A7790_CLK_SCIFB1 7 +#define R8A7790_CLK_MSIOF1 8 +#define R8A7790_CLK_MSIOF3 15 +#define R8A7790_CLK_SCIFB2 16 +#define R8A7790_CLK_SYS_DMAC1 18 +#define R8A7790_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define 
R8A7790_CLK_IIC2 0 +#define R8A7790_CLK_TPU0 4 +#define R8A7790_CLK_MMCIF1 5 +#define R8A7790_CLK_SCIF2 10 +#define R8A7790_CLK_SDHI3 11 +#define R8A7790_CLK_SDHI2 12 +#define R8A7790_CLK_SDHI1 13 +#define R8A7790_CLK_SDHI0 14 +#define R8A7790_CLK_MMCIF0 15 +#define R8A7790_CLK_IIC0 18 +#define R8A7790_CLK_PCIEC 19 +#define R8A7790_CLK_IIC1 23 +#define R8A7790_CLK_SSUSB 28 +#define R8A7790_CLK_CMT1 29 +#define R8A7790_CLK_USBDMAC0 30 +#define R8A7790_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7790_CLK_IRQC 7 +#define R8A7790_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7790_CLK_AUDIO_DMAC1 1 +#define R8A7790_CLK_AUDIO_DMAC0 2 +#define R8A7790_CLK_ADSP_MOD 6 +#define R8A7790_CLK_THERMAL 22 +#define R8A7790_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7790_CLK_EHCI 3 +#define R8A7790_CLK_HSUSB 4 +#define R8A7790_CLK_HSCIF1 16 +#define R8A7790_CLK_HSCIF0 17 +#define R8A7790_CLK_SCIF1 20 +#define R8A7790_CLK_SCIF0 21 +#define R8A7790_CLK_DU2 22 +#define R8A7790_CLK_DU1 23 +#define R8A7790_CLK_DU0 24 +#define R8A7790_CLK_LVDS1 25 +#define R8A7790_CLK_LVDS0 26 + +/* MSTP8 */ +#define R8A7790_CLK_MLB 2 +#define R8A7790_CLK_VIN3 8 +#define R8A7790_CLK_VIN2 9 +#define R8A7790_CLK_VIN1 10 +#define R8A7790_CLK_VIN0 11 +#define R8A7790_CLK_ETHERAVB 12 +#define R8A7790_CLK_ETHER 13 +#define R8A7790_CLK_SATA1 14 +#define R8A7790_CLK_SATA0 15 + +/* MSTP9 */ +#define R8A7790_CLK_GPIO5 7 +#define R8A7790_CLK_GPIO4 8 +#define R8A7790_CLK_GPIO3 9 +#define R8A7790_CLK_GPIO2 10 +#define R8A7790_CLK_GPIO1 11 +#define R8A7790_CLK_GPIO0 12 +#define R8A7790_CLK_RCAN1 15 +#define R8A7790_CLK_RCAN0 16 +#define R8A7790_CLK_QSPI_MOD 17 +#define R8A7790_CLK_IICDVFS 26 +#define R8A7790_CLK_I2C3 28 +#define R8A7790_CLK_I2C2 29 +#define R8A7790_CLK_I2C1 30 +#define R8A7790_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7790_CLK_SSI_ALL 5 +#define R8A7790_CLK_SSI9 6 +#define R8A7790_CLK_SSI8 7 +#define R8A7790_CLK_SSI7 8 +#define R8A7790_CLK_SSI6 9 +#define R8A7790_CLK_SSI5 10 +#define R8A7790_CLK_SSI4 11 +#define R8A7790_CLK_SSI3 12 +#define R8A7790_CLK_SSI2 13 +#define R8A7790_CLK_SSI1 14 +#define R8A7790_CLK_SSI0 15 +#define R8A7790_CLK_SCU_ALL 17 +#define R8A7790_CLK_SCU_DVC1 18 +#define R8A7790_CLK_SCU_DVC0 19 +#define R8A7790_CLK_SCU_CTU1_MIX1 20 +#define R8A7790_CLK_SCU_CTU0_MIX0 21 +#define R8A7790_CLK_SCU_SRC9 22 +#define R8A7790_CLK_SCU_SRC8 23 +#define R8A7790_CLK_SCU_SRC7 24 +#define R8A7790_CLK_SCU_SRC6 25 +#define R8A7790_CLK_SCU_SRC5 26 +#define R8A7790_CLK_SCU_SRC4 27 +#define R8A7790_CLK_SCU_SRC3 28 +#define R8A7790_CLK_SCU_SRC2 29 +#define R8A7790_CLK_SCU_SRC1 30 +#define R8A7790_CLK_SCU_SRC0 31 + +#endif /* __DT_BINDINGS_CLOCK_R8A7790_H__ */ diff --git a/include/dt-bindings/clock/r8a7790-cpg-mssr.h b/include/dt-bindings/clock/r8a7790-cpg-mssr.h new file mode 100644 index 0000000..c5955b5 --- /dev/null +++ b/include/dt-bindings/clock/r8a7790-cpg-mssr.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2015 Renesas Electronics Corp. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ + +#include + +/* r8a7790 CPG Core Clocks */ +#define R8A7790_CLK_Z 0 +#define R8A7790_CLK_Z2 1 +#define R8A7790_CLK_ZG 2 +#define R8A7790_CLK_ZTR 3 +#define R8A7790_CLK_ZTRD2 4 +#define R8A7790_CLK_ZT 5 +#define R8A7790_CLK_ZX 6 +#define R8A7790_CLK_ZS 7 +#define R8A7790_CLK_HP 8 +#define R8A7790_CLK_I 9 +#define R8A7790_CLK_B 10 +#define R8A7790_CLK_LB 11 +#define R8A7790_CLK_P 12 +#define R8A7790_CLK_CL 13 +#define R8A7790_CLK_M2 14 +#define R8A7790_CLK_ADSP 15 +#define R8A7790_CLK_IMP 16 +#define R8A7790_CLK_ZB3 17 +#define R8A7790_CLK_ZB3D2 18 +#define R8A7790_CLK_DDR 19 +#define R8A7790_CLK_SDH 20 +#define R8A7790_CLK_SD0 21 +#define R8A7790_CLK_SD1 22 +#define R8A7790_CLK_SD2 23 +#define R8A7790_CLK_SD3 24 +#define R8A7790_CLK_MMC0 25 +#define R8A7790_CLK_MMC1 26 +#define R8A7790_CLK_MP 27 +#define R8A7790_CLK_SSP 28 +#define R8A7790_CLK_SSPRS 29 +#define R8A7790_CLK_QSPI 30 +#define R8A7790_CLK_CP 31 +#define R8A7790_CLK_RCAN 32 +#define R8A7790_CLK_R 33 +#define R8A7790_CLK_OSC 34 + +#endif /* __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h new file mode 100644 index 0000000..bb4f18b --- /dev/null +++ b/include/dt-bindings/clock/r8a7791-clock.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2013 Ideas On Board SPRL + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7791_H__ +#define __DT_BINDINGS_CLOCK_R8A7791_H__ + +/* CPG */ +#define R8A7791_CLK_MAIN 0 +#define R8A7791_CLK_PLL0 1 +#define R8A7791_CLK_PLL1 2 +#define R8A7791_CLK_PLL3 3 +#define R8A7791_CLK_LB 4 +#define R8A7791_CLK_QSPI 5 +#define R8A7791_CLK_SDH 6 +#define R8A7791_CLK_SD0 7 +#define R8A7791_CLK_Z 8 +#define R8A7791_CLK_RCAN 9 +#define R8A7791_CLK_ADSP 10 + +/* MSTP0 */ +#define R8A7791_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7791_CLK_VCP0 1 +#define R8A7791_CLK_VPC0 3 +#define R8A7791_CLK_JPU 6 +#define R8A7791_CLK_SSP1 9 +#define R8A7791_CLK_TMU1 11 +#define R8A7791_CLK_3DG 12 +#define R8A7791_CLK_2DDMAC 15 +#define R8A7791_CLK_FDP1_1 18 +#define R8A7791_CLK_FDP1_0 19 +#define R8A7791_CLK_TMU3 21 +#define R8A7791_CLK_TMU2 22 +#define R8A7791_CLK_CMT0 24 +#define R8A7791_CLK_TMU0 25 +#define R8A7791_CLK_VSP1_DU1 27 +#define R8A7791_CLK_VSP1_DU0 28 +#define R8A7791_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7791_CLK_SCIFA2 2 +#define R8A7791_CLK_SCIFA1 3 +#define R8A7791_CLK_SCIFA0 4 +#define R8A7791_CLK_MSIOF2 5 +#define R8A7791_CLK_SCIFB0 6 +#define R8A7791_CLK_SCIFB1 7 +#define R8A7791_CLK_MSIOF1 8 +#define R8A7791_CLK_SCIFB2 16 +#define R8A7791_CLK_SYS_DMAC1 18 +#define R8A7791_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7791_CLK_TPU0 4 +#define R8A7791_CLK_SDHI2 11 +#define R8A7791_CLK_SDHI1 12 +#define R8A7791_CLK_SDHI0 14 +#define R8A7791_CLK_MMCIF0 15 +#define R8A7791_CLK_IIC0 18 +#define R8A7791_CLK_PCIEC 19 +#define R8A7791_CLK_IIC1 23 +#define R8A7791_CLK_SSUSB 28 +#define R8A7791_CLK_CMT1 29 +#define R8A7791_CLK_USBDMAC0 30 +#define R8A7791_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7791_CLK_IRQC 7 +#define R8A7791_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7791_CLK_AUDIO_DMAC1 1 +#define R8A7791_CLK_AUDIO_DMAC0 2 +#define R8A7791_CLK_ADSP_MOD 6 +#define R8A7791_CLK_THERMAL 22 +#define R8A7791_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7791_CLK_EHCI 3 +#define R8A7791_CLK_HSUSB 4 +#define R8A7791_CLK_HSCIF2 13 +#define R8A7791_CLK_SCIF5 14 +#define R8A7791_CLK_SCIF4 15 +#define 
R8A7791_CLK_HSCIF1 16 +#define R8A7791_CLK_HSCIF0 17 +#define R8A7791_CLK_SCIF3 18 +#define R8A7791_CLK_SCIF2 19 +#define R8A7791_CLK_SCIF1 20 +#define R8A7791_CLK_SCIF0 21 +#define R8A7791_CLK_DU1 23 +#define R8A7791_CLK_DU0 24 +#define R8A7791_CLK_LVDS0 26 + +/* MSTP8 */ +#define R8A7791_CLK_IPMMU_SGX 0 +#define R8A7791_CLK_MLB 2 +#define R8A7791_CLK_VIN2 9 +#define R8A7791_CLK_VIN1 10 +#define R8A7791_CLK_VIN0 11 +#define R8A7791_CLK_ETHERAVB 12 +#define R8A7791_CLK_ETHER 13 +#define R8A7791_CLK_SATA1 14 +#define R8A7791_CLK_SATA0 15 + +/* MSTP9 */ +#define R8A7791_CLK_GYROADC 1 +#define R8A7791_CLK_GPIO7 4 +#define R8A7791_CLK_GPIO6 5 +#define R8A7791_CLK_GPIO5 7 +#define R8A7791_CLK_GPIO4 8 +#define R8A7791_CLK_GPIO3 9 +#define R8A7791_CLK_GPIO2 10 +#define R8A7791_CLK_GPIO1 11 +#define R8A7791_CLK_GPIO0 12 +#define R8A7791_CLK_RCAN1 15 +#define R8A7791_CLK_RCAN0 16 +#define R8A7791_CLK_QSPI_MOD 17 +#define R8A7791_CLK_I2C5 25 +#define R8A7791_CLK_IICDVFS 26 +#define R8A7791_CLK_I2C4 27 +#define R8A7791_CLK_I2C3 28 +#define R8A7791_CLK_I2C2 29 +#define R8A7791_CLK_I2C1 30 +#define R8A7791_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7791_CLK_SSI_ALL 5 +#define R8A7791_CLK_SSI9 6 +#define R8A7791_CLK_SSI8 7 +#define R8A7791_CLK_SSI7 8 +#define R8A7791_CLK_SSI6 9 +#define R8A7791_CLK_SSI5 10 +#define R8A7791_CLK_SSI4 11 +#define R8A7791_CLK_SSI3 12 +#define R8A7791_CLK_SSI2 13 +#define R8A7791_CLK_SSI1 14 +#define R8A7791_CLK_SSI0 15 +#define R8A7791_CLK_SCU_ALL 17 +#define R8A7791_CLK_SCU_DVC1 18 +#define R8A7791_CLK_SCU_DVC0 19 +#define R8A7791_CLK_SCU_CTU1_MIX1 20 +#define R8A7791_CLK_SCU_CTU0_MIX0 21 +#define R8A7791_CLK_SCU_SRC9 22 +#define R8A7791_CLK_SCU_SRC8 23 +#define R8A7791_CLK_SCU_SRC7 24 +#define R8A7791_CLK_SCU_SRC6 25 +#define R8A7791_CLK_SCU_SRC5 26 +#define R8A7791_CLK_SCU_SRC4 27 +#define R8A7791_CLK_SCU_SRC3 28 +#define R8A7791_CLK_SCU_SRC2 29 +#define R8A7791_CLK_SCU_SRC1 30 +#define R8A7791_CLK_SCU_SRC0 31 + +/* MSTP11 */ +#define R8A7791_CLK_SCIFA3 6 +#define R8A7791_CLK_SCIFA4 7 +#define R8A7791_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7791_H__ */ diff --git a/include/dt-bindings/clock/r8a7791-cpg-mssr.h b/include/dt-bindings/clock/r8a7791-cpg-mssr.h new file mode 100644 index 0000000..aadd06c --- /dev/null +++ b/include/dt-bindings/clock/r8a7791-cpg-mssr.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2015 Renesas Electronics Corp. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ + +#include + +/* r8a7791 CPG Core Clocks */ +#define R8A7791_CLK_Z 0 +#define R8A7791_CLK_ZG 1 +#define R8A7791_CLK_ZTR 2 +#define R8A7791_CLK_ZTRD2 3 +#define R8A7791_CLK_ZT 4 +#define R8A7791_CLK_ZX 5 +#define R8A7791_CLK_ZS 6 +#define R8A7791_CLK_HP 7 +#define R8A7791_CLK_I 8 +#define R8A7791_CLK_B 9 +#define R8A7791_CLK_LB 10 +#define R8A7791_CLK_P 11 +#define R8A7791_CLK_CL 12 +#define R8A7791_CLK_M2 13 +#define R8A7791_CLK_ADSP 14 +#define R8A7791_CLK_ZB3 15 +#define R8A7791_CLK_ZB3D2 16 +#define R8A7791_CLK_DDR 17 +#define R8A7791_CLK_SDH 18 +#define R8A7791_CLK_SD0 19 +#define R8A7791_CLK_SD2 20 +#define R8A7791_CLK_SD3 21 +#define R8A7791_CLK_MMC0 22 +#define R8A7791_CLK_MP 23 +#define R8A7791_CLK_SSP 24 +#define R8A7791_CLK_SSPRS 25 +#define R8A7791_CLK_QSPI 26 +#define R8A7791_CLK_CP 27 +#define R8A7791_CLK_RCAN 28 +#define R8A7791_CLK_R 29 +#define R8A7791_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h new file mode 100644 index 0000000..2948d9c --- /dev/null +++ b/include/dt-bindings/clock/r8a7792-clock.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2016 Cogent Embedded, Inc. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7792_H__ +#define __DT_BINDINGS_CLOCK_R8A7792_H__ + +/* CPG */ +#define R8A7792_CLK_MAIN 0 +#define R8A7792_CLK_PLL0 1 +#define R8A7792_CLK_PLL1 2 +#define R8A7792_CLK_PLL3 3 +#define R8A7792_CLK_LB 4 +#define R8A7792_CLK_QSPI 5 + +/* MSTP0 */ +#define R8A7792_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7792_CLK_JPU 6 +#define R8A7792_CLK_TMU1 11 +#define R8A7792_CLK_TMU3 21 +#define R8A7792_CLK_TMU2 22 +#define R8A7792_CLK_CMT0 24 +#define R8A7792_CLK_TMU0 25 +#define R8A7792_CLK_VSP1DU1 27 +#define R8A7792_CLK_VSP1DU0 28 +#define R8A7792_CLK_VSP1_SY 31 + +/* MSTP2 */ +#define R8A7792_CLK_MSIOF1 8 +#define R8A7792_CLK_SYS_DMAC1 18 +#define R8A7792_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7792_CLK_TPU0 4 +#define R8A7792_CLK_SDHI0 14 +#define R8A7792_CLK_CMT1 29 + +/* MSTP4 */ +#define R8A7792_CLK_IRQC 7 +#define R8A7792_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7792_CLK_AUDIO_DMAC0 2 +#define R8A7792_CLK_THERMAL 22 +#define R8A7792_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7792_CLK_HSCIF1 16 +#define R8A7792_CLK_HSCIF0 17 +#define R8A7792_CLK_SCIF3 18 +#define R8A7792_CLK_SCIF2 19 +#define R8A7792_CLK_SCIF1 20 +#define R8A7792_CLK_SCIF0 21 +#define R8A7792_CLK_DU1 23 +#define R8A7792_CLK_DU0 24 + +/* MSTP8 */ +#define R8A7792_CLK_VIN5 4 +#define R8A7792_CLK_VIN4 5 +#define R8A7792_CLK_VIN3 8 +#define R8A7792_CLK_VIN2 9 +#define R8A7792_CLK_VIN1 10 +#define R8A7792_CLK_VIN0 11 +#define R8A7792_CLK_ETHERAVB 12 + +/* MSTP9 */ +#define R8A7792_CLK_GPIO7 4 +#define R8A7792_CLK_GPIO6 5 +#define R8A7792_CLK_GPIO5 7 +#define R8A7792_CLK_GPIO4 8 +#define R8A7792_CLK_GPIO3 9 +#define R8A7792_CLK_GPIO2 10 +#define R8A7792_CLK_GPIO1 11 +#define R8A7792_CLK_GPIO0 12 +#define R8A7792_CLK_GPIO11 13 +#define R8A7792_CLK_GPIO10 14 +#define R8A7792_CLK_CAN1 15 +#define R8A7792_CLK_CAN0 16 +#define R8A7792_CLK_QSPI_MOD 17 +#define R8A7792_CLK_GPIO9 19 +#define R8A7792_CLK_GPIO8 21 +#define R8A7792_CLK_I2C5 25 +#define R8A7792_CLK_IICDVFS 26 +#define R8A7792_CLK_I2C4 27 +#define R8A7792_CLK_I2C3 28 +#define R8A7792_CLK_I2C2 29 +#define R8A7792_CLK_I2C1 30 +#define R8A7792_CLK_I2C0 31 + +/* MSTP10 */ +#define 
R8A7792_CLK_SSI_ALL 5 +#define R8A7792_CLK_SSI4 11 +#define R8A7792_CLK_SSI3 12 + +#endif /* __DT_BINDINGS_CLOCK_R8A7792_H__ */ diff --git a/include/dt-bindings/clock/r8a7792-cpg-mssr.h b/include/dt-bindings/clock/r8a7792-cpg-mssr.h new file mode 100644 index 0000000..829c44d --- /dev/null +++ b/include/dt-bindings/clock/r8a7792-cpg-mssr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2015 Renesas Electronics Corp. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ + +#include + +/* r8a7792 CPG Core Clocks */ +#define R8A7792_CLK_Z 0 +#define R8A7792_CLK_ZG 1 +#define R8A7792_CLK_ZTR 2 +#define R8A7792_CLK_ZTRD2 3 +#define R8A7792_CLK_ZT 4 +#define R8A7792_CLK_ZX 5 +#define R8A7792_CLK_ZS 6 +#define R8A7792_CLK_HP 7 +#define R8A7792_CLK_I 8 +#define R8A7792_CLK_B 9 +#define R8A7792_CLK_LB 10 +#define R8A7792_CLK_P 11 +#define R8A7792_CLK_CL 12 +#define R8A7792_CLK_M2 13 +#define R8A7792_CLK_IMP 14 +#define R8A7792_CLK_ZB3 15 +#define R8A7792_CLK_ZB3D2 16 +#define R8A7792_CLK_DDR 17 +#define R8A7792_CLK_SD 18 +#define R8A7792_CLK_MP 19 +#define R8A7792_CLK_QSPI 20 +#define R8A7792_CLK_CP 21 +#define R8A7792_CLK_CPEX 22 +#define R8A7792_CLK_RCAN 23 +#define R8A7792_CLK_R 24 +#define R8A7792_CLK_OSC 25 + +#endif /* __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h new file mode 100644 index 0000000..49c66d8 --- /dev/null +++ b/include/dt-bindings/clock/r8a7793-clock.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * r8a7793 clock definition + * + * Copyright (C) 2014 Renesas Electronics Corporation + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__ +#define __DT_BINDINGS_CLOCK_R8A7793_H__ + +/* CPG */ +#define R8A7793_CLK_MAIN 0 +#define R8A7793_CLK_PLL0 1 +#define R8A7793_CLK_PLL1 2 +#define R8A7793_CLK_PLL3 3 +#define R8A7793_CLK_LB 4 +#define R8A7793_CLK_QSPI 5 +#define R8A7793_CLK_SDH 6 +#define R8A7793_CLK_SD0 7 +#define R8A7793_CLK_Z 8 +#define R8A7793_CLK_RCAN 9 +#define R8A7793_CLK_ADSP 10 + +/* MSTP0 */ +#define R8A7793_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7793_CLK_VCP0 1 +#define R8A7793_CLK_VPC0 3 +#define R8A7793_CLK_SSP1 9 +#define R8A7793_CLK_TMU1 11 +#define R8A7793_CLK_3DG 12 +#define R8A7793_CLK_2DDMAC 15 +#define R8A7793_CLK_FDP1_1 18 +#define R8A7793_CLK_FDP1_0 19 +#define R8A7793_CLK_TMU3 21 +#define R8A7793_CLK_TMU2 22 +#define R8A7793_CLK_CMT0 24 +#define R8A7793_CLK_TMU0 25 +#define R8A7793_CLK_VSP1_DU1 27 +#define R8A7793_CLK_VSP1_DU0 28 +#define R8A7793_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7793_CLK_SCIFA2 2 +#define R8A7793_CLK_SCIFA1 3 +#define R8A7793_CLK_SCIFA0 4 +#define R8A7793_CLK_MSIOF2 5 +#define R8A7793_CLK_SCIFB0 6 +#define R8A7793_CLK_SCIFB1 7 +#define R8A7793_CLK_MSIOF1 8 +#define R8A7793_CLK_SCIFB2 16 +#define R8A7793_CLK_SYS_DMAC1 18 +#define R8A7793_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7793_CLK_TPU0 4 +#define R8A7793_CLK_SDHI2 11 +#define R8A7793_CLK_SDHI1 12 +#define R8A7793_CLK_SDHI0 14 +#define R8A7793_CLK_MMCIF0 15 +#define R8A7793_CLK_IIC0 18 +#define R8A7793_CLK_PCIEC 19 +#define R8A7793_CLK_IIC1 23 +#define R8A7793_CLK_SSUSB 28 +#define R8A7793_CLK_CMT1 29 +#define R8A7793_CLK_USBDMAC0 30 +#define R8A7793_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7793_CLK_IRQC 7 +#define R8A7793_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7793_CLK_AUDIO_DMAC1 1 +#define R8A7793_CLK_AUDIO_DMAC0 2 +#define R8A7793_CLK_ADSP_MOD 6 +#define R8A7793_CLK_THERMAL 22 
+#define R8A7793_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7793_CLK_EHCI 3 +#define R8A7793_CLK_HSUSB 4 +#define R8A7793_CLK_HSCIF2 13 +#define R8A7793_CLK_SCIF5 14 +#define R8A7793_CLK_SCIF4 15 +#define R8A7793_CLK_HSCIF1 16 +#define R8A7793_CLK_HSCIF0 17 +#define R8A7793_CLK_SCIF3 18 +#define R8A7793_CLK_SCIF2 19 +#define R8A7793_CLK_SCIF1 20 +#define R8A7793_CLK_SCIF0 21 +#define R8A7793_CLK_DU1 23 +#define R8A7793_CLK_DU0 24 +#define R8A7793_CLK_LVDS0 26 + +/* MSTP8 */ +#define R8A7793_CLK_IPMMU_SGX 0 +#define R8A7793_CLK_VIN2 9 +#define R8A7793_CLK_VIN1 10 +#define R8A7793_CLK_VIN0 11 +#define R8A7793_CLK_ETHER 13 +#define R8A7793_CLK_SATA1 14 +#define R8A7793_CLK_SATA0 15 + +/* MSTP9 */ +#define R8A7793_CLK_GPIO7 4 +#define R8A7793_CLK_GPIO6 5 +#define R8A7793_CLK_GPIO5 7 +#define R8A7793_CLK_GPIO4 8 +#define R8A7793_CLK_GPIO3 9 +#define R8A7793_CLK_GPIO2 10 +#define R8A7793_CLK_GPIO1 11 +#define R8A7793_CLK_GPIO0 12 +#define R8A7793_CLK_RCAN1 15 +#define R8A7793_CLK_RCAN0 16 +#define R8A7793_CLK_QSPI_MOD 17 +#define R8A7793_CLK_I2C5 25 +#define R8A7793_CLK_IICDVFS 26 +#define R8A7793_CLK_I2C4 27 +#define R8A7793_CLK_I2C3 28 +#define R8A7793_CLK_I2C2 29 +#define R8A7793_CLK_I2C1 30 +#define R8A7793_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7793_CLK_SSI_ALL 5 +#define R8A7793_CLK_SSI9 6 +#define R8A7793_CLK_SSI8 7 +#define R8A7793_CLK_SSI7 8 +#define R8A7793_CLK_SSI6 9 +#define R8A7793_CLK_SSI5 10 +#define R8A7793_CLK_SSI4 11 +#define R8A7793_CLK_SSI3 12 +#define R8A7793_CLK_SSI2 13 +#define R8A7793_CLK_SSI1 14 +#define R8A7793_CLK_SSI0 15 +#define R8A7793_CLK_SCU_ALL 17 +#define R8A7793_CLK_SCU_DVC1 18 +#define R8A7793_CLK_SCU_DVC0 19 +#define R8A7793_CLK_SCU_CTU1_MIX1 20 +#define R8A7793_CLK_SCU_CTU0_MIX0 21 +#define R8A7793_CLK_SCU_SRC9 22 +#define R8A7793_CLK_SCU_SRC8 23 +#define R8A7793_CLK_SCU_SRC7 24 +#define R8A7793_CLK_SCU_SRC6 25 +#define R8A7793_CLK_SCU_SRC5 26 +#define R8A7793_CLK_SCU_SRC4 27 +#define R8A7793_CLK_SCU_SRC3 28 +#define R8A7793_CLK_SCU_SRC2 29 +#define R8A7793_CLK_SCU_SRC1 30 +#define R8A7793_CLK_SCU_SRC0 31 + +/* MSTP11 */ +#define R8A7793_CLK_SCIFA3 6 +#define R8A7793_CLK_SCIFA4 7 +#define R8A7793_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */ diff --git a/include/dt-bindings/clock/r8a7793-cpg-mssr.h b/include/dt-bindings/clock/r8a7793-cpg-mssr.h new file mode 100644 index 0000000..d1ff646 --- /dev/null +++ b/include/dt-bindings/clock/r8a7793-cpg-mssr.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2015 Renesas Electronics Corp. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ + +#include + +/* r8a7793 CPG Core Clocks */ +#define R8A7793_CLK_Z 0 +#define R8A7793_CLK_ZG 1 +#define R8A7793_CLK_ZTR 2 +#define R8A7793_CLK_ZTRD2 3 +#define R8A7793_CLK_ZT 4 +#define R8A7793_CLK_ZX 5 +#define R8A7793_CLK_ZS 6 +#define R8A7793_CLK_HP 7 +#define R8A7793_CLK_I 8 +#define R8A7793_CLK_B 9 +#define R8A7793_CLK_LB 10 +#define R8A7793_CLK_P 11 +#define R8A7793_CLK_CL 12 +#define R8A7793_CLK_M2 13 +#define R8A7793_CLK_ADSP 14 +#define R8A7793_CLK_ZB3 15 +#define R8A7793_CLK_ZB3D2 16 +#define R8A7793_CLK_DDR 17 +#define R8A7793_CLK_SDH 18 +#define R8A7793_CLK_SD0 19 +#define R8A7793_CLK_SD2 20 +#define R8A7793_CLK_SD3 21 +#define R8A7793_CLK_MMC0 22 +#define R8A7793_CLK_MP 23 +#define R8A7793_CLK_SSP 24 +#define R8A7793_CLK_SSPRS 25 +#define R8A7793_CLK_QSPI 26 +#define R8A7793_CLK_CP 27 +#define R8A7793_CLK_RCAN 28 +#define R8A7793_CLK_R 29 +#define R8A7793_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h new file mode 100644 index 0000000..649f005 --- /dev/null +++ b/include/dt-bindings/clock/r8a7794-clock.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright 2013 Ideas On Board SPRL + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__ +#define __DT_BINDINGS_CLOCK_R8A7794_H__ + +/* CPG */ +#define R8A7794_CLK_MAIN 0 +#define R8A7794_CLK_PLL0 1 +#define R8A7794_CLK_PLL1 2 +#define R8A7794_CLK_PLL3 3 +#define R8A7794_CLK_LB 4 +#define R8A7794_CLK_QSPI 5 +#define R8A7794_CLK_SDH 6 +#define R8A7794_CLK_SD0 7 +#define R8A7794_CLK_RCAN 8 + +/* MSTP0 */ +#define R8A7794_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7794_CLK_VCP0 1 +#define R8A7794_CLK_VPC0 3 +#define R8A7794_CLK_TMU1 11 +#define R8A7794_CLK_3DG 12 +#define R8A7794_CLK_2DDMAC 15 +#define R8A7794_CLK_FDP1_0 19 +#define R8A7794_CLK_TMU3 21 +#define R8A7794_CLK_TMU2 22 +#define R8A7794_CLK_CMT0 24 +#define R8A7794_CLK_TMU0 25 +#define R8A7794_CLK_VSP1_DU0 28 +#define R8A7794_CLK_VSP1_S 31 + +/* MSTP2 */ +#define R8A7794_CLK_SCIFA2 2 +#define R8A7794_CLK_SCIFA1 3 +#define R8A7794_CLK_SCIFA0 4 +#define R8A7794_CLK_MSIOF2 5 +#define R8A7794_CLK_SCIFB0 6 +#define R8A7794_CLK_SCIFB1 7 +#define R8A7794_CLK_MSIOF1 8 +#define R8A7794_CLK_SCIFB2 16 +#define R8A7794_CLK_SYS_DMAC1 18 +#define R8A7794_CLK_SYS_DMAC0 19 + +/* MSTP3 */ +#define R8A7794_CLK_SDHI2 11 +#define R8A7794_CLK_SDHI1 12 +#define R8A7794_CLK_SDHI0 14 +#define R8A7794_CLK_MMCIF0 15 +#define R8A7794_CLK_IIC0 18 +#define R8A7794_CLK_IIC1 23 +#define R8A7794_CLK_CMT1 29 +#define R8A7794_CLK_USBDMAC0 30 +#define R8A7794_CLK_USBDMAC1 31 + +/* MSTP4 */ +#define R8A7794_CLK_IRQC 7 +#define R8A7794_CLK_INTC_SYS 8 + +/* MSTP5 */ +#define R8A7794_CLK_AUDIO_DMAC0 2 +#define R8A7794_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7794_CLK_EHCI 3 +#define R8A7794_CLK_HSUSB 4 +#define R8A7794_CLK_HSCIF2 13 +#define R8A7794_CLK_SCIF5 14 +#define R8A7794_CLK_SCIF4 15 +#define R8A7794_CLK_HSCIF1 16 +#define R8A7794_CLK_HSCIF0 17 +#define R8A7794_CLK_SCIF3 18 +#define R8A7794_CLK_SCIF2 19 +#define R8A7794_CLK_SCIF1 20 +#define R8A7794_CLK_SCIF0 21 +#define R8A7794_CLK_DU1 23 +#define R8A7794_CLK_DU0 24 + +/* MSTP8 */ +#define R8A7794_CLK_VIN1 10 +#define R8A7794_CLK_VIN0 11 +#define R8A7794_CLK_ETHERAVB 12 +#define R8A7794_CLK_ETHER 13 + +/* MSTP9 */ +#define R8A7794_CLK_GPIO6 5 +#define R8A7794_CLK_GPIO5 
7 +#define R8A7794_CLK_GPIO4 8 +#define R8A7794_CLK_GPIO3 9 +#define R8A7794_CLK_GPIO2 10 +#define R8A7794_CLK_GPIO1 11 +#define R8A7794_CLK_GPIO0 12 +#define R8A7794_CLK_RCAN1 15 +#define R8A7794_CLK_RCAN0 16 +#define R8A7794_CLK_QSPI_MOD 17 +#define R8A7794_CLK_I2C5 25 +#define R8A7794_CLK_I2C4 27 +#define R8A7794_CLK_I2C3 28 +#define R8A7794_CLK_I2C2 29 +#define R8A7794_CLK_I2C1 30 +#define R8A7794_CLK_I2C0 31 + +/* MSTP10 */ +#define R8A7794_CLK_SSI_ALL 5 +#define R8A7794_CLK_SSI9 6 +#define R8A7794_CLK_SSI8 7 +#define R8A7794_CLK_SSI7 8 +#define R8A7794_CLK_SSI6 9 +#define R8A7794_CLK_SSI5 10 +#define R8A7794_CLK_SSI4 11 +#define R8A7794_CLK_SSI3 12 +#define R8A7794_CLK_SSI2 13 +#define R8A7794_CLK_SSI1 14 +#define R8A7794_CLK_SSI0 15 +#define R8A7794_CLK_SCU_ALL 17 +#define R8A7794_CLK_SCU_DVC1 18 +#define R8A7794_CLK_SCU_DVC0 19 +#define R8A7794_CLK_SCU_CTU1_MIX1 20 +#define R8A7794_CLK_SCU_CTU0_MIX0 21 +#define R8A7794_CLK_SCU_SRC6 25 +#define R8A7794_CLK_SCU_SRC5 26 +#define R8A7794_CLK_SCU_SRC4 27 +#define R8A7794_CLK_SCU_SRC3 28 +#define R8A7794_CLK_SCU_SRC2 29 +#define R8A7794_CLK_SCU_SRC1 30 + +/* MSTP11 */ +#define R8A7794_CLK_SCIFA3 6 +#define R8A7794_CLK_SCIFA4 7 +#define R8A7794_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */ diff --git a/include/dt-bindings/clock/r8a7794-cpg-mssr.h b/include/dt-bindings/clock/r8a7794-cpg-mssr.h new file mode 100644 index 0000000..6314e23 --- /dev/null +++ b/include/dt-bindings/clock/r8a7794-cpg-mssr.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2015 Renesas Electronics Corp. + */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ + +#include + +/* r8a7794 CPG Core Clocks */ +#define R8A7794_CLK_Z2 0 +#define R8A7794_CLK_ZG 1 +#define R8A7794_CLK_ZTR 2 +#define R8A7794_CLK_ZTRD2 3 +#define R8A7794_CLK_ZT 4 +#define R8A7794_CLK_ZX 5 +#define R8A7794_CLK_ZS 6 +#define R8A7794_CLK_HP 7 +#define R8A7794_CLK_I 8 +#define R8A7794_CLK_B 9 +#define R8A7794_CLK_LB 10 +#define R8A7794_CLK_P 11 +#define R8A7794_CLK_CL 12 +#define R8A7794_CLK_CP 13 +#define R8A7794_CLK_M2 14 +#define R8A7794_CLK_ADSP 15 +#define R8A7794_CLK_ZB3 16 +#define R8A7794_CLK_ZB3D2 17 +#define R8A7794_CLK_DDR 18 +#define R8A7794_CLK_SDH 19 +#define R8A7794_CLK_SD0 20 +#define R8A7794_CLK_SD2 21 +#define R8A7794_CLK_SD3 22 +#define R8A7794_CLK_MMC0 23 +#define R8A7794_CLK_MP 24 +#define R8A7794_CLK_QSPI 25 +#define R8A7794_CLK_CPEX 26 +#define R8A7794_CLK_RCAN 27 +#define R8A7794_CLK_R 28 +#define R8A7794_CLK_OSC 29 + +#endif /* __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h new file mode 100644 index 0000000..92b3e2a --- /dev/null +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2015 Renesas Electronics Corp. 
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7795 CPG Core Clocks */
+#define R8A7795_CLK_Z 0
+#define R8A7795_CLK_Z2 1
+#define R8A7795_CLK_ZR 2
+#define R8A7795_CLK_ZG 3
+#define R8A7795_CLK_ZTR 4
+#define R8A7795_CLK_ZTRD2 5
+#define R8A7795_CLK_ZT 6
+#define R8A7795_CLK_ZX 7
+#define R8A7795_CLK_S0D1 8
+#define R8A7795_CLK_S0D4 9
+#define R8A7795_CLK_S1D1 10
+#define R8A7795_CLK_S1D2 11
+#define R8A7795_CLK_S1D4 12
+#define R8A7795_CLK_S2D1 13
+#define R8A7795_CLK_S2D2 14
+#define R8A7795_CLK_S2D4 15
+#define R8A7795_CLK_S3D1 16
+#define R8A7795_CLK_S3D2 17
+#define R8A7795_CLK_S3D4 18
+#define R8A7795_CLK_LB 19
+#define R8A7795_CLK_CL 20
+#define R8A7795_CLK_ZB3 21
+#define R8A7795_CLK_ZB3D2 22
+#define R8A7795_CLK_CR 23
+#define R8A7795_CLK_CRD2 24
+#define R8A7795_CLK_SD0H 25
+#define R8A7795_CLK_SD0 26
+#define R8A7795_CLK_SD1H 27
+#define R8A7795_CLK_SD1 28
+#define R8A7795_CLK_SD2H 29
+#define R8A7795_CLK_SD2 30
+#define R8A7795_CLK_SD3H 31
+#define R8A7795_CLK_SD3 32
+#define R8A7795_CLK_SSP2 33
+#define R8A7795_CLK_SSP1 34
+#define R8A7795_CLK_SSPRS 35
+#define R8A7795_CLK_RPC 36
+#define R8A7795_CLK_RPCD2 37
+#define R8A7795_CLK_MSO 38
+#define R8A7795_CLK_CANFD 39
+#define R8A7795_CLK_HDMI 40
+#define R8A7795_CLK_CSI0 41
+/* CLK_CSIREF was removed */
+#define R8A7795_CLK_CP 43
+#define R8A7795_CLK_CPEX 44
+#define R8A7795_CLK_R 45
+#define R8A7795_CLK_OSC 46
+
+/* r8a7795 ES2.0 CPG Core Clocks */
+#define R8A7795_CLK_S0D2 47
+#define R8A7795_CLK_S0D3 48
+#define R8A7795_CLK_S0D6 49
+#define R8A7795_CLK_S0D8 50
+#define R8A7795_CLK_S0D12 51
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ */
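These per-SoC core-clock indices are not consumed by C code; device trees pass them through `clocks` phandles, together with the CPG_CORE/CPG_MOD selectors from renesas-cpg-mssr.h further below. A minimal consumer sketch, assuming the `cpg` clock-controller node of the r8a7795 device tree; the node name, unit address, and SDHI0 module number 314 are illustrative assumptions, not part of this header:

    #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
    #include <dt-bindings/clock/renesas-cpg-mssr.h>

    sdhi0: sd@ee100000 {
            compatible = "renesas,sdhi-r8a7795";
            reg = <0 0xee100000 0 0x2000>;
            /* CPG_MOD selects a module clock by MSTP number (314 is
             * assumed to be SDHI0 here); a core clock would be written
             * as <&cpg CPG_CORE R8A7795_CLK_SD0> instead. */
            clocks = <&cpg CPG_MOD 314>;
    };
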
diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
new file mode 100644
index 0000000..c0957cf
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7796 CPG Core Clocks */
+#define R8A7796_CLK_Z 0
+#define R8A7796_CLK_Z2 1
+#define R8A7796_CLK_ZR 2
+#define R8A7796_CLK_ZG 3
+#define R8A7796_CLK_ZTR 4
+#define R8A7796_CLK_ZTRD2 5
+#define R8A7796_CLK_ZT 6
+#define R8A7796_CLK_ZX 7
+#define R8A7796_CLK_S0D1 8
+#define R8A7796_CLK_S0D2 9
+#define R8A7796_CLK_S0D3 10
+#define R8A7796_CLK_S0D4 11
+#define R8A7796_CLK_S0D6 12
+#define R8A7796_CLK_S0D8 13
+#define R8A7796_CLK_S0D12 14
+#define R8A7796_CLK_S1D1 15
+#define R8A7796_CLK_S1D2 16
+#define R8A7796_CLK_S1D4 17
+#define R8A7796_CLK_S2D1 18
+#define R8A7796_CLK_S2D2 19
+#define R8A7796_CLK_S2D4 20
+#define R8A7796_CLK_S3D1 21
+#define R8A7796_CLK_S3D2 22
+#define R8A7796_CLK_S3D4 23
+#define R8A7796_CLK_LB 24
+#define R8A7796_CLK_CL 25
+#define R8A7796_CLK_ZB3 26
+#define R8A7796_CLK_ZB3D2 27
+#define R8A7796_CLK_ZB3D4 28
+#define R8A7796_CLK_CR 29
+#define R8A7796_CLK_CRD2 30
+#define R8A7796_CLK_SD0H 31
+#define R8A7796_CLK_SD0 32
+#define R8A7796_CLK_SD1H 33
+#define R8A7796_CLK_SD1 34
+#define R8A7796_CLK_SD2H 35
+#define R8A7796_CLK_SD2 36
+#define R8A7796_CLK_SD3H 37
+#define R8A7796_CLK_SD3 38
+#define R8A7796_CLK_SSP2 39
+#define R8A7796_CLK_SSP1 40
+#define R8A7796_CLK_SSPRS 41
+#define R8A7796_CLK_RPC 42
+#define R8A7796_CLK_RPCD2 43
+#define R8A7796_CLK_MSO 44
+#define R8A7796_CLK_CANFD 45
+#define R8A7796_CLK_HDMI 46
+#define R8A7796_CLK_CSI0 47
+/* CLK_CSIREF was removed */
+#define R8A7796_CLK_CP 49
+#define R8A7796_CLK_CPEX 50
+#define R8A7796_CLK_R 51
+#define R8A7796_CLK_OSC 52
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a77965-cpg-mssr.h b/include/dt-bindings/clock/r8a77965-cpg-mssr.h
new file mode 100644
index 0000000..6d3b5a9
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77965-cpg-mssr.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Jacopo Mondi
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77965 CPG Core Clocks */
+#define R8A77965_CLK_Z 0
+#define R8A77965_CLK_ZR 1
+#define R8A77965_CLK_ZG 2
+#define R8A77965_CLK_ZTR 3
+#define R8A77965_CLK_ZTRD2 4
+#define R8A77965_CLK_ZT 5
+#define R8A77965_CLK_ZX 6
+#define R8A77965_CLK_S0D1 7
+#define R8A77965_CLK_S0D2 8
+#define R8A77965_CLK_S0D3 9
+#define R8A77965_CLK_S0D4 10
+#define R8A77965_CLK_S0D6 11
+#define R8A77965_CLK_S0D8 12
+#define R8A77965_CLK_S0D12 13
+#define R8A77965_CLK_S1D1 14
+#define R8A77965_CLK_S1D2 15
+#define R8A77965_CLK_S1D4 16
+#define R8A77965_CLK_S2D1 17
+#define R8A77965_CLK_S2D2 18
+#define R8A77965_CLK_S2D4 19
+#define R8A77965_CLK_S3D1 20
+#define R8A77965_CLK_S3D2 21
+#define R8A77965_CLK_S3D4 22
+#define R8A77965_CLK_LB 23
+#define R8A77965_CLK_CL 24
+#define R8A77965_CLK_ZB3 25
+#define R8A77965_CLK_ZB3D2 26
+#define R8A77965_CLK_CR 27
+#define R8A77965_CLK_CRD2 28
+#define R8A77965_CLK_SD0H 29
+#define R8A77965_CLK_SD0 30
+#define R8A77965_CLK_SD1H 31
+#define R8A77965_CLK_SD1 32
+#define R8A77965_CLK_SD2H 33
+#define R8A77965_CLK_SD2 34
+#define R8A77965_CLK_SD3H 35
+#define R8A77965_CLK_SD3 36
+#define R8A77965_CLK_SSP2 37
+#define R8A77965_CLK_SSP1 38
+#define R8A77965_CLK_SSPRS 39
+#define R8A77965_CLK_RPC 40
+#define R8A77965_CLK_RPCD2 41
+#define R8A77965_CLK_MSO 42
+#define R8A77965_CLK_CANFD 43
+#define R8A77965_CLK_HDMI 44
+#define R8A77965_CLK_CSI0 45
+#define R8A77965_CLK_CP 46
+#define R8A77965_CLK_CPEX 47 +#define R8A77965_CLK_R 48 +#define R8A77965_CLK_OSC 49 + +#endif /* __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77970-cpg-mssr.h b/include/dt-bindings/clock/r8a77970-cpg-mssr.h new file mode 100644 index 0000000..6145ebe --- /dev/null +++ b/include/dt-bindings/clock/r8a77970-cpg-mssr.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2016 Renesas Electronics Corp. + * Copyright (C) 2017 Cogent Embedded, Inc. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ + +#include + +/* r8a77970 CPG Core Clocks */ +#define R8A77970_CLK_Z2 0 +#define R8A77970_CLK_ZR 1 +#define R8A77970_CLK_ZTR 2 +#define R8A77970_CLK_ZTRD2 3 +#define R8A77970_CLK_ZT 4 +#define R8A77970_CLK_ZX 5 +#define R8A77970_CLK_S1D1 6 +#define R8A77970_CLK_S1D2 7 +#define R8A77970_CLK_S1D4 8 +#define R8A77970_CLK_S2D1 9 +#define R8A77970_CLK_S2D2 10 +#define R8A77970_CLK_S2D4 11 +#define R8A77970_CLK_LB 12 +#define R8A77970_CLK_CL 13 +#define R8A77970_CLK_ZB3 14 +#define R8A77970_CLK_ZB3D2 15 +#define R8A77970_CLK_DDR 16 +#define R8A77970_CLK_CR 17 +#define R8A77970_CLK_CRD2 18 +#define R8A77970_CLK_SD0H 19 +#define R8A77970_CLK_SD0 20 +#define R8A77970_CLK_RPC 21 +#define R8A77970_CLK_RPCD2 22 +#define R8A77970_CLK_MSO 23 +#define R8A77970_CLK_CANFD 24 +#define R8A77970_CLK_CSI0 25 +#define R8A77970_CLK_FRAY 26 +#define R8A77970_CLK_CP 27 +#define R8A77970_CLK_CPEX 28 +#define R8A77970_CLK_R 29 +#define R8A77970_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77980-cpg-mssr.h b/include/dt-bindings/clock/r8a77980-cpg-mssr.h new file mode 100644 index 0000000..a4c0d76 --- /dev/null +++ b/include/dt-bindings/clock/r8a77980-cpg-mssr.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018 Cogent Embedded, Inc. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ + +#include + +/* r8a77980 CPG Core Clocks */ +#define R8A77980_CLK_Z2 0 +#define R8A77980_CLK_ZR 1 +#define R8A77980_CLK_ZTR 2 +#define R8A77980_CLK_ZTRD2 3 +#define R8A77980_CLK_ZT 4 +#define R8A77980_CLK_ZX 5 +#define R8A77980_CLK_S0D1 6 +#define R8A77980_CLK_S0D2 7 +#define R8A77980_CLK_S0D3 8 +#define R8A77980_CLK_S0D4 9 +#define R8A77980_CLK_S0D6 10 +#define R8A77980_CLK_S0D12 11 +#define R8A77980_CLK_S0D24 12 +#define R8A77980_CLK_S1D1 13 +#define R8A77980_CLK_S1D2 14 +#define R8A77980_CLK_S1D4 15 +#define R8A77980_CLK_S2D1 16 +#define R8A77980_CLK_S2D2 17 +#define R8A77980_CLK_S2D4 18 +#define R8A77980_CLK_S3D1 19 +#define R8A77980_CLK_S3D2 20 +#define R8A77980_CLK_S3D4 21 +#define R8A77980_CLK_LB 22 +#define R8A77980_CLK_CL 23 +#define R8A77980_CLK_ZB3 24 +#define R8A77980_CLK_ZB3D2 25 +#define R8A77980_CLK_ZB3D4 26 +#define R8A77980_CLK_SD0H 27 +#define R8A77980_CLK_SD0 28 +#define R8A77980_CLK_RPC 29 +#define R8A77980_CLK_RPCD2 30 +#define R8A77980_CLK_MSO 31 +#define R8A77980_CLK_CANFD 32 +#define R8A77980_CLK_CSI0 33 +#define R8A77980_CLK_CP 34 +#define R8A77980_CLK_CPEX 35 +#define R8A77980_CLK_R 36 +#define R8A77980_CLK_OSC 37 + +#endif /* __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77990-cpg-mssr.h b/include/dt-bindings/clock/r8a77990-cpg-mssr.h new file mode 100644 index 0000000..a596a48 --- /dev/null +++ b/include/dt-bindings/clock/r8a77990-cpg-mssr.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ + +#include + +/* r8a77990 CPG Core Clocks */ +#define R8A77990_CLK_Z2 0 +#define R8A77990_CLK_ZR 1 +#define R8A77990_CLK_ZG 2 +#define R8A77990_CLK_ZTR 3 +#define R8A77990_CLK_ZT 4 +#define R8A77990_CLK_ZX 5 +#define R8A77990_CLK_S0D1 6 +#define R8A77990_CLK_S0D3 7 +#define R8A77990_CLK_S0D6 8 +#define R8A77990_CLK_S0D12 9 +#define R8A77990_CLK_S0D24 10 +#define R8A77990_CLK_S1D1 11 +#define R8A77990_CLK_S1D2 12 +#define R8A77990_CLK_S1D4 13 +#define R8A77990_CLK_S2D1 14 +#define R8A77990_CLK_S2D2 15 +#define R8A77990_CLK_S2D4 16 +#define R8A77990_CLK_S3D1 17 +#define R8A77990_CLK_S3D2 18 +#define R8A77990_CLK_S3D4 19 +#define R8A77990_CLK_S0D6C 20 +#define R8A77990_CLK_S3D1C 21 +#define R8A77990_CLK_S3D2C 22 +#define R8A77990_CLK_S3D4C 23 +#define R8A77990_CLK_LB 24 +#define R8A77990_CLK_CL 25 +#define R8A77990_CLK_ZB3 26 +#define R8A77990_CLK_ZB3D2 27 +#define R8A77990_CLK_CR 28 +#define R8A77990_CLK_CRD2 29 +#define R8A77990_CLK_SD0H 30 +#define R8A77990_CLK_SD0 31 +#define R8A77990_CLK_SD1H 32 +#define R8A77990_CLK_SD1 33 +#define R8A77990_CLK_SD3H 34 +#define R8A77990_CLK_SD3 35 +#define R8A77990_CLK_RPC 36 +#define R8A77990_CLK_RPCD2 37 +#define R8A77990_CLK_ZA2 38 +#define R8A77990_CLK_ZA8 39 +#define R8A77990_CLK_Z2D 40 +#define R8A77990_CLK_CANFD 41 +#define R8A77990_CLK_MSO 42 +#define R8A77990_CLK_R 43 +#define R8A77990_CLK_OSC 44 +#define R8A77990_CLK_LV0 45 +#define R8A77990_CLK_LV1 46 +#define R8A77990_CLK_CSI0 47 +#define R8A77990_CLK_CP 48 +#define R8A77990_CLK_CPEX 49 + +#endif /* __DT_BINDINGS_CLOCK_R8A77990_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h new file mode 100644 index 0000000..fd701c4 --- /dev/null +++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h @@ -0,0 
+1,54 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2017 Glider bvba + */ +#ifndef __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ + +#include + +/* r8a77995 CPG Core Clocks */ +#define R8A77995_CLK_Z2 0 +#define R8A77995_CLK_ZG 1 +#define R8A77995_CLK_ZTR 2 +#define R8A77995_CLK_ZT 3 +#define R8A77995_CLK_ZX 4 +#define R8A77995_CLK_S0D1 5 +#define R8A77995_CLK_S1D1 6 +#define R8A77995_CLK_S1D2 7 +#define R8A77995_CLK_S1D4 8 +#define R8A77995_CLK_S2D1 9 +#define R8A77995_CLK_S2D2 10 +#define R8A77995_CLK_S2D4 11 +#define R8A77995_CLK_S3D1 12 +#define R8A77995_CLK_S3D2 13 +#define R8A77995_CLK_S3D4 14 +#define R8A77995_CLK_S1D4C 15 +#define R8A77995_CLK_S3D1C 16 +#define R8A77995_CLK_S3D2C 17 +#define R8A77995_CLK_S3D4C 18 +#define R8A77995_CLK_LB 19 +#define R8A77995_CLK_CL 20 +#define R8A77995_CLK_ZB3 21 +#define R8A77995_CLK_ZB3D2 22 +#define R8A77995_CLK_CR 23 +#define R8A77995_CLK_CRD2 24 +#define R8A77995_CLK_SD0H 25 +#define R8A77995_CLK_SD0 26 +/* CLK_SSP2 was removed */ +/* CLK_SSP1 was removed */ +#define R8A77995_CLK_RPC 29 +#define R8A77995_CLK_RPCD2 30 +#define R8A77995_CLK_ZA2 31 +#define R8A77995_CLK_ZA8 32 +#define R8A77995_CLK_Z2D 33 +#define R8A77995_CLK_CANFD 34 +#define R8A77995_CLK_MSO 35 +#define R8A77995_CLK_R 36 +#define R8A77995_CLK_OSC 37 +#define R8A77995_CLK_LV0 38 +#define R8A77995_CLK_LV1 39 +#define R8A77995_CLK_CP 40 +#define R8A77995_CLK_CPEX 41 + +#endif /* __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h new file mode 100644 index 0000000..90c0f3d --- /dev/null +++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * R9A06G032 sysctrl IDs + * + * Copyright (C) 2018 Renesas Electronics Europe Limited + * + * Michel Pollet , + */ + +#ifndef __DT_BINDINGS_R9A06G032_SYSCTRL_H__ +#define __DT_BINDINGS_R9A06G032_SYSCTRL_H__ + +#define R9A06G032_CLK_PLL_USB 1 +#define R9A06G032_CLK_48 1 /* AKA CLK_PLL_USB */ +#define R9A06G032_MSEBIS_CLK 3 /* AKA CLKOUT_D16 */ +#define R9A06G032_MSEBIM_CLK 3 /* AKA CLKOUT_D16 */ +#define R9A06G032_CLK_DDRPHY_PLLCLK 5 /* AKA CLKOUT_D1OR2 */ +#define R9A06G032_CLK50 6 /* AKA CLKOUT_D20 */ +#define R9A06G032_CLK25 7 /* AKA CLKOUT_D40 */ +#define R9A06G032_CLK125 9 /* AKA CLKOUT_D8 */ +#define R9A06G032_CLK_P5_PG1 17 /* AKA DIV_P5_PG */ +#define R9A06G032_CLK_REF_SYNC 21 /* AKA DIV_REF_SYNC */ +#define R9A06G032_CLK_25_PG4 26 +#define R9A06G032_CLK_25_PG5 27 +#define R9A06G032_CLK_25_PG6 28 +#define R9A06G032_CLK_25_PG7 29 +#define R9A06G032_CLK_25_PG8 30 +#define R9A06G032_CLK_ADC 31 +#define R9A06G032_CLK_ECAT100 32 +#define R9A06G032_CLK_HSR100 33 +#define R9A06G032_CLK_I2C0 34 +#define R9A06G032_CLK_I2C1 35 +#define R9A06G032_CLK_MII_REF 36 +#define R9A06G032_CLK_NAND 37 +#define R9A06G032_CLK_NOUSBP2_PG6 38 +#define R9A06G032_CLK_P1_PG2 39 +#define R9A06G032_CLK_P1_PG3 40 +#define R9A06G032_CLK_P1_PG4 41 +#define R9A06G032_CLK_P4_PG3 42 +#define R9A06G032_CLK_P4_PG4 43 +#define R9A06G032_CLK_P6_PG1 44 +#define R9A06G032_CLK_P6_PG2 45 +#define R9A06G032_CLK_P6_PG3 46 +#define R9A06G032_CLK_P6_PG4 47 +#define R9A06G032_CLK_PCI_USB 48 +#define R9A06G032_CLK_QSPI0 49 +#define R9A06G032_CLK_QSPI1 50 +#define R9A06G032_CLK_RGMII_REF 51 +#define R9A06G032_CLK_RMII_REF 52 +#define R9A06G032_CLK_SDIO0 53 +#define R9A06G032_CLK_SDIO1 54 +#define R9A06G032_CLK_SERCOS100 55 +#define R9A06G032_CLK_SLCD 56 +#define 
R9A06G032_CLK_SPI0 57
+#define R9A06G032_CLK_SPI1 58
+#define R9A06G032_CLK_SPI2 59
+#define R9A06G032_CLK_SPI3 60
+#define R9A06G032_CLK_SPI4 61
+#define R9A06G032_CLK_SPI5 62
+#define R9A06G032_CLK_SWITCH 63
+#define R9A06G032_HCLK_ECAT125 65
+#define R9A06G032_HCLK_PINCONFIG 66
+#define R9A06G032_HCLK_SERCOS 67
+#define R9A06G032_HCLK_SGPIO2 68
+#define R9A06G032_HCLK_SGPIO3 69
+#define R9A06G032_HCLK_SGPIO4 70
+#define R9A06G032_HCLK_TIMER0 71
+#define R9A06G032_HCLK_TIMER1 72
+#define R9A06G032_HCLK_USBF 73
+#define R9A06G032_HCLK_USBH 74
+#define R9A06G032_HCLK_USBPM 75
+#define R9A06G032_CLK_48_PG_F 76
+#define R9A06G032_CLK_48_PG4 77
+#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
+#define R9A06G032_HCLK_CAN0 85
+#define R9A06G032_HCLK_CAN1 86
+#define R9A06G032_HCLK_DELTASIGMA 87
+#define R9A06G032_HCLK_PWMPTO 88
+#define R9A06G032_HCLK_RSV 89
+#define R9A06G032_HCLK_SGPIO0 90
+#define R9A06G032_HCLK_SGPIO1 91
+#define R9A06G032_RTOS_MDC 92
+#define R9A06G032_CLK_CM3 93
+#define R9A06G032_CLK_DDRC 94
+#define R9A06G032_CLK_ECAT25 95
+#define R9A06G032_CLK_HSR50 96
+#define R9A06G032_CLK_HW_RTOS 97
+#define R9A06G032_CLK_SERCOS50 98
+#define R9A06G032_HCLK_ADC 99
+#define R9A06G032_HCLK_CM3 100
+#define R9A06G032_HCLK_CRYPTO_EIP150 101
+#define R9A06G032_HCLK_CRYPTO_EIP93 102
+#define R9A06G032_HCLK_DDRC 103
+#define R9A06G032_HCLK_DMA0 104
+#define R9A06G032_HCLK_DMA1 105
+#define R9A06G032_HCLK_GMAC0 106
+#define R9A06G032_HCLK_GMAC1 107
+#define R9A06G032_HCLK_GPIO0 108
+#define R9A06G032_HCLK_GPIO1 109
+#define R9A06G032_HCLK_GPIO2 110
+#define R9A06G032_HCLK_HSR 111
+#define R9A06G032_HCLK_I2C0 112
+#define R9A06G032_HCLK_I2C1 113
+#define R9A06G032_HCLK_LCD 114
+#define R9A06G032_HCLK_MSEBI_M 115
+#define R9A06G032_HCLK_MSEBI_S 116
+#define R9A06G032_HCLK_NAND 117
+#define R9A06G032_HCLK_PG_I 118
+#define R9A06G032_HCLK_PG19 119
+#define R9A06G032_HCLK_PG20 120
+#define R9A06G032_HCLK_PG3 121
+#define R9A06G032_HCLK_PG4 122
+#define R9A06G032_HCLK_QSPI0 123
+#define R9A06G032_HCLK_QSPI1 124
+#define R9A06G032_HCLK_ROM 125
+#define R9A06G032_HCLK_RTC 126
+#define R9A06G032_HCLK_SDIO0 127
+#define R9A06G032_HCLK_SDIO1 128
+#define R9A06G032_HCLK_SEMAP 129
+#define R9A06G032_HCLK_SPI0 130
+#define R9A06G032_HCLK_SPI1 131
+#define R9A06G032_HCLK_SPI2 132
+#define R9A06G032_HCLK_SPI3 133
+#define R9A06G032_HCLK_SPI4 134
+#define R9A06G032_HCLK_SPI5 135
+#define R9A06G032_HCLK_SWITCH 136
+#define R9A06G032_HCLK_SWITCH_RG 137
+#define R9A06G032_HCLK_UART0 138
+#define R9A06G032_HCLK_UART1 139
+#define R9A06G032_HCLK_UART2 140
+#define R9A06G032_HCLK_UART3 141
+#define R9A06G032_HCLK_UART4 142
+#define R9A06G032_HCLK_UART5 143
+#define R9A06G032_HCLK_UART6 144
+#define R9A06G032_HCLK_UART7 145
+#define R9A06G032_CLK_UART0 146
+#define R9A06G032_CLK_UART1 147
+#define R9A06G032_CLK_UART2 148
+#define R9A06G032_CLK_UART3 149
+#define R9A06G032_CLK_UART4 150
+#define R9A06G032_CLK_UART5 151
+#define R9A06G032_CLK_UART6 152
+#define R9A06G032_CLK_UART7 153
+
+#endif /* __DT_BINDINGS_R9A06G032_SYSCTRL_H__ */
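The sysctrl IDs above are likewise referenced by index from the device tree. A sketch of an RZ/N1 UART consumer, assuming a `sysctrl` clock-controller node; the compatible string, unit address, and clock-names follow the generic dw-apb-uart binding and are illustrative assumptions here, not taken from this header:

    #include <dt-bindings/clock/r9a06g032-sysctrl.h>

    uart0: serial@40060000 {
            compatible = "snps,dw-apb-uart";
            reg = <0x40060000 0x400>;
            /* baud reference clock first, then the APB bus clock */
            clocks = <&sysctrl R9A06G032_CLK_UART0>,
                     <&sysctrl R9A06G032_HCLK_UART0>;
            clock-names = "baudclk", "apb_pclk";
    };
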
diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h
new file mode 100644
index 0000000..8169ad0
--- /dev/null
+++ b/include/dt-bindings/clock/renesas-cpg-mssr.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
+
+#define CPG_CORE 0 /* Core Clock */
+#define CPG_MOD 1 /* Module Clock */
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
new file mode 100644
index 0000000..35a5a01
--- /dev/null
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2015 Rockchip Electronics Co. Ltd.
+ * Author: Xing Zheng
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3036_H
+
+/* core clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_GPLL 3
+#define ARMCLK 4
+
+/* sclk gates (special clocks) */
+#define SCLK_GPU 64
+#define SCLK_SPI 65
+#define SCLK_SDMMC 68
+#define SCLK_SDIO 69
+#define SCLK_EMMC 71
+#define SCLK_NANDC 76
+#define SCLK_UART0 77
+#define SCLK_UART1 78
+#define SCLK_UART2 79
+#define SCLK_I2S 82
+#define SCLK_SPDIF 83
+#define SCLK_TIMER0 85
+#define SCLK_TIMER1 86
+#define SCLK_TIMER2 87
+#define SCLK_TIMER3 88
+#define SCLK_OTGPHY0 93
+#define SCLK_LCDC 100
+#define SCLK_HDMI 109
+#define SCLK_HEVC 111
+#define SCLK_I2S_OUT 113
+#define SCLK_SDMMC_DRV 114
+#define SCLK_SDIO_DRV 115
+#define SCLK_EMMC_DRV 117
+#define SCLK_SDMMC_SAMPLE 118
+#define SCLK_SDIO_SAMPLE 119
+#define SCLK_EMMC_SAMPLE 121
+#define SCLK_PVTM_CORE 123
+#define SCLK_PVTM_GPU 124
+#define SCLK_PVTM_VIDEO 125
+#define SCLK_MAC 151
+#define SCLK_MACREF 152
+#define SCLK_MACPLL 153
+#define SCLK_SFC 160
+
+/* aclk gates */
+#define ACLK_DMAC2 194
+#define ACLK_LCDC 197
+#define ACLK_VIO 203
+#define ACLK_VCODEC 208
+#define ACLK_CPU 209
+#define ACLK_PERI 210
+
+/* pclk gates */
+#define PCLK_GPIO0 320
+#define PCLK_GPIO1 321
+#define PCLK_GPIO2 322
+#define PCLK_GRF 329
+#define PCLK_I2C0 332
+#define PCLK_I2C1 333
+#define PCLK_I2C2 334
+#define PCLK_SPI 338
+#define PCLK_UART0 341
+#define PCLK_UART1 342
+#define PCLK_UART2 343
+#define PCLK_PWM 350
+#define PCLK_TIMER 353
+#define PCLK_HDMI 360
+#define PCLK_CPU 362
+#define PCLK_PERI 363
+#define PCLK_DDRUPCTL 364
+#define PCLK_WDT 368
+#define PCLK_ACODEC 369
+
+/* hclk gates */
+#define HCLK_OTG0 449
+#define HCLK_OTG1 450
+#define HCLK_NANDC 453
+#define HCLK_SDMMC 456
+#define HCLK_SDIO 457
+#define HCLK_EMMC 459
+#define HCLK_MAC 460
+#define HCLK_I2S 462
+#define HCLK_LCDC 465
+#define HCLK_ROM 467
+#define HCLK_VIO_BUS 472
+#define HCLK_VCODEC 476
+#define HCLK_CPU 477
+#define HCLK_PERI 478
+
+#define CLK_NR_CLKS (HCLK_PERI + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0 0
+#define SRST_CORE1 1
+#define SRST_CORE0_DBG 4
+#define SRST_CORE1_DBG 5
+#define SRST_CORE0_POR 8
+#define SRST_CORE1_POR 9
+#define SRST_L2C 12
+#define SRST_TOPDBG 13
+#define SRST_STRC_SYS_A 14
+#define SRST_PD_CORE_NIU 15
+
+#define SRST_TIMER2 16
+#define SRST_CPUSYS_H 17
+#define SRST_AHB2APB_H 19
+#define SRST_TIMER3 20
+#define SRST_INTMEM 21
+#define SRST_ROM 22
+#define SRST_PERI_NIU 23
+#define SRST_I2S 24
+#define SRST_DDR_PLL 25
+#define SRST_GPU_DLL 26
+#define SRST_TIMER0 27
+#define SRST_TIMER1 28
+#define SRST_CORE_DLL 29
+#define SRST_EFUSE_P 30
+#define SRST_ACODEC_P 31
+
+#define SRST_GPIO0 32
+#define SRST_GPIO1 33
+#define SRST_GPIO2 34
+#define SRST_UART0 39
+#define SRST_UART1 40
+#define SRST_UART2 41
+#define SRST_I2C0 43
+#define SRST_I2C1 44
+#define SRST_I2C2 45
+#define SRST_SFC 47
+
+#define SRST_PWM0 48
+#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_GRF 55 +#define SRST_PERIPHSYS_A 57 +#define SRST_PERIPHSYS_H 58 +#define SRST_PERIPHSYS_P 59 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA2 64 +#define SRST_MAC 66 +#define SRST_NANDC 68 +#define SRST_USBOTG0 69 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_OTGC1 74 +#define SRST_DDRMSCH 79 + +#define SRST_MMC0 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI0 84 +#define SRST_WDT 86 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 + +#define SRST_HDMI_P 96 +#define SRST_VIO_BUS_H 99 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_USBPOR 105 + +#define SRST_VCODEC_A 112 +#define SRST_VCODEC_H 113 +#define SRST_VIO1_A 114 +#define SRST_HEVC 115 +#define SRST_VCODEC_NIU_A 116 +#define SRST_LCDC1_A 117 +#define SRST_LCDC1_H 118 +#define SRST_LCDC1_D 119 +#define SRST_GPU 120 +#define SRST_GPU_NIU_A 122 + +#define SRST_DBG_P 131 + +#endif diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h new file mode 100644 index 0000000..553f972 --- /dev/null +++ b/include/dt-bindings/clock/rk3066a-cru.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014 MundoReader S.L. + * Author: Heiko Stuebner + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H + +#include + +/* soft-reset indices */ +#define SRST_SRST1 0 +#define SRST_SRST2 1 + +#define SRST_L2MEM 18 +#define SRST_I2S0 23 +#define SRST_I2S1 24 +#define SRST_I2S2 25 +#define SRST_TIMER2 29 + +#define SRST_GPIO4 36 +#define SRST_GPIO6 38 + +#define SRST_TSADC 92 + +#define SRST_HDMI 96 +#define SRST_HDMI_APB 97 +#define SRST_CIF1 111 + +#endif diff --git a/include/dt-bindings/clock/rk3128-cru.h b/include/dt-bindings/clock/rk3128-cru.h new file mode 100644 index 0000000..6a47825 --- /dev/null +++ b/include/dt-bindings/clock/rk3128-cru.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2017 Rockchip Electronics Co. Ltd. 
+ * Author: Elaine + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3128_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3128_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define ARMCLK 5 +#define PLL_GPLL_DIV2 6 +#define PLL_GPLL_DIV3 7 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S0 80 +#define SCLK_I2S1 81 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_SARADC 91 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_VOP 122 +#define SCLK_MAC_SRC 124 +#define SCLK_MAC 126 +#define SCLK_MAC_REFOUT 127 +#define SCLK_MAC_REF 128 +#define SCLK_MAC_RX 129 +#define SCLK_MAC_TX 130 +#define SCLK_HEVC_CORE 134 +#define SCLK_RGA 135 +#define SCLK_CRYPTO 138 +#define SCLK_TSP 139 +#define SCLK_OTGPHY0 142 +#define SCLK_OTGPHY1 143 +#define SCLK_DDRC 144 +#define SCLK_PVTM_FUNC 145 +#define SCLK_PVTM_CORE 146 +#define SCLK_PVTM_GPU 147 +#define SCLK_MIPI_24M 148 +#define SCLK_PVTM 149 +#define SCLK_CIF_SRC 150 +#define SCLK_CIF_OUT_SRC 151 +#define SCLK_CIF_OUT 152 +#define SCLK_SFC 153 +#define SCLK_USB480M 154 + +/* dclk gates */ +#define DCLK_VOP 190 +#define DCLK_EBC 191 + +/* aclk gates */ +#define ACLK_VIO0 192 +#define ACLK_VIO1 193 +#define ACLK_DMAC 194 +#define ACLK_CPU 195 +#define ACLK_VEPU 196 +#define ACLK_VDPU 197 +#define ACLK_CIF 198 +#define ACLK_IEP 199 +#define ACLK_LCDC0 204 +#define ACLK_RGA 205 +#define ACLK_PERI 210 +#define ACLK_VOP 211 +#define ACLK_GMAC 212 +#define ACLK_GPU 213 + +/* pclk gates */ +#define PCLK_SARADC 318 +#define PCLK_WDT 319 +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_VIO_H2P 324 +#define PCLK_MIPI 325 +#define PCLK_EFUSE 326 +#define PCLK_HDMI 327 +#define PCLK_ACODEC 328 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_SPI0 338 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_TSADC 344 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_CPU 354 +#define PCLK_PERI 363 +#define PCLK_GMAC 367 +#define PCLK_PMU_PRE 368 +#define PCLK_SIM_CARD 369 + +/* hclk gates */ +#define HCLK_SPDIF 440 +#define HCLK_GPS 441 +#define HCLK_USBHOST 442 +#define HCLK_I2S_8CH 443 +#define HCLK_I2S_2CH 444 +#define HCLK_VOP 452 +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_CPU 460 +#define HCLK_VEPU 461 +#define HCLK_VDPU 462 +#define HCLK_LCDC0 463 +#define HCLK_EBC 465 +#define HCLK_VIO 466 +#define HCLK_RGA 467 +#define HCLK_IEP 468 +#define HCLK_VIO_H2P 469 +#define HCLK_CIF 470 +#define HCLK_HOST2 473 +#define HCLK_OTG 474 +#define HCLK_TSP 475 +#define HCLK_CRYPTO 476 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define 
SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_ACLK_CORE 13 +#define SRST_STRC_SYS_A 14 +#define SRST_L2C 15 + +#define SRST_CPUSYS_H 18 +#define SRST_AHB2APBSYS_H 19 +#define SRST_SPDIF 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_PERI_NIU 23 +#define SRST_I2S_2CH 24 +#define SRST_I2S_8CH 25 +#define SRST_GPU_PVTM 26 +#define SRST_FUNC_PVTM 27 +#define SRST_CORE_PVTM 29 +#define SRST_EFUSE_P 30 +#define SRST_ACODEC_P 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_MIPIPHY_P 36 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 +#define SRST_SFC 47 + +#define SRST_PWM 48 +#define SRST_DAP_PO 50 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_CRYPTO 53 +#define SRST_GRF 55 +#define SRST_GMAC 56 +#define SRST_PERIPH_SYS_A 57 +#define SRST_PERIPH_SYS_H 58 +#define SRST_PERIPH_SYS_P 59 +#define SRST_SMART_CARD 60 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA 64 +#define SRST_GPS 67 +#define SRST_NANDC 68 +#define SRST_USBOTG0 69 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_OTGC1 74 +#define SRST_DDRMSCH 79 + +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI 84 +#define SRST_WDT 86 +#define SRST_SARADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 +#define SRST_TSP 92 +#define SRST_TSP_CLKIN 93 +#define SRST_HOST0_ECHI 94 + +#define SRST_HDMI_P 96 +#define SRST_VIO_ARBI_H 97 +#define SRST_VIO0_A 98 +#define SRST_VIO_BUS_H 99 +#define SRST_VOP_A 100 +#define SRST_VOP_H 101 +#define SRST_VOP_D 102 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_USBPOR 105 +#define SRST_IEP_A 106 +#define SRST_IEP_H 107 +#define SRST_RGA_A 108 +#define SRST_RGA_H 109 +#define SRST_CIF0 110 +#define SRST_PMU 111 + +#define SRST_VCODEC_A 112 +#define SRST_VCODEC_H 113 +#define SRST_VIO1_A 114 +#define SRST_HEVC_CORE 115 +#define SRST_VCODEC_NIU_A 116 +#define SRST_PMU_NIU_P 117 +#define SRST_LCDC0_S 119 +#define SRST_GPU 120 +#define SRST_GPU_NIU_A 122 +#define SRST_EBC_A 123 +#define SRST_EBC_H 124 + +#define SRST_CORE_DBG 128 +#define SRST_DBG_P 129 +#define SRST_TIMER0 130 +#define SRST_TIMER1 131 +#define SRST_TIMER2 132 +#define SRST_TIMER3 133 +#define SRST_TIMER4 134 +#define SRST_TIMER5 135 +#define SRST_VIO_H2P 136 +#define SRST_VIO_MIPI_DSI 137 + +#endif diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h new file mode 100644 index 0000000..afad906 --- /dev/null +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014 MundoReader S.L. 
+ * Author: Heiko Stuebner + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H + +/* core clocks from */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define CORE_PERI 5 +#define CORE_L2C 6 +#define ARMCLK 7 + +/* sclk gates (special clocks) */ +#define SCLK_UART0 64 +#define SCLK_UART1 65 +#define SCLK_UART2 66 +#define SCLK_UART3 67 +#define SCLK_MAC 68 +#define SCLK_SPI0 69 +#define SCLK_SPI1 70 +#define SCLK_SARADC 71 +#define SCLK_SDMMC 72 +#define SCLK_SDIO 73 +#define SCLK_EMMC 74 +#define SCLK_I2S0 75 +#define SCLK_I2S1 76 +#define SCLK_I2S2 77 +#define SCLK_SPDIF 78 +#define SCLK_CIF0 79 +#define SCLK_CIF1 80 +#define SCLK_OTGPHY0 81 +#define SCLK_OTGPHY1 82 +#define SCLK_HSADC 83 +#define SCLK_TIMER0 84 +#define SCLK_TIMER1 85 +#define SCLK_TIMER2 86 +#define SCLK_TIMER3 87 +#define SCLK_TIMER4 88 +#define SCLK_TIMER5 89 +#define SCLK_TIMER6 90 +#define SCLK_JTAG 91 +#define SCLK_SMC 92 +#define SCLK_TSADC 93 + +#define DCLK_LCDC0 190 +#define DCLK_LCDC1 191 + +/* aclk gates */ +#define ACLK_DMA1 192 +#define ACLK_DMA2 193 +#define ACLK_GPS 194 +#define ACLK_LCDC0 195 +#define ACLK_LCDC1 196 +#define ACLK_GPU 197 +#define ACLK_SMC 198 +#define ACLK_CIF1 199 +#define ACLK_IPP 200 +#define ACLK_RGA 201 +#define ACLK_CIF0 202 +#define ACLK_CPU 203 +#define ACLK_PERI 204 +#define ACLK_VEPU 205 +#define ACLK_VDPU 206 + +/* pclk gates */ +#define PCLK_GRF 320 +#define PCLK_PMU 321 +#define PCLK_TIMER0 322 +#define PCLK_TIMER1 323 +#define PCLK_TIMER2 324 +#define PCLK_TIMER3 325 +#define PCLK_PWM01 326 +#define PCLK_PWM23 327 +#define PCLK_SPI0 328 +#define PCLK_SPI1 329 +#define PCLK_SARADC 330 +#define PCLK_WDT 331 +#define PCLK_UART0 332 +#define PCLK_UART1 333 +#define PCLK_UART2 334 +#define PCLK_UART3 335 +#define PCLK_I2C0 336 +#define PCLK_I2C1 337 +#define PCLK_I2C2 338 +#define PCLK_I2C3 339 +#define PCLK_I2C4 340 +#define PCLK_GPIO0 341 +#define PCLK_GPIO1 342 +#define PCLK_GPIO2 343 +#define PCLK_GPIO3 344 +#define PCLK_GPIO4 345 +#define PCLK_GPIO6 346 +#define PCLK_EFUSE 347 +#define PCLK_TZPC 348 +#define PCLK_TSADC 349 +#define PCLK_CPU 350 +#define PCLK_PERI 351 +#define PCLK_DDRUPCTL 352 +#define PCLK_PUBL 353 + +/* hclk gates */ +#define HCLK_SDMMC 448 +#define HCLK_SDIO 449 +#define HCLK_EMMC 450 +#define HCLK_OTG0 451 +#define HCLK_EMAC 452 +#define HCLK_SPDIF 453 +#define HCLK_I2S0 454 +#define HCLK_I2S1 455 +#define HCLK_I2S2 456 +#define HCLK_OTG1 457 +#define HCLK_HSIC 458 +#define HCLK_HSADC 459 +#define HCLK_PIDF 460 +#define HCLK_LCDC0 461 +#define HCLK_LCDC1 462 +#define HCLK_ROM 463 +#define HCLK_CIF0 464 +#define HCLK_IPP 465 +#define HCLK_RGA 466 +#define HCLK_NANDC0 467 +#define HCLK_CPU 468 +#define HCLK_PERI 469 +#define HCLK_CIF1 470 +#define HCLK_VEPU 471 +#define HCLK_VDPU 472 +#define HCLK_HDMI 473 + +#define CLK_NR_CLKS (HCLK_HDMI + 1) + +/* soft-reset indices */ +#define SRST_MCORE 2 +#define SRST_CORE0 3 +#define SRST_CORE1 4 +#define SRST_MCORE_DBG 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE0_WDT 12 +#define SRST_CORE1_WDT 13 +#define SRST_STRC_SYS 14 +#define SRST_L2C 15 + +#define SRST_CPU_AHB 17 +#define SRST_AHB2APB 19 +#define SRST_DMA1 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_SPDIF 26 +#define SRST_TIMER0 27 +#define SRST_TIMER1 28 +#define SRST_EFUSE 30 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 + +#define SRST_UART0 39 +#define SRST_UART1 40 
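The soft-reset indices work the same way, but against the CRU's #reset-cells rather than #clock-cells. A hedged sketch of a reset consumer, again assuming a &cru provider label and a uart0 node from the SoC .dtsi (the reset-names string is purely illustrative):

        #include <dt-bindings/clock/rk3188-cru-common.h>

        &uart0 {
                resets = <&cru SRST_UART0>;
                reset-names = "uart";   /* illustrative name */
        };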
+#define SRST_UART2 41 +#define SRST_UART3 42 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 +#define SRST_I2C4 47 + +#define SRST_PWM0 48 +#define SRST_PWM1 49 +#define SRST_DAP_PO 50 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_TPIU_ATB 53 +#define SRST_PMU_APB 54 +#define SRST_GRF 55 +#define SRST_PMU 56 +#define SRST_PERI_AXI 57 +#define SRST_PERI_AHB 58 +#define SRST_PERI_APB 59 +#define SRST_PERI_NIU 60 +#define SRST_CPU_PERI 61 +#define SRST_EMEM_PERI 62 +#define SRST_USB_PERI 63 + +#define SRST_DMA2 64 +#define SRST_SMC 65 +#define SRST_MAC 66 +#define SRST_NANC0 68 +#define SRST_USBOTG0 69 +#define SRST_USBPHY0 70 +#define SRST_OTGC0 71 +#define SRST_USBOTG1 72 +#define SRST_USBPHY1 73 +#define SRST_OTGC1 74 +#define SRST_HSADC 76 +#define SRST_PIDFILTER 77 +#define SRST_DDR_MSCH 79 + +#define SRST_TZPC 80 +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI0 84 +#define SRST_SPI1 85 +#define SRST_WDT 86 +#define SRST_SARADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_APB 89 +#define SRST_DDRCTL 90 +#define SRST_DDRCTL_APB 91 +#define SRST_DDRPUB 93 + +#define SRST_VIO0_AXI 98 +#define SRST_VIO0_AHB 99 +#define SRST_LCDC0_AXI 100 +#define SRST_LCDC0_AHB 101 +#define SRST_LCDC0_DCLK 102 +#define SRST_LCDC1_AXI 103 +#define SRST_LCDC1_AHB 104 +#define SRST_LCDC1_DCLK 105 +#define SRST_IPP_AXI 106 +#define SRST_IPP_AHB 107 +#define SRST_RGA_AXI 108 +#define SRST_RGA_AHB 109 +#define SRST_CIF0 110 + +#define SRST_VCODEC_AXI 112 +#define SRST_VCODEC_AHB 113 +#define SRST_VIO1_AXI 114 +#define SRST_VCODEC_CPU 115 +#define SRST_VCODEC_NIU 116 +#define SRST_GPU 120 +#define SRST_GPU_NIU 122 +#define SRST_TFUN_ATB 125 +#define SRST_TFUN_APB 126 +#define SRST_CTI4_APB 127 + +#define SRST_TPIU_APB 128 +#define SRST_TRACE 129 +#define SRST_CORE_DBG 130 +#define SRST_DBG_APB 131 +#define SRST_CTI0 132 +#define SRST_CTI0_APB 133 +#define SRST_CTI1 134 +#define SRST_CTI1_APB 135 +#define SRST_PTM_CORE0 136 +#define SRST_PTM_CORE1 137 +#define SRST_PTM0 138 +#define SRST_PTM0_ATB 139 +#define SRST_PTM1 140 +#define SRST_PTM1_ATB 141 +#define SRST_CTM 142 +#define SRST_TS 143 + +#endif diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h new file mode 100644 index 0000000..c45916a --- /dev/null +++ b/include/dt-bindings/clock/rk3188-cru.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014 MundoReader S.L. 
+ * Author: Heiko Stuebner + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H + +#include <dt-bindings/clock/rk3188-cru-common.h> + +/* soft-reset indices */ +#define SRST_PTM_CORE2 0 +#define SRST_PTM_CORE3 1 +#define SRST_CORE2 5 +#define SRST_CORE3 6 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 + +#define SRST_TIMER2 16 +#define SRST_TIMER4 23 +#define SRST_I2S0 24 +#define SRST_TIMER5 25 +#define SRST_TIMER3 29 +#define SRST_TIMER6 31 + +#define SRST_PTM3 36 +#define SRST_PTM3_ATB 37 + +#define SRST_GPS 67 +#define SRST_HSICPHY 75 +#define SRST_TIMER 78 + +#define SRST_PTM2 92 +#define SRST_CORE2_WDT 94 +#define SRST_CORE3_WDT 95 + +#define SRST_PTM2_ATB 111 + +#define SRST_HSIC 117 +#define SRST_CTI2 118 +#define SRST_CTI2_APB 119 +#define SRST_GPU_BRIDGE 121 +#define SRST_CTI3 123 +#define SRST_CTI3_APB 124 + +#endif diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h new file mode 100644 index 0000000..de550ea --- /dev/null +++ b/include/dt-bindings/clock/rk3228-cru.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Jeffy Chen + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3228_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define ARMCLK 5 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_TSADC 72 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_I2S0 80 +#define SCLK_I2S1 81 +#define SCLK_I2S2 82 +#define SCLK_SPDIF 83 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_I2S_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO_SAMPLE 119 +#define SCLK_SDIO_SRC 120 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_VOP 122 +#define SCLK_HDMI_HDCP 123 +#define SCLK_MAC_SRC 124 +#define SCLK_MAC_EXTCLK 125 +#define SCLK_MAC 126 +#define SCLK_MAC_REFOUT 127 +#define SCLK_MAC_REF 128 +#define SCLK_MAC_RX 129 +#define SCLK_MAC_TX 130 +#define SCLK_MAC_PHY 131 +#define SCLK_MAC_OUT 132 +#define SCLK_VDEC_CABAC 133 +#define SCLK_VDEC_CORE 134 +#define SCLK_RGA 135 +#define SCLK_HDCP 136 +#define SCLK_HDMI_CEC 137 +#define SCLK_CRYPTO 138 +#define SCLK_TSP 139 +#define SCLK_HSADC 140 +#define SCLK_WIFI 141 +#define SCLK_OTGPHY0 142 +#define SCLK_OTGPHY1 143 +#define SCLK_HDMI_PHY 144 + +/* dclk gates */ +#define DCLK_VOP 190 +#define DCLK_HDMI_PHY 191 + +/* aclk gates */ +#define ACLK_DMAC 194 +#define ACLK_CPU 195 +#define ACLK_VPU_PRE 196 +#define ACLK_RKVDEC_PRE 197 +#define ACLK_RGA_PRE 198 +#define ACLK_IEP_PRE 199 +#define ACLK_HDCP_PRE 200 +#define ACLK_VOP_PRE 201 +#define ACLK_VPU 202 +#define ACLK_RKVDEC 203 +#define ACLK_IEP 204 +#define ACLK_RGA 205 +#define ACLK_HDCP 206 +#define ACLK_PERI 210 +#define ACLK_VOP 211 +#define ACLK_GMAC 212 +#define ACLK_GPU 213 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_VIO_H2P 324 +#define PCLK_HDCP 325 +#define PCLK_EFUSE_1024 326 +#define PCLK_EFUSE_256 327 +#define PCLK_GRF 329 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_SPI0 338
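On the provider side, these one-cell specifiers are declared by the CRU node itself; that is what lets a single integer from this header select a clock or a reset line. A sketch of such a provider node, assuming the register address commonly used for the RK3228 CRU (verify against the SoC .dtsi before relying on it):

        cru: clock-controller@110e0000 {
                compatible = "rockchip,rk3228-cru";
                reg = <0x110e0000 0x1000>;
                #clock-cells = <1>;
                #reset-cells = <1>;
        };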
+#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_TSADC 344 +#define PCLK_PWM 350 +#define PCLK_TIMER 353 +#define PCLK_CPU 354 +#define PCLK_PERI 363 +#define PCLK_HDMI_CTRL 364 +#define PCLK_HDMI_PHY 365 +#define PCLK_GMAC 367 + +/* hclk gates */ +#define HCLK_I2S0_8CH 442 +#define HCLK_I2S1_8CH 443 +#define HCLK_I2S2_2CH 444 +#define HCLK_SPDIF_8CH 445 +#define HCLK_VOP 452 +#define HCLK_NANDC 453 +#define HCLK_SDMMC 456 +#define HCLK_SDIO 457 +#define HCLK_EMMC 459 +#define HCLK_CPU 460 +#define HCLK_VPU_PRE 461 +#define HCLK_RKVDEC_PRE 462 +#define HCLK_VIO_PRE 463 +#define HCLK_VPU 464 +#define HCLK_RKVDEC 465 +#define HCLK_VIO 466 +#define HCLK_RGA 467 +#define HCLK_IEP 468 +#define HCLK_VIO_H2P 469 +#define HCLK_HDCP_MMU 470 +#define HCLK_HOST0 471 +#define HCLK_HOST1 472 +#define HCLK_HOST2 473 +#define HCLK_OTG 474 +#define HCLK_TSP 475 +#define HCLK_M_CRYPTO 476 +#define HCLK_S_CRYPTO 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_ACLK_CORE 13 +#define SRST_NOC 14 +#define SRST_L2C 15 + +#define SRST_CPUSYS_H 18 +#define SRST_BUSSYS_H 19 +#define SRST_SPDIF 20 +#define SRST_INTMEM 21 +#define SRST_ROM 22 +#define SRST_OTG_ADP 23 +#define SRST_I2S0 24 +#define SRST_I2S1 25 +#define SRST_I2S2 26 +#define SRST_ACODEC_P 27 +#define SRST_DFIMON 28 +#define SRST_MSCH 29 +#define SRST_EFUSE1024 30 +#define SRST_EFUSE256 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_PERIPH_NOC_A 36 +#define SRST_PERIPH_NOC_BUS_H 37 +#define SRST_PERIPH_NOC_P 38 +#define SRST_UART0 39 +#define SRST_UART1 40 +#define SRST_UART2 41 +#define SRST_PHYNOC 42 +#define SRST_I2C0 43 +#define SRST_I2C1 44 +#define SRST_I2C2 45 +#define SRST_I2C3 46 + +#define SRST_PWM 48 +#define SRST_A53_GIC 49 +#define SRST_DAP 51 +#define SRST_DAP_NOC 52 +#define SRST_CRYPTO 53 +#define SRST_SGRF 54 +#define SRST_GRF 55 +#define SRST_GMAC 56 +#define SRST_PERIPH_NOC_H 58 +#define SRST_MACPHY 63 + +#define SRST_DMA 64 +#define SRST_NANDC 68 +#define SRST_USBOTG 69 +#define SRST_OTGC 70 +#define SRST_USBHOST0 71 +#define SRST_HOST_CTRL0 72 +#define SRST_USBHOST1 73 +#define SRST_HOST_CTRL1 74 +#define SRST_USBHOST2 75 +#define SRST_HOST_CTRL2 76 +#define SRST_USBPOR0 77 +#define SRST_USBPOR1 78 +#define SRST_DDRMSCH 79 + +#define SRST_SMART_CARD 80 +#define SRST_SDMMC 81 +#define SRST_SDIO 82 +#define SRST_EMMC 83 +#define SRST_SPI 84 +#define SRST_TSP_H 85 +#define SRST_TSP 86 +#define SRST_TSADC 87 +#define SRST_DDRPHY 88 +#define SRST_DDRPHY_P 89 +#define SRST_DDRCTRL 90 +#define SRST_DDRCTRL_P 91 +#define SRST_HOST0_ECHI 92 +#define SRST_HOST1_ECHI 93 +#define SRST_HOST2_ECHI 94 +#define SRST_VOP_NOC_A 95 + +#define SRST_HDMI_P 96 +#define SRST_VIO_ARBI_H 97 +#define SRST_IEP_NOC_A 98 +#define SRST_VIO_NOC_H 99 +#define SRST_VOP_A 100 +#define SRST_VOP_H 101 +#define SRST_VOP_D 102 +#define SRST_UTMI0 103 +#define SRST_UTMI1 104 +#define SRST_UTMI2 105 +#define SRST_UTMI3 106 +#define SRST_RGA 107 +#define SRST_RGA_NOC_A 108 +#define SRST_RGA_A 109 +#define SRST_RGA_H 110 +#define SRST_HDCP_A 111 + +#define SRST_VPU_A 112 +#define SRST_VPU_H 113 +#define 
SRST_VPU_NOC_A 116 +#define SRST_VPU_NOC_H 117 +#define SRST_RKVDEC_A 118 +#define SRST_RKVDEC_NOC_A 119 +#define SRST_RKVDEC_H 120 +#define SRST_RKVDEC_NOC_H 121 +#define SRST_RKVDEC_CORE 122 +#define SRST_RKVDEC_CABAC 123 +#define SRST_IEP_A 124 +#define SRST_IEP_H 125 +#define SRST_GPU_A 126 +#define SRST_GPU_NOC_A 127 + +#define SRST_CORE_DBG 128 +#define SRST_DBG_P 129 +#define SRST_TIMER0 130 +#define SRST_TIMER1 131 +#define SRST_TIMER2 132 +#define SRST_TIMER3 133 +#define SRST_TIMER4 134 +#define SRST_TIMER5 135 +#define SRST_VIO_H2P 136 +#define SRST_HDMIPHY 139 +#define SRST_VDAC 140 +#define SRST_TIMER_6CH_P 141 + +#endif diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h new file mode 100644 index 0000000..33819ac --- /dev/null +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -0,0 +1,380 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2014 MundoReader S.L. + * Author: Heiko Stuebner + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_GPU 64 +#define SCLK_SPI0 65 +#define SCLK_SPI1 66 +#define SCLK_SPI2 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO0 69 +#define SCLK_SDIO1 70 +#define SCLK_EMMC 71 +#define SCLK_TSADC 72 +#define SCLK_SARADC 73 +#define SCLK_PS2C 74 +#define SCLK_NANDC0 75 +#define SCLK_NANDC1 76 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_UART3 80 +#define SCLK_UART4 81 +#define SCLK_I2S0 82 +#define SCLK_SPDIF 83 +#define SCLK_SPDIF8CH 84 +#define SCLK_TIMER0 85 +#define SCLK_TIMER1 86 +#define SCLK_TIMER2 87 +#define SCLK_TIMER3 88 +#define SCLK_TIMER4 89 +#define SCLK_TIMER5 90 +#define SCLK_TIMER6 91 +#define SCLK_HSADC 92 +#define SCLK_OTGPHY0 93 +#define SCLK_OTGPHY1 94 +#define SCLK_OTGPHY2 95 +#define SCLK_OTG_ADP 96 +#define SCLK_HSICPHY480M 97 +#define SCLK_HSICPHY12M 98 +#define SCLK_MACREF 99 +#define SCLK_LCDC_PWM0 100 +#define SCLK_LCDC_PWM1 101 +#define SCLK_MAC_RX 102 +#define SCLK_MAC_TX 103 +#define SCLK_EDP_24M 104 +#define SCLK_EDP 105 +#define SCLK_RGA 106 +#define SCLK_ISP 107 +#define SCLK_ISP_JPE 108 +#define SCLK_HDMI_HDCP 109 +#define SCLK_HDMI_CEC 110 +#define SCLK_HEVC_CABAC 111 +#define SCLK_HEVC_CORE 112 +#define SCLK_I2S0_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO0_DRV 115 +#define SCLK_SDIO1_DRV 116 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO0_SAMPLE 119 +#define SCLK_SDIO1_SAMPLE 120 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_USBPHY480M_SRC 122 +#define SCLK_PVTM_CORE 123 +#define SCLK_PVTM_GPU 124 +#define SCLK_CRYPTO 125 +#define SCLK_MIPIDSI_24M 126 +#define SCLK_VIP_OUT 127 + +#define SCLK_MAC 151 +#define SCLK_MACREF_OUT 152 + +#define DCLK_VOP0 190 +#define DCLK_VOP1 191 + +/* aclk gates */ +#define ACLK_GPU 192 +#define ACLK_DMAC1 193 +#define ACLK_DMAC2 194 +#define ACLK_MMU 195 +#define ACLK_GMAC 196 +#define ACLK_VOP0 197 +#define ACLK_VOP1 198 +#define ACLK_CRYPTO 199 +#define ACLK_RGA 200 +#define ACLK_RGA_NIU 201 +#define ACLK_IEP 202 +#define ACLK_VIO0_NIU 203 +#define ACLK_VIP 204 +#define ACLK_ISP 205 +#define ACLK_VIO1_NIU 206 +#define ACLK_HEVC 207 +#define ACLK_VCODEC 208 +#define ACLK_CPU 209 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 
+#define PCLK_GPIO4 324 +#define PCLK_GPIO5 325 +#define PCLK_GPIO6 326 +#define PCLK_GPIO7 327 +#define PCLK_GPIO8 328 +#define PCLK_GRF 329 +#define PCLK_SGRF 330 +#define PCLK_PMU 331 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_I2C4 336 +#define PCLK_I2C5 337 +#define PCLK_SPI0 338 +#define PCLK_SPI1 339 +#define PCLK_SPI2 340 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_UART3 344 +#define PCLK_UART4 345 +#define PCLK_TSADC 346 +#define PCLK_SARADC 347 +#define PCLK_SIM 348 +#define PCLK_GMAC 349 +#define PCLK_PWM 350 +#define PCLK_RKPWM 351 +#define PCLK_PS2C 352 +#define PCLK_TIMER 353 +#define PCLK_TZPC 354 +#define PCLK_EDP_CTRL 355 +#define PCLK_MIPI_DSI0 356 +#define PCLK_MIPI_DSI1 357 +#define PCLK_MIPI_CSI 358 +#define PCLK_LVDS_PHY 359 +#define PCLK_HDMI_CTRL 360 +#define PCLK_VIO2_H2P 361 +#define PCLK_CPU 362 +#define PCLK_PERI 363 +#define PCLK_DDRUPCTL0 364 +#define PCLK_PUBL0 365 +#define PCLK_DDRUPCTL1 366 +#define PCLK_PUBL1 367 +#define PCLK_WDT 368 +#define PCLK_EFUSE256 369 +#define PCLK_EFUSE1024 370 +#define PCLK_ISP_IN 371 + +/* hclk gates */ +#define HCLK_GPS 448 +#define HCLK_OTG0 449 +#define HCLK_USBHOST0 450 +#define HCLK_USBHOST1 451 +#define HCLK_HSIC 452 +#define HCLK_NANDC0 453 +#define HCLK_NANDC1 454 +#define HCLK_TSP 455 +#define HCLK_SDMMC 456 +#define HCLK_SDIO0 457 +#define HCLK_SDIO1 458 +#define HCLK_EMMC 459 +#define HCLK_HSADC 460 +#define HCLK_CRYPTO 461 +#define HCLK_I2S0 462 +#define HCLK_SPDIF 463 +#define HCLK_SPDIF8CH 464 +#define HCLK_VOP0 465 +#define HCLK_VOP1 466 +#define HCLK_ROM 467 +#define HCLK_IEP 468 +#define HCLK_ISP 469 +#define HCLK_RGA 470 +#define HCLK_VIO_AHB_ARBI 471 +#define HCLK_VIO_NIU 472 +#define HCLK_VIP 473 +#define HCLK_VIO2_H2P 474 +#define HCLK_HEVC 475 +#define HCLK_VCODEC 476 +#define HCLK_CPU 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE0 0 +#define SRST_CORE1 1 +#define SRST_CORE2 2 +#define SRST_CORE3 3 +#define SRST_CORE0_PO 4 +#define SRST_CORE1_PO 5 +#define SRST_CORE2_PO 6 +#define SRST_CORE3_PO 7 +#define SRST_PDCORE_STRSYS 8 +#define SRST_PDBUS_STRSYS 9 +#define SRST_L2C 10 +#define SRST_TOPDBG 11 +#define SRST_CORE0_DBG 12 +#define SRST_CORE1_DBG 13 +#define SRST_CORE2_DBG 14 +#define SRST_CORE3_DBG 15 + +#define SRST_PDBUG_AHB_ARBITOR 16 +#define SRST_EFUSE256 17 +#define SRST_DMAC1 18 +#define SRST_INTMEM 19 +#define SRST_ROM 20 +#define SRST_SPDIF8CH 21 +#define SRST_TIMER 22 +#define SRST_I2S0 23 +#define SRST_SPDIF 24 +#define SRST_TIMER0 25 +#define SRST_TIMER1 26 +#define SRST_TIMER2 27 +#define SRST_TIMER3 28 +#define SRST_TIMER4 29 +#define SRST_TIMER5 30 +#define SRST_EFUSE 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_GPIO4 36 +#define SRST_GPIO5 37 +#define SRST_GPIO6 38 +#define SRST_GPIO7 39 +#define SRST_GPIO8 40 +#define SRST_I2C0 42 +#define SRST_I2C1 43 +#define SRST_I2C2 44 +#define SRST_I2C3 45 +#define SRST_I2C4 46 +#define SRST_I2C5 47 + +#define SRST_DWPWM 48 +#define SRST_MMC_PERI 49 +#define SRST_PERIPH_MMU 50 +#define SRST_DAP 51 +#define SRST_DAP_SYS 52 +#define SRST_TPIU 53 +#define SRST_PMU_APB 54 +#define SRST_GRF 55 +#define SRST_PMU 56 +#define SRST_PERIPH_AXI 57 +#define SRST_PERIPH_AHB 58 +#define SRST_PERIPH_APB 59 +#define SRST_PERIPH_NIU 60 +#define SRST_PDPERI_AHB_ARBI 61 +#define SRST_EMEM 62 +#define SRST_USB_PERI 63 + +#define 
SRST_DMAC2 64 +#define SRST_MAC 66 +#define SRST_GPS 67 +#define SRST_RKPWM 69 +#define SRST_CCP 71 +#define SRST_USBHOST0 72 +#define SRST_HSIC 73 +#define SRST_HSIC_AUX 74 +#define SRST_HSIC_PHY 75 +#define SRST_HSADC 76 +#define SRST_NANDC0 77 +#define SRST_NANDC1 78 + +#define SRST_TZPC 80 +#define SRST_SPI0 83 +#define SRST_SPI1 84 +#define SRST_SPI2 85 +#define SRST_SARADC 87 +#define SRST_PDALIVE_NIU 88 +#define SRST_PDPMU_INTMEM 89 +#define SRST_PDPMU_NIU 90 +#define SRST_SGRF 91 + +#define SRST_VIO_ARBI 96 +#define SRST_RGA_NIU 97 +#define SRST_VIO0_NIU_AXI 98 +#define SRST_VIO_NIU_AHB 99 +#define SRST_LCDC0_AXI 100 +#define SRST_LCDC0_AHB 101 +#define SRST_LCDC0_DCLK 102 +#define SRST_VIO1_NIU_AXI 103 +#define SRST_VIP 104 +#define SRST_RGA_CORE 105 +#define SRST_IEP_AXI 106 +#define SRST_IEP_AHB 107 +#define SRST_RGA_AXI 108 +#define SRST_RGA_AHB 109 +#define SRST_ISP 110 +#define SRST_EDP 111 + +#define SRST_VCODEC_AXI 112 +#define SRST_VCODEC_AHB 113 +#define SRST_VIO_H2P 114 +#define SRST_MIPIDSI0 115 +#define SRST_MIPIDSI1 116 +#define SRST_MIPICSI 117 +#define SRST_LVDS_PHY 118 +#define SRST_LVDS_CON 119 +#define SRST_GPU 120 +#define SRST_HDMI 121 +#define SRST_CORE_PVTM 124 +#define SRST_GPU_PVTM 125 + +#define SRST_MMC0 128 +#define SRST_SDIO0 129 +#define SRST_SDIO1 130 +#define SRST_EMMC 131 +#define SRST_USBOTG_AHB 132 +#define SRST_USBOTG_PHY 133 +#define SRST_USBOTG_CON 134 +#define SRST_USBHOST0_AHB 135 +#define SRST_USBHOST0_PHY 136 +#define SRST_USBHOST0_CON 137 +#define SRST_USBHOST1_AHB 138 +#define SRST_USBHOST1_PHY 139 +#define SRST_USBHOST1_CON 140 +#define SRST_USB_ADP 141 +#define SRST_ACC_EFUSE 142 + +#define SRST_CORESIGHT 144 +#define SRST_PD_CORE_AHB_NOC 145 +#define SRST_PD_CORE_APB_NOC 146 +#define SRST_PD_CORE_MP_AXI 147 +#define SRST_GIC 148 +#define SRST_LCDC_PWM0 149 +#define SRST_LCDC_PWM1 150 +#define SRST_VIO0_H2P_BRG 151 +#define SRST_VIO1_H2P_BRG 152 +#define SRST_RGA_H2P_BRG 153 +#define SRST_HEVC 154 +#define SRST_TSADC 159 + +#define SRST_DDRPHY0 160 +#define SRST_DDRPHY0_APB 161 +#define SRST_DDRCTRL0 162 +#define SRST_DDRCTRL0_APB 163 +#define SRST_DDRPHY0_CTRL 164 +#define SRST_DDRPHY1 165 +#define SRST_DDRPHY1_APB 166 +#define SRST_DDRCTRL1 167 +#define SRST_DDRCTRL1_APB 168 +#define SRST_DDRPHY1_CTRL 169 +#define SRST_DDRMSCH0 170 +#define SRST_DDRMSCH1 171 +#define SRST_CRYPTO 174 +#define SRST_C2C_HOST 175 + +#define SRST_LCDC1_AXI 176 +#define SRST_LCDC1_AHB 177 +#define SRST_LCDC1_DCLK 178 +#define SRST_UART0 179 +#define SRST_UART1 180 +#define SRST_UART2 181 +#define SRST_UART3 182 +#define SRST_UART4 183 +#define SRST_SIMC 186 +#define SRST_PS2C 187 +#define SRST_TSP 188 +#define SRST_TSP_CLKIN0 189 +#define SRST_TSP_CLKIN1 190 +#define SRST_TSP_27M 191 + +#endif diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h new file mode 100644 index 0000000..d97840f --- /dev/null +++ b/include/dt-bindings/clock/rk3308-cru.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 Rockchip Electronics Co. Ltd. 
+ * Author: Finley Xiao + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_VPLL0 3 +#define PLL_VPLL1 4 +#define ARMCLK 5 + +/* sclk (special clocks) */ +#define USB480M 14 +#define SCLK_RTC32K 15 +#define SCLK_PVTM_CORE 16 +#define SCLK_UART0 17 +#define SCLK_UART1 18 +#define SCLK_UART2 19 +#define SCLK_UART3 20 +#define SCLK_UART4 21 +#define SCLK_I2C0 22 +#define SCLK_I2C1 23 +#define SCLK_I2C2 24 +#define SCLK_I2C3 25 +#define SCLK_PWM0 26 +#define SCLK_SPI0 27 +#define SCLK_SPI1 28 +#define SCLK_SPI2 29 +#define SCLK_TIMER0 30 +#define SCLK_TIMER1 31 +#define SCLK_TIMER2 32 +#define SCLK_TIMER3 33 +#define SCLK_TIMER4 34 +#define SCLK_TIMER5 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_OTP 38 +#define SCLK_OTP_USR 39 +#define SCLK_CPU_BOOST 40 +#define SCLK_CRYPTO 41 +#define SCLK_CRYPTO_APK 42 +#define SCLK_NANDC_DIV 43 +#define SCLK_NANDC_DIV50 44 +#define SCLK_NANDC 45 +#define SCLK_SDMMC_DIV 46 +#define SCLK_SDMMC_DIV50 47 +#define SCLK_SDMMC 48 +#define SCLK_SDMMC_DRV 49 +#define SCLK_SDMMC_SAMPLE 50 +#define SCLK_SDIO_DIV 51 +#define SCLK_SDIO_DIV50 52 +#define SCLK_SDIO 53 +#define SCLK_SDIO_DRV 54 +#define SCLK_SDIO_SAMPLE 55 +#define SCLK_EMMC_DIV 56 +#define SCLK_EMMC_DIV50 57 +#define SCLK_EMMC 58 +#define SCLK_EMMC_DRV 59 +#define SCLK_EMMC_SAMPLE 60 +#define SCLK_SFC 61 +#define SCLK_OTG_ADP 62 +#define SCLK_MAC_SRC 63 +#define SCLK_MAC 64 +#define SCLK_MAC_REF 65 +#define SCLK_MAC_RX_TX 66 +#define SCLK_MAC_RMII 67 +#define SCLK_DDR_MON_TIMER 68 +#define SCLK_DDR_MON 69 +#define SCLK_DDRCLK 70 +#define SCLK_PMU 71 +#define SCLK_USBPHY_REF 72 +#define SCLK_WIFI 73 +#define SCLK_PVTM_PMU 74 +#define SCLK_PDM 75 +#define SCLK_I2S0_8CH_TX 76 +#define SCLK_I2S0_8CH_TX_OUT 77 +#define SCLK_I2S0_8CH_RX 78 +#define SCLK_I2S0_8CH_RX_OUT 79 +#define SCLK_I2S1_8CH_TX 80 +#define SCLK_I2S1_8CH_TX_OUT 81 +#define SCLK_I2S1_8CH_RX 82 +#define SCLK_I2S1_8CH_RX_OUT 83 +#define SCLK_I2S2_8CH_TX 84 +#define SCLK_I2S2_8CH_TX_OUT 85 +#define SCLK_I2S2_8CH_RX 86 +#define SCLK_I2S2_8CH_RX_OUT 87 +#define SCLK_I2S3_8CH_TX 88 +#define SCLK_I2S3_8CH_TX_OUT 89 +#define SCLK_I2S3_8CH_RX 90 +#define SCLK_I2S3_8CH_RX_OUT 91 +#define SCLK_I2S0_2CH 92 +#define SCLK_I2S0_2CH_OUT 93 +#define SCLK_I2S1_2CH 94 +#define SCLK_I2S1_2CH_OUT 95 +#define SCLK_SPDIF_TX_DIV 96 +#define SCLK_SPDIF_TX_DIV50 97 +#define SCLK_SPDIF_TX 98 +#define SCLK_SPDIF_RX_DIV 99 +#define SCLK_SPDIF_RX_DIV50 100 +#define SCLK_SPDIF_RX 101 +#define SCLK_I2S0_8CH_TX_MUX 102 +#define SCLK_I2S0_8CH_RX_MUX 103 +#define SCLK_I2S1_8CH_TX_MUX 104 +#define SCLK_I2S1_8CH_RX_MUX 105 +#define SCLK_I2S2_8CH_TX_MUX 106 +#define SCLK_I2S2_8CH_RX_MUX 107 +#define SCLK_I2S3_8CH_TX_MUX 108 +#define SCLK_I2S3_8CH_RX_MUX 109 +#define SCLK_I2S0_8CH_TX_SRC 110 +#define SCLK_I2S0_8CH_RX_SRC 111 +#define SCLK_I2S1_8CH_TX_SRC 112 +#define SCLK_I2S1_8CH_RX_SRC 113 +#define SCLK_I2S2_8CH_TX_SRC 114 +#define SCLK_I2S2_8CH_RX_SRC 115 +#define SCLK_I2S3_8CH_TX_SRC 116 +#define SCLK_I2S3_8CH_RX_SRC 117 +#define SCLK_I2S0_2CH_SRC 118 +#define SCLK_I2S1_2CH_SRC 119 +#define SCLK_PWM1 120 +#define SCLK_PWM2 121 +#define SCLK_OWIRE 122 + +/* dclk */ +#define DCLK_VOP 125 + +/* aclk */ +#define ACLK_BUS_SRC 130 +#define ACLK_BUS 131 +#define ACLK_PERI_SRC 132 +#define ACLK_PERI 133 +#define ACLK_MAC 134 +#define ACLK_CRYPTO 135 +#define ACLK_VOP 136 +#define ACLK_GIC 137 +#define ACLK_DMAC0 138 +#define ACLK_DMAC1 139 + +/* hclk 
*/ +#define HCLK_BUS 150 +#define HCLK_PERI 151 +#define HCLK_AUDIO 152 +#define HCLK_NANDC 153 +#define HCLK_SDMMC 154 +#define HCLK_SDIO 155 +#define HCLK_EMMC 156 +#define HCLK_SFC 157 +#define HCLK_OTG 158 +#define HCLK_HOST 159 +#define HCLK_HOST_ARB 160 +#define HCLK_PDM 161 +#define HCLK_SPDIFTX 162 +#define HCLK_SPDIFRX 163 +#define HCLK_I2S0_8CH 164 +#define HCLK_I2S1_8CH 165 +#define HCLK_I2S2_8CH 166 +#define HCLK_I2S3_8CH 167 +#define HCLK_I2S0_2CH 168 +#define HCLK_I2S1_2CH 169 +#define HCLK_VAD 170 +#define HCLK_CRYPTO 171 +#define HCLK_VOP 172 + +/* pclk */ +#define PCLK_BUS 190 +#define PCLK_DDR 191 +#define PCLK_PERI 192 +#define PCLK_PMU 193 +#define PCLK_AUDIO 194 +#define PCLK_MAC 195 +#define PCLK_ACODEC 196 +#define PCLK_UART0 197 +#define PCLK_UART1 198 +#define PCLK_UART2 199 +#define PCLK_UART3 200 +#define PCLK_UART4 201 +#define PCLK_I2C0 202 +#define PCLK_I2C1 203 +#define PCLK_I2C2 204 +#define PCLK_I2C3 205 +#define PCLK_PWM0 206 +#define PCLK_SPI0 207 +#define PCLK_SPI1 208 +#define PCLK_SPI2 209 +#define PCLK_SARADC 210 +#define PCLK_TSADC 211 +#define PCLK_TIMER 212 +#define PCLK_OTP_NS 213 +#define PCLK_WDT 214 +#define PCLK_GPIO0 215 +#define PCLK_GPIO1 216 +#define PCLK_GPIO2 217 +#define PCLK_GPIO3 218 +#define PCLK_GPIO4 219 +#define PCLK_SGRF 220 +#define PCLK_GRF 221 +#define PCLK_USBSD_DET 222 +#define PCLK_DDR_UPCTL 223 +#define PCLK_DDR_MON 224 +#define PCLK_DDRPHY 225 +#define PCLK_DDR_STDBY 226 +#define PCLK_USB_GRF 227 +#define PCLK_CRU 228 +#define PCLK_OTP_PHY 229 +#define PCLK_CPU_BOOST 230 +#define PCLK_PWM1 231 +#define PCLK_PWM2 232 +#define PCLK_CAN 233 +#define PCLK_OWIRE 234 + +#define CLK_NR_CLKS (PCLK_OWIRE + 1) + +/* soft-reset indices */ + +/* cru_softrst_con0 */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NOC 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +/* cru_softrst_con1 */ +#define SRST_DAP 16 +#define SRST_CORE_PVTM 17 +#define SRST_CORE_PRF 18 +#define SRST_CORE_GRF 19 +#define SRST_DDRUPCTL 20 +#define SRST_DDRUPCTL_P 22 +#define SRST_MSCH 23 +#define SRST_DDRMON_P 25 +#define SRST_DDRSTDBY_P 26 +#define SRST_DDRSTDBY 27 +#define SRST_DDRPHY 28 +#define SRST_DDRPHY_DIV 29 +#define SRST_DDRPHY_P 30 + +/* cru_softrst_con2 */ +#define SRST_BUS_NIU_H 32 +#define SRST_USB_NIU_P 33 +#define SRST_CRYPTO_A 34 +#define SRST_CRYPTO_H 35 +#define SRST_CRYPTO 36 +#define SRST_CRYPTO_APK 37 +#define SRST_VOP_A 38 +#define SRST_VOP_H 39 +#define SRST_VOP_D 40 +#define SRST_INTMEM_A 41 +#define SRST_ROM_H 42 +#define SRST_GIC_A 43 +#define SRST_UART0_P 44 +#define SRST_UART0 45 +#define SRST_UART1_P 46 +#define SRST_UART1 47 + +/* cru_softrst_con3 */ +#define SRST_UART2_P 48 +#define SRST_UART2 49 +#define SRST_UART3_P 50 +#define SRST_UART3 51 +#define SRST_UART4_P 52 +#define SRST_UART4 53 +#define SRST_I2C0_P 54 +#define SRST_I2C0 55 +#define SRST_I2C1_P 56 +#define SRST_I2C1 57 +#define SRST_I2C2_P 58 +#define SRST_I2C2 59 +#define SRST_I2C3_P 60 +#define SRST_I2C3 61 +#define SRST_PWM0_P 62 +#define SRST_PWM0 63 + +/* cru_softrst_con4 */ +#define SRST_SPI0_P 64 +#define SRST_SPI0 65 +#define SRST_SPI1_P 66 +#define SRST_SPI1 67 +#define SRST_SPI2_P 68 +#define SRST_SPI2 69 +#define SRST_SARADC_P 70 +#define SRST_TSADC_P 71 +#define 
SRST_TSADC 72 +#define SRST_TIMER0_P 73 +#define SRST_TIMER0 74 +#define SRST_TIMER1 75 +#define SRST_TIMER2 76 +#define SRST_TIMER3 77 +#define SRST_TIMER4 78 +#define SRST_TIMER5 79 + +/* cru_softrst_con5 */ +#define SRST_OTP_NS_P 80 +#define SRST_OTP_NS_SBPI 81 +#define SRST_OTP_NS_USR 82 +#define SRST_OTP_PHY_P 83 +#define SRST_OTP_PHY 84 +#define SRST_GPIO0_P 86 +#define SRST_GPIO1_P 87 +#define SRST_GPIO2_P 88 +#define SRST_GPIO3_P 89 +#define SRST_GPIO4_P 90 +#define SRST_GRF_P 91 +#define SRST_USBSD_DET_P 92 +#define SRST_PMU 93 +#define SRST_PMU_PVTM 94 +#define SRST_USB_GRF_P 95 + +/* cru_softrst_con6 */ +#define SRST_CPU_BOOST 96 +#define SRST_CPU_BOOST_P 97 +#define SRST_PWM1_P 98 +#define SRST_PWM1 99 +#define SRST_PWM2_P 100 +#define SRST_PWM2 101 +#define SRST_PERI_NIU_A 104 +#define SRST_PERI_NIU_H 105 +#define SRST_PERI_NIU_p 106 +#define SRST_USB2OTG_H 107 +#define SRST_USB2OTG 108 +#define SRST_USB2OTG_ADP 109 +#define SRST_USB2HOST_H 110 +#define SRST_USB2HOST_ARB_H 111 + +/* cru_softrst_con7 */ +#define SRST_USB2HOST_AUX_H 112 +#define SRST_USB2HOST_EHCI 113 +#define SRST_USB2HOST 114 +#define SRST_USBPHYPOR 115 +#define SRST_UTMI0 116 +#define SRST_UTMI1 117 +#define SRST_SDIO_H 118 +#define SRST_EMMC_H 119 +#define SRST_SFC_H 120 +#define SRST_SFC 121 +#define SRST_SD_H 122 +#define SRST_NANDC_H 123 +#define SRST_NANDC_N 124 +#define SRST_MAC_A 125 +#define SRST_CAN_P 126 +#define SRST_OWIRE_P 127 + +/* cru_softrst_con8 */ +#define SRST_AUDIO_NIU_H 128 +#define SRST_AUDIO_NIU_P 129 +#define SRST_PDM_H 130 +#define SRST_PDM_M 131 +#define SRST_SPDIFTX_H 132 +#define SRST_SPDIFTX_M 133 +#define SRST_SPDIFRX_H 134 +#define SRST_SPDIFRX_M 135 +#define SRST_I2S0_8CH_H 136 +#define SRST_I2S0_8CH_TX_M 137 +#define SRST_I2S0_8CH_RX_M 138 +#define SRST_I2S1_8CH_H 139 +#define SRST_I2S1_8CH_TX_M 140 +#define SRST_I2S1_8CH_RX_M 141 +#define SRST_I2S2_8CH_H 142 +#define SRST_I2S2_8CH_TX_M 143 + +/* cru_softrst_con9 */ +#define SRST_I2S2_8CH_RX_M 144 +#define SRST_I2S3_8CH_H 145 +#define SRST_I2S3_8CH_TX_M 146 +#define SRST_I2S3_8CH_RX_M 147 +#define SRST_I2S0_2CH_H 148 +#define SRST_I2S0_2CH_M 149 +#define SRST_I2S1_2CH_H 150 +#define SRST_I2S1_2CH_M 151 +#define SRST_VAD_H 152 +#define SRST_ACODEC_P 153 + +#endif diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h new file mode 100644 index 0000000..555b4ff --- /dev/null +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -0,0 +1,393 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. 
+ * Author: Elaine + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_RTC32K 30 +#define SCLK_SDMMC_EXT 31 +#define SCLK_SPI 32 +#define SCLK_SDMMC 33 +#define SCLK_SDIO 34 +#define SCLK_EMMC 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_UART0 38 +#define SCLK_UART1 39 +#define SCLK_UART2 40 +#define SCLK_I2S0 41 +#define SCLK_I2S1 42 +#define SCLK_I2S2 43 +#define SCLK_I2S1_OUT 44 +#define SCLK_I2S2_OUT 45 +#define SCLK_SPDIF 46 +#define SCLK_TIMER0 47 +#define SCLK_TIMER1 48 +#define SCLK_TIMER2 49 +#define SCLK_TIMER3 50 +#define SCLK_TIMER4 51 +#define SCLK_TIMER5 52 +#define SCLK_WIFI 53 +#define SCLK_CIF_OUT 54 +#define SCLK_I2C0 55 +#define SCLK_I2C1 56 +#define SCLK_I2C2 57 +#define SCLK_I2C3 58 +#define SCLK_CRYPTO 59 +#define SCLK_PWM 60 +#define SCLK_PDM 61 +#define SCLK_EFUSE 62 +#define SCLK_OTP 63 +#define SCLK_DDRCLK 64 +#define SCLK_VDEC_CABAC 65 +#define SCLK_VDEC_CORE 66 +#define SCLK_VENC_DSP 67 +#define SCLK_VENC_CORE 68 +#define SCLK_RGA 69 +#define SCLK_HDMI_SFC 70 +#define SCLK_HDMI_CEC 71 +#define SCLK_USB3_REF 72 +#define SCLK_USB3_SUSPEND 73 +#define SCLK_SDMMC_DRV 74 +#define SCLK_SDIO_DRV 75 +#define SCLK_EMMC_DRV 76 +#define SCLK_SDMMC_EXT_DRV 77 +#define SCLK_SDMMC_SAMPLE 78 +#define SCLK_SDIO_SAMPLE 79 +#define SCLK_EMMC_SAMPLE 80 +#define SCLK_SDMMC_EXT_SAMPLE 81 +#define SCLK_VOP 82 +#define SCLK_MAC2PHY_RXTX 83 +#define SCLK_MAC2PHY_SRC 84 +#define SCLK_MAC2PHY_REF 85 +#define SCLK_MAC2PHY_OUT 86 +#define SCLK_MAC2IO_RX 87 +#define SCLK_MAC2IO_TX 88 +#define SCLK_MAC2IO_REFOUT 89 +#define SCLK_MAC2IO_REF 90 +#define SCLK_MAC2IO_OUT 91 +#define SCLK_TSP 92 +#define SCLK_HSADC_TSP 93 +#define SCLK_USB3PHY_REF 94 +#define SCLK_REF_USB3OTG 95 +#define SCLK_USB3OTG_REF 96 +#define SCLK_USB3OTG_SUSPEND 97 +#define SCLK_REF_USB3OTG_SRC 98 +#define SCLK_MAC2IO_SRC 99 +#define SCLK_MAC2IO 100 +#define SCLK_MAC2PHY 101 +#define SCLK_MAC2IO_EXT 102 + +/* dclk gates */ +#define DCLK_LCDC 120 +#define DCLK_HDMIPHY 121 +#define HDMIPHY 122 +#define USB480M 123 +#define DCLK_LCDC_SRC 124 + +/* aclk gates */ +#define ACLK_AXISRAM 130 +#define ACLK_VOP_PRE 131 +#define ACLK_USB3OTG 132 +#define ACLK_RGA_PRE 133 +#define ACLK_DMAC 134 +#define ACLK_GPU 135 +#define ACLK_BUS_PRE 136 +#define ACLK_PERI_PRE 137 +#define ACLK_RKVDEC_PRE 138 +#define ACLK_RKVDEC 139 +#define ACLK_RKVENC 140 +#define ACLK_VPU_PRE 141 +#define ACLK_VIO_PRE 142 +#define ACLK_VPU 143 +#define ACLK_VIO 144 +#define ACLK_VOP 145 +#define ACLK_GMAC 146 +#define ACLK_H265 147 +#define ACLK_H264 148 +#define ACLK_MAC2PHY 149 +#define ACLK_MAC2IO 150 +#define ACLK_DCF 151 +#define ACLK_TSP 152 +#define ACLK_PERI 153 +#define ACLK_RGA 154 +#define ACLK_IEP 155 +#define ACLK_CIF 156 +#define ACLK_HDCP 157 + +/* pclk gates */ +#define PCLK_GPIO0 200 +#define PCLK_GPIO1 201 +#define PCLK_GPIO2 202 +#define PCLK_GPIO3 203 +#define PCLK_GRF 204 +#define PCLK_I2C0 205 +#define PCLK_I2C1 206 +#define PCLK_I2C2 207 +#define PCLK_I2C3 208 +#define PCLK_SPI 209 +#define PCLK_UART0 210 +#define PCLK_UART1 211 +#define PCLK_UART2 212 +#define PCLK_TSADC 213 +#define PCLK_PWM 214 +#define PCLK_TIMER 215 +#define PCLK_BUS_PRE 216 +#define PCLK_PERI_PRE 217 +#define PCLK_HDMI_CTRL 218 +#define PCLK_HDMI_PHY 219 +#define PCLK_GMAC 220 +#define PCLK_H265 221 +#define 
PCLK_MAC2PHY 222 +#define PCLK_MAC2IO 223 +#define PCLK_USB3PHY_OTG 224 +#define PCLK_USB3PHY_PIPE 225 +#define PCLK_USB3_GRF 226 +#define PCLK_USB2_GRF 227 +#define PCLK_HDMIPHY 228 +#define PCLK_DDR 229 +#define PCLK_PERI 230 +#define PCLK_HDMI 231 +#define PCLK_HDCP 232 +#define PCLK_DCF 233 +#define PCLK_SARADC 234 +#define PCLK_ACODECPHY 235 +#define PCLK_WDT 236 + +/* hclk gates */ +#define HCLK_PERI 308 +#define HCLK_TSP 309 +#define HCLK_GMAC 310 +#define HCLK_I2S0_8CH 311 +#define HCLK_I2S1_8CH 312 +#define HCLK_I2S2_2CH 313 +#define HCLK_SPDIF_8CH 314 +#define HCLK_VOP 315 +#define HCLK_NANDC 316 +#define HCLK_SDMMC 317 +#define HCLK_SDIO 318 +#define HCLK_EMMC 319 +#define HCLK_SDMMC_EXT 320 +#define HCLK_RKVDEC_PRE 321 +#define HCLK_RKVDEC 322 +#define HCLK_RKVENC 323 +#define HCLK_VPU_PRE 324 +#define HCLK_VIO_PRE 325 +#define HCLK_VPU 326 +#define HCLK_BUS_PRE 328 +#define HCLK_PERI_PRE 329 +#define HCLK_H264 330 +#define HCLK_CIF 331 +#define HCLK_OTG_PMU 332 +#define HCLK_OTG 333 +#define HCLK_HOST0 334 +#define HCLK_HOST0_ARB 335 +#define HCLK_CRYPTO_MST 336 +#define HCLK_CRYPTO_SLV 337 +#define HCLK_PDM 338 +#define HCLK_IEP 339 +#define HCLK_RGA 340 +#define HCLK_HDCP 341 + +#define CLK_NR_CLKS (HCLK_HDCP + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NIU 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_A53_GIC 18 +#define SRST_DAP 19 +#define SRST_PMU_P 21 +#define SRST_EFUSE 22 +#define SRST_BUSSYS_H 23 +#define SRST_BUSSYS_P 24 +#define SRST_SPDIF 25 +#define SRST_INTMEM 26 +#define SRST_ROM 27 +#define SRST_GPIO0 28 +#define SRST_GPIO1 29 +#define SRST_GPIO2 30 +#define SRST_GPIO3 31 + +#define SRST_I2S0 32 +#define SRST_I2S1 33 +#define SRST_I2S2 34 +#define SRST_I2S0_H 35 +#define SRST_I2S1_H 36 +#define SRST_I2S2_H 37 +#define SRST_UART0 38 +#define SRST_UART1 39 +#define SRST_UART2 40 +#define SRST_UART0_P 41 +#define SRST_UART1_P 42 +#define SRST_UART2_P 43 +#define SRST_I2C0 44 +#define SRST_I2C1 45 +#define SRST_I2C2 46 +#define SRST_I2C3 47 + +#define SRST_I2C0_P 48 +#define SRST_I2C1_P 49 +#define SRST_I2C2_P 50 +#define SRST_I2C3_P 51 +#define SRST_EFUSE_SE_P 52 +#define SRST_EFUSE_NS_P 53 +#define SRST_PWM0 54 +#define SRST_PWM0_P 55 +#define SRST_DMA 56 +#define SRST_TSP_A 57 +#define SRST_TSP_H 58 +#define SRST_TSP 59 +#define SRST_TSP_HSADC 60 +#define SRST_DCF_A 61 +#define SRST_DCF_P 62 + +#define SRST_SCR 64 +#define SRST_SPI 65 +#define SRST_TSADC 66 +#define SRST_TSADC_P 67 +#define SRST_CRYPTO 68 +#define SRST_SGRF 69 +#define SRST_GRF 70 +#define SRST_USB_GRF 71 +#define SRST_TIMER_6CH_P 72 +#define SRST_TIMER0 73 +#define SRST_TIMER1 74 +#define SRST_TIMER2 75 +#define SRST_TIMER3 76 +#define SRST_TIMER4 77 +#define SRST_TIMER5 78 +#define SRST_USB3GRF 79 + +#define SRST_PHYNIU 80 +#define SRST_HDMIPHY 81 +#define SRST_VDAC 82 +#define SRST_ACODEC_p 83 +#define SRST_SARADC 85 +#define SRST_SARADC_P 86 +#define SRST_GRF_DDR 87 +#define SRST_DFIMON 88 +#define SRST_MSCH 89 +#define SRST_DDRMSCH 91 +#define SRST_DDRCTRL 92 +#define SRST_DDRCTRL_P 93 +#define SRST_DDRPHY 94 +#define SRST_DDRPHY_P 95 + +#define SRST_GMAC_NIU_A 96 +#define SRST_GMAC_NIU_P 97 +#define SRST_GMAC2PHY_A 98 +#define SRST_GMAC2IO_A 99 
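A single peripheral typically pulls IDs from several of the groups above at once; the RK3328 MAC is a convenient example here, since SRST_GMAC2IO_A was just defined. The fragment below is a hedged sketch, assuming the gmac2io label and stmmac-style clock-names used by Rockchip .dtsi files rather than anything stated in this header:

        &gmac2io {
                clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_RX>,
                         <&cru SCLK_MAC2IO_TX>, <&cru ACLK_MAC2IO>,
                         <&cru PCLK_MAC2IO>;
                clock-names = "stmmaceth", "mac_clk_rx", "mac_clk_tx",
                              "aclk_mac", "pclk_mac";
                resets = <&cru SRST_GMAC2IO_A>;
                reset-names = "stmmaceth";
        };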
+#define SRST_MACPHY 100 +#define SRST_OTP_PHY 101 +#define SRST_GPU_A 102 +#define SRST_GPU_NIU_A 103 +#define SRST_SDMMCEXT 104 +#define SRST_PERIPH_NIU_A 105 +#define SRST_PERIHP_NIU_H 106 +#define SRST_PERIHP_P 107 +#define SRST_PERIPHSYS_H 108 +#define SRST_MMC0 109 +#define SRST_SDIO 110 +#define SRST_EMMC 111 + +#define SRST_USB2OTG_H 112 +#define SRST_USB2OTG 113 +#define SRST_USB2OTG_ADP 114 +#define SRST_USB2HOST_H 115 +#define SRST_USB2HOST_ARB 116 +#define SRST_USB2HOST_AUX 117 +#define SRST_USB2HOST_EHCIPHY 118 +#define SRST_USB2HOST_UTMI 119 +#define SRST_USB3OTG 120 +#define SRST_USBPOR 121 +#define SRST_USB2OTG_UTMI 122 +#define SRST_USB2HOST_PHY_UTMI 123 +#define SRST_USB3OTG_UTMI 124 +#define SRST_USB3PHY_U2 125 +#define SRST_USB3PHY_U3 126 +#define SRST_USB3PHY_PIPE 127 + +#define SRST_VIO_A 128 +#define SRST_VIO_BUS_H 129 +#define SRST_VIO_H2P_H 130 +#define SRST_VIO_ARBI_H 131 +#define SRST_VOP_NIU_A 132 +#define SRST_VOP_A 133 +#define SRST_VOP_H 134 +#define SRST_VOP_D 135 +#define SRST_RGA 136 +#define SRST_RGA_NIU_A 137 +#define SRST_RGA_A 138 +#define SRST_RGA_H 139 +#define SRST_IEP_A 140 +#define SRST_IEP_H 141 +#define SRST_HDMI 142 +#define SRST_HDMI_P 143 + +#define SRST_HDCP_A 144 +#define SRST_HDCP 145 +#define SRST_HDCP_H 146 +#define SRST_CIF_A 147 +#define SRST_CIF_H 148 +#define SRST_CIF_P 149 +#define SRST_OTP_P 150 +#define SRST_OTP_SBPI 151 +#define SRST_OTP_USER 152 +#define SRST_DDRCTRL_A 153 +#define SRST_DDRSTDY_P 154 +#define SRST_DDRSTDY 155 +#define SRST_PDM_H 156 +#define SRST_PDM 157 +#define SRST_USB3PHY_OTG_P 158 +#define SRST_USB3PHY_PIPE_P 159 + +#define SRST_VCODEC_A 160 +#define SRST_VCODEC_NIU_A 161 +#define SRST_VCODEC_H 162 +#define SRST_VCODEC_NIU_H 163 +#define SRST_VDEC_A 164 +#define SRST_VDEC_NIU_A 165 +#define SRST_VDEC_H 166 +#define SRST_VDEC_NIU_H 167 +#define SRST_VDEC_CORE 168 +#define SRST_VDEC_CABAC 169 +#define SRST_DDRPHYDIV 175 + +#define SRST_RKVENC_NIU_A 176 +#define SRST_RKVENC_NIU_H 177 +#define SRST_RKVENC_H265_A 178 +#define SRST_RKVENC_H265_P 179 +#define SRST_RKVENC_H265_CORE 180 +#define SRST_RKVENC_H265_DSP 181 +#define SRST_RKVENC_H264_A 182 +#define SRST_RKVENC_H264_H 183 +#define SRST_RKVENC_INTMEM 184 + +#endif diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h new file mode 100644 index 0000000..0a06c5f --- /dev/null +++ b/include/dt-bindings/clock/rk3368-cru.h @@ -0,0 +1,381 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2015 Heiko Stuebner + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H + +/* core clocks */ +#define PLL_APLLB 1 +#define PLL_APLLL 2 +#define PLL_DPLL 3 +#define PLL_CPLL 4 +#define PLL_GPLL 5 +#define PLL_NPLL 6 +#define ARMCLKB 7 +#define ARMCLKL 8 + +/* sclk gates (special clocks) */ +#define SCLK_GPU_CORE 64 +#define SCLK_SPI0 65 +#define SCLK_SPI1 66 +#define SCLK_SPI2 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO0 69 +#define SCLK_EMMC 71 +#define SCLK_TSADC 72 +#define SCLK_SARADC 73 +#define SCLK_NANDC0 75 +#define SCLK_UART0 77 +#define SCLK_UART1 78 +#define SCLK_UART2 79 +#define SCLK_UART3 80 +#define SCLK_UART4 81 +#define SCLK_I2S_8CH 82 +#define SCLK_SPDIF_8CH 83 +#define SCLK_I2S_2CH 84 +#define SCLK_TIMER00 85 +#define SCLK_TIMER01 86 +#define SCLK_TIMER02 87 +#define SCLK_TIMER03 88 +#define SCLK_TIMER04 89 +#define SCLK_TIMER05 90 +#define SCLK_OTGPHY0 93 +#define SCLK_OTG_ADP 96 +#define SCLK_HSICPHY480M 97 +#define SCLK_HSICPHY12M 98 +#define 
SCLK_MACREF 99 +#define SCLK_VOP0_PWM 100 +#define SCLK_MAC_RX 102 +#define SCLK_MAC_TX 103 +#define SCLK_EDP_24M 104 +#define SCLK_EDP 105 +#define SCLK_RGA 106 +#define SCLK_ISP 107 +#define SCLK_HDCP 108 +#define SCLK_HDMI_HDCP 109 +#define SCLK_HDMI_CEC 110 +#define SCLK_HEVC_CABAC 111 +#define SCLK_HEVC_CORE 112 +#define SCLK_I2S_8CH_OUT 113 +#define SCLK_SDMMC_DRV 114 +#define SCLK_SDIO0_DRV 115 +#define SCLK_EMMC_DRV 117 +#define SCLK_SDMMC_SAMPLE 118 +#define SCLK_SDIO0_SAMPLE 119 +#define SCLK_EMMC_SAMPLE 121 +#define SCLK_USBPHY480M 122 +#define SCLK_PVTM_CORE 123 +#define SCLK_PVTM_GPU 124 +#define SCLK_PVTM_PMU 125 +#define SCLK_SFC 126 +#define SCLK_MAC 127 +#define SCLK_MACREF_OUT 128 +#define SCLK_TIMER10 133 +#define SCLK_TIMER11 134 +#define SCLK_TIMER12 135 +#define SCLK_TIMER13 136 +#define SCLK_TIMER14 137 +#define SCLK_TIMER15 138 + +#define DCLK_VOP 190 +#define MCLK_CRYPTO 191 + +/* aclk gates */ +#define ACLK_GPU_MEM 192 +#define ACLK_GPU_CFG 193 +#define ACLK_DMAC_BUS 194 +#define ACLK_DMAC_PERI 195 +#define ACLK_PERI_MMU 196 +#define ACLK_GMAC 197 +#define ACLK_VOP 198 +#define ACLK_VOP_IEP 199 +#define ACLK_RGA 200 +#define ACLK_HDCP 201 +#define ACLK_IEP 202 +#define ACLK_VIO0_NOC 203 +#define ACLK_VIP 204 +#define ACLK_ISP 205 +#define ACLK_VIO1_NOC 206 +#define ACLK_VIDEO 208 +#define ACLK_BUS 209 +#define ACLK_PERI 210 + +/* pclk gates */ +#define PCLK_GPIO0 320 +#define PCLK_GPIO1 321 +#define PCLK_GPIO2 322 +#define PCLK_GPIO3 323 +#define PCLK_PMUGRF 324 +#define PCLK_MAILBOX 325 +#define PCLK_GRF 329 +#define PCLK_SGRF 330 +#define PCLK_PMU 331 +#define PCLK_I2C0 332 +#define PCLK_I2C1 333 +#define PCLK_I2C2 334 +#define PCLK_I2C3 335 +#define PCLK_I2C4 336 +#define PCLK_I2C5 337 +#define PCLK_SPI0 338 +#define PCLK_SPI1 339 +#define PCLK_SPI2 340 +#define PCLK_UART0 341 +#define PCLK_UART1 342 +#define PCLK_UART2 343 +#define PCLK_UART3 344 +#define PCLK_UART4 345 +#define PCLK_TSADC 346 +#define PCLK_SARADC 347 +#define PCLK_SIM 348 +#define PCLK_GMAC 349 +#define PCLK_PWM0 350 +#define PCLK_PWM1 351 +#define PCLK_TIMER0 353 +#define PCLK_TIMER1 354 +#define PCLK_EDP_CTRL 355 +#define PCLK_MIPI_DSI0 356 +#define PCLK_MIPI_CSI 358 +#define PCLK_HDCP 359 +#define PCLK_HDMI_CTRL 360 +#define PCLK_VIO_H2P 361 +#define PCLK_BUS 362 +#define PCLK_PERI 363 +#define PCLK_DDRUPCTL 364 +#define PCLK_DDRPHY 365 +#define PCLK_ISP 366 +#define PCLK_VIP 367 +#define PCLK_WDT 368 +#define PCLK_EFUSE256 369 + +/* hclk gates */ +#define HCLK_SFC 448 +#define HCLK_OTG0 449 +#define HCLK_HOST0 450 +#define HCLK_HOST1 451 +#define HCLK_HSIC 452 +#define HCLK_NANDC0 453 +#define HCLK_TSP 455 +#define HCLK_SDMMC 456 +#define HCLK_SDIO0 457 +#define HCLK_EMMC 459 +#define HCLK_HSADC 460 +#define HCLK_CRYPTO 461 +#define HCLK_I2S_2CH 462 +#define HCLK_I2S_8CH 463 +#define HCLK_SPDIF 464 +#define HCLK_VOP 465 +#define HCLK_ROM 467 +#define HCLK_IEP 468 +#define HCLK_ISP 469 +#define HCLK_RGA 470 +#define HCLK_VIO_AHB_ARBI 471 +#define HCLK_VIO_NOC 472 +#define HCLK_VIP 473 +#define HCLK_VIO_H2P 474 +#define HCLK_VIO_HDCPMMU 475 +#define HCLK_VIDEO 476 +#define HCLK_BUS 477 +#define HCLK_PERI 478 + +#define CLK_NR_CLKS (HCLK_PERI + 1) + +/* soft-reset indices */ +#define SRST_CORE_B0 0 +#define SRST_CORE_B1 1 +#define SRST_CORE_B2 2 +#define SRST_CORE_B3 3 +#define SRST_CORE_B0_PO 4 +#define SRST_CORE_B1_PO 5 +#define SRST_CORE_B2_PO 6 +#define SRST_CORE_B3_PO 7 +#define SRST_L2_B 8 +#define SRST_ADB_B 9 +#define SRST_PD_CORE_B_NIU 10 +#define SRST_PDBUS_STRSYS 11 +#define 
SRST_SOCDBG_B 14 +#define SRST_CORE_B_DBG 15 + +#define SRST_DMAC1 18 +#define SRST_INTMEM 19 +#define SRST_ROM 20 +#define SRST_SPDIF8CH 21 +#define SRST_I2S8CH 23 +#define SRST_MAILBOX 24 +#define SRST_I2S2CH 25 +#define SRST_EFUSE_256 26 +#define SRST_MCU_SYS 28 +#define SRST_MCU_PO 29 +#define SRST_MCU_NOC 30 +#define SRST_EFUSE 31 + +#define SRST_GPIO0 32 +#define SRST_GPIO1 33 +#define SRST_GPIO2 34 +#define SRST_GPIO3 35 +#define SRST_GPIO4 36 +#define SRST_PMUGRF 41 +#define SRST_I2C0 42 +#define SRST_I2C1 43 +#define SRST_I2C2 44 +#define SRST_I2C3 45 +#define SRST_I2C4 46 +#define SRST_I2C5 47 + +#define SRST_DWPWM 48 +#define SRST_MMC_PERI 49 +#define SRST_PERIPH_MMU 50 +#define SRST_GRF 55 +#define SRST_PMU 56 +#define SRST_PERIPH_AXI 57 +#define SRST_PERIPH_AHB 58 +#define SRST_PERIPH_APB 59 +#define SRST_PERIPH_NIU 60 +#define SRST_PDPERI_AHB_ARBI 61 +#define SRST_EMEM 62 +#define SRST_USB_PERI 63 + +#define SRST_DMAC2 64 +#define SRST_MAC 66 +#define SRST_GPS 67 +#define SRST_RKPWM 69 +#define SRST_USBHOST0 72 +#define SRST_HSIC 73 +#define SRST_HSIC_AUX 74 +#define SRST_HSIC_PHY 75 +#define SRST_HSADC 76 +#define SRST_NANDC0 77 +#define SRST_SFC 79 + +#define SRST_SPI0 83 +#define SRST_SPI1 84 +#define SRST_SPI2 85 +#define SRST_SARADC 87 +#define SRST_PDALIVE_NIU 88 +#define SRST_PDPMU_INTMEM 89 +#define SRST_PDPMU_NIU 90 +#define SRST_SGRF 91 + +#define SRST_VIO_ARBI 96 +#define SRST_RGA_NIU 97 +#define SRST_VIO0_NIU_AXI 98 +#define SRST_VIO_NIU_AHB 99 +#define SRST_LCDC0_AXI 100 +#define SRST_LCDC0_AHB 101 +#define SRST_LCDC0_DCLK 102 +#define SRST_VIP 104 +#define SRST_RGA_CORE 105 +#define SRST_IEP_AXI 106 +#define SRST_IEP_AHB 107 +#define SRST_RGA_AXI 108 +#define SRST_RGA_AHB 109 +#define SRST_ISP 110 +#define SRST_EDP_24M 111 + +#define SRST_VIDEO_AXI 112 +#define SRST_VIDEO_AHB 113 +#define SRST_MIPIDPHYTX 114 +#define SRST_MIPIDSI0 115 +#define SRST_MIPIDPHYRX 116 +#define SRST_MIPICSI 117 +#define SRST_GPU 120 +#define SRST_HDMI 121 +#define SRST_EDP 122 +#define SRST_PMU_PVTM 123 +#define SRST_CORE_PVTM 124 +#define SRST_GPU_PVTM 125 +#define SRST_GPU_SYS 126 +#define SRST_GPU_MEM_NIU 127 + +#define SRST_MMC0 128 +#define SRST_SDIO0 129 +#define SRST_EMMC 131 +#define SRST_USBOTG_AHB 132 +#define SRST_USBOTG_PHY 133 +#define SRST_USBOTG_CON 134 +#define SRST_USBHOST0_AHB 135 +#define SRST_USBHOST0_PHY 136 +#define SRST_USBHOST0_CON 137 +#define SRST_USBOTG_UTMI 138 +#define SRST_USBHOST1_UTMI 139 +#define SRST_USB_ADP 141 + +#define SRST_CORESIGHT 144 +#define SRST_PD_CORE_AHB_NOC 145 +#define SRST_PD_CORE_APB_NOC 146 +#define SRST_GIC 148 +#define SRST_LCDC_PWM0 149 +#define SRST_RGA_H2P_BRG 153 +#define SRST_VIDEO 154 +#define SRST_GPU_CFG_NIU 157 +#define SRST_TSADC 159 + +#define SRST_DDRPHY0 160 +#define SRST_DDRPHY0_APB 161 +#define SRST_DDRCTRL0 162 +#define SRST_DDRCTRL0_APB 163 +#define SRST_VIDEO_NIU 165 +#define SRST_VIDEO_NIU_AHB 167 +#define SRST_DDRMSCH0 170 +#define SRST_PDBUS_AHB 173 +#define SRST_CRYPTO 174 + +#define SRST_UART0 179 +#define SRST_UART1 180 +#define SRST_UART2 181 +#define SRST_UART3 182 +#define SRST_UART4 183 +#define SRST_SIMC 186 +#define SRST_TSP 188 +#define SRST_TSP_CLKIN0 189 + +#define SRST_CORE_L0 192 +#define SRST_CORE_L1 193 +#define SRST_CORE_L2 194 +#define SRST_CORE_L3 195 +#define SRST_CORE_L0_PO 195 +#define SRST_CORE_L1_PO 197 +#define SRST_CORE_L2_PO 198 +#define SRST_CORE_L3_PO 199 +#define SRST_L2_L 200 +#define SRST_ADB_L 201 +#define SRST_PD_CORE_L_NIU 202 +#define SRST_CCI_SYS 203 +#define SRST_CCI_DDR 
204 +#define SRST_CCI 205 +#define SRST_SOCDBG_L 206 +#define SRST_CORE_L_DBG 207 + +#define SRST_CORE_B0_NC 208 +#define SRST_CORE_B0_PO_NC 209 +#define SRST_L2_B_NC 210 +#define SRST_ADB_B_NC 211 +#define SRST_PD_CORE_B_NIU_NC 212 +#define SRST_PDBUS_STRSYS_NC 213 +#define SRST_CORE_L0_NC 214 +#define SRST_CORE_L0_PO_NC 215 +#define SRST_L2_L_NC 216 +#define SRST_ADB_L_NC 217 +#define SRST_PD_CORE_L_NIU_NC 218 +#define SRST_CCI_SYS_NC 219 +#define SRST_CCI_DDR_NC 220 +#define SRST_CCI_NC 221 +#define SRST_TRACE_NC 222 + +#define SRST_TIMER00 224 +#define SRST_TIMER01 225 +#define SRST_TIMER02 226 +#define SRST_TIMER03 227 +#define SRST_TIMER04 228 +#define SRST_TIMER05 229 +#define SRST_TIMER10 230 +#define SRST_TIMER11 231 +#define SRST_TIMER12 232 +#define SRST_TIMER13 233 +#define SRST_TIMER14 234 +#define SRST_TIMER15 235 +#define SRST_TIMER0_APB 236 +#define SRST_TIMER1_APB 237 + +#endif diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h new file mode 100644 index 0000000..44e0a31 --- /dev/null +++ b/include/dt-bindings/clock/rk3399-cru.h @@ -0,0 +1,751 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Xing Zheng + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H + +/* core clocks */ +#define PLL_APLLL 1 +#define PLL_APLLB 2 +#define PLL_DPLL 3 +#define PLL_CPLL 4 +#define PLL_GPLL 5 +#define PLL_NPLL 6 +#define PLL_VPLL 7 +#define ARMCLKL 8 +#define ARMCLKB 9 + +/* sclk gates (special clocks) */ +#define SCLK_I2C1 65 +#define SCLK_I2C2 66 +#define SCLK_I2C3 67 +#define SCLK_I2C5 68 +#define SCLK_I2C6 69 +#define SCLK_I2C7 70 +#define SCLK_SPI0 71 +#define SCLK_SPI1 72 +#define SCLK_SPI2 73 +#define SCLK_SPI4 74 +#define SCLK_SPI5 75 +#define SCLK_SDMMC 76 +#define SCLK_SDIO 77 +#define SCLK_EMMC 78 +#define SCLK_TSADC 79 +#define SCLK_SARADC 80 +#define SCLK_UART0 81 +#define SCLK_UART1 82 +#define SCLK_UART2 83 +#define SCLK_UART3 84 +#define SCLK_SPDIF_8CH 85 +#define SCLK_I2S0_8CH 86 +#define SCLK_I2S1_8CH 87 +#define SCLK_I2S2_8CH 88 +#define SCLK_I2S_8CH_OUT 89 +#define SCLK_TIMER00 90 +#define SCLK_TIMER01 91 +#define SCLK_TIMER02 92 +#define SCLK_TIMER03 93 +#define SCLK_TIMER04 94 +#define SCLK_TIMER05 95 +#define SCLK_TIMER06 96 +#define SCLK_TIMER07 97 +#define SCLK_TIMER08 98 +#define SCLK_TIMER09 99 +#define SCLK_TIMER10 100 +#define SCLK_TIMER11 101 +#define SCLK_MACREF 102 +#define SCLK_MAC_RX 103 +#define SCLK_MAC_TX 104 +#define SCLK_MAC 105 +#define SCLK_MACREF_OUT 106 +#define SCLK_VOP0_PWM 107 +#define SCLK_VOP1_PWM 108 +#define SCLK_RGA_CORE 109 +#define SCLK_ISP0 110 +#define SCLK_ISP1 111 +#define SCLK_HDMI_CEC 112 +#define SCLK_HDMI_SFR 113 +#define SCLK_DP_CORE 114 +#define SCLK_PVTM_CORE_L 115 +#define SCLK_PVTM_CORE_B 116 +#define SCLK_PVTM_GPU 117 +#define SCLK_PVTM_DDR 118 +#define SCLK_MIPIDPHY_REF 119 +#define SCLK_MIPIDPHY_CFG 120 +#define SCLK_HSICPHY 121 +#define SCLK_USBPHY480M 122 +#define SCLK_USB2PHY0_REF 123 +#define SCLK_USB2PHY1_REF 124 +#define SCLK_UPHY0_TCPDPHY_REF 125 +#define SCLK_UPHY0_TCPDCORE 126 +#define SCLK_UPHY1_TCPDPHY_REF 127 +#define SCLK_UPHY1_TCPDCORE 128 +#define SCLK_USB3OTG0_REF 129 +#define SCLK_USB3OTG1_REF 130 +#define SCLK_USB3OTG0_SUSPEND 131 +#define SCLK_USB3OTG1_SUSPEND 132 +#define SCLK_CRYPTO0 133 +#define SCLK_CRYPTO1 134 +#define SCLK_CCI_TRACE 135 +#define SCLK_CS 136 +#define SCLK_CIF_OUT 137 +#define SCLK_PCIEPHY_REF 138 +#define SCLK_PCIE_CORE 139 
+#define SCLK_M0_PERILP 140 +#define SCLK_M0_PERILP_DEC 141 +#define SCLK_CM0S 142 +#define SCLK_DBG_NOC 143 +#define SCLK_DBG_PD_CORE_B 144 +#define SCLK_DBG_PD_CORE_L 145 +#define SCLK_DFIMON0_TIMER 146 +#define SCLK_DFIMON1_TIMER 147 +#define SCLK_INTMEM0 148 +#define SCLK_INTMEM1 149 +#define SCLK_INTMEM2 150 +#define SCLK_INTMEM3 151 +#define SCLK_INTMEM4 152 +#define SCLK_INTMEM5 153 +#define SCLK_SDMMC_DRV 154 +#define SCLK_SDMMC_SAMPLE 155 +#define SCLK_SDIO_DRV 156 +#define SCLK_SDIO_SAMPLE 157 +#define SCLK_VDU_CORE 158 +#define SCLK_VDU_CA 159 +#define SCLK_PCIE_PM 160 +#define SCLK_SPDIF_REC_DPTX 161 +#define SCLK_DPHY_PLL 162 +#define SCLK_DPHY_TX0_CFG 163 +#define SCLK_DPHY_TX1RX1_CFG 164 +#define SCLK_DPHY_RX0_CFG 165 +#define SCLK_RMII_SRC 166 +#define SCLK_PCIEPHY_REF100M 167 +#define SCLK_DDRC 168 +#define SCLK_TESTCLKOUT1 169 +#define SCLK_TESTCLKOUT2 170 + +#define DCLK_VOP0 180 +#define DCLK_VOP1 181 +#define DCLK_VOP0_DIV 182 +#define DCLK_VOP1_DIV 183 +#define DCLK_M0_PERILP 184 +#define DCLK_VOP0_FRAC 185 +#define DCLK_VOP1_FRAC 186 + +#define FCLK_CM0S 190 + +/* aclk gates */ +#define ACLK_PERIHP 192 +#define ACLK_PERIHP_NOC 193 +#define ACLK_PERILP0 194 +#define ACLK_PERILP0_NOC 195 +#define ACLK_PERF_PCIE 196 +#define ACLK_PCIE 197 +#define ACLK_INTMEM 198 +#define ACLK_TZMA 199 +#define ACLK_DCF 200 +#define ACLK_CCI 201 +#define ACLK_CCI_NOC0 202 +#define ACLK_CCI_NOC1 203 +#define ACLK_CCI_GRF 204 +#define ACLK_CENTER 205 +#define ACLK_CENTER_MAIN_NOC 206 +#define ACLK_CENTER_PERI_NOC 207 +#define ACLK_GPU 208 +#define ACLK_PERF_GPU 209 +#define ACLK_GPU_GRF 210 +#define ACLK_DMAC0_PERILP 211 +#define ACLK_DMAC1_PERILP 212 +#define ACLK_GMAC 213 +#define ACLK_GMAC_NOC 214 +#define ACLK_PERF_GMAC 215 +#define ACLK_VOP0_NOC 216 +#define ACLK_VOP0 217 +#define ACLK_VOP1_NOC 218 +#define ACLK_VOP1 219 +#define ACLK_RGA 220 +#define ACLK_RGA_NOC 221 +#define ACLK_HDCP 222 +#define ACLK_HDCP_NOC 223 +#define ACLK_HDCP22 224 +#define ACLK_IEP 225 +#define ACLK_IEP_NOC 226 +#define ACLK_VIO 227 +#define ACLK_VIO_NOC 228 +#define ACLK_ISP0 229 +#define ACLK_ISP1 230 +#define ACLK_ISP0_NOC 231 +#define ACLK_ISP1_NOC 232 +#define ACLK_ISP0_WRAPPER 233 +#define ACLK_ISP1_WRAPPER 234 +#define ACLK_VCODEC 235 +#define ACLK_VCODEC_NOC 236 +#define ACLK_VDU 237 +#define ACLK_VDU_NOC 238 +#define ACLK_PERI 239 +#define ACLK_EMMC 240 +#define ACLK_EMMC_CORE 241 +#define ACLK_EMMC_NOC 242 +#define ACLK_EMMC_GRF 243 +#define ACLK_USB3 244 +#define ACLK_USB3_NOC 245 +#define ACLK_USB3OTG0 246 +#define ACLK_USB3OTG1 247 +#define ACLK_USB3_RKSOC_AXI_PERF 248 +#define ACLK_USB3_GRF 249 +#define ACLK_GIC 250 +#define ACLK_GIC_NOC 251 +#define ACLK_GIC_ADB400_CORE_L_2_GIC 252 +#define ACLK_GIC_ADB400_CORE_B_2_GIC 253 +#define ACLK_GIC_ADB400_GIC_2_CORE_L 254 +#define ACLK_GIC_ADB400_GIC_2_CORE_B 255 +#define ACLK_CORE_ADB400_CORE_L_2_CCI500 256 +#define ACLK_CORE_ADB400_CORE_B_2_CCI500 257 +#define ACLK_ADB400M_PD_CORE_L 258 +#define ACLK_ADB400M_PD_CORE_B 259 +#define ACLK_PERF_CORE_L 260 +#define ACLK_PERF_CORE_B 261 +#define ACLK_GIC_PRE 262 +#define ACLK_VOP0_PRE 263 +#define ACLK_VOP1_PRE 264 + +/* pclk gates */ +#define PCLK_PERIHP 320 +#define PCLK_PERIHP_NOC 321 +#define PCLK_PERILP0 322 +#define PCLK_PERILP1 323 +#define PCLK_PERILP1_NOC 324 +#define PCLK_PERILP_SGRF 325 +#define PCLK_PERIHP_GRF 326 +#define PCLK_PCIE 327 +#define PCLK_SGRF 328 +#define PCLK_INTR_ARB 329 +#define PCLK_CENTER_MAIN_NOC 330 +#define PCLK_CIC 331 +#define PCLK_COREDBG_B 332 +#define PCLK_COREDBG_L 
333 +#define PCLK_DBG_CXCS_PD_CORE_B 334 +#define PCLK_DCF 335 +#define PCLK_GPIO2 336 +#define PCLK_GPIO3 337 +#define PCLK_GPIO4 338 +#define PCLK_GRF 339 +#define PCLK_HSICPHY 340 +#define PCLK_I2C1 341 +#define PCLK_I2C2 342 +#define PCLK_I2C3 343 +#define PCLK_I2C5 344 +#define PCLK_I2C6 345 +#define PCLK_I2C7 346 +#define PCLK_SPI0 347 +#define PCLK_SPI1 348 +#define PCLK_SPI2 349 +#define PCLK_SPI4 350 +#define PCLK_SPI5 351 +#define PCLK_UART0 352 +#define PCLK_UART1 353 +#define PCLK_UART2 354 +#define PCLK_UART3 355 +#define PCLK_TSADC 356 +#define PCLK_SARADC 357 +#define PCLK_GMAC 358 +#define PCLK_GMAC_NOC 359 +#define PCLK_TIMER0 360 +#define PCLK_TIMER1 361 +#define PCLK_EDP 362 +#define PCLK_EDP_NOC 363 +#define PCLK_EDP_CTRL 364 +#define PCLK_VIO 365 +#define PCLK_VIO_NOC 366 +#define PCLK_VIO_GRF 367 +#define PCLK_MIPI_DSI0 368 +#define PCLK_MIPI_DSI1 369 +#define PCLK_HDCP 370 +#define PCLK_HDCP_NOC 371 +#define PCLK_HDMI_CTRL 372 +#define PCLK_DP_CTRL 373 +#define PCLK_HDCP22 374 +#define PCLK_GASKET 375 +#define PCLK_DDR 376 +#define PCLK_DDR_MON 377 +#define PCLK_DDR_SGRF 378 +#define PCLK_ISP1_WRAPPER 379 +#define PCLK_WDT 380 +#define PCLK_EFUSE1024NS 381 +#define PCLK_EFUSE1024S 382 +#define PCLK_PMU_INTR_ARB 383 +#define PCLK_MAILBOX0 384 +#define PCLK_USBPHY_MUX_G 385 +#define PCLK_UPHY0_TCPHY_G 386 +#define PCLK_UPHY0_TCPD_G 387 +#define PCLK_UPHY1_TCPHY_G 388 +#define PCLK_UPHY1_TCPD_G 389 +#define PCLK_ALIVE 390 + +/* hclk gates */ +#define HCLK_PERIHP 448 +#define HCLK_PERILP0 449 +#define HCLK_PERILP1 450 +#define HCLK_PERILP0_NOC 451 +#define HCLK_PERILP1_NOC 452 +#define HCLK_M0_PERILP 453 +#define HCLK_M0_PERILP_NOC 454 +#define HCLK_AHB1TOM 455 +#define HCLK_HOST0 456 +#define HCLK_HOST0_ARB 457 +#define HCLK_HOST1 458 +#define HCLK_HOST1_ARB 459 +#define HCLK_HSIC 460 +#define HCLK_SD 461 +#define HCLK_SDMMC 462 +#define HCLK_SDMMC_NOC 463 +#define HCLK_M_CRYPTO0 464 +#define HCLK_M_CRYPTO1 465 +#define HCLK_S_CRYPTO0 466 +#define HCLK_S_CRYPTO1 467 +#define HCLK_I2S0_8CH 468 +#define HCLK_I2S1_8CH 469 +#define HCLK_I2S2_8CH 470 +#define HCLK_SPDIF 471 +#define HCLK_VOP0_NOC 472 +#define HCLK_VOP0 473 +#define HCLK_VOP1_NOC 474 +#define HCLK_VOP1 475 +#define HCLK_ROM 476 +#define HCLK_IEP 477 +#define HCLK_IEP_NOC 478 +#define HCLK_ISP0 479 +#define HCLK_ISP1 480 +#define HCLK_ISP0_NOC 481 +#define HCLK_ISP1_NOC 482 +#define HCLK_ISP0_WRAPPER 483 +#define HCLK_ISP1_WRAPPER 484 +#define HCLK_RGA 485 +#define HCLK_RGA_NOC 486 +#define HCLK_HDCP 487 +#define HCLK_HDCP_NOC 488 +#define HCLK_HDCP22 489 +#define HCLK_VCODEC 490 +#define HCLK_VCODEC_NOC 491 +#define HCLK_VDU 492 +#define HCLK_VDU_NOC 493 +#define HCLK_SDIO 494 +#define HCLK_SDIO_NOC 495 +#define HCLK_SDIOAUDIO_NOC 496 + +#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1) + +/* pmu-clocks indices */ + +#define PLL_PPLL 1 + +#define SCLK_32K_SUSPEND_PMU 2 +#define SCLK_SPI3_PMU 3 +#define SCLK_TIMER12_PMU 4 +#define SCLK_TIMER13_PMU 5 +#define SCLK_UART4_PMU 6 +#define SCLK_PVTM_PMU 7 +#define SCLK_WIFI_PMU 8 +#define SCLK_I2C0_PMU 9 +#define SCLK_I2C4_PMU 10 +#define SCLK_I2C8_PMU 11 + +#define PCLK_SRC_PMU 19 +#define PCLK_PMU 20 +#define PCLK_PMUGRF_PMU 21 +#define PCLK_INTMEM1_PMU 22 +#define PCLK_GPIO0_PMU 23 +#define PCLK_GPIO1_PMU 24 +#define PCLK_SGRF_PMU 25 +#define PCLK_NOC_PMU 26 +#define PCLK_I2C0_PMU 27 +#define PCLK_I2C4_PMU 28 +#define PCLK_I2C8_PMU 29 +#define PCLK_RKPWM_PMU 30 +#define PCLK_SPI3_PMU 31 +#define PCLK_TIMER_PMU 32 +#define PCLK_MAILBOX_PMU 33 +#define PCLK_UART4_PMU 34 
+#define PCLK_WDT_M0_PMU 35 + +#define FCLK_CM0S_SRC_PMU 44 +#define FCLK_CM0S_PMU 45 +#define SCLK_CM0S_PMU 46 +#define HCLK_CM0S_PMU 47 +#define DCLK_CM0S_PMU 48 +#define PCLK_INTR_ARB_PMU 49 +#define HCLK_NOC_PMU 50 + +#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1) + +/* soft-reset indices */ + +/* cru_softrst_con0 */ +#define SRST_CORE_L0 0 +#define SRST_CORE_B0 1 +#define SRST_CORE_PO_L0 2 +#define SRST_CORE_PO_B0 3 +#define SRST_L2_L 4 +#define SRST_L2_B 5 +#define SRST_ADB_L 6 +#define SRST_ADB_B 7 +#define SRST_A_CCI 8 +#define SRST_A_CCIM0_NOC 9 +#define SRST_A_CCIM1_NOC 10 +#define SRST_DBG_NOC 11 + +/* cru_softrst_con1 */ +#define SRST_CORE_L0_T 16 +#define SRST_CORE_L1 17 +#define SRST_CORE_L2 18 +#define SRST_CORE_L3 19 +#define SRST_CORE_PO_L0_T 20 +#define SRST_CORE_PO_L1 21 +#define SRST_CORE_PO_L2 22 +#define SRST_CORE_PO_L3 23 +#define SRST_A_ADB400_GIC2COREL 24 +#define SRST_A_ADB400_COREL2GIC 25 +#define SRST_P_DBG_L 26 +#define SRST_L2_L_T 28 +#define SRST_ADB_L_T 29 +#define SRST_A_RKPERF_L 30 +#define SRST_PVTM_CORE_L 31 + +/* cru_softrst_con2 */ +#define SRST_CORE_B0_T 32 +#define SRST_CORE_B1 33 +#define SRST_CORE_PO_B0_T 36 +#define SRST_CORE_PO_B1 37 +#define SRST_A_ADB400_GIC2COREB 40 +#define SRST_A_ADB400_COREB2GIC 41 +#define SRST_P_DBG_B 42 +#define SRST_L2_B_T 43 +#define SRST_ADB_B_T 45 +#define SRST_A_RKPERF_B 46 +#define SRST_PVTM_CORE_B 47 + +/* cru_softrst_con3 */ +#define SRST_A_CCI_T 50 +#define SRST_A_CCIM0_NOC_T 51 +#define SRST_A_CCIM1_NOC_T 52 +#define SRST_A_ADB400M_PD_CORE_B_T 53 +#define SRST_A_ADB400M_PD_CORE_L_T 54 +#define SRST_DBG_NOC_T 55 +#define SRST_DBG_CXCS 56 +#define SRST_CCI_TRACE 57 +#define SRST_P_CCI_GRF 58 + +/* cru_softrst_con4 */ +#define SRST_A_CENTER_MAIN_NOC 64 +#define SRST_A_CENTER_PERI_NOC 65 +#define SRST_P_CENTER_MAIN 66 +#define SRST_P_DDRMON 67 +#define SRST_P_CIC 68 +#define SRST_P_CENTER_SGRF 69 +#define SRST_DDR0_MSCH 70 +#define SRST_DDRCFG0_MSCH 71 +#define SRST_DDR0 72 +#define SRST_DDRPHY0 73 +#define SRST_DDR1_MSCH 74 +#define SRST_DDRCFG1_MSCH 75 +#define SRST_DDR1 76 +#define SRST_DDRPHY1 77 +#define SRST_DDR_CIC 78 +#define SRST_PVTM_DDR 79 + +/* cru_softrst_con5 */ +#define SRST_A_VCODEC_NOC 80 +#define SRST_A_VCODEC 81 +#define SRST_H_VCODEC_NOC 82 +#define SRST_H_VCODEC 83 +#define SRST_A_VDU_NOC 88 +#define SRST_A_VDU 89 +#define SRST_H_VDU_NOC 90 +#define SRST_H_VDU 91 +#define SRST_VDU_CORE 92 +#define SRST_VDU_CA 93 + +/* cru_softrst_con6 */ +#define SRST_A_IEP_NOC 96 +#define SRST_A_VOP_IEP 97 +#define SRST_A_IEP 98 +#define SRST_H_IEP_NOC 99 +#define SRST_H_IEP 100 +#define SRST_A_RGA_NOC 102 +#define SRST_A_RGA 103 +#define SRST_H_RGA_NOC 104 +#define SRST_H_RGA 105 +#define SRST_RGA_CORE 106 +#define SRST_EMMC_NOC 108 +#define SRST_EMMC 109 +#define SRST_EMMC_GRF 110 + +/* cru_softrst_con7 */ +#define SRST_A_PERIHP_NOC 112 +#define SRST_P_PERIHP_GRF 113 +#define SRST_H_PERIHP_NOC 114 +#define SRST_USBHOST0 115 +#define SRST_HOSTC0_AUX 116 +#define SRST_HOST0_ARB 117 +#define SRST_USBHOST1 118 +#define SRST_HOSTC1_AUX 119 +#define SRST_HOST1_ARB 120 +#define SRST_SDIO0 121 +#define SRST_SDMMC 122 +#define SRST_HSIC 123 +#define SRST_HSIC_AUX 124 +#define SRST_AHB1TOM 125 +#define SRST_P_PERIHP_NOC 126 +#define SRST_HSICPHY 127 + +/* cru_softrst_con8 */ +#define SRST_A_PCIE 128 +#define SRST_P_PCIE 129 +#define SRST_PCIE_CORE 130 +#define SRST_PCIE_MGMT 131 +#define SRST_PCIE_MGMT_STICKY 132 +#define SRST_PCIE_PIPE 133 +#define SRST_PCIE_PM 134 +#define SRST_PCIEPHY 135 +#define SRST_A_GMAC_NOC 136 
+#define SRST_A_GMAC 137 +#define SRST_P_GMAC_NOC 138 +#define SRST_P_GMAC_GRF 140 +#define SRST_HSICPHY_POR 142 +#define SRST_HSICPHY_UTMI 143 + +/* cru_softrst_con9 */ +#define SRST_USB2PHY0_POR 144 +#define SRST_USB2PHY0_UTMI_PORT0 145 +#define SRST_USB2PHY0_UTMI_PORT1 146 +#define SRST_USB2PHY0_EHCIPHY 147 +#define SRST_UPHY0_PIPE_L00 148 +#define SRST_UPHY0 149 +#define SRST_UPHY0_TCPDPWRUP 150 +#define SRST_USB2PHY1_POR 152 +#define SRST_USB2PHY1_UTMI_PORT0 153 +#define SRST_USB2PHY1_UTMI_PORT1 154 +#define SRST_USB2PHY1_EHCIPHY 155 +#define SRST_UPHY1_PIPE_L00 156 +#define SRST_UPHY1 157 +#define SRST_UPHY1_TCPDPWRUP 158 + +/* cru_softrst_con10 */ +#define SRST_A_PERILP0_NOC 160 +#define SRST_A_DCF 161 +#define SRST_GIC500 162 +#define SRST_DMAC0_PERILP0 163 +#define SRST_DMAC1_PERILP0 164 +#define SRST_TZMA 165 +#define SRST_INTMEM 166 +#define SRST_ADB400_MST0 167 +#define SRST_ADB400_MST1 168 +#define SRST_ADB400_SLV0 169 +#define SRST_ADB400_SLV1 170 +#define SRST_H_PERILP0 171 +#define SRST_H_PERILP0_NOC 172 +#define SRST_ROM 173 +#define SRST_CRYPTO_S 174 +#define SRST_CRYPTO_M 175 + +/* cru_softrst_con11 */ +#define SRST_P_DCF 176 +#define SRST_CM0S_NOC 177 +#define SRST_CM0S 178 +#define SRST_CM0S_DBG 179 +#define SRST_CM0S_PO 180 +#define SRST_CRYPTO 181 +#define SRST_P_PERILP1_SGRF 182 +#define SRST_P_PERILP1_GRF 183 +#define SRST_CRYPTO1_S 184 +#define SRST_CRYPTO1_M 185 +#define SRST_CRYPTO1 186 +#define SRST_GIC_NOC 188 +#define SRST_SD_NOC 189 +#define SRST_SDIOAUDIO_BRG 190 + +/* cru_softrst_con12 */ +#define SRST_H_PERILP1 192 +#define SRST_H_PERILP1_NOC 193 +#define SRST_H_I2S0_8CH 194 +#define SRST_H_I2S1_8CH 195 +#define SRST_H_I2S2_8CH 196 +#define SRST_H_SPDIF_8CH 197 +#define SRST_P_PERILP1_NOC 198 +#define SRST_P_EFUSE_1024 199 +#define SRST_P_EFUSE_1024S 200 +#define SRST_P_I2C0 201 +#define SRST_P_I2C1 202 +#define SRST_P_I2C2 203 +#define SRST_P_I2C3 204 +#define SRST_P_I2C4 205 +#define SRST_P_I2C5 206 +#define SRST_P_MAILBOX0 207 + +/* cru_softrst_con13 */ +#define SRST_P_UART0 208 +#define SRST_P_UART1 209 +#define SRST_P_UART2 210 +#define SRST_P_UART3 211 +#define SRST_P_SARADC 212 +#define SRST_P_TSADC 213 +#define SRST_P_SPI0 214 +#define SRST_P_SPI1 215 +#define SRST_P_SPI2 216 +#define SRST_P_SPI3 217 +#define SRST_P_SPI4 218 +#define SRST_SPI0 219 +#define SRST_SPI1 220 +#define SRST_SPI2 221 +#define SRST_SPI3 222 +#define SRST_SPI4 223 + +/* cru_softrst_con14 */ +#define SRST_I2S0_8CH 224 +#define SRST_I2S1_8CH 225 +#define SRST_I2S2_8CH 226 +#define SRST_SPDIF_8CH 227 +#define SRST_UART0 228 +#define SRST_UART1 229 +#define SRST_UART2 230 +#define SRST_UART3 231 +#define SRST_TSADC 232 +#define SRST_I2C0 233 +#define SRST_I2C1 234 +#define SRST_I2C2 235 +#define SRST_I2C3 236 +#define SRST_I2C4 237 +#define SRST_I2C5 238 +#define SRST_SDIOAUDIO_NOC 239 + +/* cru_softrst_con15 */ +#define SRST_A_VIO_NOC 240 +#define SRST_A_HDCP_NOC 241 +#define SRST_A_HDCP 242 +#define SRST_H_HDCP_NOC 243 +#define SRST_H_HDCP 244 +#define SRST_P_HDCP_NOC 245 +#define SRST_P_HDCP 246 +#define SRST_P_HDMI_CTRL 247 +#define SRST_P_DP_CTRL 248 +#define SRST_S_DP_CTRL 249 +#define SRST_C_DP_CTRL 250 +#define SRST_P_MIPI_DSI0 251 +#define SRST_P_MIPI_DSI1 252 +#define SRST_DP_CORE 253 +#define SRST_DP_I2S 254 + +/* cru_softrst_con16 */ +#define SRST_GASKET 256 +#define SRST_VIO_GRF 258 +#define SRST_DPTX_SPDIF_REC 259 +#define SRST_HDMI_CTRL 260 +#define SRST_HDCP_CTRL 261 +#define SRST_A_ISP0_NOC 262 +#define SRST_A_ISP1_NOC 263 +#define SRST_H_ISP0_NOC 266 +#define 
SRST_H_ISP1_NOC 267 +#define SRST_H_ISP0 268 +#define SRST_H_ISP1 269 +#define SRST_ISP0 270 +#define SRST_ISP1 271 + +/* cru_softrst_con17 */ +#define SRST_A_VOP0_NOC 272 +#define SRST_A_VOP1_NOC 273 +#define SRST_A_VOP0 274 +#define SRST_A_VOP1 275 +#define SRST_H_VOP0_NOC 276 +#define SRST_H_VOP1_NOC 277 +#define SRST_H_VOP0 278 +#define SRST_H_VOP1 279 +#define SRST_D_VOP0 280 +#define SRST_D_VOP1 281 +#define SRST_VOP0_PWM 282 +#define SRST_VOP1_PWM 283 +#define SRST_P_EDP_NOC 284 +#define SRST_P_EDP_CTRL 285 + +/* cru_softrst_con18 */ +#define SRST_A_GPU 288 +#define SRST_A_GPU_NOC 289 +#define SRST_A_GPU_GRF 290 +#define SRST_PVTM_GPU 291 +#define SRST_A_USB3_NOC 292 +#define SRST_A_USB3_OTG0 293 +#define SRST_A_USB3_OTG1 294 +#define SRST_A_USB3_GRF 295 +#define SRST_PMU 296 + +/* cru_softrst_con19 */ +#define SRST_P_TIMER0_5 304 +#define SRST_TIMER0 305 +#define SRST_TIMER1 306 +#define SRST_TIMER2 307 +#define SRST_TIMER3 308 +#define SRST_TIMER4 309 +#define SRST_TIMER5 310 +#define SRST_P_TIMER6_11 311 +#define SRST_TIMER6 312 +#define SRST_TIMER7 313 +#define SRST_TIMER8 314 +#define SRST_TIMER9 315 +#define SRST_TIMER10 316 +#define SRST_TIMER11 317 +#define SRST_P_INTR_ARB_PMU 318 +#define SRST_P_ALIVE_SGRF 319 + +/* cru_softrst_con20 */ +#define SRST_P_GPIO2 320 +#define SRST_P_GPIO3 321 +#define SRST_P_GPIO4 322 +#define SRST_P_GRF 323 +#define SRST_P_ALIVE_NOC 324 +#define SRST_P_WDT0 325 +#define SRST_P_WDT1 326 +#define SRST_P_INTR_ARB 327 +#define SRST_P_UPHY0_DPTX 328 +#define SRST_P_UPHY0_APB 330 +#define SRST_P_UPHY0_TCPHY 332 +#define SRST_P_UPHY1_TCPHY 333 +#define SRST_P_UPHY0_TCPDCTRL 334 +#define SRST_P_UPHY1_TCPDCTRL 335 + +/* pmu soft-reset indices */ + +/* pmu_cru_softrst_con0 */ +#define SRST_P_NOC 0 +#define SRST_P_INTMEM 1 +#define SRST_H_CM0S 2 +#define SRST_H_CM0S_NOC 3 +#define SRST_DBG_CM0S 4 +#define SRST_PO_CM0S 5 +#define SRST_P_SPI6 6 +#define SRST_SPI6 7 +#define SRST_P_TIMER_0_1 8 +#define SRST_P_TIMER_0 9 +#define SRST_P_TIMER_1 10 +#define SRST_P_UART4 11 +#define SRST_UART4 12 +#define SRST_P_WDT 13 + +/* pmu_cru_softrst_con1 */ +#define SRST_P_I2C6 16 +#define SRST_P_I2C7 17 +#define SRST_P_I2C8 18 +#define SRST_P_MAILBOX 19 +#define SRST_P_RKPWM 20 +#define SRST_P_PMUGRF 21 +#define SRST_P_SGRF 22 +#define SRST_P_GPIO0 23 +#define SRST_P_GPIO1 24 +#define SRST_P_CRU 25 +#define SRST_P_INTR 26 +#define SRST_PVTM 27 +#define SRST_I2C6 28 +#define SRST_I2C7 29 +#define SRST_I2C8 30 + +#endif diff --git a/include/dt-bindings/clock/rk3399-ddr.h b/include/dt-bindings/clock/rk3399-ddr.h new file mode 100644 index 0000000..ed22808 --- /dev/null +++ b/include/dt-bindings/clock/rk3399-ddr.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef DT_BINDINGS_DDR_H +#define DT_BINDINGS_DDR_H + +/* + * DDR3 SDRAM Standard Speed Bins include tCK, tRCD, tRP, tRAS and tRC for + * each corresponding bin. 
+ */ + +/* DDR3-800 (5-5-5) */ +#define DDR3_800D 0 +/* DDR3-800 (6-6-6) */ +#define DDR3_800E 1 +/* DDR3-1066 (6-6-6) */ +#define DDR3_1066E 2 +/* DDR3-1066 (7-7-7) */ +#define DDR3_1066F 3 +/* DDR3-1066 (8-8-8) */ +#define DDR3_1066G 4 +/* DDR3-1333 (7-7-7) */ +#define DDR3_1333F 5 +/* DDR3-1333 (8-8-8) */ +#define DDR3_1333G 6 +/* DDR3-1333 (9-9-9) */ +#define DDR3_1333H 7 +/* DDR3-1333 (10-10-10) */ +#define DDR3_1333J 8 +/* DDR3-1600 (8-8-8) */ +#define DDR3_1600G 9 +/* DDR3-1600 (9-9-9) */ +#define DDR3_1600H 10 +/* DDR3-1600 (10-10-10) */ +#define DDR3_1600J 11 +/* DDR3-1600 (11-11-11) */ +#define DDR3_1600K 12 +/* DDR3-1866 (10-10-10) */ +#define DDR3_1866J 13 +/* DDR3-1866 (11-11-11) */ +#define DDR3_1866K 14 +/* DDR3-1866 (12-12-12) */ +#define DDR3_1866L 15 +/* DDR3-1866 (13-13-13) */ +#define DDR3_1866M 16 +/* DDR3-2133 (11-11-11) */ +#define DDR3_2133K 17 +/* DDR3-2133 (12-12-12) */ +#define DDR3_2133L 18 +/* DDR3-2133 (13-13-13) */ +#define DDR3_2133M 19 +/* DDR3-2133 (14-14-14) */ +#define DDR3_2133N 20 +/* DDR3 ATF default */ +#define DDR3_DEFAULT 21 + +#endif diff --git a/include/dt-bindings/clock/rockchip,rk808.h b/include/dt-bindings/clock/rockchip,rk808.h new file mode 100644 index 0000000..75dabfc --- /dev/null +++ b/include/dt-bindings/clock/rockchip,rk808.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the clock index of the RK808 PMIC clkout. + */ +#ifndef _CLK_ROCKCHIP_RK808 +#define _CLK_ROCKCHIP_RK808 + +/* CLOCKOUT index */ +#define RK808_CLKOUT0 0 +#define RK808_CLKOUT1 1 + +#endif diff --git a/include/dt-bindings/clock/rv1108-cru.h b/include/dt-bindings/clock/rv1108-cru.h new file mode 100644 index 0000000..41d7d60 --- /dev/null +++ b/include/dt-bindings/clock/rv1108-cru.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Shawn Lin + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H + +/* pll id */ +#define PLL_APLL 0 +#define PLL_DPLL 1 +#define PLL_GPLL 2 +#define ARMCLK 3 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_UART0 72 +#define SCLK_UART1 73 +#define SCLK_UART2 74 +#define SCLK_I2S0 75 +#define SCLK_I2S1 76 +#define SCLK_I2S2 77 +#define SCLK_TIMER0 78 +#define SCLK_TIMER1 79 +#define SCLK_SFC 80 +#define SCLK_SDMMC_DRV 81 +#define SCLK_SDIO_DRV 82 +#define SCLK_EMMC_DRV 83 +#define SCLK_SDMMC_SAMPLE 84 +#define SCLK_SDIO_SAMPLE 85 +#define SCLK_EMMC_SAMPLE 86 +#define SCLK_VENC_CORE 87 +#define SCLK_HEVC_CORE 88 +#define SCLK_HEVC_CABAC 89 +#define SCLK_PWM0_PMU 90 +#define SCLK_I2C0_PMU 91 +#define SCLK_WIFI 92 +#define SCLK_CIFOUT 93 +#define SCLK_MIPI_CSI_OUT 94 +#define SCLK_CIF0 95 +#define SCLK_CIF1 96 +#define SCLK_CIF2 97 +#define SCLK_CIF3 98 +#define SCLK_DSP 99 +#define SCLK_DSP_IOP 100 +#define SCLK_DSP_EPP 101 +#define SCLK_DSP_EDP 102 +#define SCLK_DSP_EDAP 103 +#define SCLK_CVBS_HOST 104 +#define SCLK_HDMI_SFR 105 +#define SCLK_HDMI_CEC 106 +#define SCLK_CRYPTO 107 +#define SCLK_SPI 108 +#define SCLK_SARADC 109 +#define SCLK_TSADC 110 +#define SCLK_MAC_PRE 111 +#define SCLK_MAC 112 +#define SCLK_MAC_RX 113 +#define SCLK_MAC_REF 114 +#define SCLK_MAC_REFOUT 115 +#define SCLK_DSP_PFM 116 +#define SCLK_RGA 117 +#define SCLK_I2C1 118 +#define SCLK_I2C2 119 +#define SCLK_I2C3 120 +#define SCLK_PWM 121 +#define SCLK_ISP 122 +#define SCLK_USBPHY 123 +#define SCLK_I2S0_SRC 124 +#define SCLK_I2S1_SRC 125 +#define SCLK_I2S2_SRC 126 +#define SCLK_UART0_SRC 127 +#define SCLK_UART1_SRC 128 +#define SCLK_UART2_SRC 129 + +#define DCLK_VOP_SRC 185 +#define DCLK_HDMIPHY 186 +#define DCLK_VOP 187 + +/* aclk gates */ +#define ACLK_DMAC 192 +#define ACLK_PRE 193 +#define ACLK_CORE 194 +#define ACLK_ENMCORE 195 +#define ACLK_RKVENC 196 +#define ACLK_RKVDEC 197 +#define ACLK_VPU 198 +#define ACLK_CIF0 199 +#define ACLK_VIO0 200 +#define ACLK_VIO1 201 +#define ACLK_VOP 202 +#define ACLK_IEP 203 +#define ACLK_RGA 204 +#define ACLK_ISP 205 +#define ACLK_CIF1 206 +#define ACLK_CIF2 207 +#define ACLK_CIF3 208 +#define ACLK_PERI 209 +#define ACLK_GMAC 210 + +/* pclk gates */ +#define PCLK_GPIO1 256 +#define PCLK_GPIO2 257 +#define PCLK_GPIO3 258 +#define PCLK_GRF 259 +#define PCLK_I2C1 260 +#define PCLK_I2C2 261 +#define PCLK_I2C3 262 +#define PCLK_SPI 263 +#define PCLK_SFC 264 +#define PCLK_UART0 265 +#define PCLK_UART1 266 +#define PCLK_UART2 267 +#define PCLK_TSADC 268 +#define PCLK_PWM 269 +#define PCLK_TIMER 270 +#define PCLK_PERI 271 +#define PCLK_GPIO0_PMU 272 +#define PCLK_I2C0_PMU 273 +#define PCLK_PWM0_PMU 274 +#define PCLK_ISP 275 +#define PCLK_VIO 276 +#define PCLK_MIPI_DSI 277 +#define PCLK_HDMI_CTRL 278 +#define PCLK_SARADC 279 +#define PCLK_DSP_CFG 280 +#define PCLK_BUS 281 +#define PCLK_EFUSE0 282 +#define PCLK_EFUSE1 283 +#define PCLK_WDT 284 +#define PCLK_GMAC 285 + +/* hclk gates */ +#define HCLK_I2S0_8CH 320 +#define HCLK_I2S1_2CH 321 +#define HCLK_I2S2_2CH 322 +#define HCLK_NANDC 323 +#define HCLK_SDMMC 324 +#define HCLK_SDIO 325 +#define HCLK_EMMC 326 +#define HCLK_PERI 327 +#define HCLK_SFC 328 +#define HCLK_RKVENC 329 +#define HCLK_RKVDEC 330 +#define HCLK_CIF0 331 +#define HCLK_VIO 332 +#define HCLK_VOP 333 +#define HCLK_IEP 334 +#define HCLK_RGA 335 +#define HCLK_ISP 336 +#define HCLK_CRYPTO_MST 337 +#define 
HCLK_CRYPTO_SLV 338 +#define HCLK_HOST0 339 +#define HCLK_OTG 340 +#define HCLK_CIF1 341 +#define HCLK_CIF2 342 +#define HCLK_CIF3 343 +#define HCLK_BUS 344 +#define HCLK_VPU 345 + +#define CLK_NR_CLKS (HCLK_VPU + 1) + +/* reset id */ +#define SRST_CORE_PO_AD 0 +#define SRST_CORE_AD 1 +#define SRST_L2_AD 2 +#define SRST_CPU_NIU_AD 3 +#define SRST_CORE_PO 4 +#define SRST_CORE 5 +#define SRST_L2 6 +#define SRST_CORE_DBG 8 +#define PRST_DBG 9 +#define RST_DAP 10 +#define PRST_DBG_NIU 11 +#define ARST_STRC_SYS_AD 15 + +#define SRST_DDRPHY_CLKDIV 16 +#define SRST_DDRPHY 17 +#define PRST_DDRPHY 18 +#define PRST_HDMIPHY 19 +#define PRST_VDACPHY 20 +#define PRST_VADCPHY 21 +#define PRST_MIPI_CSI_PHY 22 +#define PRST_MIPI_DSI_PHY 23 +#define PRST_ACODEC 24 +#define ARST_BUS_NIU 25 +#define PRST_TOP_NIU 26 +#define ARST_INTMEM 27 +#define HRST_ROM 28 +#define ARST_DMAC 29 +#define SRST_MSCH_NIU 30 +#define PRST_MSCH_NIU 31 + +#define PRST_DDRUPCTL 32 +#define NRST_DDRUPCTL 33 +#define PRST_DDRMON 34 +#define HRST_I2S0_8CH 35 +#define MRST_I2S0_8CH 36 +#define HRST_I2S1_2CH 37 +#define MRST_IS21_2CH 38 +#define HRST_I2S2_2CH 39 +#define MRST_I2S2_2CH 40 +#define HRST_CRYPTO 41 +#define SRST_CRYPTO 42 +#define PRST_SPI 43 +#define SRST_SPI 44 +#define PRST_UART0 45 +#define PRST_UART1 46 +#define PRST_UART2 47 + +#define SRST_UART0 48 +#define SRST_UART1 49 +#define SRST_UART2 50 +#define PRST_I2C1 51 +#define PRST_I2C2 52 +#define PRST_I2C3 53 +#define SRST_I2C1 54 +#define SRST_I2C2 55 +#define SRST_I2C3 56 +#define PRST_PWM1 58 +#define SRST_PWM1 60 +#define PRST_WDT 61 +#define PRST_GPIO1 62 +#define PRST_GPIO2 63 + +#define PRST_GPIO3 64 +#define PRST_GRF 65 +#define PRST_EFUSE 66 +#define PRST_EFUSE512 67 +#define PRST_TIMER0 68 +#define SRST_TIMER0 69 +#define SRST_TIMER1 70 +#define PRST_TSADC 71 +#define SRST_TSADC 72 +#define PRST_SARADC 73 +#define SRST_SARADC 74 +#define HRST_SYSBUS 75 +#define PRST_USBGRF 76 + +#define ARST_PERIPH_NIU 80 +#define HRST_PERIPH_NIU 81 +#define PRST_PERIPH_NIU 82 +#define HRST_PERIPH 83 +#define HRST_SDMMC 84 +#define HRST_SDIO 85 +#define HRST_EMMC 86 +#define HRST_NANDC 87 +#define NRST_NANDC 88 +#define HRST_SFC 89 +#define SRST_SFC 90 +#define ARST_GMAC 91 +#define HRST_OTG 92 +#define SRST_OTG 93 +#define SRST_OTG_ADP 94 +#define HRST_HOST0 95 + +#define HRST_HOST0_AUX 96 +#define HRST_HOST0_ARB 97 +#define SRST_HOST0_EHCIPHY 98 +#define SRST_HOST0_UTMI 99 +#define SRST_USBPOR 100 +#define SRST_UTMI0 101 +#define SRST_UTMI1 102 + +#define ARST_VIO0_NIU 102 +#define ARST_VIO1_NIU 103 +#define HRST_VIO_NIU 104 +#define PRST_VIO_NIU 105 +#define ARST_VOP 106 +#define HRST_VOP 107 +#define DRST_VOP 108 +#define ARST_IEP 109 +#define HRST_IEP 110 +#define ARST_RGA 111 +#define HRST_RGA 112 +#define SRST_RGA 113 +#define PRST_CVBS 114 +#define PRST_HDMI 115 +#define SRST_HDMI 116 +#define PRST_MIPI_DSI 117 + +#define ARST_ISP_NIU 118 +#define HRST_ISP_NIU 119 +#define HRST_ISP 120 +#define SRST_ISP 121 +#define ARST_VIP0 122 +#define HRST_VIP0 123 +#define PRST_VIP0 124 +#define ARST_VIP1 125 +#define HRST_VIP1 126 +#define PRST_VIP1 127 +#define ARST_VIP2 128 +#define HRST_VIP2 129 +#define PRST_VIP2 120 +#define ARST_VIP3 121 +#define HRST_VIP3 122 +#define PRST_VIP4 123 + +#define PRST_CIF1TO4 124 +#define SRST_CVBS_CLK 125 +#define HRST_CVBS 126 + +#define ARST_VPU_NIU 140 +#define HRST_VPU_NIU 141 +#define ARST_VPU 142 +#define HRST_VPU 143 +#define ARST_RKVDEC_NIU 144 +#define HRST_RKVDEC_NIU 145 +#define ARST_RKVDEC 146 +#define HRST_RKVDEC 147 +#define 
SRST_RKVDEC_CABAC 148 +#define SRST_RKVDEC_CORE 149 +#define ARST_RKVENC_NIU 150 +#define HRST_RKVENC_NIU 151 +#define ARST_RKVENC 152 +#define HRST_RKVENC 153 +#define SRST_RKVENC_CORE 154 + +#define SRST_DSP_CORE 156 +#define SRST_DSP_SYS 157 +#define SRST_DSP_GLOBAL 158 +#define SRST_DSP_OECM 159 +#define PRST_DSP_IOP_NIU 160 +#define ARST_DSP_EPP_NIU 161 +#define ARST_DSP_EDP_NIU 162 +#define PRST_DSP_DBG_NIU 163 +#define PRST_DSP_CFG_NIU 164 +#define PRST_DSP_GRF 165 +#define PRST_DSP_MAILBOX 166 +#define PRST_DSP_INTC 167 +#define PRST_DSP_PFM_MON 169 +#define SRST_DSP_PFM_MON 170 +#define ARST_DSP_EDAP_NIU 171 + +#define SRST_PMU 172 +#define SRST_PMU_I2C0 173 +#define PRST_PMU_I2C0 174 +#define PRST_PMU_GPIO0 175 +#define PRST_PMU_INTMEM 176 +#define PRST_PMU_PWM0 177 +#define SRST_PMU_PWM0 178 +#define PRST_PMU_GRF 179 +#define SRST_PMU_NIU 180 +#define SRST_PMU_PVTM 181 +#define ARST_DSP_EDP_PERF 184 +#define ARST_DSP_EPP_PERF 185 + +#endif /* _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H */ diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h new file mode 100644 index 0000000..0fb65c3 --- /dev/null +++ b/include/dt-bindings/clock/s3c2410.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Heiko Stuebner + * + * Device Tree binding constants for clock controllers of Samsung S3C2410 and later. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to look up the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. */ + +/* id 1 is reserved */ +#define MPLL 2 +#define UPLL 3 +#define FCLK 4 +#define HCLK 5 +#define PCLK 6 +#define UCLK 7 +#define ARMCLK 8 + +/* pclk-gates */ +#define PCLK_UART0 16 +#define PCLK_UART1 17 +#define PCLK_UART2 18 +#define PCLK_I2C 19 +#define PCLK_SDI 20 +#define PCLK_SPI 21 +#define PCLK_ADC 22 +#define PCLK_AC97 23 +#define PCLK_I2S 24 +#define PCLK_PWM 25 +#define PCLK_RTC 26 +#define PCLK_GPIO 27 + + +/* hclk-gates */ +#define HCLK_LCD 32 +#define HCLK_USBH 33 +#define HCLK_USBD 34 +#define HCLK_NAND 35 +#define HCLK_CAM 36 + + +#define CAMIF 40 + + +/* Total number of clocks. */ +#define NR_CLKS (CAMIF + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H */ diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h new file mode 100644 index 0000000..b465615 --- /dev/null +++ b/include/dt-bindings/clock/s3c2412.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Heiko Stuebner + * + * Device Tree binding constants for clock controllers of Samsung S3C2412. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to look up the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks.
*/ + +/* id 1 is reserved */ +#define MPLL 2 +#define UPLL 3 +#define MDIVCLK 4 +#define MSYSCLK 5 +#define USYSCLK 6 +#define HCLK 7 +#define PCLK 8 +#define ARMDIV 9 +#define ARMCLK 10 + + +/* Special clocks */ +#define SCLK_CAM 16 +#define SCLK_UART 17 +#define SCLK_I2S 18 +#define SCLK_USBD 19 +#define SCLK_USBH 20 + +/* pclk-gates */ +#define PCLK_WDT 32 +#define PCLK_SPI 33 +#define PCLK_I2S 34 +#define PCLK_I2C 35 +#define PCLK_ADC 36 +#define PCLK_RTC 37 +#define PCLK_GPIO 38 +#define PCLK_UART2 39 +#define PCLK_UART1 40 +#define PCLK_UART0 41 +#define PCLK_SDI 42 +#define PCLK_PWM 43 +#define PCLK_USBD 44 + +/* hclk-gates */ +#define HCLK_HALF 48 +#define HCLK_X2 49 +#define HCLK_SDRAM 50 +#define HCLK_USBH 51 +#define HCLK_LCD 52 +#define HCLK_NAND 53 +#define HCLK_DMA3 54 +#define HCLK_DMA2 55 +#define HCLK_DMA1 56 +#define HCLK_DMA0 57 + +/* Total number of clocks. */ +#define NR_CLKS (HCLK_DMA0 + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H */ diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h new file mode 100644 index 0000000..a9d2f10 --- /dev/null +++ b/include/dt-bindings/clock/s3c2443.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Heiko Stuebner + * + * Device Tree binding constants for clock controllers of Samsung S3C2443 and later. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to look up the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. */ +#define MSYSCLK 1 +#define ESYSCLK 2 +#define ARMDIV 3 +#define ARMCLK 4 +#define HCLK 5 +#define PCLK 6 +#define MPLL 7 +#define EPLL 8 + +/* Special clocks */ +#define SCLK_HSSPI0 16 +#define SCLK_FIMD 17 +#define SCLK_I2S0 18 +#define SCLK_I2S1 19 +#define SCLK_HSMMC1 20 +#define SCLK_HSMMC_EXT 21 +#define SCLK_CAM 22 +#define SCLK_UART 23 +#define SCLK_USBH 24 + +/* Muxes */ +#define MUX_HSSPI0 32 +#define MUX_HSSPI1 33 +#define MUX_HSMMC0 34 +#define MUX_HSMMC1 35 + +/* hclk-gates */ +#define HCLK_DMA0 48 +#define HCLK_DMA1 49 +#define HCLK_DMA2 50 +#define HCLK_DMA3 51 +#define HCLK_DMA4 52 +#define HCLK_DMA5 53 +#define HCLK_DMA6 54 +#define HCLK_DMA7 55 +#define HCLK_CAM 56 +#define HCLK_LCD 57 +#define HCLK_USBH 58 +#define HCLK_USBD 59 +#define HCLK_IROM 60 +#define HCLK_HSMMC0 61 +#define HCLK_HSMMC1 62 +#define HCLK_CFC 63 +#define HCLK_SSMC 64 +#define HCLK_DRAM 65 +#define HCLK_2D 66 + +/* pclk-gates */ +#define PCLK_UART0 72 +#define PCLK_UART1 73 +#define PCLK_UART2 74 +#define PCLK_UART3 75 +#define PCLK_I2C0 76 +#define PCLK_SDI 77 +#define PCLK_SPI0 78 +#define PCLK_ADC 79 +#define PCLK_AC97 80 +#define PCLK_I2S0 81 +#define PCLK_PWM 82 +#define PCLK_WDT 83 +#define PCLK_RTC 84 +#define PCLK_GPIO 85 +#define PCLK_SPI1 86 +#define PCLK_CHIPID 87 +#define PCLK_I2C1 88 +#define PCLK_I2S1 89 +#define PCLK_PCM 90 + +/* Total number of clocks.
*/ +#define NR_CLKS (PCLK_PCM + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H */ diff --git a/include/dt-bindings/clock/s5pv210-audss.h b/include/dt-bindings/clock/s5pv210-audss.h new file mode 100644 index 0000000..84d62fe --- /dev/null +++ b/include/dt-bindings/clock/s5pv210-audss.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 Tomasz Figa + * + * This header provides constants for Samsung audio subsystem + * clock controller. + * + * The constants defined in this header are being used in dts + * and s5pv210 audss driver. + */ + +#ifndef _DT_BINDINGS_CLOCK_S5PV210_AUDSS_H +#define _DT_BINDINGS_CLOCK_S5PV210_AUDSS_H + +#define CLK_MOUT_AUDSS 0 +#define CLK_MOUT_I2S_A 1 + +#define CLK_DOUT_AUD_BUS 2 +#define CLK_DOUT_I2S_A 3 + +#define CLK_I2S 4 +#define CLK_HCLK_I2S 5 +#define CLK_HCLK_UART 6 +#define CLK_HCLK_HWA 7 +#define CLK_HCLK_DMA 8 +#define CLK_HCLK_BUF 9 +#define CLK_HCLK_RP 10 + +#define AUDSS_MAX_CLKS 11 + +#endif diff --git a/include/dt-bindings/clock/s5pv210.h b/include/dt-bindings/clock/s5pv210.h new file mode 100644 index 0000000..c36699c --- /dev/null +++ b/include/dt-bindings/clock/s5pv210.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Mateusz Krawczuk + * + * Device Tree binding constants for Samsung S5PV210 clock controller. + */ + +#ifndef _DT_BINDINGS_CLOCK_S5PV210_H +#define _DT_BINDINGS_CLOCK_S5PV210_H + +/* Core clocks. */ +#define FIN_PLL 1 +#define FOUT_APLL 2 +#define FOUT_MPLL 3 +#define FOUT_EPLL 4 +#define FOUT_VPLL 5 + +/* Muxes. */ +#define MOUT_FLASH 6 +#define MOUT_PSYS 7 +#define MOUT_DSYS 8 +#define MOUT_MSYS 9 +#define MOUT_VPLL 10 +#define MOUT_EPLL 11 +#define MOUT_MPLL 12 +#define MOUT_APLL 13 +#define MOUT_VPLLSRC 14 +#define MOUT_CSIS 15 +#define MOUT_FIMD 16 +#define MOUT_CAM1 17 +#define MOUT_CAM0 18 +#define MOUT_DAC 19 +#define MOUT_MIXER 20 +#define MOUT_HDMI 21 +#define MOUT_G2D 22 +#define MOUT_MFC 23 +#define MOUT_G3D 24 +#define MOUT_FIMC2 25 +#define MOUT_FIMC1 26 +#define MOUT_FIMC0 27 +#define MOUT_UART3 28 +#define MOUT_UART2 29 +#define MOUT_UART1 30 +#define MOUT_UART0 31 +#define MOUT_MMC3 32 +#define MOUT_MMC2 33 +#define MOUT_MMC1 34 +#define MOUT_MMC0 35 +#define MOUT_PWM 36 +#define MOUT_SPI0 37 +#define MOUT_SPI1 38 +#define MOUT_DMC0 39 +#define MOUT_PWI 40 +#define MOUT_HPM 41 +#define MOUT_SPDIF 42 +#define MOUT_AUDIO2 43 +#define MOUT_AUDIO1 44 +#define MOUT_AUDIO0 45 + +/* Dividers. 
*/ +#define DOUT_PCLKP 46 +#define DOUT_HCLKP 47 +#define DOUT_PCLKD 48 +#define DOUT_HCLKD 49 +#define DOUT_PCLKM 50 +#define DOUT_HCLKM 51 +#define DOUT_A2M 52 +#define DOUT_APLL 53 +#define DOUT_CSIS 54 +#define DOUT_FIMD 55 +#define DOUT_CAM1 56 +#define DOUT_CAM0 57 +#define DOUT_TBLK 58 +#define DOUT_G2D 59 +#define DOUT_MFC 60 +#define DOUT_G3D 61 +#define DOUT_FIMC2 62 +#define DOUT_FIMC1 63 +#define DOUT_FIMC0 64 +#define DOUT_UART3 65 +#define DOUT_UART2 66 +#define DOUT_UART1 67 +#define DOUT_UART0 68 +#define DOUT_MMC3 69 +#define DOUT_MMC2 70 +#define DOUT_MMC1 71 +#define DOUT_MMC0 72 +#define DOUT_PWM 73 +#define DOUT_SPI1 74 +#define DOUT_SPI0 75 +#define DOUT_DMC0 76 +#define DOUT_PWI 77 +#define DOUT_HPM 78 +#define DOUT_COPY 79 +#define DOUT_FLASH 80 +#define DOUT_AUDIO2 81 +#define DOUT_AUDIO1 82 +#define DOUT_AUDIO0 83 +#define DOUT_DPM 84 +#define DOUT_DVSEM 85 + +/* Gates */ +#define SCLK_FIMC 86 +#define CLK_CSIS 87 +#define CLK_ROTATOR 88 +#define CLK_FIMC2 89 +#define CLK_FIMC1 90 +#define CLK_FIMC0 91 +#define CLK_MFC 92 +#define CLK_G2D 93 +#define CLK_G3D 94 +#define CLK_IMEM 95 +#define CLK_PDMA1 96 +#define CLK_PDMA0 97 +#define CLK_MDMA 98 +#define CLK_DMC1 99 +#define CLK_DMC0 100 +#define CLK_NFCON 101 +#define CLK_SROMC 102 +#define CLK_CFCON 103 +#define CLK_NANDXL 104 +#define CLK_USB_HOST 105 +#define CLK_USB_OTG 106 +#define CLK_HDMI 107 +#define CLK_TVENC 108 +#define CLK_MIXER 109 +#define CLK_VP 110 +#define CLK_DSIM 111 +#define CLK_FIMD 112 +#define CLK_TZIC3 113 +#define CLK_TZIC2 114 +#define CLK_TZIC1 115 +#define CLK_TZIC0 116 +#define CLK_VIC3 117 +#define CLK_VIC2 118 +#define CLK_VIC1 119 +#define CLK_VIC0 120 +#define CLK_TSI 121 +#define CLK_HSMMC3 122 +#define CLK_HSMMC2 123 +#define CLK_HSMMC1 124 +#define CLK_HSMMC0 125 +#define CLK_JTAG 126 +#define CLK_MODEMIF 127 +#define CLK_CORESIGHT 128 +#define CLK_SDM 129 +#define CLK_SECSS 130 +#define CLK_PCM2 131 +#define CLK_PCM1 132 +#define CLK_PCM0 133 +#define CLK_SYSCON 134 +#define CLK_GPIO 135 +#define CLK_TSADC 136 +#define CLK_PWM 137 +#define CLK_WDT 138 +#define CLK_KEYIF 139 +#define CLK_UART3 140 +#define CLK_UART2 141 +#define CLK_UART1 142 +#define CLK_UART0 143 +#define CLK_SYSTIMER 144 +#define CLK_RTC 145 +#define CLK_SPI1 146 +#define CLK_SPI0 147 +#define CLK_I2C_HDMI_PHY 148 +#define CLK_I2C1 149 +#define CLK_I2C2 150 +#define CLK_I2C0 151 +#define CLK_I2S1 152 +#define CLK_I2S2 153 +#define CLK_I2S0 154 +#define CLK_AC97 155 +#define CLK_SPDIF 156 +#define CLK_TZPC3 157 +#define CLK_TZPC2 158 +#define CLK_TZPC1 159 +#define CLK_TZPC0 160 +#define CLK_SECKEY 161 +#define CLK_IEM_APC 162 +#define CLK_IEM_IEC 163 +#define CLK_CHIPID 164 +#define CLK_JPEG 163 + +/* Special clocks*/ +#define SCLK_PWI 164 +#define SCLK_SPDIF 165 +#define SCLK_AUDIO2 166 +#define SCLK_AUDIO1 167 +#define SCLK_AUDIO0 168 +#define SCLK_PWM 169 +#define SCLK_SPI1 170 +#define SCLK_SPI0 171 +#define SCLK_UART3 172 +#define SCLK_UART2 173 +#define SCLK_UART1 174 +#define SCLK_UART0 175 +#define SCLK_MMC3 176 +#define SCLK_MMC2 177 +#define SCLK_MMC1 178 +#define SCLK_MMC0 179 +#define SCLK_FINVPLL 180 +#define SCLK_CSIS 181 +#define SCLK_FIMD 182 +#define SCLK_CAM1 183 +#define SCLK_CAM0 184 +#define SCLK_DAC 185 +#define SCLK_MIXER 186 +#define SCLK_HDMI 187 +#define SCLK_FIMC2 188 +#define SCLK_FIMC1 189 +#define SCLK_FIMC0 190 +#define SCLK_HDMI27M 191 +#define SCLK_HDMIPHY 192 +#define SCLK_USBPHY0 193 +#define SCLK_USBPHY1 194 + +/* S5P6442-specific clocks */ +#define MOUT_D0SYNC 195 
+#define MOUT_D1SYNC 196 +#define DOUT_MIXER 197 +#define CLK_ETB 198 +#define CLK_ETM 199 + +/* CLKOUT */ +#define FOUT_APLL_CLKOUT 200 +#define FOUT_MPLL_CLKOUT 201 +#define DOUT_APLL_CLKOUT 202 +#define MOUT_CLKSEL 203 +#define DOUT_CLKOUT 204 +#define MOUT_CLKOUT 205 + +/* Total number of clocks. */ +#define NR_CLKS 206 + +#endif /* _DT_BINDINGS_CLOCK_S5PV210_H */ diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h new file mode 100644 index 0000000..5ece35d --- /dev/null +++ b/include/dt-bindings/clock/samsung,s2mps11.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Markus Reichl + * + * Device Tree binding constants for clocks of the Samsung S2MPS11 PMIC. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H + +/* Fixed rate clocks. */ + +#define S2MPS11_CLK_AP 0 +#define S2MPS11_CLK_CP 1 +#define S2MPS11_CLK_BT 2 + +/* Total number of clocks. */ +#define S2MPS11_CLKS_NUM (S2MPS11_CLK_BT + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H */ diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h new file mode 100644 index 0000000..19d233f --- /dev/null +++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013 Tomasz Figa + * + * Device Tree binding constants for Samsung S3C64xx clock controller. + */ + +#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H +#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H + +/* + * Let each exported clock get a unique index, which is used on DT-enabled + * platforms to look up the clock from a clock specifier. These indices are + * therefore considered an ABI and so must not be changed. This implies + * that new clocks should be added either in free spaces between clock groups + * or at the end. + */ + +/* Core clocks. */ +#define CLK27M 1 +#define CLK48M 2 +#define FOUT_APLL 3 +#define FOUT_MPLL 4 +#define FOUT_EPLL 5 +#define ARMCLK 6 +#define HCLKX2 7 +#define HCLK 8 +#define PCLK 9 + +/* HCLK bus clocks. */ +#define HCLK_3DSE 16 +#define HCLK_UHOST 17 +#define HCLK_SECUR 18 +#define HCLK_SDMA1 19 +#define HCLK_SDMA0 20 +#define HCLK_IROM 21 +#define HCLK_DDR1 22 +#define HCLK_MEM1 23 +#define HCLK_MEM0 24 +#define HCLK_USB 25 +#define HCLK_HSMMC2 26 +#define HCLK_HSMMC1 27 +#define HCLK_HSMMC0 28 +#define HCLK_MDP 29 +#define HCLK_DHOST 30 +#define HCLK_IHOST 31 +#define HCLK_DMA1 32 +#define HCLK_DMA0 33 +#define HCLK_JPEG 34 +#define HCLK_CAMIF 35 +#define HCLK_SCALER 36 +#define HCLK_2D 37 +#define HCLK_TV 38 +#define HCLK_POST0 39 +#define HCLK_ROT 40 +#define HCLK_LCD 41 +#define HCLK_TZIC 42 +#define HCLK_INTC 43 +#define HCLK_MFC 44 +#define HCLK_DDR0 45 + +/* PCLK bus clocks. */ +#define PCLK_IIC1 48 +#define PCLK_IIS2 49 +#define PCLK_SKEY 50 +#define PCLK_CHIPID 51 +#define PCLK_SPI1 52 +#define PCLK_SPI0 53 +#define PCLK_HSIRX 54 +#define PCLK_HSITX 55 +#define PCLK_GPIO 56 +#define PCLK_IIC0 57 +#define PCLK_IIS1 58 +#define PCLK_IIS0 59 +#define PCLK_AC97 60 +#define PCLK_TZPC 61 +#define PCLK_TSADC 62 +#define PCLK_KEYPAD 63 +#define PCLK_IRDA 64 +#define PCLK_PCM1 65 +#define PCLK_PCM0 66 +#define PCLK_PWM 67 +#define PCLK_RTC 68 +#define PCLK_WDT 69 +#define PCLK_UART3 70 +#define PCLK_UART2 71 +#define PCLK_UART1 72 +#define PCLK_UART0 73 +#define PCLK_MFC 74 + +/* Special clocks.
*/ +#define SCLK_UHOST 80 +#define SCLK_MMC2_48 81 +#define SCLK_MMC1_48 82 +#define SCLK_MMC0_48 83 +#define SCLK_MMC2 84 +#define SCLK_MMC1 85 +#define SCLK_MMC0 86 +#define SCLK_SPI1_48 87 +#define SCLK_SPI0_48 88 +#define SCLK_SPI1 89 +#define SCLK_SPI0 90 +#define SCLK_DAC27 91 +#define SCLK_TV27 92 +#define SCLK_SCALER27 93 +#define SCLK_SCALER 94 +#define SCLK_LCD27 95 +#define SCLK_LCD 96 +#define SCLK_FIMC 97 +#define SCLK_POST0_27 98 +#define SCLK_AUDIO2 99 +#define SCLK_POST0 100 +#define SCLK_AUDIO1 101 +#define SCLK_AUDIO0 102 +#define SCLK_SECUR 103 +#define SCLK_IRDA 104 +#define SCLK_UART 105 +#define SCLK_MFC 106 +#define SCLK_CAM 107 +#define SCLK_JPEG 108 +#define SCLK_ONENAND 109 + +/* MEM0 bus clocks - S3C6410-specific. */ +#define MEM0_CFCON 112 +#define MEM0_ONENAND1 113 +#define MEM0_ONENAND0 114 +#define MEM0_NFCON 115 +#define MEM0_SROM 116 + +/* Muxes. */ +#define MOUT_APLL 128 +#define MOUT_MPLL 129 +#define MOUT_EPLL 130 +#define MOUT_MFC 131 +#define MOUT_AUDIO0 132 +#define MOUT_AUDIO1 133 +#define MOUT_UART 134 +#define MOUT_SPI0 135 +#define MOUT_SPI1 136 +#define MOUT_MMC0 137 +#define MOUT_MMC1 138 +#define MOUT_MMC2 139 +#define MOUT_UHOST 140 +#define MOUT_IRDA 141 +#define MOUT_LCD 142 +#define MOUT_SCALER 143 +#define MOUT_DAC27 144 +#define MOUT_TV27 145 +#define MOUT_AUDIO2 146 + +/* Dividers. */ +#define DOUT_MPLL 160 +#define DOUT_SECUR 161 +#define DOUT_CAM 162 +#define DOUT_JPEG 163 +#define DOUT_MFC 164 +#define DOUT_MMC0 165 +#define DOUT_MMC1 166 +#define DOUT_MMC2 167 +#define DOUT_LCD 168 +#define DOUT_SCALER 169 +#define DOUT_UHOST 170 +#define DOUT_SPI0 171 +#define DOUT_SPI1 172 +#define DOUT_AUDIO0 173 +#define DOUT_AUDIO1 174 +#define DOUT_UART 175 +#define DOUT_IRDA 176 +#define DOUT_FIMC 177 +#define DOUT_AUDIO2 178 + +/* Total number of clocks. 
*/ +#define NR_CLKS (DOUT_AUDIO2 + 1) + +#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H */ diff --git a/include/dt-bindings/clock/sh73a0-clock.h b/include/dt-bindings/clock/sh73a0-clock.h new file mode 100644 index 0000000..5b544ad --- /dev/null +++ b/include/dt-bindings/clock/sh73a0-clock.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2014 Ulrich Hecht + */ + +#ifndef __DT_BINDINGS_CLOCK_SH73A0_H__ +#define __DT_BINDINGS_CLOCK_SH73A0_H__ + +/* CPG */ +#define SH73A0_CLK_MAIN 0 +#define SH73A0_CLK_PLL0 1 +#define SH73A0_CLK_PLL1 2 +#define SH73A0_CLK_PLL2 3 +#define SH73A0_CLK_PLL3 4 +#define SH73A0_CLK_DSI0PHY 5 +#define SH73A0_CLK_DSI1PHY 6 +#define SH73A0_CLK_ZG 7 +#define SH73A0_CLK_M3 8 +#define SH73A0_CLK_B 9 +#define SH73A0_CLK_M1 10 +#define SH73A0_CLK_M2 11 +#define SH73A0_CLK_Z 12 +#define SH73A0_CLK_ZX 13 +#define SH73A0_CLK_HP 14 + +/* MSTP0 */ +#define SH73A0_CLK_IIC2 1 +#define SH73A0_CLK_MSIOF0 0 + +/* MSTP1 */ +#define SH73A0_CLK_CEU1 29 +#define SH73A0_CLK_CSI2_RX1 28 +#define SH73A0_CLK_CEU0 27 +#define SH73A0_CLK_CSI2_RX0 26 +#define SH73A0_CLK_TMU0 25 +#define SH73A0_CLK_DSITX0 18 +#define SH73A0_CLK_IIC0 16 +#define SH73A0_CLK_SGX 12 +#define SH73A0_CLK_LCDC0 0 + +/* MSTP2 */ +#define SH73A0_CLK_SCIFA7 19 +#define SH73A0_CLK_SY_DMAC 18 +#define SH73A0_CLK_MP_DMAC 17 +#define SH73A0_CLK_MSIOF3 15 +#define SH73A0_CLK_MSIOF1 8 +#define SH73A0_CLK_SCIFA5 7 +#define SH73A0_CLK_SCIFB 6 +#define SH73A0_CLK_MSIOF2 5 +#define SH73A0_CLK_SCIFA0 4 +#define SH73A0_CLK_SCIFA1 3 +#define SH73A0_CLK_SCIFA2 2 +#define SH73A0_CLK_SCIFA3 1 +#define SH73A0_CLK_SCIFA4 0 + +/* MSTP3 */ +#define SH73A0_CLK_SCIFA6 31 +#define SH73A0_CLK_CMT1 29 +#define SH73A0_CLK_FSI 28 +#define SH73A0_CLK_IRDA 25 +#define SH73A0_CLK_IIC1 23 +#define SH73A0_CLK_USB 22 +#define SH73A0_CLK_FLCTL 15 +#define SH73A0_CLK_SDHI0 14 +#define SH73A0_CLK_SDHI1 13 +#define SH73A0_CLK_MMCIF0 12 +#define SH73A0_CLK_SDHI2 11 +#define SH73A0_CLK_TPU0 4 +#define SH73A0_CLK_TPU1 3 +#define SH73A0_CLK_TPU2 2 +#define SH73A0_CLK_TPU3 1 +#define SH73A0_CLK_TPU4 0 + +/* MSTP4 */ +#define SH73A0_CLK_IIC3 11 +#define SH73A0_CLK_IIC4 10 +#define SH73A0_CLK_KEYSC 3 + +/* MSTP5 */ +#define SH73A0_CLK_INTCA0 8 + +#endif diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h new file mode 100644 index 0000000..3b21d05 --- /dev/null +++ b/include/dt-bindings/clock/sifive-fu540-prci.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2018-2019 SiFive, Inc. + * Wesley Terpstra + * Paul Walmsley + */ + +#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H +#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H + +/* Clock indexes for use by Device Tree data and the PRCI driver */ + +#define PRCI_CLK_COREPLL 0 +#define PRCI_CLK_DDRPLL 1 +#define PRCI_CLK_GEMGXLPLL 2 +#define PRCI_CLK_TLCLK 3 + +#endif diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h new file mode 100644 index 0000000..f2ab463 --- /dev/null +++ b/include/dt-bindings/clock/sprd,sc9860-clk.h @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +// +// Spreadtrum SC9860 platform clocks +// +// Copyright (C) 2017, Spreadtrum Communications Inc. 
+ +#ifndef _DT_BINDINGS_CLK_SC9860_H_ +#define _DT_BINDINGS_CLK_SC9860_H_ + +#define CLK_FAC_4M 0 +#define CLK_FAC_2M 1 +#define CLK_FAC_1M 2 +#define CLK_FAC_250K 3 +#define CLK_FAC_RPLL0_26M 4 +#define CLK_FAC_RPLL1_26M 5 +#define CLK_FAC_RCO25M 6 +#define CLK_FAC_RCO4M 7 +#define CLK_FAC_RCO2M 8 +#define CLK_FAC_3K2 9 +#define CLK_FAC_1K 10 +#define CLK_MPLL0_GATE 11 +#define CLK_MPLL1_GATE 12 +#define CLK_DPLL0_GATE 13 +#define CLK_DPLL1_GATE 14 +#define CLK_LTEPLL0_GATE 15 +#define CLK_TWPLL_GATE 16 +#define CLK_LTEPLL1_GATE 17 +#define CLK_RPLL0_GATE 18 +#define CLK_RPLL1_GATE 19 +#define CLK_CPPLL_GATE 20 +#define CLK_GPLL_GATE 21 +#define CLK_PMU_GATE_NUM (CLK_GPLL_GATE + 1) + +#define CLK_MPLL0 0 +#define CLK_MPLL1 1 +#define CLK_DPLL0 2 +#define CLK_DPLL1 3 +#define CLK_RPLL0 4 +#define CLK_RPLL1 5 +#define CLK_TWPLL 6 +#define CLK_LTEPLL0 7 +#define CLK_LTEPLL1 8 +#define CLK_GPLL 9 +#define CLK_CPPLL 10 +#define CLK_GPLL_42M5 11 +#define CLK_TWPLL_768M 12 +#define CLK_TWPLL_384M 13 +#define CLK_TWPLL_192M 14 +#define CLK_TWPLL_96M 15 +#define CLK_TWPLL_48M 16 +#define CLK_TWPLL_24M 17 +#define CLK_TWPLL_12M 18 +#define CLK_TWPLL_512M 19 +#define CLK_TWPLL_256M 20 +#define CLK_TWPLL_128M 21 +#define CLK_TWPLL_64M 22 +#define CLK_TWPLL_307M2 23 +#define CLK_TWPLL_153M6 24 +#define CLK_TWPLL_76M8 25 +#define CLK_TWPLL_51M2 26 +#define CLK_TWPLL_38M4 27 +#define CLK_TWPLL_19M2 28 +#define CLK_L0_614M4 29 +#define CLK_L0_409M6 30 +#define CLK_L0_38M 31 +#define CLK_L1_38M 32 +#define CLK_RPLL0_192M 33 +#define CLK_RPLL0_96M 34 +#define CLK_RPLL0_48M 35 +#define CLK_RPLL1_468M 36 +#define CLK_RPLL1_192M 37 +#define CLK_RPLL1_96M 38 +#define CLK_RPLL1_64M 39 +#define CLK_RPLL1_48M 40 +#define CLK_DPLL0_50M 41 +#define CLK_DPLL1_50M 42 +#define CLK_CPPLL_50M 43 +#define CLK_M0_39M 44 +#define CLK_M1_63M 45 +#define CLK_PLL_NUM (CLK_M1_63M + 1) + + +#define CLK_AP_APB 0 +#define CLK_AP_USB3 1 +#define CLK_UART0 2 +#define CLK_UART1 3 +#define CLK_UART2 4 +#define CLK_UART3 5 +#define CLK_UART4 6 +#define CLK_I2C0 7 +#define CLK_I2C1 8 +#define CLK_I2C2 9 +#define CLK_I2C3 10 +#define CLK_I2C4 11 +#define CLK_I2C5 12 +#define CLK_SPI0 13 +#define CLK_SPI1 14 +#define CLK_SPI2 15 +#define CLK_SPI3 16 +#define CLK_IIS0 17 +#define CLK_IIS1 18 +#define CLK_IIS2 19 +#define CLK_IIS3 20 +#define CLK_AP_CLK_NUM (CLK_IIS3 + 1) + +#define CLK_AON_APB 0 +#define CLK_AUX0 1 +#define CLK_AUX1 2 +#define CLK_AUX2 3 +#define CLK_PROBE 4 +#define CLK_SP_AHB 5 +#define CLK_CCI 6 +#define CLK_GIC 7 +#define CLK_CSSYS 8 +#define CLK_SDIO0_2X 9 +#define CLK_SDIO1_2X 10 +#define CLK_SDIO2_2X 11 +#define CLK_EMMC_2X 12 +#define CLK_SDIO0_1X 13 +#define CLK_SDIO1_1X 14 +#define CLK_SDIO2_1X 15 +#define CLK_EMMC_1X 16 +#define CLK_ADI 17 +#define CLK_PWM0 18 +#define CLK_PWM1 19 +#define CLK_PWM2 20 +#define CLK_PWM3 21 +#define CLK_EFUSE 22 +#define CLK_CM3_UART0 23 +#define CLK_CM3_UART1 24 +#define CLK_THM 25 +#define CLK_CM3_I2C0 26 +#define CLK_CM3_I2C1 27 +#define CLK_CM4_SPI 28 +#define CLK_AON_I2C 29 +#define CLK_AVS 30 +#define CLK_CA53_DAP 31 +#define CLK_CA53_TS 32 +#define CLK_DJTAG_TCK 33 +#define CLK_PMU 34 +#define CLK_PMU_26M 35 +#define CLK_DEBOUNCE 36 +#define CLK_OTG2_REF 37 +#define CLK_USB3_REF 38 +#define CLK_AP_AXI 39 +#define CLK_AON_PREDIV_NUM (CLK_AP_AXI + 1) + +#define CLK_USB3_EB 0 +#define CLK_USB3_SUSPEND_EB 1 +#define CLK_USB3_REF_EB 2 +#define CLK_DMA_EB 3 +#define CLK_SDIO0_EB 4 +#define CLK_SDIO1_EB 5 +#define CLK_SDIO2_EB 6 +#define CLK_EMMC_EB 7 +#define CLK_ROM_EB 8 
+#define CLK_BUSMON_EB 9 +#define CLK_CC63S_EB 10 +#define CLK_CC63P_EB 11 +#define CLK_CE0_EB 12 +#define CLK_CE1_EB 13 +#define CLK_APAHB_GATE_NUM (CLK_CE1_EB + 1) + +#define CLK_AVS_LIT_EB 0 +#define CLK_AVS_BIG_EB 1 +#define CLK_AP_INTC5_EB 2 +#define CLK_GPIO_EB 3 +#define CLK_PWM0_EB 4 +#define CLK_PWM1_EB 5 +#define CLK_PWM2_EB 6 +#define CLK_PWM3_EB 7 +#define CLK_KPD_EB 8 +#define CLK_AON_SYS_EB 9 +#define CLK_AP_SYS_EB 10 +#define CLK_AON_TMR_EB 11 +#define CLK_AP_TMR0_EB 12 +#define CLK_EFUSE_EB 13 +#define CLK_EIC_EB 14 +#define CLK_PUB1_REG_EB 15 +#define CLK_ADI_EB 16 +#define CLK_AP_INTC0_EB 17 +#define CLK_AP_INTC1_EB 18 +#define CLK_AP_INTC2_EB 19 +#define CLK_AP_INTC3_EB 20 +#define CLK_AP_INTC4_EB 21 +#define CLK_SPLK_EB 22 +#define CLK_MSPI_EB 23 +#define CLK_PUB0_REG_EB 24 +#define CLK_PIN_EB 25 +#define CLK_AON_CKG_EB 26 +#define CLK_GPU_EB 27 +#define CLK_APCPU_TS0_EB 28 +#define CLK_APCPU_TS1_EB 29 +#define CLK_DAP_EB 30 +#define CLK_I2C_EB 31 +#define CLK_PMU_EB 32 +#define CLK_THM_EB 33 +#define CLK_AUX0_EB 34 +#define CLK_AUX1_EB 35 +#define CLK_AUX2_EB 36 +#define CLK_PROBE_EB 37 +#define CLK_GPU0_AVS_EB 38 +#define CLK_GPU1_AVS_EB 39 +#define CLK_APCPU_WDG_EB 40 +#define CLK_AP_TMR1_EB 41 +#define CLK_AP_TMR2_EB 42 +#define CLK_DISP_EMC_EB 43 +#define CLK_ZIP_EMC_EB 44 +#define CLK_GSP_EMC_EB 45 +#define CLK_OSC_AON_EB 46 +#define CLK_LVDS_TRX_EB 47 +#define CLK_LVDS_TCXO_EB 48 +#define CLK_MDAR_EB 49 +#define CLK_RTC4M0_CAL_EB 50 +#define CLK_RCT100M_CAL_EB 51 +#define CLK_DJTAG_EB 52 +#define CLK_MBOX_EB 53 +#define CLK_AON_DMA_EB 54 +#define CLK_DBG_EMC_EB 55 +#define CLK_LVDS_PLL_DIV_EN 56 +#define CLK_DEF_EB 57 +#define CLK_AON_APB_RSV0 58 +#define CLK_ORP_JTAG_EB 59 +#define CLK_VSP_EB 60 +#define CLK_CAM_EB 61 +#define CLK_DISP_EB 62 +#define CLK_DBG_AXI_IF_EB 63 +#define CLK_SDIO0_2X_EN 64 +#define CLK_SDIO1_2X_EN 65 +#define CLK_SDIO2_2X_EN 66 +#define CLK_EMMC_2X_EN 67 +#define CLK_ARCH_RTC_EB 68 +#define CLK_KPB_RTC_EB 69 +#define CLK_AON_SYST_RTC_EB 70 +#define CLK_AP_SYST_RTC_EB 71 +#define CLK_AON_TMR_RTC_EB 72 +#define CLK_AP_TMR0_RTC_EB 73 +#define CLK_EIC_RTC_EB 74 +#define CLK_EIC_RTCDV5_EB 75 +#define CLK_AP_WDG_RTC_EB 76 +#define CLK_AP_TMR1_RTC_EB 77 +#define CLK_AP_TMR2_RTC_EB 78 +#define CLK_DCXO_TMR_RTC_EB 79 +#define CLK_BB_CAL_RTC_EB 80 +#define CLK_AVS_BIG_RTC_EB 81 +#define CLK_AVS_LIT_RTC_EB 82 +#define CLK_AVS_GPU0_RTC_EB 83 +#define CLK_AVS_GPU1_RTC_EB 84 +#define CLK_GPU_TS_EB 85 +#define CLK_RTCDV10_EB 86 +#define CLK_AON_GATE_NUM (CLK_RTCDV10_EB + 1) + +#define CLK_LIT_MCU 0 +#define CLK_BIG_MCU 1 +#define CLK_AONSECURE_NUM (CLK_BIG_MCU + 1) + +#define CLK_AGCP_IIS0_EB 0 +#define CLK_AGCP_IIS1_EB 1 +#define CLK_AGCP_IIS2_EB 2 +#define CLK_AGCP_IIS3_EB 3 +#define CLK_AGCP_UART_EB 4 +#define CLK_AGCP_DMACP_EB 5 +#define CLK_AGCP_DMAAP_EB 6 +#define CLK_AGCP_ARC48K_EB 7 +#define CLK_AGCP_SRC44P1K_EB 8 +#define CLK_AGCP_MCDT_EB 9 +#define CLK_AGCP_VBCIFD_EB 10 +#define CLK_AGCP_VBC_EB 11 +#define CLK_AGCP_SPINLOCK_EB 12 +#define CLK_AGCP_ICU_EB 13 +#define CLK_AGCP_AP_ASHB_EB 14 +#define CLK_AGCP_CP_ASHB_EB 15 +#define CLK_AGCP_AUD_EB 16 +#define CLK_AGCP_AUDIF_EB 17 +#define CLK_AGCP_GATE_NUM (CLK_AGCP_AUDIF_EB + 1) + +#define CLK_GPU 0 +#define CLK_GPU_NUM (CLK_GPU + 1) + +#define CLK_AHB_VSP 0 +#define CLK_VSP 1 +#define CLK_VSP_ENC 2 +#define CLK_VPP 3 +#define CLK_VSP_26M 4 +#define CLK_VSP_NUM (CLK_VSP_26M + 1) + +#define CLK_VSP_DEC_EB 0 +#define CLK_VSP_CKG_EB 1 +#define CLK_VSP_MMU_EB 2 +#define CLK_VSP_ENC_EB 3 +#define 
CLK_VPP_EB 4 +#define CLK_VSP_26M_EB 5 +#define CLK_VSP_AXI_GATE 6 +#define CLK_VSP_ENC_GATE 7 +#define CLK_VPP_AXI_GATE 8 +#define CLK_VSP_BM_GATE 9 +#define CLK_VSP_ENC_BM_GATE 10 +#define CLK_VPP_BM_GATE 11 +#define CLK_VSP_GATE_NUM (CLK_VPP_BM_GATE + 1) + +#define CLK_AHB_CAM 0 +#define CLK_SENSOR0 1 +#define CLK_SENSOR1 2 +#define CLK_SENSOR2 3 +#define CLK_MIPI_CSI0_EB 4 +#define CLK_MIPI_CSI1_EB 5 +#define CLK_CAM_NUM (CLK_MIPI_CSI1_EB + 1) + +#define CLK_DCAM0_EB 0 +#define CLK_DCAM1_EB 1 +#define CLK_ISP0_EB 2 +#define CLK_CSI0_EB 3 +#define CLK_CSI1_EB 4 +#define CLK_JPG0_EB 5 +#define CLK_JPG1_EB 6 +#define CLK_CAM_CKG_EB 7 +#define CLK_CAM_MMU_EB 8 +#define CLK_ISP1_EB 9 +#define CLK_CPP_EB 10 +#define CLK_MMU_PF_EB 11 +#define CLK_ISP2_EB 12 +#define CLK_DCAM2ISP_IF_EB 13 +#define CLK_ISP2DCAM_IF_EB 14 +#define CLK_ISP_LCLK_EB 15 +#define CLK_ISP_ICLK_EB 16 +#define CLK_ISP_MCLK_EB 17 +#define CLK_ISP_PCLK_EB 18 +#define CLK_ISP_ISP2DCAM_EB 19 +#define CLK_DCAM0_IF_EB 20 +#define CLK_CLK26M_IF_EB 21 +#define CLK_CPHY0_GATE 22 +#define CLK_MIPI_CSI0_GATE 23 +#define CLK_CPHY1_GATE 24 +#define CLK_MIPI_CSI1 25 +#define CLK_DCAM0_AXI_GATE 26 +#define CLK_DCAM1_AXI_GATE 27 +#define CLK_SENSOR0_GATE 28 +#define CLK_SENSOR1_GATE 29 +#define CLK_JPG0_AXI_GATE 30 +#define CLK_GPG1_AXI_GATE 31 +#define CLK_ISP0_AXI_GATE 32 +#define CLK_ISP1_AXI_GATE 33 +#define CLK_ISP2_AXI_GATE 34 +#define CLK_CPP_AXI_GATE 35 +#define CLK_D0_IF_AXI_GATE 36 +#define CLK_D2I_IF_AXI_GATE 37 +#define CLK_I2D_IF_AXI_GATE 38 +#define CLK_SPARE_AXI_GATE 39 +#define CLK_SENSOR2_GATE 40 +#define CLK_D0IF_IN_D_EN 41 +#define CLK_D1IF_IN_D_EN 42 +#define CLK_D0IF_IN_D2I_EN 43 +#define CLK_D1IF_IN_D2I_EN 44 +#define CLK_IA_IN_D2I_EN 45 +#define CLK_IB_IN_D2I_EN 46 +#define CLK_IC_IN_D2I_EN 47 +#define CLK_IA_IN_I_EN 48 +#define CLK_IB_IN_I_EN 49 +#define CLK_IC_IN_I_EN 50 +#define CLK_CAM_GATE_NUM (CLK_IC_IN_I_EN + 1) + +#define CLK_AHB_DISP 0 +#define CLK_DISPC0_DPI 1 +#define CLK_DISPC1_DPI 2 +#define CLK_DISP_NUM (CLK_DISPC1_DPI + 1) + +#define CLK_DISPC0_EB 0 +#define CLK_DISPC1_EB 1 +#define CLK_DISPC_MMU_EB 2 +#define CLK_GSP0_EB 3 +#define CLK_GSP1_EB 4 +#define CLK_GSP0_MMU_EB 5 +#define CLK_GSP1_MMU_EB 6 +#define CLK_DSI0_EB 7 +#define CLK_DSI1_EB 8 +#define CLK_DISP_CKG_EB 9 +#define CLK_DISP_GPU_EB 10 +#define CLK_GPU_MTX_EB 11 +#define CLK_GSP_MTX_EB 12 +#define CLK_TMC_MTX_EB 13 +#define CLK_DISPC_MTX_EB 14 +#define CLK_DPHY0_GATE 15 +#define CLK_DPHY1_GATE 16 +#define CLK_GSP0_A_GATE 17 +#define CLK_GSP1_A_GATE 18 +#define CLK_GSP0_F_GATE 19 +#define CLK_GSP1_F_GATE 20 +#define CLK_D_MTX_F_GATE 21 +#define CLK_D_MTX_A_GATE 22 +#define CLK_D_NOC_F_GATE 23 +#define CLK_D_NOC_A_GATE 24 +#define CLK_GSP_MTX_F_GATE 25 +#define CLK_GSP_MTX_A_GATE 26 +#define CLK_GSP_NOC_F_GATE 27 +#define CLK_GSP_NOC_A_GATE 28 +#define CLK_DISPM0IDLE_GATE 29 +#define CLK_GSPM0IDLE_GATE 30 +#define CLK_DISP_GATE_NUM (CLK_GSPM0IDLE_GATE + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_IIS3_EB 4 +#define CLK_SPI0_EB 5 +#define CLK_SPI1_EB 6 +#define CLK_SPI2_EB 7 +#define CLK_I2C0_EB 8 +#define CLK_I2C1_EB 9 +#define CLK_I2C2_EB 10 +#define CLK_I2C3_EB 11 +#define CLK_I2C4_EB 12 +#define CLK_I2C5_EB 13 +#define CLK_UART0_EB 14 +#define CLK_UART1_EB 15 +#define CLK_UART2_EB 16 +#define CLK_UART3_EB 17 +#define CLK_UART4_EB 18 +#define CLK_AP_CKG_EB 19 +#define CLK_SPI3_EB 20 +#define CLK_APAPB_GATE_NUM (CLK_SPI3_EB + 1) + +#endif /* _DT_BINDINGS_CLK_SC9860_H_ */ 
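A dt-bindings header such as the SC9860 one above is shared between the clock driver and board device trees, so a clock is referenced by a symbolic constant rather than a bare index. A minimal consumer sketch in DTS, assuming the header's mainline path is include/dt-bindings/clock/sprd,sc9860-clk.h and using a hypothetical ap_clk controller node with #clock-cells = <1> (node names are illustrative only):

    #include <dt-bindings/clock/sprd,sc9860-clk.h>

    serial@70000000 {
            /* CLK_UART0 expands to 2, the UART0 index in the AP clock block above */
            clocks = <&ap_clk CLK_UART0>;
    };

The cell after the controller phandle is exactly the index the header defines; the *_NUM constants such as CLK_AP_CLK_NUM (last index + 1) presumably let the driver size its clock table.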
diff --git a/include/dt-bindings/clock/ste-ab8500.h b/include/dt-bindings/clock/ste-ab8500.h
new file mode 100644
index 0000000..fb42dd0
--- /dev/null
+++ b/include/dt-bindings/clock/ste-ab8500.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STE_CLK_AB8500_H__
+#define __STE_CLK_AB8500_H__
+
+#define AB8500_SYSCLK_BUF2 0
+#define AB8500_SYSCLK_BUF3 1
+#define AB8500_SYSCLK_BUF4 2
+#define AB8500_SYSCLK_ULP 3
+#define AB8500_SYSCLK_INT 4
+#define AB8500_SYSCLK_AUDIO 5
+
+#endif
diff --git a/include/dt-bindings/clock/stih407-clks.h b/include/dt-bindings/clock/stih407-clks.h
new file mode 100644
index 0000000..f0936c1
--- /dev/null
+++ b/include/dt-bindings/clock/stih407-clks.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the clock indexes of the
+ * STMicroelectronics STiH407 SoC.
+ */
+#ifndef _DT_BINDINGS_CLK_STIH407
+#define _DT_BINDINGS_CLK_STIH407
+
+/* CLOCKGEN A0 */
+#define CLK_IC_LMI0 0
+#define CLK_IC_LMI1 1
+
+/* CLOCKGEN C0 */
+#define CLK_ICN_GPU 0
+#define CLK_FDMA 1
+#define CLK_NAND 2
+#define CLK_HVA 3
+#define CLK_PROC_STFE 4
+#define CLK_PROC_TP 5
+#define CLK_RX_ICN_DMU 6
+#define CLK_RX_ICN_DISP_0 6
+#define CLK_RX_ICN_DISP_1 6
+#define CLK_RX_ICN_HVA 7
+#define CLK_RX_ICN_TS 7
+#define CLK_ICN_CPU 8
+#define CLK_TX_ICN_DMU 9
+#define CLK_TX_ICN_HVA 9
+#define CLK_TX_ICN_TS 9
+#define CLK_ICN_COMPO 9
+#define CLK_MMC_0 10
+#define CLK_MMC_1 11
+#define CLK_JPEGDEC 12
+#define CLK_ICN_REG 13
+#define CLK_TRACE_A9 13
+#define CLK_PTI_STM 13
+#define CLK_EXT2F_A9 13
+#define CLK_IC_BDISP_0 14
+#define CLK_IC_BDISP_1 15
+#define CLK_PP_DMU 16
+#define CLK_VID_DMU 17
+#define CLK_DSS_LPC 18
+#define CLK_ST231_AUD_0 19
+#define CLK_ST231_GP_0 19
+#define CLK_ST231_GP_1 20
+#define CLK_ST231_DMU 21
+#define CLK_ICN_LMI 22
+#define CLK_TX_ICN_DISP_0 23
+#define CLK_TX_ICN_DISP_1 23
+#define CLK_ICN_SBC 24
+#define CLK_STFE_FRC2 25
+#define CLK_ETH_PHY 26
+#define CLK_ETH_REF_PHYCLK 27
+#define CLK_FLASH_PROMIP 28
+#define CLK_MAIN_DISP 29
+#define CLK_AUX_DISP 30
+#define CLK_COMPO_DVP 31
+
+/* CLOCKGEN D0 */
+#define CLK_PCM_0 0
+#define CLK_PCM_1 1
+#define CLK_PCM_2 2
+#define CLK_SPDIFF 3
+
+/* CLOCKGEN D2 */
+#define CLK_PIX_MAIN_DISP 0
+#define CLK_PIX_PIP 1
+#define CLK_PIX_GDP1 2
+#define CLK_PIX_GDP2 3
+#define CLK_PIX_GDP3 4
+#define CLK_PIX_GDP4 5
+#define CLK_PIX_AUX_DISP 6
+#define CLK_DENC 7
+#define CLK_PIX_HDDAC 8
+#define CLK_HDDAC 9
+#define CLK_SDDAC 10
+#define CLK_PIX_DVO 11
+#define CLK_DVO 12
+#define CLK_PIX_HDMI 13
+#define CLK_TMDS_HDMI 14
+#define CLK_REF_HDMIPHY 15
+
+/* CLOCKGEN D3 */
+#define CLK_STFE_FRC1 0
+#define CLK_TSOUT_0 1
+#define CLK_TSOUT_1 2
+#define CLK_MCHI 3
+#define CLK_VSENS_COMPO 4
+#define CLK_FRC1_REMOTE 5
+#define CLK_LPC_0 6
+#define CLK_LPC_1 7
+#endif
diff --git a/include/dt-bindings/clock/stih410-clks.h b/include/dt-bindings/clock/stih410-clks.h
new file mode 100644
index 0000000..90cbe61
--- /dev/null
+++ b/include/dt-bindings/clock/stih410-clks.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the clock indexes of the
+ * STMicroelectronics STiH410 SoC.
+ */
+#ifndef _DT_BINDINGS_CLK_STIH410
+#define _DT_BINDINGS_CLK_STIH410
+
+#include "stih407-clks.h"
+
+/* STiH410 introduces new clock outputs compared to STiH407 */
+
+/* CLOCKGEN C0 */
+#define CLK_TX_ICN_HADES 32
+#define CLK_RX_ICN_HADES 33
+#define CLK_ICN_REG_16 34
+#define CLK_PP_HADES 35
+#define CLK_CLUST_HADES 36
+#define CLK_HWPE_HADES 37
+#define CLK_FC_HADES 38
+
+/* CLOCKGEN D0 */
+#define CLK_PCMR10_MASTER 4
+#define CLK_USB2_PHY 5
+
+#endif
diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h
new file mode 100644
index 0000000..7430227
--- /dev/null
+++ b/include/dt-bindings/clock/stih416-clks.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the clock indexes of the
+ * STMicroelectronics STiH416 SoC.
+ */
+#ifndef _CLK_STIH416
+#define _CLK_STIH416
+
+/* CLOCKGEN A0 */
+#define CLK_ICN_REG 0
+#define CLK_ETH1_PHY 4
+
+/* CLOCKGEN A1 */
+#define CLK_ICN_IF_2 0
+#define CLK_GMAC0_PHY 3
+
+#endif
diff --git a/include/dt-bindings/clock/stih418-clks.h b/include/dt-bindings/clock/stih418-clks.h
new file mode 100644
index 0000000..0e7fba0
--- /dev/null
+++ b/include/dt-bindings/clock/stih418-clks.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the clock indexes of the
+ * STMicroelectronics STiH418 SoC.
+ */
+#ifndef _DT_BINDINGS_CLK_STIH418
+#define _DT_BINDINGS_CLK_STIH418
+
+#include "stih410-clks.h"
+
+/* STiH418 introduces new clock outputs compared to STiH410 */
+
+/* CLOCKGEN C0 */
+#define CLK_PROC_BDISP_0 14
+#define CLK_PROC_BDISP_1 15
+#define CLK_TX_ICN_1 23
+#define CLK_ETH_PHYREF 27
+#define CLK_PP_HEVC 35
+#define CLK_CLUST_HEVC 36
+#define CLK_HWPE_HEVC 37
+#define CLK_FC_HEVC 38
+#define CLK_PROC_MIXER 39
+#define CLK_PROC_SC 40
+#define CLK_AVSP_HEVC 41
+
+/* CLOCKGEN D2 */
+#undef CLK_PIX_PIP
+#undef CLK_PIX_GDP1
+#undef CLK_PIX_GDP2
+#undef CLK_PIX_GDP3
+#undef CLK_PIX_GDP4
+
+#define CLK_TMDS_HDMI_DIV2 5
+#define CLK_VP9 47
+#endif
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
new file mode 100644
index 0000000..1cc89c5
--- /dev/null
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * stm32fx-clock.h
+ *
+ * Copyright (C) 2016 STMicroelectronics
+ * Author: Gabriel Fernandez for STMicroelectronics.
+ */
+
+/*
+ * List of clocks which are not derived from the system clock (SYSCLOCK)
+ *
+ * The index of these clocks is the secondary index of the DT bindings
+ * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt)
+ *
+ * e.g.:
+ *   assigned-clocks = <&rcc 1 CLK_LSE>;
+ */

+#ifndef _DT_BINDINGS_CLK_STMFX_H
+#define _DT_BINDINGS_CLK_STMFX_H
+
+#define SYSTICK 0
+#define FCLK 1
+#define CLK_LSI 2
+#define CLK_LSE 3
+#define CLK_HSE_RTC 4
+#define CLK_RTC 5
+#define PLL_VCO_I2S 6
+#define PLL_VCO_SAI 7
+#define CLK_LCD 8
+#define CLK_I2S 9
+#define CLK_SAI1 10
+#define CLK_SAI2 11
+#define CLK_I2SQ_PDIV 12
+#define CLK_SAIQ_PDIV 13
+#define CLK_HSI 14
+#define CLK_SYSCLK 15
+#define CLK_F469_DSI 16
+
+#define END_PRIMARY_CLK 17
+
+#define CLK_HDMI_CEC 16
+#define CLK_SPDIF 17
+#define CLK_USART1 18
+#define CLK_USART2 19
+#define CLK_USART3 20
+#define CLK_UART4 21
+#define CLK_UART5 22
+#define CLK_USART6 23
+#define CLK_UART7 24
+#define CLK_UART8 25
+#define CLK_I2C1 26
+#define CLK_I2C2 27
+#define CLK_I2C3 28
+#define CLK_I2C4 29
+#define CLK_LPTIMER 30
+#define CLK_PLL_SRC 31
+#define CLK_DFSDM1 32
+#define CLK_ADFSDM1 33
+#define CLK_F769_DSI 34
+#define END_PRIMARY_CLK_F7 35
+
+#endif
diff --git a/include/dt-bindings/clock/stm32h7-clks.h b/include/dt-bindings/clock/stm32h7-clks.h
new file mode 100644
index 0000000..6637272
--- /dev/null
+++ b/include/dt-bindings/clock/stm32h7-clks.h
@@ -0,0 +1,165 @@
+/* SYS, CORE AND BUS CLOCKS */
+#define SYS_D1CPRE 0
+#define HCLK 1
+#define PCLK1 2
+#define PCLK2 3
+#define PCLK3 4
+#define PCLK4 5
+#define HSI_DIV 6
+#define HSE_1M 7
+#define I2S_CKIN 8
+#define CK_DSI_PHY 9
+#define HSE_CK 10
+#define LSE_CK 11
+#define CSI_KER_DIV122 12
+#define RTC_CK 13
+#define CPU_SYSTICK 14
+
+/* OSCILLATOR BANK */
+#define OSC_BANK 18
+#define HSI_CK 18
+#define HSI_KER_CK 19
+#define CSI_CK 20
+#define CSI_KER_CK 21
+#define RC48_CK 22
+#define LSI_CK 23
+
+/* MCLOCK BANK */
+#define MCLK_BANK 28
+#define PER_CK 28
+#define PLLSRC 29
+#define SYS_CK 30
+#define TRACEIN_CK 31
+
+/* ODF BANK */
+#define ODF_BANK 32
+#define PLL1_P 32
+#define PLL1_Q 33
+#define PLL1_R 34
+#define PLL2_P 35
+#define PLL2_Q 36
+#define PLL2_R 37
+#define PLL3_P 38
+#define PLL3_Q 39
+#define PLL3_R 40
+
+/* MCO BANK */
+#define MCO_BANK 41
+#define MCO1 41
+#define MCO2 42
+
+/* PERIF BANK */
+#define PERIF_BANK 50
+#define D1SRAM1_CK 50
+#define ITCM_CK 51
+#define DTCM2_CK 52
+#define DTCM1_CK 53
+#define FLITF_CK 54
+#define JPGDEC_CK 55
+#define DMA2D_CK 56
+#define MDMA_CK 57
+#define USB2ULPI_CK 58
+#define USB1ULPI_CK 59
+#define ETH1RX_CK 60
+#define ETH1TX_CK 61
+#define ETH1MAC_CK 62
+#define ART_CK 63
+#define DMA2_CK 64
+#define DMA1_CK 65
+#define D2SRAM3_CK 66
+#define D2SRAM2_CK 67
+#define D2SRAM1_CK 68
+#define HASH_CK 69
+#define CRYPT_CK 70
+#define CAMITF_CK 71
+#define BKPRAM_CK 72
+#define HSEM_CK 73
+#define BDMA_CK 74
+#define CRC_CK 75
+#define GPIOK_CK 76
+#define GPIOJ_CK 77
+#define GPIOI_CK 78
+#define GPIOH_CK 79
+#define GPIOG_CK 80
+#define GPIOF_CK 81
+#define GPIOE_CK 82
+#define GPIOD_CK 83
+#define GPIOC_CK 84
+#define GPIOB_CK 85
+#define GPIOA_CK 86
+#define WWDG1_CK 87
+#define DAC12_CK 88
+#define WWDG2_CK 89
+#define TIM14_CK 90
+#define TIM13_CK 91
+#define TIM12_CK 92
+#define TIM7_CK 93
+#define TIM6_CK 94
+#define TIM5_CK 95
+#define TIM4_CK 96
+#define TIM3_CK 97
+#define TIM2_CK 98
+#define MDIOS_CK 99
+#define OPAMP_CK 100
+#define CRS_CK 101
+#define TIM17_CK 102
+#define TIM16_CK 103
+#define TIM15_CK 104
+#define TIM8_CK 105
+#define TIM1_CK 106
+#define 
TMPSENS_CK 107 +#define RTCAPB_CK 108 +#define VREF_CK 109 +#define COMP12_CK 110 +#define SYSCFG_CK 111 + +/* KERNEL BANK */ +#define KERN_BANK 120 +#define SDMMC1_CK 120 +#define QUADSPI_CK 121 +#define FMC_CK 122 +#define USB2OTG_CK 123 +#define USB1OTG_CK 124 +#define ADC12_CK 125 +#define SDMMC2_CK 126 +#define RNG_CK 127 +#define ADC3_CK 128 +#define DSI_CK 129 +#define LTDC_CK 130 +#define USART8_CK 131 +#define USART7_CK 132 +#define HDMICEC_CK 133 +#define I2C3_CK 134 +#define I2C2_CK 135 +#define I2C1_CK 136 +#define UART5_CK 137 +#define UART4_CK 138 +#define USART3_CK 139 +#define USART2_CK 140 +#define SPDIFRX_CK 141 +#define SPI3_CK 142 +#define SPI2_CK 143 +#define LPTIM1_CK 144 +#define FDCAN_CK 145 +#define SWP_CK 146 +#define HRTIM_CK 147 +#define DFSDM1_CK 148 +#define SAI3_CK 149 +#define SAI2_CK 150 +#define SAI1_CK 151 +#define SPI5_CK 152 +#define SPI4_CK 153 +#define SPI1_CK 154 +#define USART6_CK 155 +#define USART1_CK 156 +#define SAI4B_CK 157 +#define SAI4A_CK 158 +#define LPTIM5_CK 159 +#define LPTIM4_CK 160 +#define LPTIM3_CK 161 +#define LPTIM2_CK 162 +#define I2C4_CK 163 +#define SPI6_CK 164 +#define LPUART1_CK 165 + +#define STM32H7_MAX_CLKS 166 diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h new file mode 100644 index 0000000..4cdaf13 --- /dev/null +++ b/include/dt-bindings/clock/stm32mp1-clks.h @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. + */ + +#ifndef _DT_BINDINGS_STM32MP1_CLKS_H_ +#define _DT_BINDINGS_STM32MP1_CLKS_H_ + +/* OSCILLATOR clocks */ +#define CK_HSE 0 +#define CK_CSI 1 +#define CK_LSI 2 +#define CK_LSE 3 +#define CK_HSI 4 +#define CK_HSE_DIV2 5 + +/* Bus clocks */ +#define TIM2 6 +#define TIM3 7 +#define TIM4 8 +#define TIM5 9 +#define TIM6 10 +#define TIM7 11 +#define TIM12 12 +#define TIM13 13 +#define TIM14 14 +#define LPTIM1 15 +#define SPI2 16 +#define SPI3 17 +#define USART2 18 +#define USART3 19 +#define UART4 20 +#define UART5 21 +#define UART7 22 +#define UART8 23 +#define I2C1 24 +#define I2C2 25 +#define I2C3 26 +#define I2C5 27 +#define SPDIF 28 +#define CEC 29 +#define DAC12 30 +#define MDIO 31 +#define TIM1 32 +#define TIM8 33 +#define TIM15 34 +#define TIM16 35 +#define TIM17 36 +#define SPI1 37 +#define SPI4 38 +#define SPI5 39 +#define USART6 40 +#define SAI1 41 +#define SAI2 42 +#define SAI3 43 +#define DFSDM 44 +#define FDCAN 45 +#define LPTIM2 46 +#define LPTIM3 47 +#define LPTIM4 48 +#define LPTIM5 49 +#define SAI4 50 +#define SYSCFG 51 +#define VREF 52 +#define TMPSENS 53 +#define PMBCTRL 54 +#define HDP 55 +#define LTDC 56 +#define DSI 57 +#define IWDG2 58 +#define USBPHY 59 +#define STGENRO 60 +#define SPI6 61 +#define I2C4 62 +#define I2C6 63 +#define USART1 64 +#define RTCAPB 65 +#define TZC1 66 +#define TZPC 67 +#define IWDG1 68 +#define BSEC 69 +#define STGEN 70 +#define DMA1 71 +#define DMA2 72 +#define DMAMUX 73 +#define ADC12 74 +#define USBO 75 +#define SDMMC3 76 +#define DCMI 77 +#define CRYP2 78 +#define HASH2 79 +#define RNG2 80 +#define CRC2 81 +#define HSEM 82 +#define IPCC 83 +#define GPIOA 84 +#define GPIOB 85 +#define GPIOC 86 +#define GPIOD 87 +#define GPIOE 88 +#define GPIOF 89 +#define GPIOG 90 +#define GPIOH 91 +#define GPIOI 92 +#define GPIOJ 93 +#define GPIOK 94 +#define GPIOZ 95 +#define CRYP1 96 +#define HASH1 97 +#define RNG1 98 +#define BKPSRAM 99 +#define MDMA 100 +#define GPU 101 +#define 
ETHCK 102 +#define ETHTX 103 +#define ETHRX 104 +#define ETHMAC 105 +#define FMC 106 +#define QSPI 107 +#define SDMMC1 108 +#define SDMMC2 109 +#define CRC1 110 +#define USBH 111 +#define ETHSTP 112 +#define TZC2 113 + +/* Kernel clocks */ +#define SDMMC1_K 118 +#define SDMMC2_K 119 +#define SDMMC3_K 120 +#define FMC_K 121 +#define QSPI_K 122 +#define ETHCK_K 123 +#define RNG1_K 124 +#define RNG2_K 125 +#define GPU_K 126 +#define USBPHY_K 127 +#define STGEN_K 128 +#define SPDIF_K 129 +#define SPI1_K 130 +#define SPI2_K 131 +#define SPI3_K 132 +#define SPI4_K 133 +#define SPI5_K 134 +#define SPI6_K 135 +#define CEC_K 136 +#define I2C1_K 137 +#define I2C2_K 138 +#define I2C3_K 139 +#define I2C4_K 140 +#define I2C5_K 141 +#define I2C6_K 142 +#define LPTIM1_K 143 +#define LPTIM2_K 144 +#define LPTIM3_K 145 +#define LPTIM4_K 146 +#define LPTIM5_K 147 +#define USART1_K 148 +#define USART2_K 149 +#define USART3_K 150 +#define UART4_K 151 +#define UART5_K 152 +#define USART6_K 153 +#define UART7_K 154 +#define UART8_K 155 +#define DFSDM_K 156 +#define FDCAN_K 157 +#define SAI1_K 158 +#define SAI2_K 159 +#define SAI3_K 160 +#define SAI4_K 161 +#define ADC12_K 162 +#define DSI_K 163 +#define DSI_PX 164 +#define ADFSDM_K 165 +#define USBO_K 166 +#define LTDC_PX 167 +#define DAC12_K 168 +#define ETHPTP_K 169 + +/* PLL */ +#define PLL1 176 +#define PLL2 177 +#define PLL3 178 +#define PLL4 179 + +/* ODF */ +#define PLL1_P 180 +#define PLL1_Q 181 +#define PLL1_R 182 +#define PLL2_P 183 +#define PLL2_Q 184 +#define PLL2_R 185 +#define PLL3_P 186 +#define PLL3_Q 187 +#define PLL3_R 188 +#define PLL4_P 189 +#define PLL4_Q 190 +#define PLL4_R 191 + +/* AUX */ +#define RTC 192 + +/* MCLK */ +#define CK_PER 193 +#define CK_MPU 194 +#define CK_AXI 195 +#define CK_MCU 196 + +/* Time base */ +#define TIM2_K 197 +#define TIM3_K 198 +#define TIM4_K 199 +#define TIM5_K 200 +#define TIM6_K 201 +#define TIM7_K 202 +#define TIM12_K 203 +#define TIM13_K 204 +#define TIM14_K 205 +#define TIM1_K 206 +#define TIM8_K 207 +#define TIM15_K 208 +#define TIM16_K 209 +#define TIM17_K 210 + +/* MCO clocks */ +#define CK_MCO1 211 +#define CK_MCO2 212 + +/* TRACE & DEBUG clocks */ +#define CK_DBG 214 +#define CK_TRACE 215 + +/* DDR */ +#define DDRC1 220 +#define DDRC1LP 221 +#define DDRC2 222 +#define DDRC2LP 223 +#define DDRPHYC 224 +#define DDRPHYCLP 225 +#define DDRCAPB 226 +#define DDRCAPBLP 227 +#define AXIDCG 228 +#define DDRPHYCAPB 229 +#define DDRPHYCAPBLP 230 +#define DDRPERFM 231 + +#define STM32MP1_LAST_CLK 232 + +#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */ diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h new file mode 100644 index 0000000..08b98e2 --- /dev/null +++ b/include/dt-bindings/clock/stratix10-clock.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2017, Intel Corporation + */ + +#ifndef __STRATIX10_CLOCK_H +#define __STRATIX10_CLOCK_H + +/* fixed rate clocks */ +#define STRATIX10_OSC1 0 +#define STRATIX10_CB_INTOSC_HS_DIV2_CLK 1 +#define STRATIX10_CB_INTOSC_LS_CLK 2 +#define STRATIX10_F2S_FREE_CLK 3 + +/* fixed factor clocks */ +#define STRATIX10_L4_SYS_FREE_CLK 4 +#define STRATIX10_MPU_PERIPH_CLK 5 +#define STRATIX10_MPU_L2RAM_CLK 6 +#define STRATIX10_SDMMC_CIU_CLK 7 + +/* PLL clocks */ +#define STRATIX10_MAIN_PLL_CLK 8 +#define STRATIX10_PERIPH_PLL_CLK 9 +#define STRATIX10_BOOT_CLK 10 + +/* Periph clocks */ +#define STRATIX10_MAIN_MPU_BASE_CLK 11 +#define STRATIX10_MAIN_NOC_BASE_CLK 12 +#define STRATIX10_MAIN_EMACA_CLK 13 
+#define STRATIX10_MAIN_EMACB_CLK 14 +#define STRATIX10_MAIN_EMAC_PTP_CLK 15 +#define STRATIX10_MAIN_GPIO_DB_CLK 16 +#define STRATIX10_MAIN_SDMMC_CLK 17 +#define STRATIX10_MAIN_S2F_USR0_CLK 18 +#define STRATIX10_MAIN_S2F_USR1_CLK 19 +#define STRATIX10_MAIN_PSI_REF_CLK 20 + +#define STRATIX10_PERI_MPU_BASE_CLK 21 +#define STRATIX10_PERI_NOC_BASE_CLK 22 +#define STRATIX10_PERI_EMACA_CLK 23 +#define STRATIX10_PERI_EMACB_CLK 24 +#define STRATIX10_PERI_EMAC_PTP_CLK 25 +#define STRATIX10_PERI_GPIO_DB_CLK 26 +#define STRATIX10_PERI_SDMMC_CLK 27 +#define STRATIX10_PERI_S2F_USR0_CLK 28 +#define STRATIX10_PERI_S2F_USR1_CLK 29 +#define STRATIX10_PERI_PSI_REF_CLK 30 + +#define STRATIX10_MPU_FREE_CLK 31 +#define STRATIX10_NOC_FREE_CLK 32 +#define STRATIX10_S2F_USR0_CLK 33 +#define STRATIX10_NOC_CLK 34 +#define STRATIX10_EMAC_A_FREE_CLK 35 +#define STRATIX10_EMAC_B_FREE_CLK 36 +#define STRATIX10_EMAC_PTP_FREE_CLK 37 +#define STRATIX10_GPIO_DB_FREE_CLK 38 +#define STRATIX10_SDMMC_FREE_CLK 39 +#define STRATIX10_S2F_USER1_FREE_CLK 40 +#define STRATIX10_PSI_REF_FREE_CLK 41 + +/* Gate clocks */ +#define STRATIX10_MPU_CLK 42 +#define STRATIX10_L4_MAIN_CLK 43 +#define STRATIX10_L4_MP_CLK 44 +#define STRATIX10_L4_SP_CLK 45 +#define STRATIX10_CS_AT_CLK 46 +#define STRATIX10_CS_TRACE_CLK 47 +#define STRATIX10_CS_PDBG_CLK 48 +#define STRATIX10_CS_TIMER_CLK 49 +#define STRATIX10_S2F_USER0_CLK 50 +#define STRATIX10_S2F_USER1_CLK 51 +#define STRATIX10_EMAC0_CLK 52 +#define STRATIX10_EMAC1_CLK 53 +#define STRATIX10_EMAC2_CLK 54 +#define STRATIX10_EMAC_PTP_CLK 55 +#define STRATIX10_GPIO_DB_CLK 56 +#define STRATIX10_SDMMC_CLK 57 +#define STRATIX10_PSI_REF_CLK 58 +#define STRATIX10_USB_CLK 59 +#define STRATIX10_SPI_M_CLK 60 +#define STRATIX10_NAND_CLK 61 +#define STRATIX10_NAND_X_CLK 62 +#define STRATIX10_NAND_ECC_CLK 63 +#define STRATIX10_NUM_CLKS 64 + +#endif /* __STRATIX10_CLOCK_H */ diff --git a/include/dt-bindings/clock/sun4i-a10-ccu.h b/include/dt-bindings/clock/sun4i-a10-ccu.h new file mode 100644 index 0000000..e4fa61b --- /dev/null +++ b/include/dt-bindings/clock/sun4i-a10-ccu.h @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2017 Priit Laes + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN4I_A10_H_ +#define _DT_BINDINGS_CLK_SUN4I_A10_H_ + +#define CLK_HOSC 1 +#define CLK_PLL_VIDEO0_2X 9 +#define CLK_PLL_VIDEO1_2X 18 +#define CLK_CPU 20 + +/* AHB Gates */ +#define CLK_AHB_OTG 26 +#define CLK_AHB_EHCI0 27 +#define CLK_AHB_OHCI0 28 +#define CLK_AHB_EHCI1 29 +#define CLK_AHB_OHCI1 30 +#define CLK_AHB_SS 31 +#define CLK_AHB_DMA 32 +#define CLK_AHB_BIST 33 +#define CLK_AHB_MMC0 34 +#define CLK_AHB_MMC1 35 +#define CLK_AHB_MMC2 36 +#define CLK_AHB_MMC3 37 +#define CLK_AHB_MS 38 +#define CLK_AHB_NAND 39 +#define CLK_AHB_SDRAM 40 +#define CLK_AHB_ACE 41 +#define CLK_AHB_EMAC 42 +#define CLK_AHB_TS 43 +#define CLK_AHB_SPI0 44 +#define CLK_AHB_SPI1 45 +#define CLK_AHB_SPI2 46 +#define CLK_AHB_SPI3 47 +#define CLK_AHB_PATA 48 +#define CLK_AHB_SATA 49 +#define CLK_AHB_GPS 50 +#define CLK_AHB_HSTIMER 51 +#define CLK_AHB_VE 52 +#define CLK_AHB_TVD 53 +#define CLK_AHB_TVE0 54 +#define CLK_AHB_TVE1 55 +#define CLK_AHB_LCD0 56 +#define CLK_AHB_LCD1 57 +#define CLK_AHB_CSI0 58 +#define CLK_AHB_CSI1 59 +#define CLK_AHB_HDMI0 60 +#define CLK_AHB_HDMI1 61 +#define CLK_AHB_DE_BE0 62 +#define CLK_AHB_DE_BE1 63 +#define CLK_AHB_DE_FE0 64 +#define CLK_AHB_DE_FE1 65 +#define CLK_AHB_GMAC 66 +#define CLK_AHB_MP 67 +#define CLK_AHB_GPU 68 + +/* APB0 Gates */ +#define CLK_APB0_CODEC 69 +#define CLK_APB0_SPDIF 70 +#define CLK_APB0_I2S0 71 +#define CLK_APB0_AC97 72 +#define CLK_APB0_I2S1 73 +#define CLK_APB0_PIO 74 +#define CLK_APB0_IR0 75 +#define CLK_APB0_IR1 76 +#define CLK_APB0_I2S2 77 +#define CLK_APB0_KEYPAD 78 + +/* APB1 Gates */ +#define CLK_APB1_I2C0 79 +#define CLK_APB1_I2C1 80 +#define CLK_APB1_I2C2 81 +#define CLK_APB1_I2C3 82 +#define CLK_APB1_CAN 83 +#define CLK_APB1_SCR 84 +#define CLK_APB1_PS20 85 +#define CLK_APB1_PS21 86 +#define CLK_APB1_I2C4 87 +#define CLK_APB1_UART0 88 +#define CLK_APB1_UART1 89 +#define CLK_APB1_UART2 90 +#define CLK_APB1_UART3 91 +#define CLK_APB1_UART4 92 +#define CLK_APB1_UART5 93 +#define CLK_APB1_UART6 94 +#define CLK_APB1_UART7 95 + +/* IP clocks */ +#define CLK_NAND 96 +#define CLK_MS 97 +#define CLK_MMC0 98 +#define CLK_MMC0_OUTPUT 99 +#define CLK_MMC0_SAMPLE 100 +#define CLK_MMC1 101 +#define CLK_MMC1_OUTPUT 102 +#define CLK_MMC1_SAMPLE 103 +#define CLK_MMC2 104 +#define CLK_MMC2_OUTPUT 105 +#define CLK_MMC2_SAMPLE 106 +#define CLK_MMC3 107 +#define CLK_MMC3_OUTPUT 108 +#define CLK_MMC3_SAMPLE 109 +#define CLK_TS 110 +#define CLK_SS 111 +#define CLK_SPI0 112 +#define CLK_SPI1 113 +#define CLK_SPI2 114 +#define CLK_PATA 115 +#define CLK_IR0 116 +#define CLK_IR1 117 +#define CLK_I2S0 118 +#define CLK_AC97 119 +#define CLK_SPDIF 120 +#define CLK_KEYPAD 121 +#define CLK_SATA 122 +#define CLK_USB_OHCI0 123 +#define CLK_USB_OHCI1 124 +#define CLK_USB_PHY 125 +#define CLK_GPS 126 +#define CLK_SPI3 127 +#define CLK_I2S1 128 +#define CLK_I2S2 129 + +/* DRAM Gates */ +#define CLK_DRAM_VE 130 +#define CLK_DRAM_CSI0 131 +#define CLK_DRAM_CSI1 132 +#define CLK_DRAM_TS 133 +#define CLK_DRAM_TVD 134 +#define CLK_DRAM_TVE0 135 +#define CLK_DRAM_TVE1 136 +#define CLK_DRAM_OUT 137 +#define CLK_DRAM_DE_FE1 138 
+#define CLK_DRAM_DE_FE0 139 +#define CLK_DRAM_DE_BE0 140 +#define CLK_DRAM_DE_BE1 141 +#define CLK_DRAM_MP 142 +#define CLK_DRAM_ACE 143 + +/* Display Engine Clocks */ +#define CLK_DE_BE0 144 +#define CLK_DE_BE1 145 +#define CLK_DE_FE0 146 +#define CLK_DE_FE1 147 +#define CLK_DE_MP 148 +#define CLK_TCON0_CH0 149 +#define CLK_TCON1_CH0 150 +#define CLK_CSI_SCLK 151 +#define CLK_TVD_SCLK2 152 +#define CLK_TVD 153 +#define CLK_TCON0_CH1_SCLK2 154 +#define CLK_TCON0_CH1 155 +#define CLK_TCON1_CH1_SCLK2 156 +#define CLK_TCON1_CH1 157 +#define CLK_CSI0 158 +#define CLK_CSI1 159 +#define CLK_CODEC 160 +#define CLK_VE 161 +#define CLK_AVS 162 +#define CLK_ACE 163 +#define CLK_HDMI 164 +#define CLK_GPU 165 + +#endif /* _DT_BINDINGS_CLK_SUN4I_A10_H_ */ diff --git a/include/dt-bindings/clock/sun4i-a10-pll2.h b/include/dt-bindings/clock/sun4i-a10-pll2.h new file mode 100644 index 0000000..071c811 --- /dev/null +++ b/include/dt-bindings/clock/sun4i-a10-pll2.h @@ -0,0 +1,53 @@ +/* + * Copyright 2015 Maxime Ripard + * + * Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ +#define __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ + +#define SUN4I_A10_PLL2_1X 0 +#define SUN4I_A10_PLL2_2X 1 +#define SUN4I_A10_PLL2_4X 2 +#define SUN4I_A10_PLL2_8X 3 + +#endif /* __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ */ diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h new file mode 100644 index 0000000..a8ac4cf --- /dev/null +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ +#define _DT_BINDINGS_CLK_SUN50I_A64_H_ + +#define CLK_PLL_VIDEO0 7 +#define CLK_PLL_PERIPH0 11 + +#define CLK_BUS_MIPI_DSI 28 +#define CLK_BUS_CE 29 +#define CLK_BUS_DMA 30 +#define CLK_BUS_MMC0 31 +#define CLK_BUS_MMC1 32 +#define CLK_BUS_MMC2 33 +#define CLK_BUS_NAND 34 +#define CLK_BUS_DRAM 35 +#define CLK_BUS_EMAC 36 +#define CLK_BUS_TS 37 +#define CLK_BUS_HSTIMER 38 +#define CLK_BUS_SPI0 39 +#define CLK_BUS_SPI1 40 +#define CLK_BUS_OTG 41 +#define CLK_BUS_EHCI0 42 +#define CLK_BUS_EHCI1 43 +#define CLK_BUS_OHCI0 44 +#define CLK_BUS_OHCI1 45 +#define CLK_BUS_VE 46 +#define CLK_BUS_TCON0 47 +#define CLK_BUS_TCON1 48 +#define CLK_BUS_DEINTERLACE 49 +#define CLK_BUS_CSI 50 +#define CLK_BUS_HDMI 51 +#define CLK_BUS_DE 52 +#define CLK_BUS_GPU 53 +#define CLK_BUS_MSGBOX 54 +#define CLK_BUS_SPINLOCK 55 +#define CLK_BUS_CODEC 56 +#define CLK_BUS_SPDIF 57 +#define CLK_BUS_PIO 58 +#define CLK_BUS_THS 59 +#define CLK_BUS_I2S0 60 +#define CLK_BUS_I2S1 61 +#define CLK_BUS_I2S2 62 +#define CLK_BUS_I2C0 63 +#define CLK_BUS_I2C1 64 +#define CLK_BUS_I2C2 65 +#define CLK_BUS_SCR 66 +#define CLK_BUS_UART0 67 +#define CLK_BUS_UART1 68 +#define CLK_BUS_UART2 69 +#define CLK_BUS_UART3 70 +#define CLK_BUS_UART4 71 +#define CLK_BUS_DBG 72 +#define CLK_THS 73 +#define CLK_NAND 74 +#define CLK_MMC0 75 +#define CLK_MMC1 76 +#define CLK_MMC2 77 +#define CLK_TS 78 +#define CLK_CE 79 +#define CLK_SPI0 80 +#define CLK_SPI1 81 +#define CLK_I2S0 82 +#define CLK_I2S1 83 +#define CLK_I2S2 84 +#define CLK_SPDIF 85 +#define CLK_USB_PHY0 86 +#define CLK_USB_PHY1 87 +#define CLK_USB_HSIC 88 +#define CLK_USB_HSIC_12M 89 + +#define CLK_USB_OHCI0 91 + +#define CLK_USB_OHCI1 93 + +#define CLK_DRAM_VE 95 +#define CLK_DRAM_CSI 96 +#define CLK_DRAM_DEINTERLACE 97 +#define CLK_DRAM_TS 98 +#define CLK_DE 99 +#define CLK_TCON0 100 +#define CLK_TCON1 101 +#define CLK_DEINTERLACE 102 +#define CLK_CSI_MISC 103 +#define CLK_CSI_SCLK 104 +#define CLK_CSI_MCLK 105 +#define CLK_VE 106 +#define CLK_AC_DIG 107 +#define CLK_AC_DIG_4X 108 +#define CLK_AVS 109 +#define CLK_HDMI 110 +#define CLK_HDMI_DDC 111 + +#define CLK_DSI_DPHY 113 +#define CLK_GPU 114 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H_ */ diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h new file mode 100644 index 0000000..a1545cd --- /dev/null +++ b/include/dt-bindings/clock/sun50i-h6-ccu.h @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: (GPL-2.0+ or MIT) +/* + * Copyright (C) 2017 Icenowy Zheng + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_H6_H_ +#define _DT_BINDINGS_CLK_SUN50I_H6_H_ + +#define CLK_PLL_PERIPH0 3 + +#define CLK_CPUX 21 + +#define CLK_APB1 26 + +#define CLK_DE 29 +#define CLK_BUS_DE 30 +#define CLK_DEINTERLACE 31 +#define CLK_BUS_DEINTERLACE 32 +#define CLK_GPU 33 +#define CLK_BUS_GPU 34 +#define CLK_CE 35 +#define CLK_BUS_CE 36 +#define CLK_VE 37 +#define CLK_BUS_VE 38 +#define CLK_EMCE 39 +#define CLK_BUS_EMCE 40 +#define CLK_VP9 41 +#define CLK_BUS_VP9 42 +#define CLK_BUS_DMA 43 +#define CLK_BUS_MSGBOX 44 +#define CLK_BUS_SPINLOCK 45 +#define CLK_BUS_HSTIMER 46 +#define CLK_AVS 47 +#define CLK_BUS_DBG 48 +#define CLK_BUS_PSI 49 +#define CLK_BUS_PWM 50 +#define CLK_BUS_IOMMU 51 + +#define CLK_MBUS_DMA 53 +#define CLK_MBUS_VE 54 +#define CLK_MBUS_CE 55 +#define CLK_MBUS_TS 56 +#define CLK_MBUS_NAND 57 +#define CLK_MBUS_CSI 58 +#define CLK_MBUS_DEINTERLACE 59 + +#define CLK_NAND0 61 +#define CLK_NAND1 62 +#define CLK_BUS_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define 
CLK_MMC2 66 +#define CLK_BUS_MMC0 67 +#define CLK_BUS_MMC1 68 +#define CLK_BUS_MMC2 69 +#define CLK_BUS_UART0 70 +#define CLK_BUS_UART1 71 +#define CLK_BUS_UART2 72 +#define CLK_BUS_UART3 73 +#define CLK_BUS_I2C0 74 +#define CLK_BUS_I2C1 75 +#define CLK_BUS_I2C2 76 +#define CLK_BUS_I2C3 77 +#define CLK_BUS_SCR0 78 +#define CLK_BUS_SCR1 79 +#define CLK_SPI0 80 +#define CLK_SPI1 81 +#define CLK_BUS_SPI0 82 +#define CLK_BUS_SPI1 83 +#define CLK_BUS_EMAC 84 +#define CLK_TS 85 +#define CLK_BUS_TS 86 +#define CLK_IR_TX 87 +#define CLK_BUS_IR_TX 88 +#define CLK_BUS_THS 89 +#define CLK_I2S3 90 +#define CLK_I2S0 91 +#define CLK_I2S1 92 +#define CLK_I2S2 93 +#define CLK_BUS_I2S0 94 +#define CLK_BUS_I2S1 95 +#define CLK_BUS_I2S2 96 +#define CLK_BUS_I2S3 97 +#define CLK_SPDIF 98 +#define CLK_BUS_SPDIF 99 +#define CLK_DMIC 100 +#define CLK_BUS_DMIC 101 +#define CLK_AUDIO_HUB 102 +#define CLK_BUS_AUDIO_HUB 103 +#define CLK_USB_OHCI0 104 +#define CLK_USB_PHY0 105 +#define CLK_USB_PHY1 106 +#define CLK_USB_OHCI3 107 +#define CLK_USB_PHY3 108 +#define CLK_USB_HSIC_12M 109 +#define CLK_USB_HSIC 110 +#define CLK_BUS_OHCI0 111 +#define CLK_BUS_OHCI3 112 +#define CLK_BUS_EHCI0 113 +#define CLK_BUS_XHCI 114 +#define CLK_BUS_EHCI3 115 +#define CLK_BUS_OTG 116 +#define CLK_PCIE_REF_100M 117 +#define CLK_PCIE_REF 118 +#define CLK_PCIE_REF_OUT 119 +#define CLK_PCIE_MAXI 120 +#define CLK_PCIE_AUX 121 +#define CLK_BUS_PCIE 122 +#define CLK_HDMI 123 +#define CLK_HDMI_SLOW 124 +#define CLK_HDMI_CEC 125 +#define CLK_BUS_HDMI 126 +#define CLK_BUS_TCON_TOP 127 +#define CLK_TCON_LCD0 128 +#define CLK_BUS_TCON_LCD0 129 +#define CLK_TCON_TV0 130 +#define CLK_BUS_TCON_TV0 131 +#define CLK_CSI_CCI 132 +#define CLK_CSI_TOP 133 +#define CLK_CSI_MCLK 134 +#define CLK_BUS_CSI 135 +#define CLK_HDCP 136 +#define CLK_BUS_HDCP 137 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H6_H_ */ diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h new file mode 100644 index 0000000..7613613 --- /dev/null +++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017 Icenowy Zheng + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ +#define _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ + +#define CLK_AR100 0 + +#define CLK_R_APB1 2 + +#define CLK_R_APB1_TIMER 4 +#define CLK_R_APB1_TWD 5 +#define CLK_R_APB1_PWM 6 +#define CLK_R_APB2_UART 7 +#define CLK_R_APB2_I2C 8 +#define CLK_R_APB1_IR 9 +#define CLK_R_APB1_W1 10 + +#define CLK_IR 11 +#define CLK_W1 12 + +#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h new file mode 100644 index 0000000..75fe561 --- /dev/null +++ b/include/dt-bindings/clock/sun5i-ccu.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard + */ + +#ifndef _DT_BINDINGS_CLK_SUN5I_H_ +#define _DT_BINDINGS_CLK_SUN5I_H_ + +#define CLK_HOSC 1 + +#define CLK_PLL_VIDEO0_2X 9 + +#define CLK_PLL_VIDEO1_2X 16 +#define CLK_CPU 17 + +#define CLK_AHB_OTG 23 +#define CLK_AHB_EHCI 24 +#define CLK_AHB_OHCI 25 +#define CLK_AHB_SS 26 +#define CLK_AHB_DMA 27 +#define CLK_AHB_BIST 28 +#define CLK_AHB_MMC0 29 +#define CLK_AHB_MMC1 30 +#define CLK_AHB_MMC2 31 +#define CLK_AHB_NAND 32 +#define CLK_AHB_SDRAM 33 +#define CLK_AHB_EMAC 34 +#define CLK_AHB_TS 35 +#define CLK_AHB_SPI0 36 +#define CLK_AHB_SPI1 37 +#define CLK_AHB_SPI2 38 +#define CLK_AHB_GPS 39 +#define CLK_AHB_HSTIMER 40 +#define 
CLK_AHB_VE 41 +#define CLK_AHB_TVE 42 +#define CLK_AHB_LCD 43 +#define CLK_AHB_CSI 44 +#define CLK_AHB_HDMI 45 +#define CLK_AHB_DE_BE 46 +#define CLK_AHB_DE_FE 47 +#define CLK_AHB_IEP 48 +#define CLK_AHB_GPU 49 +#define CLK_APB0_CODEC 50 +#define CLK_APB0_SPDIF 51 +#define CLK_APB0_I2S 52 +#define CLK_APB0_PIO 53 +#define CLK_APB0_IR 54 +#define CLK_APB0_KEYPAD 55 +#define CLK_APB1_I2C0 56 +#define CLK_APB1_I2C1 57 +#define CLK_APB1_I2C2 58 +#define CLK_APB1_UART0 59 +#define CLK_APB1_UART1 60 +#define CLK_APB1_UART2 61 +#define CLK_APB1_UART3 62 +#define CLK_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define CLK_TS 67 +#define CLK_SS 68 +#define CLK_SPI0 69 +#define CLK_SPI1 70 +#define CLK_SPI2 71 +#define CLK_IR 72 +#define CLK_I2S 73 +#define CLK_SPDIF 74 +#define CLK_KEYPAD 75 +#define CLK_USB_OHCI 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_GPS 79 +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_TS 82 +#define CLK_DRAM_TVE 83 +#define CLK_DRAM_DE_FE 84 +#define CLK_DRAM_DE_BE 85 +#define CLK_DRAM_ACE 86 +#define CLK_DRAM_IEP 87 +#define CLK_DE_BE 88 +#define CLK_DE_FE 89 +#define CLK_TCON_CH0 90 + +#define CLK_TCON_CH1 92 +#define CLK_CSI 93 +#define CLK_VE 94 +#define CLK_CODEC 95 +#define CLK_AVS 96 +#define CLK_HDMI 97 +#define CLK_GPU 98 +#define CLK_MBUS 99 +#define CLK_IEP 100 + +#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */ diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h new file mode 100644 index 0000000..c5d1334 --- /dev/null +++ b/include/dt-bindings/clock/sun6i-a31-ccu.h @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN6I_A31_H_ +#define _DT_BINDINGS_CLK_SUN6I_A31_H_ + +#define CLK_PLL_VIDEO0_2X 7 + +#define CLK_PLL_PERIPH 10 + +#define CLK_PLL_VIDEO1_2X 13 + +#define CLK_CPU 18 + +#define CLK_AHB1_MIPIDSI 23 +#define CLK_AHB1_SS 24 +#define CLK_AHB1_DMA 25 +#define CLK_AHB1_MMC0 26 +#define CLK_AHB1_MMC1 27 +#define CLK_AHB1_MMC2 28 +#define CLK_AHB1_MMC3 29 +#define CLK_AHB1_NAND1 30 +#define CLK_AHB1_NAND0 31 +#define CLK_AHB1_SDRAM 32 +#define CLK_AHB1_EMAC 33 +#define CLK_AHB1_TS 34 +#define CLK_AHB1_HSTIMER 35 +#define CLK_AHB1_SPI0 36 +#define CLK_AHB1_SPI1 37 +#define CLK_AHB1_SPI2 38 +#define CLK_AHB1_SPI3 39 +#define CLK_AHB1_OTG 40 +#define CLK_AHB1_EHCI0 41 +#define CLK_AHB1_EHCI1 42 +#define CLK_AHB1_OHCI0 43 +#define CLK_AHB1_OHCI1 44 +#define CLK_AHB1_OHCI2 45 +#define CLK_AHB1_VE 46 +#define CLK_AHB1_LCD0 47 +#define CLK_AHB1_LCD1 48 +#define CLK_AHB1_CSI 49 +#define CLK_AHB1_HDMI 50 +#define CLK_AHB1_BE0 51 +#define CLK_AHB1_BE1 52 +#define CLK_AHB1_FE0 53 +#define CLK_AHB1_FE1 54 +#define CLK_AHB1_MP 55 +#define CLK_AHB1_GPU 56 +#define CLK_AHB1_DEU0 57 +#define CLK_AHB1_DEU1 58 +#define CLK_AHB1_DRC0 59 +#define CLK_AHB1_DRC1 60 + +#define CLK_APB1_CODEC 61 +#define CLK_APB1_SPDIF 62 +#define CLK_APB1_DIGITAL_MIC 63 +#define CLK_APB1_PIO 64 +#define CLK_APB1_DAUDIO0 65 +#define CLK_APB1_DAUDIO1 66 + +#define CLK_APB2_I2C0 67 +#define CLK_APB2_I2C1 68 +#define CLK_APB2_I2C2 69 +#define CLK_APB2_I2C3 70 +#define CLK_APB2_UART0 71 +#define CLK_APB2_UART1 72 +#define CLK_APB2_UART2 73 +#define CLK_APB2_UART3 74 +#define CLK_APB2_UART4 75 +#define CLK_APB2_UART5 76 + +#define CLK_NAND0 77 +#define CLK_NAND1 78 +#define CLK_MMC0 79 +#define CLK_MMC0_SAMPLE 80 +#define CLK_MMC0_OUTPUT 81 +#define CLK_MMC1 82 +#define CLK_MMC1_SAMPLE 83 +#define CLK_MMC1_OUTPUT 84 +#define CLK_MMC2 85 +#define CLK_MMC2_SAMPLE 86 +#define CLK_MMC2_OUTPUT 87 +#define CLK_MMC3 88 +#define CLK_MMC3_SAMPLE 89 +#define CLK_MMC3_OUTPUT 90 +#define CLK_TS 91 +#define CLK_SS 92 +#define CLK_SPI0 93 +#define CLK_SPI1 94 +#define CLK_SPI2 95 +#define CLK_SPI3 96 +#define CLK_DAUDIO0 97 +#define CLK_DAUDIO1 98 +#define CLK_SPDIF 99 +#define CLK_USB_PHY0 100 +#define CLK_USB_PHY1 101 +#define CLK_USB_PHY2 102 +#define CLK_USB_OHCI0 103 +#define CLK_USB_OHCI1 104 +#define CLK_USB_OHCI2 105 + +#define CLK_DRAM_VE 110 +#define CLK_DRAM_CSI_ISP 111 +#define CLK_DRAM_TS 112 +#define CLK_DRAM_DRC0 113 +#define CLK_DRAM_DRC1 114 +#define CLK_DRAM_DEU0 115 +#define CLK_DRAM_DEU1 116 +#define CLK_DRAM_FE0 117 +#define CLK_DRAM_FE1 118 +#define CLK_DRAM_BE0 119 +#define CLK_DRAM_BE1 120 +#define CLK_DRAM_MP 121 + +#define CLK_BE0 122 +#define CLK_BE1 123 +#define CLK_FE0 124 +#define CLK_FE1 125 +#define CLK_MP 126 +#define CLK_LCD0_CH0 127 +#define CLK_LCD1_CH0 128 +#define CLK_LCD0_CH1 129 +#define CLK_LCD1_CH1 130 +#define CLK_CSI0_SCLK 131 +#define CLK_CSI0_MCLK 132 +#define CLK_CSI1_MCLK 133 +#define CLK_VE 134 +#define CLK_CODEC 135 +#define CLK_AVS 136 +#define CLK_DIGITAL_MIC 137 +#define CLK_HDMI 138 +#define CLK_HDMI_DDC 139 +#define CLK_PS 140 + +#define CLK_MIPI_DSI 143 +#define CLK_MIPI_DSI_DPHY 144 +#define CLK_MIPI_CSI_DPHY 145 +#define CLK_IEP_DRC0 146 +#define CLK_IEP_DRC1 147 +#define CLK_IEP_DEU0 148 +#define CLK_IEP_DEU1 149 +#define CLK_GPU_CORE 150 +#define CLK_GPU_MEMORY 151 +#define CLK_GPU_HYD 152 +#define CLK_ATS 153 +#define CLK_TRACE 154 + +#define CLK_OUT_A 155 +#define CLK_OUT_B 156 +#define CLK_OUT_C 157 + +#endif /* _DT_BINDINGS_CLK_SUN6I_A31_H_ */ diff --git 
a/include/dt-bindings/clock/sun7i-a20-ccu.h b/include/dt-bindings/clock/sun7i-a20-ccu.h
new file mode 100644
index 0000000..045a517
--- /dev/null
+++ b/include/dt-bindings/clock/sun7i-a20-ccu.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 Priit Laes
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN7I_A20_H_
+#define _DT_BINDINGS_CLK_SUN7I_A20_H_
+
+#include <dt-bindings/clock/sun4i-a10-ccu.h>
+
+#define CLK_MBUS 166
+#define CLK_HDMI1_SLOW 167
+#define CLK_HDMI1 168
+#define CLK_OUT_A 169
+#define CLK_OUT_B 170
+
+#endif /* _DT_BINDINGS_CLK_SUN7I_A20_H_ */
diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
new file mode 100644
index 0000000..f8222b6
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ +#define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ + +#define CLK_CPUX 18 + +#define CLK_BUS_MIPI_DSI 23 +#define CLK_BUS_SS 24 +#define CLK_BUS_DMA 25 +#define CLK_BUS_MMC0 26 +#define CLK_BUS_MMC1 27 +#define CLK_BUS_MMC2 28 +#define CLK_BUS_NAND 29 +#define CLK_BUS_DRAM 30 +#define CLK_BUS_HSTIMER 31 +#define CLK_BUS_SPI0 32 +#define CLK_BUS_SPI1 33 +#define CLK_BUS_OTG 34 +#define CLK_BUS_EHCI 35 +#define CLK_BUS_OHCI 36 +#define CLK_BUS_VE 37 +#define CLK_BUS_LCD 38 +#define CLK_BUS_CSI 39 +#define CLK_BUS_DE_BE 40 +#define CLK_BUS_DE_FE 41 +#define CLK_BUS_GPU 42 +#define CLK_BUS_MSGBOX 43 +#define CLK_BUS_SPINLOCK 44 +#define CLK_BUS_DRC 45 +#define CLK_BUS_SAT 46 +#define CLK_BUS_CODEC 47 +#define CLK_BUS_PIO 48 +#define CLK_BUS_I2S0 49 +#define CLK_BUS_I2S1 50 +#define CLK_BUS_I2C0 51 +#define CLK_BUS_I2C1 52 +#define CLK_BUS_I2C2 53 +#define CLK_BUS_UART0 54 +#define CLK_BUS_UART1 55 +#define CLK_BUS_UART2 56 +#define CLK_BUS_UART3 57 +#define CLK_BUS_UART4 58 +#define CLK_NAND 59 +#define CLK_MMC0 60 +#define CLK_MMC0_SAMPLE 61 +#define CLK_MMC0_OUTPUT 62 +#define CLK_MMC1 63 +#define CLK_MMC1_SAMPLE 64 +#define CLK_MMC1_OUTPUT 65 +#define CLK_MMC2 66 +#define CLK_MMC2_SAMPLE 67 +#define CLK_MMC2_OUTPUT 68 +#define CLK_SS 69 +#define CLK_SPI0 70 +#define CLK_SPI1 71 +#define CLK_I2S0 72 +#define CLK_I2S1 73 +#define CLK_USB_PHY0 74 +#define CLK_USB_PHY1 75 +#define CLK_USB_HSIC 76 +#define CLK_USB_HSIC_12M 77 +#define CLK_USB_OHCI 78 + +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_DRC 82 +#define CLK_DRAM_DE_FE 83 +#define CLK_DRAM_DE_BE 84 +#define CLK_DE_BE 85 +#define CLK_DE_FE 86 +#define CLK_LCD_CH0 87 +#define CLK_LCD_CH1 88 +#define CLK_CSI_SCLK 89 +#define CLK_CSI_MCLK 90 +#define CLK_VE 91 +#define CLK_AC_DIG 92 +#define CLK_AC_DIG_4X 93 +#define CLK_AVS 94 + +#define CLK_DSI_SCLK 96 +#define CLK_DSI_DPHY 97 +#define CLK_DRC 98 +#define CLK_GPU 99 +#define CLK_ATS 100 + +#endif /* _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ */ diff --git a/include/dt-bindings/clock/sun8i-a83t-ccu.h b/include/dt-bindings/clock/sun8i-a83t-ccu.h new file mode 100644 index 0000000..78af508 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-a83t-ccu.h @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2017 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your 
option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ + +#define CLK_PLL_PERIPH 6 + +#define CLK_PLL_DE 9 + +#define CLK_C0CPUX 11 +#define CLK_C1CPUX 12 + +#define CLK_BUS_MIPI_DSI 19 +#define CLK_BUS_SS 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_NAND 25 +#define CLK_BUS_DRAM 26 +#define CLK_BUS_EMAC 27 +#define CLK_BUS_HSTIMER 28 +#define CLK_BUS_SPI0 29 +#define CLK_BUS_SPI1 30 +#define CLK_BUS_OTG 31 +#define CLK_BUS_EHCI0 32 +#define CLK_BUS_EHCI1 33 +#define CLK_BUS_OHCI0 34 + +#define CLK_BUS_VE 35 +#define CLK_BUS_TCON0 36 +#define CLK_BUS_TCON1 37 +#define CLK_BUS_CSI 38 +#define CLK_BUS_HDMI 39 +#define CLK_BUS_DE 40 +#define CLK_BUS_GPU 41 +#define CLK_BUS_MSGBOX 42 +#define CLK_BUS_SPINLOCK 43 + +#define CLK_BUS_SPDIF 44 +#define CLK_BUS_PIO 45 +#define CLK_BUS_I2S0 46 +#define CLK_BUS_I2S1 47 +#define CLK_BUS_I2S2 48 +#define CLK_BUS_TDM 49 + +#define CLK_BUS_I2C0 50 +#define CLK_BUS_I2C1 51 +#define CLK_BUS_I2C2 52 +#define CLK_BUS_UART0 53 +#define CLK_BUS_UART1 54 +#define CLK_BUS_UART2 55 +#define CLK_BUS_UART3 56 +#define CLK_BUS_UART4 57 + +#define CLK_NAND 59 +#define CLK_MMC0 60 +#define CLK_MMC0_SAMPLE 61 +#define CLK_MMC0_OUTPUT 62 +#define CLK_MMC1 63 +#define CLK_MMC1_SAMPLE 64 +#define CLK_MMC1_OUTPUT 65 +#define CLK_MMC2 66 +#define CLK_MMC2_SAMPLE 67 +#define CLK_MMC2_OUTPUT 68 +#define CLK_SS 69 +#define CLK_SPI0 70 +#define CLK_SPI1 71 +#define CLK_I2S0 72 +#define CLK_I2S1 73 +#define CLK_I2S2 74 +#define CLK_TDM 75 +#define CLK_SPDIF 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_USB_HSIC 79 +#define CLK_USB_HSIC_12M 80 +#define CLK_USB_OHCI0 81 + +#define CLK_DRAM_VE 83 +#define CLK_DRAM_CSI 84 + 
+#define CLK_TCON0 85 +#define CLK_TCON1 86 +#define CLK_CSI_MISC 87 +#define CLK_MIPI_CSI 88 +#define CLK_CSI_MCLK 89 +#define CLK_CSI_SCLK 90 +#define CLK_VE 91 +#define CLK_AVS 92 +#define CLK_HDMI 93 +#define CLK_HDMI_SLOW 94 + +#define CLK_MIPI_DSI0 96 +#define CLK_MIPI_DSI1 97 +#define CLK_GPU_CORE 98 +#define CLK_GPU_MEMORY 99 +#define CLK_GPU_HYD 100 + +#endif /* _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h new file mode 100644 index 0000000..7768f73 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-de2.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ +#define _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ + +#define CLK_BUS_MIXER0 0 +#define CLK_BUS_MIXER1 1 +#define CLK_BUS_WB 2 + +#define CLK_MIXER0 6 +#define CLK_MIXER1 7 +#define CLK_WB 8 + +#define CLK_BUS_ROT 9 +#define CLK_ROT 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */ diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h new file mode 100644 index 0000000..c5f7e9a --- /dev/null +++ b/include/dt-bindings/clock/sun8i-h3-ccu.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ +#define _DT_BINDINGS_CLK_SUN8I_H3_H_ + +#define CLK_PLL_VIDEO 6 + +#define CLK_PLL_PERIPH0 9 + +#define CLK_CPUX 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_NAND 25 +#define CLK_BUS_DRAM 26 +#define CLK_BUS_EMAC 27 +#define CLK_BUS_TS 28 +#define CLK_BUS_HSTIMER 29 +#define CLK_BUS_SPI0 30 +#define CLK_BUS_SPI1 31 +#define CLK_BUS_OTG 32 +#define CLK_BUS_EHCI0 33 +#define CLK_BUS_EHCI1 34 +#define CLK_BUS_EHCI2 35 +#define CLK_BUS_EHCI3 36 +#define CLK_BUS_OHCI0 37 +#define CLK_BUS_OHCI1 38 +#define CLK_BUS_OHCI2 39 +#define CLK_BUS_OHCI3 40 +#define CLK_BUS_VE 41 +#define CLK_BUS_TCON0 42 +#define CLK_BUS_TCON1 43 +#define CLK_BUS_DEINTERLACE 44 +#define CLK_BUS_CSI 45 +#define CLK_BUS_TVE 46 +#define CLK_BUS_HDMI 47 +#define CLK_BUS_DE 48 +#define CLK_BUS_GPU 49 +#define CLK_BUS_MSGBOX 50 +#define CLK_BUS_SPINLOCK 51 +#define CLK_BUS_CODEC 52 +#define CLK_BUS_SPDIF 53 +#define CLK_BUS_PIO 54 +#define CLK_BUS_THS 55 +#define CLK_BUS_I2S0 56 +#define CLK_BUS_I2S1 57 +#define CLK_BUS_I2S2 58 +#define CLK_BUS_I2C0 59 +#define CLK_BUS_I2C1 60 +#define CLK_BUS_I2C2 61 +#define CLK_BUS_UART0 62 +#define CLK_BUS_UART1 63 +#define CLK_BUS_UART2 64 +#define CLK_BUS_UART3 65 +#define CLK_BUS_SCR0 66 +#define CLK_BUS_EPHY 67 +#define CLK_BUS_DBG 68 + +#define CLK_THS 69 +#define CLK_NAND 70 +#define CLK_MMC0 71 +#define CLK_MMC0_SAMPLE 72 +#define CLK_MMC0_OUTPUT 73 +#define CLK_MMC1 74 +#define CLK_MMC1_SAMPLE 75 +#define CLK_MMC1_OUTPUT 76 +#define CLK_MMC2 77 +#define CLK_MMC2_SAMPLE 78 +#define CLK_MMC2_OUTPUT 79 +#define CLK_TS 80 +#define CLK_CE 81 +#define CLK_SPI0 82 +#define CLK_SPI1 83 +#define CLK_I2S0 84 +#define CLK_I2S1 85 +#define CLK_I2S2 86 +#define CLK_SPDIF 87 +#define CLK_USB_PHY0 88 +#define CLK_USB_PHY1 89 +#define CLK_USB_PHY2 90 +#define CLK_USB_PHY3 91 +#define CLK_USB_OHCI0 92 +#define CLK_USB_OHCI1 93 +#define CLK_USB_OHCI2 94 +#define CLK_USB_OHCI3 95 + +#define CLK_DRAM_VE 97 +#define CLK_DRAM_CSI 98 +#define CLK_DRAM_DEINTERLACE 99 +#define CLK_DRAM_TS 100 +#define CLK_DE 101 +#define CLK_TCON0 102 +#define CLK_TVE 103 +#define CLK_DEINTERLACE 104 +#define CLK_CSI_MISC 105 +#define CLK_CSI_SCLK 106 +#define CLK_CSI_MCLK 107 +#define CLK_VE 108 +#define CLK_AC_DIG 109 +#define CLK_AVS 110 +#define CLK_HDMI 111 +#define CLK_HDMI_DDC 112 + +#define CLK_GPU 114 + +/* New clocks imported in H5 */ +#define CLK_BUS_SCR1 115 + +#endif /* _DT_BINDINGS_CLK_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/clock/sun8i-r-ccu.h b/include/dt-bindings/clock/sun8i-r-ccu.h new file mode 100644 index 0000000..779d20a --- /dev/null +++ b/include/dt-bindings/clock/sun8i-r-ccu.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ +#define _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ + +#define CLK_AR100 0 + +#define CLK_APB0_PIO 3 +#define CLK_APB0_IR 4 +#define CLK_APB0_TIMER 5 +#define CLK_APB0_RSB 6 +#define CLK_APB0_UART 7 +/* 8 is reserved for CLK_APB0_W1 on A31 */ +#define CLK_APB0_I2C 9 +#define CLK_APB0_TWD 10 + +#define CLK_IR 11 + +#endif /* _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h new file mode 100644 index 0000000..f9e15a2 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-r40-ccu.h @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2017 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_ +#define _DT_BINDINGS_CLK_SUN8I_R40_H_ + +#define CLK_PLL_VIDEO0 7 + +#define CLK_PLL_VIDEO1 16 + +#define CLK_CPU 24 + +#define CLK_BUS_MIPI_DSI 29 +#define CLK_BUS_CE 30 +#define CLK_BUS_DMA 31 +#define CLK_BUS_MMC0 32 +#define CLK_BUS_MMC1 33 +#define CLK_BUS_MMC2 34 +#define CLK_BUS_MMC3 35 +#define CLK_BUS_NAND 36 +#define CLK_BUS_DRAM 37 +#define CLK_BUS_EMAC 38 +#define CLK_BUS_TS 39 +#define CLK_BUS_HSTIMER 40 +#define CLK_BUS_SPI0 41 +#define CLK_BUS_SPI1 42 +#define CLK_BUS_SPI2 43 +#define CLK_BUS_SPI3 44 +#define CLK_BUS_SATA 45 +#define CLK_BUS_OTG 46 +#define CLK_BUS_EHCI0 47 +#define CLK_BUS_EHCI1 48 +#define CLK_BUS_EHCI2 49 +#define CLK_BUS_OHCI0 50 +#define CLK_BUS_OHCI1 51 +#define CLK_BUS_OHCI2 52 +#define CLK_BUS_VE 53 +#define CLK_BUS_MP 54 +#define CLK_BUS_DEINTERLACE 55 +#define CLK_BUS_CSI0 56 +#define CLK_BUS_CSI1 57 +#define CLK_BUS_HDMI1 58 +#define CLK_BUS_HDMI0 59 +#define CLK_BUS_DE 60 +#define CLK_BUS_TVE0 61 +#define CLK_BUS_TVE1 62 +#define CLK_BUS_TVE_TOP 63 +#define CLK_BUS_GMAC 64 +#define CLK_BUS_GPU 65 +#define CLK_BUS_TVD0 66 +#define CLK_BUS_TVD1 67 +#define CLK_BUS_TVD2 68 +#define CLK_BUS_TVD3 69 +#define CLK_BUS_TVD_TOP 70 +#define CLK_BUS_TCON_LCD0 71 +#define CLK_BUS_TCON_LCD1 72 +#define CLK_BUS_TCON_TV0 73 +#define CLK_BUS_TCON_TV1 74 +#define CLK_BUS_TCON_TOP 75 +#define CLK_BUS_CODEC 76 +#define CLK_BUS_SPDIF 77 +#define CLK_BUS_AC97 78 +#define CLK_BUS_PIO 79 +#define CLK_BUS_IR0 80 +#define CLK_BUS_IR1 81 +#define CLK_BUS_THS 82 +#define CLK_BUS_KEYPAD 83 +#define CLK_BUS_I2S0 84 +#define CLK_BUS_I2S1 85 +#define CLK_BUS_I2S2 86 +#define CLK_BUS_I2C0 87 +#define CLK_BUS_I2C1 88 +#define CLK_BUS_I2C2 89 +#define CLK_BUS_I2C3 90 +#define CLK_BUS_CAN 91 +#define CLK_BUS_SCR 92 +#define CLK_BUS_PS20 93 +#define CLK_BUS_PS21 94 +#define CLK_BUS_I2C4 95 +#define CLK_BUS_UART0 96 +#define CLK_BUS_UART1 97 +#define CLK_BUS_UART2 98 +#define CLK_BUS_UART3 99 +#define CLK_BUS_UART4 100 +#define CLK_BUS_UART5 101 +#define CLK_BUS_UART6 102 +#define CLK_BUS_UART7 103 +#define CLK_BUS_DBG 104 + +#define CLK_THS 105 +#define CLK_NAND 106 +#define CLK_MMC0 107 +#define CLK_MMC1 108 +#define CLK_MMC2 109 +#define CLK_MMC3 110 +#define CLK_TS 111 +#define CLK_CE 112 +#define CLK_SPI0 113 +#define CLK_SPI1 114 +#define CLK_SPI2 115 +#define CLK_SPI3 116 +#define CLK_I2S0 117 +#define CLK_I2S1 118 +#define CLK_I2S2 119 +#define CLK_AC97 120 +#define CLK_SPDIF 121 +#define CLK_KEYPAD 122 +#define CLK_SATA 123 +#define CLK_USB_PHY0 124 +#define CLK_USB_PHY1 125 +#define CLK_USB_PHY2 126 +#define CLK_USB_OHCI0 127 +#define CLK_USB_OHCI1 128 +#define CLK_USB_OHCI2 129 +#define CLK_IR0 130 +#define CLK_IR1 131 + +#define CLK_DRAM_VE 133 +#define CLK_DRAM_CSI0 134 +#define CLK_DRAM_CSI1 135 +#define CLK_DRAM_TS 136 +#define CLK_DRAM_TVD 137 +#define CLK_DRAM_MP 138 +#define CLK_DRAM_DEINTERLACE 139 +#define CLK_DE 140 +#define CLK_MP 141 +#define CLK_TCON_LCD0 142 +#define CLK_TCON_LCD1 143 +#define CLK_TCON_TV0 144 +#define CLK_TCON_TV1 145 +#define CLK_DEINTERLACE 146 +#define CLK_CSI1_MCLK 147 +#define CLK_CSI_SCLK 148 +#define CLK_CSI0_MCLK 149 +#define CLK_VE 150 +#define CLK_CODEC 151 +#define CLK_AVS 152 +#define CLK_HDMI 153 +#define 
CLK_HDMI_SLOW 154 + +#define CLK_DSI_DPHY 156 +#define CLK_TVE0 157 +#define CLK_TVE1 158 +#define CLK_TVD0 159 +#define CLK_TVD1 160 +#define CLK_TVD2 161 +#define CLK_TVD3 162 +#define CLK_GPU 163 +#define CLK_OUTA 164 +#define CLK_OUTB 165 + +#endif /* _DT_BINDINGS_CLK_SUN8I_R40_H_ */ diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h new file mode 100644 index 0000000..25164d7 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-tcon-top.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* Copyright (C) 2018 Jernej Skrabec */ + +#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ +#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ + +#define CLK_TCON_TOP_TV0 0 +#define CLK_TCON_TOP_TV1 1 +#define CLK_TCON_TOP_DSI 2 + +#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h new file mode 100644 index 0000000..014ac61 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016 Icenowy Zheng + * + * Based on sun8i-h3-ccu.h, which is: + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_V3S_H_ +#define _DT_BINDINGS_CLK_SUN8I_V3S_H_ + +#define CLK_CPU 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_DRAM 25 +#define CLK_BUS_EMAC 26 +#define CLK_BUS_HSTIMER 27 +#define CLK_BUS_SPI0 28 +#define CLK_BUS_OTG 29 +#define CLK_BUS_EHCI0 30 +#define CLK_BUS_OHCI0 31 +#define CLK_BUS_VE 32 +#define CLK_BUS_TCON0 33 +#define CLK_BUS_CSI 34 +#define CLK_BUS_DE 35 +#define CLK_BUS_CODEC 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_I2C0 38 +#define CLK_BUS_I2C1 39 +#define CLK_BUS_UART0 40 +#define CLK_BUS_UART1 41 +#define CLK_BUS_UART2 42 +#define CLK_BUS_EPHY 43 +#define CLK_BUS_DBG 44 + +#define CLK_MMC0 45 +#define CLK_MMC0_SAMPLE 46 +#define CLK_MMC0_OUTPUT 47 +#define CLK_MMC1 48 +#define CLK_MMC1_SAMPLE 49 +#define CLK_MMC1_OUTPUT 50 +#define CLK_MMC2 51 +#define CLK_MMC2_SAMPLE 52 +#define CLK_MMC2_OUTPUT 53 +#define CLK_CE 54 +#define CLK_SPI0 55 +#define CLK_USB_PHY0 56 +#define CLK_USB_OHCI0 57 + +#define CLK_DRAM_VE 59 +#define CLK_DRAM_CSI 60 +#define CLK_DRAM_EHCI 61 +#define CLK_DRAM_OHCI 62 +#define CLK_DE 63 +#define CLK_TCON0 64 +#define CLK_CSI_MISC 65 +#define CLK_CSI0_MCLK 66 +#define CLK_CSI1_SCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_VE 69 +#define CLK_AC_DIG 70 +#define CLK_AVS 71 + +#define CLK_MIPI_CSI 73 + +/* Clocks not available on V3s */ +#define CLK_BUS_I2S0 75 +#define CLK_I2S0 76 + +#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-ccu.h b/include/dt-bindings/clock/sun9i-a80-ccu.h new file mode 100644 index 0000000..6ea1492 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-ccu.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ + +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_PERIPH0 3 + +#define CLK_C0CPUX 12 +#define CLK_C1CPUX 13 + +#define CLK_OUT_A 27 +#define CLK_OUT_B 28 + +#define CLK_NAND0_0 29 +#define CLK_NAND0_1 30 +#define CLK_NAND1_0 31 +#define CLK_NAND1_1 32 +#define CLK_MMC0 33 +#define CLK_MMC0_SAMPLE 34 +#define CLK_MMC0_OUTPUT 35 +#define CLK_MMC1 36 +#define CLK_MMC1_SAMPLE 37 +#define CLK_MMC1_OUTPUT 38 +#define CLK_MMC2 39 +#define CLK_MMC2_SAMPLE 40 +#define CLK_MMC2_OUTPUT 41 +#define CLK_MMC3 42 +#define CLK_MMC3_SAMPLE 43 +#define CLK_MMC3_OUTPUT 44 +#define CLK_TS 45 +#define CLK_SS 46 +#define CLK_SPI0 47 +#define CLK_SPI1 48 +#define CLK_SPI2 49 +#define CLK_SPI3 50 +#define CLK_I2S0 51 +#define CLK_I2S1 52 +#define CLK_SPDIF 53 +#define CLK_SDRAM 54 +#define CLK_DE 55 +#define CLK_EDP 56 +#define CLK_MP 57 +#define CLK_LCD0 58 +#define CLK_LCD1 59 +#define CLK_MIPI_DSI0 60 +#define CLK_MIPI_DSI1 61 +#define CLK_HDMI 62 +#define CLK_HDMI_SLOW 63 +#define CLK_MIPI_CSI 64 +#define CLK_CSI_ISP 65 +#define CLK_CSI_MISC 66 +#define CLK_CSI0_MCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_FD 69 +#define CLK_VE 70 +#define CLK_AVS 71 +#define CLK_GPU_CORE 72 +#define CLK_GPU_MEMORY 73 +#define CLK_GPU_AXI 74 +#define CLK_SATA 75 +#define CLK_AC97 76 +#define CLK_MIPI_HSI 77 +#define CLK_GPADC 78 +#define CLK_CIR_TX 79 + +#define CLK_BUS_FD 80 +#define CLK_BUS_VE 81 +#define CLK_BUS_GPU_CTRL 82 +#define CLK_BUS_SS 83 +#define CLK_BUS_MMC 84 +#define CLK_BUS_NAND0 85 +#define CLK_BUS_NAND1 86 +#define CLK_BUS_SDRAM 87 +#define CLK_BUS_MIPI_HSI 88 +#define CLK_BUS_SATA 89 +#define CLK_BUS_TS 90 +#define CLK_BUS_SPI0 91 +#define CLK_BUS_SPI1 92 +#define CLK_BUS_SPI2 93 +#define CLK_BUS_SPI3 94 + +#define CLK_BUS_OTG 95 +#define CLK_BUS_USB 96 +#define CLK_BUS_GMAC 97 +#define CLK_BUS_MSGBOX 98 +#define CLK_BUS_SPINLOCK 99 +#define CLK_BUS_HSTIMER 100 +#define CLK_BUS_DMA 101 + +#define CLK_BUS_LCD0 102 +#define CLK_BUS_LCD1 103 +#define CLK_BUS_EDP 104 +#define CLK_BUS_CSI 105 +#define CLK_BUS_HDMI 106 +#define CLK_BUS_DE 107 +#define CLK_BUS_MP 108 +#define CLK_BUS_MIPI_DSI 109 + +#define CLK_BUS_SPDIF 110 +#define CLK_BUS_PIO 111 +#define CLK_BUS_AC97 112 +#define CLK_BUS_I2S0 113 +#define CLK_BUS_I2S1 114 +#define CLK_BUS_LRADC 115 +#define CLK_BUS_GPADC 116 +#define CLK_BUS_TWD 117 +#define CLK_BUS_CIR_TX 118 + +#define CLK_BUS_I2C0 119 +#define CLK_BUS_I2C1 120 +#define CLK_BUS_I2C2 121 +#define CLK_BUS_I2C3 122 +#define CLK_BUS_I2C4 123 +#define CLK_BUS_UART0 124 +#define CLK_BUS_UART1 125 +#define CLK_BUS_UART2 126 +#define CLK_BUS_UART3 127 +#define CLK_BUS_UART4 128 +#define CLK_BUS_UART5 129 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-de.h b/include/dt-bindings/clock/sun9i-a80-de.h new file mode 100644 index 0000000..3dad6c3 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-de.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. 
+ * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ + +#define CLK_FE0 0 +#define CLK_FE1 1 +#define CLK_FE2 2 +#define CLK_IEP_DEU0 3 +#define CLK_IEP_DEU1 4 +#define CLK_BE0 5 +#define CLK_BE1 6 +#define CLK_BE2 7 +#define CLK_IEP_DRC0 8 +#define CLK_IEP_DRC1 9 +#define CLK_MERGE 10 + +#define CLK_DRAM_FE0 11 +#define CLK_DRAM_FE1 12 +#define CLK_DRAM_FE2 13 +#define CLK_DRAM_DEU0 14 +#define CLK_DRAM_DEU1 15 +#define CLK_DRAM_BE0 16 +#define CLK_DRAM_BE1 17 +#define CLK_DRAM_BE2 18 +#define CLK_DRAM_DRC0 19 +#define CLK_DRAM_DRC1 20 + +#define CLK_BUS_FE0 21 +#define CLK_BUS_FE1 22 +#define CLK_BUS_FE2 23 +#define CLK_BUS_DEU0 24 +#define CLK_BUS_DEU1 25 +#define CLK_BUS_BE0 26 +#define CLK_BUS_BE1 27 +#define CLK_BUS_BE2 28 +#define CLK_BUS_DRC0 29 +#define CLK_BUS_DRC1 30 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-usb.h b/include/dt-bindings/clock/sun9i-a80-usb.h new file mode 100644 index 0000000..783a60d --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-usb.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ + +#define CLK_BUS_HCI0 0 +#define CLK_USB_OHCI0 1 +#define CLK_BUS_HCI1 2 +#define CLK_BUS_HCI2 3 +#define CLK_USB_OHCI2 4 + +#define CLK_USB0_PHY 5 +#define CLK_USB1_HSIC 6 +#define CLK_USB1_PHY 7 +#define CLK_USB2_HSIC 8 +#define CLK_USB2_PHY 9 +#define CLK_USB_HSIC 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h new file mode 100644 index 0000000..f5ac155 --- /dev/null +++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) + * + * Copyright (c) 2018 Icenowy Zheng + * + */ + +#ifndef _DT_BINDINGS_CLK_SUNIV_F1C100S_H_ +#define _DT_BINDINGS_CLK_SUNIV_F1C100S_H_ + +#define CLK_CPU 11 + +#define CLK_BUS_DMA 14 +#define CLK_BUS_MMC0 15 +#define CLK_BUS_MMC1 16 +#define CLK_BUS_DRAM 17 +#define CLK_BUS_SPI0 18 +#define CLK_BUS_SPI1 19 +#define CLK_BUS_OTG 20 +#define CLK_BUS_VE 21 +#define CLK_BUS_LCD 22 +#define CLK_BUS_DEINTERLACE 23 +#define CLK_BUS_CSI 24 +#define CLK_BUS_TVD 25 +#define CLK_BUS_TVE 26 +#define CLK_BUS_DE_BE 27 +#define CLK_BUS_DE_FE 28 +#define CLK_BUS_CODEC 29 +#define CLK_BUS_SPDIF 30 +#define CLK_BUS_IR 31 +#define CLK_BUS_RSB 32 +#define CLK_BUS_I2S0 33 +#define CLK_BUS_I2C0 34 +#define CLK_BUS_I2C1 35 +#define CLK_BUS_I2C2 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_UART0 38 +#define CLK_BUS_UART1 39 +#define CLK_BUS_UART2 40 + +#define CLK_MMC0 41 +#define CLK_MMC0_SAMPLE 42 +#define CLK_MMC0_OUTPUT 43 +#define CLK_MMC1 44 +#define CLK_MMC1_SAMPLE 45 +#define CLK_MMC1_OUTPUT 46 +#define CLK_I2S 47 +#define CLK_SPDIF 48 + +#define CLK_USB_PHY0 49 + +#define CLK_DRAM_VE 50 +#define CLK_DRAM_CSI 51 +#define CLK_DRAM_DEINTERLACE 52 +#define CLK_DRAM_TVD 53 +#define CLK_DRAM_DE_FE 54 +#define CLK_DRAM_DE_BE 55 + +#define CLK_DE_BE 56 +#define CLK_DE_FE 57 +#define CLK_TCON 58 +#define CLK_DEINTERLACE 59 +#define CLK_TVE2_CLK 60 +#define CLK_TVE1_CLK 61 +#define CLK_TVD 62 +#define CLK_CSI 63 +#define CLK_VE 64 +#define CLK_CODEC 65 +#define CLK_AVS 66 + +#endif diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h new file mode 100644 index 0000000..bb5c2c9 --- /dev/null +++ b/include/dt-bindings/clock/tegra114-car.h @@ -0,0 +1,344 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra114-car. + * + * The first 160 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 160 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 160 and + * above. + */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA114_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA114_CAR_H + +/* 0 */ +/* 1 */ +/* 2 */ +/* 3 */ +#define TEGRA114_CLK_RTC 4 +#define TEGRA114_CLK_TIMER 5 +#define TEGRA114_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +/* 8 */ +#define TEGRA114_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA114_CLK_I2S1 11 +#define TEGRA114_CLK_I2C1 12 +#define TEGRA114_CLK_NDFLASH 13 +#define TEGRA114_CLK_SDMMC1 14 +#define TEGRA114_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA114_CLK_PWM 17 +#define TEGRA114_CLK_I2S2 18 +#define TEGRA114_CLK_EPP 19 +/* 20 (register bit affects vi and vi_sensor) */ +#define TEGRA114_CLK_GR2D 21 +#define TEGRA114_CLK_USBD 22 +#define TEGRA114_CLK_ISP 23 +#define TEGRA114_CLK_GR3D 24 +/* 25 */ +#define TEGRA114_CLK_DISP2 26 +#define TEGRA114_CLK_DISP1 27 +#define TEGRA114_CLK_HOST1X 28 +#define TEGRA114_CLK_VCP 29 +#define TEGRA114_CLK_I2S0 30 +/* 31 */ + +#define TEGRA114_CLK_MC 32 +/* 33 */ +#define TEGRA114_CLK_APBDMA 34 +/* 35 */ +#define TEGRA114_CLK_KBC 36 +/* 37 */ +/* 38 */ +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA114_CLK_KFUSE 40 +#define TEGRA114_CLK_SBC1 41 +#define TEGRA114_CLK_NOR 42 +/* 43 */ +#define TEGRA114_CLK_SBC2 44 +/* 45 */ +#define TEGRA114_CLK_SBC3 46 +#define TEGRA114_CLK_I2C5 47 +#define TEGRA114_CLK_DSIA 48 +/* 49 */ +#define TEGRA114_CLK_MIPI 50 +#define TEGRA114_CLK_HDMI 51 +#define TEGRA114_CLK_CSI 52 +/* 53 */ +#define TEGRA114_CLK_I2C2 54 +#define TEGRA114_CLK_UARTC 55 +#define TEGRA114_CLK_MIPI_CAL 56 +#define TEGRA114_CLK_EMC 57 +#define TEGRA114_CLK_USB2 58 +#define TEGRA114_CLK_USB3 59 +/* 60 */ +#define TEGRA114_CLK_VDE 61 +#define TEGRA114_CLK_BSEA 62 +#define TEGRA114_CLK_BSEV 63 + +/* 64 */ +#define TEGRA114_CLK_UARTD 65 +/* 66 */ +#define TEGRA114_CLK_I2C3 67 +#define TEGRA114_CLK_SBC4 68 +#define TEGRA114_CLK_SDMMC3 69 +/* 70 */ +#define TEGRA114_CLK_OWR 71 +/* 72 */ +#define TEGRA114_CLK_CSITE 73 +/* 74 */ +/* 75 */ +#define TEGRA114_CLK_LA 76 +#define TEGRA114_CLK_TRACE 77 +#define TEGRA114_CLK_SOC_THERM 78 +#define TEGRA114_CLK_DTV 79 +#define TEGRA114_CLK_NDSPEED 80 +#define TEGRA114_CLK_I2CSLOW 81 +#define TEGRA114_CLK_DSIB 82 +#define TEGRA114_CLK_TSEC 83 +/* 84 */ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA114_CLK_XUSB_HOST 89 +/* 90 */ +#define TEGRA114_CLK_MSENC 91 +#define TEGRA114_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA114_CLK_MSELECT 99 +#define TEGRA114_CLK_TSENSOR 100 +#define TEGRA114_CLK_I2S3 101 +#define TEGRA114_CLK_I2S4 102 +#define TEGRA114_CLK_I2C4 103 +#define TEGRA114_CLK_SBC5 104 +#define TEGRA114_CLK_SBC6 105 +#define TEGRA114_CLK_D_AUDIO 106 +#define TEGRA114_CLK_APBIF 107 +#define TEGRA114_CLK_DAM0 108 
+#define TEGRA114_CLK_DAM1 109 +#define TEGRA114_CLK_DAM2 110 +#define TEGRA114_CLK_HDA2CODEC_2X 111 +/* 112 */ +#define TEGRA114_CLK_AUDIO0_2X 113 +#define TEGRA114_CLK_AUDIO1_2X 114 +#define TEGRA114_CLK_AUDIO2_2X 115 +#define TEGRA114_CLK_AUDIO3_2X 116 +#define TEGRA114_CLK_AUDIO4_2X 117 +#define TEGRA114_CLK_SPDIF_2X 118 +#define TEGRA114_CLK_ACTMON 119 +#define TEGRA114_CLK_EXTERN1 120 +#define TEGRA114_CLK_EXTERN2 121 +#define TEGRA114_CLK_EXTERN3 122 +/* 123 */ +/* 124 */ +#define TEGRA114_CLK_HDA 125 +/* 126 */ +#define TEGRA114_CLK_SE 127 + +#define TEGRA114_CLK_HDA2HDMI 128 +/* 129 */ +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +#define TEGRA114_CLK_CEC 136 +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */ +/* xusb_host_src and xusb_ss_src) */ +#define TEGRA114_CLK_CILAB 144 +#define TEGRA114_CLK_CILCD 145 +#define TEGRA114_CLK_CILE 146 +#define TEGRA114_CLK_DSIALP 147 +#define TEGRA114_CLK_DSIBLP 148 +/* 149 */ +#define TEGRA114_CLK_DDS 150 +/* 151 */ +#define TEGRA114_CLK_DP2 152 +#define TEGRA114_CLK_AMX 153 +#define TEGRA114_CLK_ADX 154 +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA114_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +/* 161 */ +/* 162 */ +/* 163 */ +/* 164 */ +/* 165 */ +/* 166 */ +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +/* 171 */ +/* 172 */ +/* 173 */ +/* 174 */ +/* 175 */ +/* 176 */ +/* 177 */ +/* 178 */ +/* 179 */ +/* 180 */ +/* 181 */ +/* 182 */ +/* 183 */ +/* 184 */ +/* 185 */ +/* 186 */ +/* 187 */ +/* 188 */ +/* 189 */ +/* 190 */ +/* 191 */ + +#define TEGRA114_CLK_UARTB 192 +#define TEGRA114_CLK_VFIR 193 +#define TEGRA114_CLK_SPDIF_IN 194 +#define TEGRA114_CLK_SPDIF_OUT 195 +#define TEGRA114_CLK_VI 196 +#define TEGRA114_CLK_VI_SENSOR 197 +#define TEGRA114_CLK_FUSE 198 +#define TEGRA114_CLK_FUSE_BURN 199 +#define TEGRA114_CLK_CLK_32K 200 +#define TEGRA114_CLK_CLK_M 201 +#define TEGRA114_CLK_CLK_M_DIV2 202 +#define TEGRA114_CLK_CLK_M_DIV4 203 +#define TEGRA114_CLK_PLL_REF 204 +#define TEGRA114_CLK_PLL_C 205 +#define TEGRA114_CLK_PLL_C_OUT1 206 +#define TEGRA114_CLK_PLL_C2 207 +#define TEGRA114_CLK_PLL_C3 208 +#define TEGRA114_CLK_PLL_M 209 +#define TEGRA114_CLK_PLL_M_OUT1 210 +#define TEGRA114_CLK_PLL_P 211 +#define TEGRA114_CLK_PLL_P_OUT1 212 +#define TEGRA114_CLK_PLL_P_OUT2 213 +#define TEGRA114_CLK_PLL_P_OUT3 214 +#define TEGRA114_CLK_PLL_P_OUT4 215 +#define TEGRA114_CLK_PLL_A 216 +#define TEGRA114_CLK_PLL_A_OUT0 217 +#define TEGRA114_CLK_PLL_D 218 +#define TEGRA114_CLK_PLL_D_OUT0 219 +#define TEGRA114_CLK_PLL_D2 220 +#define TEGRA114_CLK_PLL_D2_OUT0 221 +#define TEGRA114_CLK_PLL_U 222 +#define TEGRA114_CLK_PLL_U_480M 223 + +#define TEGRA114_CLK_PLL_U_60M 224 +#define TEGRA114_CLK_PLL_U_48M 225 +#define TEGRA114_CLK_PLL_U_12M 226 +#define TEGRA114_CLK_PLL_X 227 +#define TEGRA114_CLK_PLL_X_OUT0 228 +#define TEGRA114_CLK_PLL_RE_VCO 229 +#define TEGRA114_CLK_PLL_RE_OUT 230 +#define TEGRA114_CLK_PLL_E_OUT0 231 +#define TEGRA114_CLK_SPDIF_IN_SYNC 232 +#define TEGRA114_CLK_I2S0_SYNC 233 +#define TEGRA114_CLK_I2S1_SYNC 234 +#define TEGRA114_CLK_I2S2_SYNC 235 +#define TEGRA114_CLK_I2S3_SYNC 236 +#define TEGRA114_CLK_I2S4_SYNC 237 +#define TEGRA114_CLK_VIMCLK_SYNC 238 +#define TEGRA114_CLK_AUDIO0 239 +#define TEGRA114_CLK_AUDIO1 240 +#define TEGRA114_CLK_AUDIO2 241 +#define TEGRA114_CLK_AUDIO3 242 +#define TEGRA114_CLK_AUDIO4 243 +#define TEGRA114_CLK_SPDIF 244 +#define TEGRA114_CLK_CLK_OUT_1 245 +#define TEGRA114_CLK_CLK_OUT_2 246 
+#define TEGRA114_CLK_CLK_OUT_3 247 +#define TEGRA114_CLK_BLINK 248 +/* 249 */ +/* 250 */ +/* 251 */ +#define TEGRA114_CLK_XUSB_HOST_SRC 252 +#define TEGRA114_CLK_XUSB_FALCON_SRC 253 +#define TEGRA114_CLK_XUSB_FS_SRC 254 +#define TEGRA114_CLK_XUSB_SS_SRC 255 + +#define TEGRA114_CLK_XUSB_DEV_SRC 256 +#define TEGRA114_CLK_XUSB_DEV 257 +#define TEGRA114_CLK_XUSB_HS_SRC 258 +#define TEGRA114_CLK_SCLK 259 +#define TEGRA114_CLK_HCLK 260 +#define TEGRA114_CLK_PCLK 261 +#define TEGRA114_CLK_CCLK_G 262 +#define TEGRA114_CLK_CCLK_LP 263 +#define TEGRA114_CLK_DFLL_REF 264 +#define TEGRA114_CLK_DFLL_SOC 265 +/* 266 */ +/* 267 */ +/* 268 */ +/* 269 */ +/* 270 */ +/* 271 */ +/* 272 */ +/* 273 */ +/* 274 */ +/* 275 */ +/* 276 */ +/* 277 */ +/* 278 */ +/* 279 */ +/* 280 */ +/* 281 */ +/* 282 */ +/* 283 */ +/* 284 */ +/* 285 */ +/* 286 */ +/* 287 */ + +/* 288 */ +/* 289 */ +/* 290 */ +/* 291 */ +/* 292 */ +/* 293 */ +/* 294 */ +/* 295 */ +/* 296 */ +/* 297 */ +/* 298 */ +/* 299 */ +#define TEGRA114_CLK_AUDIO0_MUX 300 +#define TEGRA114_CLK_AUDIO1_MUX 301 +#define TEGRA114_CLK_AUDIO2_MUX 302 +#define TEGRA114_CLK_AUDIO3_MUX 303 +#define TEGRA114_CLK_AUDIO4_MUX 304 +#define TEGRA114_CLK_SPDIF_MUX 305 +#define TEGRA114_CLK_CLK_OUT_1_MUX 306 +#define TEGRA114_CLK_CLK_OUT_2_MUX 307 +#define TEGRA114_CLK_CLK_OUT_3_MUX 308 +#define TEGRA114_CLK_DSIA_MUX 309 +#define TEGRA114_CLK_DSIB_MUX 310 +#define TEGRA114_CLK_XUSB_SS_DIV2 311 +#define TEGRA114_CLK_CLK_MAX 312 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA114_CAR_H */ diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h new file mode 100644 index 0000000..4331f1d --- /dev/null +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra124-car or + * nvidia,tegra132-car. + * + * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 185 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 185 and + * above. 
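+ *
+ * For example, under the numbering scheme above, TEGRA124_CLK_EMC (ID 57)
+ * corresponds to bit 57 % 32 = 25 of the second 32-bit CLK_OUT_ENB
+ * register, whereas TEGRA124_CLK_UARTB (ID 192) has no such one-to-one
+ * bit, because register bit 7 gates both uartb and vfir.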
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+
+/* 0 */
+/* 1 */
+/* 2 */
+#define TEGRA124_CLK_ISPB 3
+#define TEGRA124_CLK_RTC 4
+#define TEGRA124_CLK_TIMER 5
+#define TEGRA124_CLK_UARTA 6
+/* 7 (register bit affects uartb and vfir) */
+/* 8 */
+#define TEGRA124_CLK_SDMMC2 9
+/* 10 (register bit affects spdif_in and spdif_out) */
+#define TEGRA124_CLK_I2S1 11
+#define TEGRA124_CLK_I2C1 12
+/* 13 */
+#define TEGRA124_CLK_SDMMC1 14
+#define TEGRA124_CLK_SDMMC4 15
+/* 16 */
+#define TEGRA124_CLK_PWM 17
+#define TEGRA124_CLK_I2S2 18
+/* 20 (register bit affects vi and vi_sensor) */
+/* 21 */
+#define TEGRA124_CLK_USBD 22
+#define TEGRA124_CLK_ISP 23
+/* 24 */
+/* 25 */
+#define TEGRA124_CLK_DISP2 26
+#define TEGRA124_CLK_DISP1 27
+#define TEGRA124_CLK_HOST1X 28
+#define TEGRA124_CLK_VCP 29
+#define TEGRA124_CLK_I2S0 30
+/* 31 */
+
+#define TEGRA124_CLK_MC 32
+/* 33 */
+#define TEGRA124_CLK_APBDMA 34
+/* 35 */
+#define TEGRA124_CLK_KBC 36
+/* 37 */
+/* 38 */
+/* 39 (register bit affects fuse and fuse_burn) */
+#define TEGRA124_CLK_KFUSE 40
+#define TEGRA124_CLK_SBC1 41
+#define TEGRA124_CLK_NOR 42
+/* 43 */
+#define TEGRA124_CLK_SBC2 44
+/* 45 */
+#define TEGRA124_CLK_SBC3 46
+#define TEGRA124_CLK_I2C5 47
+#define TEGRA124_CLK_DSIA 48
+/* 49 */
+#define TEGRA124_CLK_MIPI 50
+#define TEGRA124_CLK_HDMI 51
+#define TEGRA124_CLK_CSI 52
+/* 53 */
+#define TEGRA124_CLK_I2C2 54
+#define TEGRA124_CLK_UARTC 55
+#define TEGRA124_CLK_MIPI_CAL 56
+#define TEGRA124_CLK_EMC 57
+#define TEGRA124_CLK_USB2 58
+#define TEGRA124_CLK_USB3 59
+/* 60 */
+#define TEGRA124_CLK_VDE 61
+#define TEGRA124_CLK_BSEA 62
+#define TEGRA124_CLK_BSEV 63
+
+/* 64 */
+#define TEGRA124_CLK_UARTD 65
+/* 66 */
+#define TEGRA124_CLK_I2C3 67
+#define TEGRA124_CLK_SBC4 68
+#define TEGRA124_CLK_SDMMC3 69
+#define TEGRA124_CLK_PCIE 70
+#define TEGRA124_CLK_OWR 71
+#define TEGRA124_CLK_AFI 72
+#define TEGRA124_CLK_CSITE 73
+/* 74 */
+/* 75 */
+#define TEGRA124_CLK_LA 76
+#define TEGRA124_CLK_TRACE 77
+#define TEGRA124_CLK_SOC_THERM 78
+#define TEGRA124_CLK_DTV 79
+/* 80 */
+#define TEGRA124_CLK_I2CSLOW 81
+#define TEGRA124_CLK_DSIB 82
+#define TEGRA124_CLK_TSEC 83
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA124_CLK_XUSB_HOST 89
+/* 90 */
+#define TEGRA124_CLK_MSENC 91
+#define TEGRA124_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA124_CLK_MSELECT 99
+#define TEGRA124_CLK_TSENSOR 100
+#define TEGRA124_CLK_I2S3 101
+#define TEGRA124_CLK_I2S4 102
+#define TEGRA124_CLK_I2C4 103
+#define TEGRA124_CLK_SBC5 104
+#define TEGRA124_CLK_SBC6 105
+#define TEGRA124_CLK_D_AUDIO 106
+#define TEGRA124_CLK_APBIF 107
+#define TEGRA124_CLK_DAM0 108
+#define TEGRA124_CLK_DAM1 109
+#define TEGRA124_CLK_DAM2 110
+#define TEGRA124_CLK_HDA2CODEC_2X 111
+/* 112 */
+#define TEGRA124_CLK_AUDIO0_2X 113
+#define TEGRA124_CLK_AUDIO1_2X 114
+#define TEGRA124_CLK_AUDIO2_2X 115
+#define TEGRA124_CLK_AUDIO3_2X 116
+#define TEGRA124_CLK_AUDIO4_2X 117
+#define TEGRA124_CLK_SPDIF_2X 118
+#define TEGRA124_CLK_ACTMON 119
+#define TEGRA124_CLK_EXTERN1 120
+#define TEGRA124_CLK_EXTERN2 121
+#define TEGRA124_CLK_EXTERN3 122
+#define TEGRA124_CLK_SATA_OOB 123
+#define TEGRA124_CLK_SATA 124
+#define TEGRA124_CLK_HDA 125
+/* 126 */
+#define TEGRA124_CLK_SE 127
+
+#define TEGRA124_CLK_HDA2HDMI 128
+#define TEGRA124_CLK_SATA_COLD 129
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+#define
TEGRA124_CLK_CEC 136 +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */ +/* xusb_host_src and xusb_ss_src) */ +#define TEGRA124_CLK_CILAB 144 +#define TEGRA124_CLK_CILCD 145 +#define TEGRA124_CLK_CILE 146 +#define TEGRA124_CLK_DSIALP 147 +#define TEGRA124_CLK_DSIBLP 148 +#define TEGRA124_CLK_ENTROPY 149 +#define TEGRA124_CLK_DDS 150 +/* 151 */ +#define TEGRA124_CLK_DP2 152 +#define TEGRA124_CLK_AMX 153 +#define TEGRA124_CLK_ADX 154 +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA124_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +/* 161 */ +/* 162 */ +/* 163 */ +/* 164 */ +/* 165 */ +#define TEGRA124_CLK_I2C6 166 +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +#define TEGRA124_CLK_VIM2_CLK 171 +/* 172 */ +/* 173 */ +/* 174 */ +/* 175 */ +#define TEGRA124_CLK_HDMI_AUDIO 176 +#define TEGRA124_CLK_CLK72MHZ 177 +#define TEGRA124_CLK_VIC03 178 +/* 179 */ +#define TEGRA124_CLK_ADX1 180 +#define TEGRA124_CLK_DPAUX 181 +#define TEGRA124_CLK_SOR0 182 +/* 183 */ +#define TEGRA124_CLK_GPU 184 +#define TEGRA124_CLK_AMX1 185 +/* 186 */ +/* 187 */ +/* 188 */ +/* 189 */ +/* 190 */ +/* 191 */ +#define TEGRA124_CLK_UARTB 192 +#define TEGRA124_CLK_VFIR 193 +#define TEGRA124_CLK_SPDIF_IN 194 +#define TEGRA124_CLK_SPDIF_OUT 195 +#define TEGRA124_CLK_VI 196 +#define TEGRA124_CLK_VI_SENSOR 197 +#define TEGRA124_CLK_FUSE 198 +#define TEGRA124_CLK_FUSE_BURN 199 +#define TEGRA124_CLK_CLK_32K 200 +#define TEGRA124_CLK_CLK_M 201 +#define TEGRA124_CLK_CLK_M_DIV2 202 +#define TEGRA124_CLK_CLK_M_DIV4 203 +#define TEGRA124_CLK_PLL_REF 204 +#define TEGRA124_CLK_PLL_C 205 +#define TEGRA124_CLK_PLL_C_OUT1 206 +#define TEGRA124_CLK_PLL_C2 207 +#define TEGRA124_CLK_PLL_C3 208 +#define TEGRA124_CLK_PLL_M 209 +#define TEGRA124_CLK_PLL_M_OUT1 210 +#define TEGRA124_CLK_PLL_P 211 +#define TEGRA124_CLK_PLL_P_OUT1 212 +#define TEGRA124_CLK_PLL_P_OUT2 213 +#define TEGRA124_CLK_PLL_P_OUT3 214 +#define TEGRA124_CLK_PLL_P_OUT4 215 +#define TEGRA124_CLK_PLL_A 216 +#define TEGRA124_CLK_PLL_A_OUT0 217 +#define TEGRA124_CLK_PLL_D 218 +#define TEGRA124_CLK_PLL_D_OUT0 219 +#define TEGRA124_CLK_PLL_D2 220 +#define TEGRA124_CLK_PLL_D2_OUT0 221 +#define TEGRA124_CLK_PLL_U 222 +#define TEGRA124_CLK_PLL_U_480M 223 + +#define TEGRA124_CLK_PLL_U_60M 224 +#define TEGRA124_CLK_PLL_U_48M 225 +#define TEGRA124_CLK_PLL_U_12M 226 +/* 227 */ +/* 228 */ +#define TEGRA124_CLK_PLL_RE_VCO 229 +#define TEGRA124_CLK_PLL_RE_OUT 230 +#define TEGRA124_CLK_PLL_E 231 +#define TEGRA124_CLK_SPDIF_IN_SYNC 232 +#define TEGRA124_CLK_I2S0_SYNC 233 +#define TEGRA124_CLK_I2S1_SYNC 234 +#define TEGRA124_CLK_I2S2_SYNC 235 +#define TEGRA124_CLK_I2S3_SYNC 236 +#define TEGRA124_CLK_I2S4_SYNC 237 +#define TEGRA124_CLK_VIMCLK_SYNC 238 +#define TEGRA124_CLK_AUDIO0 239 +#define TEGRA124_CLK_AUDIO1 240 +#define TEGRA124_CLK_AUDIO2 241 +#define TEGRA124_CLK_AUDIO3 242 +#define TEGRA124_CLK_AUDIO4 243 +#define TEGRA124_CLK_SPDIF 244 +#define TEGRA124_CLK_CLK_OUT_1 245 +#define TEGRA124_CLK_CLK_OUT_2 246 +#define TEGRA124_CLK_CLK_OUT_3 247 +#define TEGRA124_CLK_BLINK 248 +/* 249 */ +/* 250 */ +/* 251 */ +#define TEGRA124_CLK_XUSB_HOST_SRC 252 +#define TEGRA124_CLK_XUSB_FALCON_SRC 253 +#define TEGRA124_CLK_XUSB_FS_SRC 254 +#define TEGRA124_CLK_XUSB_SS_SRC 255 + +#define TEGRA124_CLK_XUSB_DEV_SRC 256 +#define TEGRA124_CLK_XUSB_DEV 257 +#define TEGRA124_CLK_XUSB_HS_SRC 258 +#define TEGRA124_CLK_SCLK 259 +#define TEGRA124_CLK_HCLK 260 +#define TEGRA124_CLK_PCLK 261 +/* 262 */ +/* 263 */ +#define 
TEGRA124_CLK_DFLL_REF 264
+#define TEGRA124_CLK_DFLL_SOC 265
+#define TEGRA124_CLK_VI_SENSOR2 266
+#define TEGRA124_CLK_PLL_P_OUT5 267
+#define TEGRA124_CLK_CML0 268
+#define TEGRA124_CLK_CML1 269
+#define TEGRA124_CLK_PLL_C4 270
+#define TEGRA124_CLK_PLL_DP 271
+#define TEGRA124_CLK_PLL_E_MUX 272
+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
+/* 274 */
+/* 275 */
+/* 276 */
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
+/* 281 */
+/* 282 */
+/* 283 */
+/* 284 */
+/* 285 */
+/* 286 */
+/* 287 */
+
+/* 288 */
+/* 289 */
+/* 290 */
+/* 291 */
+/* 292 */
+/* 293 */
+/* 294 */
+/* 295 */
+/* 296 */
+/* 297 */
+/* 298 */
+/* 299 */
+#define TEGRA124_CLK_AUDIO0_MUX 300
+#define TEGRA124_CLK_AUDIO1_MUX 301
+#define TEGRA124_CLK_AUDIO2_MUX 302
+#define TEGRA124_CLK_AUDIO3_MUX 303
+#define TEGRA124_CLK_AUDIO4_MUX 304
+#define TEGRA124_CLK_SPDIF_MUX 305
+#define TEGRA124_CLK_CLK_OUT_1_MUX 306
+#define TEGRA124_CLK_CLK_OUT_2_MUX 307
+#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 309 */
+/* 310 */
+#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_XUSB_SS_DIV2 312
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
new file mode 100644
index 0000000..c520ee2
--- /dev/null
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
+ */
+
+#include <dt-bindings/clock/tegra124-car-common.h>
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
+
+#define TEGRA124_CLK_PLL_X 227
+#define TEGRA124_CLK_PLL_X_OUT0 228
+
+#define TEGRA124_CLK_CCLK_G 262
+#define TEGRA124_CLK_CCLK_LP 263
+
+#define TEGRA124_CLK_CLK_MAX 315
+
+#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
diff --git a/include/dt-bindings/clock/tegra186-clock.h b/include/dt-bindings/clock/tegra186-clock.h
new file mode 100644
index 0000000..d6b525f
--- /dev/null
+++ b/include/dt-bindings/clock/tegra186-clock.h
@@ -0,0 +1,941 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/** @file */
+
+#ifndef _MACH_T186_CLK_T186_H
+#define _MACH_T186_CLK_T186_H
+
+/**
+ * @defgroup clock_ids Clock Identifiers
+ * @{
+ * @defgroup extern_input external input clocks
+ * @{
+ * @def TEGRA186_CLK_OSC
+ * @def TEGRA186_CLK_CLK_32K
+ * @def TEGRA186_CLK_DTV_INPUT
+ * @def TEGRA186_CLK_SOR0_PAD_CLKOUT
+ * @def TEGRA186_CLK_SOR1_PAD_CLKOUT
+ * @def TEGRA186_CLK_I2S1_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S2_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S3_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S4_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S5_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S6_SYNC_INPUT
+ * @def TEGRA186_CLK_SPDIFIN_SYNC_INPUT
+ * @}
+ *
+ * @defgroup extern_output external output clocks
+ * @{
+ * @def TEGRA186_CLK_EXTPERIPH1
+ * @def TEGRA186_CLK_EXTPERIPH2
+ * @def TEGRA186_CLK_EXTPERIPH3
+ * @def TEGRA186_CLK_EXTPERIPH4
+ * @}
+ *
+ * @defgroup display_clks display related clocks
+ * @{
+ * @def TEGRA186_CLK_CEC
+ * @def TEGRA186_CLK_DSIC
+ * @def TEGRA186_CLK_DSIC_LP
+ * @def TEGRA186_CLK_DSID
+ * @def TEGRA186_CLK_DSID_LP
+ * @def TEGRA186_CLK_DPAUX1
+ * @def TEGRA186_CLK_DPAUX
+ * @def TEGRA186_CLK_HDA2HDMICODEC
+ * @def TEGRA186_CLK_NVDISPLAY_DISP
+ * @def TEGRA186_CLK_NVDISPLAY_DSC
+ * @def TEGRA186_CLK_NVDISPLAY_P0
+ * @def TEGRA186_CLK_NVDISPLAY_P1
+ * @def TEGRA186_CLK_NVDISPLAY_P2
+ * @def TEGRA186_CLK_NVDISPLAYHUB
+ * @def TEGRA186_CLK_SOR_SAFE
+ * @def TEGRA186_CLK_SOR0
+ * @def
TEGRA186_CLK_SOR0_OUT + * @def TEGRA186_CLK_SOR1 + * @def TEGRA186_CLK_SOR1_OUT + * @def TEGRA186_CLK_DSI + * @def TEGRA186_CLK_MIPI_CAL + * @def TEGRA186_CLK_DSIA_LP + * @def TEGRA186_CLK_DSIB + * @def TEGRA186_CLK_DSIB_LP + * @} + * + * @defgroup camera_clks camera related clocks + * @{ + * @def TEGRA186_CLK_NVCSI + * @def TEGRA186_CLK_NVCSILP + * @def TEGRA186_CLK_VI + * @} + * + * @defgroup audio_clks audio related clocks + * @{ + * @def TEGRA186_CLK_ACLK + * @def TEGRA186_CLK_ADSP + * @def TEGRA186_CLK_ADSPNEON + * @def TEGRA186_CLK_AHUB + * @def TEGRA186_CLK_APE + * @def TEGRA186_CLK_APB2APE + * @def TEGRA186_CLK_AUD_MCLK + * @def TEGRA186_CLK_DMIC1 + * @def TEGRA186_CLK_DMIC2 + * @def TEGRA186_CLK_DMIC3 + * @def TEGRA186_CLK_DMIC4 + * @def TEGRA186_CLK_DSPK1 + * @def TEGRA186_CLK_DSPK2 + * @def TEGRA186_CLK_HDA + * @def TEGRA186_CLK_HDA2CODEC_2X + * @def TEGRA186_CLK_I2S1 + * @def TEGRA186_CLK_I2S2 + * @def TEGRA186_CLK_I2S3 + * @def TEGRA186_CLK_I2S4 + * @def TEGRA186_CLK_I2S5 + * @def TEGRA186_CLK_I2S6 + * @def TEGRA186_CLK_MAUD + * @def TEGRA186_CLK_PLL_A_OUT0 + * @def TEGRA186_CLK_SPDIF_DOUBLER + * @def TEGRA186_CLK_SPDIF_IN + * @def TEGRA186_CLK_SPDIF_OUT + * @def TEGRA186_CLK_SYNC_DMIC1 + * @def TEGRA186_CLK_SYNC_DMIC2 + * @def TEGRA186_CLK_SYNC_DMIC3 + * @def TEGRA186_CLK_SYNC_DMIC4 + * @def TEGRA186_CLK_SYNC_DMIC5 + * @def TEGRA186_CLK_SYNC_DSPK1 + * @def TEGRA186_CLK_SYNC_DSPK2 + * @def TEGRA186_CLK_SYNC_I2S1 + * @def TEGRA186_CLK_SYNC_I2S2 + * @def TEGRA186_CLK_SYNC_I2S3 + * @def TEGRA186_CLK_SYNC_I2S4 + * @def TEGRA186_CLK_SYNC_I2S5 + * @def TEGRA186_CLK_SYNC_I2S6 + * @def TEGRA186_CLK_SYNC_SPDIF + * @} + * + * @defgroup uart_clks UART clocks + * @{ + * @def TEGRA186_CLK_AON_UART_FST_MIPI_CAL + * @def TEGRA186_CLK_UARTA + * @def TEGRA186_CLK_UARTB + * @def TEGRA186_CLK_UARTC + * @def TEGRA186_CLK_UARTD + * @def TEGRA186_CLK_UARTE + * @def TEGRA186_CLK_UARTF + * @def TEGRA186_CLK_UARTG + * @def TEGRA186_CLK_UART_FST_MIPI_CAL + * @} + * + * @defgroup i2c_clks I2C clocks + * @{ + * @def TEGRA186_CLK_AON_I2C_SLOW + * @def TEGRA186_CLK_I2C1 + * @def TEGRA186_CLK_I2C2 + * @def TEGRA186_CLK_I2C3 + * @def TEGRA186_CLK_I2C4 + * @def TEGRA186_CLK_I2C5 + * @def TEGRA186_CLK_I2C6 + * @def TEGRA186_CLK_I2C8 + * @def TEGRA186_CLK_I2C9 + * @def TEGRA186_CLK_I2C1 + * @def TEGRA186_CLK_I2C12 + * @def TEGRA186_CLK_I2C13 + * @def TEGRA186_CLK_I2C14 + * @def TEGRA186_CLK_I2C_SLOW + * @def TEGRA186_CLK_VI_I2C + * @} + * + * @defgroup spi_clks SPI clocks + * @{ + * @def TEGRA186_CLK_SPI1 + * @def TEGRA186_CLK_SPI2 + * @def TEGRA186_CLK_SPI3 + * @def TEGRA186_CLK_SPI4 + * @} + * + * @defgroup storage storage related clocks + * @{ + * @def TEGRA186_CLK_SATA + * @def TEGRA186_CLK_SATA_OOB + * @def TEGRA186_CLK_SATA_IOBIST + * @def TEGRA186_CLK_SDMMC_LEGACY_TM + * @def TEGRA186_CLK_SDMMC1 + * @def TEGRA186_CLK_SDMMC2 + * @def TEGRA186_CLK_SDMMC3 + * @def TEGRA186_CLK_SDMMC4 + * @def TEGRA186_CLK_QSPI + * @def TEGRA186_CLK_QSPI_OUT + * @def TEGRA186_CLK_UFSDEV_REF + * @def TEGRA186_CLK_UFSHC + * @} + * + * @defgroup pwm_clks PWM clocks + * @{ + * @def TEGRA186_CLK_PWM1 + * @def TEGRA186_CLK_PWM2 + * @def TEGRA186_CLK_PWM3 + * @def TEGRA186_CLK_PWM4 + * @def TEGRA186_CLK_PWM5 + * @def TEGRA186_CLK_PWM6 + * @def TEGRA186_CLK_PWM7 + * @def TEGRA186_CLK_PWM8 + * @} + * + * @defgroup plls PLLs and related clocks + * @{ + * @def TEGRA186_CLK_PLLREFE_OUT_GATED + * @def TEGRA186_CLK_PLLREFE_OUT1 + * @def TEGRA186_CLK_PLLD_OUT1 + * @def TEGRA186_CLK_PLLP_OUT0 + * @def TEGRA186_CLK_PLLP_OUT5 + * @def 
TEGRA186_CLK_PLLA + * @def TEGRA186_CLK_PLLE_PWRSEQ + * @def TEGRA186_CLK_PLLA_OUT1 + * @def TEGRA186_CLK_PLLREFE_REF + * @def TEGRA186_CLK_UPHY_PLL0_PWRSEQ + * @def TEGRA186_CLK_UPHY_PLL1_PWRSEQ + * @def TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH + * @def TEGRA186_CLK_PLLREFE_PEX + * @def TEGRA186_CLK_PLLREFE_IDDQ + * @def TEGRA186_CLK_PLLC_OUT_AON + * @def TEGRA186_CLK_PLLC_OUT_ISP + * @def TEGRA186_CLK_PLLC_OUT_VE + * @def TEGRA186_CLK_PLLC4_OUT + * @def TEGRA186_CLK_PLLREFE_OUT + * @def TEGRA186_CLK_PLLREFE_PLL_REF + * @def TEGRA186_CLK_PLLE + * @def TEGRA186_CLK_PLLC + * @def TEGRA186_CLK_PLLP + * @def TEGRA186_CLK_PLLD + * @def TEGRA186_CLK_PLLD2 + * @def TEGRA186_CLK_PLLREFE_VCO + * @def TEGRA186_CLK_PLLC2 + * @def TEGRA186_CLK_PLLC3 + * @def TEGRA186_CLK_PLLDP + * @def TEGRA186_CLK_PLLC4_VCO + * @def TEGRA186_CLK_PLLA1 + * @def TEGRA186_CLK_PLLNVCSI + * @def TEGRA186_CLK_PLLDISPHUB + * @def TEGRA186_CLK_PLLD3 + * @def TEGRA186_CLK_PLLBPMPCAM + * @def TEGRA186_CLK_PLLAON + * @def TEGRA186_CLK_PLLU + * @def TEGRA186_CLK_PLLC4_VCO_DIV2 + * @def TEGRA186_CLK_PLL_REF + * @def TEGRA186_CLK_PLLREFE_OUT1_DIV5 + * @def TEGRA186_CLK_UTMIP_PLL_PWRSEQ + * @def TEGRA186_CLK_PLL_U_48M + * @def TEGRA186_CLK_PLL_U_480M + * @def TEGRA186_CLK_PLLC4_OUT0 + * @def TEGRA186_CLK_PLLC4_OUT1 + * @def TEGRA186_CLK_PLLC4_OUT2 + * @def TEGRA186_CLK_PLLC4_OUT_MUX + * @def TEGRA186_CLK_DFLLDISP_DIV + * @def TEGRA186_CLK_PLLDISPHUB_DIV + * @def TEGRA186_CLK_PLLP_DIV8 + * @} + * + * @defgroup nafll_clks NAFLL clock sources + * @{ + * @def TEGRA186_CLK_NAFLL_AXI_CBB + * @def TEGRA186_CLK_NAFLL_BCPU + * @def TEGRA186_CLK_NAFLL_BPMP + * @def TEGRA186_CLK_NAFLL_DISP + * @def TEGRA186_CLK_NAFLL_GPU + * @def TEGRA186_CLK_NAFLL_ISP + * @def TEGRA186_CLK_NAFLL_MCPU + * @def TEGRA186_CLK_NAFLL_NVDEC + * @def TEGRA186_CLK_NAFLL_NVENC + * @def TEGRA186_CLK_NAFLL_NVJPG + * @def TEGRA186_CLK_NAFLL_SCE + * @def TEGRA186_CLK_NAFLL_SE + * @def TEGRA186_CLK_NAFLL_TSEC + * @def TEGRA186_CLK_NAFLL_TSECB + * @def TEGRA186_CLK_NAFLL_VI + * @def TEGRA186_CLK_NAFLL_VIC + * @} + * + * @defgroup mphy MPHY related clocks + * @{ + * @def TEGRA186_CLK_MPHY_L0_RX_SYMB + * @def TEGRA186_CLK_MPHY_L0_RX_LS_BIT + * @def TEGRA186_CLK_MPHY_L0_TX_SYMB + * @def TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT + * @def TEGRA186_CLK_MPHY_L0_RX_ANA + * @def TEGRA186_CLK_MPHY_L1_RX_ANA + * @def TEGRA186_CLK_MPHY_IOBIST + * @def TEGRA186_CLK_MPHY_TX_1MHZ_REF + * @def TEGRA186_CLK_MPHY_CORE_PLL_FIXED + * @} + * + * @defgroup eavb EAVB related clocks + * @{ + * @def TEGRA186_CLK_EQOS_AXI + * @def TEGRA186_CLK_EQOS_PTP_REF + * @def TEGRA186_CLK_EQOS_RX + * @def TEGRA186_CLK_EQOS_RX_INPUT + * @def TEGRA186_CLK_EQOS_TX + * @} + * + * @defgroup usb USB related clocks + * @{ + * @def TEGRA186_CLK_PEX_USB_PAD0_MGMT + * @def TEGRA186_CLK_PEX_USB_PAD1_MGMT + * @def TEGRA186_CLK_HSIC_TRK + * @def TEGRA186_CLK_USB2_TRK + * @def TEGRA186_CLK_USB2_HSIC_TRK + * @def TEGRA186_CLK_XUSB_CORE_SS + * @def TEGRA186_CLK_XUSB_CORE_DEV + * @def TEGRA186_CLK_XUSB_FALCON + * @def TEGRA186_CLK_XUSB_FS + * @def TEGRA186_CLK_XUSB + * @def TEGRA186_CLK_XUSB_DEV + * @def TEGRA186_CLK_XUSB_HOST + * @def TEGRA186_CLK_XUSB_SS + * @} + * + * @defgroup bigblock compute block related clocks + * @{ + * @def TEGRA186_CLK_GPCCLK + * @def TEGRA186_CLK_GPC2CLK + * @def TEGRA186_CLK_GPU + * @def TEGRA186_CLK_HOST1X + * @def TEGRA186_CLK_ISP + * @def TEGRA186_CLK_NVDEC + * @def TEGRA186_CLK_NVENC + * @def TEGRA186_CLK_NVJPG + * @def TEGRA186_CLK_SE + * @def TEGRA186_CLK_TSEC + * @def TEGRA186_CLK_TSECB + * @def 
TEGRA186_CLK_VIC + * @} + * + * @defgroup can CAN bus related clocks + * @{ + * @def TEGRA186_CLK_CAN1 + * @def TEGRA186_CLK_CAN1_HOST + * @def TEGRA186_CLK_CAN2 + * @def TEGRA186_CLK_CAN2_HOST + * @} + * + * @defgroup system basic system clocks + * @{ + * @def TEGRA186_CLK_ACTMON + * @def TEGRA186_CLK_AON_APB + * @def TEGRA186_CLK_AON_CPU_NIC + * @def TEGRA186_CLK_AON_NIC + * @def TEGRA186_CLK_AXI_CBB + * @def TEGRA186_CLK_BPMP_APB + * @def TEGRA186_CLK_BPMP_CPU_NIC + * @def TEGRA186_CLK_BPMP_NIC_RATE + * @def TEGRA186_CLK_CLK_M + * @def TEGRA186_CLK_EMC + * @def TEGRA186_CLK_MSS_ENCRYPT + * @def TEGRA186_CLK_SCE_APB + * @def TEGRA186_CLK_SCE_CPU_NIC + * @def TEGRA186_CLK_SCE_NIC + * @def TEGRA186_CLK_TSC + * @} + * + * @defgroup pcie_clks PCIe related clocks + * @{ + * @def TEGRA186_CLK_AFI + * @def TEGRA186_CLK_PCIE + * @def TEGRA186_CLK_PCIE2_IOBIST + * @def TEGRA186_CLK_PCIERX0 + * @def TEGRA186_CLK_PCIERX1 + * @def TEGRA186_CLK_PCIERX2 + * @def TEGRA186_CLK_PCIERX3 + * @def TEGRA186_CLK_PCIERX4 + * @} + */ + +/** @brief output of gate CLK_ENB_FUSE */ +#define TEGRA186_CLK_FUSE 0 +/** + * @brief It's not what you think + * @details output of gate CLK_ENB_GPU. This output connects to the GPU + * pwrclk. @warning: This is almost certainly not the clock you think + * it is. If you're looking for the clock of the graphics engine, see + * TEGRA186_GPCCLK + */ +#define TEGRA186_CLK_GPU 1 +/** @brief output of gate CLK_ENB_PCIE */ +#define TEGRA186_CLK_PCIE 3 +/** @brief output of the divider IPFS_CLK_DIVISOR */ +#define TEGRA186_CLK_AFI 4 +/** @brief output of gate CLK_ENB_PCIE2_IOBIST */ +#define TEGRA186_CLK_PCIE2_IOBIST 5 +/** @brief output of gate CLK_ENB_PCIERX0*/ +#define TEGRA186_CLK_PCIERX0 6 +/** @brief output of gate CLK_ENB_PCIERX1*/ +#define TEGRA186_CLK_PCIERX1 7 +/** @brief output of gate CLK_ENB_PCIERX2*/ +#define TEGRA186_CLK_PCIERX2 8 +/** @brief output of gate CLK_ENB_PCIERX3*/ +#define TEGRA186_CLK_PCIERX3 9 +/** @brief output of gate CLK_ENB_PCIERX4*/ +#define TEGRA186_CLK_PCIERX4 10 +/** @brief output branch of PLL_C for ISP, controlled by gate CLK_ENB_PLLC_OUT_ISP */ +#define TEGRA186_CLK_PLLC_OUT_ISP 11 +/** @brief output branch of PLL_C for VI, controlled by gate CLK_ENB_PLLC_OUT_VE */ +#define TEGRA186_CLK_PLLC_OUT_VE 12 +/** @brief output branch of PLL_C for AON domain, controlled by gate CLK_ENB_PLLC_OUT_AON */ +#define TEGRA186_CLK_PLLC_OUT_AON 13 +/** @brief output of gate CLK_ENB_SOR_SAFE */ +#define TEGRA186_CLK_SOR_SAFE 39 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */ +#define TEGRA186_CLK_I2S2 42 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */ +#define TEGRA186_CLK_I2S3 43 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDF_IN */ +#define TEGRA186_CLK_SPDIF_IN 44 +/** @brief output of gate CLK_ENB_SPDIF_DOUBLER */ +#define TEGRA186_CLK_SPDIF_DOUBLER 45 +/** @clkdesc{spi_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_SPI3} */ +#define TEGRA186_CLK_SPI3 46 +/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C1} */ +#define TEGRA186_CLK_I2C1 47 +/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C5} */ +#define TEGRA186_CLK_I2C5 48 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */ +#define TEGRA186_CLK_SPI1 49 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */ +#define TEGRA186_CLK_ISP 50 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */ +#define TEGRA186_CLK_VI 51 +/** 
@brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA186_CLK_SDMMC1 52
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC2 */
+#define TEGRA186_CLK_SDMMC2 53
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
+#define TEGRA186_CLK_SDMMC4 54
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
+#define TEGRA186_CLK_UARTA 55
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA186_CLK_UARTB 56
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA186_CLK_HOST1X 57
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA186_CLK_EMC 58
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA186_CLK_EXTPERIPH4 73
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA186_CLK_SPI4 74
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA186_CLK_I2C3 75
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC3 */
+#define TEGRA186_CLK_SDMMC3 76
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA186_CLK_UARTD 77
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA186_CLK_I2S1 79
+/** output of gate CLK_ENB_DTV */
+#define TEGRA186_CLK_DTV 80
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA186_CLK_TSEC 81
+/** @brief output of gate CLK_ENB_DP2 */
+#define TEGRA186_CLK_DP2 82
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA186_CLK_I2S4 84
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA186_CLK_I2S5 85
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA186_CLK_I2C4 86
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA186_CLK_AHUB 87
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+#define TEGRA186_CLK_HDA2CODEC_2X 88
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA186_CLK_EXTPERIPH1 89
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA186_CLK_EXTPERIPH2 90
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA186_CLK_EXTPERIPH3 91
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA186_CLK_I2C_SLOW 92
+/** @brief output of the SOR1_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1 93
+/** @brief output of gate CLK_ENB_CEC */
+#define TEGRA186_CLK_CEC 94
+/** @brief output of gate CLK_ENB_DPAUX1 */
+#define TEGRA186_CLK_DPAUX1 95
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA186_CLK_DPAUX 96
+/** @brief output of the SOR0_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0 97
+/** @brief output of gate CLK_ENB_HDA2HDMICODEC */
+#define TEGRA186_CLK_HDA2HDMICODEC 98
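
A minimal consumer-side sketch of the TEGRA186_CLK_EMC set-rate semantics described above, using the common clk framework. This is illustrative only and not part of the binding header; the con-id "emc" and the helper name are assumptions for the example.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Request a new EMC rate; per the comment above, the clk core selects a
 * source, programs it, and runs the switch sequence for both memory
 * controllers, trading memory throughput against controller power. */
static int example_set_emc_rate(struct device *dev, unsigned long rate)
{
	struct clk *emc = devm_clk_get(dev, "emc");	/* con-id "emc" is assumed */

	if (IS_ERR(emc))
		return PTR_ERR(emc);

	return clk_set_rate(emc, rate);
}
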
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SATA */
+#define TEGRA186_CLK_SATA 99
+/** @brief output of gate CLK_ENB_SATA_OOB */
+#define TEGRA186_CLK_SATA_OOB 100
+/** @brief output of gate CLK_ENB_SATA_IOBIST */
+#define TEGRA186_CLK_SATA_IOBIST 101
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA */
+#define TEGRA186_CLK_HDA 102
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SE */
+#define TEGRA186_CLK_SE 103
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA186_CLK_APB2APE 104
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA186_CLK_APE 105
+/** @brief output of gate CLK_ENB_IQC1 */
+#define TEGRA186_CLK_IQC1 106
+/** @brief output of gate CLK_ENB_IQC2 */
+#define TEGRA186_CLK_IQC2 107
+/** divide by 2 version of TEGRA186_CLK_PLLREFE_VCO */
+#define TEGRA186_CLK_PLLREFE_OUT 108
+/** @brief output of gate CLK_ENB_PLLREFE_PLL_REF */
+#define TEGRA186_CLK_PLLREFE_PLL_REF 109
+/** @brief output of gate CLK_ENB_PLLC4_OUT */
+#define TEGRA186_CLK_PLLC4_OUT 110
+/** @brief output of mux xusb_core_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB 111
+/** controls xusb_dev_ce signal on pages 66 and 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_DEV 112
+/** controls xusb_host_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_HOST 113
+/** controls xusb_ss_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_SS 114
+/** @brief output of gate CLK_ENB_DSI */
+#define TEGRA186_CLK_DSI 115
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA186_CLK_MIPI_CAL 116
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIA_LP */
+#define TEGRA186_CLK_DSIA_LP 117
+/** @brief output of gate CLK_ENB_DSIB */
+#define TEGRA186_CLK_DSIB 118
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIB_LP */
+#define TEGRA186_CLK_DSIB_LP 119
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA186_CLK_DMIC1 122
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA186_CLK_DMIC2 123
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA186_CLK_AUD_MCLK 124
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA186_CLK_I2C6 125
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_UART_FST_MIPI_CAL 126
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA186_CLK_VIC 127
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM */
+#define TEGRA186_CLK_SDMMC_LEGACY_TM 128
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA186_CLK_NVDEC 129
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA186_CLK_NVJPG 130
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA186_CLK_NVENC 131
+/** @brief output of the QSPI_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI 132
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI_I2C */
+#define TEGRA186_CLK_VI_I2C 133
+/** @brief output of gate CLK_ENB_HSIC_TRK */
+#define TEGRA186_CLK_HSIC_TRK 134
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA186_CLK_USB2_TRK 135
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MAUD */
+#define TEGRA186_CLK_MAUD 136
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSECB */
+#define TEGRA186_CLK_TSECB 137
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA186_CLK_ADSP 138
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA186_CLK_ADSPNEON 139
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_RX_SYMB 140
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA186_CLK_MPHY_L0_RX_LS_BIT 141
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_TX_SYMB 142
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT 143
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA186_CLK_MPHY_L0_RX_ANA 144
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA186_CLK_MPHY_L1_RX_ANA 145
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_IOBIST */
+#define TEGRA186_CLK_MPHY_IOBIST 146
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA186_CLK_MPHY_TX_1MHZ_REF 147
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA186_CLK_MPHY_CORE_PLL_FIXED 148
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA186_CLK_AXI_CBB 149
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA186_CLK_DMIC3 150
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA186_CLK_DMIC4 151
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA186_CLK_DSPK1 152
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA186_CLK_DSPK2 153
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */
+#define TEGRA186_CLK_I2S6 154
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P0 */
+#define TEGRA186_CLK_NVDISPLAY_P0 155
+/** @brief output of the NVDISPLAY_DISP_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP */
+#define TEGRA186_CLK_NVDISPLAY_DISP 156
+/** @brief output of gate CLK_ENB_NVDISPLAY_DSC */
+#define TEGRA186_CLK_NVDISPLAY_DSC 157
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAYHUB */
+#define TEGRA186_CLK_NVDISPLAYHUB 158
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P1 */
+#define TEGRA186_CLK_NVDISPLAY_P1 159
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P2 */
+#define TEGRA186_CLK_NVDISPLAY_P2 160
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH */
+#define TEGRA186_CLK_TACH 166
+/** @brief output of gate CLK_ENB_EQOS */
+#define TEGRA186_CLK_EQOS_AXI 167
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA186_CLK_EQOS_RX 168
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA186_CLK_UFSHC 178
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA186_CLK_UFSDEV_REF 179
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA186_CLK_NVCSI 180
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA186_CLK_NVCSILP 181
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define
TEGRA186_CLK_I2C7 182 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */ +#define TEGRA186_CLK_I2C9 183 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C12 */ +#define TEGRA186_CLK_I2C12 184 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C13 */ +#define TEGRA186_CLK_I2C13 185 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C14 */ +#define TEGRA186_CLK_I2C14 186 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */ +#define TEGRA186_CLK_PWM1 187 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */ +#define TEGRA186_CLK_PWM2 188 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */ +#define TEGRA186_CLK_PWM3 189 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */ +#define TEGRA186_CLK_PWM5 190 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */ +#define TEGRA186_CLK_PWM6 191 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */ +#define TEGRA186_CLK_PWM7 192 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */ +#define TEGRA186_CLK_PWM8 193 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */ +#define TEGRA186_CLK_UARTE 194 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */ +#define TEGRA186_CLK_UARTF 195 +/** @deprecated */ +#define TEGRA186_CLK_DBGAPB 196 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC */ +#define TEGRA186_CLK_BPMP_CPU_NIC 197 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_APB */ +#define TEGRA186_CLK_BPMP_APB 199 +/** @brief output of mux controlled by TEGRA186_CLK_SOC_ACTMON */ +#define TEGRA186_CLK_ACTMON 201 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC */ +#define TEGRA186_CLK_AON_CPU_NIC 208 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */ +#define TEGRA186_CLK_CAN1 210 +/** @brief output of gate CLK_ENB_CAN1_HOST */ +#define TEGRA186_CLK_CAN1_HOST 211 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */ +#define TEGRA186_CLK_CAN2 212 +/** @brief output of gate CLK_ENB_CAN2_HOST */ +#define TEGRA186_CLK_CAN2_HOST 213 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB */ +#define TEGRA186_CLK_AON_APB 214 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */ +#define TEGRA186_CLK_UARTC 215 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTG */ +#define TEGRA186_CLK_UARTG 216 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */ +#define TEGRA186_CLK_AON_UART_FST_MIPI_CAL 217 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */ +#define TEGRA186_CLK_I2C2 218 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */ +#define TEGRA186_CLK_I2C8 219 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C10 */ +#define TEGRA186_CLK_I2C10 220 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW */ +#define TEGRA186_CLK_AON_I2C_SLOW 221 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */ +#define TEGRA186_CLK_SPI2 222 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */ +#define TEGRA186_CLK_DMIC5 223 +/** @brief output of mux controlled by 
CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH */ +#define TEGRA186_CLK_AON_TOUCH 224 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */ +#define TEGRA186_CLK_PWM4 225 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSC. This clock object is read only and is used for all timers in the system. */ +#define TEGRA186_CLK_TSC 226 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT */ +#define TEGRA186_CLK_MSS_ENCRYPT 227 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */ +#define TEGRA186_CLK_SCE_CPU_NIC 228 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_APB */ +#define TEGRA186_CLK_SCE_APB 230 +/** @brief output of gate CLK_ENB_DSIC */ +#define TEGRA186_CLK_DSIC 231 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIC_LP */ +#define TEGRA186_CLK_DSIC_LP 232 +/** @brief output of gate CLK_ENB_DSID */ +#define TEGRA186_CLK_DSID 233 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSID_LP */ +#define TEGRA186_CLK_DSID_LP 234 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP */ +#define TEGRA186_CLK_PEX_SATA_USB_RX_BYP 236 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT */ +#define TEGRA186_CLK_SPDIF_OUT 238 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 */ +#define TEGRA186_CLK_EQOS_PTP_REF 239 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK */ +#define TEGRA186_CLK_EQOS_TX 240 +/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_USB2_HSIC_TRK */ +#define TEGRA186_CLK_USB2_HSIC_TRK 241 +/** @brief output of mux xusb_ss_clk_switch on page 66 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_CORE_SS 242 +/** @brief output of mux xusb_core_dev_clk_switch on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_CORE_DEV 243 +/** @brief output of mux xusb_core_falcon_clk_switch on page 67 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_FALCON 244 +/** @brief output of mux xusb_fs_clk_switch on page 66 of T186_Clocks_IAS.doc */ +#define TEGRA186_CLK_XUSB_FS 245 +/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */ +#define TEGRA186_CLK_PLL_A_OUT0 246 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */ +#define TEGRA186_CLK_SYNC_I2S1 247 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */ +#define TEGRA186_CLK_SYNC_I2S2 248 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */ +#define TEGRA186_CLK_SYNC_I2S3 249 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */ +#define TEGRA186_CLK_SYNC_I2S4 250 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */ +#define TEGRA186_CLK_SYNC_I2S5 251 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */ +#define TEGRA186_CLK_SYNC_I2S6 252 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */ +#define TEGRA186_CLK_SYNC_DSPK1 253 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */ +#define TEGRA186_CLK_SYNC_DSPK2 254 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */ +#define TEGRA186_CLK_SYNC_DMIC1 255 +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */ +#define TEGRA186_CLK_SYNC_DMIC2 256 +/** @brief output of mux controlled by 
CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA186_CLK_SYNC_DMIC3 257
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA186_CLK_SYNC_DMIC4 259
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_SPDIF */
+#define TEGRA186_CLK_SYNC_SPDIF 260
+/** @brief output of gate CLK_ENB_PLLREFE_OUT */
+#define TEGRA186_CLK_PLLREFE_OUT_GATED 261
+/** @brief output of the divider PLLREFE_DIVP in CLK_RST_CONTROLLER_PLLREFE_BASE. PLLREFE has 2 outputs:
+ * * VCO/pdiv defined by this clock object
+ * * VCO/2 defined by TEGRA186_CLK_PLLREFE_OUT
+ */
+#define TEGRA186_CLK_PLLREFE_OUT1 262
+#define TEGRA186_CLK_PLLD_OUT1 267
+/** @brief output of the divider PLLP_DIVP in CLK_RST_CONTROLLER_PLLP_BASE */
+#define TEGRA186_CLK_PLLP_OUT0 269
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTC */
+#define TEGRA186_CLK_PLLP_OUT5 270
+/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA 271
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY divided by the divider controlled by ACLK_CLK_DIVISOR in CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER */
+#define TEGRA186_CLK_ACLK 273
+/** fixed 48MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_48M 274
+/** fixed 480MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_480M 275
+/** @brief output of the divider PLLC4_DIVP in CLK_RST_CONTROLLER_PLLC4_BASE. Output frequency is TEGRA186_CLK_PLLC4_VCO/PLLC4_DIVP */
+#define TEGRA186_CLK_PLLC4_OUT0 276
+/** fixed /3 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/3 */
+#define TEGRA186_CLK_PLLC4_OUT1 277
+/** fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/5 */
+#define TEGRA186_CLK_PLLC4_OUT2 278
+/** @brief output of mux controlled by PLLC4_CLK_SEL in CLK_RST_CONTROLLER_PLLC4_MISC1 */
+#define TEGRA186_CLK_PLLC4_OUT_MUX 279
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when DFLLDISP_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_DFLLDISP_DIV 284
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when PLLDISPHUB_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_PLLDISPHUB_DIV 285
+/** fixed /8 divider which is used as the input for TEGRA186_CLK_SOR_SAFE */
+#define TEGRA186_CLK_PLLP_DIV8 286
+/** @brief output of divider CLK_RST_CONTROLLER_BPMP_NIC_RATE */
+#define TEGRA186_CLK_BPMP_NIC 287
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA1_OUT1 */
+#define TEGRA186_CLK_PLL_A_OUT1 288
+/** @deprecated */
+#define TEGRA186_CLK_GPC2CLK 289
+/** A fake clock which must be enabled during KFUSE read operations to ensure adequate VDD_CORE voltage. */
+#define TEGRA186_CLK_KFUSE 293
+/**
+ * @brief controls the PLLE hardware sequencer.
+ * @details This clock only has enable and disable methods. When the
+ * PLLE hw sequencer is enabled, PLLE will be enabled or disabled by
+ * hw based on the control signals from the PCIe, SATA and XUSB
+ * clocks. When the PLLE hw sequencer is disabled, the state of PLLE
+ * is controlled by sw using clk_enable/clk_disable on
+ * TEGRA186_CLK_PLLE.
+ */
+#define TEGRA186_CLK_PLLE_PWRSEQ 294
+/** fixed 60MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLLREFE_REF 295
+/** @brief output of mux controlled by SOR0_CLK_SEL0 and SOR0_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_OUT 296
+/** @brief output of mux controlled by SOR1_CLK_SEL0 and SOR1_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_OUT 297
+/** @brief fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLREFE_OUT1/5. Used as input for TEGRA186_CLK_EQOS_AXI */
+#define TEGRA186_CLK_PLLREFE_OUT1_DIV5 298
+/** @brief controls the UTMIP_PLL (aka PLLU) hardware sequencer */
+#define TEGRA186_CLK_UTMIP_PLL_PWRSEQ 301
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD0_MGMT 302
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD1_MGMT 303
+/** @brief controls the UPHY_PLL0 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL0_PWRSEQ 304
+/** @brief controls the UPHY_PLL1 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL1_PWRSEQ 305
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC so the bypass output can even be used when the PLL is disabled */
+#define TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH 306
+/** @brief output of the mux controlled by PLLREFE_SEL_CLKIN_PEX in CLK_RST_CONTROLLER_PLLREFE_MISC */
+#define TEGRA186_CLK_PLLREFE_PEX 307
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC to turn on the PLL when enabled */
+#define TEGRA186_CLK_PLLREFE_IDDQ 308
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI_OUT 309
+/**
+ * @brief GPC2CLK-div-2
+ * @details fixed /2 divider. Output frequency is
+ * TEGRA186_CLK_GPC2CLK/2. The frequency of this clock is the
+ * frequency at which the GPU graphics engine runs. */
+#define TEGRA186_CLK_GPCCLK 310
+/** @brief output of divider CLK_RST_CONTROLLER_AON_NIC_RATE */
+#define TEGRA186_CLK_AON_NIC 450
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA186_CLK_SCE_NIC 451
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA186_CLK_PLLE 512
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA186_CLK_PLLC 513
+/** Fixed 408MHz PLL for use by peripheral clocks */
+#define TEGRA186_CLK_PLLP 516
+/** @deprecated */
+#define TEGRA186_CLK_PLL_P TEGRA186_CLK_PLLP
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD_BASE for use by DSI */
+#define TEGRA186_CLK_PLLD 518
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD2_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD2 519
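
The enable/disable semantics of TEGRA186_CLK_PLLE_PWRSEQ described above map directly onto the common clk API. A hypothetical sketch only, not a real driver: the clk handles are assumed to have been obtained elsewhere (e.g. via devm_clk_get()), and error handling is trimmed.

#include <linux/clk.h>

/* Hand PLLE over to the hardware sequencer: from here on, hw enables or
 * disables PLLE based on the PCIe, SATA and XUSB control signals. */
static int example_plle_to_hw(struct clk *plle_pwrseq)
{
	return clk_prepare_enable(plle_pwrseq);
}

/* Take PLLE back under software control and pin it on explicitly. */
static int example_plle_to_sw(struct clk *plle_pwrseq, struct clk *plle)
{
	clk_disable_unprepare(plle_pwrseq);
	return clk_prepare_enable(plle);	/* TEGRA186_CLK_PLLE */
}

+/**
+ * @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE.
+ * @details Note that this clock only controls the VCO output, before
+ * the post-divider. See TEGRA186_CLK_PLLREFE_OUT1 for more
+ * information.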
+ */ +#define TEGRA186_CLK_PLLREFE_VCO 520 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */ +#define TEGRA186_CLK_PLLC2 521 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC3_BASE */ +#define TEGRA186_CLK_PLLC3 522 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDP_BASE for use as the DP link clock */ +#define TEGRA186_CLK_PLLDP 523 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */ +#define TEGRA186_CLK_PLLC4_VCO 524 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */ +#define TEGRA186_CLK_PLLA1 525 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */ +#define TEGRA186_CLK_PLLNVCSI 526 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDISPHUB_BASE */ +#define TEGRA186_CLK_PLLDISPHUB 527 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD3_BASE for use by HDMI or DP */ +#define TEGRA186_CLK_PLLD3 528 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLBPMPCAM_BASE */ +#define TEGRA186_CLK_PLLBPMPCAM 531 +/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */ +#define TEGRA186_CLK_PLLAON 532 +/** Fixed frequency 960MHz PLL for USB and EAVB */ +#define TEGRA186_CLK_PLLU 533 +/** fixed /2 divider. Output frequency is TEGRA186_CLK_PLLC4_VCO/2 */ +#define TEGRA186_CLK_PLLC4_VCO_DIV2 535 +/** @brief NAFLL clock source for AXI_CBB */ +#define TEGRA186_CLK_NAFLL_AXI_CBB 564 +/** @brief NAFLL clock source for BPMP */ +#define TEGRA186_CLK_NAFLL_BPMP 565 +/** @brief NAFLL clock source for ISP */ +#define TEGRA186_CLK_NAFLL_ISP 566 +/** @brief NAFLL clock source for NVDEC */ +#define TEGRA186_CLK_NAFLL_NVDEC 567 +/** @brief NAFLL clock source for NVENC */ +#define TEGRA186_CLK_NAFLL_NVENC 568 +/** @brief NAFLL clock source for NVJPG */ +#define TEGRA186_CLK_NAFLL_NVJPG 569 +/** @brief NAFLL clock source for SCE */ +#define TEGRA186_CLK_NAFLL_SCE 570 +/** @brief NAFLL clock source for SE */ +#define TEGRA186_CLK_NAFLL_SE 571 +/** @brief NAFLL clock source for TSEC */ +#define TEGRA186_CLK_NAFLL_TSEC 572 +/** @brief NAFLL clock source for TSECB */ +#define TEGRA186_CLK_NAFLL_TSECB 573 +/** @brief NAFLL clock source for VI */ +#define TEGRA186_CLK_NAFLL_VI 574 +/** @brief NAFLL clock source for VIC */ +#define TEGRA186_CLK_NAFLL_VIC 575 +/** @brief NAFLL clock source for DISP */ +#define TEGRA186_CLK_NAFLL_DISP 576 +/** @brief NAFLL clock source for GPU */ +#define TEGRA186_CLK_NAFLL_GPU 577 +/** @brief NAFLL clock source for M-CPU cluster */ +#define TEGRA186_CLK_NAFLL_MCPU 578 +/** @brief NAFLL clock source for B-CPU cluster */ +#define TEGRA186_CLK_NAFLL_BCPU 579 +/** @brief input from Tegra's CLK_32K_IN pad */ +#define TEGRA186_CLK_CLK_32K 608 +/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */ +#define TEGRA186_CLK_CLK_M 609 +/** @brief output of divider PLL_REF_DIV in CLK_RST_CONTROLLER_OSC_CTRL */ +#define TEGRA186_CLK_PLL_REF 610 +/** @brief input from Tegra's XTAL_IN */ +#define TEGRA186_CLK_OSC 612 +/** @brief clock recovered from EAVB input */ +#define TEGRA186_CLK_EQOS_RX_INPUT 613 +/** @brief clock recovered from DTV input */ +#define TEGRA186_CLK_DTV_INPUT 614 +/** @brief SOR0 brick output which feeds into SOR0_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0*/ +#define TEGRA186_CLK_SOR0_PAD_CLKOUT 615 +/** @brief SOR1 brick output which feeds into SOR1_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1*/ +#define TEGRA186_CLK_SOR1_PAD_CLKOUT 616 +/** @brief clock recovered from I2S1 input */ +#define TEGRA186_CLK_I2S1_SYNC_INPUT 617 +/** @brief 
clock recovered from I2S2 input */ +#define TEGRA186_CLK_I2S2_SYNC_INPUT 618 +/** @brief clock recovered from I2S3 input */ +#define TEGRA186_CLK_I2S3_SYNC_INPUT 619 +/** @brief clock recovered from I2S4 input */ +#define TEGRA186_CLK_I2S4_SYNC_INPUT 620 +/** @brief clock recovered from I2S5 input */ +#define TEGRA186_CLK_I2S5_SYNC_INPUT 621 +/** @brief clock recovered from I2S6 input */ +#define TEGRA186_CLK_I2S6_SYNC_INPUT 622 +/** @brief clock recovered from SPDIFIN input */ +#define TEGRA186_CLK_SPDIFIN_SYNC_INPUT 623 + +/** + * @brief subject to change + * @details maximum clock identifier value plus one. + */ +#define TEGRA186_CLK_CLK_MAX 624 + +/** @} */ + +#endif diff --git a/include/dt-bindings/clock/tegra194-clock.h b/include/dt-bindings/clock/tegra194-clock.h new file mode 100644 index 0000000..a2ff663 --- /dev/null +++ b/include/dt-bindings/clock/tegra194-clock.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __ABI_MACH_T194_CLOCK_H +#define __ABI_MACH_T194_CLOCK_H + +#define TEGRA194_CLK_ACTMON 1 +#define TEGRA194_CLK_ADSP 2 +#define TEGRA194_CLK_ADSPNEON 3 +#define TEGRA194_CLK_AHUB 4 +#define TEGRA194_CLK_APB2APE 5 +#define TEGRA194_CLK_APE 6 +#define TEGRA194_CLK_AUD_MCLK 7 +#define TEGRA194_CLK_AXI_CBB 8 +#define TEGRA194_CLK_CAN1 9 +#define TEGRA194_CLK_CAN1_HOST 10 +#define TEGRA194_CLK_CAN2 11 +#define TEGRA194_CLK_CAN2_HOST 12 +#define TEGRA194_CLK_CEC 13 +#define TEGRA194_CLK_CLK_M 14 +#define TEGRA194_CLK_DMIC1 15 +#define TEGRA194_CLK_DMIC2 16 +#define TEGRA194_CLK_DMIC3 17 +#define TEGRA194_CLK_DMIC4 18 +#define TEGRA194_CLK_DPAUX 19 +#define TEGRA194_CLK_DPAUX1 20 +#define TEGRA194_CLK_ACLK 21 +#define TEGRA194_CLK_MSS_ENCRYPT 22 +#define TEGRA194_CLK_EQOS_RX_INPUT 23 +#define TEGRA194_CLK_IQC2 24 +#define TEGRA194_CLK_AON_APB 25 +#define TEGRA194_CLK_AON_NIC 26 +#define TEGRA194_CLK_AON_CPU_NIC 27 +#define TEGRA194_CLK_PLLA1 28 +#define TEGRA194_CLK_DSPK1 29 +#define TEGRA194_CLK_DSPK2 30 +#define TEGRA194_CLK_EMC 31 +#define TEGRA194_CLK_EQOS_AXI 32 +#define TEGRA194_CLK_EQOS_PTP_REF 33 +#define TEGRA194_CLK_EQOS_RX 34 +#define TEGRA194_CLK_EQOS_TX 35 +#define TEGRA194_CLK_EXTPERIPH1 36 +#define TEGRA194_CLK_EXTPERIPH2 37 +#define TEGRA194_CLK_EXTPERIPH3 38 +#define TEGRA194_CLK_EXTPERIPH4 39 +#define TEGRA194_CLK_FUSE 40 +#define TEGRA194_CLK_GPCCLK 41 +#define TEGRA194_CLK_GPU_PWR 42 +#define TEGRA194_CLK_HDA 43 +#define TEGRA194_CLK_HDA2CODEC_2X 44 +#define TEGRA194_CLK_HDA2HDMICODEC 45 +#define TEGRA194_CLK_HOST1X 46 +#define TEGRA194_CLK_HSIC_TRK 47 +#define TEGRA194_CLK_I2C1 48 +#define TEGRA194_CLK_I2C2 49 +#define TEGRA194_CLK_I2C3 50 +#define TEGRA194_CLK_I2C4 51 +#define TEGRA194_CLK_I2C6 52 +#define TEGRA194_CLK_I2C7 53 +#define TEGRA194_CLK_I2C8 54 +#define TEGRA194_CLK_I2C9 55 +#define TEGRA194_CLK_I2S1 56 +#define TEGRA194_CLK_I2S1_SYNC_INPUT 57 +#define TEGRA194_CLK_I2S2 58 +#define TEGRA194_CLK_I2S2_SYNC_INPUT 59 +#define TEGRA194_CLK_I2S3 60 +#define TEGRA194_CLK_I2S3_SYNC_INPUT 61 +#define TEGRA194_CLK_I2S4 62 +#define TEGRA194_CLK_I2S4_SYNC_INPUT 63 +#define TEGRA194_CLK_I2S5 64 +#define TEGRA194_CLK_I2S5_SYNC_INPUT 65 +#define TEGRA194_CLK_I2S6 66 +#define TEGRA194_CLK_I2S6_SYNC_INPUT 67 +#define TEGRA194_CLK_IQC1 68 +#define TEGRA194_CLK_ISP 69 +#define TEGRA194_CLK_KFUSE 70 +#define TEGRA194_CLK_MAUD 71 +#define TEGRA194_CLK_MIPI_CAL 72 +#define TEGRA194_CLK_MPHY_CORE_PLL_FIXED 73 +#define TEGRA194_CLK_MPHY_L0_RX_ANA 74 +#define 
TEGRA194_CLK_MPHY_L0_RX_LS_BIT 75 +#define TEGRA194_CLK_MPHY_L0_RX_SYMB 76 +#define TEGRA194_CLK_MPHY_L0_TX_LS_3XBIT 77 +#define TEGRA194_CLK_MPHY_L0_TX_SYMB 78 +#define TEGRA194_CLK_MPHY_L1_RX_ANA 79 +#define TEGRA194_CLK_MPHY_TX_1MHZ_REF 80 +#define TEGRA194_CLK_NVCSI 81 +#define TEGRA194_CLK_NVCSILP 82 +#define TEGRA194_CLK_NVDEC 83 +#define TEGRA194_CLK_NVDISPLAYHUB 84 +#define TEGRA194_CLK_NVDISPLAY_DISP 85 +#define TEGRA194_CLK_NVDISPLAY_P0 86 +#define TEGRA194_CLK_NVDISPLAY_P1 87 +#define TEGRA194_CLK_NVDISPLAY_P2 88 +#define TEGRA194_CLK_NVENC 89 +#define TEGRA194_CLK_NVJPG 90 +#define TEGRA194_CLK_OSC 91 +#define TEGRA194_CLK_AON_TOUCH 92 +#define TEGRA194_CLK_PLLA 93 +#define TEGRA194_CLK_PLLAON 94 +#define TEGRA194_CLK_PLLD 95 +#define TEGRA194_CLK_PLLD2 96 +#define TEGRA194_CLK_PLLD3 97 +#define TEGRA194_CLK_PLLDP 98 +#define TEGRA194_CLK_PLLD4 99 +#define TEGRA194_CLK_PLLE 100 +#define TEGRA194_CLK_PLLP 101 +#define TEGRA194_CLK_PLLP_OUT0 102 +#define TEGRA194_CLK_UTMIPLL 103 +#define TEGRA194_CLK_PLLA_OUT0 104 +#define TEGRA194_CLK_PWM1 105 +#define TEGRA194_CLK_PWM2 106 +#define TEGRA194_CLK_PWM3 107 +#define TEGRA194_CLK_PWM4 108 +#define TEGRA194_CLK_PWM5 109 +#define TEGRA194_CLK_PWM6 110 +#define TEGRA194_CLK_PWM7 111 +#define TEGRA194_CLK_PWM8 112 +#define TEGRA194_CLK_RCE_CPU_NIC 113 +#define TEGRA194_CLK_RCE_NIC 114 +#define TEGRA194_CLK_SATA 115 +#define TEGRA194_CLK_SATA_OOB 116 +#define TEGRA194_CLK_AON_I2C_SLOW 117 +#define TEGRA194_CLK_SCE_CPU_NIC 118 +#define TEGRA194_CLK_SCE_NIC 119 +#define TEGRA194_CLK_SDMMC1 120 +#define TEGRA194_CLK_UPHY_PLL3 121 +#define TEGRA194_CLK_SDMMC3 122 +#define TEGRA194_CLK_SDMMC4 123 +#define TEGRA194_CLK_SE 124 +#define TEGRA194_CLK_SOR0_OUT 125 +#define TEGRA194_CLK_SOR0_REF 126 +#define TEGRA194_CLK_SOR0_PAD_CLKOUT 127 +#define TEGRA194_CLK_SOR1_OUT 128 +#define TEGRA194_CLK_SOR1_REF 129 +#define TEGRA194_CLK_SOR1_PAD_CLKOUT 130 +#define TEGRA194_CLK_SOR_SAFE 131 +#define TEGRA194_CLK_IQC1_IN 132 +#define TEGRA194_CLK_IQC2_IN 133 +#define TEGRA194_CLK_DMIC5 134 +#define TEGRA194_CLK_SPI1 135 +#define TEGRA194_CLK_SPI2 136 +#define TEGRA194_CLK_SPI3 137 +#define TEGRA194_CLK_I2C_SLOW 138 +#define TEGRA194_CLK_SYNC_DMIC1 139 +#define TEGRA194_CLK_SYNC_DMIC2 140 +#define TEGRA194_CLK_SYNC_DMIC3 141 +#define TEGRA194_CLK_SYNC_DMIC4 142 +#define TEGRA194_CLK_SYNC_DSPK1 143 +#define TEGRA194_CLK_SYNC_DSPK2 144 +#define TEGRA194_CLK_SYNC_I2S1 145 +#define TEGRA194_CLK_SYNC_I2S2 146 +#define TEGRA194_CLK_SYNC_I2S3 147 +#define TEGRA194_CLK_SYNC_I2S4 148 +#define TEGRA194_CLK_SYNC_I2S5 149 +#define TEGRA194_CLK_SYNC_I2S6 150 +#define TEGRA194_CLK_MPHY_FORCE_LS_MODE 151 +#define TEGRA194_CLK_TACH 152 +#define TEGRA194_CLK_TSEC 153 +#define TEGRA194_CLK_TSECB 154 +#define TEGRA194_CLK_UARTA 155 +#define TEGRA194_CLK_UARTB 156 +#define TEGRA194_CLK_UARTC 157 +#define TEGRA194_CLK_UARTD 158 +#define TEGRA194_CLK_UARTE 159 +#define TEGRA194_CLK_UARTF 160 +#define TEGRA194_CLK_UARTG 161 +#define TEGRA194_CLK_UART_FST_MIPI_CAL 162 +#define TEGRA194_CLK_UFSDEV_REF 163 +#define TEGRA194_CLK_UFSHC 164 +#define TEGRA194_CLK_USB2_TRK 165 +#define TEGRA194_CLK_VI 166 +#define TEGRA194_CLK_VIC 167 +#define TEGRA194_CLK_PVA0_AXI 168 +#define TEGRA194_CLK_PVA0_VPS0 169 +#define TEGRA194_CLK_PVA0_VPS1 170 +#define TEGRA194_CLK_PVA1_AXI 171 +#define TEGRA194_CLK_PVA1_VPS0 172 +#define TEGRA194_CLK_PVA1_VPS1 173 +#define TEGRA194_CLK_DLA0_FALCON 174 +#define TEGRA194_CLK_DLA0_CORE 175 +#define TEGRA194_CLK_DLA1_FALCON 176 +#define TEGRA194_CLK_DLA1_CORE 
177 +#define TEGRA194_CLK_SOR2_OUT 178 +#define TEGRA194_CLK_SOR2_REF 179 +#define TEGRA194_CLK_SOR2_PAD_CLKOUT 180 +#define TEGRA194_CLK_SOR3_OUT 181 +#define TEGRA194_CLK_SOR3_REF 182 +#define TEGRA194_CLK_SOR3_PAD_CLKOUT 183 +#define TEGRA194_CLK_NVDISPLAY_P3 184 +#define TEGRA194_CLK_DPAUX2 185 +#define TEGRA194_CLK_DPAUX3 186 +#define TEGRA194_CLK_NVDEC1 187 +#define TEGRA194_CLK_NVENC1 188 +#define TEGRA194_CLK_SE_FREE 189 +#define TEGRA194_CLK_UARTH 190 +#define TEGRA194_CLK_FUSE_SERIAL 191 +#define TEGRA194_CLK_QSPI0 192 +#define TEGRA194_CLK_QSPI1 193 +#define TEGRA194_CLK_QSPI0_PM 194 +#define TEGRA194_CLK_QSPI1_PM 195 +#define TEGRA194_CLK_VI_CONST 196 +#define TEGRA194_CLK_NAFLL_BPMP 197 +#define TEGRA194_CLK_NAFLL_SCE 198 +#define TEGRA194_CLK_NAFLL_NVDEC 199 +#define TEGRA194_CLK_NAFLL_NVJPG 200 +#define TEGRA194_CLK_NAFLL_TSEC 201 +#define TEGRA194_CLK_NAFLL_TSECB 202 +#define TEGRA194_CLK_NAFLL_VI 203 +#define TEGRA194_CLK_NAFLL_SE 204 +#define TEGRA194_CLK_NAFLL_NVENC 205 +#define TEGRA194_CLK_NAFLL_ISP 206 +#define TEGRA194_CLK_NAFLL_VIC 207 +#define TEGRA194_CLK_NAFLL_NVDISPLAYHUB 208 +#define TEGRA194_CLK_NAFLL_AXICBB 209 +#define TEGRA194_CLK_NAFLL_DLA 210 +#define TEGRA194_CLK_NAFLL_PVA_CORE 211 +#define TEGRA194_CLK_NAFLL_PVA_VPS 212 +#define TEGRA194_CLK_NAFLL_CVNAS 213 +#define TEGRA194_CLK_NAFLL_RCE 214 +#define TEGRA194_CLK_NAFLL_NVENC1 215 +#define TEGRA194_CLK_NAFLL_DLA_FALCON 216 +#define TEGRA194_CLK_NAFLL_NVDEC1 217 +#define TEGRA194_CLK_NAFLL_GPU 218 +#define TEGRA194_CLK_SDMMC_LEGACY_TM 219 +#define TEGRA194_CLK_PEX0_CORE_0 220 +#define TEGRA194_CLK_PEX0_CORE_1 221 +#define TEGRA194_CLK_PEX0_CORE_2 222 +#define TEGRA194_CLK_PEX0_CORE_3 223 +#define TEGRA194_CLK_PEX0_CORE_4 224 +#define TEGRA194_CLK_PEX1_CORE_5 225 +#define TEGRA194_CLK_PEX_REF1 226 +#define TEGRA194_CLK_PEX_REF2 227 +#define TEGRA194_CLK_CSI_A 229 +#define TEGRA194_CLK_CSI_B 230 +#define TEGRA194_CLK_CSI_C 231 +#define TEGRA194_CLK_CSI_D 232 +#define TEGRA194_CLK_CSI_E 233 +#define TEGRA194_CLK_CSI_F 234 +#define TEGRA194_CLK_CSI_G 235 +#define TEGRA194_CLK_CSI_H 236 +#define TEGRA194_CLK_PLLC4 237 +#define TEGRA194_CLK_PLLC4_OUT 238 +#define TEGRA194_CLK_PLLC4_OUT1 239 +#define TEGRA194_CLK_PLLC4_OUT2 240 +#define TEGRA194_CLK_PLLC4_MUXED 241 +#define TEGRA194_CLK_PLLC4_VCO_DIV2 242 +#define TEGRA194_CLK_CSI_A_PAD 244 +#define TEGRA194_CLK_CSI_B_PAD 245 +#define TEGRA194_CLK_CSI_C_PAD 246 +#define TEGRA194_CLK_CSI_D_PAD 247 +#define TEGRA194_CLK_CSI_E_PAD 248 +#define TEGRA194_CLK_CSI_F_PAD 249 +#define TEGRA194_CLK_CSI_G_PAD 250 +#define TEGRA194_CLK_CSI_H_PAD 251 +#define TEGRA194_CLK_PEX_SATA_USB_RX_BYP 254 +#define TEGRA194_CLK_PEX_USB_PAD_PLL0_MGMT 255 +#define TEGRA194_CLK_PEX_USB_PAD_PLL1_MGMT 256 +#define TEGRA194_CLK_PEX_USB_PAD_PLL2_MGMT 257 +#define TEGRA194_CLK_PEX_USB_PAD_PLL3_MGMT 258 +#define TEGRA194_CLK_XUSB_CORE_DEV 265 +#define TEGRA194_CLK_XUSB_CORE_MUX 266 +#define TEGRA194_CLK_XUSB_CORE_HOST 267 +#define TEGRA194_CLK_XUSB_CORE_SS 268 +#define TEGRA194_CLK_XUSB_FALCON 269 +#define TEGRA194_CLK_XUSB_FALCON_HOST 270 +#define TEGRA194_CLK_XUSB_FALCON_SS 271 +#define TEGRA194_CLK_XUSB_FS 272 +#define TEGRA194_CLK_XUSB_FS_HOST 273 +#define TEGRA194_CLK_XUSB_FS_DEV 274 +#define TEGRA194_CLK_XUSB_SS 275 +#define TEGRA194_CLK_XUSB_SS_DEV 276 +#define TEGRA194_CLK_XUSB_SS_SUPERSPEED 277 +#define TEGRA194_CLK_PLLDISPHUB 278 +#define TEGRA194_CLK_PLLDISPHUB_DIV 279 +#define TEGRA194_CLK_NAFLL_CLUSTER0 280 +#define TEGRA194_CLK_NAFLL_CLUSTER1 281 +#define 
TEGRA194_CLK_NAFLL_CLUSTER2 282 +#define TEGRA194_CLK_NAFLL_CLUSTER3 283 +#define TEGRA194_CLK_CAN1_CORE 284 +#define TEGRA194_CLK_CAN2_CORE 285 +#define TEGRA194_CLK_PLLA1_OUT1 286 +#define TEGRA194_CLK_PLLREFE_VCOOUT 288 +#define TEGRA194_CLK_CLK_32K 289 +#define TEGRA194_CLK_SPDIFIN_SYNC_INPUT 290 +#define TEGRA194_CLK_UTMIPLL_CLKOUT48 291 +#define TEGRA194_CLK_UTMIPLL_CLKOUT480 292 +#define TEGRA194_CLK_CVNAS 293 +#define TEGRA194_CLK_PLLNVCSI 294 +#define TEGRA194_CLK_PVA0_CPU_AXI 295 +#define TEGRA194_CLK_PVA1_CPU_AXI 296 +#define TEGRA194_CLK_PVA0_VPS 297 +#define TEGRA194_CLK_PVA1_VPS 298 +#define TEGRA194_CLK_DLA0_FALCON_MUX 299 +#define TEGRA194_CLK_DLA1_FALCON_MUX 300 +#define TEGRA194_CLK_DLA0_CORE_MUX 301 +#define TEGRA194_CLK_DLA1_CORE_MUX 302 +#define TEGRA194_CLK_UTMIPLL_HPS 304 +#define TEGRA194_CLK_I2C5 305 +#define TEGRA194_CLK_I2C10 306 +#define TEGRA194_CLK_BPMP_CPU_NIC 307 +#define TEGRA194_CLK_BPMP_APB 308 +#define TEGRA194_CLK_TSC 309 +#define TEGRA194_CLK_EMCSA 310 +#define TEGRA194_CLK_EMCSB 311 +#define TEGRA194_CLK_EMCSC 312 +#define TEGRA194_CLK_EMCSD 313 +#define TEGRA194_CLK_PLLC 314 +#define TEGRA194_CLK_PLLC2 315 +#define TEGRA194_CLK_PLLC3 316 +#define TEGRA194_CLK_TSC_REF 317 +#define TEGRA194_CLK_FUSE_BURN 318 +#define TEGRA194_CLK_PEX0_CORE_0M 319 +#define TEGRA194_CLK_PEX0_CORE_1M 320 +#define TEGRA194_CLK_PEX0_CORE_2M 321 +#define TEGRA194_CLK_PEX0_CORE_3M 322 +#define TEGRA194_CLK_PEX0_CORE_4M 323 +#define TEGRA194_CLK_PEX1_CORE_5M 324 +#define TEGRA194_CLK_PLLE_HPS 326 + +#endif diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h new file mode 100644 index 0000000..b21a0eb --- /dev/null +++ b/include/dt-bindings/clock/tegra20-car.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra20-car. + * + * The first 96 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 95 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 96 and + * above. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA20_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA20_CAR_H + +#define TEGRA20_CLK_CPU 0 +/* 1 */ +/* 2 */ +#define TEGRA20_CLK_AC97 3 +#define TEGRA20_CLK_RTC 4 +#define TEGRA20_CLK_TIMER 5 +#define TEGRA20_CLK_UARTA 6 +/* 7 (register bit affects uart2 and vfir) */ +#define TEGRA20_CLK_GPIO 8 +#define TEGRA20_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA20_CLK_I2S1 11 +#define TEGRA20_CLK_I2C1 12 +#define TEGRA20_CLK_NDFLASH 13 +#define TEGRA20_CLK_SDMMC1 14 +#define TEGRA20_CLK_SDMMC4 15 +#define TEGRA20_CLK_TWC 16 +#define TEGRA20_CLK_PWM 17 +#define TEGRA20_CLK_I2S2 18 +#define TEGRA20_CLK_EPP 19 +/* 20 (register bit affects vi and vi_sensor) */ +#define TEGRA20_CLK_GR2D 21 +#define TEGRA20_CLK_USBD 22 +#define TEGRA20_CLK_ISP 23 +#define TEGRA20_CLK_GR3D 24 +#define TEGRA20_CLK_IDE 25 +#define TEGRA20_CLK_DISP2 26 +#define TEGRA20_CLK_DISP1 27 +#define TEGRA20_CLK_HOST1X 28 +#define TEGRA20_CLK_VCP 29 +/* 30 */ +#define TEGRA20_CLK_CACHE2 31 + +#define TEGRA20_CLK_MC 32 +#define TEGRA20_CLK_AHBDMA 33 +#define TEGRA20_CLK_APBDMA 34 +/* 35 */ +#define TEGRA20_CLK_KBC 36 +#define TEGRA20_CLK_STAT_MON 37 +#define TEGRA20_CLK_PMC 38 +#define TEGRA20_CLK_FUSE 39 +#define TEGRA20_CLK_KFUSE 40 +#define TEGRA20_CLK_SBC1 41 +#define TEGRA20_CLK_NOR 42 +#define TEGRA20_CLK_SPI 43 +#define TEGRA20_CLK_SBC2 44 +#define TEGRA20_CLK_XIO 45 +#define TEGRA20_CLK_SBC3 46 +#define TEGRA20_CLK_DVC 47 +#define TEGRA20_CLK_DSI 48 +/* 49 (register bit affects tvo and cve) */ +#define TEGRA20_CLK_MIPI 50 +#define TEGRA20_CLK_HDMI 51 +#define TEGRA20_CLK_CSI 52 +#define TEGRA20_CLK_TVDAC 53 +#define TEGRA20_CLK_I2C2 54 +#define TEGRA20_CLK_UARTC 55 +/* 56 */ +#define TEGRA20_CLK_EMC 57 +#define TEGRA20_CLK_USB2 58 +#define TEGRA20_CLK_USB3 59 +#define TEGRA20_CLK_MPE 60 +#define TEGRA20_CLK_VDE 61 +#define TEGRA20_CLK_BSEA 62 +#define TEGRA20_CLK_BSEV 63 + +#define TEGRA20_CLK_SPEEDO 64 +#define TEGRA20_CLK_UARTD 65 +#define TEGRA20_CLK_UARTE 66 +#define TEGRA20_CLK_I2C3 67 +#define TEGRA20_CLK_SBC4 68 +#define TEGRA20_CLK_SDMMC3 69 +#define TEGRA20_CLK_PEX 70 +#define TEGRA20_CLK_OWR 71 +#define TEGRA20_CLK_AFI 72 +#define TEGRA20_CLK_CSITE 73 +/* 74 */ +#define TEGRA20_CLK_AVPUCQ 75 +#define TEGRA20_CLK_LA 76 +/* 77 */ +/* 78 */ +/* 79 */ +/* 80 */ +/* 81 */ +/* 82 */ +/* 83 */ +#define TEGRA20_CLK_IRAMA 84 +#define TEGRA20_CLK_IRAMB 85 +#define TEGRA20_CLK_IRAMC 86 +#define TEGRA20_CLK_IRAMD 87 +#define TEGRA20_CLK_CRAM2 88 +#define TEGRA20_CLK_AUDIO_2X 89 /* a/k/a audio_2x_sync_clk */ +#define TEGRA20_CLK_CLK_D 90 +/* 91 */ +#define TEGRA20_CLK_CSUS 92 +#define TEGRA20_CLK_CDEV2 93 +#define TEGRA20_CLK_CDEV1 94 +/* 95 */ + +#define TEGRA20_CLK_UARTB 96 +#define TEGRA20_CLK_VFIR 97 +#define TEGRA20_CLK_SPDIF_IN 98 +#define TEGRA20_CLK_SPDIF_OUT 99 +#define TEGRA20_CLK_VI 100 +#define TEGRA20_CLK_VI_SENSOR 101 +#define TEGRA20_CLK_TVO 102 +#define TEGRA20_CLK_CVE 103 +#define TEGRA20_CLK_OSC 104 +#define TEGRA20_CLK_CLK_32K 105 /* a/k/a clk_s */ +#define TEGRA20_CLK_CLK_M 106 +#define TEGRA20_CLK_SCLK 107 +#define TEGRA20_CLK_CCLK 108 +#define TEGRA20_CLK_HCLK 109 +#define TEGRA20_CLK_PCLK 110 +#define TEGRA20_CLK_BLINK 111 +#define TEGRA20_CLK_PLL_A 112 +#define TEGRA20_CLK_PLL_A_OUT0 113 +#define TEGRA20_CLK_PLL_C 114 +#define TEGRA20_CLK_PLL_C_OUT1 115 +#define TEGRA20_CLK_PLL_D 116 +#define TEGRA20_CLK_PLL_D_OUT0 117 +#define TEGRA20_CLK_PLL_E 118 +#define TEGRA20_CLK_PLL_M 119 +#define TEGRA20_CLK_PLL_M_OUT1 120 +#define 
TEGRA20_CLK_PLL_P 121 +#define TEGRA20_CLK_PLL_P_OUT1 122 +#define TEGRA20_CLK_PLL_P_OUT2 123 +#define TEGRA20_CLK_PLL_P_OUT3 124 +#define TEGRA20_CLK_PLL_P_OUT4 125 +#define TEGRA20_CLK_PLL_S 126 +#define TEGRA20_CLK_PLL_U 127 + +#define TEGRA20_CLK_PLL_X 128 +#define TEGRA20_CLK_COP 129 /* a/k/a avp */ +#define TEGRA20_CLK_AUDIO 130 /* a/k/a audio_sync_clk */ +#define TEGRA20_CLK_PLL_REF 131 +#define TEGRA20_CLK_TWD 132 +#define TEGRA20_CLK_CLK_MAX 133 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA20_CAR_H */ diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h new file mode 100644 index 0000000..6b77e72 --- /dev/null +++ b/include/dt-bindings/clock/tegra210-car.h @@ -0,0 +1,412 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra210-car. + * + * The first 224 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 224 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 224 and + * above. + */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA210_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA210_CAR_H + +/* 0 */ +/* 1 */ +/* 2 */ +#define TEGRA210_CLK_ISPB 3 +#define TEGRA210_CLK_RTC 4 +#define TEGRA210_CLK_TIMER 5 +#define TEGRA210_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +#define TEGRA210_CLK_GPIO 8 +#define TEGRA210_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA210_CLK_I2S1 11 +#define TEGRA210_CLK_I2C1 12 +/* 13 */ +#define TEGRA210_CLK_SDMMC1 14 +#define TEGRA210_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA210_CLK_PWM 17 +#define TEGRA210_CLK_I2S2 18 +/* 19 */ +/* 20 (register bit affects vi and vi_sensor) */ +/* 21 */ +#define TEGRA210_CLK_USBD 22 +#define TEGRA210_CLK_ISPA 23 +/* 24 */ +/* 25 */ +#define TEGRA210_CLK_DISP2 26 +#define TEGRA210_CLK_DISP1 27 +#define TEGRA210_CLK_HOST1X 28 +/* 29 */ +#define TEGRA210_CLK_I2S0 30 +/* 31 */ + +#define TEGRA210_CLK_MC 32 +#define TEGRA210_CLK_AHBDMA 33 +#define TEGRA210_CLK_APBDMA 34 +/* 35 */ +/* 36 */ +/* 37 */ +#define TEGRA210_CLK_PMC 38 +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA210_CLK_KFUSE 40 +#define TEGRA210_CLK_SBC1 41 +/* 42 */ +/* 43 */ +#define TEGRA210_CLK_SBC2 44 +/* 45 */ +#define TEGRA210_CLK_SBC3 46 +#define TEGRA210_CLK_I2C5 47 +#define TEGRA210_CLK_DSIA 48 +/* 49 */ +/* 50 */ +/* 51 */ +#define TEGRA210_CLK_CSI 52 +/* 53 */ +#define TEGRA210_CLK_I2C2 54 +#define TEGRA210_CLK_UARTC 55 +#define TEGRA210_CLK_MIPI_CAL 56 +#define TEGRA210_CLK_EMC 57 +#define TEGRA210_CLK_USB2 58 +/* 59 */ +/* 60 */ +/* 61 */ +/* 62 */ +#define TEGRA210_CLK_BSEV 63 + +/* 64 */ +#define TEGRA210_CLK_UARTD 65 +/* 66 */ +#define TEGRA210_CLK_I2C3 67 +#define TEGRA210_CLK_SBC4 68 +#define TEGRA210_CLK_SDMMC3 69 +#define TEGRA210_CLK_PCIE 70 +#define TEGRA210_CLK_OWR 71 +#define TEGRA210_CLK_AFI 72 +#define TEGRA210_CLK_CSITE 73 +/* 74 */ +/* 75 */ +#define TEGRA210_CLK_LA 76 +/* 77 */ +#define TEGRA210_CLK_SOC_THERM 78 +#define TEGRA210_CLK_DTV 79 +/* 80 */ +#define TEGRA210_CLK_I2CSLOW 81 +#define TEGRA210_CLK_DSIB 82 +#define TEGRA210_CLK_TSEC 83 +/* 84 */ 
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA210_CLK_XUSB_HOST 89
+/* 90 */
+/* 91 */
+#define TEGRA210_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA210_CLK_MSELECT 99
+#define TEGRA210_CLK_TSENSOR 100
+#define TEGRA210_CLK_I2S3 101
+#define TEGRA210_CLK_I2S4 102
+#define TEGRA210_CLK_I2C4 103
+/* 104 */
+/* 105 */
+#define TEGRA210_CLK_D_AUDIO 106
+#define TEGRA210_CLK_APB2APE 107
+/* 108 */
+/* 109 */
+/* 110 */
+#define TEGRA210_CLK_HDA2CODEC_2X 111
+/* 112 */
+/* 113 */
+/* 114 */
+/* 115 */
+/* 116 */
+/* 117 */
+#define TEGRA210_CLK_SPDIF_2X 118
+#define TEGRA210_CLK_ACTMON 119
+#define TEGRA210_CLK_EXTERN1 120
+#define TEGRA210_CLK_EXTERN2 121
+#define TEGRA210_CLK_EXTERN3 122
+#define TEGRA210_CLK_SATA_OOB 123
+#define TEGRA210_CLK_SATA 124
+#define TEGRA210_CLK_HDA 125
+/* 126 */
+/* 127 */
+
+#define TEGRA210_CLK_HDA2HDMI 128
+/* 129 */
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+#define TEGRA210_CLK_CEC 136
+/* 137 */
+/* 138 */
+/* 139 */
+/* 140 */
+/* 141 */
+/* 142 */
+/* (bit affects xusb_falcon_src, xusb_fs_src, xusb_host_src and xusb_ss_src) */
+#define TEGRA210_CLK_XUSB_GATE 143
+#define TEGRA210_CLK_CILAB 144
+#define TEGRA210_CLK_CILCD 145
+#define TEGRA210_CLK_CILE 146
+#define TEGRA210_CLK_DSIALP 147
+#define TEGRA210_CLK_DSIBLP 148
+#define TEGRA210_CLK_ENTROPY 149
+/* 150 */
+/* 151 */
+#define TEGRA210_CLK_DP2 152
+/* 153 */
+/* 154 */
+/* 155 (bit affects dfll_ref and dfll_soc) */
+#define TEGRA210_CLK_XUSB_SS 156
+/* 157 */
+/* 158 */
+/* 159 */
+
+/* 160 */
+#define TEGRA210_CLK_DMIC1 161
+#define TEGRA210_CLK_DMIC2 162
+/* 163 */
+/* 164 */
+/* 165 */
+#define TEGRA210_CLK_I2C6 166
+/* 167 */
+/* 168 */
+/* 169 */
+/* 170 */
+#define TEGRA210_CLK_VIM2_CLK 171
+/* 172 */
+#define TEGRA210_CLK_MIPIBIF 173
+/* 174 */
+/* 175 */
+/* 176 */
+#define TEGRA210_CLK_CLK72MHZ 177
+#define TEGRA210_CLK_VIC03 178
+/* 179 */
+/* 180 */
+#define TEGRA210_CLK_DPAUX 181
+#define TEGRA210_CLK_SOR0 182
+#define TEGRA210_CLK_SOR1 183
+#define TEGRA210_CLK_GPU 184
+#define TEGRA210_CLK_DBGAPB 185
+/* 186 */
+#define TEGRA210_CLK_PLL_P_OUT_ADSP 187
+/* 188 (bit affects pll_a_out_adsp and pll_a_out0_out_adsp) */
+#define TEGRA210_CLK_PLL_G_REF 189
+/* 190 */
+/* 191 */
+
+/* 192 */
+#define TEGRA210_CLK_SDMMC_LEGACY 193
+#define TEGRA210_CLK_NVDEC 194
+#define TEGRA210_CLK_NVJPG 195
+/* 196 */
+#define TEGRA210_CLK_DMIC3 197
+#define TEGRA210_CLK_APE 198
+#define TEGRA210_CLK_ADSP 199
+/* 200 */
+/* 201 */
+#define TEGRA210_CLK_MAUD 202
+/* 203 */
+/* 204 */
+/* 205 */
+#define TEGRA210_CLK_TSECB 206
+#define TEGRA210_CLK_DPAUX1 207
+#define TEGRA210_CLK_VI_I2C 208
+#define TEGRA210_CLK_HSIC_TRK 209
+#define TEGRA210_CLK_USB2_TRK 210
+#define TEGRA210_CLK_QSPI 211
+#define TEGRA210_CLK_UARTAPE 212
+/* 213 */
+/* 214 */
+/* 215 */
+/* 216 */
+/* 217 */
+#define TEGRA210_CLK_ADSP_NEON 218
+#define TEGRA210_CLK_NVENC 219
+#define TEGRA210_CLK_IQC2 220
+#define TEGRA210_CLK_IQC1 221
+#define TEGRA210_CLK_SOR_SAFE 222
+#define TEGRA210_CLK_PLL_P_OUT_CPU 223
+
+
+#define TEGRA210_CLK_UARTB 224
+#define TEGRA210_CLK_VFIR 225
+#define TEGRA210_CLK_SPDIF_IN 226
+#define TEGRA210_CLK_SPDIF_OUT 227
+#define TEGRA210_CLK_VI 228
+#define TEGRA210_CLK_VI_SENSOR 229
+#define TEGRA210_CLK_FUSE 230
+#define TEGRA210_CLK_FUSE_BURN 231
+#define TEGRA210_CLK_CLK_32K 232
+#define TEGRA210_CLK_CLK_M 233
+#define TEGRA210_CLK_CLK_M_DIV2 234
+#define TEGRA210_CLK_CLK_M_DIV4 235
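
The comment at the top of this header explains that IDs below 224 mirror bit positions in the CAR's CLK_OUT_ENB registers, while the IDs from 224 up (such as those that follow) are virtual. A sketch of the implied arithmetic, assuming 224 = 7 banks of 32 bits; the names and return convention here are illustrative only, not part of the binding.

/* Map a bit-mapped clock ID to its CLK_OUT_ENB bank and bit position.
 * IDs of 224 and above are not register bits and must be special-cased,
 * as must the shared bits called out in the comments (e.g. bit 7). */
#define EXAMPLE_CAR_BANK_BITS 32
#define EXAMPLE_CAR_NUM_BANKS 7	/* 7 * 32 = 224 bit-mapped IDs */

static int example_clk_id_to_enb_bit(unsigned int id,
				     unsigned int *bank, unsigned int *bit)
{
	if (id >= EXAMPLE_CAR_BANK_BITS * EXAMPLE_CAR_NUM_BANKS)
		return -1;	/* virtual ID, handled elsewhere */

	*bank = id / EXAMPLE_CAR_BANK_BITS;	/* which CLK_OUT_ENB_* register */
	*bit = id % EXAMPLE_CAR_BANK_BITS;	/* bit position within it */
	return 0;
}

+#define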
TEGRA210_CLK_PLL_REF 236 +#define TEGRA210_CLK_PLL_C 237 +#define TEGRA210_CLK_PLL_C_OUT1 238 +#define TEGRA210_CLK_PLL_C2 239 +#define TEGRA210_CLK_PLL_C3 240 +#define TEGRA210_CLK_PLL_M 241 +#define TEGRA210_CLK_PLL_M_OUT1 242 +#define TEGRA210_CLK_PLL_P 243 +#define TEGRA210_CLK_PLL_P_OUT1 244 +#define TEGRA210_CLK_PLL_P_OUT2 245 +#define TEGRA210_CLK_PLL_P_OUT3 246 +#define TEGRA210_CLK_PLL_P_OUT4 247 +#define TEGRA210_CLK_PLL_A 248 +#define TEGRA210_CLK_PLL_A_OUT0 249 +#define TEGRA210_CLK_PLL_D 250 +#define TEGRA210_CLK_PLL_D_OUT0 251 +#define TEGRA210_CLK_PLL_D2 252 +#define TEGRA210_CLK_PLL_D2_OUT0 253 +#define TEGRA210_CLK_PLL_U 254 +#define TEGRA210_CLK_PLL_U_480M 255 + +#define TEGRA210_CLK_PLL_U_60M 256 +#define TEGRA210_CLK_PLL_U_48M 257 +/* 258 */ +#define TEGRA210_CLK_PLL_X 259 +#define TEGRA210_CLK_PLL_X_OUT0 260 +#define TEGRA210_CLK_PLL_RE_VCO 261 +#define TEGRA210_CLK_PLL_RE_OUT 262 +#define TEGRA210_CLK_PLL_E 263 +#define TEGRA210_CLK_SPDIF_IN_SYNC 264 +#define TEGRA210_CLK_I2S0_SYNC 265 +#define TEGRA210_CLK_I2S1_SYNC 266 +#define TEGRA210_CLK_I2S2_SYNC 267 +#define TEGRA210_CLK_I2S3_SYNC 268 +#define TEGRA210_CLK_I2S4_SYNC 269 +#define TEGRA210_CLK_VIMCLK_SYNC 270 +#define TEGRA210_CLK_AUDIO0 271 +#define TEGRA210_CLK_AUDIO1 272 +#define TEGRA210_CLK_AUDIO2 273 +#define TEGRA210_CLK_AUDIO3 274 +#define TEGRA210_CLK_AUDIO4 275 +#define TEGRA210_CLK_SPDIF 276 +#define TEGRA210_CLK_CLK_OUT_1 277 +#define TEGRA210_CLK_CLK_OUT_2 278 +#define TEGRA210_CLK_CLK_OUT_3 279 +#define TEGRA210_CLK_BLINK 280 +/* 281 */ +#define TEGRA210_CLK_SOR1_SRC 282 +#define TEGRA210_CLK_SOR1_OUT 282 +/* 283 */ +#define TEGRA210_CLK_XUSB_HOST_SRC 284 +#define TEGRA210_CLK_XUSB_FALCON_SRC 285 +#define TEGRA210_CLK_XUSB_FS_SRC 286 +#define TEGRA210_CLK_XUSB_SS_SRC 287 + +#define TEGRA210_CLK_XUSB_DEV_SRC 288 +#define TEGRA210_CLK_XUSB_DEV 289 +#define TEGRA210_CLK_XUSB_HS_SRC 290 +#define TEGRA210_CLK_SCLK 291 +#define TEGRA210_CLK_HCLK 292 +#define TEGRA210_CLK_PCLK 293 +#define TEGRA210_CLK_CCLK_G 294 +#define TEGRA210_CLK_CCLK_LP 295 +#define TEGRA210_CLK_DFLL_REF 296 +#define TEGRA210_CLK_DFLL_SOC 297 +#define TEGRA210_CLK_VI_SENSOR2 298 +#define TEGRA210_CLK_PLL_P_OUT5 299 +#define TEGRA210_CLK_CML0 300 +#define TEGRA210_CLK_CML1 301 +#define TEGRA210_CLK_PLL_C4 302 +#define TEGRA210_CLK_PLL_DP 303 +#define TEGRA210_CLK_PLL_E_MUX 304 +#define TEGRA210_CLK_PLL_MB 305 +#define TEGRA210_CLK_PLL_A1 306 +#define TEGRA210_CLK_PLL_D_DSI_OUT 307 +#define TEGRA210_CLK_PLL_C4_OUT0 308 +#define TEGRA210_CLK_PLL_C4_OUT1 309 +#define TEGRA210_CLK_PLL_C4_OUT2 310 +#define TEGRA210_CLK_PLL_C4_OUT3 311 +#define TEGRA210_CLK_PLL_U_OUT 312 +#define TEGRA210_CLK_PLL_U_OUT1 313 +#define TEGRA210_CLK_PLL_U_OUT2 314 +#define TEGRA210_CLK_USB2_HSIC_TRK 315 +#define TEGRA210_CLK_PLL_P_OUT_HSIO 316 +#define TEGRA210_CLK_PLL_P_OUT_XUSB 317 +#define TEGRA210_CLK_XUSB_SSP_SRC 318 +#define TEGRA210_CLK_PLL_RE_OUT1 319 +/* 320 */ +/* 321 */ +#define TEGRA210_CLK_ISP 322 +#define TEGRA210_CLK_PLL_A_OUT_ADSP 323 +#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 +/* 325 */ +/* 326 */ +/* 327 */ +/* 328 */ +/* 329 */ +/* 330 */ +/* 331 */ +/* 332 */ +/* 333 */ +/* 334 */ +/* 335 */ +/* 336 */ +/* 337 */ +/* 338 */ +/* 339 */ +/* 340 */ +/* 341 */ +/* 342 */ +/* 343 */ +/* 344 */ +/* 345 */ +/* 346 */ +/* 347 */ +/* 348 */ +/* 349 */ + +#define TEGRA210_CLK_AUDIO0_MUX 350 +#define TEGRA210_CLK_AUDIO1_MUX 351 +#define TEGRA210_CLK_AUDIO2_MUX 352 +#define TEGRA210_CLK_AUDIO3_MUX 353 +#define TEGRA210_CLK_AUDIO4_MUX 354 +#define 
TEGRA210_CLK_SPDIF_MUX 355 +#define TEGRA210_CLK_CLK_OUT_1_MUX 356 +#define TEGRA210_CLK_CLK_OUT_2_MUX 357 +#define TEGRA210_CLK_CLK_OUT_3_MUX 358 +#define TEGRA210_CLK_DSIA_MUX 359 +#define TEGRA210_CLK_DSIB_MUX 360 +#define TEGRA210_CLK_SOR0_LVDS 361 +#define TEGRA210_CLK_XUSB_SS_DIV2 362 + +#define TEGRA210_CLK_PLL_M_UD 363 +#define TEGRA210_CLK_PLL_C_UD 364 +#define TEGRA210_CLK_SCLK_MUX 365 + +#define TEGRA210_CLK_ACLK 370 + +#define TEGRA210_CLK_DMIC1_SYNC_CLK 388 +#define TEGRA210_CLK_DMIC1_SYNC_CLK_MUX 389 +#define TEGRA210_CLK_DMIC2_SYNC_CLK 390 +#define TEGRA210_CLK_DMIC2_SYNC_CLK_MUX 391 +#define TEGRA210_CLK_DMIC3_SYNC_CLK 392 +#define TEGRA210_CLK_DMIC3_SYNC_CLK_MUX 393 + +#define TEGRA210_CLK_CLK_MAX 394 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA210_CAR_H */ diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h new file mode 100644 index 0000000..3c90f15 --- /dev/null +++ b/include/dt-bindings/clock/tegra30-car.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra30-car. + * + * The first 130 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 160 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 160 and + * above. + */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA30_CAR_H +#define _DT_BINDINGS_CLOCK_TEGRA30_CAR_H + +#define TEGRA30_CLK_CPU 0 +/* 1 */ +/* 2 */ +/* 3 */ +#define TEGRA30_CLK_RTC 4 +#define TEGRA30_CLK_TIMER 5 +#define TEGRA30_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +#define TEGRA30_CLK_GPIO 8 +#define TEGRA30_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA30_CLK_I2S1 11 +#define TEGRA30_CLK_I2C1 12 +#define TEGRA30_CLK_NDFLASH 13 +#define TEGRA30_CLK_SDMMC1 14 +#define TEGRA30_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA30_CLK_PWM 17 +#define TEGRA30_CLK_I2S2 18 +#define TEGRA30_CLK_EPP 19 +/* 20 (register bit affects vi and vi_sensor) */ +#define TEGRA30_CLK_GR2D 21 +#define TEGRA30_CLK_USBD 22 +#define TEGRA30_CLK_ISP 23 +#define TEGRA30_CLK_GR3D 24 +/* 25 */ +#define TEGRA30_CLK_DISP2 26 +#define TEGRA30_CLK_DISP1 27 +#define TEGRA30_CLK_HOST1X 28 +#define TEGRA30_CLK_VCP 29 +#define TEGRA30_CLK_I2S0 30 +#define TEGRA30_CLK_COP_CACHE 31 + +#define TEGRA30_CLK_MC 32 +#define TEGRA30_CLK_AHBDMA 33 +#define TEGRA30_CLK_APBDMA 34 +/* 35 */ +#define TEGRA30_CLK_KBC 36 +#define TEGRA30_CLK_STATMON 37 +#define TEGRA30_CLK_PMC 38 +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA30_CLK_KFUSE 40 +#define TEGRA30_CLK_SBC1 41 +#define TEGRA30_CLK_NOR 42 +/* 43 */ +#define TEGRA30_CLK_SBC2 44 +/* 45 */ +#define TEGRA30_CLK_SBC3 46 +#define TEGRA30_CLK_I2C5 47 +#define TEGRA30_CLK_DSIA 48 +/* 49 (register bit affects cve and tvo) */ +#define TEGRA30_CLK_MIPI 50 +#define TEGRA30_CLK_HDMI 51 +#define TEGRA30_CLK_CSI 52 +#define TEGRA30_CLK_TVDAC 53 +#define TEGRA30_CLK_I2C2 54 +#define TEGRA30_CLK_UARTC 55 +/* 56 */ +#define TEGRA30_CLK_EMC 57 +#define TEGRA30_CLK_USB2 58 +#define TEGRA30_CLK_USB3 59 +#define TEGRA30_CLK_MPE 60 +#define TEGRA30_CLK_VDE 61 +#define 
TEGRA30_CLK_BSEA 62 +#define TEGRA30_CLK_BSEV 63 + +#define TEGRA30_CLK_SPEEDO 64 +#define TEGRA30_CLK_UARTD 65 +#define TEGRA30_CLK_UARTE 66 +#define TEGRA30_CLK_I2C3 67 +#define TEGRA30_CLK_SBC4 68 +#define TEGRA30_CLK_SDMMC3 69 +#define TEGRA30_CLK_PCIE 70 +#define TEGRA30_CLK_OWR 71 +#define TEGRA30_CLK_AFI 72 +#define TEGRA30_CLK_CSITE 73 +/* 74 */ +#define TEGRA30_CLK_AVPUCQ 75 +#define TEGRA30_CLK_LA 76 +/* 77 */ +/* 78 */ +#define TEGRA30_CLK_DTV 79 +#define TEGRA30_CLK_NDSPEED 80 +#define TEGRA30_CLK_I2CSLOW 81 +#define TEGRA30_CLK_DSIB 82 +/* 83 */ +#define TEGRA30_CLK_IRAMA 84 +#define TEGRA30_CLK_IRAMB 85 +#define TEGRA30_CLK_IRAMC 86 +#define TEGRA30_CLK_IRAMD 87 +#define TEGRA30_CLK_CRAM2 88 +/* 89 */ +#define TEGRA30_CLK_AUDIO_2X 90 /* a/k/a audio_2x_sync_clk */ +/* 91 */ +#define TEGRA30_CLK_CSUS 92 +#define TEGRA30_CLK_CDEV2 93 +#define TEGRA30_CLK_CDEV1 94 +/* 95 */ + +#define TEGRA30_CLK_CPU_G 96 +#define TEGRA30_CLK_CPU_LP 97 +#define TEGRA30_CLK_GR3D2 98 +#define TEGRA30_CLK_MSELECT 99 +#define TEGRA30_CLK_TSENSOR 100 +#define TEGRA30_CLK_I2S3 101 +#define TEGRA30_CLK_I2S4 102 +#define TEGRA30_CLK_I2C4 103 +#define TEGRA30_CLK_SBC5 104 +#define TEGRA30_CLK_SBC6 105 +#define TEGRA30_CLK_D_AUDIO 106 +#define TEGRA30_CLK_APBIF 107 +#define TEGRA30_CLK_DAM0 108 +#define TEGRA30_CLK_DAM1 109 +#define TEGRA30_CLK_DAM2 110 +#define TEGRA30_CLK_HDA2CODEC_2X 111 +#define TEGRA30_CLK_ATOMICS 112 +#define TEGRA30_CLK_AUDIO0_2X 113 +#define TEGRA30_CLK_AUDIO1_2X 114 +#define TEGRA30_CLK_AUDIO2_2X 115 +#define TEGRA30_CLK_AUDIO3_2X 116 +#define TEGRA30_CLK_AUDIO4_2X 117 +#define TEGRA30_CLK_SPDIF_2X 118 +#define TEGRA30_CLK_ACTMON 119 +#define TEGRA30_CLK_EXTERN1 120 +#define TEGRA30_CLK_EXTERN2 121 +#define TEGRA30_CLK_EXTERN3 122 +#define TEGRA30_CLK_SATA_OOB 123 +#define TEGRA30_CLK_SATA 124 +#define TEGRA30_CLK_HDA 125 +/* 126 */ +#define TEGRA30_CLK_SE 127 + +#define TEGRA30_CLK_HDA2HDMI 128 +#define TEGRA30_CLK_SATA_COLD 129 +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +#define TEGRA30_CLK_CEC 136 +/* 137 */ +/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 */ +/* 144 */ +/* 145 */ +/* 146 */ +/* 147 */ +/* 148 */ +/* 149 */ +/* 150 */ +/* 151 */ +/* 152 */ +/* 153 */ +/* 154 */ +/* 155 */ +/* 156 */ +/* 157 */ +/* 158 */ +/* 159 */ + +#define TEGRA30_CLK_UARTB 160 +#define TEGRA30_CLK_VFIR 161 +#define TEGRA30_CLK_SPDIF_IN 162 +#define TEGRA30_CLK_SPDIF_OUT 163 +#define TEGRA30_CLK_VI 164 +#define TEGRA30_CLK_VI_SENSOR 165 +#define TEGRA30_CLK_FUSE 166 +#define TEGRA30_CLK_FUSE_BURN 167 +#define TEGRA30_CLK_CVE 168 +#define TEGRA30_CLK_TVO 169 +#define TEGRA30_CLK_CLK_32K 170 +#define TEGRA30_CLK_CLK_M 171 +#define TEGRA30_CLK_CLK_M_DIV2 172 +#define TEGRA30_CLK_CLK_M_DIV4 173 +#define TEGRA30_CLK_PLL_REF 174 +#define TEGRA30_CLK_PLL_C 175 +#define TEGRA30_CLK_PLL_C_OUT1 176 +#define TEGRA30_CLK_PLL_M 177 +#define TEGRA30_CLK_PLL_M_OUT1 178 +#define TEGRA30_CLK_PLL_P 179 +#define TEGRA30_CLK_PLL_P_OUT1 180 +#define TEGRA30_CLK_PLL_P_OUT2 181 +#define TEGRA30_CLK_PLL_P_OUT3 182 +#define TEGRA30_CLK_PLL_P_OUT4 183 +#define TEGRA30_CLK_PLL_A 184 +#define TEGRA30_CLK_PLL_A_OUT0 185 +#define TEGRA30_CLK_PLL_D 186 +#define TEGRA30_CLK_PLL_D_OUT0 187 +#define TEGRA30_CLK_PLL_D2 188 +#define TEGRA30_CLK_PLL_D2_OUT0 189 +#define TEGRA30_CLK_PLL_U 190 +#define TEGRA30_CLK_PLL_X 191 + +#define TEGRA30_CLK_PLL_X_OUT0 192 +#define TEGRA30_CLK_PLL_E 193 +#define TEGRA30_CLK_SPDIF_IN_SYNC 194 +#define TEGRA30_CLK_I2S0_SYNC 195 +#define 
TEGRA30_CLK_I2S1_SYNC 196 +#define TEGRA30_CLK_I2S2_SYNC 197 +#define TEGRA30_CLK_I2S3_SYNC 198 +#define TEGRA30_CLK_I2S4_SYNC 199 +#define TEGRA30_CLK_VIMCLK_SYNC 200 +#define TEGRA30_CLK_AUDIO0 201 +#define TEGRA30_CLK_AUDIO1 202 +#define TEGRA30_CLK_AUDIO2 203 +#define TEGRA30_CLK_AUDIO3 204 +#define TEGRA30_CLK_AUDIO4 205 +#define TEGRA30_CLK_SPDIF 206 +#define TEGRA30_CLK_CLK_OUT_1 207 /* (extern1) */ +#define TEGRA30_CLK_CLK_OUT_2 208 /* (extern2) */ +#define TEGRA30_CLK_CLK_OUT_3 209 /* (extern3) */ +#define TEGRA30_CLK_SCLK 210 +#define TEGRA30_CLK_BLINK 211 +#define TEGRA30_CLK_CCLK_G 212 +#define TEGRA30_CLK_CCLK_LP 213 +#define TEGRA30_CLK_TWD 214 +#define TEGRA30_CLK_CML0 215 +#define TEGRA30_CLK_CML1 216 +#define TEGRA30_CLK_HCLK 217 +#define TEGRA30_CLK_PCLK 218 +/* 219 */ +/* 220 */ +/* 221 */ +/* 222 */ +/* 223 */ + +/* 288 */ +/* 289 */ +/* 290 */ +/* 291 */ +/* 292 */ +/* 293 */ +/* 294 */ +/* 295 */ +/* 296 */ +/* 297 */ +/* 298 */ +/* 299 */ +#define TEGRA30_CLK_CLK_OUT_1_MUX 300 +#define TEGRA30_CLK_CLK_OUT_2_MUX 301 +#define TEGRA30_CLK_CLK_OUT_3_MUX 302 +#define TEGRA30_CLK_AUDIO0_MUX 303 +#define TEGRA30_CLK_AUDIO1_MUX 304 +#define TEGRA30_CLK_AUDIO2_MUX 305 +#define TEGRA30_CLK_AUDIO3_MUX 306 +#define TEGRA30_CLK_AUDIO4_MUX 307 +#define TEGRA30_CLK_SPDIF_MUX 308 +#define TEGRA30_CLK_CLK_MAX 309 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA30_CAR_H */ diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h new file mode 100644 index 0000000..95394f3 --- /dev/null +++ b/include/dt-bindings/clock/vf610-clock.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2013 Freescale Semiconductor, Inc. + */ + +#ifndef __DT_BINDINGS_CLOCK_VF610_H +#define __DT_BINDINGS_CLOCK_VF610_H + +#define VF610_CLK_DUMMY 0 +#define VF610_CLK_SIRC_128K 1 +#define VF610_CLK_SIRC_32K 2 +#define VF610_CLK_FIRC 3 +#define VF610_CLK_SXOSC 4 +#define VF610_CLK_FXOSC 5 +#define VF610_CLK_FXOSC_HALF 6 +#define VF610_CLK_SLOW_CLK_SEL 7 +#define VF610_CLK_FASK_CLK_SEL 8 +#define VF610_CLK_AUDIO_EXT 9 +#define VF610_CLK_ENET_EXT 10 +#define VF610_CLK_PLL1_SYS 11 +#define VF610_CLK_PLL1_PFD1 12 +#define VF610_CLK_PLL1_PFD2 13 +#define VF610_CLK_PLL1_PFD3 14 +#define VF610_CLK_PLL1_PFD4 15 +#define VF610_CLK_PLL2_BUS 16 +#define VF610_CLK_PLL2_PFD1 17 +#define VF610_CLK_PLL2_PFD2 18 +#define VF610_CLK_PLL2_PFD3 19 +#define VF610_CLK_PLL2_PFD4 20 +#define VF610_CLK_PLL3_USB_OTG 21 +#define VF610_CLK_PLL3_PFD1 22 +#define VF610_CLK_PLL3_PFD2 23 +#define VF610_CLK_PLL3_PFD3 24 +#define VF610_CLK_PLL3_PFD4 25 +#define VF610_CLK_PLL4_AUDIO 26 +#define VF610_CLK_PLL5_ENET 27 +#define VF610_CLK_PLL6_VIDEO 28 +#define VF610_CLK_PLL3_MAIN_DIV 29 +#define VF610_CLK_PLL4_MAIN_DIV 30 +#define VF610_CLK_PLL6_MAIN_DIV 31 +#define VF610_CLK_PLL1_PFD_SEL 32 +#define VF610_CLK_PLL2_PFD_SEL 33 +#define VF610_CLK_SYS_SEL 34 +#define VF610_CLK_DDR_SEL 35 +#define VF610_CLK_SYS_BUS 36 +#define VF610_CLK_PLATFORM_BUS 37 +#define VF610_CLK_IPG_BUS 38 +#define VF610_CLK_UART0 39 +#define VF610_CLK_UART1 40 +#define VF610_CLK_UART2 41 +#define VF610_CLK_UART3 42 +#define VF610_CLK_UART4 43 +#define VF610_CLK_UART5 44 +#define VF610_CLK_PIT 45 +#define VF610_CLK_I2C0 46 +#define VF610_CLK_I2C1 47 +#define VF610_CLK_I2C2 48 +#define VF610_CLK_I2C3 49 +#define VF610_CLK_FTM0_EXT_SEL 50 +#define VF610_CLK_FTM0_FIX_SEL 51 +#define VF610_CLK_FTM0_EXT_FIX_EN 52 +#define VF610_CLK_FTM1_EXT_SEL 53 +#define VF610_CLK_FTM1_FIX_SEL 54 +#define VF610_CLK_FTM1_EXT_FIX_EN 55 
+#define VF610_CLK_FTM2_EXT_SEL 56 +#define VF610_CLK_FTM2_FIX_SEL 57 +#define VF610_CLK_FTM2_EXT_FIX_EN 58 +#define VF610_CLK_FTM3_EXT_SEL 59 +#define VF610_CLK_FTM3_FIX_SEL 60 +#define VF610_CLK_FTM3_EXT_FIX_EN 61 +#define VF610_CLK_FTM0 62 +#define VF610_CLK_FTM1 63 +#define VF610_CLK_FTM2 64 +#define VF610_CLK_FTM3 65 +#define VF610_CLK_ENET_50M 66 +#define VF610_CLK_ENET_25M 67 +#define VF610_CLK_ENET_SEL 68 +#define VF610_CLK_ENET 69 +#define VF610_CLK_ENET_TS_SEL 70 +#define VF610_CLK_ENET_TS 71 +#define VF610_CLK_DSPI0 72 +#define VF610_CLK_DSPI1 73 +#define VF610_CLK_DSPI2 74 +#define VF610_CLK_DSPI3 75 +#define VF610_CLK_WDT 76 +#define VF610_CLK_ESDHC0_SEL 77 +#define VF610_CLK_ESDHC0_EN 78 +#define VF610_CLK_ESDHC0_DIV 79 +#define VF610_CLK_ESDHC0 80 +#define VF610_CLK_ESDHC1_SEL 81 +#define VF610_CLK_ESDHC1_EN 82 +#define VF610_CLK_ESDHC1_DIV 83 +#define VF610_CLK_ESDHC1 84 +#define VF610_CLK_DCU0_SEL 85 +#define VF610_CLK_DCU0_EN 86 +#define VF610_CLK_DCU0_DIV 87 +#define VF610_CLK_DCU0 88 +#define VF610_CLK_DCU1_SEL 89 +#define VF610_CLK_DCU1_EN 90 +#define VF610_CLK_DCU1_DIV 91 +#define VF610_CLK_DCU1 92 +#define VF610_CLK_ESAI_SEL 93 +#define VF610_CLK_ESAI_EN 94 +#define VF610_CLK_ESAI_DIV 95 +#define VF610_CLK_ESAI 96 +#define VF610_CLK_SAI0_SEL 97 +#define VF610_CLK_SAI0_EN 98 +#define VF610_CLK_SAI0_DIV 99 +#define VF610_CLK_SAI0 100 +#define VF610_CLK_SAI1_SEL 101 +#define VF610_CLK_SAI1_EN 102 +#define VF610_CLK_SAI1_DIV 103 +#define VF610_CLK_SAI1 104 +#define VF610_CLK_SAI2_SEL 105 +#define VF610_CLK_SAI2_EN 106 +#define VF610_CLK_SAI2_DIV 107 +#define VF610_CLK_SAI2 108 +#define VF610_CLK_SAI3_SEL 109 +#define VF610_CLK_SAI3_EN 110 +#define VF610_CLK_SAI3_DIV 111 +#define VF610_CLK_SAI3 112 +#define VF610_CLK_USBC0 113 +#define VF610_CLK_USBC1 114 +#define VF610_CLK_QSPI0_SEL 115 +#define VF610_CLK_QSPI0_EN 116 +#define VF610_CLK_QSPI0_X4_DIV 117 +#define VF610_CLK_QSPI0_X2_DIV 118 +#define VF610_CLK_QSPI0_X1_DIV 119 +#define VF610_CLK_QSPI1_SEL 120 +#define VF610_CLK_QSPI1_EN 121 +#define VF610_CLK_QSPI1_X4_DIV 122 +#define VF610_CLK_QSPI1_X2_DIV 123 +#define VF610_CLK_QSPI1_X1_DIV 124 +#define VF610_CLK_QSPI0 125 +#define VF610_CLK_QSPI1 126 +#define VF610_CLK_NFC_SEL 127 +#define VF610_CLK_NFC_EN 128 +#define VF610_CLK_NFC_PRE_DIV 129 +#define VF610_CLK_NFC_FRAC_DIV 130 +#define VF610_CLK_NFC_INV 131 +#define VF610_CLK_NFC 132 +#define VF610_CLK_VADC_SEL 133 +#define VF610_CLK_VADC_EN 134 +#define VF610_CLK_VADC_DIV 135 +#define VF610_CLK_VADC_DIV_HALF 136 +#define VF610_CLK_VADC 137 +#define VF610_CLK_ADC0 138 +#define VF610_CLK_ADC1 139 +#define VF610_CLK_DAC0 140 +#define VF610_CLK_DAC1 141 +#define VF610_CLK_FLEXCAN0 142 +#define VF610_CLK_FLEXCAN1 143 +#define VF610_CLK_ASRC 144 +#define VF610_CLK_GPU_SEL 145 +#define VF610_CLK_GPU_EN 146 +#define VF610_CLK_GPU2D 147 +#define VF610_CLK_ENET0 148 +#define VF610_CLK_ENET1 149 +#define VF610_CLK_DMAMUX0 150 +#define VF610_CLK_DMAMUX1 151 +#define VF610_CLK_DMAMUX2 152 +#define VF610_CLK_DMAMUX3 153 +#define VF610_CLK_FLEXCAN0_EN 154 +#define VF610_CLK_FLEXCAN1_EN 155 +#define VF610_CLK_PLL7_USB_HOST 156 +#define VF610_CLK_USBPHY0 157 +#define VF610_CLK_USBPHY1 158 +#define VF610_CLK_LVDS1_IN 159 +#define VF610_CLK_ANACLK1 160 +#define VF610_CLK_PLL1_BYPASS_SRC 161 +#define VF610_CLK_PLL2_BYPASS_SRC 162 +#define VF610_CLK_PLL3_BYPASS_SRC 163 +#define VF610_CLK_PLL4_BYPASS_SRC 164 +#define VF610_CLK_PLL5_BYPASS_SRC 165 +#define VF610_CLK_PLL6_BYPASS_SRC 166 +#define VF610_CLK_PLL7_BYPASS_SRC 167 +#define 
VF610_CLK_PLL1 168 +#define VF610_CLK_PLL2 169 +#define VF610_CLK_PLL3 170 +#define VF610_CLK_PLL4 171 +#define VF610_CLK_PLL5 172 +#define VF610_CLK_PLL6 173 +#define VF610_CLK_PLL7 174 +#define VF610_PLL1_BYPASS 175 +#define VF610_PLL2_BYPASS 176 +#define VF610_PLL3_BYPASS 177 +#define VF610_PLL4_BYPASS 178 +#define VF610_PLL5_BYPASS 179 +#define VF610_PLL6_BYPASS 180 +#define VF610_PLL7_BYPASS 181 +#define VF610_CLK_SNVS 182 +#define VF610_CLK_DAP 183 +#define VF610_CLK_OCOTP 184 +#define VF610_CLK_DDRMC 185 +#define VF610_CLK_WKPU 186 +#define VF610_CLK_TCON0 187 +#define VF610_CLK_TCON1 188 +#define VF610_CLK_END 189 + +#endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/clock/xlnx-zynqmp-clk.h b/include/dt-bindings/clock/xlnx-zynqmp-clk.h new file mode 100644 index 0000000..cdc4c0b --- /dev/null +++ b/include/dt-bindings/clock/xlnx-zynqmp-clk.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Xilinx Zynq MPSoC Firmware layer + * + * Copyright (C) 2014-2018 Xilinx, Inc. + * + */ + +#ifndef _DT_BINDINGS_CLK_ZYNQMP_H +#define _DT_BINDINGS_CLK_ZYNQMP_H + +#define IOPLL 0 +#define RPLL 1 +#define APLL 2 +#define DPLL 3 +#define VPLL 4 +#define IOPLL_TO_FPD 5 +#define RPLL_TO_FPD 6 +#define APLL_TO_LPD 7 +#define DPLL_TO_LPD 8 +#define VPLL_TO_LPD 9 +#define ACPU 10 +#define ACPU_HALF 11 +#define DBF_FPD 12 +#define DBF_LPD 13 +#define DBG_TRACE 14 +#define DBG_TSTMP 15 +#define DP_VIDEO_REF 16 +#define DP_AUDIO_REF 17 +#define DP_STC_REF 18 +#define GDMA_REF 19 +#define DPDMA_REF 20 +#define DDR_REF 21 +#define SATA_REF 22 +#define PCIE_REF 23 +#define GPU_REF 24 +#define GPU_PP0_REF 25 +#define GPU_PP1_REF 26 +#define TOPSW_MAIN 27 +#define TOPSW_LSBUS 28 +#define GTGREF0_REF 29 +#define LPD_SWITCH 30 +#define LPD_LSBUS 31 +#define USB0_BUS_REF 32 +#define USB1_BUS_REF 33 +#define USB3_DUAL_REF 34 +#define USB0 35 +#define USB1 36 +#define CPU_R5 37 +#define CPU_R5_CORE 38 +#define CSU_SPB 39 +#define CSU_PLL 40 +#define PCAP 41 +#define IOU_SWITCH 42 +#define GEM_TSU_REF 43 +#define GEM_TSU 44 +#define GEM0_TX 45 +#define GEM1_TX 46 +#define GEM2_TX 47 +#define GEM3_TX 48 +#define GEM0_RX 49 +#define GEM1_RX 50 +#define GEM2_RX 51 +#define GEM3_RX 52 +#define QSPI_REF 53 +#define SDIO0_REF 54 +#define SDIO1_REF 55 +#define UART0_REF 56 +#define UART1_REF 57 +#define SPI0_REF 58 +#define SPI1_REF 59 +#define NAND_REF 60 +#define I2C0_REF 61 +#define I2C1_REF 62 +#define CAN0_REF 63 +#define CAN1_REF 64 +#define CAN0 65 +#define CAN1 66 +#define DLL_REF 67 +#define ADMA_REF 68 +#define TIMESTAMP_REF 69 +#define AMS_REF 70 +#define PL0_REF 71 +#define PL1_REF 72 +#define PL2_REF 73 +#define PL3_REF 74 +#define WDT 75 +#define IOPLL_INT 76 +#define IOPLL_PRE_SRC 77 +#define IOPLL_HALF 78 +#define IOPLL_INT_MUX 79 +#define IOPLL_POST_SRC 80 +#define RPLL_INT 81 +#define RPLL_PRE_SRC 82 +#define RPLL_HALF 83 +#define RPLL_INT_MUX 84 +#define RPLL_POST_SRC 85 +#define APLL_INT 86 +#define APLL_PRE_SRC 87 +#define APLL_HALF 88 +#define APLL_INT_MUX 89 +#define APLL_POST_SRC 90 +#define DPLL_INT 91 +#define DPLL_PRE_SRC 92 +#define DPLL_HALF 93 +#define DPLL_INT_MUX 94 +#define DPLL_POST_SRC 95 +#define VPLL_INT 96 +#define VPLL_PRE_SRC 97 +#define VPLL_HALF 98 +#define VPLL_INT_MUX 99 +#define VPLL_POST_SRC 100 +#define CAN0_MIO 101 +#define CAN1_MIO 102 +#define ACPU_FULL 103 +#define GEM0_REF 104 +#define GEM1_REF 105 +#define GEM2_REF 106 +#define GEM3_REF 107 +#define GEM0_REF_UNG 108 +#define GEM1_REF_UNG 109 +#define GEM2_REF_UNG 110 +#define 
GEM3_REF_UNG 111 +#define LPD_WDT 112 + +#endif diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h new file mode 100644 index 0000000..e041261 --- /dev/null +++ b/include/dt-bindings/clock/zx296702-clock.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2014 Linaro Ltd. + * Copyright (C) 2014 ZTE Corporation. + */ + +#ifndef __DT_BINDINGS_CLOCK_ZX296702_H +#define __DT_BINDINGS_CLOCK_ZX296702_H + +#define ZX296702_OSC 0 +#define ZX296702_PLL_A9 1 +#define ZX296702_PLL_A9_350M 2 +#define ZX296702_PLL_MAC_1000M 3 +#define ZX296702_PLL_MAC_333M 4 +#define ZX296702_PLL_MM0_1188M 5 +#define ZX296702_PLL_MM0_396M 6 +#define ZX296702_PLL_MM0_198M 7 +#define ZX296702_PLL_MM1_108M 8 +#define ZX296702_PLL_MM1_72M 9 +#define ZX296702_PLL_MM1_54M 10 +#define ZX296702_PLL_LSP_104M 11 +#define ZX296702_PLL_LSP_26M 12 +#define ZX296702_PLL_AUDIO_294M912 13 +#define ZX296702_PLL_DDR_266M 14 +#define ZX296702_CLK_148M5 15 +#define ZX296702_MATRIX_ACLK 16 +#define ZX296702_MAIN_HCLK 17 +#define ZX296702_MAIN_PCLK 18 +#define ZX296702_CLK_500 19 +#define ZX296702_CLK_250 20 +#define ZX296702_CLK_125 21 +#define ZX296702_CLK_74M25 22 +#define ZX296702_A9_WCLK 23 +#define ZX296702_A9_AS1_ACLK_MUX 24 +#define ZX296702_A9_TRACE_CLKIN_MUX 25 +#define ZX296702_A9_AS1_ACLK_DIV 26 +#define ZX296702_CLK_2 27 +#define ZX296702_CLK_27 28 +#define ZX296702_DECPPU_ACLK_MUX 29 +#define ZX296702_PPU_ACLK_MUX 30 +#define ZX296702_MALI400_ACLK_MUX 31 +#define ZX296702_VOU_ACLK_MUX 32 +#define ZX296702_VOU_MAIN_WCLK_MUX 33 +#define ZX296702_VOU_AUX_WCLK_MUX 34 +#define ZX296702_VOU_SCALER_WCLK_MUX 35 +#define ZX296702_R2D_ACLK_MUX 36 +#define ZX296702_R2D_WCLK_MUX 37 +#define ZX296702_CLK_50 38 +#define ZX296702_CLK_25 39 +#define ZX296702_CLK_12 40 +#define ZX296702_CLK_16M384 41 +#define ZX296702_CLK_32K768 42 +#define ZX296702_SEC_WCLK_DIV 43 +#define ZX296702_DDR_WCLK_MUX 44 +#define ZX296702_NAND_WCLK_MUX 45 +#define ZX296702_LSP_26_WCLK_MUX 46 +#define ZX296702_A9_AS0_ACLK 47 +#define ZX296702_A9_AS1_ACLK 48 +#define ZX296702_A9_TRACE_CLKIN 49 +#define ZX296702_DECPPU_AXI_M_ACLK 50 +#define ZX296702_DECPPU_AHB_S_HCLK 51 +#define ZX296702_PPU_AXI_M_ACLK 52 +#define ZX296702_PPU_AHB_S_HCLK 53 +#define ZX296702_VOU_AXI_M_ACLK 54 +#define ZX296702_VOU_APB_PCLK 55 +#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56 +#define ZX296702_VOU_AUX_CHANNEL_WCLK 57 +#define ZX296702_VOU_HDMI_OSCLK_CEC 58 +#define ZX296702_VOU_SCALER_WCLK 59 +#define ZX296702_MALI400_AXI_M_ACLK 60 +#define ZX296702_MALI400_APB_PCLK 61 +#define ZX296702_R2D_WCLK 62 +#define ZX296702_R2D_AXI_M_ACLK 63 +#define ZX296702_R2D_AHB_HCLK 64 +#define ZX296702_DDR3_AXI_S0_ACLK 65 +#define ZX296702_DDR3_APB_PCLK 66 +#define ZX296702_DDR3_WCLK 67 +#define ZX296702_USB20_0_AHB_HCLK 68 +#define ZX296702_USB20_0_EXTREFCLK 69 +#define ZX296702_USB20_1_AHB_HCLK 70 +#define ZX296702_USB20_1_EXTREFCLK 71 +#define ZX296702_USB20_2_AHB_HCLK 72 +#define ZX296702_USB20_2_EXTREFCLK 73 +#define ZX296702_GMAC_AXI_M_ACLK 74 +#define ZX296702_GMAC_APB_PCLK 75 +#define ZX296702_GMAC_125_CLKIN 76 +#define ZX296702_GMAC_RMII_CLKIN 77 +#define ZX296702_GMAC_25M_CLK 78 +#define ZX296702_NANDFLASH_AHB_HCLK 79 +#define ZX296702_NANDFLASH_WCLK 80 +#define ZX296702_LSP0_APB_PCLK 81 +#define ZX296702_LSP0_AHB_HCLK 82 +#define ZX296702_LSP0_26M_WCLK 83 +#define ZX296702_LSP0_104M_WCLK 84 +#define ZX296702_LSP0_16M384_WCLK 85 +#define ZX296702_LSP1_APB_PCLK 86 +#define ZX296702_LSP1_26M_WCLK 87 +#define 
ZX296702_LSP1_104M_WCLK 88 +#define ZX296702_LSP1_32K_CLK 89 +#define ZX296702_AON_HCLK 90 +#define ZX296702_SYS_CTRL_PCLK 91 +#define ZX296702_DMA_PCLK 92 +#define ZX296702_DMA_ACLK 93 +#define ZX296702_SEC_HCLK 94 +#define ZX296702_AES_WCLK 95 +#define ZX296702_DES_WCLK 96 +#define ZX296702_IRAM_ACLK 97 +#define ZX296702_IROM_ACLK 98 +#define ZX296702_BOOT_CTRL_HCLK 99 +#define ZX296702_EFUSE_CLK_30 100 +#define ZX296702_VOU_MAIN_CHANNEL_DIV 101 +#define ZX296702_VOU_AUX_CHANNEL_DIV 102 +#define ZX296702_VOU_TV_ENC_HD_DIV 103 +#define ZX296702_VOU_TV_ENC_SD_DIV 104 +#define ZX296702_VL0_MUX 105 +#define ZX296702_VL1_MUX 106 +#define ZX296702_VL2_MUX 107 +#define ZX296702_GL0_MUX 108 +#define ZX296702_GL1_MUX 109 +#define ZX296702_GL2_MUX 110 +#define ZX296702_WB_MUX 111 +#define ZX296702_HDMI_MUX 112 +#define ZX296702_VOU_TV_ENC_HD_MUX 113 +#define ZX296702_VOU_TV_ENC_SD_MUX 114 +#define ZX296702_VL0_CLK 115 +#define ZX296702_VL1_CLK 116 +#define ZX296702_VL2_CLK 117 +#define ZX296702_GL0_CLK 118 +#define ZX296702_GL1_CLK 119 +#define ZX296702_GL2_CLK 120 +#define ZX296702_WB_CLK 121 +#define ZX296702_CL_CLK 122 +#define ZX296702_MAIN_MIX_CLK 123 +#define ZX296702_AUX_MIX_CLK 124 +#define ZX296702_HDMI_CLK 125 +#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126 +#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127 +#define ZX296702_A9_PERIPHCLK 128 +#define ZX296702_TOPCLK_END 129 + +#define ZX296702_SDMMC1_WCLK_MUX 0 +#define ZX296702_SDMMC1_WCLK_DIV 1 +#define ZX296702_SDMMC1_WCLK 2 +#define ZX296702_SDMMC1_PCLK 3 +#define ZX296702_SPDIF0_WCLK_MUX 4 +#define ZX296702_SPDIF0_WCLK 5 +#define ZX296702_SPDIF0_PCLK 6 +#define ZX296702_SPDIF0_DIV 7 +#define ZX296702_I2S0_WCLK_MUX 8 +#define ZX296702_I2S0_WCLK 9 +#define ZX296702_I2S0_PCLK 10 +#define ZX296702_I2S0_DIV 11 +#define ZX296702_I2S1_WCLK_MUX 12 +#define ZX296702_I2S1_WCLK 13 +#define ZX296702_I2S1_PCLK 14 +#define ZX296702_I2S1_DIV 15 +#define ZX296702_I2S2_WCLK_MUX 16 +#define ZX296702_I2S2_WCLK 17 +#define ZX296702_I2S2_PCLK 18 +#define ZX296702_I2S2_DIV 19 +#define ZX296702_GPIO_CLK 20 +#define ZX296702_LSP0CLK_END 21 + +#define ZX296702_UART0_WCLK_MUX 0 +#define ZX296702_UART0_WCLK 1 +#define ZX296702_UART0_PCLK 2 +#define ZX296702_UART1_WCLK_MUX 3 +#define ZX296702_UART1_WCLK 4 +#define ZX296702_UART1_PCLK 5 +#define ZX296702_SDMMC0_WCLK_MUX 6 +#define ZX296702_SDMMC0_WCLK_DIV 7 +#define ZX296702_SDMMC0_WCLK 8 +#define ZX296702_SDMMC0_PCLK 9 +#define ZX296702_SPDIF1_WCLK_MUX 10 +#define ZX296702_SPDIF1_WCLK 11 +#define ZX296702_SPDIF1_PCLK 12 +#define ZX296702_SPDIF1_DIV 13 +#define ZX296702_LSP1CLK_END 14 + +#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */ diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h new file mode 100644 index 0000000..bf2ff6d --- /dev/null +++ b/include/dt-bindings/clock/zx296718-clock.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 - 2016 ZTE Corporation. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_ZX296718_H +#define __DT_BINDINGS_CLOCK_ZX296718_H + +/* PLL */ +#define ZX296718_PLL_CPU 1 +#define ZX296718_PLL_MAC 2 +#define ZX296718_PLL_MM0 3 +#define ZX296718_PLL_MM1 4 +#define ZX296718_PLL_VGA 5 +#define ZX296718_PLL_DDR 6 +#define ZX296718_PLL_AUDIO 7 +#define ZX296718_PLL_HSIC 8 +#define CPU_DBG_GATE 9 +#define A72_GATE 10 +#define CPU_PERI_GATE 11 +#define A53_GATE 12 +#define DDR1_GATE 13 +#define DDR0_GATE 14 +#define SD1_WCLK 15 +#define SD1_AHB 16 +#define SD0_WCLK 17 +#define SD0_AHB 18 +#define EMMC_WCLK 19 +#define EMMC_NAND_AXI 20 +#define NAND_WCLK 21 +#define EMMC_NAND_AHB 22 +#define LSP1_148M5 23 +#define LSP1_99M 24 +#define LSP1_24M 25 +#define LSP0_74M25 26 +#define LSP0_32K 27 +#define LSP0_148M5 28 +#define LSP0_99M 29 +#define LSP0_24M 30 +#define DEMUX_AXI 31 +#define DEMUX_APB 32 +#define DEMUX_148M5 33 +#define DEMUX_108M 34 +#define AUDIO_APB 35 +#define AUDIO_99M 36 +#define AUDIO_24M 37 +#define AUDIO_16M384 38 +#define AUDIO_32K 39 +#define WDT_WCLK 40 +#define TIMER_WCLK 41 +#define VDE_ACLK 42 +#define VCE_ACLK 43 +#define HDE_ACLK 44 +#define GPU_ACLK 45 +#define SAPPU_ACLK 46 +#define SAPPU_WCLK 47 +#define VOU_ACLK 48 +#define VOU_MAIN_WCLK 49 +#define VOU_AUX_WCLK 50 +#define VOU_PPU_WCLK 51 +#define MIPI_CFG_CLK 52 +#define VGA_I2C_WCLK 53 +#define MIPI_REF_CLK 54 +#define HDMI_OSC_CEC 55 +#define HDMI_OSC_CLK 56 +#define HDMI_XCLK 57 +#define VIU_M0_ACLK 58 +#define VIU_M1_ACLK 59 +#define VIU_WCLK 60 +#define VIU_JPEG_WCLK 61 +#define VIU_CFG_CLK 62 +#define TS_SYS_WCLK 63 +#define TS_SYS_108M 64 +#define USB20_HCLK 65 +#define USB20_PHY_CLK 66 +#define USB21_HCLK 67 +#define USB21_PHY_CLK 68 +#define GMAC_RMIICLK 69 +#define GMAC_PCLK 70 +#define GMAC_ACLK 71 +#define GMAC_RFCLK 72 +#define TEMPSENSOR_GATE 73 + +#define TOP_NR_CLKS 74 + + +#define LSP0_TIMER3_PCLK 1 +#define LSP0_TIMER3_WCLK 2 +#define LSP0_TIMER4_PCLK 3 +#define LSP0_TIMER4_WCLK 4 +#define LSP0_TIMER5_PCLK 5 +#define LSP0_TIMER5_WCLK 6 +#define LSP0_UART3_PCLK 7 +#define LSP0_UART3_WCLK 8 +#define LSP0_UART1_PCLK 9 +#define LSP0_UART1_WCLK 10 +#define LSP0_UART2_PCLK 11 +#define LSP0_UART2_WCLK 12 +#define LSP0_SPIFC0_PCLK 13 +#define LSP0_SPIFC0_WCLK 14 +#define LSP0_I2C4_PCLK 15 +#define LSP0_I2C4_WCLK 16 +#define LSP0_I2C5_PCLK 17 +#define LSP0_I2C5_WCLK 18 +#define LSP0_SSP0_PCLK 19 +#define LSP0_SSP0_WCLK 20 +#define LSP0_SSP1_PCLK 21 +#define LSP0_SSP1_WCLK 22 +#define LSP0_USIM_PCLK 23 +#define LSP0_USIM_WCLK 24 +#define LSP0_GPIO_PCLK 25 +#define LSP0_GPIO_WCLK 26 +#define LSP0_I2C3_PCLK 27 +#define LSP0_I2C3_WCLK 28 + +#define LSP0_NR_CLKS 29 + + +#define LSP1_UART4_PCLK 1 +#define LSP1_UART4_WCLK 2 +#define LSP1_UART5_PCLK 3 +#define LSP1_UART5_WCLK 4 +#define LSP1_PWM_PCLK 5 +#define LSP1_PWM_WCLK 6 +#define LSP1_I2C2_PCLK 7 +#define LSP1_I2C2_WCLK 8 +#define LSP1_SSP2_PCLK 9 +#define LSP1_SSP2_WCLK 10 +#define LSP1_SSP3_PCLK 11 +#define LSP1_SSP3_WCLK 12 +#define LSP1_SSP4_PCLK 13 +#define LSP1_SSP4_WCLK 14 +#define LSP1_USIM1_PCLK 15 +#define LSP1_USIM1_WCLK 16 + +#define LSP1_NR_CLKS 17 + + +#define AUDIO_I2S0_WCLK 1 +#define AUDIO_I2S0_PCLK 2 +#define AUDIO_I2S1_WCLK 3 +#define AUDIO_I2S1_PCLK 4 +#define AUDIO_I2S2_WCLK 5 +#define AUDIO_I2S2_PCLK 6 +#define AUDIO_I2S3_WCLK 7 +#define AUDIO_I2S3_PCLK 8 +#define AUDIO_I2C0_WCLK 9 +#define AUDIO_I2C0_PCLK 10 +#define AUDIO_SPDIF0_WCLK 11 +#define AUDIO_SPDIF0_PCLK 12 +#define AUDIO_SPDIF1_WCLK 13 +#define AUDIO_SPDIF1_PCLK 14 +#define AUDIO_TIMER_WCLK 15 +#define AUDIO_TIMER_PCLK 
16 +#define AUDIO_TDM_WCLK 17 +#define AUDIO_TDM_PCLK 18 +#define AUDIO_TS_PCLK 19 +#define I2S0_WCLK_MUX 20 +#define I2S1_WCLK_MUX 21 +#define I2S2_WCLK_MUX 22 +#define I2S3_WCLK_MUX 23 + +#define AUDIO_NR_CLKS 24 + +#endif diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h new file mode 100644 index 0000000..746831f --- /dev/null +++ b/include/dt-bindings/display/tda998x.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_TDA998X_H +#define _DT_BINDINGS_TDA998X_H + +#define TDA998x_SPDIF 1 +#define TDA998x_I2S 2 + +#endif /*_DT_BINDINGS_TDA998X_H */ diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h new file mode 100644 index 0000000..e7b3e06 --- /dev/null +++ b/include/dt-bindings/dma/at91.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides macros for at91 dma bindings. + * + * Copyright (C) 2013 Ludovic Desroches + */ + +#ifndef __DT_BINDINGS_AT91_DMA_H__ +#define __DT_BINDINGS_AT91_DMA_H__ + +/* ---------- HDMAC ---------- */ + +/* + * Source and/or destination peripheral ID + */ +#define AT91_DMA_CFG_PER_ID_MASK (0xff) +#define AT91_DMA_CFG_PER_ID(id) (id & AT91_DMA_CFG_PER_ID_MASK) + +/* + * FIFO configuration: it defines when a request is serviced. + */ +#define AT91_DMA_CFG_FIFOCFG_OFFSET (8) +#define AT91_DMA_CFG_FIFOCFG_MASK (0xf << AT91_DMA_CFG_FIFOCFG_OFFSET) +#define AT91_DMA_CFG_FIFOCFG_HALF (0x0 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* half FIFO (default behavior) */ +#define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ +#define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ + + +/* ---------- XDMAC ---------- */ +#define AT91_XDMAC_DT_MEM_IF_MASK (0x1) +#define AT91_XDMAC_DT_MEM_IF_OFFSET (13) +#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \ + << AT91_XDMAC_DT_MEM_IF_OFFSET) +#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \ + & AT91_XDMAC_DT_MEM_IF_MASK) + +#define AT91_XDMAC_DT_PER_IF_MASK (0x1) +#define AT91_XDMAC_DT_PER_IF_OFFSET (14) +#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \ + << AT91_XDMAC_DT_PER_IF_OFFSET) +#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \ + & AT91_XDMAC_DT_PER_IF_MASK) + +#define AT91_XDMAC_DT_PERID_MASK (0x7f) +#define AT91_XDMAC_DT_PERID_OFFSET (24) +#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \ + << AT91_XDMAC_DT_PERID_OFFSET) +#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \ + & AT91_XDMAC_DT_PERID_MASK) + +#endif /* __DT_BINDINGS_AT91_DMA_H__ */ diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h new file mode 100644 index 0000000..ad9e6ec --- /dev/null +++ b/include/dt-bindings/dma/axi-dmac.h @@ -0,0 +1,48 @@ +/* + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__ +#define __DT_BINDINGS_DMA_AXI_DMAC_H__ + +#define AXI_DMAC_BUS_TYPE_AXI_MM 0 +#define AXI_DMAC_BUS_TYPE_AXI_STREAM 1 +#define AXI_DMAC_BUS_TYPE_FIFO 2 + +#endif diff --git a/include/dt-bindings/dma/dw-dmac.h b/include/dt-bindings/dma/dw-dmac.h new file mode 100644 index 0000000..d1ca705 --- /dev/null +++ b/include/dt-bindings/dma/dw-dmac.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ + +#ifndef __DT_BINDINGS_DMA_DW_DMAC_H__ +#define __DT_BINDINGS_DMA_DW_DMAC_H__ + +/* + * Protection Control bits provide protection against illegal transactions. + * The protection bits[0:2] are one-to-one mapped to AHB HPROT[3:1] signals. + */ +#define DW_DMAC_HPROT1_PRIVILEGED_MODE (1 << 0) /* Privileged Mode */ +#define DW_DMAC_HPROT2_BUFFERABLE (1 << 1) /* DMA is bufferable */ +#define DW_DMAC_HPROT3_CACHEABLE (1 << 2) /* DMA is cacheable */ + +#endif /* __DT_BINDINGS_DMA_DW_DMAC_H__ */ diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h new file mode 100644 index 0000000..df017fd --- /dev/null +++ b/include/dt-bindings/dma/jz4780-dma.h @@ -0,0 +1,49 @@ +#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__ +#define __DT_BINDINGS_DMA_JZ4780_DMA_H__ + +/* + * Request type numbers for the JZ4780 DMA controller (written to the DRTn + * register for the channel). 
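+ *
+ * A usage sketch (illustrative only, not taken from this header): assuming
+ * the controller uses two DMA specifier cells (the request type plus a
+ * channel mask, with 0xffffffff meaning "any channel"), a client node would
+ * look roughly like:
+ *
+ *	uart0: serial@10030000 {
+ *		dmas = <&dma JZ4780_DMA_UART0_TX 0xffffffff>,
+ *		       <&dma JZ4780_DMA_UART0_RX 0xffffffff>;
+ *		dma-names = "tx", "rx";
+ *	};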
+ */ +#define JZ4780_DMA_I2S1_TX 0x4 +#define JZ4780_DMA_I2S1_RX 0x5 +#define JZ4780_DMA_I2S0_TX 0x6 +#define JZ4780_DMA_I2S0_RX 0x7 +#define JZ4780_DMA_AUTO 0x8 +#define JZ4780_DMA_SADC_RX 0x9 +#define JZ4780_DMA_UART4_TX 0xc +#define JZ4780_DMA_UART4_RX 0xd +#define JZ4780_DMA_UART3_TX 0xe +#define JZ4780_DMA_UART3_RX 0xf +#define JZ4780_DMA_UART2_TX 0x10 +#define JZ4780_DMA_UART2_RX 0x11 +#define JZ4780_DMA_UART1_TX 0x12 +#define JZ4780_DMA_UART1_RX 0x13 +#define JZ4780_DMA_UART0_TX 0x14 +#define JZ4780_DMA_UART0_RX 0x15 +#define JZ4780_DMA_SSI0_TX 0x16 +#define JZ4780_DMA_SSI0_RX 0x17 +#define JZ4780_DMA_SSI1_TX 0x18 +#define JZ4780_DMA_SSI1_RX 0x19 +#define JZ4780_DMA_MSC0_TX 0x1a +#define JZ4780_DMA_MSC0_RX 0x1b +#define JZ4780_DMA_MSC1_TX 0x1c +#define JZ4780_DMA_MSC1_RX 0x1d +#define JZ4780_DMA_MSC2_TX 0x1e +#define JZ4780_DMA_MSC2_RX 0x1f +#define JZ4780_DMA_PCM0_TX 0x20 +#define JZ4780_DMA_PCM0_RX 0x21 +#define JZ4780_DMA_SMB0_TX 0x24 +#define JZ4780_DMA_SMB0_RX 0x25 +#define JZ4780_DMA_SMB1_TX 0x26 +#define JZ4780_DMA_SMB1_RX 0x27 +#define JZ4780_DMA_SMB2_TX 0x28 +#define JZ4780_DMA_SMB2_RX 0x29 +#define JZ4780_DMA_SMB3_TX 0x2a +#define JZ4780_DMA_SMB3_RX 0x2b +#define JZ4780_DMA_SMB4_TX 0x2c +#define JZ4780_DMA_SMB4_RX 0x2d +#define JZ4780_DMA_DES_TX 0x2e +#define JZ4780_DMA_DES_RX 0x2f + +#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */ diff --git a/include/dt-bindings/dma/nbpfaxi.h b/include/dt-bindings/dma/nbpfaxi.h new file mode 100644 index 0000000..88e59ac --- /dev/null +++ b/include/dt-bindings/dma/nbpfaxi.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. + * Author: Guennadi Liakhovetski + */ + +#ifndef DT_BINDINGS_NBPFAXI_H +#define DT_BINDINGS_NBPFAXI_H + +/** + * Use "#dma-cells = <2>;" with the second integer defining slave DMA flags: + */ +#define NBPF_SLAVE_RQ_HIGH 1 +#define NBPF_SLAVE_RQ_LOW 2 +#define NBPF_SLAVE_RQ_LEVEL 4 + +#endif diff --git a/include/dt-bindings/dma/sun4i-a10.h b/include/dt-bindings/dma/sun4i-a10.h new file mode 100644 index 0000000..8caba9e --- /dev/null +++ b/include/dt-bindings/dma/sun4i-a10.h @@ -0,0 +1,56 @@ +/* + * Copyright 2014 Maxime Ripard + * + * Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_SUN4I_A10_H_
+#define __DT_BINDINGS_DMA_SUN4I_A10_H_
+
+#define SUN4I_DMA_NORMAL 0
+#define SUN4I_DMA_DEDICATED 1
+
+#endif /* __DT_BINDINGS_DMA_SUN4I_A10_H_ */
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
new file mode 100644
index 0000000..4e61f64
--- /dev/null
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -0,0 +1,550 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DT_BINDINGS_RSCRC_IMX_H
+#define __DT_BINDINGS_RSCRC_IMX_H
+
+/*
+ * These defines are used to indicate a resource. Resources include peripherals
+ * and bus masters (but not memory regions). Note that items in this list should
+ * never be changed or removed (only added to at the end of the list).
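+ *
+ * A usage sketch (illustrative only, not taken from this header): the IDs
+ * are typically consumed through an SCU power-domain specifier; the node
+ * name, unit address and &pd phandle below are assumptions:
+ *
+ *	lpuart0: serial@5a060000 {
+ *		power-domains = <&pd IMX_SC_R_UART_0>;
+ *	};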
+ */ + +#define IMX_SC_R_A53 0 +#define IMX_SC_R_A53_0 1 +#define IMX_SC_R_A53_1 2 +#define IMX_SC_R_A53_2 3 +#define IMX_SC_R_A53_3 4 +#define IMX_SC_R_A72 5 +#define IMX_SC_R_A72_0 6 +#define IMX_SC_R_A72_1 7 +#define IMX_SC_R_A72_2 8 +#define IMX_SC_R_A72_3 9 +#define IMX_SC_R_CCI 10 +#define IMX_SC_R_DB 11 +#define IMX_SC_R_DRC_0 12 +#define IMX_SC_R_DRC_1 13 +#define IMX_SC_R_GIC_SMMU 14 +#define IMX_SC_R_IRQSTR_M4_0 15 +#define IMX_SC_R_IRQSTR_M4_1 16 +#define IMX_SC_R_SMMU 17 +#define IMX_SC_R_GIC 18 +#define IMX_SC_R_DC_0_BLIT0 19 +#define IMX_SC_R_DC_0_BLIT1 20 +#define IMX_SC_R_DC_0_BLIT2 21 +#define IMX_SC_R_DC_0_BLIT_OUT 22 +#define IMX_SC_R_PERF 23 +#define IMX_SC_R_DC_0_WARP 25 +#define IMX_SC_R_DC_0_VIDEO0 28 +#define IMX_SC_R_DC_0_VIDEO1 29 +#define IMX_SC_R_DC_0_FRAC0 30 +#define IMX_SC_R_DC_0 32 +#define IMX_SC_R_GPU_2_PID0 33 +#define IMX_SC_R_DC_0_PLL_0 34 +#define IMX_SC_R_DC_0_PLL_1 35 +#define IMX_SC_R_DC_1_BLIT0 36 +#define IMX_SC_R_DC_1_BLIT1 37 +#define IMX_SC_R_DC_1_BLIT2 38 +#define IMX_SC_R_DC_1_BLIT_OUT 39 +#define IMX_SC_R_DC_1_WARP 42 +#define IMX_SC_R_DC_1_VIDEO0 45 +#define IMX_SC_R_DC_1_VIDEO1 46 +#define IMX_SC_R_DC_1_FRAC0 47 +#define IMX_SC_R_DC_1 49 +#define IMX_SC_R_DC_1_PLL_0 51 +#define IMX_SC_R_DC_1_PLL_1 52 +#define IMX_SC_R_SPI_0 53 +#define IMX_SC_R_SPI_1 54 +#define IMX_SC_R_SPI_2 55 +#define IMX_SC_R_SPI_3 56 +#define IMX_SC_R_UART_0 57 +#define IMX_SC_R_UART_1 58 +#define IMX_SC_R_UART_2 59 +#define IMX_SC_R_UART_3 60 +#define IMX_SC_R_UART_4 61 +#define IMX_SC_R_EMVSIM_0 62 +#define IMX_SC_R_EMVSIM_1 63 +#define IMX_SC_R_DMA_0_CH0 64 +#define IMX_SC_R_DMA_0_CH1 65 +#define IMX_SC_R_DMA_0_CH2 66 +#define IMX_SC_R_DMA_0_CH3 67 +#define IMX_SC_R_DMA_0_CH4 68 +#define IMX_SC_R_DMA_0_CH5 69 +#define IMX_SC_R_DMA_0_CH6 70 +#define IMX_SC_R_DMA_0_CH7 71 +#define IMX_SC_R_DMA_0_CH8 72 +#define IMX_SC_R_DMA_0_CH9 73 +#define IMX_SC_R_DMA_0_CH10 74 +#define IMX_SC_R_DMA_0_CH11 75 +#define IMX_SC_R_DMA_0_CH12 76 +#define IMX_SC_R_DMA_0_CH13 77 +#define IMX_SC_R_DMA_0_CH14 78 +#define IMX_SC_R_DMA_0_CH15 79 +#define IMX_SC_R_DMA_0_CH16 80 +#define IMX_SC_R_DMA_0_CH17 81 +#define IMX_SC_R_DMA_0_CH18 82 +#define IMX_SC_R_DMA_0_CH19 83 +#define IMX_SC_R_DMA_0_CH20 84 +#define IMX_SC_R_DMA_0_CH21 85 +#define IMX_SC_R_DMA_0_CH22 86 +#define IMX_SC_R_DMA_0_CH23 87 +#define IMX_SC_R_DMA_0_CH24 88 +#define IMX_SC_R_DMA_0_CH25 89 +#define IMX_SC_R_DMA_0_CH26 90 +#define IMX_SC_R_DMA_0_CH27 91 +#define IMX_SC_R_DMA_0_CH28 92 +#define IMX_SC_R_DMA_0_CH29 93 +#define IMX_SC_R_DMA_0_CH30 94 +#define IMX_SC_R_DMA_0_CH31 95 +#define IMX_SC_R_I2C_0 96 +#define IMX_SC_R_I2C_1 97 +#define IMX_SC_R_I2C_2 98 +#define IMX_SC_R_I2C_3 99 +#define IMX_SC_R_I2C_4 100 +#define IMX_SC_R_ADC_0 101 +#define IMX_SC_R_ADC_1 102 +#define IMX_SC_R_FTM_0 103 +#define IMX_SC_R_FTM_1 104 +#define IMX_SC_R_CAN_0 105 +#define IMX_SC_R_CAN_1 106 +#define IMX_SC_R_CAN_2 107 +#define IMX_SC_R_DMA_1_CH0 108 +#define IMX_SC_R_DMA_1_CH1 109 +#define IMX_SC_R_DMA_1_CH2 110 +#define IMX_SC_R_DMA_1_CH3 111 +#define IMX_SC_R_DMA_1_CH4 112 +#define IMX_SC_R_DMA_1_CH5 113 +#define IMX_SC_R_DMA_1_CH6 114 +#define IMX_SC_R_DMA_1_CH7 115 +#define IMX_SC_R_DMA_1_CH8 116 +#define IMX_SC_R_DMA_1_CH9 117 +#define IMX_SC_R_DMA_1_CH10 118 +#define IMX_SC_R_DMA_1_CH11 119 +#define IMX_SC_R_DMA_1_CH12 120 +#define IMX_SC_R_DMA_1_CH13 121 +#define IMX_SC_R_DMA_1_CH14 122 +#define IMX_SC_R_DMA_1_CH15 123 +#define IMX_SC_R_DMA_1_CH16 124 +#define IMX_SC_R_DMA_1_CH17 125 +#define IMX_SC_R_DMA_1_CH18 126 +#define 
IMX_SC_R_DMA_1_CH19 127 +#define IMX_SC_R_DMA_1_CH20 128 +#define IMX_SC_R_DMA_1_CH21 129 +#define IMX_SC_R_DMA_1_CH22 130 +#define IMX_SC_R_DMA_1_CH23 131 +#define IMX_SC_R_DMA_1_CH24 132 +#define IMX_SC_R_DMA_1_CH25 133 +#define IMX_SC_R_DMA_1_CH26 134 +#define IMX_SC_R_DMA_1_CH27 135 +#define IMX_SC_R_DMA_1_CH28 136 +#define IMX_SC_R_DMA_1_CH29 137 +#define IMX_SC_R_DMA_1_CH30 138 +#define IMX_SC_R_DMA_1_CH31 139 +#define IMX_SC_R_UNUSED1 140 +#define IMX_SC_R_UNUSED2 141 +#define IMX_SC_R_UNUSED3 142 +#define IMX_SC_R_UNUSED4 143 +#define IMX_SC_R_GPU_0_PID0 144 +#define IMX_SC_R_GPU_0_PID1 145 +#define IMX_SC_R_GPU_0_PID2 146 +#define IMX_SC_R_GPU_0_PID3 147 +#define IMX_SC_R_GPU_1_PID0 148 +#define IMX_SC_R_GPU_1_PID1 149 +#define IMX_SC_R_GPU_1_PID2 150 +#define IMX_SC_R_GPU_1_PID3 151 +#define IMX_SC_R_PCIE_A 152 +#define IMX_SC_R_SERDES_0 153 +#define IMX_SC_R_MATCH_0 154 +#define IMX_SC_R_MATCH_1 155 +#define IMX_SC_R_MATCH_2 156 +#define IMX_SC_R_MATCH_3 157 +#define IMX_SC_R_MATCH_4 158 +#define IMX_SC_R_MATCH_5 159 +#define IMX_SC_R_MATCH_6 160 +#define IMX_SC_R_MATCH_7 161 +#define IMX_SC_R_MATCH_8 162 +#define IMX_SC_R_MATCH_9 163 +#define IMX_SC_R_MATCH_10 164 +#define IMX_SC_R_MATCH_11 165 +#define IMX_SC_R_MATCH_12 166 +#define IMX_SC_R_MATCH_13 167 +#define IMX_SC_R_MATCH_14 168 +#define IMX_SC_R_PCIE_B 169 +#define IMX_SC_R_SATA_0 170 +#define IMX_SC_R_SERDES_1 171 +#define IMX_SC_R_HSIO_GPIO 172 +#define IMX_SC_R_MATCH_15 173 +#define IMX_SC_R_MATCH_16 174 +#define IMX_SC_R_MATCH_17 175 +#define IMX_SC_R_MATCH_18 176 +#define IMX_SC_R_MATCH_19 177 +#define IMX_SC_R_MATCH_20 178 +#define IMX_SC_R_MATCH_21 179 +#define IMX_SC_R_MATCH_22 180 +#define IMX_SC_R_MATCH_23 181 +#define IMX_SC_R_MATCH_24 182 +#define IMX_SC_R_MATCH_25 183 +#define IMX_SC_R_MATCH_26 184 +#define IMX_SC_R_MATCH_27 185 +#define IMX_SC_R_MATCH_28 186 +#define IMX_SC_R_LCD_0 187 +#define IMX_SC_R_LCD_0_PWM_0 188 +#define IMX_SC_R_LCD_0_I2C_0 189 +#define IMX_SC_R_LCD_0_I2C_1 190 +#define IMX_SC_R_PWM_0 191 +#define IMX_SC_R_PWM_1 192 +#define IMX_SC_R_PWM_2 193 +#define IMX_SC_R_PWM_3 194 +#define IMX_SC_R_PWM_4 195 +#define IMX_SC_R_PWM_5 196 +#define IMX_SC_R_PWM_6 197 +#define IMX_SC_R_PWM_7 198 +#define IMX_SC_R_GPIO_0 199 +#define IMX_SC_R_GPIO_1 200 +#define IMX_SC_R_GPIO_2 201 +#define IMX_SC_R_GPIO_3 202 +#define IMX_SC_R_GPIO_4 203 +#define IMX_SC_R_GPIO_5 204 +#define IMX_SC_R_GPIO_6 205 +#define IMX_SC_R_GPIO_7 206 +#define IMX_SC_R_GPT_0 207 +#define IMX_SC_R_GPT_1 208 +#define IMX_SC_R_GPT_2 209 +#define IMX_SC_R_GPT_3 210 +#define IMX_SC_R_GPT_4 211 +#define IMX_SC_R_KPP 212 +#define IMX_SC_R_MU_0A 213 +#define IMX_SC_R_MU_1A 214 +#define IMX_SC_R_MU_2A 215 +#define IMX_SC_R_MU_3A 216 +#define IMX_SC_R_MU_4A 217 +#define IMX_SC_R_MU_5A 218 +#define IMX_SC_R_MU_6A 219 +#define IMX_SC_R_MU_7A 220 +#define IMX_SC_R_MU_8A 221 +#define IMX_SC_R_MU_9A 222 +#define IMX_SC_R_MU_10A 223 +#define IMX_SC_R_MU_11A 224 +#define IMX_SC_R_MU_12A 225 +#define IMX_SC_R_MU_13A 226 +#define IMX_SC_R_MU_5B 227 +#define IMX_SC_R_MU_6B 228 +#define IMX_SC_R_MU_7B 229 +#define IMX_SC_R_MU_8B 230 +#define IMX_SC_R_MU_9B 231 +#define IMX_SC_R_MU_10B 232 +#define IMX_SC_R_MU_11B 233 +#define IMX_SC_R_MU_12B 234 +#define IMX_SC_R_MU_13B 235 +#define IMX_SC_R_ROM_0 236 +#define IMX_SC_R_FSPI_0 237 +#define IMX_SC_R_FSPI_1 238 +#define IMX_SC_R_IEE 239 +#define IMX_SC_R_IEE_R0 240 +#define IMX_SC_R_IEE_R1 241 +#define IMX_SC_R_IEE_R2 242 +#define IMX_SC_R_IEE_R3 243 +#define IMX_SC_R_IEE_R4 244 +#define 
IMX_SC_R_IEE_R5 245 +#define IMX_SC_R_IEE_R6 246 +#define IMX_SC_R_IEE_R7 247 +#define IMX_SC_R_SDHC_0 248 +#define IMX_SC_R_SDHC_1 249 +#define IMX_SC_R_SDHC_2 250 +#define IMX_SC_R_ENET_0 251 +#define IMX_SC_R_ENET_1 252 +#define IMX_SC_R_MLB_0 253 +#define IMX_SC_R_DMA_2_CH0 254 +#define IMX_SC_R_DMA_2_CH1 255 +#define IMX_SC_R_DMA_2_CH2 256 +#define IMX_SC_R_DMA_2_CH3 257 +#define IMX_SC_R_DMA_2_CH4 258 +#define IMX_SC_R_USB_0 259 +#define IMX_SC_R_USB_1 260 +#define IMX_SC_R_USB_0_PHY 261 +#define IMX_SC_R_USB_2 262 +#define IMX_SC_R_USB_2_PHY 263 +#define IMX_SC_R_DTCP 264 +#define IMX_SC_R_NAND 265 +#define IMX_SC_R_LVDS_0 266 +#define IMX_SC_R_LVDS_0_PWM_0 267 +#define IMX_SC_R_LVDS_0_I2C_0 268 +#define IMX_SC_R_LVDS_0_I2C_1 269 +#define IMX_SC_R_LVDS_1 270 +#define IMX_SC_R_LVDS_1_PWM_0 271 +#define IMX_SC_R_LVDS_1_I2C_0 272 +#define IMX_SC_R_LVDS_1_I2C_1 273 +#define IMX_SC_R_LVDS_2 274 +#define IMX_SC_R_LVDS_2_PWM_0 275 +#define IMX_SC_R_LVDS_2_I2C_0 276 +#define IMX_SC_R_LVDS_2_I2C_1 277 +#define IMX_SC_R_M4_0_PID0 278 +#define IMX_SC_R_M4_0_PID1 279 +#define IMX_SC_R_M4_0_PID2 280 +#define IMX_SC_R_M4_0_PID3 281 +#define IMX_SC_R_M4_0_PID4 282 +#define IMX_SC_R_M4_0_RGPIO 283 +#define IMX_SC_R_M4_0_SEMA42 284 +#define IMX_SC_R_M4_0_TPM 285 +#define IMX_SC_R_M4_0_PIT 286 +#define IMX_SC_R_M4_0_UART 287 +#define IMX_SC_R_M4_0_I2C 288 +#define IMX_SC_R_M4_0_INTMUX 289 +#define IMX_SC_R_M4_0_MU_0B 292 +#define IMX_SC_R_M4_0_MU_0A0 293 +#define IMX_SC_R_M4_0_MU_0A1 294 +#define IMX_SC_R_M4_0_MU_0A2 295 +#define IMX_SC_R_M4_0_MU_0A3 296 +#define IMX_SC_R_M4_0_MU_1A 297 +#define IMX_SC_R_M4_1_PID0 298 +#define IMX_SC_R_M4_1_PID1 299 +#define IMX_SC_R_M4_1_PID2 300 +#define IMX_SC_R_M4_1_PID3 301 +#define IMX_SC_R_M4_1_PID4 302 +#define IMX_SC_R_M4_1_RGPIO 303 +#define IMX_SC_R_M4_1_SEMA42 304 +#define IMX_SC_R_M4_1_TPM 305 +#define IMX_SC_R_M4_1_PIT 306 +#define IMX_SC_R_M4_1_UART 307 +#define IMX_SC_R_M4_1_I2C 308 +#define IMX_SC_R_M4_1_INTMUX 309 +#define IMX_SC_R_M4_1_MU_0B 312 +#define IMX_SC_R_M4_1_MU_0A0 313 +#define IMX_SC_R_M4_1_MU_0A1 314 +#define IMX_SC_R_M4_1_MU_0A2 315 +#define IMX_SC_R_M4_1_MU_0A3 316 +#define IMX_SC_R_M4_1_MU_1A 317 +#define IMX_SC_R_SAI_0 318 +#define IMX_SC_R_SAI_1 319 +#define IMX_SC_R_SAI_2 320 +#define IMX_SC_R_IRQSTR_SCU2 321 +#define IMX_SC_R_IRQSTR_DSP 322 +#define IMX_SC_R_ELCDIF_PLL 323 +#define IMX_SC_R_OCRAM 324 +#define IMX_SC_R_AUDIO_PLL_0 325 +#define IMX_SC_R_PI_0 326 +#define IMX_SC_R_PI_0_PWM_0 327 +#define IMX_SC_R_PI_0_PWM_1 328 +#define IMX_SC_R_PI_0_I2C_0 329 +#define IMX_SC_R_PI_0_PLL 330 +#define IMX_SC_R_PI_1 331 +#define IMX_SC_R_PI_1_PWM_0 332 +#define IMX_SC_R_PI_1_PWM_1 333 +#define IMX_SC_R_PI_1_I2C_0 334 +#define IMX_SC_R_PI_1_PLL 335 +#define IMX_SC_R_SC_PID0 336 +#define IMX_SC_R_SC_PID1 337 +#define IMX_SC_R_SC_PID2 338 +#define IMX_SC_R_SC_PID3 339 +#define IMX_SC_R_SC_PID4 340 +#define IMX_SC_R_SC_SEMA42 341 +#define IMX_SC_R_SC_TPM 342 +#define IMX_SC_R_SC_PIT 343 +#define IMX_SC_R_SC_UART 344 +#define IMX_SC_R_SC_I2C 345 +#define IMX_SC_R_SC_MU_0B 346 +#define IMX_SC_R_SC_MU_0A0 347 +#define IMX_SC_R_SC_MU_0A1 348 +#define IMX_SC_R_SC_MU_0A2 349 +#define IMX_SC_R_SC_MU_0A3 350 +#define IMX_SC_R_SC_MU_1A 351 +#define IMX_SC_R_SYSCNT_RD 352 +#define IMX_SC_R_SYSCNT_CMP 353 +#define IMX_SC_R_DEBUG 354 +#define IMX_SC_R_SYSTEM 355 +#define IMX_SC_R_SNVS 356 +#define IMX_SC_R_OTP 357 +#define IMX_SC_R_VPU_PID0 358 +#define IMX_SC_R_VPU_PID1 359 +#define IMX_SC_R_VPU_PID2 360 +#define IMX_SC_R_VPU_PID3 361 +#define 
IMX_SC_R_VPU_PID4 362 +#define IMX_SC_R_VPU_PID5 363 +#define IMX_SC_R_VPU_PID6 364 +#define IMX_SC_R_VPU_PID7 365 +#define IMX_SC_R_VPU_UART 366 +#define IMX_SC_R_VPUCORE 367 +#define IMX_SC_R_VPUCORE_0 368 +#define IMX_SC_R_VPUCORE_1 369 +#define IMX_SC_R_VPUCORE_2 370 +#define IMX_SC_R_VPUCORE_3 371 +#define IMX_SC_R_DMA_4_CH0 372 +#define IMX_SC_R_DMA_4_CH1 373 +#define IMX_SC_R_DMA_4_CH2 374 +#define IMX_SC_R_DMA_4_CH3 375 +#define IMX_SC_R_DMA_4_CH4 376 +#define IMX_SC_R_ISI_CH0 377 +#define IMX_SC_R_ISI_CH1 378 +#define IMX_SC_R_ISI_CH2 379 +#define IMX_SC_R_ISI_CH3 380 +#define IMX_SC_R_ISI_CH4 381 +#define IMX_SC_R_ISI_CH5 382 +#define IMX_SC_R_ISI_CH6 383 +#define IMX_SC_R_ISI_CH7 384 +#define IMX_SC_R_MJPEG_DEC_S0 385 +#define IMX_SC_R_MJPEG_DEC_S1 386 +#define IMX_SC_R_MJPEG_DEC_S2 387 +#define IMX_SC_R_MJPEG_DEC_S3 388 +#define IMX_SC_R_MJPEG_ENC_S0 389 +#define IMX_SC_R_MJPEG_ENC_S1 390 +#define IMX_SC_R_MJPEG_ENC_S2 391 +#define IMX_SC_R_MJPEG_ENC_S3 392 +#define IMX_SC_R_MIPI_0 393 +#define IMX_SC_R_MIPI_0_PWM_0 394 +#define IMX_SC_R_MIPI_0_I2C_0 395 +#define IMX_SC_R_MIPI_0_I2C_1 396 +#define IMX_SC_R_MIPI_1 397 +#define IMX_SC_R_MIPI_1_PWM_0 398 +#define IMX_SC_R_MIPI_1_I2C_0 399 +#define IMX_SC_R_MIPI_1_I2C_1 400 +#define IMX_SC_R_CSI_0 401 +#define IMX_SC_R_CSI_0_PWM_0 402 +#define IMX_SC_R_CSI_0_I2C_0 403 +#define IMX_SC_R_CSI_1 404 +#define IMX_SC_R_CSI_1_PWM_0 405 +#define IMX_SC_R_CSI_1_I2C_0 406 +#define IMX_SC_R_HDMI 407 +#define IMX_SC_R_HDMI_I2S 408 +#define IMX_SC_R_HDMI_I2C_0 409 +#define IMX_SC_R_HDMI_PLL_0 410 +#define IMX_SC_R_HDMI_RX 411 +#define IMX_SC_R_HDMI_RX_BYPASS 412 +#define IMX_SC_R_HDMI_RX_I2C_0 413 +#define IMX_SC_R_ASRC_0 414 +#define IMX_SC_R_ESAI_0 415 +#define IMX_SC_R_SPDIF_0 416 +#define IMX_SC_R_SPDIF_1 417 +#define IMX_SC_R_SAI_3 418 +#define IMX_SC_R_SAI_4 419 +#define IMX_SC_R_SAI_5 420 +#define IMX_SC_R_GPT_5 421 +#define IMX_SC_R_GPT_6 422 +#define IMX_SC_R_GPT_7 423 +#define IMX_SC_R_GPT_8 424 +#define IMX_SC_R_GPT_9 425 +#define IMX_SC_R_GPT_10 426 +#define IMX_SC_R_DMA_2_CH5 427 +#define IMX_SC_R_DMA_2_CH6 428 +#define IMX_SC_R_DMA_2_CH7 429 +#define IMX_SC_R_DMA_2_CH8 430 +#define IMX_SC_R_DMA_2_CH9 431 +#define IMX_SC_R_DMA_2_CH10 432 +#define IMX_SC_R_DMA_2_CH11 433 +#define IMX_SC_R_DMA_2_CH12 434 +#define IMX_SC_R_DMA_2_CH13 435 +#define IMX_SC_R_DMA_2_CH14 436 +#define IMX_SC_R_DMA_2_CH15 437 +#define IMX_SC_R_DMA_2_CH16 438 +#define IMX_SC_R_DMA_2_CH17 439 +#define IMX_SC_R_DMA_2_CH18 440 +#define IMX_SC_R_DMA_2_CH19 441 +#define IMX_SC_R_DMA_2_CH20 442 +#define IMX_SC_R_DMA_2_CH21 443 +#define IMX_SC_R_DMA_2_CH22 444 +#define IMX_SC_R_DMA_2_CH23 445 +#define IMX_SC_R_DMA_2_CH24 446 +#define IMX_SC_R_DMA_2_CH25 447 +#define IMX_SC_R_DMA_2_CH26 448 +#define IMX_SC_R_DMA_2_CH27 449 +#define IMX_SC_R_DMA_2_CH28 450 +#define IMX_SC_R_DMA_2_CH29 451 +#define IMX_SC_R_DMA_2_CH30 452 +#define IMX_SC_R_DMA_2_CH31 453 +#define IMX_SC_R_ASRC_1 454 +#define IMX_SC_R_ESAI_1 455 +#define IMX_SC_R_SAI_6 456 +#define IMX_SC_R_SAI_7 457 +#define IMX_SC_R_AMIX 458 +#define IMX_SC_R_MQS_0 459 +#define IMX_SC_R_DMA_3_CH0 460 +#define IMX_SC_R_DMA_3_CH1 461 +#define IMX_SC_R_DMA_3_CH2 462 +#define IMX_SC_R_DMA_3_CH3 463 +#define IMX_SC_R_DMA_3_CH4 464 +#define IMX_SC_R_DMA_3_CH5 465 +#define IMX_SC_R_DMA_3_CH6 466 +#define IMX_SC_R_DMA_3_CH7 467 +#define IMX_SC_R_DMA_3_CH8 468 +#define IMX_SC_R_DMA_3_CH9 469 +#define IMX_SC_R_DMA_3_CH10 470 +#define IMX_SC_R_DMA_3_CH11 471 +#define IMX_SC_R_DMA_3_CH12 472 +#define IMX_SC_R_DMA_3_CH13 473 
+#define IMX_SC_R_DMA_3_CH14 474 +#define IMX_SC_R_DMA_3_CH15 475 +#define IMX_SC_R_DMA_3_CH16 476 +#define IMX_SC_R_DMA_3_CH17 477 +#define IMX_SC_R_DMA_3_CH18 478 +#define IMX_SC_R_DMA_3_CH19 479 +#define IMX_SC_R_DMA_3_CH20 480 +#define IMX_SC_R_DMA_3_CH21 481 +#define IMX_SC_R_DMA_3_CH22 482 +#define IMX_SC_R_DMA_3_CH23 483 +#define IMX_SC_R_DMA_3_CH24 484 +#define IMX_SC_R_DMA_3_CH25 485 +#define IMX_SC_R_DMA_3_CH26 486 +#define IMX_SC_R_DMA_3_CH27 487 +#define IMX_SC_R_DMA_3_CH28 488 +#define IMX_SC_R_DMA_3_CH29 489 +#define IMX_SC_R_DMA_3_CH30 490 +#define IMX_SC_R_DMA_3_CH31 491 +#define IMX_SC_R_AUDIO_PLL_1 492 +#define IMX_SC_R_AUDIO_CLK_0 493 +#define IMX_SC_R_AUDIO_CLK_1 494 +#define IMX_SC_R_MCLK_OUT_0 495 +#define IMX_SC_R_MCLK_OUT_1 496 +#define IMX_SC_R_PMIC_0 497 +#define IMX_SC_R_PMIC_1 498 +#define IMX_SC_R_SECO 499 +#define IMX_SC_R_CAAM_JR1 500 +#define IMX_SC_R_CAAM_JR2 501 +#define IMX_SC_R_CAAM_JR3 502 +#define IMX_SC_R_SECO_MU_2 503 +#define IMX_SC_R_SECO_MU_3 504 +#define IMX_SC_R_SECO_MU_4 505 +#define IMX_SC_R_HDMI_RX_PWM_0 506 +#define IMX_SC_R_A35 507 +#define IMX_SC_R_A35_0 508 +#define IMX_SC_R_A35_1 509 +#define IMX_SC_R_A35_2 510 +#define IMX_SC_R_A35_3 511 +#define IMX_SC_R_DSP 512 +#define IMX_SC_R_DSP_RAM 513 +#define IMX_SC_R_CAAM_JR1_OUT 514 +#define IMX_SC_R_CAAM_JR2_OUT 515 +#define IMX_SC_R_CAAM_JR3_OUT 516 +#define IMX_SC_R_VPU_DEC_0 517 +#define IMX_SC_R_VPU_ENC_0 518 +#define IMX_SC_R_CAAM_JR0 519 +#define IMX_SC_R_CAAM_JR0_OUT 520 +#define IMX_SC_R_PMIC_2 521 +#define IMX_SC_R_DBLOGIC 522 +#define IMX_SC_R_HDMI_PLL_1 523 +#define IMX_SC_R_BOARD_R0 524 +#define IMX_SC_R_BOARD_R1 525 +#define IMX_SC_R_BOARD_R2 526 +#define IMX_SC_R_BOARD_R3 527 +#define IMX_SC_R_BOARD_R4 528 +#define IMX_SC_R_BOARD_R5 529 +#define IMX_SC_R_BOARD_R6 530 +#define IMX_SC_R_BOARD_R7 531 +#define IMX_SC_R_MJPEG_DEC_MP 532 +#define IMX_SC_R_MJPEG_ENC_MP 533 +#define IMX_SC_R_VPU_TS_0 534 +#define IMX_SC_R_VPU_MU_0 535 +#define IMX_SC_R_VPU_MU_1 536 +#define IMX_SC_R_VPU_MU_2 537 +#define IMX_SC_R_VPU_MU_3 538 +#define IMX_SC_R_VPU_ENC_1 539 +#define IMX_SC_R_VPU 540 +#define IMX_SC_R_DMA_5_CH0 541 +#define IMX_SC_R_DMA_5_CH1 542 +#define IMX_SC_R_DMA_5_CH2 543 +#define IMX_SC_R_DMA_5_CH3 544 +#define IMX_SC_R_ATTESTATION 545 +#define IMX_SC_R_LAST 546 + +#endif /* __DT_BINDINGS_RSCRC_IMX_H */ diff --git a/include/dt-bindings/gce/mt8173-gce.h b/include/dt-bindings/gce/mt8173-gce.h new file mode 100644 index 0000000..ffcf94b --- /dev/null +++ b/include/dt-bindings/gce/mt8173-gce.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Houlong Wei + * + */ + +#ifndef _DT_BINDINGS_GCE_MT8173_H +#define _DT_BINDINGS_GCE_MT8173_H + +/* GCE HW thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_HIGHEST 1 + +/* GCE SUBSYS */ +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 + +/* GCE HW EVENT */ +#define CMDQ_EVENT_DISP_OVL0_SOF 11 +#define CMDQ_EVENT_DISP_OVL1_SOF 12 +#define CMDQ_EVENT_DISP_RDMA0_SOF 13 +#define CMDQ_EVENT_DISP_RDMA1_SOF 14 +#define CMDQ_EVENT_DISP_RDMA2_SOF 15 +#define CMDQ_EVENT_DISP_WDMA0_SOF 16 +#define CMDQ_EVENT_DISP_WDMA1_SOF 17 +#define CMDQ_EVENT_DISP_OVL0_EOF 39 +#define CMDQ_EVENT_DISP_OVL1_EOF 40 +#define CMDQ_EVENT_DISP_RDMA0_EOF 41 +#define CMDQ_EVENT_DISP_RDMA1_EOF 42 +#define CMDQ_EVENT_DISP_RDMA2_EOF 43 +#define CMDQ_EVENT_DISP_WDMA0_EOF 44 +#define CMDQ_EVENT_DISP_WDMA1_EOF 45 +#define CMDQ_EVENT_MUTEX0_STREAM_EOF 53 +#define CMDQ_EVENT_MUTEX1_STREAM_EOF 54 +#define CMDQ_EVENT_MUTEX2_STREAM_EOF 55 +#define CMDQ_EVENT_MUTEX3_STREAM_EOF 56 +#define CMDQ_EVENT_MUTEX4_STREAM_EOF 57 +#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 63 +#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 64 +#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 65 + +#endif diff --git a/include/dt-bindings/gce/mt8183-gce.h b/include/dt-bindings/gce/mt8183-gce.h new file mode 100644 index 0000000..29c9674 --- /dev/null +++ b/include/dt-bindings/gce/mt8183-gce.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Bibby Hsieh + * + */ + +#ifndef _DT_BINDINGS_GCE_MT8183_H +#define _DT_BINDINGS_GCE_MT8183_H + +#define CMDQ_NO_TIMEOUT 0xffffffff + +/* GCE HW thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_HIGHEST 1 + +/* GCE SUBSYS */ +#define SUBSYS_1300XXXX 0 +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 +#define SUBSYS_1502XXXX 4 +#define SUBSYS_1880XXXX 5 +#define SUBSYS_1881XXXX 6 +#define SUBSYS_1882XXXX 7 +#define SUBSYS_1883XXXX 8 +#define SUBSYS_1884XXXX 9 +#define SUBSYS_1000XXXX 10 +#define SUBSYS_1001XXXX 11 +#define SUBSYS_1002XXXX 12 +#define SUBSYS_1003XXXX 13 +#define SUBSYS_1004XXXX 14 +#define SUBSYS_1005XXXX 15 +#define SUBSYS_1020XXXX 16 +#define SUBSYS_1028XXXX 17 +#define SUBSYS_1700XXXX 18 +#define SUBSYS_1701XXXX 19 +#define SUBSYS_1702XXXX 20 +#define SUBSYS_1703XXXX 21 +#define SUBSYS_1800XXXX 22 +#define SUBSYS_1801XXXX 23 +#define SUBSYS_1802XXXX 24 +#define SUBSYS_1804XXXX 25 +#define SUBSYS_1805XXXX 26 +#define SUBSYS_1808XXXX 27 +#define SUBSYS_180aXXXX 28 +#define SUBSYS_180bXXXX 29 + +#define CMDQ_EVENT_DISP_RDMA0_SOF 0 +#define CMDQ_EVENT_DISP_RDMA1_SOF 1 +#define CMDQ_EVENT_MDP_RDMA0_SOF 2 +#define CMDQ_EVENT_MDP_RSZ0_SOF 4 +#define CMDQ_EVENT_MDP_RSZ1_SOF 5 +#define CMDQ_EVENT_MDP_TDSHP_SOF 6 +#define CMDQ_EVENT_MDP_WROT0_SOF 7 +#define CMDQ_EVENT_MDP_WDMA0_SOF 8 +#define CMDQ_EVENT_DISP_OVL0_SOF 9 +#define CMDQ_EVENT_DISP_OVL0_2L_SOF 10 +#define CMDQ_EVENT_DISP_OVL1_2L_SOF 11 +#define CMDQ_EVENT_DISP_WDMA0_SOF 12 +#define CMDQ_EVENT_DISP_COLOR0_SOF 13 +#define CMDQ_EVENT_DISP_CCORR0_SOF 14 +#define CMDQ_EVENT_DISP_AAL0_SOF 15 +#define CMDQ_EVENT_DISP_GAMMA0_SOF 16 +#define CMDQ_EVENT_DISP_DITHER0_SOF 17 +#define CMDQ_EVENT_DISP_PWM0_SOF 18 +#define CMDQ_EVENT_DISP_DSI0_SOF 19 +#define CMDQ_EVENT_DISP_DPI0_SOF 20 +#define CMDQ_EVENT_DISP_RSZ_SOF 22 +#define CMDQ_EVENT_MDP_AAL_SOF 23 +#define CMDQ_EVENT_MDP_CCORR_SOF 24 +#define CMDQ_EVENT_DISP_DBI_SOF 25 +#define CMDQ_EVENT_DISP_RDMA0_EOF 26 +#define CMDQ_EVENT_DISP_RDMA1_EOF 27 +#define 
CMDQ_EVENT_MDP_RDMA0_EOF 28 +#define CMDQ_EVENT_MDP_RSZ0_EOF 30 +#define CMDQ_EVENT_MDP_RSZ1_EOF 31 +#define CMDQ_EVENT_MDP_TDSHP_EOF 32 +#define CMDQ_EVENT_MDP_WROT0_EOF 33 +#define CMDQ_EVENT_MDP_WDMA0_EOF 34 +#define CMDQ_EVENT_DISP_OVL0_EOF 35 +#define CMDQ_EVENT_DISP_OVL0_2L_EOF 36 +#define CMDQ_EVENT_DISP_OVL1_2L_EOF 37 +#define CMDQ_EVENT_DISP_WDMA0_EOF 38 +#define CMDQ_EVENT_DISP_COLOR0_EOF 39 +#define CMDQ_EVENT_DISP_CCORR0_EOF 40 +#define CMDQ_EVENT_DISP_AAL0_EOF 41 +#define CMDQ_EVENT_DISP_GAMMA0_EOF 42 +#define CMDQ_EVENT_DISP_DITHER0_EOF 43 +#define CMDQ_EVENT_DSI0_EOF 44 +#define CMDQ_EVENT_DPI0_EOF 45 +#define CMDQ_EVENT_DISP_RSZ_EOF 47 +#define CMDQ_EVENT_MDP_AAL_EOF 48 +#define CMDQ_EVENT_MDP_CCORR_EOF 49 +#define CMDQ_EVENT_DBI_EOF 50 +#define CMDQ_EVENT_MUTEX_STREAM_DONE0 130 +#define CMDQ_EVENT_MUTEX_STREAM_DONE1 131 +#define CMDQ_EVENT_MUTEX_STREAM_DONE2 132 +#define CMDQ_EVENT_MUTEX_STREAM_DONE3 133 +#define CMDQ_EVENT_MUTEX_STREAM_DONE4 134 +#define CMDQ_EVENT_MUTEX_STREAM_DONE5 135 +#define CMDQ_EVENT_MUTEX_STREAM_DONE6 136 +#define CMDQ_EVENT_MUTEX_STREAM_DONE7 137 +#define CMDQ_EVENT_MUTEX_STREAM_DONE8 138 +#define CMDQ_EVENT_MUTEX_STREAM_DONE9 139 +#define CMDQ_EVENT_MUTEX_STREAM_DONE10 140 +#define CMDQ_EVENT_MUTEX_STREAM_DONE11 141 +#define CMDQ_EVENT_DISP_RDMA0_BUF_UNDERRUN_EVEN 142 +#define CMDQ_EVENT_DISP_RDMA1_BUF_UNDERRUN_EVEN 143 +#define CMDQ_EVENT_DSI0_TE_EVENT 144 +#define CMDQ_EVENT_DSI0_IRQ_EVENT 145 +#define CMDQ_EVENT_DSI0_DONE_EVENT 146 +#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE 150 +#define CMDQ_EVENT_MDP_WDMA_SW_RST_DONE 151 +#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE 152 +#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE 154 +#define CMDQ_EVENT_DISP_OVL0_FRAME_RST_DONE_PULE 155 +#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_RST_DONE_ULSE 156 +#define CMDQ_EVENT_DISP_OVL1_2L_FRAME_RST_DONE_ULSE 157 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_0 257 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_1 258 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_2 259 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_3 260 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_4 261 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_5 262 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_6 263 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_7 264 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_8 265 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_9 266 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_10 267 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_11 268 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_12 269 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_13 270 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_14 271 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_15 272 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_16 273 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_17 274 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_18 275 +#define CMDQ_EVENT_AMD_FRAME_DONE 276 +#define CMDQ_EVENT_DVE_DONE 277 +#define CMDQ_EVENT_WMFE_DONE 278 +#define CMDQ_EVENT_RSC_DONE 279 +#define CMDQ_EVENT_MFB_DONE 280 +#define CMDQ_EVENT_WPE_A_DONE 281 +#define CMDQ_EVENT_SPE_B_DONE 282 +#define CMDQ_EVENT_OCC_DONE 283 +#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 289 +#define CMDQ_EVENT_JPG_ENC_CMDQ_DONE 290 +#define CMDQ_EVENT_JPG_DEC_CMDQ_DONE 291 +#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 292 +#define CMDQ_EVENT_VENC_CMDQ_128BYTE_DONE 293 +#define CMDQ_EVENT_ISP_FRAME_DONE_A 321 +#define CMDQ_EVENT_ISP_FRAME_DONE_B 322 +#define CMDQ_EVENT_CAMSV0_PASS1_DONE 323 +#define CMDQ_EVENT_CAMSV1_PASS1_DONE 324 +#define CMDQ_EVENT_CAMSV2_PASS1_DONE 325 +#define CMDQ_EVENT_TSF_DONE 326 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 327 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 328 +#define 
CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 329
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 330
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 331
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 332
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 333
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 334
+#define CMDQ_EVENT_IPU_CORE0_DONE0 353
+#define CMDQ_EVENT_IPU_CORE0_DONE1 354
+#define CMDQ_EVENT_IPU_CORE0_DONE2 355
+#define CMDQ_EVENT_IPU_CORE0_DONE3 356
+#define CMDQ_EVENT_IPU_CORE1_DONE0 385
+#define CMDQ_EVENT_IPU_CORE1_DONE1 386
+#define CMDQ_EVENT_IPU_CORE1_DONE2 387
+#define CMDQ_EVENT_IPU_CORE1_DONE3 388
+
+#endif
diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h
new file mode 100644
index 0000000..56fc488
--- /dev/null
+++ b/include/dt-bindings/gpio/aspeed-gpio.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * This header provides constants for binding aspeed,*-gpio.
+ *
+ * The first cell in Aspeed's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_ASPEED_GPIO_H
+#define _DT_BINDINGS_GPIO_ASPEED_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+#define ASPEED_GPIO_PORT_A 0
+#define ASPEED_GPIO_PORT_B 1
+#define ASPEED_GPIO_PORT_C 2
+#define ASPEED_GPIO_PORT_D 3
+#define ASPEED_GPIO_PORT_E 4
+#define ASPEED_GPIO_PORT_F 5
+#define ASPEED_GPIO_PORT_G 6
+#define ASPEED_GPIO_PORT_H 7
+#define ASPEED_GPIO_PORT_I 8
+#define ASPEED_GPIO_PORT_J 9
+#define ASPEED_GPIO_PORT_K 10
+#define ASPEED_GPIO_PORT_L 11
+#define ASPEED_GPIO_PORT_M 12
+#define ASPEED_GPIO_PORT_N 13
+#define ASPEED_GPIO_PORT_O 14
+#define ASPEED_GPIO_PORT_P 15
+#define ASPEED_GPIO_PORT_Q 16
+#define ASPEED_GPIO_PORT_R 17
+#define ASPEED_GPIO_PORT_S 18
+#define ASPEED_GPIO_PORT_T 19
+#define ASPEED_GPIO_PORT_U 20
+#define ASPEED_GPIO_PORT_V 21
+#define ASPEED_GPIO_PORT_W 22
+#define ASPEED_GPIO_PORT_X 23
+#define ASPEED_GPIO_PORT_Y 24
+#define ASPEED_GPIO_PORT_Z 25
+#define ASPEED_GPIO_PORT_AA 26
+#define ASPEED_GPIO_PORT_AB 27
+#define ASPEED_GPIO_PORT_AC 28
+
+#define ASPEED_GPIO(port, offset) \
+ ((ASPEED_GPIO_PORT_##port * 8) + offset)
+
+#endif
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
new file mode 100644
index 0000000..c029467
--- /dev/null
+++ b/include/dt-bindings/gpio/gpio.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for most GPIO bindings.
+ *
+ * Most GPIO bindings include a flags cell as part of the GPIO specifier.
+ * In most cases, the format of the flags cell uses the standard values
+ * defined in this header.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_GPIO_H
+#define _DT_BINDINGS_GPIO_GPIO_H
+
+/* Bit 0 expresses polarity */
+#define GPIO_ACTIVE_HIGH 0
+#define GPIO_ACTIVE_LOW 1
+
+/* Bit 1 expresses single-endedness */
+#define GPIO_PUSH_PULL 0
+#define GPIO_SINGLE_ENDED 2
+
+/* Bit 2 expresses open drain or open source */
+#define GPIO_LINE_OPEN_SOURCE 0
+#define GPIO_LINE_OPEN_DRAIN 4
+
+/*
+ * Open Drain/Collector is the combination of the single-ended and open-drain
+ * flags. Open Source/Emitter is the combination of the single-ended and
+ * open-source flags.
+ */
+#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
+#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
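+
+/*
+ * For illustration, a sketch of how a consumer node might combine these
+ * flags in the flags cell of a GPIO specifier; the controller label
+ * "gpio1" and line number 7 are assumptions, not part of this binding:
+ *
+ *   reset-gpios = <&gpio1 7 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+ */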
+
+/* Bit 3 expresses GPIO suspend/resume and reset persistence */
+#define GPIO_PERSISTENT 0
+#define GPIO_TRANSITORY 8
+
+/* Bit 4 expresses pull up */
+#define GPIO_PULL_UP 16
+
+/* Bit 5 expresses pull down */
+#define GPIO_PULL_DOWN 32
+
+#endif
diff --git a/include/dt-bindings/gpio/meson-axg-gpio.h b/include/dt-bindings/gpio/meson-axg-gpio.h
new file mode 100644
index 0000000..25bb1ff
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-axg-gpio.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2017 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _DT_BINDINGS_MESON_AXG_GPIO_H
+#define _DT_BINDINGS_MESON_AXG_GPIO_H
+
+/* First GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+#define GPIO_TEST_N 14
+
+/* Second GPIO chip */
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define BOOT_0 11
+#define BOOT_1 12
+#define BOOT_2 13
+#define BOOT_3 14
+#define BOOT_4 15
+#define BOOT_5 16
+#define BOOT_6 17
+#define BOOT_7 18
+#define BOOT_8 19
+#define BOOT_9 20
+#define BOOT_10 21
+#define BOOT_11 22
+#define BOOT_12 23
+#define BOOT_13 24
+#define BOOT_14 25
+#define GPIOA_0 26
+#define GPIOA_1 27
+#define GPIOA_2 28
+#define GPIOA_3 29
+#define GPIOA_4 30
+#define GPIOA_5 31
+#define GPIOA_6 32
+#define GPIOA_7 33
+#define GPIOA_8 34
+#define GPIOA_9 35
+#define GPIOA_10 36
+#define GPIOA_11 37
+#define GPIOA_12 38
+#define GPIOA_13 39
+#define GPIOA_14 40
+#define GPIOA_15 41
+#define GPIOA_16 42
+#define GPIOA_17 43
+#define GPIOA_18 44
+#define GPIOA_19 45
+#define GPIOA_20 46
+#define GPIOX_0 47
+#define GPIOX_1 48
+#define GPIOX_2 49
+#define GPIOX_3 50
+#define GPIOX_4 51
+#define GPIOX_5 52
+#define GPIOX_6 53
+#define GPIOX_7 54
+#define GPIOX_8 55
+#define GPIOX_9 56
+#define GPIOX_10 57
+#define GPIOX_11 58
+#define GPIOX_12 59
+#define GPIOX_13 60
+#define GPIOX_14 61
+#define GPIOX_15 62
+#define GPIOX_16 63
+#define GPIOX_17 64
+#define GPIOX_18 65
+#define GPIOX_19 66
+#define GPIOX_20 67
+#define GPIOX_21 68
+#define GPIOX_22 69
+#define GPIOY_0 70
+#define GPIOY_1 71
+#define GPIOY_2 72
+#define GPIOY_3 73
+#define GPIOY_4 74
+#define GPIOY_5 75
+#define GPIOY_6 76
+#define GPIOY_7 77
+#define GPIOY_8 78
+#define GPIOY_9 79
+#define GPIOY_10 80
+#define GPIOY_11 81
+#define GPIOY_12 82
+#define GPIOY_13 83
+#define GPIOY_14 84
+#define GPIOY_15 85
+
+#endif /* _DT_BINDINGS_MESON_AXG_GPIO_H */
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
new file mode 100644
index 0000000..f7bd693
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen + */ + +#ifndef _DT_BINDINGS_MESON_G12A_GPIO_H +#define _DT_BINDINGS_MESON_G12A_GPIO_H + +/* First GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOE_0 12 +#define GPIOE_1 13 +#define GPIOE_2 14 + +/* Second GPIO chip */ +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define GPIOH_4 20 +#define GPIOH_5 21 +#define GPIOH_6 22 +#define GPIOH_7 23 +#define GPIOH_8 24 +#define BOOT_0 25 +#define BOOT_1 26 +#define BOOT_2 27 +#define BOOT_3 28 +#define BOOT_4 29 +#define BOOT_5 30 +#define BOOT_6 31 +#define BOOT_7 32 +#define BOOT_8 33 +#define BOOT_9 34 +#define BOOT_10 35 +#define BOOT_11 36 +#define BOOT_12 37 +#define BOOT_13 38 +#define BOOT_14 39 +#define BOOT_15 40 +#define GPIOC_0 41 +#define GPIOC_1 42 +#define GPIOC_2 43 +#define GPIOC_3 44 +#define GPIOC_4 45 +#define GPIOC_5 46 +#define GPIOC_6 47 +#define GPIOC_7 48 +#define GPIOA_0 49 +#define GPIOA_1 50 +#define GPIOA_2 51 +#define GPIOA_3 52 +#define GPIOA_4 53 +#define GPIOA_5 54 +#define GPIOA_6 55 +#define GPIOA_7 56 +#define GPIOA_8 57 +#define GPIOA_9 58 +#define GPIOA_10 59 +#define GPIOA_11 60 +#define GPIOA_12 61 +#define GPIOA_13 62 +#define GPIOA_14 63 +#define GPIOA_15 64 +#define GPIOX_0 65 +#define GPIOX_1 66 +#define GPIOX_2 67 +#define GPIOX_3 68 +#define GPIOX_4 69 +#define GPIOX_5 70 +#define GPIOX_6 71 +#define GPIOX_7 72 +#define GPIOX_8 73 +#define GPIOX_9 74 +#define GPIOX_10 75 +#define GPIOX_11 76 +#define GPIOX_12 77 +#define GPIOX_13 78 +#define GPIOX_14 79 +#define GPIOX_15 80 +#define GPIOX_16 81 +#define GPIOX_17 82 +#define GPIOX_18 83 +#define GPIOX_19 84 + +#endif /* _DT_BINDINGS_MESON_G12A_GPIO_H */ diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h new file mode 100644 index 0000000..489c75b --- /dev/null +++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * GPIO definitions for Amlogic Meson GXBB SoCs + * + * Copyright (C) 2016 Endless Mobile, Inc. 
+ * Author: Carlo Caione + */ + +#ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H +#define _DT_BINDINGS_MESON_GXBB_GPIO_H + +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_TEST_N 14 + +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define BOOT_0 20 +#define BOOT_1 21 +#define BOOT_2 22 +#define BOOT_3 23 +#define BOOT_4 24 +#define BOOT_5 25 +#define BOOT_6 26 +#define BOOT_7 27 +#define BOOT_8 28 +#define BOOT_9 29 +#define BOOT_10 30 +#define BOOT_11 31 +#define BOOT_12 32 +#define BOOT_13 33 +#define BOOT_14 34 +#define BOOT_15 35 +#define BOOT_16 36 +#define BOOT_17 37 +#define CARD_0 38 +#define CARD_1 39 +#define CARD_2 40 +#define CARD_3 41 +#define CARD_4 42 +#define CARD_5 43 +#define CARD_6 44 +#define GPIODV_0 45 +#define GPIODV_1 46 +#define GPIODV_2 47 +#define GPIODV_3 48 +#define GPIODV_4 49 +#define GPIODV_5 50 +#define GPIODV_6 51 +#define GPIODV_7 52 +#define GPIODV_8 53 +#define GPIODV_9 54 +#define GPIODV_10 55 +#define GPIODV_11 56 +#define GPIODV_12 57 +#define GPIODV_13 58 +#define GPIODV_14 59 +#define GPIODV_15 60 +#define GPIODV_16 61 +#define GPIODV_17 62 +#define GPIODV_18 63 +#define GPIODV_19 64 +#define GPIODV_20 65 +#define GPIODV_21 66 +#define GPIODV_22 67 +#define GPIODV_23 68 +#define GPIODV_24 69 +#define GPIODV_25 70 +#define GPIODV_26 71 +#define GPIODV_27 72 +#define GPIODV_28 73 +#define GPIODV_29 74 +#define GPIOY_0 75 +#define GPIOY_1 76 +#define GPIOY_2 77 +#define GPIOY_3 78 +#define GPIOY_4 79 +#define GPIOY_5 80 +#define GPIOY_6 81 +#define GPIOY_7 82 +#define GPIOY_8 83 +#define GPIOY_9 84 +#define GPIOY_10 85 +#define GPIOY_11 86 +#define GPIOY_12 87 +#define GPIOY_13 88 +#define GPIOY_14 89 +#define GPIOY_15 90 +#define GPIOY_16 91 +#define GPIOX_0 92 +#define GPIOX_1 93 +#define GPIOX_2 94 +#define GPIOX_3 95 +#define GPIOX_4 96 +#define GPIOX_5 97 +#define GPIOX_6 98 +#define GPIOX_7 99 +#define GPIOX_8 100 +#define GPIOX_9 101 +#define GPIOX_10 102 +#define GPIOX_11 103 +#define GPIOX_12 104 +#define GPIOX_13 105 +#define GPIOX_14 106 +#define GPIOX_15 107 +#define GPIOX_16 108 +#define GPIOX_17 109 +#define GPIOX_18 110 +#define GPIOX_19 111 +#define GPIOX_20 112 +#define GPIOX_21 113 +#define GPIOX_22 114 +#define GPIOCLK_0 115 +#define GPIOCLK_1 116 +#define GPIOCLK_2 117 +#define GPIOCLK_3 118 + +#endif diff --git a/include/dt-bindings/gpio/meson-gxl-gpio.h b/include/dt-bindings/gpio/meson-gxl-gpio.h new file mode 100644 index 0000000..0a001ae --- /dev/null +++ b/include/dt-bindings/gpio/meson-gxl-gpio.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * GPIO definitions for Amlogic Meson GXL SoCs + * + * Copyright (C) 2016 Endless Mobile, Inc. 
+ * Author: Carlo Caione + */ + +#ifndef _DT_BINDINGS_MESON_GXL_GPIO_H +#define _DT_BINDINGS_MESON_GXL_GPIO_H + +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIO_TEST_N 10 + +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define GPIOH_4 20 +#define GPIOH_5 21 +#define GPIOH_6 22 +#define GPIOH_7 23 +#define GPIOH_8 24 +#define GPIOH_9 25 +#define BOOT_0 26 +#define BOOT_1 27 +#define BOOT_2 28 +#define BOOT_3 29 +#define BOOT_4 30 +#define BOOT_5 31 +#define BOOT_6 32 +#define BOOT_7 33 +#define BOOT_8 34 +#define BOOT_9 35 +#define BOOT_10 36 +#define BOOT_11 37 +#define BOOT_12 38 +#define BOOT_13 39 +#define BOOT_14 40 +#define BOOT_15 41 +#define CARD_0 42 +#define CARD_1 43 +#define CARD_2 44 +#define CARD_3 45 +#define CARD_4 46 +#define CARD_5 47 +#define CARD_6 48 +#define GPIODV_0 49 +#define GPIODV_1 50 +#define GPIODV_2 51 +#define GPIODV_3 52 +#define GPIODV_4 53 +#define GPIODV_5 54 +#define GPIODV_6 55 +#define GPIODV_7 56 +#define GPIODV_8 57 +#define GPIODV_9 58 +#define GPIODV_10 59 +#define GPIODV_11 60 +#define GPIODV_12 61 +#define GPIODV_13 62 +#define GPIODV_14 63 +#define GPIODV_15 64 +#define GPIODV_16 65 +#define GPIODV_17 66 +#define GPIODV_18 67 +#define GPIODV_19 68 +#define GPIODV_20 69 +#define GPIODV_21 70 +#define GPIODV_22 71 +#define GPIODV_23 72 +#define GPIODV_24 73 +#define GPIODV_25 74 +#define GPIODV_26 75 +#define GPIODV_27 76 +#define GPIODV_28 77 +#define GPIODV_29 78 +#define GPIOX_0 79 +#define GPIOX_1 80 +#define GPIOX_2 81 +#define GPIOX_3 82 +#define GPIOX_4 83 +#define GPIOX_5 84 +#define GPIOX_6 85 +#define GPIOX_7 86 +#define GPIOX_8 87 +#define GPIOX_9 88 +#define GPIOX_10 89 +#define GPIOX_11 90 +#define GPIOX_12 91 +#define GPIOX_13 92 +#define GPIOX_14 93 +#define GPIOX_15 94 +#define GPIOX_16 95 +#define GPIOX_17 96 +#define GPIOX_18 97 +#define GPIOCLK_0 98 +#define GPIOCLK_1 99 + +#endif diff --git a/include/dt-bindings/gpio/meson8-gpio.h b/include/dt-bindings/gpio/meson8-gpio.h new file mode 100644 index 0000000..e2d0831 --- /dev/null +++ b/include/dt-bindings/gpio/meson8-gpio.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * GPIO definitions for Amlogic Meson8 SoCs + * + * Copyright (C) 2014 Beniamino Galvani + */ + +#ifndef _DT_BINDINGS_MESON8_GPIO_H +#define _DT_BINDINGS_MESON8_GPIO_H + +/* First GPIO chip */ +#define GPIOX_0 0 +#define GPIOX_1 1 +#define GPIOX_2 2 +#define GPIOX_3 3 +#define GPIOX_4 4 +#define GPIOX_5 5 +#define GPIOX_6 6 +#define GPIOX_7 7 +#define GPIOX_8 8 +#define GPIOX_9 9 +#define GPIOX_10 10 +#define GPIOX_11 11 +#define GPIOX_12 12 +#define GPIOX_13 13 +#define GPIOX_14 14 +#define GPIOX_15 15 +#define GPIOX_16 16 +#define GPIOX_17 17 +#define GPIOX_18 18 +#define GPIOX_19 19 +#define GPIOX_20 20 +#define GPIOX_21 21 +#define GPIOY_0 22 +#define GPIOY_1 23 +#define GPIOY_2 24 +#define GPIOY_3 25 +#define GPIOY_4 26 +#define GPIOY_5 27 +#define GPIOY_6 28 +#define GPIOY_7 29 +#define GPIOY_8 30 +#define GPIOY_9 31 +#define GPIOY_10 32 +#define GPIOY_11 33 +#define GPIOY_12 34 
+#define GPIOY_13 35 +#define GPIOY_14 36 +#define GPIOY_15 37 +#define GPIOY_16 38 +#define GPIODV_0 39 +#define GPIODV_1 40 +#define GPIODV_2 41 +#define GPIODV_3 42 +#define GPIODV_4 43 +#define GPIODV_5 44 +#define GPIODV_6 45 +#define GPIODV_7 46 +#define GPIODV_8 47 +#define GPIODV_9 48 +#define GPIODV_10 49 +#define GPIODV_11 50 +#define GPIODV_12 51 +#define GPIODV_13 52 +#define GPIODV_14 53 +#define GPIODV_15 54 +#define GPIODV_16 55 +#define GPIODV_17 56 +#define GPIODV_18 57 +#define GPIODV_19 58 +#define GPIODV_20 59 +#define GPIODV_21 60 +#define GPIODV_22 61 +#define GPIODV_23 62 +#define GPIODV_24 63 +#define GPIODV_25 64 +#define GPIODV_26 65 +#define GPIODV_27 66 +#define GPIODV_28 67 +#define GPIODV_29 68 +#define GPIOH_0 69 +#define GPIOH_1 70 +#define GPIOH_2 71 +#define GPIOH_3 72 +#define GPIOH_4 73 +#define GPIOH_5 74 +#define GPIOH_6 75 +#define GPIOH_7 76 +#define GPIOH_8 77 +#define GPIOH_9 78 +#define GPIOZ_0 79 +#define GPIOZ_1 80 +#define GPIOZ_2 81 +#define GPIOZ_3 82 +#define GPIOZ_4 83 +#define GPIOZ_5 84 +#define GPIOZ_6 85 +#define GPIOZ_7 86 +#define GPIOZ_8 87 +#define GPIOZ_9 88 +#define GPIOZ_10 89 +#define GPIOZ_11 90 +#define GPIOZ_12 91 +#define GPIOZ_13 92 +#define GPIOZ_14 93 +#define CARD_0 94 +#define CARD_1 95 +#define CARD_2 96 +#define CARD_3 97 +#define CARD_4 98 +#define CARD_5 99 +#define CARD_6 100 +#define BOOT_0 101 +#define BOOT_1 102 +#define BOOT_2 103 +#define BOOT_3 104 +#define BOOT_4 105 +#define BOOT_5 106 +#define BOOT_6 107 +#define BOOT_7 108 +#define BOOT_8 109 +#define BOOT_9 110 +#define BOOT_10 111 +#define BOOT_11 112 +#define BOOT_12 113 +#define BOOT_13 114 +#define BOOT_14 115 +#define BOOT_15 116 +#define BOOT_16 117 +#define BOOT_17 118 +#define BOOT_18 119 + +/* Second GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_BSD_EN 14 +#define GPIO_TEST_N 15 + +#endif /* _DT_BINDINGS_MESON8_GPIO_H */ diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h new file mode 100644 index 0000000..7c3bc07 --- /dev/null +++ b/include/dt-bindings/gpio/meson8b-gpio.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * GPIO definitions for Amlogic Meson8b SoCs + * + * Copyright (C) 2015 Endless Mobile, Inc. 
+ * Author: Carlo Caione + */ + +#ifndef _DT_BINDINGS_MESON8B_GPIO_H +#define _DT_BINDINGS_MESON8B_GPIO_H + +/* EE (CBUS) GPIO chip */ +#define GPIOX_0 0 +#define GPIOX_1 1 +#define GPIOX_2 2 +#define GPIOX_3 3 +#define GPIOX_4 4 +#define GPIOX_5 5 +#define GPIOX_6 6 +#define GPIOX_7 7 +#define GPIOX_8 8 +#define GPIOX_9 9 +#define GPIOX_10 10 +#define GPIOX_11 11 +#define GPIOX_16 12 +#define GPIOX_17 13 +#define GPIOX_18 14 +#define GPIOX_19 15 +#define GPIOX_20 16 +#define GPIOX_21 17 + +#define GPIOY_0 18 +#define GPIOY_1 19 +#define GPIOY_3 20 +#define GPIOY_6 21 +#define GPIOY_7 22 +#define GPIOY_8 23 +#define GPIOY_9 24 +#define GPIOY_10 25 +#define GPIOY_11 26 +#define GPIOY_12 27 +#define GPIOY_13 28 +#define GPIOY_14 29 + +#define GPIODV_9 30 +#define GPIODV_24 31 +#define GPIODV_25 32 +#define GPIODV_26 33 +#define GPIODV_27 34 +#define GPIODV_28 35 +#define GPIODV_29 36 + +#define GPIOH_0 37 +#define GPIOH_1 38 +#define GPIOH_2 39 +#define GPIOH_3 40 +#define GPIOH_4 41 +#define GPIOH_5 42 +#define GPIOH_6 43 +#define GPIOH_7 44 +#define GPIOH_8 45 +#define GPIOH_9 46 + +#define CARD_0 47 +#define CARD_1 48 +#define CARD_2 49 +#define CARD_3 50 +#define CARD_4 51 +#define CARD_5 52 +#define CARD_6 53 + +#define BOOT_0 54 +#define BOOT_1 55 +#define BOOT_2 56 +#define BOOT_3 57 +#define BOOT_4 58 +#define BOOT_5 59 +#define BOOT_6 60 +#define BOOT_7 61 +#define BOOT_8 62 +#define BOOT_9 63 +#define BOOT_10 64 +#define BOOT_11 65 +#define BOOT_12 66 +#define BOOT_13 67 +#define BOOT_14 68 +#define BOOT_15 69 +#define BOOT_16 70 +#define BOOT_17 71 +#define BOOT_18 72 + +#define DIF_0_P 73 +#define DIF_0_N 74 +#define DIF_1_P 75 +#define DIF_1_N 76 +#define DIF_2_P 77 +#define DIF_2_N 78 +#define DIF_3_P 79 +#define DIF_3_N 80 +#define DIF_4_P 81 +#define DIF_4_N 82 + +/* AO GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_BSD_EN 14 +#define GPIO_TEST_N 15 + +#endif /* _DT_BINDINGS_MESON8B_GPIO_H */ diff --git a/include/dt-bindings/gpio/tegra-gpio.h b/include/dt-bindings/gpio/tegra-gpio.h new file mode 100644 index 0000000..7625dbc --- /dev/null +++ b/include/dt-bindings/gpio/tegra-gpio.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra*-gpio. + * + * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. 
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+#define TEGRA_GPIO_PORT_A 0
+#define TEGRA_GPIO_PORT_B 1
+#define TEGRA_GPIO_PORT_C 2
+#define TEGRA_GPIO_PORT_D 3
+#define TEGRA_GPIO_PORT_E 4
+#define TEGRA_GPIO_PORT_F 5
+#define TEGRA_GPIO_PORT_G 6
+#define TEGRA_GPIO_PORT_H 7
+#define TEGRA_GPIO_PORT_I 8
+#define TEGRA_GPIO_PORT_J 9
+#define TEGRA_GPIO_PORT_K 10
+#define TEGRA_GPIO_PORT_L 11
+#define TEGRA_GPIO_PORT_M 12
+#define TEGRA_GPIO_PORT_N 13
+#define TEGRA_GPIO_PORT_O 14
+#define TEGRA_GPIO_PORT_P 15
+#define TEGRA_GPIO_PORT_Q 16
+#define TEGRA_GPIO_PORT_R 17
+#define TEGRA_GPIO_PORT_S 18
+#define TEGRA_GPIO_PORT_T 19
+#define TEGRA_GPIO_PORT_U 20
+#define TEGRA_GPIO_PORT_V 21
+#define TEGRA_GPIO_PORT_W 22
+#define TEGRA_GPIO_PORT_X 23
+#define TEGRA_GPIO_PORT_Y 24
+#define TEGRA_GPIO_PORT_Z 25
+#define TEGRA_GPIO_PORT_AA 26
+#define TEGRA_GPIO_PORT_BB 27
+#define TEGRA_GPIO_PORT_CC 28
+#define TEGRA_GPIO_PORT_DD 29
+#define TEGRA_GPIO_PORT_EE 30
+#define TEGRA_GPIO_PORT_FF 31
+
+#define TEGRA_GPIO(port, offset) \
+ ((TEGRA_GPIO_PORT_##port * 8) + offset)
+
+#endif
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
new file mode 100644
index 0000000..0782b05
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for binding nvidia,tegra186-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA186_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA186_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA186_MAIN_GPIO_PORT_A 0
+#define TEGRA186_MAIN_GPIO_PORT_B 1
+#define TEGRA186_MAIN_GPIO_PORT_C 2
+#define TEGRA186_MAIN_GPIO_PORT_D 3
+#define TEGRA186_MAIN_GPIO_PORT_E 4
+#define TEGRA186_MAIN_GPIO_PORT_F 5
+#define TEGRA186_MAIN_GPIO_PORT_G 6
+#define TEGRA186_MAIN_GPIO_PORT_H 7
+#define TEGRA186_MAIN_GPIO_PORT_I 8
+#define TEGRA186_MAIN_GPIO_PORT_J 9
+#define TEGRA186_MAIN_GPIO_PORT_K 10
+#define TEGRA186_MAIN_GPIO_PORT_L 11
+#define TEGRA186_MAIN_GPIO_PORT_M 12
+#define TEGRA186_MAIN_GPIO_PORT_N 13
+#define TEGRA186_MAIN_GPIO_PORT_O 14
+#define TEGRA186_MAIN_GPIO_PORT_P 15
+#define TEGRA186_MAIN_GPIO_PORT_Q 16
+#define TEGRA186_MAIN_GPIO_PORT_R 17
+#define TEGRA186_MAIN_GPIO_PORT_T 18
+#define TEGRA186_MAIN_GPIO_PORT_X 19
+#define TEGRA186_MAIN_GPIO_PORT_Y 20
+#define TEGRA186_MAIN_GPIO_PORT_BB 21
+#define TEGRA186_MAIN_GPIO_PORT_CC 22
+
+#define TEGRA186_MAIN_GPIO(port, offset) \
+ ((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA186_AON_GPIO_PORT_S 0
+#define TEGRA186_AON_GPIO_PORT_U 1
+#define TEGRA186_AON_GPIO_PORT_V 2
+#define TEGRA186_AON_GPIO_PORT_W 3
+#define TEGRA186_AON_GPIO_PORT_Z 4
+#define TEGRA186_AON_GPIO_PORT_AA 5
+#define TEGRA186_AON_GPIO_PORT_EE 6
+#define TEGRA186_AON_GPIO_PORT_FF 7
+
+#define TEGRA186_AON_GPIO(port, offset) \
+ ((TEGRA186_AON_GPIO_PORT_##port * 8) + offset)
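+
+/*
+ * A hedged usage sketch (the controller label "gpio_aon" and the consumer
+ * property name are assumptions for illustration, not part of this header):
+ *
+ *   power-gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 0) GPIO_ACTIVE_LOW>;
+ *
+ * TEGRA186_AON_GPIO(FF, 0) expands to (7 * 8) + 0 = 56, the line number
+ * relative to the AON controller.
+ */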
+
+#endif
diff --git a/include/dt-bindings/gpio/tegra194-gpio.h b/include/dt-bindings/gpio/tegra194-gpio.h
new file mode 100644
index 0000000..ede8602
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra194-gpio.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra194-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA194_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA194_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA194_MAIN_GPIO_PORT_A 0
+#define TEGRA194_MAIN_GPIO_PORT_B 1
+#define TEGRA194_MAIN_GPIO_PORT_C 2
+#define TEGRA194_MAIN_GPIO_PORT_D 3
+#define TEGRA194_MAIN_GPIO_PORT_E 4
+#define TEGRA194_MAIN_GPIO_PORT_F 5
+#define TEGRA194_MAIN_GPIO_PORT_G 6
+#define TEGRA194_MAIN_GPIO_PORT_H 7
+#define TEGRA194_MAIN_GPIO_PORT_I 8
+#define TEGRA194_MAIN_GPIO_PORT_J 9
+#define TEGRA194_MAIN_GPIO_PORT_K 10
+#define TEGRA194_MAIN_GPIO_PORT_L 11
+#define TEGRA194_MAIN_GPIO_PORT_M 12
+#define TEGRA194_MAIN_GPIO_PORT_N 13
+#define TEGRA194_MAIN_GPIO_PORT_O 14
+#define TEGRA194_MAIN_GPIO_PORT_P 15
+#define TEGRA194_MAIN_GPIO_PORT_Q 16
+#define TEGRA194_MAIN_GPIO_PORT_R 17
+#define TEGRA194_MAIN_GPIO_PORT_S 18
+#define TEGRA194_MAIN_GPIO_PORT_T 19
+#define TEGRA194_MAIN_GPIO_PORT_U 20
+#define TEGRA194_MAIN_GPIO_PORT_V 21
+#define TEGRA194_MAIN_GPIO_PORT_W 22
+#define TEGRA194_MAIN_GPIO_PORT_X 23
+#define TEGRA194_MAIN_GPIO_PORT_Y 24
+#define TEGRA194_MAIN_GPIO_PORT_Z 25
+#define TEGRA194_MAIN_GPIO_PORT_FF 26
+#define TEGRA194_MAIN_GPIO_PORT_GG 27
+
+#define TEGRA194_MAIN_GPIO(port, offset) \
+ ((TEGRA194_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA194_AON_GPIO_PORT_AA 0
+#define TEGRA194_AON_GPIO_PORT_BB 1
+#define TEGRA194_AON_GPIO_PORT_CC 2
+#define TEGRA194_AON_GPIO_PORT_DD 3
+#define TEGRA194_AON_GPIO_PORT_EE 4
+
+#define TEGRA194_AON_GPIO(port, offset) \
+ ((TEGRA194_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
diff --git a/include/dt-bindings/gpio/uniphier-gpio.h b/include/dt-bindings/gpio/uniphier-gpio.h
new file mode 100644
index 0000000..9f0ad17
--- /dev/null
+++ b/include/dt-bindings/gpio/uniphier-gpio.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada + */ + +#ifndef _DT_BINDINGS_GPIO_UNIPHIER_H +#define _DT_BINDINGS_GPIO_UNIPHIER_H + +#define UNIPHIER_GPIO_LINES_PER_BANK 8 + +#define UNIPHIER_GPIO_IRQ_OFFSET ((UNIPHIER_GPIO_LINES_PER_BANK) * 15) + +#define UNIPHIER_GPIO_PORT(bank, line) \ + ((UNIPHIER_GPIO_LINES_PER_BANK) * (bank) + (line)) + +#define UNIPHIER_GPIO_IRQ(n) ((UNIPHIER_GPIO_IRQ_OFFSET) + (n)) + +#endif /* _DT_BINDINGS_GPIO_UNIPHIER_H */ diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h new file mode 100644 index 0000000..0c12c38 --- /dev/null +++ b/include/dt-bindings/i2c/i2c.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides constants for I2C bindings + * + * Copyright (C) 2015 by Sang Engineering + * Copyright (C) 2015 by Renesas Electronics Corporation + * + * Wolfram Sang + */ + +#ifndef _DT_BINDINGS_I2C_I2C_H +#define _DT_BINDINGS_I2C_I2C_H + +#define I2C_TEN_BIT_ADDRESS (1 << 31) +#define I2C_OWN_SLAVE_ADDRESS (1 << 30) + +#endif diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h new file mode 100644 index 0000000..70f99db --- /dev/null +++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for configuring the AT91 SAMA5D2 ADC + */ + +#ifndef _DT_BINDINGS_IIO_ADC_AT91_SAMA5D2_ADC_H +#define _DT_BINDINGS_IIO_ADC_AT91_SAMA5D2_ADC_H + +/* X relative position channel index */ +#define AT91_SAMA5D2_ADC_X_CHANNEL 24 +/* Y relative position channel index */ +#define AT91_SAMA5D2_ADC_Y_CHANNEL 25 +/* pressure channel index */ +#define AT91_SAMA5D2_ADC_P_CHANNEL 26 + +#endif diff --git a/include/dt-bindings/iio/adc/fsl-imx25-gcq.h b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h new file mode 100644 index 0000000..08ef4d2 --- /dev/null +++ b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for configuring the I.MX25 ADC + */ + +#ifndef _DT_BINDINGS_IIO_ADC_FS_IMX25_GCQ_H +#define _DT_BINDINGS_IIO_ADC_FS_IMX25_GCQ_H + +#define MX25_ADC_REFP_YP 0 /* YP voltage reference */ +#define MX25_ADC_REFP_XP 1 /* XP voltage reference */ +#define MX25_ADC_REFP_EXT 2 /* External voltage reference */ +#define MX25_ADC_REFP_INT 3 /* Internal voltage reference */ + +#define MX25_ADC_REFN_XN 0 /* XN ground reference */ +#define MX25_ADC_REFN_YN 1 /* YN ground reference */ +#define MX25_ADC_REFN_NGND 2 /* Internal ground reference */ +#define MX25_ADC_REFN_NGND2 3 /* External ground reference */ + +#endif diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h new file mode 100644 index 0000000..82706b2 --- /dev/null +++ b/include/dt-bindings/iio/adc/ingenic,adc.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_IIO_ADC_INGENIC_ADC_H +#define _DT_BINDINGS_IIO_ADC_INGENIC_ADC_H + +/* ADC channel idx. 
*/ +#define INGENIC_ADC_AUX 0 +#define INGENIC_ADC_BATTERY 1 + +#endif diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h new file mode 100644 index 0000000..9f8c7b8 --- /dev/null +++ b/include/dt-bindings/iio/adi,ad5592r.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_ADI_AD5592R_H +#define _DT_BINDINGS_ADI_AD5592R_H + +#define CH_MODE_UNUSED 0 +#define CH_MODE_ADC 1 +#define CH_MODE_DAC 2 +#define CH_MODE_DAC_AND_ADC 3 +#define CH_MODE_GPIO 8 + +#define CH_OFFSTATE_PULLDOWN 0 +#define CH_OFFSTATE_OUT_LOW 1 +#define CH_OFFSTATE_OUT_HIGH 2 +#define CH_OFFSTATE_OUT_TRISTATE 3 + +#endif /* _DT_BINDINGS_ADI_AD5592R_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h new file mode 100644 index 0000000..61d556d --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-vadc.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2014,2018 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_H + +/* Voltage ADC channels */ +#define VADC_USBIN 0x00 +#define VADC_DCIN 0x01 +#define VADC_VCHG_SNS 0x02 +#define VADC_SPARE1_03 0x03 +#define VADC_USB_ID_MV 0x04 +#define VADC_VCOIN 0x05 +#define VADC_VBAT_SNS 0x06 +#define VADC_VSYS 0x07 +#define VADC_DIE_TEMP 0x08 +#define VADC_REF_625MV 0x09 +#define VADC_REF_1250MV 0x0a +#define VADC_CHG_TEMP 0x0b +#define VADC_SPARE1 0x0c +#define VADC_SPARE2 0x0d +#define VADC_GND_REF 0x0e +#define VADC_VDD_VADC 0x0f + +#define VADC_P_MUX1_1_1 0x10 +#define VADC_P_MUX2_1_1 0x11 +#define VADC_P_MUX3_1_1 0x12 +#define VADC_P_MUX4_1_1 0x13 +#define VADC_P_MUX5_1_1 0x14 +#define VADC_P_MUX6_1_1 0x15 +#define VADC_P_MUX7_1_1 0x16 +#define VADC_P_MUX8_1_1 0x17 +#define VADC_P_MUX9_1_1 0x18 +#define VADC_P_MUX10_1_1 0x19 +#define VADC_P_MUX11_1_1 0x1a +#define VADC_P_MUX12_1_1 0x1b +#define VADC_P_MUX13_1_1 0x1c +#define VADC_P_MUX14_1_1 0x1d +#define VADC_P_MUX15_1_1 0x1e +#define VADC_P_MUX16_1_1 0x1f + +#define VADC_P_MUX1_1_3 0x20 +#define VADC_P_MUX2_1_3 0x21 +#define VADC_P_MUX3_1_3 0x22 +#define VADC_P_MUX4_1_3 0x23 +#define VADC_P_MUX5_1_3 0x24 +#define VADC_P_MUX6_1_3 0x25 +#define VADC_P_MUX7_1_3 0x26 +#define VADC_P_MUX8_1_3 0x27 +#define VADC_P_MUX9_1_3 0x28 +#define VADC_P_MUX10_1_3 0x29 +#define VADC_P_MUX11_1_3 0x2a +#define VADC_P_MUX12_1_3 0x2b +#define VADC_P_MUX13_1_3 0x2c +#define VADC_P_MUX14_1_3 0x2d +#define VADC_P_MUX15_1_3 0x2e +#define VADC_P_MUX16_1_3 0x2f + +#define VADC_LR_MUX1_BAT_THERM 0x30 +#define VADC_LR_MUX2_BAT_ID 0x31 +#define VADC_LR_MUX3_XO_THERM 0x32 +#define VADC_LR_MUX4_AMUX_THM1 0x33 +#define VADC_LR_MUX5_AMUX_THM2 0x34 +#define VADC_LR_MUX6_AMUX_THM3 0x35 +#define VADC_LR_MUX7_HW_ID 0x36 +#define VADC_LR_MUX8_AMUX_THM4 0x37 +#define VADC_LR_MUX9_AMUX_THM5 0x38 +#define VADC_LR_MUX10_USB_ID 0x39 +#define VADC_AMUX_PU1 0x3a +#define VADC_AMUX_PU2 0x3b +#define VADC_LR_MUX3_BUF_XO_THERM 0x3c + +#define VADC_LR_MUX1_PU1_BAT_THERM 0x70 +#define VADC_LR_MUX2_PU1_BAT_ID 0x71 +#define VADC_LR_MUX3_PU1_XO_THERM 0x72 +#define VADC_LR_MUX4_PU1_AMUX_THM1 0x73 +#define VADC_LR_MUX5_PU1_AMUX_THM2 0x74 +#define VADC_LR_MUX6_PU1_AMUX_THM3 0x75 +#define VADC_LR_MUX7_PU1_AMUX_HW_ID 0x76 +#define VADC_LR_MUX8_PU1_AMUX_THM4 0x77 +#define VADC_LR_MUX9_PU1_AMUX_THM5 0x78 +#define VADC_LR_MUX10_PU1_AMUX_USB_ID 0x79 +#define VADC_LR_MUX3_BUF_PU1_XO_THERM 0x7c + +#define VADC_LR_MUX1_PU2_BAT_THERM 0xb0 +#define 
VADC_LR_MUX2_PU2_BAT_ID 0xb1 +#define VADC_LR_MUX3_PU2_XO_THERM 0xb2 +#define VADC_LR_MUX4_PU2_AMUX_THM1 0xb3 +#define VADC_LR_MUX5_PU2_AMUX_THM2 0xb4 +#define VADC_LR_MUX6_PU2_AMUX_THM3 0xb5 +#define VADC_LR_MUX7_PU2_AMUX_HW_ID 0xb6 +#define VADC_LR_MUX8_PU2_AMUX_THM4 0xb7 +#define VADC_LR_MUX9_PU2_AMUX_THM5 0xb8 +#define VADC_LR_MUX10_PU2_AMUX_USB_ID 0xb9 +#define VADC_LR_MUX3_BUF_PU2_XO_THERM 0xbc + +#define VADC_LR_MUX1_PU1_PU2_BAT_THERM 0xf0 +#define VADC_LR_MUX2_PU1_PU2_BAT_ID 0xf1 +#define VADC_LR_MUX3_PU1_PU2_XO_THERM 0xf2 +#define VADC_LR_MUX4_PU1_PU2_AMUX_THM1 0xf3 +#define VADC_LR_MUX5_PU1_PU2_AMUX_THM2 0xf4 +#define VADC_LR_MUX6_PU1_PU2_AMUX_THM3 0xf5 +#define VADC_LR_MUX7_PU1_PU2_AMUX_HW_ID 0xf6 +#define VADC_LR_MUX8_PU1_PU2_AMUX_THM4 0xf7 +#define VADC_LR_MUX9_PU1_PU2_AMUX_THM5 0xf8 +#define VADC_LR_MUX10_PU1_PU2_AMUX_USB_ID 0xf9 +#define VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM 0xfc + +/* ADC channels for SPMI PMIC5 */ + +#define ADC5_REF_GND 0x00 +#define ADC5_1P25VREF 0x01 +#define ADC5_VREF_VADC 0x02 +#define ADC5_VREF_VADC5_DIV_3 0x82 +#define ADC5_VPH_PWR 0x83 +#define ADC5_VBAT_SNS 0x84 +#define ADC5_VCOIN 0x85 +#define ADC5_DIE_TEMP 0x06 +#define ADC5_USB_IN_I 0x07 +#define ADC5_USB_IN_V_16 0x08 +#define ADC5_CHG_TEMP 0x09 +#define ADC5_BAT_THERM 0x0a +#define ADC5_BAT_ID 0x0b +#define ADC5_XO_THERM 0x0c +#define ADC5_AMUX_THM1 0x0d +#define ADC5_AMUX_THM2 0x0e +#define ADC5_AMUX_THM3 0x0f +#define ADC5_AMUX_THM4 0x10 +#define ADC5_AMUX_THM5 0x11 +#define ADC5_GPIO1 0x12 +#define ADC5_GPIO2 0x13 +#define ADC5_GPIO3 0x14 +#define ADC5_GPIO4 0x15 +#define ADC5_GPIO5 0x16 +#define ADC5_GPIO6 0x17 +#define ADC5_GPIO7 0x18 +#define ADC5_SBUx 0x99 +#define ADC5_MID_CHG_DIV6 0x1e +#define ADC5_OFF 0xff + +/* 30k pull-up1 */ +#define ADC5_BAT_THERM_30K_PU 0x2a +#define ADC5_BAT_ID_30K_PU 0x2b +#define ADC5_XO_THERM_30K_PU 0x2c +#define ADC5_AMUX_THM1_30K_PU 0x2d +#define ADC5_AMUX_THM2_30K_PU 0x2e +#define ADC5_AMUX_THM3_30K_PU 0x2f +#define ADC5_AMUX_THM4_30K_PU 0x30 +#define ADC5_AMUX_THM5_30K_PU 0x31 +#define ADC5_GPIO1_30K_PU 0x32 +#define ADC5_GPIO2_30K_PU 0x33 +#define ADC5_GPIO3_30K_PU 0x34 +#define ADC5_GPIO4_30K_PU 0x35 +#define ADC5_GPIO5_30K_PU 0x36 +#define ADC5_GPIO6_30K_PU 0x37 +#define ADC5_GPIO7_30K_PU 0x38 +#define ADC5_SBUx_30K_PU 0x39 + +/* 100k pull-up2 */ +#define ADC5_BAT_THERM_100K_PU 0x4a +#define ADC5_BAT_ID_100K_PU 0x4b +#define ADC5_XO_THERM_100K_PU 0x4c +#define ADC5_AMUX_THM1_100K_PU 0x4d +#define ADC5_AMUX_THM2_100K_PU 0x4e +#define ADC5_AMUX_THM3_100K_PU 0x4f +#define ADC5_AMUX_THM4_100K_PU 0x50 +#define ADC5_AMUX_THM5_100K_PU 0x51 +#define ADC5_GPIO1_100K_PU 0x52 +#define ADC5_GPIO2_100K_PU 0x53 +#define ADC5_GPIO3_100K_PU 0x54 +#define ADC5_GPIO4_100K_PU 0x55 +#define ADC5_GPIO5_100K_PU 0x56 +#define ADC5_GPIO6_100K_PU 0x57 +#define ADC5_GPIO7_100K_PU 0x58 +#define ADC5_SBUx_100K_PU 0x59 + +/* 400k pull-up3 */ +#define ADC5_BAT_THERM_400K_PU 0x6a +#define ADC5_BAT_ID_400K_PU 0x6b +#define ADC5_XO_THERM_400K_PU 0x6c +#define ADC5_AMUX_THM1_400K_PU 0x6d +#define ADC5_AMUX_THM2_400K_PU 0x6e +#define ADC5_AMUX_THM3_400K_PU 0x6f +#define ADC5_AMUX_THM4_400K_PU 0x70 +#define ADC5_AMUX_THM5_400K_PU 0x71 +#define ADC5_GPIO1_400K_PU 0x72 +#define ADC5_GPIO2_400K_PU 0x73 +#define ADC5_GPIO3_400K_PU 0x74 +#define ADC5_GPIO4_400K_PU 0x75 +#define ADC5_GPIO5_400K_PU 0x76 +#define ADC5_GPIO6_400K_PU 0x77 +#define ADC5_GPIO7_400K_PU 0x78 +#define ADC5_SBUx_400K_PU 0x79 + +/* 1/3 Divider */ +#define ADC5_GPIO1_DIV3 0x92 +#define ADC5_GPIO2_DIV3 0x93 +#define 
ADC5_GPIO3_DIV3 0x94 +#define ADC5_GPIO4_DIV3 0x95 +#define ADC5_GPIO5_DIV3 0x96 +#define ADC5_GPIO6_DIV3 0x97 +#define ADC5_GPIO7_DIV3 0x98 +#define ADC5_SBUx_DIV3 0x99 + +/* Current and combined current/voltage channels */ +#define ADC5_INT_EXT_ISENSE 0xa1 +#define ADC5_PARALLEL_ISENSE 0xa5 +#define ADC5_CUR_REPLICA_VDS 0xa7 +#define ADC5_CUR_SENS_BATFET_VDS_OFFSET 0xa9 +#define ADC5_CUR_SENS_REPLICA_VDS_OFFSET 0xab +#define ADC5_EXT_SENS_OFFSET 0xad + +#define ADC5_INT_EXT_ISENSE_VBAT_VDATA 0xb0 +#define ADC5_INT_EXT_ISENSE_VBAT_IDATA 0xb1 +#define ADC5_EXT_ISENSE_VBAT_VDATA 0xb2 +#define ADC5_EXT_ISENSE_VBAT_IDATA 0xb3 +#define ADC5_PARALLEL_ISENSE_VBAT_VDATA 0xb4 +#define ADC5_PARALLEL_ISENSE_VBAT_IDATA 0xb5 + +#define ADC5_MAX_CHANNEL 0xc0 + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */ diff --git a/include/dt-bindings/iio/temperature/thermocouple.h b/include/dt-bindings/iio/temperature/thermocouple.h new file mode 100644 index 0000000..ce037f5 --- /dev/null +++ b/include/dt-bindings/iio/temperature/thermocouple.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H +#define _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H + + +#define THERMOCOUPLE_TYPE_B 0x00 +#define THERMOCOUPLE_TYPE_E 0x01 +#define THERMOCOUPLE_TYPE_J 0x02 +#define THERMOCOUPLE_TYPE_K 0x03 +#define THERMOCOUPLE_TYPE_N 0x04 +#define THERMOCOUPLE_TYPE_R 0x05 +#define THERMOCOUPLE_TYPE_S 0x06 +#define THERMOCOUPLE_TYPE_T 0x07 + +#endif /* _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H */ diff --git a/include/dt-bindings/input/gpio-keys.h b/include/dt-bindings/input/gpio-keys.h new file mode 100644 index 0000000..8962df7 --- /dev/null +++ b/include/dt-bindings/input/gpio-keys.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for gpio keys bindings. + */ + +#ifndef _DT_BINDINGS_GPIO_KEYS_H +#define _DT_BINDINGS_GPIO_KEYS_H + +#define EV_ACT_ANY 0x00 /* asserted or deasserted */ +#define EV_ACT_ASSERTED 0x01 /* asserted */ +#define EV_ACT_DEASSERTED 0x02 /* deasserted */ + +#endif /* _DT_BINDINGS_GPIO_KEYS_H */ diff --git a/include/dt-bindings/input/input.h b/include/dt-bindings/input/input.h new file mode 100644 index 0000000..bcf0ae1 --- /dev/null +++ b/include/dt-bindings/input/input.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most input bindings. + * + * Most input bindings include key code, matrix key code format. + * In most cases, key code and matrix key code format uses + * the standard values/macro defined in this header. + */ + +#ifndef _DT_BINDINGS_INPUT_INPUT_H +#define _DT_BINDINGS_INPUT_INPUT_H + +#include "linux-event-codes.h" + +#define MATRIX_KEY(row, col, code) \ + ((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF)) + +#endif /* _DT_BINDINGS_INPUT_INPUT_H */ diff --git a/include/dt-bindings/input/linux-event-codes.h b/include/dt-bindings/input/linux-event-codes.h new file mode 120000 index 0000000..693bbcd --- /dev/null +++ b/include/dt-bindings/input/linux-event-codes.h @@ -0,0 +1 @@ +../../uapi/linux/input-event-codes.h \ No newline at end of file diff --git a/include/dt-bindings/input/ti-drv260x.h b/include/dt-bindings/input/ti-drv260x.h new file mode 100644 index 0000000..af71082 --- /dev/null +++ b/include/dt-bindings/input/ti-drv260x.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DRV260X haptics driver family + * + * Author: Dan Murphy + * + * Copyright: (C) 2014 Texas Instruments, Inc. 
+ */ + +#ifndef _DT_BINDINGS_TI_DRV260X_H +#define _DT_BINDINGS_TI_DRV260X_H + +/* Calibration Types */ +#define DRV260X_LRA_MODE 0x00 +#define DRV260X_LRA_NO_CAL_MODE 0x01 +#define DRV260X_ERM_MODE 0x02 + +/* Library Selection */ +#define DRV260X_LIB_EMPTY 0x00 +#define DRV260X_ERM_LIB_A 0x01 +#define DRV260X_ERM_LIB_B 0x02 +#define DRV260X_ERM_LIB_C 0x03 +#define DRV260X_ERM_LIB_D 0x04 +#define DRV260X_ERM_LIB_E 0x05 +#define DRV260X_LIB_LRA 0x06 +#define DRV260X_ERM_LIB_F 0x07 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,qcs404.h b/include/dt-bindings/interconnect/qcom,qcs404.h new file mode 100644 index 0000000..960f6e3 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,qcs404.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm interconnect IDs + * + * Copyright (c) 2019, Linaro Ltd. + * Author: Georgi Djakov + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS404_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS404_H + +#define MASTER_AMPSS_M0 0 +#define MASTER_OXILI 1 +#define MASTER_MDP_PORT0 2 +#define MASTER_SNOC_BIMC_1 3 +#define MASTER_TCU_0 4 +#define SLAVE_EBI_CH0 5 +#define SLAVE_BIMC_SNOC 6 + +#define MASTER_SPDM 0 +#define MASTER_BLSP_1 1 +#define MASTER_BLSP_2 2 +#define MASTER_XI_USB_HS1 3 +#define MASTER_CRYPT0 4 +#define MASTER_SDCC_1 5 +#define MASTER_SDCC_2 6 +#define MASTER_SNOC_PCNOC 7 +#define MASTER_QPIC 8 +#define PCNOC_INT_0 9 +#define PCNOC_INT_2 10 +#define PCNOC_INT_3 11 +#define PCNOC_S_0 12 +#define PCNOC_S_1 13 +#define PCNOC_S_2 14 +#define PCNOC_S_3 15 +#define PCNOC_S_4 16 +#define PCNOC_S_6 17 +#define PCNOC_S_7 18 +#define PCNOC_S_8 19 +#define PCNOC_S_9 20 +#define PCNOC_S_10 21 +#define PCNOC_S_11 22 +#define SLAVE_SPDM 23 +#define SLAVE_PDM 24 +#define SLAVE_PRNG 25 +#define SLAVE_TCSR 26 +#define SLAVE_SNOC_CFG 27 +#define SLAVE_MESSAGE_RAM 28 +#define SLAVE_DISP_SS_CFG 29 +#define SLAVE_GPU_CFG 30 +#define SLAVE_BLSP_1 31 +#define SLAVE_BLSP_2 32 +#define SLAVE_TLMM_NORTH 33 +#define SLAVE_PCIE 34 +#define SLAVE_ETHERNET 35 +#define SLAVE_TLMM_EAST 36 +#define SLAVE_TCU 37 +#define SLAVE_PMIC_ARB 38 +#define SLAVE_SDCC_1 39 +#define SLAVE_SDCC_2 40 +#define SLAVE_TLMM_SOUTH 41 +#define SLAVE_USB_HS 42 +#define SLAVE_USB3 43 +#define SLAVE_CRYPTO_0_CFG 44 +#define SLAVE_PCNOC_SNOC 45 + +#define MASTER_QDSS_BAM 0 +#define MASTER_BIMC_SNOC 1 +#define MASTER_PCNOC_SNOC 2 +#define MASTER_QDSS_ETR 3 +#define MASTER_EMAC 4 +#define MASTER_PCIE 5 +#define MASTER_USB3 6 +#define QDSS_INT 7 +#define SNOC_INT_0 8 +#define SNOC_INT_1 9 +#define SNOC_INT_2 10 +#define SLAVE_KPSS_AHB 11 +#define SLAVE_WCSS 12 +#define SLAVE_SNOC_BIMC_1 13 +#define SLAVE_IMEM 14 +#define SLAVE_SNOC_PCNOC 15 +#define SLAVE_QDSS_STM 16 +#define SLAVE_CATS_0 17 +#define SLAVE_CATS_1 18 +#define SLAVE_LPASS 19 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h new file mode 100644 index 0000000..7b2393b --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdm845.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm SDM845 interconnect IDs + * + * Copyright (c) 2018, Linaro Ltd. 
+ * Author: Georgi Djakov + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_BLSP_1 1 +#define MASTER_TSIF 2 +#define MASTER_SDCC_2 3 +#define MASTER_SDCC_4 4 +#define MASTER_UFS_CARD 5 +#define MASTER_UFS_MEM 6 +#define MASTER_PCIE_0 7 +#define MASTER_A2NOC_CFG 8 +#define MASTER_QDSS_BAM 9 +#define MASTER_BLSP_2 10 +#define MASTER_CNOC_A2NOC 11 +#define MASTER_CRYPTO 12 +#define MASTER_IPA 13 +#define MASTER_PCIE_1 14 +#define MASTER_QDSS_ETR 15 +#define MASTER_USB3_0 16 +#define MASTER_USB3_1 17 +#define MASTER_CAMNOC_HF0_UNCOMP 18 +#define MASTER_CAMNOC_HF1_UNCOMP 19 +#define MASTER_CAMNOC_SF_UNCOMP 20 +#define MASTER_SPDM 21 +#define MASTER_TIC 22 +#define MASTER_SNOC_CNOC 23 +#define MASTER_QDSS_DAP 24 +#define MASTER_CNOC_DC_NOC 25 +#define MASTER_APPSS_PROC 26 +#define MASTER_GNOC_CFG 27 +#define MASTER_LLCC 28 +#define MASTER_TCU_0 29 +#define MASTER_MEM_NOC_CFG 30 +#define MASTER_GNOC_MEM_NOC 31 +#define MASTER_MNOC_HF_MEM_NOC 32 +#define MASTER_MNOC_SF_MEM_NOC 33 +#define MASTER_SNOC_GC_MEM_NOC 34 +#define MASTER_SNOC_SF_MEM_NOC 35 +#define MASTER_GFX3D 36 +#define MASTER_CNOC_MNOC_CFG 37 +#define MASTER_CAMNOC_HF0 38 +#define MASTER_CAMNOC_HF1 39 +#define MASTER_CAMNOC_SF 40 +#define MASTER_MDP0 41 +#define MASTER_MDP1 42 +#define MASTER_ROTATOR 43 +#define MASTER_VIDEO_P0 44 +#define MASTER_VIDEO_P1 45 +#define MASTER_VIDEO_PROC 46 +#define MASTER_SNOC_CFG 47 +#define MASTER_A1NOC_SNOC 48 +#define MASTER_A2NOC_SNOC 49 +#define MASTER_GNOC_SNOC 50 +#define MASTER_MEM_NOC_SNOC 51 +#define MASTER_ANOC_PCIE_SNOC 52 +#define MASTER_PIMEM 53 +#define MASTER_GIC 54 +#define SLAVE_A1NOC_SNOC 55 +#define SLAVE_SERVICE_A1NOC 56 +#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57 +#define SLAVE_A2NOC_SNOC 58 +#define SLAVE_ANOC_PCIE_SNOC 59 +#define SLAVE_SERVICE_A2NOC 60 +#define SLAVE_CAMNOC_UNCOMP 61 +#define SLAVE_A1NOC_CFG 62 +#define SLAVE_A2NOC_CFG 63 +#define SLAVE_AOP 64 +#define SLAVE_AOSS 65 +#define SLAVE_CAMERA_CFG 66 +#define SLAVE_CLK_CTL 67 +#define SLAVE_CDSP_CFG 68 +#define SLAVE_RBCPR_CX_CFG 69 +#define SLAVE_CRYPTO_0_CFG 70 +#define SLAVE_DCC_CFG 71 +#define SLAVE_CNOC_DDRSS 72 +#define SLAVE_DISPLAY_CFG 73 +#define SLAVE_GLM 74 +#define SLAVE_GFX3D_CFG 75 +#define SLAVE_IMEM_CFG 76 +#define SLAVE_IPA_CFG 77 +#define SLAVE_CNOC_MNOC_CFG 78 +#define SLAVE_PCIE_0_CFG 79 +#define SLAVE_PCIE_1_CFG 80 +#define SLAVE_PDM 81 +#define SLAVE_SOUTH_PHY_CFG 82 +#define SLAVE_PIMEM_CFG 83 +#define SLAVE_PRNG 84 +#define SLAVE_QDSS_CFG 85 +#define SLAVE_BLSP_2 86 +#define SLAVE_BLSP_1 87 +#define SLAVE_SDCC_2 88 +#define SLAVE_SDCC_4 89 +#define SLAVE_SNOC_CFG 90 +#define SLAVE_SPDM_WRAPPER 91 +#define SLAVE_SPSS_CFG 92 +#define SLAVE_TCSR 93 +#define SLAVE_TLMM_NORTH 94 +#define SLAVE_TLMM_SOUTH 95 +#define SLAVE_TSIF 96 +#define SLAVE_UFS_CARD_CFG 97 +#define SLAVE_UFS_MEM_CFG 98 +#define SLAVE_USB3_0 99 +#define SLAVE_USB3_1 100 +#define SLAVE_VENUS_CFG 101 +#define SLAVE_VSENSE_CTRL_CFG 102 +#define SLAVE_CNOC_A2NOC 103 +#define SLAVE_SERVICE_CNOC 104 +#define SLAVE_LLCC_CFG 105 +#define SLAVE_MEM_NOC_CFG 106 +#define SLAVE_GNOC_SNOC 107 +#define SLAVE_GNOC_MEM_NOC 108 +#define SLAVE_SERVICE_GNOC 109 +#define SLAVE_EBI1 110 +#define SLAVE_MSS_PROC_MS_MPU_CFG 111 +#define SLAVE_MEM_NOC_GNOC 112 +#define SLAVE_LLCC 113 +#define SLAVE_MEM_NOC_SNOC 114 +#define SLAVE_SERVICE_MEM_NOC 115 +#define SLAVE_MNOC_SF_MEM_NOC 116 +#define SLAVE_MNOC_HF_MEM_NOC 117 +#define SLAVE_SERVICE_MNOC 118 
+#define SLAVE_APPSS 119
+#define SLAVE_SNOC_CNOC 120
+#define SLAVE_SNOC_MEM_NOC_GC 121
+#define SLAVE_SNOC_MEM_NOC_SF 122
+#define SLAVE_IMEM 123
+#define SLAVE_PCIE_0 124
+#define SLAVE_PCIE_1 125
+#define SLAVE_PIMEM 126
+#define SLAVE_SERVICE_SNOC 127
+#define SLAVE_QDSS_STM 128
+#define SLAVE_TCU 129
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
new file mode 100644
index 0000000..35b6f69
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * This header provides constants for the ARM GIC.
+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ARM_GIC_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ARM_GIC_H
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/* interrupt specifier cell 0 */
+
+#define GIC_SPI 0
+#define GIC_PPI 1
+
+/*
+ * Interrupt specifier cell 2.
+ * The flags in irq.h are valid, plus those below.
+ */
+#define GIC_CPU_MASK_RAW(x) ((x) << 8)
+#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/irq-st.h b/include/dt-bindings/interrupt-controller/irq-st.h
new file mode 100644
index 0000000..9c9c8e2
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irq-st.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/irq-st.h
+ *
+ * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
+ *
+ * Author: Lee Jones
+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H
+
+#define ST_IRQ_SYSCFG_EXT_0 0
+#define ST_IRQ_SYSCFG_EXT_1 1
+#define ST_IRQ_SYSCFG_EXT_2 2
+#define ST_IRQ_SYSCFG_CTI_0 3
+#define ST_IRQ_SYSCFG_CTI_1 4
+#define ST_IRQ_SYSCFG_PMU_0 5
+#define ST_IRQ_SYSCFG_PMU_1 6
+#define ST_IRQ_SYSCFG_pl310_L2 7
+#define ST_IRQ_SYSCFG_DISABLED 0xFFFFFFFF
+
+#define ST_IRQ_SYSCFG_EXT_1_INV 0x1
+#define ST_IRQ_SYSCFG_EXT_2_INV 0x2
+#define ST_IRQ_SYSCFG_EXT_3_INV 0x4
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/irq.h b/include/dt-bindings/interrupt-controller/irq.h
new file mode 100644
index 0000000..9e3d183
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irq.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * This header provides constants for most IRQ bindings.
+ *
+ * Most IRQ bindings include a flags cell as part of the IRQ specifier.
+ * In most cases, the format of the flags cell uses the standard values
+ * defined in this header.
+ */
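+
+/*
+ * For illustration, a hedged consumer sketch (the device node and the SPI
+ * number 42 are assumptions): on an ARM GIC the IRQ_TYPE_* values below
+ * fill the third specifier cell, e.g.
+ *
+ *   interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ */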
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_IRQ_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_IRQ_H
+
+#define IRQ_TYPE_NONE 0
+#define IRQ_TYPE_EDGE_RISING 1
+#define IRQ_TYPE_EDGE_FALLING 2
+#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
+#define IRQ_TYPE_LEVEL_HIGH 4
+#define IRQ_TYPE_LEVEL_LOW 8
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h
new file mode 100644
index 0000000..bd45cee
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/mips-gic.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#define GIC_SHARED 0
+#define GIC_LOCAL 1
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/mvebu-icu.h b/include/dt-bindings/interrupt-controller/mvebu-icu.h
new file mode 100644
index 0000000..bb5217c
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/mvebu-icu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the MVEBU ICU driver.
+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MVEBU_ICU_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MVEBU_ICU_H
+
+/* interrupt specifier cell 0 */
+
+#define ICU_GRP_NSR 0x0
+#define ICU_GRP_SR 0x1
+#define ICU_GRP_SEI 0x4
+#define ICU_GRP_REI 0x5
+
+#endif
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
new file mode 100644
index 0000000..9e1256a
--- /dev/null
+++ b/include/dt-bindings/leds/common.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides macros for the common LEDs device tree bindings.
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski + * + * Copyright (C) 2019 Jacek Anaszewski + */ + +#ifndef __DT_BINDINGS_LEDS_H +#define __DT_BINDINGS_LEDS_H + +/* External trigger type */ +#define LEDS_TRIG_TYPE_EDGE 0 +#define LEDS_TRIG_TYPE_LEVEL 1 + +/* Boost modes */ +#define LEDS_BOOST_OFF 0 +#define LEDS_BOOST_ADAPTIVE 1 +#define LEDS_BOOST_FIXED 2 + +/* Standard LED colors */ +#define LED_COLOR_ID_WHITE 0 +#define LED_COLOR_ID_RED 1 +#define LED_COLOR_ID_GREEN 2 +#define LED_COLOR_ID_BLUE 3 +#define LED_COLOR_ID_AMBER 4 +#define LED_COLOR_ID_VIOLET 5 +#define LED_COLOR_ID_YELLOW 6 +#define LED_COLOR_ID_IR 7 +#define LED_COLOR_ID_MAX 8 + +/* Standard LED functions */ +#define LED_FUNCTION_ACTIVITY "activity" +#define LED_FUNCTION_ALARM "alarm" +#define LED_FUNCTION_BACKLIGHT "backlight" +#define LED_FUNCTION_BLUETOOTH "bluetooth" +#define LED_FUNCTION_BOOT "boot" +#define LED_FUNCTION_CPU "cpu" +#define LED_FUNCTION_CAPSLOCK "capslock" +#define LED_FUNCTION_CHARGING "charging" +#define LED_FUNCTION_DEBUG "debug" +#define LED_FUNCTION_DISK "disk" +#define LED_FUNCTION_DISK_ACTIVITY "disk-activity" +#define LED_FUNCTION_DISK_ERR "disk-err" +#define LED_FUNCTION_DISK_READ "disk-read" +#define LED_FUNCTION_DISK_WRITE "disk-write" +#define LED_FUNCTION_FAULT "fault" +#define LED_FUNCTION_FLASH "flash" +#define LED_FUNCTION_HEARTBEAT "heartbeat" +#define LED_FUNCTION_INDICATOR "indicator" +#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight" +#define LED_FUNCTION_LAN "lan" +#define LED_FUNCTION_MAIL "mail" +#define LED_FUNCTION_MTD "mtd" +#define LED_FUNCTION_MICMUTE "micmute" +#define LED_FUNCTION_MUTE "mute" +#define LED_FUNCTION_NUMLOCK "numlock" +#define LED_FUNCTION_PANIC "panic" +#define LED_FUNCTION_PROGRAMMING "programming" +#define LED_FUNCTION_POWER "power" +#define LED_FUNCTION_RX "rx" +#define LED_FUNCTION_SD "sd" +#define LED_FUNCTION_SCROLLLOCK "scrolllock" +#define LED_FUNCTION_STANDBY "standby" +#define LED_FUNCTION_STATUS "status" +#define LED_FUNCTION_TORCH "torch" +#define LED_FUNCTION_TX "tx" +#define LED_FUNCTION_USB "usb" +#define LED_FUNCTION_WAN "wan" +#define LED_FUNCTION_WLAN "wlan" +#define LED_FUNCTION_WPS "wps" + +#endif /* __DT_BINDINGS_LEDS_H */ diff --git a/include/dt-bindings/leds/leds-netxbig.h b/include/dt-bindings/leds/leds-netxbig.h new file mode 100644 index 0000000..92658b0 --- /dev/null +++ b/include/dt-bindings/leds/leds-netxbig.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for netxbig LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef _DT_BINDINGS_LEDS_NETXBIG_H +#define _DT_BINDINGS_LEDS_NETXBIG_H + +#define NETXBIG_LED_OFF 0 +#define NETXBIG_LED_ON 1 +#define NETXBIG_LED_SATA 2 +#define NETXBIG_LED_TIMER1 3 +#define NETXBIG_LED_TIMER2 4 + +#endif /* _DT_BINDINGS_LEDS_NETXBIG_H */ diff --git a/include/dt-bindings/leds/leds-ns2.h b/include/dt-bindings/leds/leds-ns2.h new file mode 100644 index 0000000..fd61574 --- /dev/null +++ b/include/dt-bindings/leds/leds-ns2.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_LEDS_NS2_H +#define _DT_BINDINGS_LEDS_NS2_H + +#define NS_V2_LED_OFF 0 +#define NS_V2_LED_ON 1 +#define NS_V2_LED_SATA 2 + +#endif diff --git a/include/dt-bindings/leds/leds-pca9532.h b/include/dt-bindings/leds/leds-pca9532.h new file mode 100644 index 0000000..4d917aa --- /dev/null +++ b/include/dt-bindings/leds/leds-pca9532.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for pca9532 LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _DT_BINDINGS_LEDS_PCA9532_H +#define _DT_BINDINGS_LEDS_PCA9532_H + +#define PCA9532_TYPE_NONE 0 +#define PCA9532_TYPE_LED 1 +#define PCA9532_TYPE_N2100_BEEP 2 +#define PCA9532_TYPE_GPIO 3 +#define PCA9532_LED_TIMER2 4 + +#endif /* _DT_BINDINGS_LEDS_PCA9532_H */ diff --git a/include/dt-bindings/leds/leds-pca955x.h b/include/dt-bindings/leds/leds-pca955x.h new file mode 100644 index 0000000..78cb7e9 --- /dev/null +++ b/include/dt-bindings/leds/leds-pca955x.h @@ -0,0 +1,16 @@ +/* + * This header provides constants for pca955x LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _DT_BINDINGS_LEDS_PCA955X_H +#define _DT_BINDINGS_LEDS_PCA955X_H + +#define PCA955X_TYPE_NONE 0 +#define PCA955X_TYPE_LED 1 +#define PCA955X_TYPE_GPIO 2 + +#endif /* _DT_BINDINGS_LEDS_PCA955X_H */ diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h new file mode 100644 index 0000000..3bdec7a --- /dev/null +++ b/include/dt-bindings/mailbox/tegra186-hsp.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra186-hsp. + */ + +#ifndef _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H +#define _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H + +/* + * These define the type of mailbox that is to be used (doorbell, shared + * mailbox, shared semaphore or arbitrated semaphore). + */ +#define TEGRA_HSP_MBOX_TYPE_DB 0x0 +#define TEGRA_HSP_MBOX_TYPE_SM 0x1 +#define TEGRA_HSP_MBOX_TYPE_SS 0x2 +#define TEGRA_HSP_MBOX_TYPE_AS 0x3 + +/* + * These defines represent the bit associated with the given master ID in the + * doorbell registers. + */ +#define TEGRA_HSP_DB_MASTER_CCPLEX 17 +#define TEGRA_HSP_DB_MASTER_BPMP 19 + +/* + * Shared mailboxes are unidirectional, so the direction needs to be specified + * in the device tree. 
+ */
+#define TEGRA_HSP_SM_MASK 0x00ffffff
+#define TEGRA_HSP_SM_FLAG_RX (0 << 31)
+#define TEGRA_HSP_SM_FLAG_TX (1 << 31)
+
+#define TEGRA_HSP_SM_RX(x) (TEGRA_HSP_SM_FLAG_RX | ((x) & TEGRA_HSP_SM_MASK))
+#define TEGRA_HSP_SM_TX(x) (TEGRA_HSP_SM_FLAG_TX | ((x) & TEGRA_HSP_SM_MASK))
+
+#endif
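The RX/TX macros above pack a direction flag and a mailbox index into a single specifier cell: bit 31 carries the direction, the low 24 bits carry the index. A minimal sketch of what they evaluate to (illustrative only; it assumes this header is reachable via -Iinclude, and mailbox index 1 is an arbitrary example value):

#include <stdio.h>
#include <dt-bindings/mailbox/tegra186-hsp.h>

int main(void)
{
	/* bit 31 selects TX vs RX, the low 24 bits carry the mailbox index */
	printf("%#010x\n", (unsigned int)TEGRA_HSP_SM_TX(1)); /* 0x80000001 */
	printf("%#010x\n", (unsigned int)TEGRA_HSP_SM_RX(1)); /* 0x00000001 */
	return 0;
}
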
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
new file mode 100644
index 0000000..6b1fb6f
--- /dev/null
+++ b/include/dt-bindings/media/c8sectpfe.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_C8SECTPFE_H
+#define __DT_C8SECTPFE_H
+
+#define STV0367_TDA18212_NIMA_1 0
+#define STV0367_TDA18212_NIMA_2 1
+#define STV0367_TDA18212_NIMB_1 2
+#define STV0367_TDA18212_NIMB_2 3
+
+#define STV0903_6110_LNB24_NIMA 4
+#define STV0903_6110_LNB24_NIMB 5
+
+#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/media/omap3-isp.h b/include/dt-bindings/media/omap3-isp.h
new file mode 100644
index 0000000..436c712
--- /dev/null
+++ b/include/dt-bindings/media/omap3-isp.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/dt-bindings/media/omap3-isp.h
+ *
+ * Copyright (C) 2015 Sakari Ailus
+ */
+
+#ifndef __DT_BINDINGS_OMAP3_ISP_H__
+#define __DT_BINDINGS_OMAP3_ISP_H__
+
+#define OMAP3ISP_PHY_TYPE_COMPLEX_IO 0
+#define OMAP3ISP_PHY_TYPE_CSIPHY 1
+
+#endif /* __DT_BINDINGS_OMAP3_ISP_H__ */
diff --git a/include/dt-bindings/media/tda1997x.h b/include/dt-bindings/media/tda1997x.h
new file mode 100644
index 0000000..bd9fbd7
--- /dev/null
+++ b/include/dt-bindings/media/tda1997x.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Gateworks Corporation
+ */
+#ifndef _DT_BINDINGS_MEDIA_TDA1997X_H
+#define _DT_BINDINGS_MEDIA_TDA1997X_H
+
+/* TDA19973 36bit Video Port control registers */
+#define TDA1997X_VP36_35_32 0
+#define TDA1997X_VP36_31_28 1
+#define TDA1997X_VP36_27_24 2
+#define TDA1997X_VP36_23_20 3
+#define TDA1997X_VP36_19_16 4
+#define TDA1997X_VP36_15_12 5
+#define TDA1997X_VP36_11_08 6
+#define TDA1997X_VP36_07_04 7
+#define TDA1997X_VP36_03_00 8
+
+/* TDA19971 24bit Video Port control registers */
+#define TDA1997X_VP24_V23_20 0
+#define TDA1997X_VP24_V19_16 1
+#define TDA1997X_VP24_V15_12 3
+#define TDA1997X_VP24_V11_08 4
+#define TDA1997X_VP24_V07_04 6
+#define TDA1997X_VP24_V03_00 7
+
+/* Pin groups */
+#define TDA1997X_VP_OUT_EN 0x80 /* enable output group */
+#define TDA1997X_VP_HIZ 0x40 /* hi-Z output group when not used */
+#define TDA1997X_VP_SWAP 0x10 /* pin-swap output group */
+#define TDA1997X_R_CR_CBCR_3_0 (0 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_R_CR_CBCR_7_4 (1 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_R_CR_CBCR_11_8 (2 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_B_CB_3_0 (3 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_B_CB_7_4 (4 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_B_CB_11_8 (5 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_G_Y_3_0 (6 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_G_Y_7_4 (7 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_G_Y_11_8 (8 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+/* pinswapped groups */
+#define TDA1997X_R_CR_CBCR_3_0_S (TDA1997X_R_CR_CBCR_3_0 | TDA1997X_VP_SWAP)
+#define TDA1997X_R_CR_CBCR_7_4_S (TDA1997X_R_CR_CBCR_7_4 | TDA1997X_VP_SWAP)
+#define TDA1997X_R_CR_CBCR_11_8_S (TDA1997X_R_CR_CBCR_11_8 | TDA1997X_VP_SWAP)
+#define TDA1997X_B_CB_3_0_S (TDA1997X_B_CB_3_0 | TDA1997X_VP_SWAP)
+#define TDA1997X_B_CB_7_4_S (TDA1997X_B_CB_7_4 | TDA1997X_VP_SWAP)
+#define TDA1997X_B_CB_11_8_S (TDA1997X_B_CB_11_8 | TDA1997X_VP_SWAP)
+#define TDA1997X_G_Y_3_0_S (TDA1997X_G_Y_3_0 | TDA1997X_VP_SWAP)
+#define TDA1997X_G_Y_7_4_S (TDA1997X_G_Y_7_4 | TDA1997X_VP_SWAP)
+#define TDA1997X_G_Y_11_8_S (TDA1997X_G_Y_11_8 | TDA1997X_VP_SWAP)
+
+/* Audio bus DAI format */
+#define TDA1997X_I2S16 1 /* I2S 16bit */
+#define TDA1997X_I2S32 2 /* I2S 32bit */
+#define TDA1997X_SPDIF 3 /* SPDIF */
+#define TDA1997X_OBA 4 /* One Bit Audio */
+#define TDA1997X_DST 5 /* Direct Stream Transfer */
+#define TDA1997X_I2S16_HBR 6 /* HBR straight in I2S 16bit mode */
+#define TDA1997X_I2S16_HBR_DEMUX 7 /* HBR demux in I2S 16bit mode */
+#define TDA1997X_I2S32_HBR_DEMUX 8 /* HBR demux in I2S 32bit mode */
+#define TDA1997X_SPDIF_HBR_DEMUX 9 /* HBR demux in SPDIF mode */
+
+/* Audio bus channel layout */
+#define TDA1997X_LAYOUT0 0 /* 2-channel */
+#define TDA1997X_LAYOUT1 1 /* 8-channel */
+
+/* Audio bus clock */
+#define TDA1997X_ACLK_16FS 0
+#define TDA1997X_ACLK_32FS 1
+#define TDA1997X_ACLK_64FS 2
+#define TDA1997X_ACLK_128FS 3
+#define TDA1997X_ACLK_256FS 4
+#define TDA1997X_ACLK_512FS 5
+
+#endif /* _DT_BINDINGS_MEDIA_TDA1997X_H */
diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h
new file mode 100644
index 0000000..01eedf4
--- /dev/null
+++ b/include/dt-bindings/media/tvp5150.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ tvp5150.h - definition for tvp5150 inputs
+
+ Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+
+*/
+
+#ifndef _DT_BINDINGS_MEDIA_TVP5150_H
+#define _DT_BINDINGS_MEDIA_TVP5150_H
+
+/* TVP5150 HW inputs */
+#define TVP5150_COMPOSITE0 0
+#define TVP5150_COMPOSITE1 1
+#define TVP5150_SVIDEO 2
+
+#define TVP5150_INPUT_NUM 3
+
+/* TVP5150 HW outputs */
+#define TVP5150_NORMAL 0
+#define TVP5150_BLACK_SCREEN 1
+
+#endif /* _DT_BINDINGS_MEDIA_TVP5150_H */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
new file mode 100644
index 0000000..94ed3ed
--- /dev/null
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon
+ * Laurent Pinchart
+ */
+
+#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+#define __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+
+/*
+ * Video format codes as defined in "AXI4-Stream Video IP and System Design
+ * Guide".
+ */
+#define XVIP_VF_YUV_422 0
+#define XVIP_VF_YUV_444 1
+#define XVIP_VF_RBG 2
+#define XVIP_VF_YUV_420 3
+#define XVIP_VF_YUVA_422 4
+#define XVIP_VF_YUVA_444 5
+#define XVIP_VF_RGBA 6
+#define XVIP_VF_YUVA_420 7
+#define XVIP_VF_YUVD_422 8
+#define XVIP_VF_YUVD_444 9
+#define XVIP_VF_RGBD 10
+#define XVIP_VF_YUVD_420 11
+#define XVIP_VF_MONO_SENSOR 12
+#define XVIP_VF_CUSTOM2 13
+#define XVIP_VF_CUSTOM3 14
+#define XVIP_VF_CUSTOM4 15
+
+#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h
new file mode 100644
index 0000000..2d85c2e
--- /dev/null
+++ b/include/dt-bindings/memory/mt2701-larb-port.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Honghui Zhang
+ */
+
+#ifndef _MT2701_LARB_PORT_H_
+#define _MT2701_LARB_PORT_H_
+
+/*
+ * MediaTek m4u generation 1, such as mt2701, uses flat m4u port numbers:
+ * larb[0]'s first port number is 0, and the first port id of larb[N] is
+ * the last port id of larb[N - 1] plus one. The definitions of
+ * MT2701_M4U_ID_LARBx below follow the HW register spec.
+ * m4u generation 2, such as mt8173, numbers ports differently: it uses a
+ * fixed offset for each larb, so the first port id of larb[N] is (N * 32).
+ */
+#define LARB0_PORT_OFFSET 0
+#define LARB1_PORT_OFFSET 11
+#define LARB2_PORT_OFFSET 21
+#define LARB3_PORT_OFFSET 44
+
+#define MT2701_M4U_ID_LARB0(port) ((port) + LARB0_PORT_OFFSET)
+#define MT2701_M4U_ID_LARB1(port) ((port) + LARB1_PORT_OFFSET)
+#define MT2701_M4U_ID_LARB2(port) ((port) + LARB2_PORT_OFFSET)
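A worked example of the two numbering schemes described in the comment above (a minimal sketch; the gen2 value is computed by hand since this header only covers the gen1 mt2701, and larb 3 / port 2 are arbitrary example values, with the header assumed reachable via -Iinclude):

#include <stdio.h>
#include <dt-bindings/memory/mt2701-larb-port.h>

int main(void)
{
	/* gen1 (mt2701): flat numbering, larb1 starts where larb0 ends */
	printf("%d\n", MT2701_M4U_ID_LARB1(0));	/* 11: larb0 has ports 0..10 */
	/* gen2 (mt8173 style): larb N starts at N * 32, i.e. (larb << 5) | port */
	printf("%d\n", (3 << 5) | 2);		/* 98: port 2 of larb 3 */
	return 0;
}
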
+/* Port define for larb0 */
+#define MT2701_M4U_PORT_DISP_OVL_0 MT2701_M4U_ID_LARB0(0)
+#define MT2701_M4U_PORT_DISP_RDMA1 MT2701_M4U_ID_LARB0(1)
+#define MT2701_M4U_PORT_DISP_RDMA MT2701_M4U_ID_LARB0(2)
+#define MT2701_M4U_PORT_DISP_WDMA MT2701_M4U_ID_LARB0(3)
+#define MT2701_M4U_PORT_MM_CMDQ MT2701_M4U_ID_LARB0(4)
+#define MT2701_M4U_PORT_MDP_RDMA MT2701_M4U_ID_LARB0(5)
+#define MT2701_M4U_PORT_MDP_WDMA MT2701_M4U_ID_LARB0(6)
+#define MT2701_M4U_PORT_MDP_ROTO MT2701_M4U_ID_LARB0(7)
+#define MT2701_M4U_PORT_MDP_ROTCO MT2701_M4U_ID_LARB0(8)
+#define MT2701_M4U_PORT_MDP_ROTVO MT2701_M4U_ID_LARB0(9)
+#define MT2701_M4U_PORT_MDP_RDMA1 MT2701_M4U_ID_LARB0(10)
+
+/* Port define for larb1 */
+#define MT2701_M4U_PORT_VDEC_MC_EXT MT2701_M4U_ID_LARB1(0)
+#define MT2701_M4U_PORT_VDEC_PP_EXT MT2701_M4U_ID_LARB1(1)
+#define MT2701_M4U_PORT_VDEC_PPWRAP_EXT MT2701_M4U_ID_LARB1(2)
+#define MT2701_M4U_PORT_VDEC_AVC_MV_EXT MT2701_M4U_ID_LARB1(3)
+#define MT2701_M4U_PORT_VDEC_PRED_RD_EXT MT2701_M4U_ID_LARB1(4)
+#define MT2701_M4U_PORT_VDEC_PRED_WR_EXT MT2701_M4U_ID_LARB1(5)
+#define MT2701_M4U_PORT_VDEC_VLD_EXT MT2701_M4U_ID_LARB1(6)
+#define MT2701_M4U_PORT_VDEC_VLD2_EXT MT2701_M4U_ID_LARB1(7)
+#define MT2701_M4U_PORT_VDEC_TILE_EXT MT2701_M4U_ID_LARB1(8)
+#define MT2701_M4U_PORT_VDEC_IMG_RESZ_EXT MT2701_M4U_ID_LARB1(9)
+
+/* Port define for larb2 */
+#define MT2701_M4U_PORT_VENC_RCPU MT2701_M4U_ID_LARB2(0)
+#define MT2701_M4U_PORT_VENC_REC_FRM MT2701_M4U_ID_LARB2(1)
+#define MT2701_M4U_PORT_VENC_BSDMA MT2701_M4U_ID_LARB2(2)
+#define MT2701_M4U_PORT_JPGENC_RDMA MT2701_M4U_ID_LARB2(3)
+#define MT2701_M4U_PORT_VENC_LT_RCPU MT2701_M4U_ID_LARB2(4)
+#define MT2701_M4U_PORT_VENC_LT_REC_FRM MT2701_M4U_ID_LARB2(5)
+#define MT2701_M4U_PORT_VENC_LT_BSDMA MT2701_M4U_ID_LARB2(6)
+#define MT2701_M4U_PORT_JPGDEC_BSDMA MT2701_M4U_ID_LARB2(7)
+#define MT2701_M4U_PORT_VENC_SV_COMV MT2701_M4U_ID_LARB2(8)
+#define MT2701_M4U_PORT_VENC_RD_COMV MT2701_M4U_ID_LARB2(9)
+#define MT2701_M4U_PORT_JPGENC_BSDMA MT2701_M4U_ID_LARB2(10)
+#define MT2701_M4U_PORT_VENC_CUR_LUMA MT2701_M4U_ID_LARB2(11)
+#define MT2701_M4U_PORT_VENC_CUR_CHROMA MT2701_M4U_ID_LARB2(12)
+#define MT2701_M4U_PORT_VENC_REF_LUMA MT2701_M4U_ID_LARB2(13)
+#define MT2701_M4U_PORT_VENC_REF_CHROMA MT2701_M4U_ID_LARB2(14)
+#define MT2701_M4U_PORT_IMG_RESZ MT2701_M4U_ID_LARB2(15)
+#define MT2701_M4U_PORT_VENC_LT_SV_COMV MT2701_M4U_ID_LARB2(16)
+#define MT2701_M4U_PORT_VENC_LT_RD_COMV MT2701_M4U_ID_LARB2(17)
+#define MT2701_M4U_PORT_VENC_LT_CUR_LUMA MT2701_M4U_ID_LARB2(18)
+#define MT2701_M4U_PORT_VENC_LT_CUR_CHROMA MT2701_M4U_ID_LARB2(19)
+#define MT2701_M4U_PORT_VENC_LT_REF_LUMA MT2701_M4U_ID_LARB2(20)
+#define MT2701_M4U_PORT_VENC_LT_REF_CHROMA MT2701_M4U_ID_LARB2(21)
+#define MT2701_M4U_PORT_JPGDEC_WDMA MT2701_M4U_ID_LARB2(22)
+
+#endif
diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h
new file mode 100644
index 0000000..6f9aa73
--- /dev/null
+++ b/include/dt-bindings/memory/mt2712-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Yong Wu
+ */
+#ifndef __DTS_IOMMU_PORT_MT2712_H
+#define __DTS_IOMMU_PORT_MT2712_H
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+#define M4U_LARB5_ID 5
+#define M4U_LARB6_ID 6
+#define M4U_LARB7_ID 7
+#define M4U_LARB8_ID 8
+#define M4U_LARB9_ID 9
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 7)
+
+/* larb1 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_HW_IMG_RESZ_EXT MTK_M4U_ID(M4U_LARB1_ID, 10)
+
+/* larb2 */
+#define M4U_PORT_CAM_DMA0 MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_DMA1 MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_DMA2 MTK_M4U_ID(M4U_LARB2_ID, 2)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+
+/* larb4 */
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_DISP_OD1_R MTK_M4U_ID(M4U_LARB4_ID, 3)
+#define M4U_PORT_DISP_OD1_W MTK_M4U_ID(M4U_LARB4_ID, 4)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 5)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 6)
+
+/* larb5 */
+#define M4U_PORT_DISP_OVL2 MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_DISP_WDMA2 MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_MDP_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB5_ID, 3)
+
+/* larb6 */
+#define M4U_PORT_JPGDEC_WDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define M4U_PORT_JPGDEC_WDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define
M4U_PORT_JPGDEC_BSDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 2) +#define M4U_PORT_JPGDEC_BSDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 3) + +/* larb7 */ +#define M4U_PORT_MDP_RDMA3 MTK_M4U_ID(M4U_LARB7_ID, 0) +#define M4U_PORT_MDP_WROT2 MTK_M4U_ID(M4U_LARB7_ID, 1) + +/* larb8 */ +#define M4U_PORT_VDO MTK_M4U_ID(M4U_LARB8_ID, 0) +#define M4U_PORT_NR MTK_M4U_ID(M4U_LARB8_ID, 1) +#define M4U_PORT_WR_CHANNEL0 MTK_M4U_ID(M4U_LARB8_ID, 2) + +/* larb9 */ +#define M4U_PORT_TVD MTK_M4U_ID(M4U_LARB9_ID, 0) +#define M4U_PORT_WR_CHANNEL1 MTK_M4U_ID(M4U_LARB9_ID, 1) + +#endif diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h new file mode 100644 index 0000000..9f31ccf --- /dev/null +++ b/include/dt-bindings/memory/mt8173-larb-port.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2016 MediaTek Inc. + * Author: Yong Wu + */ +#ifndef __DTS_IOMMU_PORT_MT8173_H +#define __DTS_IOMMU_PORT_MT8173_H + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 +#define M4U_LARB4_ID 4 +#define M4U_LARB5_ID 5 + +/* larb0 */ +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7) + +/* larb1 */ +#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8) +#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9) + +/* larb2 */ +#define M4U_PORT_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_AAO MTK_M4U_ID(M4U_LARB2_ID, 2) +#define M4U_PORT_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3) +#define M4U_PORT_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4) +#define M4U_PORT_IMGO_D MTK_M4U_ID(M4U_LARB2_ID, 5) +#define M4U_PORT_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6) +#define M4U_PORT_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7) +#define M4U_PORT_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8) +#define M4U_PORT_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9) +#define M4U_PORT_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10) +#define M4U_PORT_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11) +#define M4U_PORT_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12) +#define M4U_PORT_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13) +#define M4U_PORT_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14) +#define M4U_PORT_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15) +#define M4U_PORT_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16) +#define M4U_PORT_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17) +#define M4U_PORT_RB MTK_M4U_ID(M4U_LARB2_ID, 18) +#define M4U_PORT_RP MTK_M4U_ID(M4U_LARB2_ID, 19) +#define M4U_PORT_WR MTK_M4U_ID(M4U_LARB2_ID, 20) + +/* larb3 */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_VENC_BSDMA 
MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4) +#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5) +#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 6) +#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 7) +#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 8) +#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 9) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 10) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 11) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 12) +#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 13) +#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 14) + +/* larb4 */ +#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1) +#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB4_ID, 2) +#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 3) +#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 4) +#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 5) + +/* larb5 */ +#define M4U_PORT_VENC_RCPU_SET2 MTK_M4U_ID(M4U_LARB5_ID, 0) +#define M4U_PORT_VENC_REC_FRM_SET2 MTK_M4U_ID(M4U_LARB5_ID, 1) +#define M4U_PORT_VENC_REF_LUMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 2) +#define M4U_PORT_VENC_REC_CHROMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 3) +#define M4U_PORT_VENC_BSDMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 4) +#define M4U_PORT_VENC_CUR_LUMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 5) +#define M4U_PORT_VENC_CUR_CHROMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 6) +#define M4U_PORT_VENC_RD_COMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 7) +#define M4U_PORT_VENC_SV_COMA_SET2 MTK_M4U_ID(M4U_LARB5_ID, 8) + +#endif diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h new file mode 100644 index 0000000..2c579f3 --- /dev/null +++ b/include/dt-bindings/memory/mt8183-larb-port.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Yong Wu + */ +#ifndef __DTS_IOMMU_PORT_MT8183_H +#define __DTS_IOMMU_PORT_MT8183_H + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 +#define M4U_LARB4_ID 4 +#define M4U_LARB5_ID 5 +#define M4U_LARB6_ID 6 +#define M4U_LARB7_ID 7 + +/* larb0 */ +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_2L_OVL0_LARB0 MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_2L_OVL1_LARB0 MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7) +#define M4U_PORT_MDP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 8) +#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 9) + +/* larb1 */ +#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 6) + +/* larb2 VPU0 */ +#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB2_ID, 2) + +/* larb3 VPU1 */ +#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB3_ID, 4) + +/* larb4 */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB4_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB4_ID, 1) +#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB4_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB4_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB4_ID, 4) +#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB4_ID, 5) +#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB4_ID, 6) +#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB4_ID, 7) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB4_ID, 8) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB4_ID, 9) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB4_ID, 10) + +/* larb5 */ +#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB5_ID, 0) +#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB5_ID, 1) +#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB5_ID, 2) +#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB5_ID, 3) +#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB5_ID, 4) +#define M4U_PORT_CAM_SMXI MTK_M4U_ID(M4U_LARB5_ID, 5) +#define M4U_PORT_CAM_SMXO MTK_M4U_ID(M4U_LARB5_ID, 6) +#define M4U_PORT_CAM_WPE0_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 7) +#define M4U_PORT_CAM_WPE0_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 8) +#define M4U_PORT_CAM_WPE0_WDMA MTK_M4U_ID(M4U_LARB5_ID, 9) +#define M4U_PORT_CAM_FDVT_RP MTK_M4U_ID(M4U_LARB5_ID, 10) +#define M4U_PORT_CAM_FDVT_WR MTK_M4U_ID(M4U_LARB5_ID, 11) +#define M4U_PORT_CAM_FDVT_RB MTK_M4U_ID(M4U_LARB5_ID, 12) +#define M4U_PORT_CAM_WPE1_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 13) +#define M4U_PORT_CAM_WPE1_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 14) +#define M4U_PORT_CAM_WPE1_WDMA MTK_M4U_ID(M4U_LARB5_ID, 15) +#define M4U_PORT_CAM_DPE_RDMA MTK_M4U_ID(M4U_LARB5_ID, 
16) +#define M4U_PORT_CAM_DPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 17) +#define M4U_PORT_CAM_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 18) +#define M4U_PORT_CAM_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 19) +#define M4U_PORT_CAM_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 20) +#define M4U_PORT_CAM_RSC_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 21) +#define M4U_PORT_CAM_RSC_WDMA MTK_M4U_ID(M4U_LARB5_ID, 22) +#define M4U_PORT_CAM_OWE_RDMA MTK_M4U_ID(M4U_LARB5_ID, 23) +#define M4U_PORT_CAM_OWE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 24) + +/* larb6 */ +#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB6_ID, 0) +#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB6_ID, 1) +#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB6_ID, 2) +#define M4U_PORT_CAM_AFO MTK_M4U_ID(M4U_LARB6_ID, 3) +#define M4U_PORT_CAM_LSCI0 MTK_M4U_ID(M4U_LARB6_ID, 4) +#define M4U_PORT_CAM_LSCI1 MTK_M4U_ID(M4U_LARB6_ID, 5) +#define M4U_PORT_CAM_PDO MTK_M4U_ID(M4U_LARB6_ID, 6) +#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB6_ID, 7) +#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB6_ID, 8) +#define M4U_PORT_CAM_CAM_RSSO_A MTK_M4U_ID(M4U_LARB6_ID, 9) +#define M4U_PORT_CAM_UFEO MTK_M4U_ID(M4U_LARB6_ID, 10) +#define M4U_PORT_CAM_SOCO MTK_M4U_ID(M4U_LARB6_ID, 11) +#define M4U_PORT_CAM_SOC1 MTK_M4U_ID(M4U_LARB6_ID, 12) +#define M4U_PORT_CAM_SOC2 MTK_M4U_ID(M4U_LARB6_ID, 13) +#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB6_ID, 14) +#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB6_ID, 15) +#define M4U_PORT_CAM_RAWI_A MTK_M4U_ID(M4U_LARB6_ID, 16) +#define M4U_PORT_CAM_CCUG MTK_M4U_ID(M4U_LARB6_ID, 17) +#define M4U_PORT_CAM_PSO MTK_M4U_ID(M4U_LARB6_ID, 18) +#define M4U_PORT_CAM_AFO_1 MTK_M4U_ID(M4U_LARB6_ID, 19) +#define M4U_PORT_CAM_LSCI_2 MTK_M4U_ID(M4U_LARB6_ID, 20) +#define M4U_PORT_CAM_PDI MTK_M4U_ID(M4U_LARB6_ID, 21) +#define M4U_PORT_CAM_FLKO MTK_M4U_ID(M4U_LARB6_ID, 22) +#define M4U_PORT_CAM_LMVO MTK_M4U_ID(M4U_LARB6_ID, 23) +#define M4U_PORT_CAM_UFGO MTK_M4U_ID(M4U_LARB6_ID, 24) +#define M4U_PORT_CAM_SPARE MTK_M4U_ID(M4U_LARB6_ID, 25) +#define M4U_PORT_CAM_SPARE_2 MTK_M4U_ID(M4U_LARB6_ID, 26) +#define M4U_PORT_CAM_SPARE_3 MTK_M4U_ID(M4U_LARB6_ID, 27) +#define M4U_PORT_CAM_SPARE_4 MTK_M4U_ID(M4U_LARB6_ID, 28) +#define M4U_PORT_CAM_SPARE_5 MTK_M4U_ID(M4U_LARB6_ID, 29) +#define M4U_PORT_CAM_SPARE_6 MTK_M4U_ID(M4U_LARB6_ID, 30) + +/* CCU */ +#define M4U_PORT_CCU0 MTK_M4U_ID(M4U_LARB7_ID, 0) +#define M4U_PORT_CCU1 MTK_M4U_ID(M4U_LARB7_ID, 1) + +#endif diff --git a/include/dt-bindings/memory/tegra114-mc.h b/include/dt-bindings/memory/tegra114-mc.h new file mode 100644 index 0000000..dfe99c8 --- /dev/null +++ b/include/dt-bindings/memory/tegra114-mc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA114_MC_H +#define DT_BINDINGS_MEMORY_TEGRA114_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_EPP 3 +#define TEGRA_SWGROUP_G2 4 +#define TEGRA_SWGROUP_AVPC 5 +#define TEGRA_SWGROUP_NV 6 +#define TEGRA_SWGROUP_HDA 7 +#define TEGRA_SWGROUP_HC 8 +#define TEGRA_SWGROUP_MSENC 9 +#define TEGRA_SWGROUP_PPCS 10 +#define TEGRA_SWGROUP_VDE 11 +#define TEGRA_SWGROUP_MPCORELP 12 +#define TEGRA_SWGROUP_MPCORE 13 +#define TEGRA_SWGROUP_VI 14 +#define TEGRA_SWGROUP_ISP 15 +#define TEGRA_SWGROUP_XUSB_HOST 16 +#define TEGRA_SWGROUP_XUSB_DEV 17 +#define TEGRA_SWGROUP_EMUCIF 18 +#define TEGRA_SWGROUP_TSEC 19 + +#define TEGRA114_MC_RESET_AVPC 0 +#define TEGRA114_MC_RESET_DC 1 +#define TEGRA114_MC_RESET_DCB 2 +#define TEGRA114_MC_RESET_EPP 3 +#define TEGRA114_MC_RESET_2D 4 +#define TEGRA114_MC_RESET_HC 5 +#define 
TEGRA114_MC_RESET_HDA 6 +#define TEGRA114_MC_RESET_ISP 7 +#define TEGRA114_MC_RESET_MPCORE 8 +#define TEGRA114_MC_RESET_MPCORELP 9 +#define TEGRA114_MC_RESET_MPE 10 +#define TEGRA114_MC_RESET_3D 11 +#define TEGRA114_MC_RESET_3D2 12 +#define TEGRA114_MC_RESET_PPCS 13 +#define TEGRA114_MC_RESET_VDE 14 +#define TEGRA114_MC_RESET_VI 15 + +#endif diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h new file mode 100644 index 0000000..186e6b7 --- /dev/null +++ b/include/dt-bindings/memory/tegra124-mc.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA124_MC_H +#define DT_BINDINGS_MEMORY_TEGRA124_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_AFI 3 +#define TEGRA_SWGROUP_AVPC 4 +#define TEGRA_SWGROUP_HDA 5 +#define TEGRA_SWGROUP_HC 6 +#define TEGRA_SWGROUP_MSENC 7 +#define TEGRA_SWGROUP_PPCS 8 +#define TEGRA_SWGROUP_SATA 9 +#define TEGRA_SWGROUP_VDE 10 +#define TEGRA_SWGROUP_MPCORELP 11 +#define TEGRA_SWGROUP_MPCORE 12 +#define TEGRA_SWGROUP_ISP2 13 +#define TEGRA_SWGROUP_XUSB_HOST 14 +#define TEGRA_SWGROUP_XUSB_DEV 15 +#define TEGRA_SWGROUP_ISP2B 16 +#define TEGRA_SWGROUP_TSEC 17 +#define TEGRA_SWGROUP_A9AVP 18 +#define TEGRA_SWGROUP_GPU 19 +#define TEGRA_SWGROUP_SDMMC1A 20 +#define TEGRA_SWGROUP_SDMMC2A 21 +#define TEGRA_SWGROUP_SDMMC3A 22 +#define TEGRA_SWGROUP_SDMMC4A 23 +#define TEGRA_SWGROUP_VIC 24 +#define TEGRA_SWGROUP_VI 25 + +#define TEGRA124_MC_RESET_AFI 0 +#define TEGRA124_MC_RESET_AVPC 1 +#define TEGRA124_MC_RESET_DC 2 +#define TEGRA124_MC_RESET_DCB 3 +#define TEGRA124_MC_RESET_HC 4 +#define TEGRA124_MC_RESET_HDA 5 +#define TEGRA124_MC_RESET_ISP2 6 +#define TEGRA124_MC_RESET_MPCORE 7 +#define TEGRA124_MC_RESET_MPCORELP 8 +#define TEGRA124_MC_RESET_MSENC 9 +#define TEGRA124_MC_RESET_PPCS 10 +#define TEGRA124_MC_RESET_SATA 11 +#define TEGRA124_MC_RESET_VDE 12 +#define TEGRA124_MC_RESET_VI 13 +#define TEGRA124_MC_RESET_VIC 14 +#define TEGRA124_MC_RESET_XUSB_HOST 15 +#define TEGRA124_MC_RESET_XUSB_DEV 16 +#define TEGRA124_MC_RESET_TSEC 17 +#define TEGRA124_MC_RESET_SDMMC1 18 +#define TEGRA124_MC_RESET_SDMMC2 19 +#define TEGRA124_MC_RESET_SDMMC3 20 +#define TEGRA124_MC_RESET_SDMMC4 21 +#define TEGRA124_MC_RESET_ISP2B 22 +#define TEGRA124_MC_RESET_GPU 23 + +#endif diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h new file mode 100644 index 0000000..6481353 --- /dev/null +++ b/include/dt-bindings/memory/tegra186-mc.h @@ -0,0 +1,111 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H +#define DT_BINDINGS_MEMORY_TEGRA186_MC_H + +/* special clients */ +#define TEGRA186_SID_INVALID 0x00 +#define TEGRA186_SID_PASSTHROUGH 0x7f + +/* host1x clients */ +#define TEGRA186_SID_HOST1X 0x01 +#define TEGRA186_SID_CSI 0x02 +#define TEGRA186_SID_VIC 0x03 +#define TEGRA186_SID_VI 0x04 +#define TEGRA186_SID_ISP 0x05 +#define TEGRA186_SID_NVDEC 0x06 +#define TEGRA186_SID_NVENC 0x07 +#define TEGRA186_SID_NVJPG 0x08 +#define TEGRA186_SID_NVDISPLAY 0x09 +#define TEGRA186_SID_TSEC 0x0a +#define TEGRA186_SID_TSECB 0x0b +#define TEGRA186_SID_SE 0x0c +#define TEGRA186_SID_SE1 0x0d +#define TEGRA186_SID_SE2 0x0e +#define TEGRA186_SID_SE3 0x0f + +/* GPU clients */ +#define TEGRA186_SID_GPU 0x10 + +/* other SoC clients */ +#define TEGRA186_SID_AFI 0x11 +#define TEGRA186_SID_HDA 0x12 +#define TEGRA186_SID_ETR 0x13 +#define TEGRA186_SID_EQOS 0x14 +#define TEGRA186_SID_UFSHC 0x15 +#define TEGRA186_SID_AON 0x16 +#define 
TEGRA186_SID_SDMMC4 0x17 +#define TEGRA186_SID_SDMMC3 0x18 +#define TEGRA186_SID_SDMMC2 0x19 +#define TEGRA186_SID_SDMMC1 0x1a +#define TEGRA186_SID_XUSB_HOST 0x1b +#define TEGRA186_SID_XUSB_DEV 0x1c +#define TEGRA186_SID_SATA 0x1d +#define TEGRA186_SID_APE 0x1e +#define TEGRA186_SID_SCE 0x1f + +/* GPC DMA clients */ +#define TEGRA186_SID_GPCDMA_0 0x20 +#define TEGRA186_SID_GPCDMA_1 0x21 +#define TEGRA186_SID_GPCDMA_2 0x22 +#define TEGRA186_SID_GPCDMA_3 0x23 +#define TEGRA186_SID_GPCDMA_4 0x24 +#define TEGRA186_SID_GPCDMA_5 0x25 +#define TEGRA186_SID_GPCDMA_6 0x26 +#define TEGRA186_SID_GPCDMA_7 0x27 + +/* APE DMA clients */ +#define TEGRA186_SID_APE_1 0x28 +#define TEGRA186_SID_APE_2 0x29 + +/* camera RTCPU */ +#define TEGRA186_SID_RCE 0x2a + +/* camera RTCPU on host1x address space */ +#define TEGRA186_SID_RCE_1X 0x2b + +/* APE DMA clients */ +#define TEGRA186_SID_APE_3 0x2c + +/* camera RTCPU running on APE */ +#define TEGRA186_SID_APE_CAM 0x2d +#define TEGRA186_SID_APE_CAM_1X 0x2e + +/* + * The BPMP has its SID value hardcoded in the firmware. Changing it requires + * considerable effort. + */ +#define TEGRA186_SID_BPMP 0x32 + +/* for SMMU tests */ +#define TEGRA186_SID_SMMU_TEST 0x33 + +/* host1x virtualization channels */ +#define TEGRA186_SID_HOST1X_CTX0 0x38 +#define TEGRA186_SID_HOST1X_CTX1 0x39 +#define TEGRA186_SID_HOST1X_CTX2 0x3a +#define TEGRA186_SID_HOST1X_CTX3 0x3b +#define TEGRA186_SID_HOST1X_CTX4 0x3c +#define TEGRA186_SID_HOST1X_CTX5 0x3d +#define TEGRA186_SID_HOST1X_CTX6 0x3e +#define TEGRA186_SID_HOST1X_CTX7 0x3f + +/* host1x command buffers */ +#define TEGRA186_SID_HOST1X_VM0 0x40 +#define TEGRA186_SID_HOST1X_VM1 0x41 +#define TEGRA186_SID_HOST1X_VM2 0x42 +#define TEGRA186_SID_HOST1X_VM3 0x43 +#define TEGRA186_SID_HOST1X_VM4 0x44 +#define TEGRA186_SID_HOST1X_VM5 0x45 +#define TEGRA186_SID_HOST1X_VM6 0x46 +#define TEGRA186_SID_HOST1X_VM7 0x47 + +/* SE data buffers */ +#define TEGRA186_SID_SE_VM0 0x48 +#define TEGRA186_SID_SE_VM1 0x49 +#define TEGRA186_SID_SE_VM2 0x4a +#define TEGRA186_SID_SE_VM3 0x4b +#define TEGRA186_SID_SE_VM4 0x4c +#define TEGRA186_SID_SE_VM5 0x4d +#define TEGRA186_SID_SE_VM6 0x4e +#define TEGRA186_SID_SE_VM7 0x4f + +#endif diff --git a/include/dt-bindings/memory/tegra20-mc.h b/include/dt-bindings/memory/tegra20-mc.h new file mode 100644 index 0000000..35e131e --- /dev/null +++ b/include/dt-bindings/memory/tegra20-mc.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA20_MC_H +#define DT_BINDINGS_MEMORY_TEGRA20_MC_H + +#define TEGRA20_MC_RESET_AVPC 0 +#define TEGRA20_MC_RESET_DC 1 +#define TEGRA20_MC_RESET_DCB 2 +#define TEGRA20_MC_RESET_EPP 3 +#define TEGRA20_MC_RESET_2D 4 +#define TEGRA20_MC_RESET_HC 5 +#define TEGRA20_MC_RESET_ISP 6 +#define TEGRA20_MC_RESET_MPCORE 7 +#define TEGRA20_MC_RESET_MPEA 8 +#define TEGRA20_MC_RESET_MPEB 9 +#define TEGRA20_MC_RESET_MPEC 10 +#define TEGRA20_MC_RESET_3D 11 +#define TEGRA20_MC_RESET_PPCS 12 +#define TEGRA20_MC_RESET_VDE 13 +#define TEGRA20_MC_RESET_VI 14 + +#endif diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h new file mode 100644 index 0000000..cacf056 --- /dev/null +++ b/include/dt-bindings/memory/tegra210-mc.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA210_MC_H +#define DT_BINDINGS_MEMORY_TEGRA210_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_AFI 3 +#define TEGRA_SWGROUP_AVPC 4 +#define 
TEGRA_SWGROUP_HDA 5 +#define TEGRA_SWGROUP_HC 6 +#define TEGRA_SWGROUP_NVENC 7 +#define TEGRA_SWGROUP_PPCS 8 +#define TEGRA_SWGROUP_SATA 9 +#define TEGRA_SWGROUP_MPCORE 10 +#define TEGRA_SWGROUP_ISP2 11 +#define TEGRA_SWGROUP_XUSB_HOST 12 +#define TEGRA_SWGROUP_XUSB_DEV 13 +#define TEGRA_SWGROUP_ISP2B 14 +#define TEGRA_SWGROUP_TSEC 15 +#define TEGRA_SWGROUP_A9AVP 16 +#define TEGRA_SWGROUP_GPU 17 +#define TEGRA_SWGROUP_SDMMC1A 18 +#define TEGRA_SWGROUP_SDMMC2A 19 +#define TEGRA_SWGROUP_SDMMC3A 20 +#define TEGRA_SWGROUP_SDMMC4A 21 +#define TEGRA_SWGROUP_VIC 22 +#define TEGRA_SWGROUP_VI 23 +#define TEGRA_SWGROUP_NVDEC 24 +#define TEGRA_SWGROUP_APE 25 +#define TEGRA_SWGROUP_NVJPG 26 +#define TEGRA_SWGROUP_SE 27 +#define TEGRA_SWGROUP_AXIAP 28 +#define TEGRA_SWGROUP_ETR 29 +#define TEGRA_SWGROUP_TSECB 30 + +#define TEGRA210_MC_RESET_AFI 0 +#define TEGRA210_MC_RESET_AVPC 1 +#define TEGRA210_MC_RESET_DC 2 +#define TEGRA210_MC_RESET_DCB 3 +#define TEGRA210_MC_RESET_HC 4 +#define TEGRA210_MC_RESET_HDA 5 +#define TEGRA210_MC_RESET_ISP2 6 +#define TEGRA210_MC_RESET_MPCORE 7 +#define TEGRA210_MC_RESET_NVENC 8 +#define TEGRA210_MC_RESET_PPCS 9 +#define TEGRA210_MC_RESET_SATA 10 +#define TEGRA210_MC_RESET_VI 11 +#define TEGRA210_MC_RESET_VIC 12 +#define TEGRA210_MC_RESET_XUSB_HOST 13 +#define TEGRA210_MC_RESET_XUSB_DEV 14 +#define TEGRA210_MC_RESET_A9AVP 15 +#define TEGRA210_MC_RESET_TSEC 16 +#define TEGRA210_MC_RESET_SDMMC1 17 +#define TEGRA210_MC_RESET_SDMMC2 18 +#define TEGRA210_MC_RESET_SDMMC3 19 +#define TEGRA210_MC_RESET_SDMMC4 20 +#define TEGRA210_MC_RESET_ISP2B 21 +#define TEGRA210_MC_RESET_GPU 22 +#define TEGRA210_MC_RESET_NVDEC 23 +#define TEGRA210_MC_RESET_APE 24 +#define TEGRA210_MC_RESET_SE 25 +#define TEGRA210_MC_RESET_NVJPG 26 +#define TEGRA210_MC_RESET_AXIAP 27 +#define TEGRA210_MC_RESET_ETR 28 +#define TEGRA210_MC_RESET_TSECB 29 + +#endif diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h new file mode 100644 index 0000000..169f005 --- /dev/null +++ b/include/dt-bindings/memory/tegra30-mc.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DT_BINDINGS_MEMORY_TEGRA30_MC_H +#define DT_BINDINGS_MEMORY_TEGRA30_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_EPP 3 +#define TEGRA_SWGROUP_G2 4 +#define TEGRA_SWGROUP_MPE 5 +#define TEGRA_SWGROUP_VI 6 +#define TEGRA_SWGROUP_AFI 7 +#define TEGRA_SWGROUP_AVPC 8 +#define TEGRA_SWGROUP_NV 9 +#define TEGRA_SWGROUP_NV2 10 +#define TEGRA_SWGROUP_HDA 11 +#define TEGRA_SWGROUP_HC 12 +#define TEGRA_SWGROUP_PPCS 13 +#define TEGRA_SWGROUP_SATA 14 +#define TEGRA_SWGROUP_VDE 15 +#define TEGRA_SWGROUP_MPCORELP 16 +#define TEGRA_SWGROUP_MPCORE 17 +#define TEGRA_SWGROUP_ISP 18 + +#define TEGRA30_MC_RESET_AFI 0 +#define TEGRA30_MC_RESET_AVPC 1 +#define TEGRA30_MC_RESET_DC 2 +#define TEGRA30_MC_RESET_DCB 3 +#define TEGRA30_MC_RESET_EPP 4 +#define TEGRA30_MC_RESET_2D 5 +#define TEGRA30_MC_RESET_HC 6 +#define TEGRA30_MC_RESET_HDA 7 +#define TEGRA30_MC_RESET_ISP 8 +#define TEGRA30_MC_RESET_MPCORE 9 +#define TEGRA30_MC_RESET_MPCORELP 10 +#define TEGRA30_MC_RESET_MPE 11 +#define TEGRA30_MC_RESET_3D 12 +#define TEGRA30_MC_RESET_3D2 13 +#define TEGRA30_MC_RESET_PPCS 14 +#define TEGRA30_MC_RESET_SATA 15 +#define TEGRA30_MC_RESET_VDE 16 +#define TEGRA30_MC_RESET_VI 17 + +#endif diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h new file mode 100644 index 0000000..1056108 --- /dev/null +++ 
b/include/dt-bindings/mfd/arizona.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Device Tree defines for Arizona devices + * + * Copyright 2015 Cirrus Logic Inc. + * + * Author: Charles Keepax + */ + +#ifndef _DT_BINDINGS_MFD_ARIZONA_H +#define _DT_BINDINGS_MFD_ARIZONA_H + +/* GPIO Function Definitions */ +#define ARIZONA_GP_FN_TXLRCLK 0x00 +#define ARIZONA_GP_FN_GPIO 0x01 +#define ARIZONA_GP_FN_IRQ1 0x02 +#define ARIZONA_GP_FN_IRQ2 0x03 +#define ARIZONA_GP_FN_OPCLK 0x04 +#define ARIZONA_GP_FN_FLL1_OUT 0x05 +#define ARIZONA_GP_FN_FLL2_OUT 0x06 +#define ARIZONA_GP_FN_PWM1 0x08 +#define ARIZONA_GP_FN_PWM2 0x09 +#define ARIZONA_GP_FN_SYSCLK_UNDERCLOCKED 0x0A +#define ARIZONA_GP_FN_ASYNCCLK_UNDERCLOCKED 0x0B +#define ARIZONA_GP_FN_FLL1_LOCK 0x0C +#define ARIZONA_GP_FN_FLL2_LOCK 0x0D +#define ARIZONA_GP_FN_FLL1_CLOCK_OK 0x0F +#define ARIZONA_GP_FN_FLL2_CLOCK_OK 0x10 +#define ARIZONA_GP_FN_HEADPHONE_DET 0x12 +#define ARIZONA_GP_FN_MIC_DET 0x13 +#define ARIZONA_GP_FN_WSEQ_STATUS 0x15 +#define ARIZONA_GP_FN_CIF_ADDRESS_ERROR 0x16 +#define ARIZONA_GP_FN_ASRC1_LOCK 0x1A +#define ARIZONA_GP_FN_ASRC2_LOCK 0x1B +#define ARIZONA_GP_FN_ASRC_CONFIG_ERROR 0x1C +#define ARIZONA_GP_FN_DRC1_SIGNAL_DETECT 0x1D +#define ARIZONA_GP_FN_DRC1_ANTICLIP 0x1E +#define ARIZONA_GP_FN_DRC1_DECAY 0x1F +#define ARIZONA_GP_FN_DRC1_NOISE 0x20 +#define ARIZONA_GP_FN_DRC1_QUICK_RELEASE 0x21 +#define ARIZONA_GP_FN_DRC2_SIGNAL_DETECT 0x22 +#define ARIZONA_GP_FN_DRC2_ANTICLIP 0x23 +#define ARIZONA_GP_FN_DRC2_DECAY 0x24 +#define ARIZONA_GP_FN_DRC2_NOISE 0x25 +#define ARIZONA_GP_FN_DRC2_QUICK_RELEASE 0x26 +#define ARIZONA_GP_FN_MIXER_DROPPED_SAMPLE 0x27 +#define ARIZONA_GP_FN_AIF1_CONFIG_ERROR 0x28 +#define ARIZONA_GP_FN_AIF2_CONFIG_ERROR 0x29 +#define ARIZONA_GP_FN_AIF3_CONFIG_ERROR 0x2A +#define ARIZONA_GP_FN_SPK_TEMP_SHUTDOWN 0x2B +#define ARIZONA_GP_FN_SPK_TEMP_WARNING 0x2C +#define ARIZONA_GP_FN_UNDERCLOCKED 0x2D +#define ARIZONA_GP_FN_OVERCLOCKED 0x2E +#define ARIZONA_GP_FN_DSP_IRQ1 0x35 +#define ARIZONA_GP_FN_DSP_IRQ2 0x36 +#define ARIZONA_GP_FN_ASYNC_OPCLK 0x3D +#define ARIZONA_GP_FN_BOOT_DONE 0x44 +#define ARIZONA_GP_FN_DSP1_RAM_READY 0x45 +#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS 0x4B +#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS 0x4C + +/* GPIO Configuration Bits */ +#define ARIZONA_GPN_DIR 0x8000 +#define ARIZONA_GPN_PU 0x4000 +#define ARIZONA_GPN_PD 0x2000 +#define ARIZONA_GPN_LVL 0x0800 +#define ARIZONA_GPN_POL 0x0400 +#define ARIZONA_GPN_OP_CFG 0x0200 +#define ARIZONA_GPN_DB 0x0100 + +/* Provide some defines for the most common configs */ +#define ARIZONA_GP_DEFAULT 0xffffffff +#define ARIZONA_GP_OUTPUT (ARIZONA_GP_FN_GPIO) +#define ARIZONA_GP_INPUT (ARIZONA_GP_FN_GPIO | \ + ARIZONA_GPN_DIR) + +#define ARIZONA_32KZ_MCLK1 1 +#define ARIZONA_32KZ_MCLK2 2 +#define ARIZONA_32KZ_NONE 3 + +#define ARIZONA_DMIC_MICVDD 0 +#define ARIZONA_DMIC_MICBIAS1 1 +#define ARIZONA_DMIC_MICBIAS2 2 +#define ARIZONA_DMIC_MICBIAS3 3 + +#define ARIZONA_INMODE_DIFF 0 +#define ARIZONA_INMODE_SE 1 +#define ARIZONA_INMODE_DMIC 2 + +#define ARIZONA_MICD_TIME_CONTINUOUS 0 +#define ARIZONA_MICD_TIME_250US 1 +#define ARIZONA_MICD_TIME_500US 2 +#define ARIZONA_MICD_TIME_1MS 3 +#define ARIZONA_MICD_TIME_2MS 4 +#define ARIZONA_MICD_TIME_4MS 5 +#define ARIZONA_MICD_TIME_8MS 6 +#define ARIZONA_MICD_TIME_16MS 7 +#define ARIZONA_MICD_TIME_32MS 8 +#define ARIZONA_MICD_TIME_64MS 9 +#define ARIZONA_MICD_TIME_128MS 10 +#define ARIZONA_MICD_TIME_256MS 11 +#define ARIZONA_MICD_TIME_512MS 12 + +#define ARIZONA_ACCDET_MODE_MIC 0 +#define 
ARIZONA_ACCDET_MODE_HPL 1 +#define ARIZONA_ACCDET_MODE_HPR 2 +#define ARIZONA_ACCDET_MODE_HPM 4 +#define ARIZONA_ACCDET_MODE_ADC 7 + +#define ARIZONA_GPSW_OPEN 0 +#define ARIZONA_GPSW_CLOSED 1 +#define ARIZONA_GPSW_CLAMP_ENABLED 2 +#define ARIZONA_GPSW_CLAMP_DISABLED 3 + +#endif diff --git a/include/dt-bindings/mfd/as3722.h b/include/dt-bindings/mfd/as3722.h new file mode 100644 index 0000000..9ef0cba --- /dev/null +++ b/include/dt-bindings/mfd/as3722.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for ams AS3722 device bindings. + * + * Copyright (c) 2013, NVIDIA Corporation. + * + * Author: Laxman Dewangan + * + */ + +#ifndef __DT_BINDINGS_AS3722_H__ +#define __DT_BINDINGS_AS3722_H__ + +/* External control pins */ +#define AS3722_EXT_CONTROL_PIN_ENABLE1 1 +#define AS3722_EXT_CONTROL_PIN_ENABLE2 2 +#define AS3722_EXT_CONTROL_PIN_ENABLE3 3 + +/* Interrupt numbers for AS3722 */ +#define AS3722_IRQ_LID 0 +#define AS3722_IRQ_ACOK 1 +#define AS3722_IRQ_ENABLE1 2 +#define AS3722_IRQ_OCCUR_ALARM_SD0 3 +#define AS3722_IRQ_ONKEY_LONG_PRESS 4 +#define AS3722_IRQ_ONKEY 5 +#define AS3722_IRQ_OVTMP 6 +#define AS3722_IRQ_LOWBAT 7 +#define AS3722_IRQ_SD0_LV 8 +#define AS3722_IRQ_SD1_LV 9 +#define AS3722_IRQ_SD2_LV 10 +#define AS3722_IRQ_PWM1_OV_PROT 11 +#define AS3722_IRQ_PWM2_OV_PROT 12 +#define AS3722_IRQ_ENABLE2 13 +#define AS3722_IRQ_SD6_LV 14 +#define AS3722_IRQ_RTC_REP 15 +#define AS3722_IRQ_RTC_ALARM 16 +#define AS3722_IRQ_GPIO1 17 +#define AS3722_IRQ_GPIO2 18 +#define AS3722_IRQ_GPIO3 19 +#define AS3722_IRQ_GPIO4 20 +#define AS3722_IRQ_GPIO5 21 +#define AS3722_IRQ_WATCHDOG 22 +#define AS3722_IRQ_ENABLE3 23 +#define AS3722_IRQ_TEMP_SD0_SHUTDOWN 24 +#define AS3722_IRQ_TEMP_SD1_SHUTDOWN 25 +#define AS3722_IRQ_TEMP_SD2_SHUTDOWN 26 +#define AS3722_IRQ_TEMP_SD0_ALARM 27 +#define AS3722_IRQ_TEMP_SD1_ALARM 28 +#define AS3722_IRQ_TEMP_SD6_ALARM 29 +#define AS3722_IRQ_OCCUR_ALARM_SD6 30 +#define AS3722_IRQ_ADC 31 + +#endif /* __DT_BINDINGS_AS3722_H__ */ diff --git a/include/dt-bindings/mfd/at91-usart.h b/include/dt-bindings/mfd/at91-usart.h new file mode 100644 index 0000000..2de5bc3 --- /dev/null +++ b/include/dt-bindings/mfd/at91-usart.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for AT91 USART DT bindings. + * + * Copyright (C) 2018 Microchip Technology + * + * Author: Radu Pirea + * + */ + +#ifndef __DT_BINDINGS_AT91_USART_H__ +#define __DT_BINDINGS_AT91_USART_H__ + +#define AT91_USART_MODE_SERIAL 0 +#define AT91_USART_MODE_SPI 1 + +#endif /* __DT_BINDINGS_AT91_USART_H__ */ diff --git a/include/dt-bindings/mfd/atmel-flexcom.h b/include/dt-bindings/mfd/atmel-flexcom.h new file mode 100644 index 0000000..4e2fc32 --- /dev/null +++ b/include/dt-bindings/mfd/atmel-flexcom.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides macros for Atmel Flexcom DT bindings. + * + * Copyright (C) 2015 Cyrille Pitchen + */ + +#ifndef __DT_BINDINGS_ATMEL_FLEXCOM_H__ +#define __DT_BINDINGS_ATMEL_FLEXCOM_H__ + +#define ATMEL_FLEXCOM_MODE_USART 1 +#define ATMEL_FLEXCOM_MODE_SPI 2 +#define ATMEL_FLEXCOM_MODE_TWI 3 + +#endif /* __DT_BINDINGS_ATMEL_FLEXCOM_H__ */ diff --git a/include/dt-bindings/mfd/dbx500-prcmu.h b/include/dt-bindings/mfd/dbx500-prcmu.h new file mode 100644 index 0000000..0404bcc --- /dev/null +++ b/include/dt-bindings/mfd/dbx500-prcmu.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the PRCMU bindings. 
+ * + */ + +#ifndef _DT_BINDINGS_MFD_PRCMU_H +#define _DT_BINDINGS_MFD_PRCMU_H + +/* + * Clock identifiers. + */ +#define ARMCLK 0 +#define PRCMU_ACLK 1 +#define PRCMU_SVAMMCSPCLK 2 +#define PRCMU_SDMMCHCLK 2 /* DBx540 only. */ +#define PRCMU_SIACLK 3 +#define PRCMU_SIAMMDSPCLK 3 /* DBx540 only. */ +#define PRCMU_SGACLK 4 +#define PRCMU_UARTCLK 5 +#define PRCMU_MSP02CLK 6 +#define PRCMU_MSP1CLK 7 +#define PRCMU_I2CCLK 8 +#define PRCMU_SDMMCCLK 9 +#define PRCMU_SLIMCLK 10 +#define PRCMU_CAMCLK 10 /* DBx540 only. */ +#define PRCMU_PER1CLK 11 +#define PRCMU_PER2CLK 12 +#define PRCMU_PER3CLK 13 +#define PRCMU_PER5CLK 14 +#define PRCMU_PER6CLK 15 +#define PRCMU_PER7CLK 16 +#define PRCMU_LCDCLK 17 +#define PRCMU_BMLCLK 18 +#define PRCMU_HSITXCLK 19 +#define PRCMU_HSIRXCLK 20 +#define PRCMU_HDMICLK 21 +#define PRCMU_APEATCLK 22 +#define PRCMU_APETRACECLK 23 +#define PRCMU_MCDECLK 24 +#define PRCMU_IPI2CCLK 25 +#define PRCMU_DSIALTCLK 26 +#define PRCMU_DMACLK 27 +#define PRCMU_B2R2CLK 28 +#define PRCMU_TVCLK 29 +#define SPARE_UNIPROCLK 30 +#define PRCMU_SSPCLK 31 +#define PRCMU_RNGCLK 32 +#define PRCMU_UICCCLK 33 +#define PRCMU_G1CLK 34 /* DBx540 only. */ +#define PRCMU_HVACLK 35 /* DBx540 only. */ +#define PRCMU_SPARE1CLK 36 +#define PRCMU_SPARE2CLK 37 + +#define PRCMU_NUM_REG_CLOCKS 38 + +#define PRCMU_RTCCLK PRCMU_NUM_REG_CLOCKS +#define PRCMU_SYSCLK 39 +#define PRCMU_CDCLK 40 +#define PRCMU_TIMCLK 41 +#define PRCMU_PLLSOC0 42 +#define PRCMU_PLLSOC1 43 +#define PRCMU_ARMSS 44 +#define PRCMU_PLLDDR 45 + +/* DSI Clocks */ +#define PRCMU_PLLDSI 46 +#define PRCMU_DSI0CLK 47 +#define PRCMU_DSI1CLK 48 +#define PRCMU_DSI0ESCCLK 49 +#define PRCMU_DSI1ESCCLK 50 +#define PRCMU_DSI2ESCCLK 51 + +/* LCD DSI PLL - Ux540 only */ +#define PRCMU_PLLDSI_LCD 52 +#define PRCMU_DSI0CLK_LCD 53 +#define PRCMU_DSI1CLK_LCD 54 +#define PRCMU_DSI0ESCCLK_LCD 55 +#define PRCMU_DSI1ESCCLK_LCD 56 +#define PRCMU_DSI2ESCCLK_LCD 57 + +#define PRCMU_NUM_CLKS 58 + +#endif diff --git a/include/dt-bindings/mfd/max77620.h b/include/dt-bindings/mfd/max77620.h new file mode 100644 index 0000000..1e19c5f --- /dev/null +++ b/include/dt-bindings/mfd/max77620.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for MAXIM MAX77620 device bindings. + * + * Copyright (c) 2016, NVIDIA Corporation. 
+ * Author: Laxman Dewangan
+ */
+
+#ifndef _DT_BINDINGS_MFD_MAX77620_H
+#define _DT_BINDINGS_MFD_MAX77620_H
+
+/* MAX77620 interrupts */
+#define MAX77620_IRQ_TOP_GLBL 0 /* Low-Battery */
+#define MAX77620_IRQ_TOP_SD 1 /* SD power fail */
+#define MAX77620_IRQ_TOP_LDO 2 /* LDO power fail */
+#define MAX77620_IRQ_TOP_GPIO 3 /* GPIO internal int to MAX77620 */
+#define MAX77620_IRQ_TOP_RTC 4 /* RTC */
+#define MAX77620_IRQ_TOP_32K 5 /* 32kHz oscillator */
+#define MAX77620_IRQ_TOP_ONOFF 6 /* ON/OFF oscillator */
+#define MAX77620_IRQ_LBT_MBATLOW 7 /* Main-battery low */
+#define MAX77620_IRQ_LBT_TJALRM1 8 /* Thermal alarm status, > 120C */
+#define MAX77620_IRQ_LBT_TJALRM2 9 /* Thermal alarm status, > 140C */
+
+/* FPS event source */
+#define MAX77620_FPS_EVENT_SRC_EN0 0
+#define MAX77620_FPS_EVENT_SRC_EN1 1
+#define MAX77620_FPS_EVENT_SRC_SW 2
+
+/* Device state when FPS event LOW */
+#define MAX77620_FPS_INACTIVE_STATE_SLEEP 0
+#define MAX77620_FPS_INACTIVE_STATE_LOW_POWER 1
+
+/* FPS source */
+#define MAX77620_FPS_SRC_0 0
+#define MAX77620_FPS_SRC_1 1
+#define MAX77620_FPS_SRC_2 2
+#define MAX77620_FPS_SRC_NONE 3
+#define MAX77620_FPS_SRC_DEF 4
+
+#endif
diff --git a/include/dt-bindings/mfd/palmas.h b/include/dt-bindings/mfd/palmas.h
new file mode 100644
index 0000000..c4f1d57
--- /dev/null
+++ b/include/dt-bindings/mfd/palmas.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides macros for Palmas device bindings.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan
+ *
+ */
+
+#ifndef __DT_BINDINGS_PALMAS_H
+#define __DT_BINDINGS_PALMAS_H
+
+/* External control pins */
+#define PALMAS_EXT_CONTROL_PIN_ENABLE1 1
+#define PALMAS_EXT_CONTROL_PIN_ENABLE2 2
+#define PALMAS_EXT_CONTROL_PIN_NSLEEP 3
+
+#endif /* __DT_BINDINGS_PALMAS_H */
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
new file mode 100644
index 0000000..c9204c4
--- /dev/null
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the Qualcomm RPM bindings.
+ */
+
+#ifndef _DT_BINDINGS_MFD_QCOM_RPM_H
+#define _DT_BINDINGS_MFD_QCOM_RPM_H
+
+/*
+ * Constants used to identify individual resources in the RPM.
+ */ +#define QCOM_RPM_APPS_FABRIC_ARB 1 +#define QCOM_RPM_APPS_FABRIC_CLK 2 +#define QCOM_RPM_APPS_FABRIC_HALT 3 +#define QCOM_RPM_APPS_FABRIC_IOCTL 4 +#define QCOM_RPM_APPS_FABRIC_MODE 5 +#define QCOM_RPM_APPS_L2_CACHE_CTL 6 +#define QCOM_RPM_CFPB_CLK 7 +#define QCOM_RPM_CXO_BUFFERS 8 +#define QCOM_RPM_CXO_CLK 9 +#define QCOM_RPM_DAYTONA_FABRIC_CLK 10 +#define QCOM_RPM_DDR_DMM 11 +#define QCOM_RPM_EBI1_CLK 12 +#define QCOM_RPM_HDMI_SWITCH 13 +#define QCOM_RPM_MMFPB_CLK 14 +#define QCOM_RPM_MM_FABRIC_ARB 15 +#define QCOM_RPM_MM_FABRIC_CLK 16 +#define QCOM_RPM_MM_FABRIC_HALT 17 +#define QCOM_RPM_MM_FABRIC_IOCTL 18 +#define QCOM_RPM_MM_FABRIC_MODE 19 +#define QCOM_RPM_PLL_4 20 +#define QCOM_RPM_PM8058_LDO0 21 +#define QCOM_RPM_PM8058_LDO1 22 +#define QCOM_RPM_PM8058_LDO2 23 +#define QCOM_RPM_PM8058_LDO3 24 +#define QCOM_RPM_PM8058_LDO4 25 +#define QCOM_RPM_PM8058_LDO5 26 +#define QCOM_RPM_PM8058_LDO6 27 +#define QCOM_RPM_PM8058_LDO7 28 +#define QCOM_RPM_PM8058_LDO8 29 +#define QCOM_RPM_PM8058_LDO9 30 +#define QCOM_RPM_PM8058_LDO10 31 +#define QCOM_RPM_PM8058_LDO11 32 +#define QCOM_RPM_PM8058_LDO12 33 +#define QCOM_RPM_PM8058_LDO13 34 +#define QCOM_RPM_PM8058_LDO14 35 +#define QCOM_RPM_PM8058_LDO15 36 +#define QCOM_RPM_PM8058_LDO16 37 +#define QCOM_RPM_PM8058_LDO17 38 +#define QCOM_RPM_PM8058_LDO18 39 +#define QCOM_RPM_PM8058_LDO19 40 +#define QCOM_RPM_PM8058_LDO20 41 +#define QCOM_RPM_PM8058_LDO21 42 +#define QCOM_RPM_PM8058_LDO22 43 +#define QCOM_RPM_PM8058_LDO23 44 +#define QCOM_RPM_PM8058_LDO24 45 +#define QCOM_RPM_PM8058_LDO25 46 +#define QCOM_RPM_PM8058_LVS0 47 +#define QCOM_RPM_PM8058_LVS1 48 +#define QCOM_RPM_PM8058_NCP 49 +#define QCOM_RPM_PM8058_SMPS0 50 +#define QCOM_RPM_PM8058_SMPS1 51 +#define QCOM_RPM_PM8058_SMPS2 52 +#define QCOM_RPM_PM8058_SMPS3 53 +#define QCOM_RPM_PM8058_SMPS4 54 +#define QCOM_RPM_PM8821_LDO1 55 +#define QCOM_RPM_PM8821_SMPS1 56 +#define QCOM_RPM_PM8821_SMPS2 57 +#define QCOM_RPM_PM8901_LDO0 58 +#define QCOM_RPM_PM8901_LDO1 59 +#define QCOM_RPM_PM8901_LDO2 60 +#define QCOM_RPM_PM8901_LDO3 61 +#define QCOM_RPM_PM8901_LDO4 62 +#define QCOM_RPM_PM8901_LDO5 63 +#define QCOM_RPM_PM8901_LDO6 64 +#define QCOM_RPM_PM8901_LVS0 65 +#define QCOM_RPM_PM8901_LVS1 66 +#define QCOM_RPM_PM8901_LVS2 67 +#define QCOM_RPM_PM8901_LVS3 68 +#define QCOM_RPM_PM8901_MVS 69 +#define QCOM_RPM_PM8901_SMPS0 70 +#define QCOM_RPM_PM8901_SMPS1 71 +#define QCOM_RPM_PM8901_SMPS2 72 +#define QCOM_RPM_PM8901_SMPS3 73 +#define QCOM_RPM_PM8901_SMPS4 74 +#define QCOM_RPM_PM8921_CLK1 75 +#define QCOM_RPM_PM8921_CLK2 76 +#define QCOM_RPM_PM8921_LDO1 77 +#define QCOM_RPM_PM8921_LDO2 78 +#define QCOM_RPM_PM8921_LDO3 79 +#define QCOM_RPM_PM8921_LDO4 80 +#define QCOM_RPM_PM8921_LDO5 81 +#define QCOM_RPM_PM8921_LDO6 82 +#define QCOM_RPM_PM8921_LDO7 83 +#define QCOM_RPM_PM8921_LDO8 84 +#define QCOM_RPM_PM8921_LDO9 85 +#define QCOM_RPM_PM8921_LDO10 86 +#define QCOM_RPM_PM8921_LDO11 87 +#define QCOM_RPM_PM8921_LDO12 88 +#define QCOM_RPM_PM8921_LDO13 89 +#define QCOM_RPM_PM8921_LDO14 90 +#define QCOM_RPM_PM8921_LDO15 91 +#define QCOM_RPM_PM8921_LDO16 92 +#define QCOM_RPM_PM8921_LDO17 93 +#define QCOM_RPM_PM8921_LDO18 94 +#define QCOM_RPM_PM8921_LDO19 95 +#define QCOM_RPM_PM8921_LDO20 96 +#define QCOM_RPM_PM8921_LDO21 97 +#define QCOM_RPM_PM8921_LDO22 98 +#define QCOM_RPM_PM8921_LDO23 99 +#define QCOM_RPM_PM8921_LDO24 100 +#define QCOM_RPM_PM8921_LDO25 101 +#define QCOM_RPM_PM8921_LDO26 102 +#define QCOM_RPM_PM8921_LDO27 103 +#define QCOM_RPM_PM8921_LDO28 104 +#define QCOM_RPM_PM8921_LDO29 105 +#define 
QCOM_RPM_PM8921_LVS1 106 +#define QCOM_RPM_PM8921_LVS2 107 +#define QCOM_RPM_PM8921_LVS3 108 +#define QCOM_RPM_PM8921_LVS4 109 +#define QCOM_RPM_PM8921_LVS5 110 +#define QCOM_RPM_PM8921_LVS6 111 +#define QCOM_RPM_PM8921_LVS7 112 +#define QCOM_RPM_PM8921_MVS 113 +#define QCOM_RPM_PM8921_NCP 114 +#define QCOM_RPM_PM8921_SMPS1 115 +#define QCOM_RPM_PM8921_SMPS2 116 +#define QCOM_RPM_PM8921_SMPS3 117 +#define QCOM_RPM_PM8921_SMPS4 118 +#define QCOM_RPM_PM8921_SMPS5 119 +#define QCOM_RPM_PM8921_SMPS6 120 +#define QCOM_RPM_PM8921_SMPS7 121 +#define QCOM_RPM_PM8921_SMPS8 122 +#define QCOM_RPM_PXO_CLK 123 +#define QCOM_RPM_QDSS_CLK 124 +#define QCOM_RPM_SFPB_CLK 125 +#define QCOM_RPM_SMI_CLK 126 +#define QCOM_RPM_SYS_FABRIC_ARB 127 +#define QCOM_RPM_SYS_FABRIC_CLK 128 +#define QCOM_RPM_SYS_FABRIC_HALT 129 +#define QCOM_RPM_SYS_FABRIC_IOCTL 130 +#define QCOM_RPM_SYS_FABRIC_MODE 131 +#define QCOM_RPM_USB_OTG_SWITCH 132 +#define QCOM_RPM_VDDMIN_GPIO 133 +#define QCOM_RPM_NSS_FABRIC_0_CLK 134 +#define QCOM_RPM_NSS_FABRIC_1_CLK 135 +#define QCOM_RPM_SMB208_S1a 136 +#define QCOM_RPM_SMB208_S1b 137 +#define QCOM_RPM_SMB208_S2a 138 +#define QCOM_RPM_SMB208_S2b 139 +#define QCOM_RPM_PM8018_SMPS1 140 +#define QCOM_RPM_PM8018_SMPS2 141 +#define QCOM_RPM_PM8018_SMPS3 142 +#define QCOM_RPM_PM8018_SMPS4 143 +#define QCOM_RPM_PM8018_SMPS5 144 +#define QCOM_RPM_PM8018_LDO1 145 +#define QCOM_RPM_PM8018_LDO2 146 +#define QCOM_RPM_PM8018_LDO3 147 +#define QCOM_RPM_PM8018_LDO4 148 +#define QCOM_RPM_PM8018_LDO5 149 +#define QCOM_RPM_PM8018_LDO6 150 +#define QCOM_RPM_PM8018_LDO7 151 +#define QCOM_RPM_PM8018_LDO8 152 +#define QCOM_RPM_PM8018_LDO9 153 +#define QCOM_RPM_PM8018_LDO10 154 +#define QCOM_RPM_PM8018_LDO11 155 +#define QCOM_RPM_PM8018_LDO12 156 +#define QCOM_RPM_PM8018_LDO13 157 +#define QCOM_RPM_PM8018_LDO14 158 +#define QCOM_RPM_PM8018_LVS1 159 +#define QCOM_RPM_PM8018_NCP 160 +#define QCOM_RPM_VOLTAGE_CORNER 161 + +/* + * Constants used to select force mode for regulators. + */ +#define QCOM_RPM_FORCE_MODE_NONE 0 +#define QCOM_RPM_FORCE_MODE_LPM 1 +#define QCOM_RPM_FORCE_MODE_HPM 2 +#define QCOM_RPM_FORCE_MODE_AUTO 3 +#define QCOM_RPM_FORCE_MODE_BYPASS 4 + +#endif diff --git a/include/dt-bindings/mfd/st,stpmic1.h b/include/dt-bindings/mfd/st,stpmic1.h new file mode 100644 index 0000000..321cd08 --- /dev/null +++ b/include/dt-bindings/mfd/st,stpmic1.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Philippe Peurichard , + * Pascal Paillet for STMicroelectronics. 
+ */
+
+#ifndef __DT_BINDINGS_STPMIC1_H__
+#define __DT_BINDINGS_STPMIC1_H__
+
+/* IRQ definitions */
+#define IT_PONKEY_F 0
+#define IT_PONKEY_R 1
+#define IT_WAKEUP_F 2
+#define IT_WAKEUP_R 3
+#define IT_VBUS_OTG_F 4
+#define IT_VBUS_OTG_R 5
+#define IT_SWOUT_F 6
+#define IT_SWOUT_R 7
+
+#define IT_CURLIM_BUCK1 8
+#define IT_CURLIM_BUCK2 9
+#define IT_CURLIM_BUCK3 10
+#define IT_CURLIM_BUCK4 11
+#define IT_OCP_OTG 12
+#define IT_OCP_SWOUT 13
+#define IT_OCP_BOOST 14
+#define IT_OVP_BOOST 15
+
+#define IT_CURLIM_LDO1 16
+#define IT_CURLIM_LDO2 17
+#define IT_CURLIM_LDO3 18
+#define IT_CURLIM_LDO4 19
+#define IT_CURLIM_LDO5 20
+#define IT_CURLIM_LDO6 21
+#define IT_SHORT_SWOTG 22
+#define IT_SHORT_SWOUT 23
+
+#define IT_TWARN_F 24
+#define IT_TWARN_R 25
+#define IT_VINLOW_F 26
+#define IT_VINLOW_R 27
+#define IT_SWIN_F 30
+#define IT_SWIN_R 31
+
+/* BUCK MODES definitions */
+#define STPMIC1_BUCK_MODE_NORMAL 0
+#define STPMIC1_BUCK_MODE_LP 2
+
+#endif /* __DT_BINDINGS_STPMIC1_H__ */
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
new file mode 100644
index 0000000..88a7f56
--- /dev/null
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides shared DT/Driver defines for ST's LPC device
+ *
+ * Copyright (C) 2014 STMicroelectronics -- All Rights Reserved
+ *
+ * Author: Lee Jones for STMicroelectronics
+ */
+
+#ifndef __DT_BINDINGS_ST_LPC_H__
+#define __DT_BINDINGS_ST_LPC_H__
+
+#define ST_LPC_MODE_RTC 0
+#define ST_LPC_MODE_WDT 1
+#define ST_LPC_MODE_CLKSRC 2
+
+#endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h
new file mode 100644
index 0000000..309e8c7
--- /dev/null
+++ b/include/dt-bindings/mfd/stm32f4-rcc.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the STM32F4 RCC IP
+ */
+
+#ifndef _DT_BINDINGS_MFD_STM32F4_RCC_H
+#define _DT_BINDINGS_MFD_STM32F4_RCC_H
+
+/* AHB1 */
+#define STM32F4_RCC_AHB1_GPIOA 0
+#define STM32F4_RCC_AHB1_GPIOB 1
+#define STM32F4_RCC_AHB1_GPIOC 2
+#define STM32F4_RCC_AHB1_GPIOD 3
+#define STM32F4_RCC_AHB1_GPIOE 4
+#define STM32F4_RCC_AHB1_GPIOF 5
+#define STM32F4_RCC_AHB1_GPIOG 6
+#define STM32F4_RCC_AHB1_GPIOH 7
+#define STM32F4_RCC_AHB1_GPIOI 8
+#define STM32F4_RCC_AHB1_GPIOJ 9
+#define STM32F4_RCC_AHB1_GPIOK 10
+#define STM32F4_RCC_AHB1_CRC 12
+#define STM32F4_RCC_AHB1_BKPSRAM 18
+#define STM32F4_RCC_AHB1_CCMDATARAM 20
+#define STM32F4_RCC_AHB1_DMA1 21
+#define STM32F4_RCC_AHB1_DMA2 22
+#define STM32F4_RCC_AHB1_DMA2D 23
+#define STM32F4_RCC_AHB1_ETHMAC 25
+#define STM32F4_RCC_AHB1_ETHMACTX 26
+#define STM32F4_RCC_AHB1_ETHMACRX 27
+#define STM32F4_RCC_AHB1_ETHMACPTP 28
+#define STM32F4_RCC_AHB1_OTGHS 29
+#define STM32F4_RCC_AHB1_OTGHSULPI 30
+
+#define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8))
+#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit)
+
+
+/* AHB2 */
+#define STM32F4_RCC_AHB2_DCMI 0
+#define STM32F4_RCC_AHB2_CRYP 4
+#define STM32F4_RCC_AHB2_HASH 5
+#define STM32F4_RCC_AHB2_RNG 6
+#define STM32F4_RCC_AHB2_OTGFS 7
+
+#define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8))
+#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + 0x20)
+
+/* AHB3 */
+#define STM32F4_RCC_AHB3_FMC 0
+#define STM32F4_RCC_AHB3_QSPI 1
+
+#define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8))
+#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + 0x40)
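A quick sanity check of how these helpers compose (a minimal sketch, assuming the header is reachable via -Iinclude; GPIOA and RNG are arbitrary example peripherals). The multiply-by-8 pattern suggests the reset index is the peripheral's bit number plus eight index slots per byte of the bank's RSTR register offset, and the clock indices are spaced per bank in the same flat numbering:

#include <stdio.h>
#include <dt-bindings/mfd/stm32f4-rcc.h>

int main(void)
{
	/* reset line: bit number + 8 * byte offset of the bank's RSTR register */
	printf("%d\n", STM32F4_AHB1_RESET(GPIOA));	/* 0 + 0x10 * 8 = 128 */
	printf("%d\n", STM32F4_AHB2_RESET(RNG));	/* 6 + 0x14 * 8 = 166 */
	/* clock gate: bit number + the bank's offset in the enable numbering */
	printf("%d\n", STM32F4_AHB2_CLOCK(RNG));	/* 6 + 0x20 = 38 */
	return 0;
}
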
STM32F4_RCC_APB1_TIM2 0 +#define STM32F4_RCC_APB1_TIM3 1 +#define STM32F4_RCC_APB1_TIM4 2 +#define STM32F4_RCC_APB1_TIM5 3 +#define STM32F4_RCC_APB1_TIM6 4 +#define STM32F4_RCC_APB1_TIM7 5 +#define STM32F4_RCC_APB1_TIM12 6 +#define STM32F4_RCC_APB1_TIM13 7 +#define STM32F4_RCC_APB1_TIM14 8 +#define STM32F4_RCC_APB1_WWDG 11 +#define STM32F4_RCC_APB1_SPI2 14 +#define STM32F4_RCC_APB1_SPI3 15 +#define STM32F4_RCC_APB1_UART2 17 +#define STM32F4_RCC_APB1_UART3 18 +#define STM32F4_RCC_APB1_UART4 19 +#define STM32F4_RCC_APB1_UART5 20 +#define STM32F4_RCC_APB1_I2C1 21 +#define STM32F4_RCC_APB1_I2C2 22 +#define STM32F4_RCC_APB1_I2C3 23 +#define STM32F4_RCC_APB1_CAN1 25 +#define STM32F4_RCC_APB1_CAN2 26 +#define STM32F4_RCC_APB1_PWR 28 +#define STM32F4_RCC_APB1_DAC 29 +#define STM32F4_RCC_APB1_UART7 30 +#define STM32F4_RCC_APB1_UART8 31 + +#define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8)) +#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + 0x80) + +/* APB2 */ +#define STM32F4_RCC_APB2_TIM1 0 +#define STM32F4_RCC_APB2_TIM8 1 +#define STM32F4_RCC_APB2_USART1 4 +#define STM32F4_RCC_APB2_USART6 5 +#define STM32F4_RCC_APB2_ADC1 8 +#define STM32F4_RCC_APB2_ADC2 9 +#define STM32F4_RCC_APB2_ADC3 10 +#define STM32F4_RCC_APB2_SDIO 11 +#define STM32F4_RCC_APB2_SPI1 12 +#define STM32F4_RCC_APB2_SPI4 13 +#define STM32F4_RCC_APB2_SYSCFG 14 +#define STM32F4_RCC_APB2_TIM9 16 +#define STM32F4_RCC_APB2_TIM10 17 +#define STM32F4_RCC_APB2_TIM11 18 +#define STM32F4_RCC_APB2_SPI5 20 +#define STM32F4_RCC_APB2_SPI6 21 +#define STM32F4_RCC_APB2_SAI1 22 +#define STM32F4_RCC_APB2_LTDC 26 +#define STM32F4_RCC_APB2_DSI 27 + +#define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8)) +#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + 0xA0) + +#endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */ diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h new file mode 100644 index 0000000..a90f361 --- /dev/null +++ b/include/dt-bindings/mfd/stm32f7-rcc.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the STM32F7 RCC IP + */ + +#ifndef _DT_BINDINGS_MFD_STM32F7_RCC_H +#define _DT_BINDINGS_MFD_STM32F7_RCC_H + +/* AHB1 */ +#define STM32F7_RCC_AHB1_GPIOA 0 +#define STM32F7_RCC_AHB1_GPIOB 1 +#define STM32F7_RCC_AHB1_GPIOC 2 +#define STM32F7_RCC_AHB1_GPIOD 3 +#define STM32F7_RCC_AHB1_GPIOE 4 +#define STM32F7_RCC_AHB1_GPIOF 5 +#define STM32F7_RCC_AHB1_GPIOG 6 +#define STM32F7_RCC_AHB1_GPIOH 7 +#define STM32F7_RCC_AHB1_GPIOI 8 +#define STM32F7_RCC_AHB1_GPIOJ 9 +#define STM32F7_RCC_AHB1_GPIOK 10 +#define STM32F7_RCC_AHB1_CRC 12 +#define STM32F7_RCC_AHB1_BKPSRAM 18 +#define STM32F7_RCC_AHB1_DTCMRAM 20 +#define STM32F7_RCC_AHB1_DMA1 21 +#define STM32F7_RCC_AHB1_DMA2 22 +#define STM32F7_RCC_AHB1_DMA2D 23 +#define STM32F7_RCC_AHB1_ETHMAC 25 +#define STM32F7_RCC_AHB1_ETHMACTX 26 +#define STM32F7_RCC_AHB1_ETHMACRX 27 +#define STM32F7_RCC_AHB1_ETHMACPTP 28 +#define STM32F7_RCC_AHB1_OTGHS 29 +#define STM32F7_RCC_AHB1_OTGHSULPI 30 + +#define STM32F7_AHB1_RESET(bit) (STM32F7_RCC_AHB1_##bit + (0x10 * 8)) +#define STM32F7_AHB1_CLOCK(bit) (STM32F7_RCC_AHB1_##bit) + + +/* AHB2 */ +#define STM32F7_RCC_AHB2_DCMI 0 +#define STM32F7_RCC_AHB2_CRYP 4 +#define STM32F7_RCC_AHB2_HASH 5 +#define STM32F7_RCC_AHB2_RNG 6 +#define STM32F7_RCC_AHB2_OTGFS 7 + +#define STM32F7_AHB2_RESET(bit) (STM32F7_RCC_AHB2_##bit + (0x14 * 8)) +#define STM32F7_AHB2_CLOCK(bit) (STM32F7_RCC_AHB2_##bit + 0x20) + +/* AHB3 */ +#define STM32F7_RCC_AHB3_FMC 0 +#define
STM32F7_RCC_AHB3_QSPI 1 + +#define STM32F7_AHB3_RESET(bit) (STM32F7_RCC_AHB3_##bit + (0x18 * 8)) +#define STM32F7_AHB3_CLOCK(bit) (STM32F7_RCC_AHB3_##bit + 0x40) + +/* APB1 */ +#define STM32F7_RCC_APB1_TIM2 0 +#define STM32F7_RCC_APB1_TIM3 1 +#define STM32F7_RCC_APB1_TIM4 2 +#define STM32F7_RCC_APB1_TIM5 3 +#define STM32F7_RCC_APB1_TIM6 4 +#define STM32F7_RCC_APB1_TIM7 5 +#define STM32F7_RCC_APB1_TIM12 6 +#define STM32F7_RCC_APB1_TIM13 7 +#define STM32F7_RCC_APB1_TIM14 8 +#define STM32F7_RCC_APB1_LPTIM1 9 +#define STM32F7_RCC_APB1_WWDG 11 +#define STM32F7_RCC_APB1_SPI2 14 +#define STM32F7_RCC_APB1_SPI3 15 +#define STM32F7_RCC_APB1_SPDIFRX 16 +#define STM32F7_RCC_APB1_UART2 17 +#define STM32F7_RCC_APB1_UART3 18 +#define STM32F7_RCC_APB1_UART4 19 +#define STM32F7_RCC_APB1_UART5 20 +#define STM32F7_RCC_APB1_I2C1 21 +#define STM32F7_RCC_APB1_I2C2 22 +#define STM32F7_RCC_APB1_I2C3 23 +#define STM32F7_RCC_APB1_I2C4 24 +#define STM32F7_RCC_APB1_CAN1 25 +#define STM32F7_RCC_APB1_CAN2 26 +#define STM32F7_RCC_APB1_CEC 27 +#define STM32F7_RCC_APB1_PWR 28 +#define STM32F7_RCC_APB1_DAC 29 +#define STM32F7_RCC_APB1_UART7 30 +#define STM32F7_RCC_APB1_UART8 31 + +#define STM32F7_APB1_RESET(bit) (STM32F7_RCC_APB1_##bit + (0x20 * 8)) +#define STM32F7_APB1_CLOCK(bit) (STM32F7_RCC_APB1_##bit + 0x80) + +/* APB2 */ +#define STM32F7_RCC_APB2_TIM1 0 +#define STM32F7_RCC_APB2_TIM8 1 +#define STM32F7_RCC_APB2_USART1 4 +#define STM32F7_RCC_APB2_USART6 5 +#define STM32F7_RCC_APB2_SDMMC2 7 +#define STM32F7_RCC_APB2_ADC1 8 +#define STM32F7_RCC_APB2_ADC2 9 +#define STM32F7_RCC_APB2_ADC3 10 +#define STM32F7_RCC_APB2_SDMMC1 11 +#define STM32F7_RCC_APB2_SPI1 12 +#define STM32F7_RCC_APB2_SPI4 13 +#define STM32F7_RCC_APB2_SYSCFG 14 +#define STM32F7_RCC_APB2_TIM9 16 +#define STM32F7_RCC_APB2_TIM10 17 +#define STM32F7_RCC_APB2_TIM11 18 +#define STM32F7_RCC_APB2_SPI5 20 +#define STM32F7_RCC_APB2_SPI6 21 +#define STM32F7_RCC_APB2_SAI1 22 +#define STM32F7_RCC_APB2_SAI2 23 +#define STM32F7_RCC_APB2_LTDC 26 + +#define STM32F7_APB2_RESET(bit) (STM32F7_RCC_APB2_##bit + (0x24 * 8)) +#define STM32F7_APB2_CLOCK(bit) (STM32F7_RCC_APB2_##bit + 0xA0) + +#endif /* _DT_BINDINGS_MFD_STM32F7_RCC_H */ diff --git a/include/dt-bindings/mfd/stm32h7-rcc.h b/include/dt-bindings/mfd/stm32h7-rcc.h new file mode 100644 index 0000000..461a8e0 --- /dev/null +++ b/include/dt-bindings/mfd/stm32h7-rcc.h @@ -0,0 +1,136 @@ +/* + * This header provides constants for the STM32H7 RCC IP + */ + +#ifndef _DT_BINDINGS_MFD_STM32H7_RCC_H +#define _DT_BINDINGS_MFD_STM32H7_RCC_H + +/* AHB3 */ +#define STM32H7_RCC_AHB3_MDMA 0 +#define STM32H7_RCC_AHB3_DMA2D 4 +#define STM32H7_RCC_AHB3_JPGDEC 5 +#define STM32H7_RCC_AHB3_FMC 12 +#define STM32H7_RCC_AHB3_QUADSPI 14 +#define STM32H7_RCC_AHB3_SDMMC1 16 +#define STM32H7_RCC_AHB3_CPU 31 + +#define STM32H7_AHB3_RESET(bit) (STM32H7_RCC_AHB3_##bit + (0x7C * 8)) + +/* AHB1 */ +#define STM32H7_RCC_AHB1_DMA1 0 +#define STM32H7_RCC_AHB1_DMA2 1 +#define STM32H7_RCC_AHB1_ADC12 5 +#define STM32H7_RCC_AHB1_ART 14 +#define STM32H7_RCC_AHB1_ETH1MAC 15 +#define STM32H7_RCC_AHB1_USB1OTG 25 +#define STM32H7_RCC_AHB1_USB2OTG 27 + +#define STM32H7_AHB1_RESET(bit) (STM32H7_RCC_AHB1_##bit + (0x80 * 8)) + +/* AHB2 */ +#define STM32H7_RCC_AHB2_CAMITF 0 +#define STM32H7_RCC_AHB2_CRYPT 4 +#define STM32H7_RCC_AHB2_HASH 5 +#define STM32H7_RCC_AHB2_RNG 6 +#define STM32H7_RCC_AHB2_SDMMC2 9 + +#define STM32H7_AHB2_RESET(bit) (STM32H7_RCC_AHB2_##bit + (0x84 * 8)) + +/* AHB4 */ +#define STM32H7_RCC_AHB4_GPIOA 0 +#define STM32H7_RCC_AHB4_GPIOB 1 +#define 
STM32H7_RCC_AHB4_GPIOC 2 +#define STM32H7_RCC_AHB4_GPIOD 3 +#define STM32H7_RCC_AHB4_GPIOE 4 +#define STM32H7_RCC_AHB4_GPIOF 5 +#define STM32H7_RCC_AHB4_GPIOG 6 +#define STM32H7_RCC_AHB4_GPIOH 7 +#define STM32H7_RCC_AHB4_GPIOI 8 +#define STM32H7_RCC_AHB4_GPIOJ 9 +#define STM32H7_RCC_AHB4_GPIOK 10 +#define STM32H7_RCC_AHB4_CRC 19 +#define STM32H7_RCC_AHB4_BDMA 21 +#define STM32H7_RCC_AHB4_ADC3 24 +#define STM32H7_RCC_AHB4_HSEM 25 + +#define STM32H7_AHB4_RESET(bit) (STM32H7_RCC_AHB4_##bit + (0x88 * 8)) + +/* APB3 */ +#define STM32H7_RCC_APB3_LTDC 3 +#define STM32H7_RCC_APB3_DSI 4 + +#define STM32H7_APB3_RESET(bit) (STM32H7_RCC_APB3_##bit + (0x8C * 8)) + +/* APB1L */ +#define STM32H7_RCC_APB1L_TIM2 0 +#define STM32H7_RCC_APB1L_TIM3 1 +#define STM32H7_RCC_APB1L_TIM4 2 +#define STM32H7_RCC_APB1L_TIM5 3 +#define STM32H7_RCC_APB1L_TIM6 4 +#define STM32H7_RCC_APB1L_TIM7 5 +#define STM32H7_RCC_APB1L_TIM12 6 +#define STM32H7_RCC_APB1L_TIM13 7 +#define STM32H7_RCC_APB1L_TIM14 8 +#define STM32H7_RCC_APB1L_LPTIM1 9 +#define STM32H7_RCC_APB1L_SPI2 14 +#define STM32H7_RCC_APB1L_SPI3 15 +#define STM32H7_RCC_APB1L_SPDIF_RX 16 +#define STM32H7_RCC_APB1L_USART2 17 +#define STM32H7_RCC_APB1L_USART3 18 +#define STM32H7_RCC_APB1L_UART4 19 +#define STM32H7_RCC_APB1L_UART5 20 +#define STM32H7_RCC_APB1L_I2C1 21 +#define STM32H7_RCC_APB1L_I2C2 22 +#define STM32H7_RCC_APB1L_I2C3 23 +#define STM32H7_RCC_APB1L_HDMICEC 27 +#define STM32H7_RCC_APB1L_DAC12 29 +#define STM32H7_RCC_APB1L_USART7 30 +#define STM32H7_RCC_APB1L_USART8 31 + +#define STM32H7_APB1L_RESET(bit) (STM32H7_RCC_APB1L_##bit + (0x90 * 8)) + +/* APB1H */ +#define STM32H7_RCC_APB1H_CRS 1 +#define STM32H7_RCC_APB1H_SWP 2 +#define STM32H7_RCC_APB1H_OPAMP 4 +#define STM32H7_RCC_APB1H_MDIOS 5 +#define STM32H7_RCC_APB1H_FDCAN 8 + +#define STM32H7_APB1H_RESET(bit) (STM32H7_RCC_APB1H_##bit + (0x94 * 8)) + +/* APB2 */ +#define STM32H7_RCC_APB2_TIM1 0 +#define STM32H7_RCC_APB2_TIM8 1 +#define STM32H7_RCC_APB2_USART1 4 +#define STM32H7_RCC_APB2_USART6 5 +#define STM32H7_RCC_APB2_SPI1 12 +#define STM32H7_RCC_APB2_SPI4 13 +#define STM32H7_RCC_APB2_TIM15 16 +#define STM32H7_RCC_APB2_TIM16 17 +#define STM32H7_RCC_APB2_TIM17 18 +#define STM32H7_RCC_APB2_SPI5 20 +#define STM32H7_RCC_APB2_SAI1 22 +#define STM32H7_RCC_APB2_SAI2 23 +#define STM32H7_RCC_APB2_SAI3 24 +#define STM32H7_RCC_APB2_DFSDM1 28 +#define STM32H7_RCC_APB2_HRTIM 29 + +#define STM32H7_APB2_RESET(bit) (STM32H7_RCC_APB2_##bit + (0x98 * 8)) + +/* APB4 */ +#define STM32H7_RCC_APB4_SYSCFG 1 +#define STM32H7_RCC_APB4_LPUART1 3 +#define STM32H7_RCC_APB4_SPI6 5 +#define STM32H7_RCC_APB4_I2C4 7 +#define STM32H7_RCC_APB4_LPTIM2 9 +#define STM32H7_RCC_APB4_LPTIM3 10 +#define STM32H7_RCC_APB4_LPTIM4 11 +#define STM32H7_RCC_APB4_LPTIM5 12 +#define STM32H7_RCC_APB4_COMP12 14 +#define STM32H7_RCC_APB4_VREF 15 +#define STM32H7_RCC_APB4_SAI4 21 +#define STM32H7_RCC_APB4_TMPSENS 26 + +#define STM32H7_APB4_RESET(bit) (STM32H7_RCC_APB4_##bit + (0x9C * 8)) + +#endif /* _DT_BINDINGS_MFD_STM32H7_RCC_H */ diff --git a/include/dt-bindings/mips/lantiq_rcu_gphy.h b/include/dt-bindings/mips/lantiq_rcu_gphy.h new file mode 100644 index 0000000..7756d66 --- /dev/null +++ b/include/dt-bindings/mips/lantiq_rcu_gphy.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ +#ifndef _DT_BINDINGS_MIPS_LANTIQ_RCU_GPHY_H +#define _DT_BINDINGS_MIPS_LANTIQ_RCU_GPHY_H + +#define GPHY_MODE_GE 1 +#define GPHY_MODE_FE 2 + +#endif /* 
_DT_BINDINGS_MIPS_LANTIQ_RCU_GPHY_H */ diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h new file mode 100644 index 0000000..0427192 --- /dev/null +++ b/include/dt-bindings/mux/mux.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for most Multiplexer bindings. + * + * Most Multiplexer bindings specify an idle state. In most cases, the + * multiplexer can be left as is when idle, and in some cases it can + * disconnect the input/output and leave the multiplexer in a high + * impedance state. + */ + +#ifndef _DT_BINDINGS_MUX_MUX_H +#define _DT_BINDINGS_MUX_MUX_H + +#define MUX_IDLE_AS_IS (-1) +#define MUX_IDLE_DISCONNECT (-2) + +#endif diff --git a/include/dt-bindings/net/microchip-lan78xx.h b/include/dt-bindings/net/microchip-lan78xx.h new file mode 100644 index 0000000..0742ff0 --- /dev/null +++ b/include/dt-bindings/net/microchip-lan78xx.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_MICROCHIP_LAN78XX_H +#define _DT_BINDINGS_MICROCHIP_LAN78XX_H + +/* LED modes for LAN7800/LAN7850 embedded PHY */ + +#define LAN78XX_LINK_ACTIVITY 0 +#define LAN78XX_LINK_1000_ACTIVITY 1 +#define LAN78XX_LINK_100_ACTIVITY 2 +#define LAN78XX_LINK_10_ACTIVITY 3 +#define LAN78XX_LINK_100_1000_ACTIVITY 4 +#define LAN78XX_LINK_10_1000_ACTIVITY 5 +#define LAN78XX_LINK_10_100_ACTIVITY 6 +#define LAN78XX_DUPLEX_COLLISION 8 +#define LAN78XX_COLLISION 9 +#define LAN78XX_ACTIVITY 10 +#define LAN78XX_AUTONEG_FAULT 12 +#define LAN78XX_FORCE_LED_OFF 14 +#define LAN78XX_FORCE_LED_ON 15 + +#endif diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h new file mode 100644 index 0000000..9eb2ec2 --- /dev/null +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -0,0 +1,31 @@ +/* + * Device Tree constants for Microsemi VSC8531 PHY + * + * Author: Nagaraju Lakkaraju + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _DT_BINDINGS_MSCC_VSC8531_H +#define _DT_BINDINGS_MSCC_VSC8531_H + +/* PHY LED Modes */ +#define VSC8531_LINK_ACTIVITY 0 +#define VSC8531_LINK_1000_ACTIVITY 1 +#define VSC8531_LINK_100_ACTIVITY 2 +#define VSC8531_LINK_10_ACTIVITY 3 +#define VSC8531_LINK_100_1000_ACTIVITY 4 +#define VSC8531_LINK_10_1000_ACTIVITY 5 +#define VSC8531_LINK_10_100_ACTIVITY 6 +#define VSC8584_LINK_100FX_1000X_ACTIVITY 7 +#define VSC8531_DUPLEX_COLLISION 8 +#define VSC8531_COLLISION 9 +#define VSC8531_ACTIVITY 10 +#define VSC8584_100FX_1000X_ACTIVITY 11 +#define VSC8531_AUTONEG_FAULT 12 +#define VSC8531_SERIAL_MODE 13 +#define VSC8531_FORCE_LED_OFF 14 +#define VSC8531_FORCE_LED_ON 15 + +#endif diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h new file mode 100644 index 0000000..6fc4b44 --- /dev/null +++ b/include/dt-bindings/net/ti-dp83867.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Device Tree constants for the Texas Instruments DP83867 PHY + * + * Author: Dan Murphy + * + * Copyright: (C) 2015 Texas Instruments, Inc.
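+ *
+ * Illustrative sketch, not part of the original header: the constants
+ * below are meant to be passed through PHY properties in a board .dts
+ * (property names taken from the ti,dp83867 binding document; the phy0
+ * label is a placeholder):
+ *
+ *	&phy0 {
+ *		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ *		ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
+ *		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ *	};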
+ */ + +#ifndef _DT_BINDINGS_TI_DP83867_H +#define _DT_BINDINGS_TI_DP83867_H + +/* PHY CTRL bits */ +#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00 +#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01 +#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02 +#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03 + +/* RGMIIDCTL internal delay for rx and tx */ +#define DP83867_RGMIIDCTL_250_PS 0x0 +#define DP83867_RGMIIDCTL_500_PS 0x1 +#define DP83867_RGMIIDCTL_750_PS 0x2 +#define DP83867_RGMIIDCTL_1_NS 0x3 +#define DP83867_RGMIIDCTL_1_25_NS 0x4 +#define DP83867_RGMIIDCTL_1_50_NS 0x5 +#define DP83867_RGMIIDCTL_1_75_NS 0x6 +#define DP83867_RGMIIDCTL_2_00_NS 0x7 +#define DP83867_RGMIIDCTL_2_25_NS 0x8 +#define DP83867_RGMIIDCTL_2_50_NS 0x9 +#define DP83867_RGMIIDCTL_2_75_NS 0xa +#define DP83867_RGMIIDCTL_3_00_NS 0xb +#define DP83867_RGMIIDCTL_3_25_NS 0xc +#define DP83867_RGMIIDCTL_3_50_NS 0xd +#define DP83867_RGMIIDCTL_3_75_NS 0xe +#define DP83867_RGMIIDCTL_4_00_NS 0xf + +/* IO_MUX_CFG - Clock output selection */ +#define DP83867_CLK_O_SEL_CHN_A_RCLK 0x0 +#define DP83867_CLK_O_SEL_CHN_B_RCLK 0x1 +#define DP83867_CLK_O_SEL_CHN_C_RCLK 0x2 +#define DP83867_CLK_O_SEL_CHN_D_RCLK 0x3 +#define DP83867_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4 +#define DP83867_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5 +#define DP83867_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6 +#define DP83867_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7 +#define DP83867_CLK_O_SEL_CHN_A_TCLK 0x8 +#define DP83867_CLK_O_SEL_CHN_B_TCLK 0x9 +#define DP83867_CLK_O_SEL_CHN_C_TCLK 0xA +#define DP83867_CLK_O_SEL_CHN_D_TCLK 0xB +#define DP83867_CLK_O_SEL_REF_CLK 0xC +/* Special flag to indicate clock should be off */ +#define DP83867_CLK_O_SEL_OFF 0xFFFFFFFF +#endif diff --git a/include/dt-bindings/phy/phy-am654-serdes.h b/include/dt-bindings/phy/phy-am654-serdes.h new file mode 100644 index 0000000..e8d9017 --- /dev/null +++ b/include/dt-bindings/phy/phy-am654-serdes.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for AM654 SERDES. 
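+ *
+ * The constants below index the SERDES reference-clock inputs. A rough,
+ * hedged sketch of selecting one as a mux parent in a .dts (the serdes0
+ * and k3_clks phandles and the parent clock specifier are placeholders):
+ *
+ *	assigned-clocks = <&serdes0 AM654_SERDES_CMU_REFCLK>;
+ *	assigned-clock-parents = <&k3_clks 153 4>;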
+ */ + +#ifndef _DT_BINDINGS_AM654_SERDES +#define _DT_BINDINGS_AM654_SERDES + +#define AM654_SERDES_CMU_REFCLK 0 +#define AM654_SERDES_LO_REFCLK 1 +#define AM654_SERDES_RO_REFCLK 2 + +#endif /* _DT_BINDINGS_AM654_SERDES */ diff --git a/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h b/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h new file mode 100644 index 0000000..95a7896 --- /dev/null +++ b/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 Martin Blumenstingl + */ + +#define LANTIQ_PCIE_PHY_MODE_25MHZ 0 +#define LANTIQ_PCIE_PHY_MODE_25MHZ_SSC 1 +#define LANTIQ_PCIE_PHY_MODE_36MHZ 2 +#define LANTIQ_PCIE_PHY_MODE_36MHZ_SSC 3 +#define LANTIQ_PCIE_PHY_MODE_100MHZ 4 +#define LANTIQ_PCIE_PHY_MODE_100MHZ_SSC 5 diff --git a/include/dt-bindings/phy/phy-ocelot-serdes.h b/include/dt-bindings/phy/phy-ocelot-serdes.h new file mode 100644 index 0000000..fe70ada --- /dev/null +++ b/include/dt-bindings/phy/phy-ocelot-serdes.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* Copyright (c) 2018 Microsemi Corporation */ +#ifndef __PHY_OCELOT_SERDES_H__ +#define __PHY_OCELOT_SERDES_H__ + +#define SERDES1G(x) (x) +#define SERDES1G_MAX SERDES1G(5) +#define SERDES6G(x) (SERDES1G_MAX + 1 + (x)) +#define SERDES6G_MAX SERDES6G(2) +#define SERDES_MAX (SERDES6G_MAX + 1) + +#endif diff --git a/include/dt-bindings/phy/phy-pistachio-usb.h b/include/dt-bindings/phy/phy-pistachio-usb.h new file mode 100644 index 0000000..3542a67 --- /dev/null +++ b/include/dt-bindings/phy/phy-pistachio-usb.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Google, Inc. + */ + +#ifndef _DT_BINDINGS_PHY_PISTACHIO +#define _DT_BINDINGS_PHY_PISTACHIO + +#define REFCLK_XO_CRYSTAL 0x0 +#define REFCLK_X0_EXT_CLK 0x1 +#define REFCLK_CLK_CORE 0x2 + +#endif /* _DT_BINDINGS_PHY_PISTACHIO */ diff --git a/include/dt-bindings/phy/phy-qcom-qusb2.h b/include/dt-bindings/phy/phy-qcom-qusb2.h new file mode 100644 index 0000000..5c5e4d8 --- /dev/null +++ b/include/dt-bindings/phy/phy-qcom-qusb2.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
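+ *
+ * Illustrative sketch, not part of the original header: a board .dts can
+ * tune a QUSB2 V2 PHY with these constants (property names as used by the
+ * Qualcomm QUSB2 PHY binding; the usb_phy label is a placeholder):
+ *
+ *	&usb_phy {
+ *		qcom,hstx-trim-value = <QUSB2_V2_HSTX_TRIM_21_6_MA>;
+ *		qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_5_PERCENT>;
+ *		qcom,preemphasis-width = <QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT>;
+ *	};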
+ */ + +#ifndef _DT_BINDINGS_QCOM_PHY_QUSB2_H_ +#define _DT_BINDINGS_QCOM_PHY_QUSB2_H_ + +/* PHY HSTX TRIM bit values (24mA to 15mA) */ +#define QUSB2_V2_HSTX_TRIM_24_0_MA 0x0 +#define QUSB2_V2_HSTX_TRIM_23_4_MA 0x1 +#define QUSB2_V2_HSTX_TRIM_22_8_MA 0x2 +#define QUSB2_V2_HSTX_TRIM_22_2_MA 0x3 +#define QUSB2_V2_HSTX_TRIM_21_6_MA 0x4 +#define QUSB2_V2_HSTX_TRIM_21_0_MA 0x5 +#define QUSB2_V2_HSTX_TRIM_20_4_MA 0x6 +#define QUSB2_V2_HSTX_TRIM_19_8_MA 0x7 +#define QUSB2_V2_HSTX_TRIM_19_2_MA 0x8 +#define QUSB2_V2_HSTX_TRIM_18_6_MA 0x9 +#define QUSB2_V2_HSTX_TRIM_18_0_MA 0xa +#define QUSB2_V2_HSTX_TRIM_17_4_MA 0xb +#define QUSB2_V2_HSTX_TRIM_16_8_MA 0xc +#define QUSB2_V2_HSTX_TRIM_16_2_MA 0xd +#define QUSB2_V2_HSTX_TRIM_15_6_MA 0xe +#define QUSB2_V2_HSTX_TRIM_15_0_MA 0xf + +/* PHY PREEMPHASIS bit values */ +#define QUSB2_V2_PREEMPHASIS_NONE 0 +#define QUSB2_V2_PREEMPHASIS_5_PERCENT 1 +#define QUSB2_V2_PREEMPHASIS_10_PERCENT 2 +#define QUSB2_V2_PREEMPHASIS_15_PERCENT 3 + +/* PHY PREEMPHASIS-WIDTH bit values */ +#define QUSB2_V2_PREEMPHASIS_WIDTH_FULL_BIT 0 +#define QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT 1 + +#endif diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h new file mode 100644 index 0000000..b6a1eaf --- /dev/null +++ b/include/dt-bindings/phy/phy.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * This header provides constants for the phy framework + * + * Copyright (C) 2014 STMicroelectronics + * Author: Gabriel Fernandez + */ + +#ifndef _DT_BINDINGS_PHY +#define _DT_BINDINGS_PHY + +#define PHY_NONE 0 +#define PHY_TYPE_SATA 1 +#define PHY_TYPE_PCIE 2 +#define PHY_TYPE_USB2 3 +#define PHY_TYPE_USB3 4 +#define PHY_TYPE_UFS 5 + +#endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h new file mode 100644 index 0000000..17877e8 --- /dev/null +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants specific to AM33XX pinctrl bindings. 
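+ *
+ * Illustrative sketch, not part of the original header: the AM335X_PIN_*
+ * offsets below are absolute (0x800-based), so one common pattern is to
+ * rebase them against AM335X_PIN_OFFSET_MIN in a pinctrl-single entry;
+ * MUX_MODE0 comes from the included omap.h:
+ *
+ *	pinctrl-single,pins = <
+ *		(AM335X_PIN_UART0_RXD - AM335X_PIN_OFFSET_MIN) (PIN_INPUT_PULLUP | MUX_MODE0)
+ *	>;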
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_AM33XX_H +#define _DT_BINDINGS_PINCTRL_AM33XX_H + +#include <dt-bindings/pinctrl/omap.h> + +/* am33xx specific mux bit defines */ +#undef PULL_ENA +#undef INPUT_EN + +#define PULL_DISABLE (1 << 3) +#define INPUT_EN (1 << 5) +#define SLEWCTRL_SLOW (1 << 6) +#define SLEWCTRL_FAST 0 + +/* update macro depending on INPUT_EN and PULL_ENA */ +#undef PIN_OUTPUT +#undef PIN_OUTPUT_PULLUP +#undef PIN_OUTPUT_PULLDOWN +#undef PIN_INPUT +#undef PIN_INPUT_PULLUP +#undef PIN_INPUT_PULLDOWN + +#define PIN_OUTPUT (PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN 0 +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN) + +/* undef non-existing modes */ +#undef PIN_OFF_NONE +#undef PIN_OFF_OUTPUT_HIGH +#undef PIN_OFF_OUTPUT_LOW +#undef PIN_OFF_INPUT_PULLUP +#undef PIN_OFF_INPUT_PULLDOWN +#undef PIN_OFF_WAKEUPENABLE + +#define AM335X_PIN_OFFSET_MIN 0x0800U + +#define AM335X_PIN_GPMC_AD0 0x800 +#define AM335X_PIN_GPMC_AD1 0x804 +#define AM335X_PIN_GPMC_AD2 0x808 +#define AM335X_PIN_GPMC_AD3 0x80c +#define AM335X_PIN_GPMC_AD4 0x810 +#define AM335X_PIN_GPMC_AD5 0x814 +#define AM335X_PIN_GPMC_AD6 0x818 +#define AM335X_PIN_GPMC_AD7 0x81c +#define AM335X_PIN_GPMC_AD8 0x820 +#define AM335X_PIN_GPMC_AD9 0x824 +#define AM335X_PIN_GPMC_AD10 0x828 +#define AM335X_PIN_GPMC_AD11 0x82c +#define AM335X_PIN_GPMC_AD12 0x830 +#define AM335X_PIN_GPMC_AD13 0x834 +#define AM335X_PIN_GPMC_AD14 0x838 +#define AM335X_PIN_GPMC_AD15 0x83c +#define AM335X_PIN_GPMC_A0 0x840 +#define AM335X_PIN_GPMC_A1 0x844 +#define AM335X_PIN_GPMC_A2 0x848 +#define AM335X_PIN_GPMC_A3 0x84c +#define AM335X_PIN_GPMC_A4 0x850 +#define AM335X_PIN_GPMC_A5 0x854 +#define AM335X_PIN_GPMC_A6 0x858 +#define AM335X_PIN_GPMC_A7 0x85c +#define AM335X_PIN_GPMC_A8 0x860 +#define AM335X_PIN_GPMC_A9 0x864 +#define AM335X_PIN_GPMC_A10 0x868 +#define AM335X_PIN_GPMC_A11 0x86c +#define AM335X_PIN_GPMC_WAIT0 0x870 +#define AM335X_PIN_GPMC_WPN 0x874 +#define AM335X_PIN_GPMC_BEN1 0x878 +#define AM335X_PIN_GPMC_CSN0 0x87c +#define AM335X_PIN_GPMC_CSN1 0x880 +#define AM335X_PIN_GPMC_CSN2 0x884 +#define AM335X_PIN_GPMC_CSN3 0x888 +#define AM335X_PIN_GPMC_CLK 0x88c +#define AM335X_PIN_GPMC_ADVN_ALE 0x890 +#define AM335X_PIN_GPMC_OEN_REN 0x894 +#define AM335X_PIN_GPMC_WEN 0x898 +#define AM335X_PIN_GPMC_BEN0_CLE 0x89c +#define AM335X_PIN_LCD_DATA0 0x8a0 +#define AM335X_PIN_LCD_DATA1 0x8a4 +#define AM335X_PIN_LCD_DATA2 0x8a8 +#define AM335X_PIN_LCD_DATA3 0x8ac +#define AM335X_PIN_LCD_DATA4 0x8b0 +#define AM335X_PIN_LCD_DATA5 0x8b4 +#define AM335X_PIN_LCD_DATA6 0x8b8 +#define AM335X_PIN_LCD_DATA7 0x8bc +#define AM335X_PIN_LCD_DATA8 0x8c0 +#define AM335X_PIN_LCD_DATA9 0x8c4 +#define AM335X_PIN_LCD_DATA10 0x8c8 +#define AM335X_PIN_LCD_DATA11 0x8cc +#define AM335X_PIN_LCD_DATA12 0x8d0 +#define AM335X_PIN_LCD_DATA13 0x8d4 +#define AM335X_PIN_LCD_DATA14 0x8d8 +#define AM335X_PIN_LCD_DATA15 0x8dc +#define AM335X_PIN_LCD_VSYNC 0x8e0 +#define AM335X_PIN_LCD_HSYNC 0x8e4 +#define AM335X_PIN_LCD_PCLK 0x8e8 +#define AM335X_PIN_LCD_AC_BIAS_EN 0x8ec +#define AM335X_PIN_MMC0_DAT3 0x8f0 +#define AM335X_PIN_MMC0_DAT2 0x8f4 +#define AM335X_PIN_MMC0_DAT1 0x8f8 +#define AM335X_PIN_MMC0_DAT0 0x8fc +#define AM335X_PIN_MMC0_CLK 0x900 +#define AM335X_PIN_MMC0_CMD 0x904 +#define AM335X_PIN_MII1_COL 0x908 +#define AM335X_PIN_MII1_CRS 0x90c +#define AM335X_PIN_MII1_RX_ER 0x910 +#define AM335X_PIN_MII1_TX_EN 0x914 +#define AM335X_PIN_MII1_RX_DV 0x918 +#define AM335X_PIN_MII1_TXD3 0x91c +#define
AM335X_PIN_MII1_TXD2 0x920 +#define AM335X_PIN_MII1_TXD1 0x924 +#define AM335X_PIN_MII1_TXD0 0x928 +#define AM335X_PIN_MII1_TX_CLK 0x92c +#define AM335X_PIN_MII1_RX_CLK 0x930 +#define AM335X_PIN_MII1_RXD3 0x934 +#define AM335X_PIN_MII1_RXD2 0x938 +#define AM335X_PIN_MII1_RXD1 0x93c +#define AM335X_PIN_MII1_RXD0 0x940 +#define AM335X_PIN_RMII1_REF_CLK 0x944 +#define AM335X_PIN_MDIO 0x948 +#define AM335X_PIN_MDC 0x94c +#define AM335X_PIN_SPI0_SCLK 0x950 +#define AM335X_PIN_SPI0_D0 0x954 +#define AM335X_PIN_SPI0_D1 0x958 +#define AM335X_PIN_SPI0_CS0 0x95c +#define AM335X_PIN_SPI0_CS1 0x960 +#define AM335X_PIN_ECAP0_IN_PWM0_OUT 0x964 +#define AM335X_PIN_UART0_CTSN 0x968 +#define AM335X_PIN_UART0_RTSN 0x96c +#define AM335X_PIN_UART0_RXD 0x970 +#define AM335X_PIN_UART0_TXD 0x974 +#define AM335X_PIN_UART1_CTSN 0x978 +#define AM335X_PIN_UART1_RTSN 0x97c +#define AM335X_PIN_UART1_RXD 0x980 +#define AM335X_PIN_UART1_TXD 0x984 +#define AM335X_PIN_I2C0_SDA 0x988 +#define AM335X_PIN_I2C0_SCL 0x98c +#define AM335X_PIN_MCASP0_ACLKX 0x990 +#define AM335X_PIN_MCASP0_FSX 0x994 +#define AM335X_PIN_MCASP0_AXR0 0x998 +#define AM335X_PIN_MCASP0_AHCLKR 0x99c +#define AM335X_PIN_MCASP0_ACLKR 0x9a0 +#define AM335X_PIN_MCASP0_FSR 0x9a4 +#define AM335X_PIN_MCASP0_AXR1 0x9a8 +#define AM335X_PIN_MCASP0_AHCLKX 0x9ac +#define AM335X_PIN_XDMA_EVENT_INTR0 0x9b0 +#define AM335X_PIN_XDMA_EVENT_INTR1 0x9b4 +#define AM335X_PIN_WARMRSTN 0x9b8 +#define AM335X_PIN_NNMI 0x9c0 +#define AM335X_PIN_TMS 0x9d0 +#define AM335X_PIN_TDI 0x9d4 +#define AM335X_PIN_TDO 0x9d8 +#define AM335X_PIN_TCK 0x9dc +#define AM335X_PIN_TRSTN 0x9e0 +#define AM335X_PIN_EMU0 0x9e4 +#define AM335X_PIN_EMU1 0x9e8 +#define AM335X_PIN_RTC_PWRONRSTN 0x9f8 +#define AM335X_PIN_PMIC_POWER_EN 0x9fc +#define AM335X_PIN_EXT_WAKEUP 0xa00 +#define AM335X_PIN_USB0_DRVVBUS 0xa1c +#define AM335X_PIN_USB1_DRVVBUS 0xa34 + +#define AM335X_PIN_OFFSET_MAX 0x0a34U + +#endif diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h new file mode 100644 index 0000000..6ce4a32 --- /dev/null +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants specific to AM43XX pinctrl bindings. 
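+ *
+ * Illustrative sketch, not part of the original header: the AM4372_IOPAD()
+ * helper at the end of this header converts an absolute pad address into
+ * the offset/value pair that pinctrl-single expects (the 0x9b4 address and
+ * mux mode are placeholders):
+ *
+ *	pinctrl-single,pins = <AM4372_IOPAD(0x9b4, PIN_OUTPUT_PULLUP | MUX_MODE7)>;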
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_AM43XX_H +#define _DT_BINDINGS_PINCTRL_AM43XX_H + +#define MUX_MODE0 0 +#define MUX_MODE1 1 +#define MUX_MODE2 2 +#define MUX_MODE3 3 +#define MUX_MODE4 4 +#define MUX_MODE5 5 +#define MUX_MODE6 6 +#define MUX_MODE7 7 +#define MUX_MODE8 8 +#define MUX_MODE9 9 + +#define PULL_DISABLE (1 << 16) +#define PULL_UP (1 << 17) +#define INPUT_EN (1 << 18) +#define SLEWCTRL_SLOW (1 << 19) +#define SLEWCTRL_FAST 0 +#define DS0_FORCE_OFF_MODE (1 << 24) +#define DS0_INPUT (1 << 25) +#define DS0_FORCE_OUT_HIGH (1 << 26) +#define DS0_PULL_UP_DOWN_EN (0 << 27) +#define DS0_PULL_UP_DOWN_DIS (1 << 27) +#define DS0_PULL_UP_SEL (1 << 28) +#define WAKEUP_ENABLE (1 << 29) + +#define DS0_PIN_OUTPUT (DS0_FORCE_OFF_MODE) +#define DS0_PIN_OUTPUT_HIGH (DS0_FORCE_OFF_MODE | DS0_FORCE_OUT_HIGH) +#define DS0_PIN_OUTPUT_PULLUP (DS0_FORCE_OFF_MODE | DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL) +#define DS0_PIN_OUTPUT_PULLDOWN (DS0_FORCE_OFF_MODE | DS0_PULL_UP_DOWN_EN) +#define DS0_PIN_INPUT (DS0_FORCE_OFF_MODE | DS0_INPUT) +#define DS0_PIN_INPUT_PULLUP (DS0_FORCE_OFF_MODE | DS0_INPUT | DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL) +#define DS0_PIN_INPUT_PULLDOWN (DS0_FORCE_OFF_MODE | DS0_INPUT | DS0_PULL_UP_DOWN_EN) + +#define PIN_OUTPUT (PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN 0 +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN) + +/* + * Macro to allow using the absolute physical address of the + * padconf registers instead of the offset from the padconf base. + */ +#define AM4372_IOPAD(pa, val) (((pa) & 0xffff) - 0x0800) (val) + +#endif + diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h new file mode 100644 index 0000000..3831f91 --- /dev/null +++ b/include/dt-bindings/pinctrl/at91.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides constants for most at91 pinctrl bindings.
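+ *
+ * Illustrative sketch, not part of the original header: each entry in an
+ * atmel,pins property is <bank line peripheral conf> (the node name and
+ * pin choice are placeholders):
+ *
+ *	pinctrl_usart0: usart0-0 {
+ *		atmel,pins = <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+ *	};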
+ * + * Copyright (C) 2013 Jean-Christophe PLAGNIOL-VILLARD + */ + +#ifndef __DT_BINDINGS_AT91_PINCTRL_H__ +#define __DT_BINDINGS_AT91_PINCTRL_H__ + +#define AT91_PINCTRL_NONE (0 << 0) +#define AT91_PINCTRL_PULL_UP (1 << 0) +#define AT91_PINCTRL_MULTI_DRIVE (1 << 1) +#define AT91_PINCTRL_DEGLITCH (1 << 2) +#define AT91_PINCTRL_PULL_DOWN (1 << 3) +#define AT91_PINCTRL_DIS_SCHMIT (1 << 4) +#define AT91_PINCTRL_OUTPUT (1 << 7) +#define AT91_PINCTRL_OUTPUT_VAL(x) ((x & 0x1) << 8) +#define AT91_PINCTRL_SLEWRATE (1 << 9) +#define AT91_PINCTRL_DEBOUNCE (1 << 16) +#define AT91_PINCTRL_DEBOUNCE_VAL(x) (x << 17) + +#define AT91_PINCTRL_PULL_UP_DEGLITCH (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DEGLITCH) + +#define AT91_PINCTRL_DRIVE_STRENGTH_DEFAULT (0x0 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_LOW (0x1 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5) + +#define AT91_PINCTRL_SLEWRATE_DIS (0x0 << 9) +#define AT91_PINCTRL_SLEWRATE_ENA (0x1 << 9) + +#define AT91_PIOA 0 +#define AT91_PIOB 1 +#define AT91_PIOC 2 +#define AT91_PIOD 3 +#define AT91_PIOE 4 + +#define AT91_PERIPH_GPIO 0 +#define AT91_PERIPH_A 1 +#define AT91_PERIPH_B 2 +#define AT91_PERIPH_C 3 +#define AT91_PERIPH_D 4 + +#define ATMEL_PIO_DRVSTR_LO 1 +#define ATMEL_PIO_DRVSTR_ME 2 +#define ATMEL_PIO_DRVSTR_HI 3 + +#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */ diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h new file mode 100644 index 0000000..b5b2654 --- /dev/null +++ b/include/dt-bindings/pinctrl/bcm2835.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header providing constants for bcm2835 pinctrl bindings. + * + * Copyright (C) 2015 Stefan Wahren + */ + +#ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__ +#define __DT_BINDINGS_PINCTRL_BCM2835_H__ + +/* brcm,function property */ +#define BCM2835_FSEL_GPIO_IN 0 +#define BCM2835_FSEL_GPIO_OUT 1 +#define BCM2835_FSEL_ALT5 2 +#define BCM2835_FSEL_ALT4 3 +#define BCM2835_FSEL_ALT0 4 +#define BCM2835_FSEL_ALT1 5 +#define BCM2835_FSEL_ALT2 6 +#define BCM2835_FSEL_ALT3 7 + +/* brcm,pull property */ +#define BCM2835_PUD_OFF 0 +#define BCM2835_PUD_DOWN 1 +#define BCM2835_PUD_UP 2 + +#endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */ diff --git a/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h b/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h new file mode 100644 index 0000000..caa6c66 --- /dev/null +++ b/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h @@ -0,0 +1,68 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2017 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DT_BINDINGS_PINCTRL_BRCM_STINGRAY_H__ +#define __DT_BINDINGS_PINCTRL_BRCM_STINGRAY_H__ + +/* Alternate functions available in MUX controller */ +#define MODE_NITRO 0 +#define MODE_NAND 1 +#define MODE_PNOR 2 +#define MODE_GPIO 3 + +/* Pad configuration attribute */ +#define PAD_SLEW_RATE_ENA (1 << 0) +#define PAD_SLEW_RATE_ENA_MASK (1 << 0) + +#define PAD_DRIVE_STRENGTH_2_MA (0 << 1) +#define PAD_DRIVE_STRENGTH_4_MA (1 << 1) +#define PAD_DRIVE_STRENGTH_6_MA (2 << 1) +#define PAD_DRIVE_STRENGTH_8_MA (3 << 1) +#define PAD_DRIVE_STRENGTH_10_MA (4 << 1) +#define PAD_DRIVE_STRENGTH_12_MA (5 << 1) +#define PAD_DRIVE_STRENGTH_14_MA (6 << 1) +#define PAD_DRIVE_STRENGTH_16_MA (7 << 1) +#define PAD_DRIVE_STRENGTH_MASK (7 << 1) + +#define PAD_PULL_UP_ENA (1 << 4) +#define PAD_PULL_UP_ENA_MASK (1 << 4) + +#define PAD_PULL_DOWN_ENA (1 << 5) +#define PAD_PULL_DOWN_ENA_MASK (1 << 5) + +#define PAD_INPUT_PATH_DIS (1 << 6) +#define PAD_INPUT_PATH_DIS_MASK (1 << 6) + +#define PAD_HYSTERESIS_ENA (1 << 7) +#define PAD_HYSTERESIS_ENA_MASK (1 << 7) + +#endif diff --git a/include/dt-bindings/pinctrl/dm814x.h b/include/dt-bindings/pinctrl/dm814x.h new file mode 100644 index 0000000..afbabbc --- /dev/null +++ b/include/dt-bindings/pinctrl/dm814x.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants specific to DM814X pinctrl bindings. + */ + +#ifndef _DT_BINDINGS_PINCTRL_DM814X_H +#define _DT_BINDINGS_PINCTRL_DM814X_H + +#include <dt-bindings/pinctrl/omap.h> + +#undef INPUT_EN +#undef PULL_UP +#undef PULL_ENA + +/* + * Note that dm814x silicon revision 2.1 and older require input enabled + * (bit 18 set) for all 3.3V I/Os to avoid cumulative hardware damage. For + * more info, see errata advisory 2.1.87. We leave bit 18 out of + * function-mask in dm814x.h and rely on the bootloader for it.
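+ *
+ * A hedged illustration (the register offset and mux mode below are
+ * placeholders): with bit 18 excluded from pinctrl-single,function-mask,
+ * an entry such as
+ *
+ *	pinctrl-single,pins = <0x0800 (PIN_INPUT_PULLUP | 0x1)>;
+ *
+ * only updates the masked bits, so the input-enable bit keeps whatever
+ * value the bootloader programmed.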
+ */ +#define INPUT_EN (1 << 18) +#define PULL_UP (1 << 17) +#define PULL_DISABLE (1 << 16) + +/* update macro depending on INPUT_EN and PULL_ENA */ +#undef PIN_OUTPUT +#undef PIN_OUTPUT_PULLUP +#undef PIN_OUTPUT_PULLDOWN +#undef PIN_INPUT +#undef PIN_INPUT_PULLUP +#undef PIN_INPUT_PULLDOWN + +#define PIN_OUTPUT (PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN 0 +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN) + +/* undef non-existing modes */ +#undef PIN_OFF_NONE +#undef PIN_OFF_OUTPUT_HIGH +#undef PIN_OFF_OUTPUT_LOW +#undef PIN_OFF_INPUT_PULLUP +#undef PIN_OFF_INPUT_PULLDOWN +#undef PIN_OFF_WAKEUPENABLE + +#endif + diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h new file mode 100644 index 0000000..252cdfd --- /dev/null +++ b/include/dt-bindings/pinctrl/dra.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides constants for DRA pinctrl bindings. + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Rajendra Nayak + */ + +#ifndef _DT_BINDINGS_PINCTRL_DRA_H +#define _DT_BINDINGS_PINCTRL_DRA_H + +/* DRA7 mux mode options for each pin. See TRM for options */ +#define MUX_MODE0 0x0 +#define MUX_MODE1 0x1 +#define MUX_MODE2 0x2 +#define MUX_MODE3 0x3 +#define MUX_MODE4 0x4 +#define MUX_MODE5 0x5 +#define MUX_MODE6 0x6 +#define MUX_MODE7 0x7 +#define MUX_MODE8 0x8 +#define MUX_MODE9 0x9 +#define MUX_MODE10 0xa +#define MUX_MODE11 0xb +#define MUX_MODE12 0xc +#define MUX_MODE13 0xd +#define MUX_MODE14 0xe +#define MUX_MODE15 0xf + +/* Certain pins need virtual mode, but note: they may glitch */ +#define MUX_VIRTUAL_MODE0 (MODE_SELECT | (0x0 << 4)) +#define MUX_VIRTUAL_MODE1 (MODE_SELECT | (0x1 << 4)) +#define MUX_VIRTUAL_MODE2 (MODE_SELECT | (0x2 << 4)) +#define MUX_VIRTUAL_MODE3 (MODE_SELECT | (0x3 << 4)) +#define MUX_VIRTUAL_MODE4 (MODE_SELECT | (0x4 << 4)) +#define MUX_VIRTUAL_MODE5 (MODE_SELECT | (0x5 << 4)) +#define MUX_VIRTUAL_MODE6 (MODE_SELECT | (0x6 << 4)) +#define MUX_VIRTUAL_MODE7 (MODE_SELECT | (0x7 << 4)) +#define MUX_VIRTUAL_MODE8 (MODE_SELECT | (0x8 << 4)) +#define MUX_VIRTUAL_MODE9 (MODE_SELECT | (0x9 << 4)) +#define MUX_VIRTUAL_MODE10 (MODE_SELECT | (0xa << 4)) +#define MUX_VIRTUAL_MODE11 (MODE_SELECT | (0xb << 4)) +#define MUX_VIRTUAL_MODE12 (MODE_SELECT | (0xc << 4)) +#define MUX_VIRTUAL_MODE13 (MODE_SELECT | (0xd << 4)) +#define MUX_VIRTUAL_MODE14 (MODE_SELECT | (0xe << 4)) +#define MUX_VIRTUAL_MODE15 (MODE_SELECT | (0xf << 4)) + +#define MODE_SELECT (1 << 8) + +#define PULL_ENA (0 << 16) +#define PULL_DIS (1 << 16) +#define PULL_UP (1 << 17) +#define INPUT_EN (1 << 18) +#define SLEWCONTROL (1 << 19) +#define WAKEUP_EN (1 << 24) +#define WAKEUP_EVENT (1 << 25) + +/* Active pin states */ +#define PIN_OUTPUT (0 | PULL_DIS) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN (0) +#define PIN_INPUT (INPUT_EN | PULL_DIS) +#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) +#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) + +/* + * Macro to allow using the absolute physical address of the + * padconf registers instead of the offset from the padconf base.
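+ *
+ * For example (the pad address is a placeholder), a board .dts can write
+ *
+ *	pinctrl-single,pins = <DRA7XX_CORE_IOPAD(0x3658, PIN_INPUT_PULLUP | MUX_MODE0)>;
+ *
+ * which the macro below turns into register offset 0x258 followed by the
+ * configuration value.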
+ */ +#define DRA7XX_CORE_IOPAD(pa, val) (((pa) & 0xffff) - 0x3400) (val) + +/* DRA7 IODELAY configuration parameters */ +#define A_DELAY_PS(val) ((val) & 0xffff) +#define G_DELAY_PS(val) ((val) & 0xffff) +#endif + diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h new file mode 100644 index 0000000..0359bfd --- /dev/null +++ b/include/dt-bindings/pinctrl/hisi.h @@ -0,0 +1,74 @@ +/* + * This header provides constants for hisilicon pinctrl bindings. + * + * Copyright (c) 2015 Hisilicon Limited. + * Copyright (c) 2015 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_PINCTRL_HISI_H +#define _DT_BINDINGS_PINCTRL_HISI_H + +/* iomg bit definition */ +#define MUX_M0 0 +#define MUX_M1 1 +#define MUX_M2 2 +#define MUX_M3 3 +#define MUX_M4 4 +#define MUX_M5 5 +#define MUX_M6 6 +#define MUX_M7 7 + +/* iocg bit definition */ +#define PULL_MASK (3) +#define PULL_DIS (0) +#define PULL_UP (1 << 0) +#define PULL_DOWN (1 << 1) + +/* drive strength definition */ +#define DRIVE_MASK (7 << 4) +#define DRIVE1_02MA (0 << 4) +#define DRIVE1_04MA (1 << 4) +#define DRIVE1_08MA (2 << 4) +#define DRIVE1_10MA (3 << 4) +#define DRIVE2_02MA (0 << 4) +#define DRIVE2_04MA (1 << 4) +#define DRIVE2_08MA (2 << 4) +#define DRIVE2_10MA (3 << 4) +#define DRIVE3_04MA (0 << 4) +#define DRIVE3_08MA (1 << 4) +#define DRIVE3_12MA (2 << 4) +#define DRIVE3_16MA (3 << 4) +#define DRIVE3_20MA (4 << 4) +#define DRIVE3_24MA (5 << 4) +#define DRIVE3_32MA (6 << 4) +#define DRIVE3_40MA (7 << 4) +#define DRIVE4_02MA (0 << 4) +#define DRIVE4_04MA (2 << 4) +#define DRIVE4_08MA (4 << 4) +#define DRIVE4_10MA (6 << 4) + +/* drive strength definition for hi3660 */ +#define DRIVE6_MASK (15 << 4) +#define DRIVE6_04MA (0 << 4) +#define DRIVE6_12MA (4 << 4) +#define DRIVE6_19MA (8 << 4) +#define DRIVE6_27MA (10 << 4) +#define DRIVE6_32MA (15 << 4) +#define DRIVE7_02MA (0 << 4) +#define DRIVE7_04MA (1 << 4) +#define DRIVE7_06MA (2 << 4) +#define DRIVE7_08MA (3 << 4) +#define DRIVE7_10MA (4 << 4) +#define DRIVE7_12MA (5 << 4) +#define DRIVE7_14MA (6 << 4) +#define DRIVE7_16MA (7 << 4) +#endif diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h new file mode 100644 index 0000000..499de62 --- /dev/null +++ b/include/dt-bindings/pinctrl/k3.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for pinctrl bindings for TI's K3 SoC + * family. 
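+ *
+ * Illustrative sketch, not part of the original header (the pad offset is
+ * a placeholder): the IOPAD helpers below emit the offset/value pair that
+ * pinctrl-single expects, with the mux mode as a third argument:
+ *
+ *	pinctrl-single,pins = <AM65X_IOPAD(0x01e4, PIN_INPUT_PULLUP, 0)>;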
+ * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + */ +#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H +#define _DT_BINDINGS_PINCTRL_TI_K3_H + +#define PULLUDEN_SHIFT (16) +#define PULLTYPESEL_SHIFT (17) +#define RXACTIVE_SHIFT (18) + +#define PULL_DISABLE (1 << PULLUDEN_SHIFT) +#define PULL_ENABLE (0 << PULLUDEN_SHIFT) + +#define PULL_UP (1 << PULLTYPESEL_SHIFT | PULL_ENABLE) +#define PULL_DOWN (0 << PULLTYPESEL_SHIFT | PULL_ENABLE) + +#define INPUT_EN (1 << RXACTIVE_SHIFT) +#define INPUT_DISABLE (0 << RXACTIVE_SHIFT) + +/* Only these macros are expected to be used directly in device tree files */ +#define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP) +#define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN) +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN) + +#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + +#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + +#endif diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h new file mode 100644 index 0000000..7f97d77 --- /dev/null +++ b/include/dt-bindings/pinctrl/keystone.h @@ -0,0 +1,39 @@ +/* + * This header provides constants for Keystone pinctrl bindings. + * + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H +#define _DT_BINDINGS_PINCTRL_KEYSTONE_H + +#define MUX_MODE0 0 +#define MUX_MODE1 1 +#define MUX_MODE2 2 +#define MUX_MODE3 3 +#define MUX_MODE4 4 +#define MUX_MODE5 5 + +#define BUFFER_CLASS_B (0 << 19) +#define BUFFER_CLASS_C (1 << 19) +#define BUFFER_CLASS_D (2 << 19) +#define BUFFER_CLASS_E (3 << 19) + +#define PULL_DISABLE (1 << 16) +#define PIN_PULLUP (1 << 17) +#define PIN_PULLDOWN (0 << 17) + +#define KEYSTONE_IOPAD_OFFSET(pa, offset) (((pa) & 0xffff) - (offset)) + +#define K2G_CORE_IOPAD(pa) KEYSTONE_IOPAD_OFFSET((pa), 0x1000) + +#endif diff --git a/include/dt-bindings/pinctrl/lochnagar.h b/include/dt-bindings/pinctrl/lochnagar.h new file mode 100644 index 0000000..644760b --- /dev/null +++ b/include/dt-bindings/pinctrl/lochnagar.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Device Tree defines for Lochnagar pinctrl + * + * Copyright (c) 2018 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd.
+ * + * Author: Charles Keepax + */ + +#ifndef DT_BINDINGS_PINCTRL_LOCHNAGAR_H +#define DT_BINDINGS_PINCTRL_LOCHNAGAR_H + +#define LOCHNAGAR1_PIN_CDC_RESET 0 +#define LOCHNAGAR1_PIN_DSP_RESET 1 +#define LOCHNAGAR1_PIN_CDC_CIF1MODE 2 +#define LOCHNAGAR1_PIN_NUM_GPIOS 3 + +#define LOCHNAGAR2_PIN_CDC_RESET 0 +#define LOCHNAGAR2_PIN_DSP_RESET 1 +#define LOCHNAGAR2_PIN_CDC_CIF1MODE 2 +#define LOCHNAGAR2_PIN_CDC_LDOENA 3 +#define LOCHNAGAR2_PIN_SPDIF_HWMODE 4 +#define LOCHNAGAR2_PIN_SPDIF_RESET 5 +#define LOCHNAGAR2_PIN_FPGA_GPIO1 6 +#define LOCHNAGAR2_PIN_FPGA_GPIO2 7 +#define LOCHNAGAR2_PIN_FPGA_GPIO3 8 +#define LOCHNAGAR2_PIN_FPGA_GPIO4 9 +#define LOCHNAGAR2_PIN_FPGA_GPIO5 10 +#define LOCHNAGAR2_PIN_FPGA_GPIO6 11 +#define LOCHNAGAR2_PIN_CDC_GPIO1 12 +#define LOCHNAGAR2_PIN_CDC_GPIO2 13 +#define LOCHNAGAR2_PIN_CDC_GPIO3 14 +#define LOCHNAGAR2_PIN_CDC_GPIO4 15 +#define LOCHNAGAR2_PIN_CDC_GPIO5 16 +#define LOCHNAGAR2_PIN_CDC_GPIO6 17 +#define LOCHNAGAR2_PIN_CDC_GPIO7 18 +#define LOCHNAGAR2_PIN_CDC_GPIO8 19 +#define LOCHNAGAR2_PIN_DSP_GPIO1 20 +#define LOCHNAGAR2_PIN_DSP_GPIO2 21 +#define LOCHNAGAR2_PIN_DSP_GPIO3 22 +#define LOCHNAGAR2_PIN_DSP_GPIO4 23 +#define LOCHNAGAR2_PIN_DSP_GPIO5 24 +#define LOCHNAGAR2_PIN_DSP_GPIO6 25 +#define LOCHNAGAR2_PIN_GF_GPIO2 26 +#define LOCHNAGAR2_PIN_GF_GPIO3 27 +#define LOCHNAGAR2_PIN_GF_GPIO7 28 +#define LOCHNAGAR2_PIN_CDC_AIF1_BCLK 29 +#define LOCHNAGAR2_PIN_CDC_AIF1_RXDAT 30 +#define LOCHNAGAR2_PIN_CDC_AIF1_LRCLK 31 +#define LOCHNAGAR2_PIN_CDC_AIF1_TXDAT 32 +#define LOCHNAGAR2_PIN_CDC_AIF2_BCLK 33 +#define LOCHNAGAR2_PIN_CDC_AIF2_RXDAT 34 +#define LOCHNAGAR2_PIN_CDC_AIF2_LRCLK 35 +#define LOCHNAGAR2_PIN_CDC_AIF2_TXDAT 36 +#define LOCHNAGAR2_PIN_CDC_AIF3_BCLK 37 +#define LOCHNAGAR2_PIN_CDC_AIF3_RXDAT 38 +#define LOCHNAGAR2_PIN_CDC_AIF3_LRCLK 39 +#define LOCHNAGAR2_PIN_CDC_AIF3_TXDAT 40 +#define LOCHNAGAR2_PIN_DSP_AIF1_BCLK 41 +#define LOCHNAGAR2_PIN_DSP_AIF1_RXDAT 42 +#define LOCHNAGAR2_PIN_DSP_AIF1_LRCLK 43 +#define LOCHNAGAR2_PIN_DSP_AIF1_TXDAT 44 +#define LOCHNAGAR2_PIN_DSP_AIF2_BCLK 45 +#define LOCHNAGAR2_PIN_DSP_AIF2_RXDAT 46 +#define LOCHNAGAR2_PIN_DSP_AIF2_LRCLK 47 +#define LOCHNAGAR2_PIN_DSP_AIF2_TXDAT 48 +#define LOCHNAGAR2_PIN_PSIA1_BCLK 49 +#define LOCHNAGAR2_PIN_PSIA1_RXDAT 50 +#define LOCHNAGAR2_PIN_PSIA1_LRCLK 51 +#define LOCHNAGAR2_PIN_PSIA1_TXDAT 52 +#define LOCHNAGAR2_PIN_PSIA2_BCLK 53 +#define LOCHNAGAR2_PIN_PSIA2_RXDAT 54 +#define LOCHNAGAR2_PIN_PSIA2_LRCLK 55 +#define LOCHNAGAR2_PIN_PSIA2_TXDAT 56 +#define LOCHNAGAR2_PIN_GF_AIF3_BCLK 57 +#define LOCHNAGAR2_PIN_GF_AIF3_RXDAT 58 +#define LOCHNAGAR2_PIN_GF_AIF3_LRCLK 59 +#define LOCHNAGAR2_PIN_GF_AIF3_TXDAT 60 +#define LOCHNAGAR2_PIN_GF_AIF4_BCLK 61 +#define LOCHNAGAR2_PIN_GF_AIF4_RXDAT 62 +#define LOCHNAGAR2_PIN_GF_AIF4_LRCLK 63 +#define LOCHNAGAR2_PIN_GF_AIF4_TXDAT 64 +#define LOCHNAGAR2_PIN_GF_AIF1_BCLK 65 +#define LOCHNAGAR2_PIN_GF_AIF1_RXDAT 66 +#define LOCHNAGAR2_PIN_GF_AIF1_LRCLK 67 +#define LOCHNAGAR2_PIN_GF_AIF1_TXDAT 68 +#define LOCHNAGAR2_PIN_GF_AIF2_BCLK 69 +#define LOCHNAGAR2_PIN_GF_AIF2_RXDAT 70 +#define LOCHNAGAR2_PIN_GF_AIF2_LRCLK 71 +#define LOCHNAGAR2_PIN_GF_AIF2_TXDAT 72 +#define LOCHNAGAR2_PIN_DSP_UART1_RX 73 +#define LOCHNAGAR2_PIN_DSP_UART1_TX 74 +#define LOCHNAGAR2_PIN_DSP_UART2_RX 75 +#define LOCHNAGAR2_PIN_DSP_UART2_TX 76 +#define LOCHNAGAR2_PIN_GF_UART2_RX 77 +#define LOCHNAGAR2_PIN_GF_UART2_TX 78 +#define LOCHNAGAR2_PIN_USB_UART_RX 79 +#define LOCHNAGAR2_PIN_CDC_PDMCLK1 80 +#define LOCHNAGAR2_PIN_CDC_PDMDAT1 81 +#define LOCHNAGAR2_PIN_CDC_PDMCLK2 82 +#define 
LOCHNAGAR2_PIN_CDC_PDMDAT2 83 +#define LOCHNAGAR2_PIN_CDC_DMICCLK1 84 +#define LOCHNAGAR2_PIN_CDC_DMICDAT1 85 +#define LOCHNAGAR2_PIN_CDC_DMICCLK2 86 +#define LOCHNAGAR2_PIN_CDC_DMICDAT2 87 +#define LOCHNAGAR2_PIN_CDC_DMICCLK3 88 +#define LOCHNAGAR2_PIN_CDC_DMICDAT3 89 +#define LOCHNAGAR2_PIN_CDC_DMICCLK4 90 +#define LOCHNAGAR2_PIN_CDC_DMICDAT4 91 +#define LOCHNAGAR2_PIN_DSP_DMICCLK1 92 +#define LOCHNAGAR2_PIN_DSP_DMICDAT1 93 +#define LOCHNAGAR2_PIN_DSP_DMICCLK2 94 +#define LOCHNAGAR2_PIN_DSP_DMICDAT2 95 +#define LOCHNAGAR2_PIN_I2C2_SCL 96 +#define LOCHNAGAR2_PIN_I2C2_SDA 97 +#define LOCHNAGAR2_PIN_I2C3_SCL 98 +#define LOCHNAGAR2_PIN_I2C3_SDA 99 +#define LOCHNAGAR2_PIN_I2C4_SCL 100 +#define LOCHNAGAR2_PIN_I2C4_SDA 101 +#define LOCHNAGAR2_PIN_DSP_STANDBY 102 +#define LOCHNAGAR2_PIN_CDC_MCLK1 103 +#define LOCHNAGAR2_PIN_CDC_MCLK2 104 +#define LOCHNAGAR2_PIN_DSP_CLKIN 105 +#define LOCHNAGAR2_PIN_PSIA1_MCLK 106 +#define LOCHNAGAR2_PIN_PSIA2_MCLK 107 +#define LOCHNAGAR2_PIN_GF_GPIO1 108 +#define LOCHNAGAR2_PIN_GF_GPIO5 109 +#define LOCHNAGAR2_PIN_DSP_GPIO20 110 +#define LOCHNAGAR2_PIN_NUM_GPIOS 111 + +#endif diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h new file mode 100644 index 0000000..f393fbd --- /dev/null +++ b/include/dt-bindings/pinctrl/mt6397-pinfunc.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MT6397_PINFUNC_H +#define __DTS_MT6397_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define MT6397_PIN_0_INT__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT6397_PIN_0_INT__FUNC_INT (MTK_PIN_NO(0) | 1) + +#define MT6397_PIN_1_SRCVOLTEN__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT6397_PIN_1_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(1) | 1) +#define MT6397_PIN_1_SRCVOLTEN__FUNC_TEST_CK1 (MTK_PIN_NO(1) | 6) + +#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_SRCLKEN_PERI (MTK_PIN_NO(2) | 1) +#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_TEST_CK2 (MTK_PIN_NO(2) | 6) + +#define MT6397_PIN_3_RTC_32K1V8__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT6397_PIN_3_RTC_32K1V8__FUNC_RTC_32K1V8 (MTK_PIN_NO(3) | 1) +#define MT6397_PIN_3_RTC_32K1V8__FUNC_TEST_CK3 (MTK_PIN_NO(3) | 6) + +#define MT6397_PIN_4_WRAP_EVENT__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT6397_PIN_4_WRAP_EVENT__FUNC_WRAP_EVENT (MTK_PIN_NO(4) | 1) + +#define MT6397_PIN_5_SPI_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK (MTK_PIN_NO(5) | 1) + +#define MT6397_PIN_6_SPI_CSN__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN (MTK_PIN_NO(6) | 1) + +#define MT6397_PIN_7_SPI_MOSI__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI (MTK_PIN_NO(7) | 1) + +#define MT6397_PIN_8_SPI_MISO__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO (MTK_PIN_NO(8) | 1) + +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(9) | 1) +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_IN0 (MTK_PIN_NO(9) | 6) +#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_OUT0 (MTK_PIN_NO(9) | 7) + +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_AUD_MISO (MTK_PIN_NO(10) | 1) +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_IN1 (MTK_PIN_NO(10) | 6) +#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_OUT1 (MTK_PIN_NO(10) | 7) + +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_AUD_MOSI
(MTK_PIN_NO(11) | 1) +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_IN2 (MTK_PIN_NO(11) | 6) +#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_OUT2 (MTK_PIN_NO(11) | 7) + +#define MT6397_PIN_12_COL0__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT6397_PIN_12_COL0__FUNC_COL0_USBDL (MTK_PIN_NO(12) | 1) +#define MT6397_PIN_12_COL0__FUNC_EINT10_1X (MTK_PIN_NO(12) | 2) +#define MT6397_PIN_12_COL0__FUNC_PWM1_3X (MTK_PIN_NO(12) | 3) +#define MT6397_PIN_12_COL0__FUNC_TEST_IN3 (MTK_PIN_NO(12) | 6) +#define MT6397_PIN_12_COL0__FUNC_TEST_OUT3 (MTK_PIN_NO(12) | 7) + +#define MT6397_PIN_13_COL1__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT6397_PIN_13_COL1__FUNC_COL1 (MTK_PIN_NO(13) | 1) +#define MT6397_PIN_13_COL1__FUNC_EINT11_1X (MTK_PIN_NO(13) | 2) +#define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3) +#define MT6397_PIN_13_COL1__FUNC_TEST_IN4 (MTK_PIN_NO(13) | 6) +#define MT6397_PIN_13_COL1__FUNC_TEST_OUT4 (MTK_PIN_NO(13) | 7) + +#define MT6397_PIN_14_COL2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT6397_PIN_14_COL2__FUNC_COL2 (MTK_PIN_NO(14) | 1) +#define MT6397_PIN_14_COL2__FUNC_EINT12_1X (MTK_PIN_NO(14) | 2) +#define MT6397_PIN_14_COL2__FUNC_SDA0_2X (MTK_PIN_NO(14) | 3) +#define MT6397_PIN_14_COL2__FUNC_TEST_IN5 (MTK_PIN_NO(14) | 6) +#define MT6397_PIN_14_COL2__FUNC_TEST_OUT5 (MTK_PIN_NO(14) | 7) + +#define MT6397_PIN_15_COL3__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT6397_PIN_15_COL3__FUNC_COL3 (MTK_PIN_NO(15) | 1) +#define MT6397_PIN_15_COL3__FUNC_EINT13_1X (MTK_PIN_NO(15) | 2) +#define MT6397_PIN_15_COL3__FUNC_SCL1_2X (MTK_PIN_NO(15) | 3) +#define MT6397_PIN_15_COL3__FUNC_TEST_IN6 (MTK_PIN_NO(15) | 6) +#define MT6397_PIN_15_COL3__FUNC_TEST_OUT6 (MTK_PIN_NO(15) | 7) + +#define MT6397_PIN_16_COL4__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT6397_PIN_16_COL4__FUNC_COL4 (MTK_PIN_NO(16) | 1) +#define MT6397_PIN_16_COL4__FUNC_EINT14_1X (MTK_PIN_NO(16) | 2) +#define MT6397_PIN_16_COL4__FUNC_SDA1_2X (MTK_PIN_NO(16) | 3) +#define MT6397_PIN_16_COL4__FUNC_TEST_IN7 (MTK_PIN_NO(16) | 6) +#define MT6397_PIN_16_COL4__FUNC_TEST_OUT7 (MTK_PIN_NO(16) | 7) + +#define MT6397_PIN_17_COL5__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT6397_PIN_17_COL5__FUNC_COL5 (MTK_PIN_NO(17) | 1) +#define MT6397_PIN_17_COL5__FUNC_EINT15_1X (MTK_PIN_NO(17) | 2) +#define MT6397_PIN_17_COL5__FUNC_SCL2_2X (MTK_PIN_NO(17) | 3) +#define MT6397_PIN_17_COL5__FUNC_TEST_IN8 (MTK_PIN_NO(17) | 6) +#define MT6397_PIN_17_COL5__FUNC_TEST_OUT8 (MTK_PIN_NO(17) | 7) + +#define MT6397_PIN_18_COL6__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT6397_PIN_18_COL6__FUNC_COL6 (MTK_PIN_NO(18) | 1) +#define MT6397_PIN_18_COL6__FUNC_EINT16_1X (MTK_PIN_NO(18) | 2) +#define MT6397_PIN_18_COL6__FUNC_SDA2_2X (MTK_PIN_NO(18) | 3) +#define MT6397_PIN_18_COL6__FUNC_GPIO32K_0 (MTK_PIN_NO(18) | 4) +#define MT6397_PIN_18_COL6__FUNC_GPIO26M_0 (MTK_PIN_NO(18) | 5) +#define MT6397_PIN_18_COL6__FUNC_TEST_IN9 (MTK_PIN_NO(18) | 6) +#define MT6397_PIN_18_COL6__FUNC_TEST_OUT9 (MTK_PIN_NO(18) | 7) + +#define MT6397_PIN_19_COL7__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT6397_PIN_19_COL7__FUNC_COL7 (MTK_PIN_NO(19) | 1) +#define MT6397_PIN_19_COL7__FUNC_EINT17_1X (MTK_PIN_NO(19) | 2) +#define MT6397_PIN_19_COL7__FUNC_PWM2_3X (MTK_PIN_NO(19) | 3) +#define MT6397_PIN_19_COL7__FUNC_GPIO32K_1 (MTK_PIN_NO(19) | 4) +#define MT6397_PIN_19_COL7__FUNC_GPIO26M_1 (MTK_PIN_NO(19) | 5) +#define MT6397_PIN_19_COL7__FUNC_TEST_IN10 (MTK_PIN_NO(19) | 6) +#define MT6397_PIN_19_COL7__FUNC_TEST_OUT10 (MTK_PIN_NO(19) | 7) + +#define MT6397_PIN_20_ROW0__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define 
MT6397_PIN_20_ROW0__FUNC_ROW0 (MTK_PIN_NO(20) | 1) +#define MT6397_PIN_20_ROW0__FUNC_EINT18_1X (MTK_PIN_NO(20) | 2) +#define MT6397_PIN_20_ROW0__FUNC_SCL0_3X (MTK_PIN_NO(20) | 3) +#define MT6397_PIN_20_ROW0__FUNC_TEST_IN11 (MTK_PIN_NO(20) | 6) +#define MT6397_PIN_20_ROW0__FUNC_TEST_OUT11 (MTK_PIN_NO(20) | 7) + +#define MT6397_PIN_21_ROW1__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT6397_PIN_21_ROW1__FUNC_ROW1 (MTK_PIN_NO(21) | 1) +#define MT6397_PIN_21_ROW1__FUNC_EINT19_1X (MTK_PIN_NO(21) | 2) +#define MT6397_PIN_21_ROW1__FUNC_SDA0_3X (MTK_PIN_NO(21) | 3) +#define MT6397_PIN_21_ROW1__FUNC_AUD_TSTCK (MTK_PIN_NO(21) | 4) +#define MT6397_PIN_21_ROW1__FUNC_TEST_IN12 (MTK_PIN_NO(21) | 6) +#define MT6397_PIN_21_ROW1__FUNC_TEST_OUT12 (MTK_PIN_NO(21) | 7) + +#define MT6397_PIN_22_ROW2__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT6397_PIN_22_ROW2__FUNC_ROW2 (MTK_PIN_NO(22) | 1) +#define MT6397_PIN_22_ROW2__FUNC_EINT20_1X (MTK_PIN_NO(22) | 2) +#define MT6397_PIN_22_ROW2__FUNC_SCL1_3X (MTK_PIN_NO(22) | 3) +#define MT6397_PIN_22_ROW2__FUNC_TEST_IN13 (MTK_PIN_NO(22) | 6) +#define MT6397_PIN_22_ROW2__FUNC_TEST_OUT13 (MTK_PIN_NO(22) | 7) + +#define MT6397_PIN_23_ROW3__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT6397_PIN_23_ROW3__FUNC_ROW3 (MTK_PIN_NO(23) | 1) +#define MT6397_PIN_23_ROW3__FUNC_EINT21_1X (MTK_PIN_NO(23) | 2) +#define MT6397_PIN_23_ROW3__FUNC_SDA1_3X (MTK_PIN_NO(23) | 3) +#define MT6397_PIN_23_ROW3__FUNC_TEST_IN14 (MTK_PIN_NO(23) | 6) +#define MT6397_PIN_23_ROW3__FUNC_TEST_OUT14 (MTK_PIN_NO(23) | 7) + +#define MT6397_PIN_24_ROW4__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT6397_PIN_24_ROW4__FUNC_ROW4 (MTK_PIN_NO(24) | 1) +#define MT6397_PIN_24_ROW4__FUNC_EINT22_1X (MTK_PIN_NO(24) | 2) +#define MT6397_PIN_24_ROW4__FUNC_SCL2_3X (MTK_PIN_NO(24) | 3) +#define MT6397_PIN_24_ROW4__FUNC_TEST_IN15 (MTK_PIN_NO(24) | 6) +#define MT6397_PIN_24_ROW4__FUNC_TEST_OUT15 (MTK_PIN_NO(24) | 7) + +#define MT6397_PIN_25_ROW5__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT6397_PIN_25_ROW5__FUNC_ROW5 (MTK_PIN_NO(25) | 1) +#define MT6397_PIN_25_ROW5__FUNC_EINT23_1X (MTK_PIN_NO(25) | 2) +#define MT6397_PIN_25_ROW5__FUNC_SDA2_3X (MTK_PIN_NO(25) | 3) +#define MT6397_PIN_25_ROW5__FUNC_TEST_IN16 (MTK_PIN_NO(25) | 6) +#define MT6397_PIN_25_ROW5__FUNC_TEST_OUT16 (MTK_PIN_NO(25) | 7) + +#define MT6397_PIN_26_ROW6__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT6397_PIN_26_ROW6__FUNC_ROW6 (MTK_PIN_NO(26) | 1) +#define MT6397_PIN_26_ROW6__FUNC_EINT24_1X (MTK_PIN_NO(26) | 2) +#define MT6397_PIN_26_ROW6__FUNC_PWM3_3X (MTK_PIN_NO(26) | 3) +#define MT6397_PIN_26_ROW6__FUNC_GPIO32K_2 (MTK_PIN_NO(26) | 4) +#define MT6397_PIN_26_ROW6__FUNC_GPIO26M_2 (MTK_PIN_NO(26) | 5) +#define MT6397_PIN_26_ROW6__FUNC_TEST_IN17 (MTK_PIN_NO(26) | 6) +#define MT6397_PIN_26_ROW6__FUNC_TEST_OUT17 (MTK_PIN_NO(26) | 7) + +#define MT6397_PIN_27_ROW7__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT6397_PIN_27_ROW7__FUNC_ROW7 (MTK_PIN_NO(27) | 1) +#define MT6397_PIN_27_ROW7__FUNC_EINT3_1X (MTK_PIN_NO(27) | 2) +#define MT6397_PIN_27_ROW7__FUNC_CBUS (MTK_PIN_NO(27) | 3) +#define MT6397_PIN_27_ROW7__FUNC_GPIO32K_3 (MTK_PIN_NO(27) | 4) +#define MT6397_PIN_27_ROW7__FUNC_GPIO26M_3 (MTK_PIN_NO(27) | 5) +#define MT6397_PIN_27_ROW7__FUNC_TEST_IN18 (MTK_PIN_NO(27) | 6) +#define MT6397_PIN_27_ROW7__FUNC_TEST_OUT18 (MTK_PIN_NO(27) | 7) + +#define MT6397_PIN_28_PWM1__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT6397_PIN_28_PWM1__FUNC_PWM1 (MTK_PIN_NO(28) | 1) +#define MT6397_PIN_28_PWM1__FUNC_EINT4_1X (MTK_PIN_NO(28) | 2) +#define MT6397_PIN_28_PWM1__FUNC_GPIO32K_4 
(MTK_PIN_NO(28) | 4) +#define MT6397_PIN_28_PWM1__FUNC_GPIO26M_4 (MTK_PIN_NO(28) | 5) +#define MT6397_PIN_28_PWM1__FUNC_TEST_IN19 (MTK_PIN_NO(28) | 6) +#define MT6397_PIN_28_PWM1__FUNC_TEST_OUT19 (MTK_PIN_NO(28) | 7) + +#define MT6397_PIN_29_PWM2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT6397_PIN_29_PWM2__FUNC_PWM2 (MTK_PIN_NO(29) | 1) +#define MT6397_PIN_29_PWM2__FUNC_EINT5_1X (MTK_PIN_NO(29) | 2) +#define MT6397_PIN_29_PWM2__FUNC_GPIO32K_5 (MTK_PIN_NO(29) | 4) +#define MT6397_PIN_29_PWM2__FUNC_GPIO26M_5 (MTK_PIN_NO(29) | 5) +#define MT6397_PIN_29_PWM2__FUNC_TEST_IN20 (MTK_PIN_NO(29) | 6) +#define MT6397_PIN_29_PWM2__FUNC_TEST_OUT20 (MTK_PIN_NO(29) | 7) + +#define MT6397_PIN_30_PWM3__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT6397_PIN_30_PWM3__FUNC_PWM3 (MTK_PIN_NO(30) | 1) +#define MT6397_PIN_30_PWM3__FUNC_EINT6_1X (MTK_PIN_NO(30) | 2) +#define MT6397_PIN_30_PWM3__FUNC_COL0 (MTK_PIN_NO(30) | 3) +#define MT6397_PIN_30_PWM3__FUNC_GPIO32K_6 (MTK_PIN_NO(30) | 4) +#define MT6397_PIN_30_PWM3__FUNC_GPIO26M_6 (MTK_PIN_NO(30) | 5) +#define MT6397_PIN_30_PWM3__FUNC_TEST_IN21 (MTK_PIN_NO(30) | 6) +#define MT6397_PIN_30_PWM3__FUNC_TEST_OUT21 (MTK_PIN_NO(30) | 7) + +#define MT6397_PIN_31_SCL0__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define MT6397_PIN_31_SCL0__FUNC_SCL0 (MTK_PIN_NO(31) | 1) +#define MT6397_PIN_31_SCL0__FUNC_EINT7_1X (MTK_PIN_NO(31) | 2) +#define MT6397_PIN_31_SCL0__FUNC_PWM1_2X (MTK_PIN_NO(31) | 3) +#define MT6397_PIN_31_SCL0__FUNC_TEST_IN22 (MTK_PIN_NO(31) | 6) +#define MT6397_PIN_31_SCL0__FUNC_TEST_OUT22 (MTK_PIN_NO(31) | 7) + +#define MT6397_PIN_32_SDA0__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT6397_PIN_32_SDA0__FUNC_SDA0 (MTK_PIN_NO(32) | 1) +#define MT6397_PIN_32_SDA0__FUNC_EINT8_1X (MTK_PIN_NO(32) | 2) +#define MT6397_PIN_32_SDA0__FUNC_TEST_IN23 (MTK_PIN_NO(32) | 6) +#define MT6397_PIN_32_SDA0__FUNC_TEST_OUT23 (MTK_PIN_NO(32) | 7) + +#define MT6397_PIN_33_SCL1__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT6397_PIN_33_SCL1__FUNC_SCL1 (MTK_PIN_NO(33) | 1) +#define MT6397_PIN_33_SCL1__FUNC_EINT9_1X (MTK_PIN_NO(33) | 2) +#define MT6397_PIN_33_SCL1__FUNC_PWM2_2X (MTK_PIN_NO(33) | 3) +#define MT6397_PIN_33_SCL1__FUNC_TEST_IN24 (MTK_PIN_NO(33) | 6) +#define MT6397_PIN_33_SCL1__FUNC_TEST_OUT24 (MTK_PIN_NO(33) | 7) + +#define MT6397_PIN_34_SDA1__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT6397_PIN_34_SDA1__FUNC_SDA1 (MTK_PIN_NO(34) | 1) +#define MT6397_PIN_34_SDA1__FUNC_EINT0_1X (MTK_PIN_NO(34) | 2) +#define MT6397_PIN_34_SDA1__FUNC_TEST_IN25 (MTK_PIN_NO(34) | 6) +#define MT6397_PIN_34_SDA1__FUNC_TEST_OUT25 (MTK_PIN_NO(34) | 7) + +#define MT6397_PIN_35_SCL2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT6397_PIN_35_SCL2__FUNC_SCL2 (MTK_PIN_NO(35) | 1) +#define MT6397_PIN_35_SCL2__FUNC_EINT1_1X (MTK_PIN_NO(35) | 2) +#define MT6397_PIN_35_SCL2__FUNC_PWM3_2X (MTK_PIN_NO(35) | 3) +#define MT6397_PIN_35_SCL2__FUNC_TEST_IN26 (MTK_PIN_NO(35) | 6) +#define MT6397_PIN_35_SCL2__FUNC_TEST_OUT26 (MTK_PIN_NO(35) | 7) + +#define MT6397_PIN_36_SDA2__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT6397_PIN_36_SDA2__FUNC_SDA2 (MTK_PIN_NO(36) | 1) +#define MT6397_PIN_36_SDA2__FUNC_EINT2_1X (MTK_PIN_NO(36) | 2) +#define MT6397_PIN_36_SDA2__FUNC_TEST_IN27 (MTK_PIN_NO(36) | 6) +#define MT6397_PIN_36_SDA2__FUNC_TEST_OUT27 (MTK_PIN_NO(36) | 7) + +#define MT6397_PIN_37_HDMISD__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT6397_PIN_37_HDMISD__FUNC_HDMISD (MTK_PIN_NO(37) | 1) +#define MT6397_PIN_37_HDMISD__FUNC_TEST_IN28 (MTK_PIN_NO(37) | 6) +#define MT6397_PIN_37_HDMISD__FUNC_TEST_OUT28 (MTK_PIN_NO(37) | 7) + +#define 
MT6397_PIN_38_HDMISCK__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT6397_PIN_38_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(38) | 1)
+#define MT6397_PIN_38_HDMISCK__FUNC_TEST_IN29 (MTK_PIN_NO(38) | 6)
+#define MT6397_PIN_38_HDMISCK__FUNC_TEST_OUT29 (MTK_PIN_NO(38) | 7)
+
+#define MT6397_PIN_39_HTPLG__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT6397_PIN_39_HTPLG__FUNC_HTPLG (MTK_PIN_NO(39) | 1)
+#define MT6397_PIN_39_HTPLG__FUNC_TEST_IN30 (MTK_PIN_NO(39) | 6)
+#define MT6397_PIN_39_HTPLG__FUNC_TEST_OUT30 (MTK_PIN_NO(39) | 7)
+
+#define MT6397_PIN_40_CEC__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT6397_PIN_40_CEC__FUNC_CEC (MTK_PIN_NO(40) | 1)
+#define MT6397_PIN_40_CEC__FUNC_TEST_IN31 (MTK_PIN_NO(40) | 6)
+#define MT6397_PIN_40_CEC__FUNC_TEST_OUT31 (MTK_PIN_NO(40) | 7)
+
+#endif /* __DTS_MT6397_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
new file mode 100644
index 0000000..7e16e58
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_MT65XX_H
+#define _DT_BINDINGS_PINCTRL_MT65XX_H
+
+#define MTK_PIN_NO(x) ((x) << 8)
+#define MTK_GET_PIN_NO(x) ((x) >> 8)
+#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)
+
+#define MTK_PUPD_SET_R1R0_00 100
+#define MTK_PUPD_SET_R1R0_01 101
+#define MTK_PUPD_SET_R1R0_10 102
+#define MTK_PUPD_SET_R1R0_11 103
+
+#define MTK_DRIVE_2mA 2
+#define MTK_DRIVE_4mA 4
+#define MTK_DRIVE_6mA 6
+#define MTK_DRIVE_8mA 8
+#define MTK_DRIVE_10mA 10
+#define MTK_DRIVE_12mA 12
+#define MTK_DRIVE_14mA 14
+#define MTK_DRIVE_16mA 16
+#define MTK_DRIVE_20mA 20
+#define MTK_DRIVE_24mA 24
+#define MTK_DRIVE_28mA 28
+#define MTK_DRIVE_32mA 32
+
+#endif /* _DT_BINDINGS_PINCTRL_MT65XX_H */
diff --git a/include/dt-bindings/pinctrl/mt6797-pinfunc.h b/include/dt-bindings/pinctrl/mt6797-pinfunc.h
new file mode 100644
index 0000000..e981336
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6797-pinfunc.h
@@ -0,0 +1,1368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MT6797_PINFUNC_H
+#define __DTS_MT6797_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT6797_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT6797_GPIO0__FUNC_CSI0A_L0P_T0A (MTK_PIN_NO(0) | 1)
+
+#define MT6797_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT6797_GPIO1__FUNC_CSI0A_L0N_T0B (MTK_PIN_NO(1) | 1)
+
+#define MT6797_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT6797_GPIO2__FUNC_CSI0A_L1P_T0C (MTK_PIN_NO(2) | 1)
+
+#define MT6797_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT6797_GPIO3__FUNC_CSI0A_L1N_T1A (MTK_PIN_NO(3) | 1)
+
+#define MT6797_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT6797_GPIO4__FUNC_CSI0A_L2P_T1B (MTK_PIN_NO(4) | 1)
+
+#define MT6797_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT6797_GPIO5__FUNC_CSI0A_L2N_T1C (MTK_PIN_NO(5) | 1)
+
+#define MT6797_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT6797_GPIO6__FUNC_CSI0B_L0P_T0A (MTK_PIN_NO(6) | 1)
+
+#define MT6797_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT6797_GPIO7__FUNC_CSI0B_L0N_T0B (MTK_PIN_NO(7) | 1)
+
+#define MT6797_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT6797_GPIO8__FUNC_CSI0B_L1P_T0C (MTK_PIN_NO(8) | 1)
+
+#define MT6797_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT6797_GPIO9__FUNC_CSI0B_L1N_T1A (MTK_PIN_NO(9) | 1)
+
+#define MT6797_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT6797_GPIO10__FUNC_CSI1A_L0P_T0A (MTK_PIN_NO(10) | 1)
+
+#define MT6797_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define
MT6797_GPIO11__FUNC_CSI1A_L0N_T0B (MTK_PIN_NO(11) | 1) + +#define MT6797_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT6797_GPIO12__FUNC_CSI1A_L1P_T0C (MTK_PIN_NO(12) | 1) + +#define MT6797_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT6797_GPIO13__FUNC_CSI1A_L1N_T1A (MTK_PIN_NO(13) | 1) + +#define MT6797_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT6797_GPIO14__FUNC_CSI1A_L2P_T1B (MTK_PIN_NO(14) | 1) + +#define MT6797_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT6797_GPIO15__FUNC_CSI1A_L2N_T1C (MTK_PIN_NO(15) | 1) + +#define MT6797_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT6797_GPIO16__FUNC_CSI1B_L0P_T0A (MTK_PIN_NO(16) | 1) + +#define MT6797_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT6797_GPIO17__FUNC_CSI1B_L0N_T0B (MTK_PIN_NO(17) | 1) + +#define MT6797_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT6797_GPIO18__FUNC_CSI1B_L1P_T0C (MTK_PIN_NO(18) | 1) + +#define MT6797_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT6797_GPIO19__FUNC_CSI1B_L1N_T1A (MTK_PIN_NO(19) | 1) + +#define MT6797_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT6797_GPIO20__FUNC_CSI1B_L2P_T1B (MTK_PIN_NO(20) | 1) + +#define MT6797_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT6797_GPIO21__FUNC_CSI1B_L2N_T1C (MTK_PIN_NO(21) | 1) + +#define MT6797_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT6797_GPIO22__FUNC_CSI2_L0P_T0A (MTK_PIN_NO(22) | 1) + +#define MT6797_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT6797_GPIO23__FUNC_CSI2_L0N_T0B (MTK_PIN_NO(23) | 1) + +#define MT6797_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT6797_GPIO24__FUNC_CSI2_L1P_T0C (MTK_PIN_NO(24) | 1) + +#define MT6797_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT6797_GPIO25__FUNC_CSI2_L1N_T1A (MTK_PIN_NO(25) | 1) + +#define MT6797_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT6797_GPIO26__FUNC_CSI2_L2P_T1B (MTK_PIN_NO(26) | 1) + +#define MT6797_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT6797_GPIO27__FUNC_CSI2_L2N_T1C (MTK_PIN_NO(27) | 1) + +#define MT6797_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT6797_GPIO28__FUNC_SPI5_CLK_A (MTK_PIN_NO(28) | 1) +#define MT6797_GPIO28__FUNC_IRTX_OUT (MTK_PIN_NO(28) | 2) +#define MT6797_GPIO28__FUNC_UDI_TDO (MTK_PIN_NO(28) | 3) +#define MT6797_GPIO28__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(28) | 4) +#define MT6797_GPIO28__FUNC_CONN_MCU_TDO (MTK_PIN_NO(28) | 5) +#define MT6797_GPIO28__FUNC_PWM_A (MTK_PIN_NO(28) | 6) +#define MT6797_GPIO28__FUNC_C2K_DM_OTDO (MTK_PIN_NO(28) | 7) + +#define MT6797_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT6797_GPIO29__FUNC_SPI5_MI_A (MTK_PIN_NO(29) | 1) +#define MT6797_GPIO29__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(29) | 2) +#define MT6797_GPIO29__FUNC_UDI_TMS (MTK_PIN_NO(29) | 3) +#define MT6797_GPIO29__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(29) | 4) +#define MT6797_GPIO29__FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 5) +#define MT6797_GPIO29__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(29) | 6) +#define MT6797_GPIO29__FUNC_C2K_DM_OTMS (MTK_PIN_NO(29) | 7) + +#define MT6797_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT6797_GPIO30__FUNC_CMMCLK0 (MTK_PIN_NO(30) | 1) +#define MT6797_GPIO30__FUNC_MD_CLKM0 (MTK_PIN_NO(30) | 7) + +#define MT6797_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define MT6797_GPIO31__FUNC_CMMCLK1 (MTK_PIN_NO(31) | 1) +#define MT6797_GPIO31__FUNC_MD_CLKM1 (MTK_PIN_NO(31) | 7) + +#define MT6797_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT6797_GPIO32__FUNC_SPI5_CS_A (MTK_PIN_NO(32) | 1) +#define MT6797_GPIO32__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(32) | 2) +#define 
MT6797_GPIO32__FUNC_UDI_TCK_XI (MTK_PIN_NO(32) | 3) +#define MT6797_GPIO32__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(32) | 4) +#define MT6797_GPIO32__FUNC_CONN_MCU_TCK (MTK_PIN_NO(32) | 5) +#define MT6797_GPIO32__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(32) | 6) +#define MT6797_GPIO32__FUNC_C2K_DM_OTCK (MTK_PIN_NO(32) | 7) + +#define MT6797_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT6797_GPIO33__FUNC_SPI5_MO_A (MTK_PIN_NO(33) | 1) +#define MT6797_GPIO33__FUNC_CMFLASH (MTK_PIN_NO(33) | 2) +#define MT6797_GPIO33__FUNC_UDI_TDI (MTK_PIN_NO(33) | 3) +#define MT6797_GPIO33__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(33) | 4) +#define MT6797_GPIO33__FUNC_CONN_MCU_TDI (MTK_PIN_NO(33) | 5) +#define MT6797_GPIO33__FUNC_MD_URXD0 (MTK_PIN_NO(33) | 6) +#define MT6797_GPIO33__FUNC_C2K_DM_OTDI (MTK_PIN_NO(33) | 7) + +#define MT6797_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT6797_GPIO34__FUNC_CMFLASH (MTK_PIN_NO(34) | 1) +#define MT6797_GPIO34__FUNC_CLKM0 (MTK_PIN_NO(34) | 2) +#define MT6797_GPIO34__FUNC_UDI_NTRST (MTK_PIN_NO(34) | 3) +#define MT6797_GPIO34__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(34) | 4) +#define MT6797_GPIO34__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5) +#define MT6797_GPIO34__FUNC_MD_UTXD0 (MTK_PIN_NO(34) | 6) +#define MT6797_GPIO34__FUNC_C2K_DM_JTINTP (MTK_PIN_NO(34) | 7) + +#define MT6797_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT6797_GPIO35__FUNC_CMMCLK3 (MTK_PIN_NO(35) | 1) +#define MT6797_GPIO35__FUNC_CLKM1 (MTK_PIN_NO(35) | 2) +#define MT6797_GPIO35__FUNC_MD_URXD1 (MTK_PIN_NO(35) | 3) +#define MT6797_GPIO35__FUNC_PTA_RXD (MTK_PIN_NO(35) | 4) +#define MT6797_GPIO35__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(35) | 5) +#define MT6797_GPIO35__FUNC_PWM_B (MTK_PIN_NO(35) | 6) +#define MT6797_GPIO35__FUNC_PCC_PPC_IO (MTK_PIN_NO(35) | 7) + +#define MT6797_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT6797_GPIO36__FUNC_CMMCLK2 (MTK_PIN_NO(36) | 1) +#define MT6797_GPIO36__FUNC_CLKM2 (MTK_PIN_NO(36) | 2) +#define MT6797_GPIO36__FUNC_MD_UTXD1 (MTK_PIN_NO(36) | 3) +#define MT6797_GPIO36__FUNC_PTA_TXD (MTK_PIN_NO(36) | 4) +#define MT6797_GPIO36__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(36) | 5) +#define MT6797_GPIO36__FUNC_PWM_C (MTK_PIN_NO(36) | 6) +#define MT6797_GPIO36__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(36) | 7) + +#define MT6797_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT6797_GPIO37__FUNC_SCL0_0 (MTK_PIN_NO(37) | 1) + +#define MT6797_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define MT6797_GPIO38__FUNC_SDA0_0 (MTK_PIN_NO(38) | 1) + +#define MT6797_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT6797_GPIO39__FUNC_DPI_D0 (MTK_PIN_NO(39) | 1) +#define MT6797_GPIO39__FUNC_SPI1_CLK_A (MTK_PIN_NO(39) | 2) +#define MT6797_GPIO39__FUNC_PCM0_SYNC (MTK_PIN_NO(39) | 3) +#define MT6797_GPIO39__FUNC_I2S0_LRCK (MTK_PIN_NO(39) | 4) +#define MT6797_GPIO39__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(39) | 5) +#define MT6797_GPIO39__FUNC_URXD3 (MTK_PIN_NO(39) | 6) +#define MT6797_GPIO39__FUNC_C2K_NTRST (MTK_PIN_NO(39) | 7) + +#define MT6797_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT6797_GPIO40__FUNC_DPI_D1 (MTK_PIN_NO(40) | 1) +#define MT6797_GPIO40__FUNC_SPI1_MI_A (MTK_PIN_NO(40) | 2) +#define MT6797_GPIO40__FUNC_PCM0_CLK (MTK_PIN_NO(40) | 3) +#define MT6797_GPIO40__FUNC_I2S0_BCK (MTK_PIN_NO(40) | 4) +#define MT6797_GPIO40__FUNC_CONN_MCU_TDO (MTK_PIN_NO(40) | 5) +#define MT6797_GPIO40__FUNC_UTXD3 (MTK_PIN_NO(40) | 6) +#define MT6797_GPIO40__FUNC_C2K_TCK (MTK_PIN_NO(40) | 7) + +#define MT6797_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT6797_GPIO41__FUNC_DPI_D2 (MTK_PIN_NO(41) | 1) +#define 
MT6797_GPIO41__FUNC_SPI1_CS_A (MTK_PIN_NO(41) | 2) +#define MT6797_GPIO41__FUNC_PCM0_DO (MTK_PIN_NO(41) | 3) +#define MT6797_GPIO41__FUNC_I2S3_DO (MTK_PIN_NO(41) | 4) +#define MT6797_GPIO41__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(41) | 5) +#define MT6797_GPIO41__FUNC_URTS3 (MTK_PIN_NO(41) | 6) +#define MT6797_GPIO41__FUNC_C2K_TDI (MTK_PIN_NO(41) | 7) + +#define MT6797_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT6797_GPIO42__FUNC_DPI_D3 (MTK_PIN_NO(42) | 1) +#define MT6797_GPIO42__FUNC_SPI1_MO_A (MTK_PIN_NO(42) | 2) +#define MT6797_GPIO42__FUNC_PCM0_DI (MTK_PIN_NO(42) | 3) +#define MT6797_GPIO42__FUNC_I2S0_DI (MTK_PIN_NO(42) | 4) +#define MT6797_GPIO42__FUNC_CONN_MCU_TDI (MTK_PIN_NO(42) | 5) +#define MT6797_GPIO42__FUNC_UCTS3 (MTK_PIN_NO(42) | 6) +#define MT6797_GPIO42__FUNC_C2K_TMS (MTK_PIN_NO(42) | 7) + +#define MT6797_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT6797_GPIO43__FUNC_DPI_D4 (MTK_PIN_NO(43) | 1) +#define MT6797_GPIO43__FUNC_SPI2_CLK_A (MTK_PIN_NO(43) | 2) +#define MT6797_GPIO43__FUNC_PCM1_SYNC (MTK_PIN_NO(43) | 3) +#define MT6797_GPIO43__FUNC_I2S2_LRCK (MTK_PIN_NO(43) | 4) +#define MT6797_GPIO43__FUNC_CONN_MCU_TMS (MTK_PIN_NO(43) | 5) +#define MT6797_GPIO43__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(43) | 6) +#define MT6797_GPIO43__FUNC_C2K_TDO (MTK_PIN_NO(43) | 7) + +#define MT6797_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT6797_GPIO44__FUNC_DPI_D5 (MTK_PIN_NO(44) | 1) +#define MT6797_GPIO44__FUNC_SPI2_MI_A (MTK_PIN_NO(44) | 2) +#define MT6797_GPIO44__FUNC_PCM1_CLK (MTK_PIN_NO(44) | 3) +#define MT6797_GPIO44__FUNC_I2S2_BCK (MTK_PIN_NO(44) | 4) +#define MT6797_GPIO44__FUNC_CONN_MCU_TCK (MTK_PIN_NO(44) | 5) +#define MT6797_GPIO44__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(44) | 6) +#define MT6797_GPIO44__FUNC_C2K_RTCK (MTK_PIN_NO(44) | 7) + +#define MT6797_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT6797_GPIO45__FUNC_DPI_D6 (MTK_PIN_NO(45) | 1) +#define MT6797_GPIO45__FUNC_SPI2_CS_A (MTK_PIN_NO(45) | 2) +#define MT6797_GPIO45__FUNC_PCM1_DI (MTK_PIN_NO(45) | 3) +#define MT6797_GPIO45__FUNC_I2S2_DI (MTK_PIN_NO(45) | 4) +#define MT6797_GPIO45__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(45) | 5) +#define MT6797_GPIO45__FUNC_MD_URXD0 (MTK_PIN_NO(45) | 6) + +#define MT6797_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT6797_GPIO46__FUNC_DPI_D7 (MTK_PIN_NO(46) | 1) +#define MT6797_GPIO46__FUNC_SPI2_MO_A (MTK_PIN_NO(46) | 2) +#define MT6797_GPIO46__FUNC_PCM1_DO0 (MTK_PIN_NO(46) | 3) +#define MT6797_GPIO46__FUNC_I2S1_DO (MTK_PIN_NO(46) | 4) +#define MT6797_GPIO46__FUNC_ANT_SEL0 (MTK_PIN_NO(46) | 5) +#define MT6797_GPIO46__FUNC_MD_UTXD0 (MTK_PIN_NO(46) | 6) + +#define MT6797_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT6797_GPIO47__FUNC_DPI_D8 (MTK_PIN_NO(47) | 1) +#define MT6797_GPIO47__FUNC_CLKM0 (MTK_PIN_NO(47) | 2) +#define MT6797_GPIO47__FUNC_PCM1_DO1 (MTK_PIN_NO(47) | 3) +#define MT6797_GPIO47__FUNC_I2S0_MCK (MTK_PIN_NO(47) | 4) +#define MT6797_GPIO47__FUNC_ANT_SEL1 (MTK_PIN_NO(47) | 5) +#define MT6797_GPIO47__FUNC_PTA_RXD (MTK_PIN_NO(47) | 6) +#define MT6797_GPIO47__FUNC_C2K_URXD0 (MTK_PIN_NO(47) | 7) + +#define MT6797_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT6797_GPIO48__FUNC_DPI_D9 (MTK_PIN_NO(48) | 1) +#define MT6797_GPIO48__FUNC_CLKM1 (MTK_PIN_NO(48) | 2) +#define MT6797_GPIO48__FUNC_CMFLASH (MTK_PIN_NO(48) | 3) +#define MT6797_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 4) +#define MT6797_GPIO48__FUNC_ANT_SEL2 (MTK_PIN_NO(48) | 5) +#define MT6797_GPIO48__FUNC_PTA_TXD (MTK_PIN_NO(48) | 6) +#define MT6797_GPIO48__FUNC_C2K_UTXD0 (MTK_PIN_NO(48) | 7) + 
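These pinmux constants pack two fields into one integer using the helpers from mt65xx.h introduced earlier in this diff: the GPIO number sits in bits 8 and up (MTK_PIN_NO) and the mux function index sits in the low nibble. A minimal standalone sketch of the round trip (not part of this diff; the macros are restated from the header above so the snippet compiles on its own, and the main() harness is purely illustrative):

    #include <stdio.h>

    /* Restated from include/dt-bindings/pinctrl/mt65xx.h */
    #define MTK_PIN_NO(x)       ((x) << 8)
    #define MTK_GET_PIN_NO(x)   ((x) >> 8)
    #define MTK_GET_PIN_FUNC(x) ((x) & 0xf)

    /* One entry from the MT6797 table above: GPIO48 muxed as I2S2_MCK */
    #define MT6797_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 4)

    int main(void)
    {
        unsigned int v = MT6797_GPIO48__FUNC_I2S2_MCK; /* (48 << 8) | 4 = 0x3004 */

        /* The consumer splits the value back apart; prints "pin 48 func 4" */
        printf("pin %u func %u\n", MTK_GET_PIN_NO(v), MTK_GET_PIN_FUNC(v));
        return 0;
    }

The MTK_PUPD_SET_R1R0_* and MTK_DRIVE_* constants in the same header are not packed this way; they are plain values passed through as-is for bias and drive-strength configuration.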
+#define MT6797_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT6797_GPIO49__FUNC_DPI_D10 (MTK_PIN_NO(49) | 1) +#define MT6797_GPIO49__FUNC_MD_INT1_C2K_UIM1_HOT_PLUG_IN (MTK_PIN_NO(49) | 2) +#define MT6797_GPIO49__FUNC_PWM_C (MTK_PIN_NO(49) | 3) +#define MT6797_GPIO49__FUNC_IRTX_OUT (MTK_PIN_NO(49) | 4) +#define MT6797_GPIO49__FUNC_ANT_SEL3 (MTK_PIN_NO(49) | 5) +#define MT6797_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 6) + +#define MT6797_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define MT6797_GPIO50__FUNC_DPI_D11 (MTK_PIN_NO(50) | 1) +#define MT6797_GPIO50__FUNC_MD_INT2 (MTK_PIN_NO(50) | 2) +#define MT6797_GPIO50__FUNC_PWM_D (MTK_PIN_NO(50) | 3) +#define MT6797_GPIO50__FUNC_CLKM2 (MTK_PIN_NO(50) | 4) +#define MT6797_GPIO50__FUNC_ANT_SEL4 (MTK_PIN_NO(50) | 5) +#define MT6797_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 6) + +#define MT6797_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define MT6797_GPIO51__FUNC_DPI_DE (MTK_PIN_NO(51) | 1) +#define MT6797_GPIO51__FUNC_SPI4_CLK_A (MTK_PIN_NO(51) | 2) +#define MT6797_GPIO51__FUNC_IRTX_OUT (MTK_PIN_NO(51) | 3) +#define MT6797_GPIO51__FUNC_SCL0_1 (MTK_PIN_NO(51) | 4) +#define MT6797_GPIO51__FUNC_ANT_SEL5 (MTK_PIN_NO(51) | 5) +#define MT6797_GPIO51__FUNC_C2K_UTXD1 (MTK_PIN_NO(51) | 7) + +#define MT6797_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define MT6797_GPIO52__FUNC_DPI_CK (MTK_PIN_NO(52) | 1) +#define MT6797_GPIO52__FUNC_SPI4_MI_A (MTK_PIN_NO(52) | 2) +#define MT6797_GPIO52__FUNC_SPI4_MO_A (MTK_PIN_NO(52) | 3) +#define MT6797_GPIO52__FUNC_SDA0_1 (MTK_PIN_NO(52) | 4) +#define MT6797_GPIO52__FUNC_ANT_SEL6 (MTK_PIN_NO(52) | 5) +#define MT6797_GPIO52__FUNC_C2K_URXD1 (MTK_PIN_NO(52) | 7) + +#define MT6797_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT6797_GPIO53__FUNC_DPI_HSYNC (MTK_PIN_NO(53) | 1) +#define MT6797_GPIO53__FUNC_SPI4_CS_A (MTK_PIN_NO(53) | 2) +#define MT6797_GPIO53__FUNC_CMFLASH (MTK_PIN_NO(53) | 3) +#define MT6797_GPIO53__FUNC_SCL1_1 (MTK_PIN_NO(53) | 4) +#define MT6797_GPIO53__FUNC_ANT_SEL7 (MTK_PIN_NO(53) | 5) +#define MT6797_GPIO53__FUNC_MD_URXD2 (MTK_PIN_NO(53) | 6) +#define MT6797_GPIO53__FUNC_PCC_PPC_IO (MTK_PIN_NO(53) | 7) + +#define MT6797_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT6797_GPIO54__FUNC_DPI_VSYNC (MTK_PIN_NO(54) | 1) +#define MT6797_GPIO54__FUNC_SPI4_MO_A (MTK_PIN_NO(54) | 2) +#define MT6797_GPIO54__FUNC_SPI4_MI_A (MTK_PIN_NO(54) | 3) +#define MT6797_GPIO54__FUNC_SDA1_1 (MTK_PIN_NO(54) | 4) +#define MT6797_GPIO54__FUNC_PWM_A (MTK_PIN_NO(54) | 5) +#define MT6797_GPIO54__FUNC_MD_UTXD2 (MTK_PIN_NO(54) | 6) +#define MT6797_GPIO54__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(54) | 7) + +#define MT6797_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT6797_GPIO55__FUNC_SCL1_0 (MTK_PIN_NO(55) | 1) + +#define MT6797_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT6797_GPIO56__FUNC_SDA1_0 (MTK_PIN_NO(56) | 1) + +#define MT6797_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT6797_GPIO57__FUNC_SPI0_CLK (MTK_PIN_NO(57) | 1) +#define MT6797_GPIO57__FUNC_SCL0_2 (MTK_PIN_NO(57) | 2) +#define MT6797_GPIO57__FUNC_PWM_B (MTK_PIN_NO(57) | 3) +#define MT6797_GPIO57__FUNC_UTXD3 (MTK_PIN_NO(57) | 4) +#define MT6797_GPIO57__FUNC_PCM0_SYNC (MTK_PIN_NO(57) | 5) + +#define MT6797_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT6797_GPIO58__FUNC_SPI0_MI (MTK_PIN_NO(58) | 1) +#define MT6797_GPIO58__FUNC_SPI0_MO (MTK_PIN_NO(58) | 2) +#define MT6797_GPIO58__FUNC_SDA1_2 (MTK_PIN_NO(58) | 3) +#define MT6797_GPIO58__FUNC_URXD3 (MTK_PIN_NO(58) | 4) +#define MT6797_GPIO58__FUNC_PCM0_CLK (MTK_PIN_NO(58) | 5) + +#define 
MT6797_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define MT6797_GPIO59__FUNC_SPI0_MO (MTK_PIN_NO(59) | 1) +#define MT6797_GPIO59__FUNC_SPI0_MI (MTK_PIN_NO(59) | 2) +#define MT6797_GPIO59__FUNC_PWM_C (MTK_PIN_NO(59) | 3) +#define MT6797_GPIO59__FUNC_URTS3 (MTK_PIN_NO(59) | 4) +#define MT6797_GPIO59__FUNC_PCM0_DO (MTK_PIN_NO(59) | 5) + +#define MT6797_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT6797_GPIO60__FUNC_SPI0_CS (MTK_PIN_NO(60) | 1) +#define MT6797_GPIO60__FUNC_SDA0_2 (MTK_PIN_NO(60) | 2) +#define MT6797_GPIO60__FUNC_SCL1_2 (MTK_PIN_NO(60) | 3) +#define MT6797_GPIO60__FUNC_UCTS3 (MTK_PIN_NO(60) | 4) +#define MT6797_GPIO60__FUNC_PCM0_DI (MTK_PIN_NO(60) | 5) + +#define MT6797_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT6797_GPIO61__FUNC_EINT0 (MTK_PIN_NO(61) | 1) +#define MT6797_GPIO61__FUNC_IDDIG (MTK_PIN_NO(61) | 2) +#define MT6797_GPIO61__FUNC_SPI4_CLK_B (MTK_PIN_NO(61) | 3) +#define MT6797_GPIO61__FUNC_I2S0_LRCK (MTK_PIN_NO(61) | 4) +#define MT6797_GPIO61__FUNC_PCM0_SYNC (MTK_PIN_NO(61) | 5) +#define MT6797_GPIO61__FUNC_C2K_EINT0 (MTK_PIN_NO(61) | 7) + +#define MT6797_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT6797_GPIO62__FUNC_EINT1 (MTK_PIN_NO(62) | 1) +#define MT6797_GPIO62__FUNC_USB_DRVVBUS (MTK_PIN_NO(62) | 2) +#define MT6797_GPIO62__FUNC_SPI4_MI_B (MTK_PIN_NO(62) | 3) +#define MT6797_GPIO62__FUNC_I2S0_BCK (MTK_PIN_NO(62) | 4) +#define MT6797_GPIO62__FUNC_PCM0_CLK (MTK_PIN_NO(62) | 5) +#define MT6797_GPIO62__FUNC_C2K_EINT1 (MTK_PIN_NO(62) | 7) + +#define MT6797_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT6797_GPIO63__FUNC_EINT2 (MTK_PIN_NO(63) | 1) +#define MT6797_GPIO63__FUNC_IRTX_OUT (MTK_PIN_NO(63) | 2) +#define MT6797_GPIO63__FUNC_SPI4_MO_B (MTK_PIN_NO(63) | 3) +#define MT6797_GPIO63__FUNC_I2S0_MCK (MTK_PIN_NO(63) | 4) +#define MT6797_GPIO63__FUNC_PCM0_DI (MTK_PIN_NO(63) | 5) +#define MT6797_GPIO63__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(63) | 7) + +#define MT6797_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT6797_GPIO64__FUNC_EINT3 (MTK_PIN_NO(64) | 1) +#define MT6797_GPIO64__FUNC_CMFLASH (MTK_PIN_NO(64) | 2) +#define MT6797_GPIO64__FUNC_SPI4_CS_B (MTK_PIN_NO(64) | 3) +#define MT6797_GPIO64__FUNC_I2S0_DI (MTK_PIN_NO(64) | 4) +#define MT6797_GPIO64__FUNC_PCM0_DO (MTK_PIN_NO(64) | 5) +#define MT6797_GPIO64__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(64) | 7) + +#define MT6797_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT6797_GPIO65__FUNC_EINT4 (MTK_PIN_NO(65) | 1) +#define MT6797_GPIO65__FUNC_CLKM0 (MTK_PIN_NO(65) | 2) +#define MT6797_GPIO65__FUNC_SPI5_CLK_B (MTK_PIN_NO(65) | 3) +#define MT6797_GPIO65__FUNC_I2S1_LRCK (MTK_PIN_NO(65) | 4) +#define MT6797_GPIO65__FUNC_PWM_A (MTK_PIN_NO(65) | 5) +#define MT6797_GPIO65__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(65) | 7) + +#define MT6797_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT6797_GPIO66__FUNC_EINT5 (MTK_PIN_NO(66) | 1) +#define MT6797_GPIO66__FUNC_CLKM1 (MTK_PIN_NO(66) | 2) +#define MT6797_GPIO66__FUNC_SPI5_MI_B (MTK_PIN_NO(66) | 3) +#define MT6797_GPIO66__FUNC_I2S1_BCK (MTK_PIN_NO(66) | 4) +#define MT6797_GPIO66__FUNC_PWM_B (MTK_PIN_NO(66) | 5) +#define MT6797_GPIO66__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(66) | 7) + +#define MT6797_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT6797_GPIO67__FUNC_EINT6 (MTK_PIN_NO(67) | 1) +#define MT6797_GPIO67__FUNC_CLKM2 (MTK_PIN_NO(67) | 2) +#define MT6797_GPIO67__FUNC_SPI5_MO_B (MTK_PIN_NO(67) | 3) +#define MT6797_GPIO67__FUNC_I2S1_MCK (MTK_PIN_NO(67) | 4) +#define MT6797_GPIO67__FUNC_PWM_C (MTK_PIN_NO(67) | 5) +#define MT6797_GPIO67__FUNC_DBG_MON_A0 (MTK_PIN_NO(67) 
| 7) + +#define MT6797_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT6797_GPIO68__FUNC_EINT7 (MTK_PIN_NO(68) | 1) +#define MT6797_GPIO68__FUNC_CLKM3 (MTK_PIN_NO(68) | 2) +#define MT6797_GPIO68__FUNC_SPI5_CS_B (MTK_PIN_NO(68) | 3) +#define MT6797_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 4) +#define MT6797_GPIO68__FUNC_PWM_D (MTK_PIN_NO(68) | 5) +#define MT6797_GPIO68__FUNC_DBG_MON_A1 (MTK_PIN_NO(68) | 7) + +#define MT6797_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT6797_GPIO69__FUNC_I2S0_LRCK (MTK_PIN_NO(69) | 1) +#define MT6797_GPIO69__FUNC_I2S3_LRCK (MTK_PIN_NO(69) | 2) +#define MT6797_GPIO69__FUNC_I2S1_LRCK (MTK_PIN_NO(69) | 3) +#define MT6797_GPIO69__FUNC_I2S2_LRCK (MTK_PIN_NO(69) | 4) +#define MT6797_GPIO69__FUNC_DBG_MON_A2 (MTK_PIN_NO(69) | 7) + +#define MT6797_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT6797_GPIO70__FUNC_I2S0_BCK (MTK_PIN_NO(70) | 1) +#define MT6797_GPIO70__FUNC_I2S3_BCK (MTK_PIN_NO(70) | 2) +#define MT6797_GPIO70__FUNC_I2S1_BCK (MTK_PIN_NO(70) | 3) +#define MT6797_GPIO70__FUNC_I2S2_BCK (MTK_PIN_NO(70) | 4) +#define MT6797_GPIO70__FUNC_DBG_MON_A3 (MTK_PIN_NO(70) | 7) + +#define MT6797_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT6797_GPIO71__FUNC_I2S0_MCK (MTK_PIN_NO(71) | 1) +#define MT6797_GPIO71__FUNC_I2S3_MCK (MTK_PIN_NO(71) | 2) +#define MT6797_GPIO71__FUNC_I2S1_MCK (MTK_PIN_NO(71) | 3) +#define MT6797_GPIO71__FUNC_I2S2_MCK (MTK_PIN_NO(71) | 4) +#define MT6797_GPIO71__FUNC_DBG_MON_A4 (MTK_PIN_NO(71) | 7) + +#define MT6797_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +/* #define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 1) */ +#define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 2) +/* #define MT6797_GPIO72__FUNC_I2S2_DI (MTK_PIN_NO(72) | 3) */ +#define MT6797_GPIO72__FUNC_I2S2_DI (MTK_PIN_NO(72) | 4) +#define MT6797_GPIO72__FUNC_DBG_MON_A5 (MTK_PIN_NO(72) | 7) + +#define MT6797_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +/* #define MT6797_GPIO73__FUNC_I2S3_DO (MTK_PIN_NO(73) | 1) */ +#define MT6797_GPIO73__FUNC_I2S3_DO (MTK_PIN_NO(73) | 2) +/* #define MT6797_GPIO73__FUNC_I2S1_DO (MTK_PIN_NO(73) | 3) */ +#define MT6797_GPIO73__FUNC_I2S1_DO (MTK_PIN_NO(73) | 4) +#define MT6797_GPIO73__FUNC_DBG_MON_A6 (MTK_PIN_NO(73) | 7) + +#define MT6797_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT6797_GPIO74__FUNC_SCL3_0 (MTK_PIN_NO(74) | 1) +#define MT6797_GPIO74__FUNC_AUXIF_CLK1 (MTK_PIN_NO(74) | 7) + +#define MT6797_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT6797_GPIO75__FUNC_SDA3_0 (MTK_PIN_NO(75) | 1) +#define MT6797_GPIO75__FUNC_AUXIF_ST1 (MTK_PIN_NO(75) | 7) + +#define MT6797_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT6797_GPIO76__FUNC_CONN_HRST_B (MTK_PIN_NO(76) | 1) +#define MT6797_GPIO76__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(76) | 7) + +#define MT6797_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT6797_GPIO77__FUNC_CONN_TOP_CLK (MTK_PIN_NO(77) | 1) +#define MT6797_GPIO77__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(77) | 7) + +#define MT6797_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT6797_GPIO78__FUNC_CONN_TOP_DATA (MTK_PIN_NO(78) | 1) +#define MT6797_GPIO78__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(78) | 7) + +#define MT6797_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT6797_GPIO79__FUNC_CONN_WB_PTA (MTK_PIN_NO(79) | 1) +#define MT6797_GPIO79__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(79) | 7) + +#define MT6797_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT6797_GPIO80__FUNC_CONN_WF_HB0 (MTK_PIN_NO(80) | 1) +#define MT6797_GPIO80__FUNC_C2K_EINT0 (MTK_PIN_NO(80) | 7) + +#define MT6797_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define 
MT6797_GPIO81__FUNC_CONN_WF_HB1 (MTK_PIN_NO(81) | 1) +#define MT6797_GPIO81__FUNC_C2K_EINT1 (MTK_PIN_NO(81) | 7) + +#define MT6797_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT6797_GPIO82__FUNC_CONN_WF_HB2 (MTK_PIN_NO(82) | 1) +#define MT6797_GPIO82__FUNC_MD_CLKM0 (MTK_PIN_NO(82) | 7) + +#define MT6797_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT6797_GPIO83__FUNC_CONN_BT_CLK (MTK_PIN_NO(83) | 1) +#define MT6797_GPIO83__FUNC_MD_CLKM1 (MTK_PIN_NO(83) | 7) + +#define MT6797_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT6797_GPIO84__FUNC_CONN_BT_DATA (MTK_PIN_NO(84) | 1) + +#define MT6797_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define MT6797_GPIO85__FUNC_EINT8 (MTK_PIN_NO(85) | 1) +#define MT6797_GPIO85__FUNC_I2S1_LRCK (MTK_PIN_NO(85) | 2) +#define MT6797_GPIO85__FUNC_I2S2_LRCK (MTK_PIN_NO(85) | 3) +#define MT6797_GPIO85__FUNC_URXD1 (MTK_PIN_NO(85) | 4) +#define MT6797_GPIO85__FUNC_MD_URXD0 (MTK_PIN_NO(85) | 5) +#define MT6797_GPIO85__FUNC_DBG_MON_A7 (MTK_PIN_NO(85) | 7) + +#define MT6797_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define MT6797_GPIO86__FUNC_EINT9 (MTK_PIN_NO(86) | 1) +#define MT6797_GPIO86__FUNC_I2S1_BCK (MTK_PIN_NO(86) | 2) +#define MT6797_GPIO86__FUNC_I2S2_BCK (MTK_PIN_NO(86) | 3) +#define MT6797_GPIO86__FUNC_UTXD1 (MTK_PIN_NO(86) | 4) +#define MT6797_GPIO86__FUNC_MD_UTXD0 (MTK_PIN_NO(86) | 5) +#define MT6797_GPIO86__FUNC_DBG_MON_A8 (MTK_PIN_NO(86) | 7) + +#define MT6797_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define MT6797_GPIO87__FUNC_EINT10 (MTK_PIN_NO(87) | 1) +#define MT6797_GPIO87__FUNC_I2S1_MCK (MTK_PIN_NO(87) | 2) +#define MT6797_GPIO87__FUNC_I2S2_MCK (MTK_PIN_NO(87) | 3) +#define MT6797_GPIO87__FUNC_URTS1 (MTK_PIN_NO(87) | 4) +#define MT6797_GPIO87__FUNC_MD_URXD1 (MTK_PIN_NO(87) | 5) +#define MT6797_GPIO87__FUNC_DBG_MON_A9 (MTK_PIN_NO(87) | 7) + +#define MT6797_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define MT6797_GPIO88__FUNC_EINT11 (MTK_PIN_NO(88) | 1) +#define MT6797_GPIO88__FUNC_I2S1_DO (MTK_PIN_NO(88) | 2) +#define MT6797_GPIO88__FUNC_I2S2_DI (MTK_PIN_NO(88) | 3) +#define MT6797_GPIO88__FUNC_UCTS1 (MTK_PIN_NO(88) | 4) +#define MT6797_GPIO88__FUNC_MD_UTXD1 (MTK_PIN_NO(88) | 5) +#define MT6797_GPIO88__FUNC_DBG_MON_A10 (MTK_PIN_NO(88) | 7) + +#define MT6797_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define MT6797_GPIO89__FUNC_EINT12 (MTK_PIN_NO(89) | 1) +#define MT6797_GPIO89__FUNC_IRTX_OUT (MTK_PIN_NO(89) | 2) +#define MT6797_GPIO89__FUNC_CLKM0 (MTK_PIN_NO(89) | 3) +#define MT6797_GPIO89__FUNC_PCM1_SYNC (MTK_PIN_NO(89) | 4) +#define MT6797_GPIO89__FUNC_URTS0 (MTK_PIN_NO(89) | 5) +#define MT6797_GPIO89__FUNC_DBG_MON_A11 (MTK_PIN_NO(89) | 7) + +#define MT6797_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define MT6797_GPIO90__FUNC_EINT13 (MTK_PIN_NO(90) | 1) +#define MT6797_GPIO90__FUNC_CMFLASH (MTK_PIN_NO(90) | 2) +#define MT6797_GPIO90__FUNC_CLKM1 (MTK_PIN_NO(90) | 3) +#define MT6797_GPIO90__FUNC_PCM1_CLK (MTK_PIN_NO(90) | 4) +#define MT6797_GPIO90__FUNC_UCTS0 (MTK_PIN_NO(90) | 5) +#define MT6797_GPIO90__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(90) | 7) + +#define MT6797_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT6797_GPIO91__FUNC_EINT14 (MTK_PIN_NO(91) | 1) +#define MT6797_GPIO91__FUNC_PWM_A (MTK_PIN_NO(91) | 2) +#define MT6797_GPIO91__FUNC_CLKM2 (MTK_PIN_NO(91) | 3) +#define MT6797_GPIO91__FUNC_PCM1_DI (MTK_PIN_NO(91) | 4) +#define MT6797_GPIO91__FUNC_SDA0_3 (MTK_PIN_NO(91) | 5) +#define MT6797_GPIO91__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(91) | 7) + +#define MT6797_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT6797_GPIO92__FUNC_EINT15 
(MTK_PIN_NO(92) | 1) +#define MT6797_GPIO92__FUNC_PWM_B (MTK_PIN_NO(92) | 2) +#define MT6797_GPIO92__FUNC_CLKM3 (MTK_PIN_NO(92) | 3) +#define MT6797_GPIO92__FUNC_PCM1_DO0 (MTK_PIN_NO(92) | 4) +#define MT6797_GPIO92__FUNC_SCL0_3 (MTK_PIN_NO(92) | 5) + +#define MT6797_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT6797_GPIO93__FUNC_EINT16 (MTK_PIN_NO(93) | 1) +#define MT6797_GPIO93__FUNC_IDDIG (MTK_PIN_NO(93) | 2) +#define MT6797_GPIO93__FUNC_CLKM4 (MTK_PIN_NO(93) | 3) +#define MT6797_GPIO93__FUNC_PCM1_DO1 (MTK_PIN_NO(93) | 4) +#define MT6797_GPIO93__FUNC_MD_INT2 (MTK_PIN_NO(93) | 5) +#define MT6797_GPIO93__FUNC_DROP_ZONE (MTK_PIN_NO(93) | 7) + +#define MT6797_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT6797_GPIO94__FUNC_USB_DRVVBUS (MTK_PIN_NO(94) | 1) +#define MT6797_GPIO94__FUNC_PWM_C (MTK_PIN_NO(94) | 2) +#define MT6797_GPIO94__FUNC_CLKM5 (MTK_PIN_NO(94) | 3) + +#define MT6797_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT6797_GPIO95__FUNC_SDA2_0 (MTK_PIN_NO(95) | 1) +#define MT6797_GPIO95__FUNC_AUXIF_ST0 (MTK_PIN_NO(95) | 7) + +#define MT6797_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT6797_GPIO96__FUNC_SCL2_0 (MTK_PIN_NO(96) | 1) +#define MT6797_GPIO96__FUNC_AUXIF_CLK0 (MTK_PIN_NO(96) | 7) + +#define MT6797_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT6797_GPIO97__FUNC_URXD0 (MTK_PIN_NO(97) | 1) +#define MT6797_GPIO97__FUNC_UTXD0 (MTK_PIN_NO(97) | 2) +#define MT6797_GPIO97__FUNC_MD_URXD0 (MTK_PIN_NO(97) | 3) +#define MT6797_GPIO97__FUNC_MD_URXD1 (MTK_PIN_NO(97) | 4) +#define MT6797_GPIO97__FUNC_MD_URXD2 (MTK_PIN_NO(97) | 5) +#define MT6797_GPIO97__FUNC_C2K_URXD0 (MTK_PIN_NO(97) | 6) +#define MT6797_GPIO97__FUNC_C2K_URXD1 (MTK_PIN_NO(97) | 7) + +#define MT6797_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT6797_GPIO98__FUNC_UTXD0 (MTK_PIN_NO(98) | 1) +#define MT6797_GPIO98__FUNC_URXD0 (MTK_PIN_NO(98) | 2) +#define MT6797_GPIO98__FUNC_MD_UTXD0 (MTK_PIN_NO(98) | 3) +#define MT6797_GPIO98__FUNC_MD_UTXD1 (MTK_PIN_NO(98) | 4) +#define MT6797_GPIO98__FUNC_MD_UTXD2 (MTK_PIN_NO(98) | 5) +#define MT6797_GPIO98__FUNC_C2K_UTXD0 (MTK_PIN_NO(98) | 6) +#define MT6797_GPIO98__FUNC_C2K_UTXD1 (MTK_PIN_NO(98) | 7) + +#define MT6797_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT6797_GPIO99__FUNC_RTC32K_CK (MTK_PIN_NO(99) | 1) + +#define MT6797_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT6797_GPIO100__FUNC_SRCLKENAI0 (MTK_PIN_NO(100) | 1) + +#define MT6797_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define MT6797_GPIO101__FUNC_SRCLKENAI1 (MTK_PIN_NO(101) | 1) + +#define MT6797_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT6797_GPIO102__FUNC_SRCLKENA0 (MTK_PIN_NO(102) | 1) + +#define MT6797_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT6797_GPIO103__FUNC_SRCLKENA1 (MTK_PIN_NO(103) | 1) + +#define MT6797_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT6797_GPIO104__FUNC_SYSRSTB (MTK_PIN_NO(104) | 1) + +#define MT6797_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT6797_GPIO105__FUNC_WATCHDOG (MTK_PIN_NO(105) | 1) + +#define MT6797_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT6797_GPIO106__FUNC_KPROW0 (MTK_PIN_NO(106) | 1) +#define MT6797_GPIO106__FUNC_CMFLASH (MTK_PIN_NO(106) | 2) +#define MT6797_GPIO106__FUNC_CLKM4 (MTK_PIN_NO(106) | 3) +#define MT6797_GPIO106__FUNC_TP_GPIO0_AO (MTK_PIN_NO(106) | 4) +#define MT6797_GPIO106__FUNC_IRTX_OUT (MTK_PIN_NO(106) | 5) + +#define MT6797_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT6797_GPIO107__FUNC_KPROW1 (MTK_PIN_NO(107) | 1) +#define 
MT6797_GPIO107__FUNC_IDDIG (MTK_PIN_NO(107) | 2) +#define MT6797_GPIO107__FUNC_CLKM5 (MTK_PIN_NO(107) | 3) +#define MT6797_GPIO107__FUNC_TP_GPIO1_AO (MTK_PIN_NO(107) | 4) +#define MT6797_GPIO107__FUNC_I2S1_BCK (MTK_PIN_NO(107) | 5) +#define MT6797_GPIO107__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(107) | 7) + +#define MT6797_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT6797_GPIO108__FUNC_KPROW2 (MTK_PIN_NO(108) | 1) +#define MT6797_GPIO108__FUNC_USB_DRVVBUS (MTK_PIN_NO(108) | 2) +#define MT6797_GPIO108__FUNC_PWM_A (MTK_PIN_NO(108) | 3) +#define MT6797_GPIO108__FUNC_CMFLASH (MTK_PIN_NO(108) | 4) +#define MT6797_GPIO108__FUNC_I2S1_LRCK (MTK_PIN_NO(108) | 5) +#define MT6797_GPIO108__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(108) | 7) + +#define MT6797_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT6797_GPIO109__FUNC_KPCOL0 (MTK_PIN_NO(109) | 1) + +#define MT6797_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT6797_GPIO110__FUNC_KPCOL1 (MTK_PIN_NO(110) | 1) +#define MT6797_GPIO110__FUNC_SDA1_3 (MTK_PIN_NO(110) | 2) +#define MT6797_GPIO110__FUNC_PWM_B (MTK_PIN_NO(110) | 3) +#define MT6797_GPIO110__FUNC_CLKM0 (MTK_PIN_NO(110) | 4) +#define MT6797_GPIO110__FUNC_I2S1_DO (MTK_PIN_NO(110) | 5) +#define MT6797_GPIO110__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(110) | 7) + +#define MT6797_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT6797_GPIO111__FUNC_KPCOL2 (MTK_PIN_NO(111) | 1) +#define MT6797_GPIO111__FUNC_SCL1_3 (MTK_PIN_NO(111) | 2) +#define MT6797_GPIO111__FUNC_PWM_C (MTK_PIN_NO(111) | 3) +#define MT6797_GPIO111__FUNC_DISP_PWM (MTK_PIN_NO(111) | 4) +#define MT6797_GPIO111__FUNC_I2S1_MCK (MTK_PIN_NO(111) | 5) +#define MT6797_GPIO111__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(111) | 7) + +#define MT6797_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT6797_GPIO112__FUNC_MD_INT1_C2K_UIM1_HOT_PLUG_IN (MTK_PIN_NO(112) | 1) +#define MT6797_GPIO112__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(112) | 7) + +#define MT6797_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT6797_GPIO113__FUNC_MD_INT0_C2K_UIM0_HOT_PLUG_IN (MTK_PIN_NO(113) | 1) +#define MT6797_GPIO113__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(113) | 7) + +#define MT6797_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT6797_GPIO114__FUNC_MSDC0_DAT0 (MTK_PIN_NO(114) | 1) + +#define MT6797_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT6797_GPIO115__FUNC_MSDC0_DAT1 (MTK_PIN_NO(115) | 1) + +#define MT6797_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT6797_GPIO116__FUNC_MSDC0_DAT2 (MTK_PIN_NO(116) | 1) + +#define MT6797_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT6797_GPIO117__FUNC_MSDC0_DAT3 (MTK_PIN_NO(117) | 1) + +#define MT6797_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT6797_GPIO118__FUNC_MSDC0_DAT4 (MTK_PIN_NO(118) | 1) + +#define MT6797_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT6797_GPIO119__FUNC_MSDC0_DAT5 (MTK_PIN_NO(119) | 1) + +#define MT6797_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT6797_GPIO120__FUNC_MSDC0_DAT6 (MTK_PIN_NO(120) | 1) + +#define MT6797_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT6797_GPIO121__FUNC_MSDC0_DAT7 (MTK_PIN_NO(121) | 1) + +#define MT6797_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT6797_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1) + +#define MT6797_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT6797_GPIO123__FUNC_MSDC0_CLK (MTK_PIN_NO(123) | 1) + +#define MT6797_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT6797_GPIO124__FUNC_MSDC0_DSL (MTK_PIN_NO(124) | 1) + +#define MT6797_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define 
MT6797_GPIO125__FUNC_MSDC0_RSTB (MTK_PIN_NO(125) | 1) + +#define MT6797_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT6797_GPIO126__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(126) | 1) +#define MT6797_GPIO126__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(126) | 2) +#define MT6797_GPIO126__FUNC_C2K_UIM0_CLK (MTK_PIN_NO(126) | 3) +#define MT6797_GPIO126__FUNC_C2K_UIM1_CLK (MTK_PIN_NO(126) | 4) + +#define MT6797_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define MT6797_GPIO127__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(127) | 1) +#define MT6797_GPIO127__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(127) | 2) +#define MT6797_GPIO127__FUNC_C2K_UIM0_RST (MTK_PIN_NO(127) | 3) +#define MT6797_GPIO127__FUNC_C2K_UIM1_RST (MTK_PIN_NO(127) | 4) + +#define MT6797_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define MT6797_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1) +#define MT6797_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2) +#define MT6797_GPIO128__FUNC_C2K_UIM0_IO (MTK_PIN_NO(128) | 3) +#define MT6797_GPIO128__FUNC_C2K_UIM1_IO (MTK_PIN_NO(128) | 4) + +#define MT6797_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define MT6797_GPIO129__FUNC_MSDC1_CMD (MTK_PIN_NO(129) | 1) +#define MT6797_GPIO129__FUNC_CONN_DSP_JMS (MTK_PIN_NO(129) | 2) +#define MT6797_GPIO129__FUNC_LTE_JTAG_TMS (MTK_PIN_NO(129) | 3) +#define MT6797_GPIO129__FUNC_UDI_TMS (MTK_PIN_NO(129) | 4) +#define MT6797_GPIO129__FUNC_C2K_TMS (MTK_PIN_NO(129) | 5) + +#define MT6797_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define MT6797_GPIO130__FUNC_MSDC1_DAT0 (MTK_PIN_NO(130) | 1) +#define MT6797_GPIO130__FUNC_CONN_DSP_JDI (MTK_PIN_NO(130) | 2) +#define MT6797_GPIO130__FUNC_LTE_JTAG_TDI (MTK_PIN_NO(130) | 3) +#define MT6797_GPIO130__FUNC_UDI_TDI (MTK_PIN_NO(130) | 4) +#define MT6797_GPIO130__FUNC_C2K_TDI (MTK_PIN_NO(130) | 5) + +#define MT6797_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define MT6797_GPIO131__FUNC_MSDC1_DAT1 (MTK_PIN_NO(131) | 1) +#define MT6797_GPIO131__FUNC_CONN_DSP_JDO (MTK_PIN_NO(131) | 2) +#define MT6797_GPIO131__FUNC_LTE_JTAG_TDO (MTK_PIN_NO(131) | 3) +#define MT6797_GPIO131__FUNC_UDI_TDO (MTK_PIN_NO(131) | 4) +#define MT6797_GPIO131__FUNC_C2K_TDO (MTK_PIN_NO(131) | 5) + +#define MT6797_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define MT6797_GPIO132__FUNC_MSDC1_DAT2 (MTK_PIN_NO(132) | 1) +#define MT6797_GPIO132__FUNC_C2K_RTCK (MTK_PIN_NO(132) | 5) + +#define MT6797_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define MT6797_GPIO133__FUNC_MSDC1_DAT3 (MTK_PIN_NO(133) | 1) +#define MT6797_GPIO133__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(133) | 2) +#define MT6797_GPIO133__FUNC_LTE_JTAG_TRSTN (MTK_PIN_NO(133) | 3) +#define MT6797_GPIO133__FUNC_UDI_NTRST (MTK_PIN_NO(133) | 4) +#define MT6797_GPIO133__FUNC_C2K_NTRST (MTK_PIN_NO(133) | 5) + +#define MT6797_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define MT6797_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1) +#define MT6797_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 2) +#define MT6797_GPIO134__FUNC_LTE_JTAG_TCK (MTK_PIN_NO(134) | 3) +#define MT6797_GPIO134__FUNC_UDI_TCK_XI (MTK_PIN_NO(134) | 4) +#define MT6797_GPIO134__FUNC_C2K_TCK (MTK_PIN_NO(134) | 5) + +#define MT6797_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define MT6797_GPIO135__FUNC_TDM_LRCK (MTK_PIN_NO(135) | 1) +#define MT6797_GPIO135__FUNC_I2S0_LRCK (MTK_PIN_NO(135) | 2) +#define MT6797_GPIO135__FUNC_CLKM0 (MTK_PIN_NO(135) | 3) +#define MT6797_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 4) +#define MT6797_GPIO135__FUNC_PWM_A (MTK_PIN_NO(135) | 5) +#define MT6797_GPIO135__FUNC_DBG_MON_A12 (MTK_PIN_NO(135) | 7) + +#define 
MT6797_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define MT6797_GPIO136__FUNC_TDM_BCK (MTK_PIN_NO(136) | 1) +#define MT6797_GPIO136__FUNC_I2S0_BCK (MTK_PIN_NO(136) | 2) +#define MT6797_GPIO136__FUNC_CLKM1 (MTK_PIN_NO(136) | 3) +#define MT6797_GPIO136__FUNC_PCM1_CLK (MTK_PIN_NO(136) | 4) +#define MT6797_GPIO136__FUNC_PWM_B (MTK_PIN_NO(136) | 5) +#define MT6797_GPIO136__FUNC_DBG_MON_A13 (MTK_PIN_NO(136) | 7) + +#define MT6797_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define MT6797_GPIO137__FUNC_TDM_MCK (MTK_PIN_NO(137) | 1) +#define MT6797_GPIO137__FUNC_I2S0_MCK (MTK_PIN_NO(137) | 2) +#define MT6797_GPIO137__FUNC_CLKM2 (MTK_PIN_NO(137) | 3) +#define MT6797_GPIO137__FUNC_PCM1_DI (MTK_PIN_NO(137) | 4) +#define MT6797_GPIO137__FUNC_IRTX_OUT (MTK_PIN_NO(137) | 5) +#define MT6797_GPIO137__FUNC_DBG_MON_A14 (MTK_PIN_NO(137) | 7) + +#define MT6797_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define MT6797_GPIO138__FUNC_TDM_DATA0 (MTK_PIN_NO(138) | 1) +#define MT6797_GPIO138__FUNC_I2S0_DI (MTK_PIN_NO(138) | 2) +#define MT6797_GPIO138__FUNC_CLKM3 (MTK_PIN_NO(138) | 3) +#define MT6797_GPIO138__FUNC_PCM1_DO0 (MTK_PIN_NO(138) | 4) +#define MT6797_GPIO138__FUNC_PWM_C (MTK_PIN_NO(138) | 5) +#define MT6797_GPIO138__FUNC_SDA3_1 (MTK_PIN_NO(138) | 6) +#define MT6797_GPIO138__FUNC_DBG_MON_A15 (MTK_PIN_NO(138) | 7) + +#define MT6797_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define MT6797_GPIO139__FUNC_TDM_DATA1 (MTK_PIN_NO(139) | 1) +#define MT6797_GPIO139__FUNC_I2S3_DO (MTK_PIN_NO(139) | 2) +#define MT6797_GPIO139__FUNC_CLKM4 (MTK_PIN_NO(139) | 3) +#define MT6797_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 4) +#define MT6797_GPIO139__FUNC_ANT_SEL2 (MTK_PIN_NO(139) | 5) +#define MT6797_GPIO139__FUNC_SCL3_1 (MTK_PIN_NO(139) | 6) +#define MT6797_GPIO139__FUNC_DBG_MON_A16 (MTK_PIN_NO(139) | 7) + +#define MT6797_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define MT6797_GPIO140__FUNC_TDM_DATA2 (MTK_PIN_NO(140) | 1) +#define MT6797_GPIO140__FUNC_DISP_PWM (MTK_PIN_NO(140) | 2) +#define MT6797_GPIO140__FUNC_CLKM5 (MTK_PIN_NO(140) | 3) +#define MT6797_GPIO140__FUNC_SDA1_4 (MTK_PIN_NO(140) | 4) +#define MT6797_GPIO140__FUNC_ANT_SEL1 (MTK_PIN_NO(140) | 5) +#define MT6797_GPIO140__FUNC_URXD3 (MTK_PIN_NO(140) | 6) +#define MT6797_GPIO140__FUNC_DBG_MON_A17 (MTK_PIN_NO(140) | 7) + +#define MT6797_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define MT6797_GPIO141__FUNC_TDM_DATA3 (MTK_PIN_NO(141) | 1) +#define MT6797_GPIO141__FUNC_CMFLASH (MTK_PIN_NO(141) | 2) +#define MT6797_GPIO141__FUNC_IRTX_OUT (MTK_PIN_NO(141) | 3) +#define MT6797_GPIO141__FUNC_SCL1_4 (MTK_PIN_NO(141) | 4) +#define MT6797_GPIO141__FUNC_ANT_SEL0 (MTK_PIN_NO(141) | 5) +#define MT6797_GPIO141__FUNC_UTXD3 (MTK_PIN_NO(141) | 6) +#define MT6797_GPIO141__FUNC_DBG_MON_A18 (MTK_PIN_NO(141) | 7) + +#define MT6797_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define MT6797_GPIO142__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(142) | 1) +#define MT6797_GPIO142__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(142) | 2) + +#define MT6797_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define MT6797_GPIO143__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(143) | 1) +#define MT6797_GPIO143__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(143) | 2) + +#define MT6797_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define MT6797_GPIO144__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(144) | 1) + +#define MT6797_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0) +#define MT6797_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1) + +#define MT6797_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0) +#define MT6797_GPIO146__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(146) | 1) 
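A few pins in this table expose the same function name at two different mux indices, and the header keeps only one definition of each name active, commenting out the other (see GPIO72/GPIO73 above and GPIO248 below). This is forced by the C preprocessor: an object-like macro may only be redefined with an identical replacement list, so a pair like the following is diagnosed by compilers (an intentionally ill-formed sketch, not from this diff):

    #define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 1)
    #define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 2) /* diagnosed: macro redefined with a different value */

Keeping exactly one active definition per name avoids the redefinition while preserving the lookup-by-name convention of these headers.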
+ +#define MT6797_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0) +#define MT6797_GPIO147__FUNC_AUD_DAT_MISO (MTK_PIN_NO(147) | 1) +#define MT6797_GPIO147__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(147) | 2) +#define MT6797_GPIO147__FUNC_VOW_DAT_MISO (MTK_PIN_NO(147) | 3) + +#define MT6797_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0) +#define MT6797_GPIO148__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(148) | 1) +#define MT6797_GPIO148__FUNC_AUD_DAT_MISO (MTK_PIN_NO(148) | 2) + +#define MT6797_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0) +#define MT6797_GPIO149__FUNC_VOW_CLK_MISO (MTK_PIN_NO(149) | 1) + +#define MT6797_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0) +#define MT6797_GPIO150__FUNC_ANC_DAT_MOSI (MTK_PIN_NO(150) | 1) + +#define MT6797_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0) +#define MT6797_GPIO151__FUNC_SCL6_0 (MTK_PIN_NO(151) | 1) + +#define MT6797_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0) +#define MT6797_GPIO152__FUNC_SDA6_0 (MTK_PIN_NO(152) | 1) + +#define MT6797_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0) +#define MT6797_GPIO153__FUNC_SCL7_0 (MTK_PIN_NO(153) | 1) + +#define MT6797_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0) +#define MT6797_GPIO154__FUNC_SDA7_0 (MTK_PIN_NO(154) | 1) + +#define MT6797_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0) +#define MT6797_GPIO155__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(155) | 1) +#define MT6797_GPIO155__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(155) | 2) +#define MT6797_GPIO155__FUNC_C2K_UIM0_CLK (MTK_PIN_NO(155) | 3) +#define MT6797_GPIO155__FUNC_C2K_UIM1_CLK (MTK_PIN_NO(155) | 4) + +#define MT6797_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0) +#define MT6797_GPIO156__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(156) | 1) +#define MT6797_GPIO156__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(156) | 2) +#define MT6797_GPIO156__FUNC_C2K_UIM0_RST (MTK_PIN_NO(156) | 3) +#define MT6797_GPIO156__FUNC_C2K_UIM1_RST (MTK_PIN_NO(156) | 4) + +#define MT6797_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0) +#define MT6797_GPIO157__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(157) | 1) +#define MT6797_GPIO157__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(157) | 2) +#define MT6797_GPIO157__FUNC_C2K_UIM0_IO (MTK_PIN_NO(157) | 3) +#define MT6797_GPIO157__FUNC_C2K_UIM1_IO (MTK_PIN_NO(157) | 4) + +#define MT6797_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0) +#define MT6797_GPIO158__FUNC_MIPI_TDP0 (MTK_PIN_NO(158) | 1) + +#define MT6797_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0) +#define MT6797_GPIO159__FUNC_MIPI_TDN0 (MTK_PIN_NO(159) | 1) + +#define MT6797_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0) +#define MT6797_GPIO160__FUNC_MIPI_TDP1 (MTK_PIN_NO(160) | 1) + +#define MT6797_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0) +#define MT6797_GPIO161__FUNC_MIPI_TDN1 (MTK_PIN_NO(161) | 1) + +#define MT6797_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0) +#define MT6797_GPIO162__FUNC_MIPI_TCP (MTK_PIN_NO(162) | 1) + +#define MT6797_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0) +#define MT6797_GPIO163__FUNC_MIPI_TCN (MTK_PIN_NO(163) | 1) + +#define MT6797_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0) +#define MT6797_GPIO164__FUNC_MIPI_TDP2 (MTK_PIN_NO(164) | 1) + +#define MT6797_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0) +#define MT6797_GPIO165__FUNC_MIPI_TDN2 (MTK_PIN_NO(165) | 1) + +#define MT6797_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0) +#define MT6797_GPIO166__FUNC_MIPI_TDP3 (MTK_PIN_NO(166) | 1) + +#define MT6797_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0) +#define MT6797_GPIO167__FUNC_MIPI_TDN3 (MTK_PIN_NO(167) | 1) + +#define MT6797_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0) +#define MT6797_GPIO168__FUNC_MIPI_TDP0_A (MTK_PIN_NO(168) | 1) + +#define 
MT6797_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0) +#define MT6797_GPIO169__FUNC_MIPI_TDN0_A (MTK_PIN_NO(169) | 1) + +#define MT6797_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0) +#define MT6797_GPIO170__FUNC_MIPI_TDP1_A (MTK_PIN_NO(170) | 1) + +#define MT6797_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0) +#define MT6797_GPIO171__FUNC_MIPI_TDN1_A (MTK_PIN_NO(171) | 1) + +#define MT6797_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0) +#define MT6797_GPIO172__FUNC_MIPI_TCP_A (MTK_PIN_NO(172) | 1) + +#define MT6797_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0) +#define MT6797_GPIO173__FUNC_MIPI_TCN_A (MTK_PIN_NO(173) | 1) + +#define MT6797_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0) +#define MT6797_GPIO174__FUNC_MIPI_TDP2_A (MTK_PIN_NO(174) | 1) + +#define MT6797_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0) +#define MT6797_GPIO175__FUNC_MIPI_TDN2_A (MTK_PIN_NO(175) | 1) + +#define MT6797_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0) +#define MT6797_GPIO176__FUNC_MIPI_TDP3_A (MTK_PIN_NO(176) | 1) + +#define MT6797_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0) +#define MT6797_GPIO177__FUNC_MIPI_TDN3_A (MTK_PIN_NO(177) | 1) + +#define MT6797_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0) +#define MT6797_GPIO178__FUNC_DISP_PWM (MTK_PIN_NO(178) | 1) +#define MT6797_GPIO178__FUNC_PWM_D (MTK_PIN_NO(178) | 2) +#define MT6797_GPIO178__FUNC_CLKM5 (MTK_PIN_NO(178) | 3) +#define MT6797_GPIO178__FUNC_DBG_MON_A19 (MTK_PIN_NO(178) | 7) + +#define MT6797_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0) +#define MT6797_GPIO179__FUNC_DSI_TE0 (MTK_PIN_NO(179) | 1) +#define MT6797_GPIO179__FUNC_DBG_MON_A20 (MTK_PIN_NO(179) | 7) + +#define MT6797_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0) +#define MT6797_GPIO180__FUNC_LCM_RST (MTK_PIN_NO(180) | 1) +#define MT6797_GPIO180__FUNC_DSI_TE1 (MTK_PIN_NO(180) | 2) +#define MT6797_GPIO180__FUNC_DBG_MON_A21 (MTK_PIN_NO(180) | 7) + +#define MT6797_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0) +#define MT6797_GPIO181__FUNC_IDDIG (MTK_PIN_NO(181) | 1) +#define MT6797_GPIO181__FUNC_DSI_TE1 (MTK_PIN_NO(181) | 2) +#define MT6797_GPIO181__FUNC_DBG_MON_A22 (MTK_PIN_NO(181) | 7) + +#define MT6797_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0) +#define MT6797_GPIO182__FUNC_TESTMODE (MTK_PIN_NO(182) | 1) + +#define MT6797_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0) +#define MT6797_GPIO183__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(183) | 1) +#define MT6797_GPIO183__FUNC_SPM_BSI_CK (MTK_PIN_NO(183) | 2) +#define MT6797_GPIO183__FUNC_DBG_MON_B27 (MTK_PIN_NO(183) | 7) + +#define MT6797_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0) +#define MT6797_GPIO184__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(184) | 1) +#define MT6797_GPIO184__FUNC_SPM_BSI_EN (MTK_PIN_NO(184) | 2) +#define MT6797_GPIO184__FUNC_DBG_MON_B28 (MTK_PIN_NO(184) | 7) + +#define MT6797_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0) +#define MT6797_GPIO185__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(185) | 1) +#define MT6797_GPIO185__FUNC_SPM_BSI_D0 (MTK_PIN_NO(185) | 2) +#define MT6797_GPIO185__FUNC_DBG_MON_B29 (MTK_PIN_NO(185) | 7) + +#define MT6797_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0) +#define MT6797_GPIO186__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(186) | 1) +#define MT6797_GPIO186__FUNC_SPM_BSI_D1 (MTK_PIN_NO(186) | 2) +#define MT6797_GPIO186__FUNC_DBG_MON_B30 (MTK_PIN_NO(186) | 7) + +#define MT6797_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0) +#define MT6797_GPIO187__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(187) | 1) +#define MT6797_GPIO187__FUNC_SPM_BSI_D2 (MTK_PIN_NO(187) | 2) +#define MT6797_GPIO187__FUNC_DBG_MON_B31 (MTK_PIN_NO(187) | 7) + +#define MT6797_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0) 
+#define MT6797_GPIO188__FUNC_MIPI0_SCLK (MTK_PIN_NO(188) | 1) +#define MT6797_GPIO188__FUNC_DBG_MON_B32 (MTK_PIN_NO(188) | 7) + +#define MT6797_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0) +#define MT6797_GPIO189__FUNC_MIPI0_SDATA (MTK_PIN_NO(189) | 1) + +#define MT6797_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0) +#define MT6797_GPIO190__FUNC_MIPI1_SCLK (MTK_PIN_NO(190) | 1) + +#define MT6797_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0) +#define MT6797_GPIO191__FUNC_MIPI1_SDATA (MTK_PIN_NO(191) | 1) + +#define MT6797_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0) +#define MT6797_GPIO192__FUNC_BPI_BUS4 (MTK_PIN_NO(192) | 1) + +#define MT6797_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0) +#define MT6797_GPIO193__FUNC_BPI_BUS5 (MTK_PIN_NO(193) | 1) +#define MT6797_GPIO193__FUNC_DBG_MON_B0 (MTK_PIN_NO(193) | 7) + +#define MT6797_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0) +#define MT6797_GPIO194__FUNC_BPI_BUS6 (MTK_PIN_NO(194) | 1) +#define MT6797_GPIO194__FUNC_DBG_MON_B1 (MTK_PIN_NO(194) | 7) + +#define MT6797_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0) +#define MT6797_GPIO195__FUNC_BPI_BUS7 (MTK_PIN_NO(195) | 1) +#define MT6797_GPIO195__FUNC_DBG_MON_B2 (MTK_PIN_NO(195) | 7) + +#define MT6797_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0) +#define MT6797_GPIO196__FUNC_BPI_BUS8 (MTK_PIN_NO(196) | 1) +#define MT6797_GPIO196__FUNC_DBG_MON_B3 (MTK_PIN_NO(196) | 7) + +#define MT6797_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0) +#define MT6797_GPIO197__FUNC_BPI_BUS9 (MTK_PIN_NO(197) | 1) +#define MT6797_GPIO197__FUNC_DBG_MON_B4 (MTK_PIN_NO(197) | 7) + +#define MT6797_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0) +#define MT6797_GPIO198__FUNC_BPI_BUS10 (MTK_PIN_NO(198) | 1) +#define MT6797_GPIO198__FUNC_DBG_MON_B5 (MTK_PIN_NO(198) | 7) + +#define MT6797_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define MT6797_GPIO199__FUNC_BPI_BUS11 (MTK_PIN_NO(199) | 1) +#define MT6797_GPIO199__FUNC_DBG_MON_B6 (MTK_PIN_NO(199) | 7) + +#define MT6797_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define MT6797_GPIO200__FUNC_BPI_BUS12 (MTK_PIN_NO(200) | 1) +#define MT6797_GPIO200__FUNC_DBG_MON_B7 (MTK_PIN_NO(200) | 7) + +#define MT6797_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define MT6797_GPIO201__FUNC_BPI_BUS13 (MTK_PIN_NO(201) | 1) +#define MT6797_GPIO201__FUNC_DBG_MON_B8 (MTK_PIN_NO(201) | 7) + +#define MT6797_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0) +#define MT6797_GPIO202__FUNC_BPI_BUS14 (MTK_PIN_NO(202) | 1) +#define MT6797_GPIO202__FUNC_DBG_MON_B9 (MTK_PIN_NO(202) | 7) + +#define MT6797_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0) +#define MT6797_GPIO203__FUNC_BPI_BUS15 (MTK_PIN_NO(203) | 1) +#define MT6797_GPIO203__FUNC_DBG_MON_B10 (MTK_PIN_NO(203) | 7) + +#define MT6797_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0) +#define MT6797_GPIO204__FUNC_BPI_BUS16 (MTK_PIN_NO(204) | 1) +#define MT6797_GPIO204__FUNC_PA_VM0 (MTK_PIN_NO(204) | 2) +#define MT6797_GPIO204__FUNC_DBG_MON_B11 (MTK_PIN_NO(204) | 7) + +#define MT6797_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0) +#define MT6797_GPIO205__FUNC_BPI_BUS17 (MTK_PIN_NO(205) | 1) +#define MT6797_GPIO205__FUNC_PA_VM1 (MTK_PIN_NO(205) | 2) +#define MT6797_GPIO205__FUNC_DBG_MON_B12 (MTK_PIN_NO(205) | 7) + +#define MT6797_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0) +#define MT6797_GPIO206__FUNC_BPI_BUS18 (MTK_PIN_NO(206) | 1) +#define MT6797_GPIO206__FUNC_TX_SWAP0 (MTK_PIN_NO(206) | 2) +#define MT6797_GPIO206__FUNC_DBG_MON_B13 (MTK_PIN_NO(206) | 7) + +#define MT6797_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0) +#define MT6797_GPIO207__FUNC_BPI_BUS19 (MTK_PIN_NO(207) | 1) 
+#define MT6797_GPIO207__FUNC_TX_SWAP1 (MTK_PIN_NO(207) | 2) +#define MT6797_GPIO207__FUNC_DBG_MON_B14 (MTK_PIN_NO(207) | 7) + +#define MT6797_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0) +#define MT6797_GPIO208__FUNC_BPI_BUS20 (MTK_PIN_NO(208) | 1) +#define MT6797_GPIO208__FUNC_TX_SWAP2 (MTK_PIN_NO(208) | 2) +#define MT6797_GPIO208__FUNC_DBG_MON_B15 (MTK_PIN_NO(208) | 7) + +#define MT6797_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0) +#define MT6797_GPIO209__FUNC_BPI_BUS21 (MTK_PIN_NO(209) | 1) +#define MT6797_GPIO209__FUNC_TX_SWAP3 (MTK_PIN_NO(209) | 2) +#define MT6797_GPIO209__FUNC_DBG_MON_B16 (MTK_PIN_NO(209) | 7) + +#define MT6797_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0) +#define MT6797_GPIO210__FUNC_BPI_BUS22 (MTK_PIN_NO(210) | 1) +#define MT6797_GPIO210__FUNC_DET_BPI0 (MTK_PIN_NO(210) | 2) +#define MT6797_GPIO210__FUNC_DBG_MON_B17 (MTK_PIN_NO(210) | 7) + +#define MT6797_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0) +#define MT6797_GPIO211__FUNC_BPI_BUS23 (MTK_PIN_NO(211) | 1) +#define MT6797_GPIO211__FUNC_DET_BPI1 (MTK_PIN_NO(211) | 2) +#define MT6797_GPIO211__FUNC_DBG_MON_B18 (MTK_PIN_NO(211) | 7) + +#define MT6797_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0) +#define MT6797_GPIO212__FUNC_BPI_BUS0 (MTK_PIN_NO(212) | 1) +#define MT6797_GPIO212__FUNC_DBG_MON_B19 (MTK_PIN_NO(212) | 7) + +#define MT6797_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0) +#define MT6797_GPIO213__FUNC_BPI_BUS1 (MTK_PIN_NO(213) | 1) +#define MT6797_GPIO213__FUNC_DBG_MON_B20 (MTK_PIN_NO(213) | 7) + +#define MT6797_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0) +#define MT6797_GPIO214__FUNC_BPI_BUS2 (MTK_PIN_NO(214) | 1) +#define MT6797_GPIO214__FUNC_DBG_MON_B21 (MTK_PIN_NO(214) | 7) + +#define MT6797_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0) +#define MT6797_GPIO215__FUNC_BPI_BUS3 (MTK_PIN_NO(215) | 1) +#define MT6797_GPIO215__FUNC_DBG_MON_B22 (MTK_PIN_NO(215) | 7) + +#define MT6797_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0) +#define MT6797_GPIO216__FUNC_MIPI2_SCLK (MTK_PIN_NO(216) | 1) +#define MT6797_GPIO216__FUNC_DBG_MON_B23 (MTK_PIN_NO(216) | 7) + +#define MT6797_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0) +#define MT6797_GPIO217__FUNC_MIPI2_SDATA (MTK_PIN_NO(217) | 1) +#define MT6797_GPIO217__FUNC_DBG_MON_B24 (MTK_PIN_NO(217) | 7) + +#define MT6797_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0) +#define MT6797_GPIO218__FUNC_MIPI3_SCLK (MTK_PIN_NO(218) | 1) +#define MT6797_GPIO218__FUNC_DBG_MON_B25 (MTK_PIN_NO(218) | 7) + +#define MT6797_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0) +#define MT6797_GPIO219__FUNC_MIPI3_SDATA (MTK_PIN_NO(219) | 1) +#define MT6797_GPIO219__FUNC_DBG_MON_B26 (MTK_PIN_NO(219) | 7) + +#define MT6797_GPIO220__FUNC_GPIO220 (MTK_PIN_NO(220) | 0) +#define MT6797_GPIO220__FUNC_CONN_WF_IP (MTK_PIN_NO(220) | 1) + +#define MT6797_GPIO221__FUNC_GPIO221 (MTK_PIN_NO(221) | 0) +#define MT6797_GPIO221__FUNC_CONN_WF_IN (MTK_PIN_NO(221) | 1) + +#define MT6797_GPIO222__FUNC_GPIO222 (MTK_PIN_NO(222) | 0) +#define MT6797_GPIO222__FUNC_CONN_WF_QP (MTK_PIN_NO(222) | 1) + +#define MT6797_GPIO223__FUNC_GPIO223 (MTK_PIN_NO(223) | 0) +#define MT6797_GPIO223__FUNC_CONN_WF_QN (MTK_PIN_NO(223) | 1) + +#define MT6797_GPIO224__FUNC_GPIO224 (MTK_PIN_NO(224) | 0) +#define MT6797_GPIO224__FUNC_CONN_BT_IP (MTK_PIN_NO(224) | 1) + +#define MT6797_GPIO225__FUNC_GPIO225 (MTK_PIN_NO(225) | 0) +#define MT6797_GPIO225__FUNC_CONN_BT_IN (MTK_PIN_NO(225) | 1) + +#define MT6797_GPIO226__FUNC_GPIO226 (MTK_PIN_NO(226) | 0) +#define MT6797_GPIO226__FUNC_CONN_BT_QP (MTK_PIN_NO(226) | 1) + +#define MT6797_GPIO227__FUNC_GPIO227 
(MTK_PIN_NO(227) | 0) +#define MT6797_GPIO227__FUNC_CONN_BT_QN (MTK_PIN_NO(227) | 1) + +#define MT6797_GPIO228__FUNC_GPIO228 (MTK_PIN_NO(228) | 0) +#define MT6797_GPIO228__FUNC_CONN_GPS_IP (MTK_PIN_NO(228) | 1) + +#define MT6797_GPIO229__FUNC_GPIO229 (MTK_PIN_NO(229) | 0) +#define MT6797_GPIO229__FUNC_CONN_GPS_IN (MTK_PIN_NO(229) | 1) + +#define MT6797_GPIO230__FUNC_GPIO230 (MTK_PIN_NO(230) | 0) +#define MT6797_GPIO230__FUNC_CONN_GPS_QP (MTK_PIN_NO(230) | 1) + +#define MT6797_GPIO231__FUNC_GPIO231 (MTK_PIN_NO(231) | 0) +#define MT6797_GPIO231__FUNC_CONN_GPS_QN (MTK_PIN_NO(231) | 1) + +#define MT6797_GPIO232__FUNC_GPIO232 (MTK_PIN_NO(232) | 0) +#define MT6797_GPIO232__FUNC_URXD1 (MTK_PIN_NO(232) | 1) +#define MT6797_GPIO232__FUNC_UTXD1 (MTK_PIN_NO(232) | 2) +#define MT6797_GPIO232__FUNC_MD_URXD0 (MTK_PIN_NO(232) | 3) +#define MT6797_GPIO232__FUNC_MD_URXD1 (MTK_PIN_NO(232) | 4) +#define MT6797_GPIO232__FUNC_MD_URXD2 (MTK_PIN_NO(232) | 5) +#define MT6797_GPIO232__FUNC_C2K_URXD0 (MTK_PIN_NO(232) | 6) +#define MT6797_GPIO232__FUNC_C2K_URXD1 (MTK_PIN_NO(232) | 7) + +#define MT6797_GPIO233__FUNC_GPIO233 (MTK_PIN_NO(233) | 0) +#define MT6797_GPIO233__FUNC_UTXD1 (MTK_PIN_NO(233) | 1) +#define MT6797_GPIO233__FUNC_URXD1 (MTK_PIN_NO(233) | 2) +#define MT6797_GPIO233__FUNC_MD_UTXD0 (MTK_PIN_NO(233) | 3) +#define MT6797_GPIO233__FUNC_MD_UTXD1 (MTK_PIN_NO(233) | 4) +#define MT6797_GPIO233__FUNC_MD_UTXD2 (MTK_PIN_NO(233) | 5) +#define MT6797_GPIO233__FUNC_C2K_UTXD0 (MTK_PIN_NO(233) | 6) +#define MT6797_GPIO233__FUNC_C2K_UTXD1 (MTK_PIN_NO(233) | 7) + +#define MT6797_GPIO234__FUNC_GPIO234 (MTK_PIN_NO(234) | 0) +#define MT6797_GPIO234__FUNC_SPI1_CLK_B (MTK_PIN_NO(234) | 1) +#define MT6797_GPIO234__FUNC_TP_UTXD1_AO (MTK_PIN_NO(234) | 2) +#define MT6797_GPIO234__FUNC_SCL4_1 (MTK_PIN_NO(234) | 3) +#define MT6797_GPIO234__FUNC_UTXD0 (MTK_PIN_NO(234) | 4) +#define MT6797_GPIO234__FUNC_PWM_A (MTK_PIN_NO(234) | 6) +#define MT6797_GPIO234__FUNC_DBG_MON_A23 (MTK_PIN_NO(234) | 7) + +#define MT6797_GPIO235__FUNC_GPIO235 (MTK_PIN_NO(235) | 0) +#define MT6797_GPIO235__FUNC_SPI1_MI_B (MTK_PIN_NO(235) | 1) +#define MT6797_GPIO235__FUNC_SPI1_MO_B (MTK_PIN_NO(235) | 2) +#define MT6797_GPIO235__FUNC_SDA4_1 (MTK_PIN_NO(235) | 3) +#define MT6797_GPIO235__FUNC_URXD0 (MTK_PIN_NO(235) | 4) +#define MT6797_GPIO235__FUNC_CLKM0 (MTK_PIN_NO(235) | 6) +#define MT6797_GPIO235__FUNC_DBG_MON_A24 (MTK_PIN_NO(235) | 7) + +#define MT6797_GPIO236__FUNC_GPIO236 (MTK_PIN_NO(236) | 0) +#define MT6797_GPIO236__FUNC_SPI1_MO_B (MTK_PIN_NO(236) | 1) +#define MT6797_GPIO236__FUNC_SPI1_MI_B (MTK_PIN_NO(236) | 2) +#define MT6797_GPIO236__FUNC_SCL5_1 (MTK_PIN_NO(236) | 3) +#define MT6797_GPIO236__FUNC_URTS0 (MTK_PIN_NO(236) | 4) +#define MT6797_GPIO236__FUNC_PWM_B (MTK_PIN_NO(236) | 6) +#define MT6797_GPIO236__FUNC_DBG_MON_A25 (MTK_PIN_NO(236) | 7) + +#define MT6797_GPIO237__FUNC_GPIO237 (MTK_PIN_NO(237) | 0) +#define MT6797_GPIO237__FUNC_SPI1_CS_B (MTK_PIN_NO(237) | 1) +#define MT6797_GPIO237__FUNC_TP_URXD1_AO (MTK_PIN_NO(237) | 2) +#define MT6797_GPIO237__FUNC_SDA5_1 (MTK_PIN_NO(237) | 3) +#define MT6797_GPIO237__FUNC_UCTS0 (MTK_PIN_NO(237) | 4) +#define MT6797_GPIO237__FUNC_CLKM1 (MTK_PIN_NO(237) | 6) +#define MT6797_GPIO237__FUNC_DBG_MON_A26 (MTK_PIN_NO(237) | 7) + +#define MT6797_GPIO238__FUNC_GPIO238 (MTK_PIN_NO(238) | 0) +#define MT6797_GPIO238__FUNC_SDA4_0 (MTK_PIN_NO(238) | 1) + +#define MT6797_GPIO239__FUNC_GPIO239 (MTK_PIN_NO(239) | 0) +#define MT6797_GPIO239__FUNC_SCL4_0 (MTK_PIN_NO(239) | 1) + +#define MT6797_GPIO240__FUNC_GPIO240 
(MTK_PIN_NO(240) | 0) +#define MT6797_GPIO240__FUNC_SDA5_0 (MTK_PIN_NO(240) | 1) + +#define MT6797_GPIO241__FUNC_GPIO241 (MTK_PIN_NO(241) | 0) +#define MT6797_GPIO241__FUNC_SCL5_0 (MTK_PIN_NO(241) | 1) + +#define MT6797_GPIO242__FUNC_GPIO242 (MTK_PIN_NO(242) | 0) +#define MT6797_GPIO242__FUNC_SPI2_CLK_B (MTK_PIN_NO(242) | 1) +#define MT6797_GPIO242__FUNC_TP_UTXD2_AO (MTK_PIN_NO(242) | 2) +#define MT6797_GPIO242__FUNC_SCL4_2 (MTK_PIN_NO(242) | 3) +#define MT6797_GPIO242__FUNC_UTXD1 (MTK_PIN_NO(242) | 4) +#define MT6797_GPIO242__FUNC_URTS3 (MTK_PIN_NO(242) | 5) +#define MT6797_GPIO242__FUNC_PWM_C (MTK_PIN_NO(242) | 6) +#define MT6797_GPIO242__FUNC_DBG_MON_A27 (MTK_PIN_NO(242) | 7) + +#define MT6797_GPIO243__FUNC_GPIO243 (MTK_PIN_NO(243) | 0) +#define MT6797_GPIO243__FUNC_SPI2_MI_B (MTK_PIN_NO(243) | 1) +#define MT6797_GPIO243__FUNC_SPI2_MO_B (MTK_PIN_NO(243) | 2) +#define MT6797_GPIO243__FUNC_SDA4_2 (MTK_PIN_NO(243) | 3) +#define MT6797_GPIO243__FUNC_URXD1 (MTK_PIN_NO(243) | 4) +#define MT6797_GPIO243__FUNC_UCTS3 (MTK_PIN_NO(243) | 5) +#define MT6797_GPIO243__FUNC_CLKM2 (MTK_PIN_NO(243) | 6) +#define MT6797_GPIO243__FUNC_DBG_MON_A28 (MTK_PIN_NO(243) | 7) + +#define MT6797_GPIO244__FUNC_GPIO244 (MTK_PIN_NO(244) | 0) +#define MT6797_GPIO244__FUNC_SPI2_MO_B (MTK_PIN_NO(244) | 1) +#define MT6797_GPIO244__FUNC_SPI2_MI_B (MTK_PIN_NO(244) | 2) +#define MT6797_GPIO244__FUNC_SCL5_2 (MTK_PIN_NO(244) | 3) +#define MT6797_GPIO244__FUNC_URTS1 (MTK_PIN_NO(244) | 4) +#define MT6797_GPIO244__FUNC_UTXD3 (MTK_PIN_NO(244) | 5) +#define MT6797_GPIO244__FUNC_PWM_D (MTK_PIN_NO(244) | 6) +#define MT6797_GPIO244__FUNC_DBG_MON_A29 (MTK_PIN_NO(244) | 7) + +#define MT6797_GPIO245__FUNC_GPIO245 (MTK_PIN_NO(245) | 0) +#define MT6797_GPIO245__FUNC_SPI2_CS_B (MTK_PIN_NO(245) | 1) +#define MT6797_GPIO245__FUNC_TP_URXD2_AO (MTK_PIN_NO(245) | 2) +#define MT6797_GPIO245__FUNC_SDA5_2 (MTK_PIN_NO(245) | 3) +#define MT6797_GPIO245__FUNC_UCTS1 (MTK_PIN_NO(245) | 4) +#define MT6797_GPIO245__FUNC_URXD3 (MTK_PIN_NO(245) | 5) +#define MT6797_GPIO245__FUNC_CLKM3 (MTK_PIN_NO(245) | 6) +#define MT6797_GPIO245__FUNC_DBG_MON_A30 (MTK_PIN_NO(245) | 7) + +#define MT6797_GPIO246__FUNC_GPIO246 (MTK_PIN_NO(246) | 0) +#define MT6797_GPIO246__FUNC_I2S1_LRCK (MTK_PIN_NO(246) | 1) +#define MT6797_GPIO246__FUNC_I2S2_LRCK (MTK_PIN_NO(246) | 2) +#define MT6797_GPIO246__FUNC_I2S0_LRCK (MTK_PIN_NO(246) | 3) +#define MT6797_GPIO246__FUNC_I2S3_LRCK (MTK_PIN_NO(246) | 4) +#define MT6797_GPIO246__FUNC_PCM0_SYNC (MTK_PIN_NO(246) | 5) +#define MT6797_GPIO246__FUNC_SPI5_CLK_C (MTK_PIN_NO(246) | 6) +#define MT6797_GPIO246__FUNC_DBG_MON_A31 (MTK_PIN_NO(246) | 7) + +#define MT6797_GPIO247__FUNC_GPIO247 (MTK_PIN_NO(247) | 0) +#define MT6797_GPIO247__FUNC_I2S1_BCK (MTK_PIN_NO(247) | 1) +#define MT6797_GPIO247__FUNC_I2S2_BCK (MTK_PIN_NO(247) | 2) +#define MT6797_GPIO247__FUNC_I2S0_BCK (MTK_PIN_NO(247) | 3) +#define MT6797_GPIO247__FUNC_I2S3_BCK (MTK_PIN_NO(247) | 4) +#define MT6797_GPIO247__FUNC_PCM0_CLK (MTK_PIN_NO(247) | 5) +#define MT6797_GPIO247__FUNC_SPI5_MI_C (MTK_PIN_NO(247) | 6) +#define MT6797_GPIO247__FUNC_DBG_MON_A32 (MTK_PIN_NO(247) | 7) + +#define MT6797_GPIO248__FUNC_GPIO248 (MTK_PIN_NO(248) | 0) +/* #define MT6797_GPIO248__FUNC_I2S2_DI (MTK_PIN_NO(248) | 1) */ +#define MT6797_GPIO248__FUNC_I2S2_DI (MTK_PIN_NO(248) | 2) +/* #define MT6797_GPIO248__FUNC_I2S0_DI (MTK_PIN_NO(248) | 3) */ +#define MT6797_GPIO248__FUNC_I2S0_DI (MTK_PIN_NO(248) | 4) +#define MT6797_GPIO248__FUNC_PCM0_DI (MTK_PIN_NO(248) | 5) +#define MT6797_GPIO248__FUNC_SPI5_CS_C 
(MTK_PIN_NO(248) | 6) + +#define MT6797_GPIO249__FUNC_GPIO249 (MTK_PIN_NO(249) | 0) +/* #define MT6797_GPIO249__FUNC_I2S1_DO (MTK_PIN_NO(249) | 1) */ +#define MT6797_GPIO249__FUNC_I2S1_DO (MTK_PIN_NO(249) | 2) +/* #define MT6797_GPIO249__FUNC_I2S3_DO (MTK_PIN_NO(249) | 3) */ +#define MT6797_GPIO249__FUNC_I2S3_DO (MTK_PIN_NO(249) | 4) +#define MT6797_GPIO249__FUNC_PCM0_DO (MTK_PIN_NO(249) | 5) +#define MT6797_GPIO249__FUNC_SPI5_MO_C (MTK_PIN_NO(249) | 6) +#define MT6797_GPIO249__FUNC_TRAP_SRAM_PWR_BYPASS (MTK_PIN_NO(249) | 7) + +#define MT6797_GPIO250__FUNC_GPIO250 (MTK_PIN_NO(250) | 0) +#define MT6797_GPIO250__FUNC_SPI3_MI (MTK_PIN_NO(250) | 1) +#define MT6797_GPIO250__FUNC_SPI3_MO (MTK_PIN_NO(250) | 2) +#define MT6797_GPIO250__FUNC_IRTX_OUT (MTK_PIN_NO(250) | 3) +#define MT6797_GPIO250__FUNC_TP_URXD1_AO (MTK_PIN_NO(250) | 6) +#define MT6797_GPIO250__FUNC_DROP_ZONE (MTK_PIN_NO(250) | 7) + +#define MT6797_GPIO251__FUNC_GPIO251 (MTK_PIN_NO(251) | 0) +#define MT6797_GPIO251__FUNC_SPI3_MO (MTK_PIN_NO(251) | 1) +#define MT6797_GPIO251__FUNC_SPI3_MI (MTK_PIN_NO(251) | 2) +#define MT6797_GPIO251__FUNC_CMFLASH (MTK_PIN_NO(251) | 3) +#define MT6797_GPIO251__FUNC_TP_UTXD1_AO (MTK_PIN_NO(251) | 6) +#define MT6797_GPIO251__FUNC_C2K_RTCK (MTK_PIN_NO(251) | 7) + +#define MT6797_GPIO252__FUNC_GPIO252 (MTK_PIN_NO(252) | 0) +#define MT6797_GPIO252__FUNC_SPI3_CLK (MTK_PIN_NO(252) | 1) +#define MT6797_GPIO252__FUNC_SCL0_4 (MTK_PIN_NO(252) | 2) +#define MT6797_GPIO252__FUNC_PWM_D (MTK_PIN_NO(252) | 3) +#define MT6797_GPIO252__FUNC_C2K_TMS (MTK_PIN_NO(252) | 7) + +#define MT6797_GPIO253__FUNC_GPIO253 (MTK_PIN_NO(253) | 0) +#define MT6797_GPIO253__FUNC_SPI3_CS (MTK_PIN_NO(253) | 1) +#define MT6797_GPIO253__FUNC_SDA0_4 (MTK_PIN_NO(253) | 2) +#define MT6797_GPIO253__FUNC_PWM_A (MTK_PIN_NO(253) | 3) +#define MT6797_GPIO253__FUNC_C2K_TCK (MTK_PIN_NO(253) | 7) + +#define MT6797_GPIO254__FUNC_GPIO254 (MTK_PIN_NO(254) | 0) +#define MT6797_GPIO254__FUNC_I2S1_MCK (MTK_PIN_NO(254) | 1) +#define MT6797_GPIO254__FUNC_I2S2_MCK (MTK_PIN_NO(254) | 2) +#define MT6797_GPIO254__FUNC_I2S0_MCK (MTK_PIN_NO(254) | 3) +#define MT6797_GPIO254__FUNC_I2S3_MCK (MTK_PIN_NO(254) | 4) +#define MT6797_GPIO254__FUNC_CLKM0 (MTK_PIN_NO(254) | 5) +#define MT6797_GPIO254__FUNC_C2K_TDI (MTK_PIN_NO(254) | 7) + +#define MT6797_GPIO255__FUNC_GPIO255 (MTK_PIN_NO(255) | 0) +#define MT6797_GPIO255__FUNC_CLKM1 (MTK_PIN_NO(255) | 1) +#define MT6797_GPIO255__FUNC_DISP_PWM (MTK_PIN_NO(255) | 2) +#define MT6797_GPIO255__FUNC_PWM_B (MTK_PIN_NO(255) | 3) +#define MT6797_GPIO255__FUNC_TP_GPIO1_AO (MTK_PIN_NO(255) | 6) +#define MT6797_GPIO255__FUNC_C2K_TDO (MTK_PIN_NO(255) | 7) + +#define MT6797_GPIO256__FUNC_GPIO256 (MTK_PIN_NO(256) | 0) +#define MT6797_GPIO256__FUNC_CLKM2 (MTK_PIN_NO(256) | 1) +#define MT6797_GPIO256__FUNC_IRTX_OUT (MTK_PIN_NO(256) | 2) +#define MT6797_GPIO256__FUNC_PWM_C (MTK_PIN_NO(256) | 3) +#define MT6797_GPIO256__FUNC_TP_GPIO0_AO (MTK_PIN_NO(256) | 6) +#define MT6797_GPIO256__FUNC_C2K_NTRST (MTK_PIN_NO(256) | 7) + +#define MT6797_GPIO257__FUNC_GPIO257 (MTK_PIN_NO(257) | 0) +#define MT6797_GPIO257__FUNC_IO_JTAG_TMS (MTK_PIN_NO(257) | 1) +#define MT6797_GPIO257__FUNC_LTE_JTAG_TMS (MTK_PIN_NO(257) | 2) +#define MT6797_GPIO257__FUNC_DFD_TMS (MTK_PIN_NO(257) | 3) +#define MT6797_GPIO257__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(257) | 4) +#define MT6797_GPIO257__FUNC_ANC_JTAG_TMS (MTK_PIN_NO(257) | 5) +#define MT6797_GPIO257__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(257) | 6) +#define MT6797_GPIO257__FUNC_C2K_DM_OTMS (MTK_PIN_NO(257) | 7) + +#define 
MT6797_GPIO258__FUNC_GPIO258 (MTK_PIN_NO(258) | 0)
+#define MT6797_GPIO258__FUNC_IO_JTAG_TCK (MTK_PIN_NO(258) | 1)
+#define MT6797_GPIO258__FUNC_LTE_JTAG_TCK (MTK_PIN_NO(258) | 2)
+#define MT6797_GPIO258__FUNC_DFD_TCK_XI (MTK_PIN_NO(258) | 3)
+#define MT6797_GPIO258__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(258) | 4)
+#define MT6797_GPIO258__FUNC_ANC_JTAG_TCK (MTK_PIN_NO(258) | 5)
+#define MT6797_GPIO258__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(258) | 6)
+#define MT6797_GPIO258__FUNC_C2K_DM_OTCK (MTK_PIN_NO(258) | 7)
+
+#define MT6797_GPIO259__FUNC_GPIO259 (MTK_PIN_NO(259) | 0)
+#define MT6797_GPIO259__FUNC_IO_JTAG_TDI (MTK_PIN_NO(259) | 1)
+#define MT6797_GPIO259__FUNC_LTE_JTAG_TDI (MTK_PIN_NO(259) | 2)
+#define MT6797_GPIO259__FUNC_DFD_TDI (MTK_PIN_NO(259) | 3)
+#define MT6797_GPIO259__FUNC_ANC_JTAG_TDI (MTK_PIN_NO(259) | 5)
+#define MT6797_GPIO259__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(259) | 6)
+#define MT6797_GPIO259__FUNC_C2K_DM_OTDI (MTK_PIN_NO(259) | 7)
+
+#define MT6797_GPIO260__FUNC_GPIO260 (MTK_PIN_NO(260) | 0)
+#define MT6797_GPIO260__FUNC_IO_JTAG_TDO (MTK_PIN_NO(260) | 1)
+#define MT6797_GPIO260__FUNC_LTE_JTAG_TDO (MTK_PIN_NO(260) | 2)
+#define MT6797_GPIO260__FUNC_DFD_TDO (MTK_PIN_NO(260) | 3)
+#define MT6797_GPIO260__FUNC_ANC_JTAG_TDO (MTK_PIN_NO(260) | 5)
+#define MT6797_GPIO260__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(260) | 6)
+#define MT6797_GPIO260__FUNC_C2K_DM_OTDO (MTK_PIN_NO(260) | 7)
+
+#define MT6797_GPIO261__FUNC_GPIO261 (MTK_PIN_NO(261) | 0)
+#define MT6797_GPIO261__FUNC_LTE_JTAG_TRSTN (MTK_PIN_NO(261) | 2)
+#define MT6797_GPIO261__FUNC_DFD_NTRST (MTK_PIN_NO(261) | 3)
+#define MT6797_GPIO261__FUNC_ANC_JTAG_TRSTN (MTK_PIN_NO(261) | 5)
+#define MT6797_GPIO261__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(261) | 6)
+#define MT6797_GPIO261__FUNC_C2K_DM_JTINTP (MTK_PIN_NO(261) | 7)
+
+#endif /* __DTS_MT6797_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
new file mode 100644
index 0000000..604fe78
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MT7623_PINFUNC_H
+#define __DTS_MT7623_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDO (MTK_PIN_NO(0) | 1)
+#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDI (MTK_PIN_NO(0) | 2)
+
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDI (MTK_PIN_NO(1) | 1)
+#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDO (MTK_PIN_NO(1) | 2)
+
+#define MT7623_PIN_2_PWRAP_INT_FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT7623_PIN_2_PWRAP_INT_FUNC_PWRAP_INT (MTK_PIN_NO(2) | 1)
+
+#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_PWRAP_SPICK_I (MTK_PIN_NO(3) | 1)
+
+#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(4) | 1)
+
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5)
+
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5)
+
+#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define
MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1) +#define MT7623_PIN_7_SPI1_CSN_FUNC_KCOL0 (MTK_PIN_NO(7) | 4) + +#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1) +#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2) +#define MT7623_PIN_8_SPI1_MI_FUNC_KCOL1 (MTK_PIN_NO(8) | 4) + +#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1) +#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2) +#define MT7623_PIN_9_SPI1_MO_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3) +#define MT7623_PIN_9_SPI1_MO_FUNC_KCOL2 (MTK_PIN_NO(9) | 4) + +#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1) + +#define MT7623_PIN_11_WATCHDOG_FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT7623_PIN_11_WATCHDOG_FUNC_WATCHDOG (MTK_PIN_NO(11) | 1) + +#define MT7623_PIN_12_SRCLKENA_FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT7623_PIN_12_SRCLKENA_FUNC_SRCLKENA (MTK_PIN_NO(12) | 1) + +#define MT7623_PIN_13_SRCLKENAI_FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT7623_PIN_13_SRCLKENAI_FUNC_SRCLKENAI (MTK_PIN_NO(13) | 1) + +#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1) +#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2) +#define MT7623_PIN_14_GPIO14_FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5) + +#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1) +#define MT7623_PIN_15_GPIO15_FUNC_URXD2 (MTK_PIN_NO(15) | 2) + +#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1) +#define MT7623_PIN_18_PCM_CLK_FUNC_MRG_CLK (MTK_PIN_NO(18) | 2) +#define MT7623_PIN_18_PCM_CLK_FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4) +#define MT7623_PIN_18_PCM_CLK_FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5) +#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6) + +#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1) +#define MT7623_PIN_19_PCM_SYNC_FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2) +#define MT7623_PIN_19_PCM_SYNC_FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5) +#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6) + +#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1) +#define MT7623_PIN_20_PCM_RX_FUNC_MRG_RX (MTK_PIN_NO(20) | 2) +#define MT7623_PIN_20_PCM_RX_FUNC_MRG_TX (MTK_PIN_NO(20) | 3) +#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4) +#define MT7623_PIN_20_PCM_RX_FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5) +#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6) + +#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1) +#define MT7623_PIN_21_PCM_TX_FUNC_MRG_TX (MTK_PIN_NO(21) | 2) +#define MT7623_PIN_21_PCM_TX_FUNC_MRG_RX (MTK_PIN_NO(21) | 3) +#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4) +#define MT7623_PIN_21_PCM_TX_FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5) +#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6) + +#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1) +#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2) +#define 
MT7623_PIN_22_EINT0_FUNC_KCOL3 (MTK_PIN_NO(22) | 3) +#define MT7623_PIN_22_EINT0_FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4) +#define MT7623_PIN_22_EINT0_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5) + +#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1) +#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2) +#define MT7623_PIN_23_EINT1_FUNC_KCOL2 (MTK_PIN_NO(23) | 3) +#define MT7623_PIN_23_EINT1_FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4) +#define MT7623_PIN_23_EINT1_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5) + +#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1) +#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2) +#define MT7623_PIN_24_EINT2_FUNC_KCOL1 (MTK_PIN_NO(24) | 3) +#define MT7623_PIN_24_EINT2_FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4) + +#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1) +#define MT7623_PIN_25_EINT3_FUNC_KCOL0 (MTK_PIN_NO(25) | 3) +#define MT7623_PIN_25_EINT3_FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4) + +#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1) +#define MT7623_PIN_26_EINT4_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2) +#define MT7623_PIN_26_EINT4_FUNC_KROW3 (MTK_PIN_NO(26) | 3) +#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4) +#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5) +#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6) + +#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1) +#define MT7623_PIN_27_EINT5_FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2) +#define MT7623_PIN_27_EINT5_FUNC_KROW2 (MTK_PIN_NO(27) | 3) +#define MT7623_PIN_27_EINT5_FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4) +#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6) + +#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1) +#define MT7623_PIN_28_EINT6_FUNC_KROW1 (MTK_PIN_NO(28) | 3) +#define MT7623_PIN_28_EINT6_FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4) +#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6) + +#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1) +#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2) +#define MT7623_PIN_29_EINT7_FUNC_KROW0 (MTK_PIN_NO(29) | 3) +#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4) +#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5) +#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6) + +#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1) +#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2) +#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3) +#define MT7623_PIN_33_I2S1_DATA_FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4) +#define MT7623_PIN_33_I2S1_DATA_FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5) +#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6) + +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3) +#define 
MT7623_PIN_34_I2S1_DATA_IN_FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6) + +#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1) +#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3) +#define MT7623_PIN_35_I2S1_BCK_FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5) +#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6) + +#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6) + +#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1) +#define MT7623_PIN_37_I2S1_MCLK_FUNC_G1_RXDV (MTK_PIN_NO(37) | 5) + +#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1) +#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2) +#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3) +#define MT7623_PIN_39_JTMS_FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4) + +#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1) +#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2) +#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3) +#define MT7623_PIN_40_JTCK_FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4) + +#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1) +#define MT7623_PIN_41_JTDI_FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2) +#define MT7623_PIN_41_JTDI_FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4) + +#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1) +#define MT7623_PIN_42_JTDO_FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2) +#define MT7623_PIN_42_JTDO_FUNC_DFD_TDO (MTK_PIN_NO(42) | 4) + +#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1) +#define MT7623_PIN_43_NCLE_FUNC_EXT_XCS2 (MTK_PIN_NO(43) | 2) + +#define MT7623_PIN_44_NCEB1_FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT7623_PIN_44_NCEB1_FUNC_NCEB1 (MTK_PIN_NO(44) | 1) +#define MT7623_PIN_44_NCEB1_FUNC_IDDIG (MTK_PIN_NO(44) | 2) + +#define MT7623_PIN_45_NCEB0_FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT7623_PIN_45_NCEB0_FUNC_NCEB0 (MTK_PIN_NO(45) | 1) +#define MT7623_PIN_45_NCEB0_FUNC_DRV_VBUS (MTK_PIN_NO(45) | 2) + +#define MT7623_PIN_46_IR_FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT7623_PIN_46_IR_FUNC_IR (MTK_PIN_NO(46) | 1) + +#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1) +#define MT7623_PIN_47_NREB_FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2) + +#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1) +#define MT7623_PIN_48_NRNB_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2) + +#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1) +#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2) +#define 
MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3) +#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6) + +#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1) +#define MT7623_PIN_53_SPI0_CSN_FUNC_SPDIF (MTK_PIN_NO(53) | 3) +#define MT7623_PIN_53_SPI0_CSN_FUNC_ADC_CK (MTK_PIN_NO(53) | 4) +#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5) + +#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1) +#define MT7623_PIN_54_SPI0_CK_FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3) +#define MT7623_PIN_54_SPI0_CK_FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4) + +#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1) +#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2) +#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3) +#define MT7623_PIN_55_SPI0_MI_FUNC_ADC_WS (MTK_PIN_NO(55) | 4) +#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5) + +#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3) + +#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1) + +#define MT7623_PIN_58_SCL1_FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT7623_PIN_58_SCL1_FUNC_SCL1 (MTK_PIN_NO(58) | 1) + +#define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1) + +#define MT7623_PIN_61_GPIO61_FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT7623_PIN_61_GPIO61_FUNC_TEST_FD (MTK_PIN_NO(61) | 1) + +#define MT7623_PIN_62_GPIO62_FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT7623_PIN_62_GPIO62_FUNC_TEST_FC (MTK_PIN_NO(62) | 1) + +#define MT7623_PIN_63_WB_SCLK_FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT7623_PIN_63_WB_SCLK_FUNC_WB_SCLK (MTK_PIN_NO(63) | 1) + +#define MT7623_PIN_64_WB_SDATA_FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT7623_PIN_64_WB_SDATA_FUNC_WB_SDATA (MTK_PIN_NO(64) | 1) + +#define MT7623_PIN_65_WB_SEN_FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT7623_PIN_65_WB_SEN_FUNC_WB_SEN (MTK_PIN_NO(65) | 1) + +#define MT7623_PIN_66_WB_CRTL0_FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT7623_PIN_66_WB_CRTL0_FUNC_WB_CRTL0 (MTK_PIN_NO(66) | 1) + +#define MT7623_PIN_67_WB_CRTL1_FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT7623_PIN_67_WB_CRTL1_FUNC_WB_CRTL1 (MTK_PIN_NO(67) | 1) + +#define MT7623_PIN_68_WB_CRTL2_FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT7623_PIN_68_WB_CRTL2_FUNC_WB_CRTL2 (MTK_PIN_NO(68) | 1) + +#define MT7623_PIN_69_WB_CRTL3_FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT7623_PIN_69_WB_CRTL3_FUNC_WB_CRTL3 (MTK_PIN_NO(69) | 1) + +#define MT7623_PIN_70_WB_CRTL4_FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT7623_PIN_70_WB_CRTL4_FUNC_WB_CRTL4 (MTK_PIN_NO(70) | 1) + +#define MT7623_PIN_71_WB_CRTL5_FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT7623_PIN_71_WB_CRTL5_FUNC_WB_CRTL5 (MTK_PIN_NO(71) | 1) + +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_I2S0_DATA_IN (MTK_PIN_NO(72) | 1) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(72) | 3) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PWM0 (MTK_PIN_NO(72) | 4) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_DISP_PWM (MTK_PIN_NO(72) | 5) 
+#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_AP_I2S_DI (MTK_PIN_NO(72) | 6) + +#define MT7623_PIN_73_I2S0_LRCK_FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_I2S0_LRCK (MTK_PIN_NO(73) | 1) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(73) | 3) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_AP_I2S_LRCK (MTK_PIN_NO(73) | 6) + +#define MT7623_PIN_74_I2S0_BCK_FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT7623_PIN_74_I2S0_BCK_FUNC_I2S0_BCK (MTK_PIN_NO(74) | 1) +#define MT7623_PIN_74_I2S0_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(74) | 3) +#define MT7623_PIN_74_I2S0_BCK_FUNC_AP_I2S_BCK (MTK_PIN_NO(74) | 6) + +#define MT7623_PIN_75_SDA0_FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT7623_PIN_75_SDA0_FUNC_SDA0 (MTK_PIN_NO(75) | 1) + +#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1) + +#define MT7623_PIN_77_SDA2_FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT7623_PIN_77_SDA2_FUNC_SDA2 (MTK_PIN_NO(77) | 1) + +#define MT7623_PIN_78_SCL2_FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT7623_PIN_78_SCL2_FUNC_SCL2 (MTK_PIN_NO(78) | 1) + +#define MT7623_PIN_79_URXD0_FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT7623_PIN_79_URXD0_FUNC_URXD0 (MTK_PIN_NO(79) | 1) +#define MT7623_PIN_79_URXD0_FUNC_UTXD0 (MTK_PIN_NO(79) | 2) + +#define MT7623_PIN_80_UTXD0_FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT7623_PIN_80_UTXD0_FUNC_UTXD0 (MTK_PIN_NO(80) | 1) +#define MT7623_PIN_80_UTXD0_FUNC_URXD0 (MTK_PIN_NO(80) | 2) + +#define MT7623_PIN_81_URXD1_FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define MT7623_PIN_81_URXD1_FUNC_URXD1 (MTK_PIN_NO(81) | 1) +#define MT7623_PIN_81_URXD1_FUNC_UTXD1 (MTK_PIN_NO(81) | 2) + +#define MT7623_PIN_82_UTXD1_FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT7623_PIN_82_UTXD1_FUNC_UTXD1 (MTK_PIN_NO(82) | 1) +#define MT7623_PIN_82_UTXD1_FUNC_URXD1 (MTK_PIN_NO(82) | 2) + +#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1) +#define MT7623_PIN_83_LCM_RST_FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2) + +#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1) + +#define MT7623_PIN_91_MIPI_TDN3_FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT7623_PIN_91_MIPI_TDN3_FUNC_TDN3 (MTK_PIN_NO(91) | 1) + +#define MT7623_PIN_92_MIPI_TDP3_FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT7623_PIN_92_MIPI_TDP3_FUNC_TDP3 (MTK_PIN_NO(92) | 1) + +#define MT7623_PIN_93_MIPI_TDN2_FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT7623_PIN_93_MIPI_TDN2_FUNC_TDN2 (MTK_PIN_NO(93) | 1) + +#define MT7623_PIN_94_MIPI_TDP2_FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT7623_PIN_94_MIPI_TDP2_FUNC_TDP2 (MTK_PIN_NO(94) | 1) + +#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1) + +#define MT7623_PIN_96_MIPI_TCP_FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT7623_PIN_96_MIPI_TCP_FUNC_TCP (MTK_PIN_NO(96) | 1) + +#define MT7623_PIN_97_MIPI_TDN1_FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT7623_PIN_97_MIPI_TDN1_FUNC_TDN1 (MTK_PIN_NO(97) | 1) + +#define MT7623_PIN_98_MIPI_TDP1_FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT7623_PIN_98_MIPI_TDP1_FUNC_TDP1 (MTK_PIN_NO(98) | 1) + +#define MT7623_PIN_99_MIPI_TDN0_FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT7623_PIN_99_MIPI_TDN0_FUNC_TDN0 (MTK_PIN_NO(99) | 1) + +#define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1) + +#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 
(MTK_PIN_NO(101) | 0) +#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1) +#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3) +#define MT7623_PIN_101_SPI2_CSN_FUNC_KROW0 (MTK_PIN_NO(101) | 4) + +#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1) +#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2) +#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3) +#define MT7623_PIN_102_SPI2_MI_FUNC_KROW1 (MTK_PIN_NO(102) | 4) + +#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1) +#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2) +#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3) +#define MT7623_PIN_103_SPI2_MO_FUNC_KROW2 (MTK_PIN_NO(103) | 4) + +#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1) +#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3) +#define MT7623_PIN_104_SPI2_CK_FUNC_KROW3 (MTK_PIN_NO(104) | 4) + +#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_I2SOUT_BCK (MTK_PIN_NO(105) | 6) + +#define MT7623_PIN_106_MSDC1_CLK_FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK (MTK_PIN_NO(106) | 1) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_SCL1 (MTK_PIN_NO(106) | 3) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_I2SOUT_LRCK (MTK_PIN_NO(106) | 6) + +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_MSDC1_DAT0 (MTK_PIN_NO(107) | 1) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_UTXD0 (MTK_PIN_NO(107) | 5) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_I2SOUT_DATA_OUT (MTK_PIN_NO(107) | 6) + +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_MSDC1_DAT1 (MTK_PIN_NO(108) | 1) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM0 (MTK_PIN_NO(108) | 3) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_URXD0 (MTK_PIN_NO(108) | 5) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM1 (MTK_PIN_NO(108) | 6) + +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_MSDC1_DAT2 (MTK_PIN_NO(109) | 1) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_SDA2 (MTK_PIN_NO(109) | 3) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_UTXD1 (MTK_PIN_NO(109) | 5) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_PWM2 (MTK_PIN_NO(109) | 6) + +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3 (MTK_PIN_NO(110) | 1) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_SCL2 (MTK_PIN_NO(110) | 3) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_URXD1 (MTK_PIN_NO(110) | 5) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_PWM3 (MTK_PIN_NO(110) | 6) + +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7 (MTK_PIN_NO(111) | 1) +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_NLD7 (MTK_PIN_NO(111) | 4) + +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6 (MTK_PIN_NO(112) | 1) +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_NLD6 (MTK_PIN_NO(112) | 4) + +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define 
MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5 (MTK_PIN_NO(113) | 1) +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_NLD5 (MTK_PIN_NO(113) | 4) + +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4 (MTK_PIN_NO(114) | 1) +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_NLD4 (MTK_PIN_NO(114) | 4) + +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB (MTK_PIN_NO(115) | 1) +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_NLD8 (MTK_PIN_NO(115) | 4) + +#define MT7623_PIN_116_MSDC0_CMD_FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD (MTK_PIN_NO(116) | 1) +#define MT7623_PIN_116_MSDC0_CMD_FUNC_NALE (MTK_PIN_NO(116) | 4) + +#define MT7623_PIN_117_MSDC0_CLK_FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK (MTK_PIN_NO(117) | 1) +#define MT7623_PIN_117_MSDC0_CLK_FUNC_NWEB (MTK_PIN_NO(117) | 4) + +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3 (MTK_PIN_NO(118) | 1) +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_NLD3 (MTK_PIN_NO(118) | 4) + +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2 (MTK_PIN_NO(119) | 1) +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_NLD2 (MTK_PIN_NO(119) | 4) + +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1 (MTK_PIN_NO(120) | 1) +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_NLD1 (MTK_PIN_NO(120) | 4) + +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0 (MTK_PIN_NO(121) | 1) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_NLD0 (MTK_PIN_NO(121) | 4) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5) + +#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT7623_PIN_122_GPIO122_FUNC_CEC (MTK_PIN_NO(122) | 1) +#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4) +#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5) + +#define MT7623_PIN_123_HTPLG_FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT7623_PIN_123_HTPLG_FUNC_HTPLG (MTK_PIN_NO(123) | 1) +#define MT7623_PIN_123_HTPLG_FUNC_SCL2 (MTK_PIN_NO(123) | 4) +#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5) + +#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT7623_PIN_124_GPIO124_FUNC_HDMISCK (MTK_PIN_NO(124) | 1) +#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4) +#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5) + +#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT7623_PIN_125_GPIO125_FUNC_HDMISD (MTK_PIN_NO(125) | 1) +#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4) +#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5) + +#define MT7623_PIN_126_I2S0_MCLK_FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT7623_PIN_126_I2S0_MCLK_FUNC_I2S0_MCLK (MTK_PIN_NO(126) | 1) +#define MT7623_PIN_126_I2S0_MCLK_FUNC_AP_I2S_MCLK (MTK_PIN_NO(126) | 6) + +#define MT7623_PIN_199_SPI1_CK_FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define MT7623_PIN_199_SPI1_CK_FUNC_SPI1_CK (MTK_PIN_NO(199) | 1) + +#define MT7623_PIN_200_URXD2_FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define MT7623_PIN_200_URXD2_FUNC_URXD2 (MTK_PIN_NO(200) | 6) + +#define MT7623_PIN_201_UTXD2_FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define MT7623_PIN_201_UTXD2_FUNC_UTXD2 (MTK_PIN_NO(201) | 6) + +#define 
MT7623_PIN_203_PWM0_FUNC_GPIO203 (MTK_PIN_NO(203) | 0) +#define MT7623_PIN_203_PWM0_FUNC_PWM0 (MTK_PIN_NO(203) | 1) +#define MT7623_PIN_203_PWM0_FUNC_DISP_PWM (MTK_PIN_NO(203) | 2) + +#define MT7623_PIN_204_PWM1_FUNC_GPIO204 (MTK_PIN_NO(204) | 0) +#define MT7623_PIN_204_PWM1_FUNC_PWM1 (MTK_PIN_NO(204) | 1) + +#define MT7623_PIN_205_PWM2_FUNC_GPIO205 (MTK_PIN_NO(205) | 0) +#define MT7623_PIN_205_PWM2_FUNC_PWM2 (MTK_PIN_NO(205) | 1) + +#define MT7623_PIN_206_PWM3_FUNC_GPIO206 (MTK_PIN_NO(206) | 0) +#define MT7623_PIN_206_PWM3_FUNC_PWM3 (MTK_PIN_NO(206) | 1) + +#define MT7623_PIN_207_PWM4_FUNC_GPIO207 (MTK_PIN_NO(207) | 0) +#define MT7623_PIN_207_PWM4_FUNC_PWM4 (MTK_PIN_NO(207) | 1) + +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_GPIO208 (MTK_PIN_NO(208) | 0) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_AUD_EXT_CK1 (MTK_PIN_NO(208) | 1) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PWM0 (MTK_PIN_NO(208) | 2) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PCIE0_PERST_N (MTK_PIN_NO(208) | 3) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_DISP_PWM (MTK_PIN_NO(208) | 5) + +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_GPIO209 (MTK_PIN_NO(209) | 0) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_AUD_EXT_CK2 (MTK_PIN_NO(209) | 1) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_MSDC1_WP (MTK_PIN_NO(209) | 2) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PCIE1_PERST_N (MTK_PIN_NO(209) | 3) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PWM1 (MTK_PIN_NO(209) | 5) + +#define MT7623_PIN_236_EXT_SDIO3_FUNC_GPIO236 (MTK_PIN_NO(236) | 0) +#define MT7623_PIN_236_EXT_SDIO3_FUNC_EXT_SDIO3 (MTK_PIN_NO(236) | 1) +#define MT7623_PIN_236_EXT_SDIO3_FUNC_IDDIG (MTK_PIN_NO(236) | 2) + +#define MT7623_PIN_237_EXT_SDIO2_FUNC_GPIO237 (MTK_PIN_NO(237) | 0) +#define MT7623_PIN_237_EXT_SDIO2_FUNC_EXT_SDIO2 (MTK_PIN_NO(237) | 1) +#define MT7623_PIN_237_EXT_SDIO2_FUNC_DRV_VBUS (MTK_PIN_NO(237) | 2) + +#define MT7623_PIN_238_EXT_SDIO1_FUNC_GPIO238 (MTK_PIN_NO(238) | 0) +#define MT7623_PIN_238_EXT_SDIO1_FUNC_EXT_SDIO1 (MTK_PIN_NO(238) | 1) + +#define MT7623_PIN_239_EXT_SDIO0_FUNC_GPIO239 (MTK_PIN_NO(239) | 0) +#define MT7623_PIN_239_EXT_SDIO0_FUNC_EXT_SDIO0 (MTK_PIN_NO(239) | 1) + +#define MT7623_PIN_240_EXT_XCS_FUNC_GPIO240 (MTK_PIN_NO(240) | 0) +#define MT7623_PIN_240_EXT_XCS_FUNC_EXT_XCS (MTK_PIN_NO(240) | 1) + +#define MT7623_PIN_241_EXT_SCK_FUNC_GPIO241 (MTK_PIN_NO(241) | 0) +#define MT7623_PIN_241_EXT_SCK_FUNC_EXT_SCK (MTK_PIN_NO(241) | 1) + +#define MT7623_PIN_242_URTS2_FUNC_GPIO242 (MTK_PIN_NO(242) | 0) +#define MT7623_PIN_242_URTS2_FUNC_URTS2 (MTK_PIN_NO(242) | 1) +#define MT7623_PIN_242_URTS2_FUNC_UTXD3 (MTK_PIN_NO(242) | 2) +#define MT7623_PIN_242_URTS2_FUNC_URXD3 (MTK_PIN_NO(242) | 3) +#define MT7623_PIN_242_URTS2_FUNC_SCL1 (MTK_PIN_NO(242) | 4) + +#define MT7623_PIN_243_UCTS2_FUNC_GPIO243 (MTK_PIN_NO(243) | 0) +#define MT7623_PIN_243_UCTS2_FUNC_UCTS2 (MTK_PIN_NO(243) | 1) +#define MT7623_PIN_243_UCTS2_FUNC_URXD3 (MTK_PIN_NO(243) | 2) +#define MT7623_PIN_243_UCTS2_FUNC_UTXD3 (MTK_PIN_NO(243) | 3) +#define MT7623_PIN_243_UCTS2_FUNC_SDA1 (MTK_PIN_NO(243) | 4) + +#define MT7623_PIN_250_GPIO250_FUNC_GPIO250 (MTK_PIN_NO(250) | 0) +#define MT7623_PIN_250_GPIO250_FUNC_TEST_MD7 (MTK_PIN_NO(250) | 1) +#define MT7623_PIN_250_GPIO250_FUNC_PCIE0_CLKREQ_N (MTK_PIN_NO(250) | 6) + +#define MT7623_PIN_251_GPIO251_FUNC_GPIO251 (MTK_PIN_NO(251) | 0) +#define MT7623_PIN_251_GPIO251_FUNC_TEST_MD6 (MTK_PIN_NO(251) | 1) +#define MT7623_PIN_251_GPIO251_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(251) | 6) + +#define MT7623_PIN_252_GPIO252_FUNC_GPIO252 (MTK_PIN_NO(252) | 0) +#define 
MT7623_PIN_252_GPIO252_FUNC_TEST_MD5 (MTK_PIN_NO(252) | 1) +#define MT7623_PIN_252_GPIO252_FUNC_PCIE1_CLKREQ_N (MTK_PIN_NO(252) | 6) + +#define MT7623_PIN_253_GPIO253_FUNC_GPIO253 (MTK_PIN_NO(253) | 0) +#define MT7623_PIN_253_GPIO253_FUNC_TEST_MD4 (MTK_PIN_NO(253) | 1) +#define MT7623_PIN_253_GPIO253_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(253) | 6) + +#define MT7623_PIN_254_GPIO254_FUNC_GPIO254 (MTK_PIN_NO(254) | 0) +#define MT7623_PIN_254_GPIO254_FUNC_TEST_MD3 (MTK_PIN_NO(254) | 1) +#define MT7623_PIN_254_GPIO254_FUNC_PCIE2_CLKREQ_N (MTK_PIN_NO(254) | 6) + +#define MT7623_PIN_255_GPIO255_FUNC_GPIO255 (MTK_PIN_NO(255) | 0) +#define MT7623_PIN_255_GPIO255_FUNC_TEST_MD2 (MTK_PIN_NO(255) | 1) +#define MT7623_PIN_255_GPIO255_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(255) | 6) + +#define MT7623_PIN_256_GPIO256_FUNC_GPIO256 (MTK_PIN_NO(256) | 0) +#define MT7623_PIN_256_GPIO256_FUNC_TEST_MD1 (MTK_PIN_NO(256) | 1) + +#define MT7623_PIN_257_GPIO257_FUNC_GPIO257 (MTK_PIN_NO(257) | 0) +#define MT7623_PIN_257_GPIO257_FUNC_TEST_MD0 (MTK_PIN_NO(257) | 1) + +#define MT7623_PIN_261_MSDC1_INS_FUNC_GPIO261 (MTK_PIN_NO(261) | 0) +#define MT7623_PIN_261_MSDC1_INS_FUNC_MSDC1_INS (MTK_PIN_NO(261) | 1) + +#define MT7623_PIN_262_G2_TXEN_FUNC_GPIO262 (MTK_PIN_NO(262) | 0) +#define MT7623_PIN_262_G2_TXEN_FUNC_G2_TXEN (MTK_PIN_NO(262) | 1) + +#define MT7623_PIN_263_G2_TXD3_FUNC_GPIO263 (MTK_PIN_NO(263) | 0) +#define MT7623_PIN_263_G2_TXD3_FUNC_G2_TXD3 (MTK_PIN_NO(263) | 1) + +#define MT7623_PIN_264_G2_TXD2_FUNC_GPIO264 (MTK_PIN_NO(264) | 0) +#define MT7623_PIN_264_G2_TXD2_FUNC_G2_TXD2 (MTK_PIN_NO(264) | 1) + +#define MT7623_PIN_265_G2_TXD1_FUNC_GPIO265 (MTK_PIN_NO(265) | 0) +#define MT7623_PIN_265_G2_TXD1_FUNC_G2_TXD1 (MTK_PIN_NO(265) | 1) + +#define MT7623_PIN_266_G2_TXD0_FUNC_GPIO266 (MTK_PIN_NO(266) | 0) +#define MT7623_PIN_266_G2_TXD0_FUNC_G2_TXD0 (MTK_PIN_NO(266) | 1) + +#define MT7623_PIN_267_G2_TXCLK_FUNC_GPIO267 (MTK_PIN_NO(267) | 0) +#define MT7623_PIN_267_G2_TXCLK_FUNC_G2_TXC (MTK_PIN_NO(267) | 1) + +#define MT7623_PIN_268_G2_RXCLK_FUNC_GPIO268 (MTK_PIN_NO(268) | 0) +#define MT7623_PIN_268_G2_RXCLK_FUNC_G2_RXC (MTK_PIN_NO(268) | 1) + +#define MT7623_PIN_269_G2_RXD0_FUNC_GPIO269 (MTK_PIN_NO(269) | 0) +#define MT7623_PIN_269_G2_RXD0_FUNC_G2_RXD0 (MTK_PIN_NO(269) | 1) + +#define MT7623_PIN_270_G2_RXD1_FUNC_GPIO270 (MTK_PIN_NO(270) | 0) +#define MT7623_PIN_270_G2_RXD1_FUNC_G2_RXD1 (MTK_PIN_NO(270) | 1) + +#define MT7623_PIN_271_G2_RXD2_FUNC_GPIO271 (MTK_PIN_NO(271) | 0) +#define MT7623_PIN_271_G2_RXD2_FUNC_G2_RXD2 (MTK_PIN_NO(271) | 1) + +#define MT7623_PIN_272_G2_RXD3_FUNC_GPIO272 (MTK_PIN_NO(272) | 0) +#define MT7623_PIN_272_G2_RXD3_FUNC_G2_RXD3 (MTK_PIN_NO(272) | 1) + +#define MT7623_PIN_274_G2_RXDV_FUNC_GPIO274 (MTK_PIN_NO(274) | 0) +#define MT7623_PIN_274_G2_RXDV_FUNC_G2_RXDV (MTK_PIN_NO(274) | 1) + +#define MT7623_PIN_275_G2_MDC_FUNC_GPIO275 (MTK_PIN_NO(275) | 0) +#define MT7623_PIN_275_G2_MDC_FUNC_MDC (MTK_PIN_NO(275) | 1) + +#define MT7623_PIN_276_G2_MDIO_FUNC_GPIO276 (MTK_PIN_NO(276) | 0) +#define MT7623_PIN_276_G2_MDIO_FUNC_MDIO (MTK_PIN_NO(276) | 1) + +#define MT7623_PIN_278_JTAG_RESET_FUNC_GPIO278 (MTK_PIN_NO(278) | 0) +#define MT7623_PIN_278_JTAG_RESET_FUNC_JTAG_RESET (MTK_PIN_NO(278) | 1) + +#endif /* __DTS_MT7623_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/nomadik.h b/include/dt-bindings/pinctrl/nomadik.h new file mode 100644 index 0000000..fa24565 --- /dev/null +++ b/include/dt-bindings/pinctrl/nomadik.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * nomadik.h + * + * Copyright (C) 
ST-Ericsson SA 2013 + * Author: Gabriel Fernandez for ST-Ericsson. + */ + +#define INPUT_NOPULL 0 +#define INPUT_PULLUP 1 +#define INPUT_PULLDOWN 2 + +#define OUTPUT_LOW 0 +#define OUTPUT_HIGH 1 +#define DIR_OUTPUT 2 + +#define SLPM_DISABLED 0 +#define SLPM_ENABLED 1 + +#define SLPM_INPUT_NOPULL 0 +#define SLPM_INPUT_PULLUP 1 +#define SLPM_INPUT_PULLDOWN 2 +#define SLPM_DIR_INPUT 3 + +#define SLPM_OUTPUT_LOW 0 +#define SLPM_OUTPUT_HIGH 1 +#define SLPM_DIR_OUTPUT 2 + +#define SLPM_WAKEUP_DISABLE 0 +#define SLPM_WAKEUP_ENABLE 1 + +#define GPIOMODE_DISABLED 0 +#define GPIOMODE_ENABLED 1 + +#define SLPM_PDIS_DISABLED 0 +#define SLPM_PDIS_ENABLED 1 diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h new file mode 100644 index 0000000..6257180 --- /dev/null +++ b/include/dt-bindings/pinctrl/omap.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for OMAP pinctrl bindings. + * + * Copyright (C) 2009 Nokia + * Copyright (C) 2009-2010 Texas Instruments + */ + +#ifndef _DT_BINDINGS_PINCTRL_OMAP_H +#define _DT_BINDINGS_PINCTRL_OMAP_H + +/* 34xx mux mode options for each pin. See TRM for options */ +#define MUX_MODE0 0 +#define MUX_MODE1 1 +#define MUX_MODE2 2 +#define MUX_MODE3 3 +#define MUX_MODE4 4 +#define MUX_MODE5 5 +#define MUX_MODE6 6 +#define MUX_MODE7 7 + +/* 24xx/34xx mux bit defines */ +#define PULL_ENA (1 << 3) +#define PULL_UP (1 << 4) +#define ALTELECTRICALSEL (1 << 5) + +/* omap3/4/5 specific mux bit defines */ +#define INPUT_EN (1 << 8) +#define OFF_EN (1 << 9) +#define OFFOUT_EN (1 << 10) +#define OFFOUT_VAL (1 << 11) +#define OFF_PULL_EN (1 << 12) +#define OFF_PULL_UP (1 << 13) +#define WAKEUP_EN (1 << 14) +#define WAKEUP_EVENT (1 << 15) + +/* Active pin states */ +#define PIN_OUTPUT 0 +#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) +#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) +#define PIN_INPUT INPUT_EN +#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) + +/* Off mode states */ +#define PIN_OFF_NONE 0 +#define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) +#define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) +#define PIN_OFF_WAKEUPENABLE WAKEUP_EN + +/* + * Macros to allow using the absolute physical address instead of the + * padconf registers instead of the offset from padconf base. + */ +#define OMAP_IOPAD_OFFSET(pa, offset) (((pa) & 0xffff) - (offset)) + +#define OMAP2420_CORE_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0030) (val) +#define OMAP2430_CORE_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2030) (val) +#define OMAP3_CORE1_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2030) (val) +#define OMAP3430_CORE2_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x25d8) (val) +#define OMAP3630_CORE2_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x25a0) (val) +#define OMAP3_WKUP_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2a00) (val) +#define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) +#define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) +#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) +#define AM33XX_PADCONF(pa, dir, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) ((dir) | (mux)) + +/* + * Macros to allow using the offset from the padconf physical address + * instead of the offset from padconf base. 
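+ *
+ * Editorial usage sketch (illustrative only, not part of the original
+ * patch; the 0x11c offset is the OMAP4_UART4_RX pad listed below, and
+ * pad offsets are board-specific), e.g. in a board .dts pinmux node:
+ *	pinctrl-single,pins = <
+ *		OMAP4_IOPAD(0x11c, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart4_rx */
+ *	>;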
+ */ +#define OMAP_PADCONF_OFFSET(offset, base_offset) ((offset) - (base_offset)) + +#define OMAP4_IOPAD(offset, val) OMAP_PADCONF_OFFSET((offset), 0x0040) (val) +#define OMAP5_IOPAD(offset, val) OMAP_PADCONF_OFFSET((offset), 0x0040) (val) + +/* + * Define some commonly used pins configured by the boards. + * Note that some boards use alternative pins, so check + * the schematics before using these. + */ +#define OMAP3_UART1_RX 0x152 +#define OMAP3_UART2_RX 0x14a +#define OMAP3_UART3_RX 0x16e +#define OMAP4_UART2_RX 0xdc +#define OMAP4_UART3_RX 0x104 +#define OMAP4_UART4_RX 0x11c + +#endif + diff --git a/include/dt-bindings/pinctrl/pads-imx8qm.h b/include/dt-bindings/pinctrl/pads-imx8qm.h new file mode 100644 index 0000000..ae7b294 --- /dev/null +++ b/include/dt-bindings/pinctrl/pads-imx8qm.h @@ -0,0 +1,960 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017~2018 NXP + */ + +#ifndef _IMX8QM_PADS_H +#define _IMX8QM_PADS_H + +/* pin id */ +#define IMX8QM_SIM0_CLK 0 +#define IMX8QM_SIM0_RST 1 +#define IMX8QM_SIM0_IO 2 +#define IMX8QM_SIM0_PD 3 +#define IMX8QM_SIM0_POWER_EN 4 +#define IMX8QM_SIM0_GPIO0_00 5 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_SIM 6 +#define IMX8QM_M40_I2C0_SCL 7 +#define IMX8QM_M40_I2C0_SDA 8 +#define IMX8QM_M40_GPIO0_00 9 +#define IMX8QM_M40_GPIO0_01 10 +#define IMX8QM_M41_I2C0_SCL 11 +#define IMX8QM_M41_I2C0_SDA 12 +#define IMX8QM_M41_GPIO0_00 13 +#define IMX8QM_M41_GPIO0_01 14 +#define IMX8QM_GPT0_CLK 15 +#define IMX8QM_GPT0_CAPTURE 16 +#define IMX8QM_GPT0_COMPARE 17 +#define IMX8QM_GPT1_CLK 18 +#define IMX8QM_GPT1_CAPTURE 19 +#define IMX8QM_GPT1_COMPARE 20 +#define IMX8QM_UART0_RX 21 +#define IMX8QM_UART0_TX 22 +#define IMX8QM_UART0_RTS_B 23 +#define IMX8QM_UART0_CTS_B 24 +#define IMX8QM_UART1_TX 25 +#define IMX8QM_UART1_RX 26 +#define IMX8QM_UART1_RTS_B 27 +#define IMX8QM_UART1_CTS_B 28 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLH 29 +#define IMX8QM_SCU_PMIC_MEMC_ON 30 +#define IMX8QM_SCU_WDOG_OUT 31 +#define IMX8QM_PMIC_I2C_SDA 32 +#define IMX8QM_PMIC_I2C_SCL 33 +#define IMX8QM_PMIC_EARLY_WARNING 34 +#define IMX8QM_PMIC_INT_B 35 +#define IMX8QM_SCU_GPIO0_00 36 +#define IMX8QM_SCU_GPIO0_01 37 +#define IMX8QM_SCU_GPIO0_02 38 +#define IMX8QM_SCU_GPIO0_03 39 +#define IMX8QM_SCU_GPIO0_04 40 +#define IMX8QM_SCU_GPIO0_05 41 +#define IMX8QM_SCU_GPIO0_06 42 +#define IMX8QM_SCU_GPIO0_07 43 +#define IMX8QM_SCU_BOOT_MODE0 44 +#define IMX8QM_SCU_BOOT_MODE1 45 +#define IMX8QM_SCU_BOOT_MODE2 46 +#define IMX8QM_SCU_BOOT_MODE3 47 +#define IMX8QM_SCU_BOOT_MODE4 48 +#define IMX8QM_SCU_BOOT_MODE5 49 +#define IMX8QM_LVDS0_GPIO00 50 +#define IMX8QM_LVDS0_GPIO01 51 +#define IMX8QM_LVDS0_I2C0_SCL 52 +#define IMX8QM_LVDS0_I2C0_SDA 53 +#define IMX8QM_LVDS0_I2C1_SCL 54 +#define IMX8QM_LVDS0_I2C1_SDA 55 +#define IMX8QM_LVDS1_GPIO00 56 +#define IMX8QM_LVDS1_GPIO01 57 +#define IMX8QM_LVDS1_I2C0_SCL 58 +#define IMX8QM_LVDS1_I2C0_SDA 59 +#define IMX8QM_LVDS1_I2C1_SCL 60 +#define IMX8QM_LVDS1_I2C1_SDA 61 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_LVDSGPIO 62 +#define IMX8QM_MIPI_DSI0_I2C0_SCL 63 +#define IMX8QM_MIPI_DSI0_I2C0_SDA 64 +#define IMX8QM_MIPI_DSI0_GPIO0_00 65 +#define IMX8QM_MIPI_DSI0_GPIO0_01 66 +#define IMX8QM_MIPI_DSI1_I2C0_SCL 67 +#define IMX8QM_MIPI_DSI1_I2C0_SDA 68 +#define IMX8QM_MIPI_DSI1_GPIO0_00 69 +#define IMX8QM_MIPI_DSI1_GPIO0_01 70 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO 71 +#define IMX8QM_MIPI_CSI0_MCLK_OUT 72 +#define IMX8QM_MIPI_CSI0_I2C0_SCL 73 +#define IMX8QM_MIPI_CSI0_I2C0_SDA 74 +#define 
IMX8QM_MIPI_CSI0_GPIO0_00	75
+#define IMX8QM_MIPI_CSI0_GPIO0_01	76
+#define IMX8QM_MIPI_CSI1_MCLK_OUT	77
+#define IMX8QM_MIPI_CSI1_GPIO0_00	78
+#define IMX8QM_MIPI_CSI1_GPIO0_01	79
+#define IMX8QM_MIPI_CSI1_I2C0_SCL	80
+#define IMX8QM_MIPI_CSI1_I2C0_SDA	81
+#define IMX8QM_HDMI_TX0_TS_SCL	82
+#define IMX8QM_HDMI_TX0_TS_SDA	83
+#define IMX8QM_COMP_CTL_GPIO_3V3_HDMIGPIO	84
+#define IMX8QM_ESAI1_FSR	85
+#define IMX8QM_ESAI1_FST	86
+#define IMX8QM_ESAI1_SCKR	87
+#define IMX8QM_ESAI1_SCKT	88
+#define IMX8QM_ESAI1_TX0	89
+#define IMX8QM_ESAI1_TX1	90
+#define IMX8QM_ESAI1_TX2_RX3	91
+#define IMX8QM_ESAI1_TX3_RX2	92
+#define IMX8QM_ESAI1_TX4_RX1	93
+#define IMX8QM_ESAI1_TX5_RX0	94
+#define IMX8QM_SPDIF0_RX	95
+#define IMX8QM_SPDIF0_TX	96
+#define IMX8QM_SPDIF0_EXT_CLK	97
+#define IMX8QM_SPI3_SCK	98
+#define IMX8QM_SPI3_SDO	99
+#define IMX8QM_SPI3_SDI	100
+#define IMX8QM_SPI3_CS0	101
+#define IMX8QM_SPI3_CS1	102
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHB	103
+#define IMX8QM_ESAI0_FSR	104
+#define IMX8QM_ESAI0_FST	105
+#define IMX8QM_ESAI0_SCKR	106
+#define IMX8QM_ESAI0_SCKT	107
+#define IMX8QM_ESAI0_TX0	108
+#define IMX8QM_ESAI0_TX1	109
+#define IMX8QM_ESAI0_TX2_RX3	110
+#define IMX8QM_ESAI0_TX3_RX2	111
+#define IMX8QM_ESAI0_TX4_RX1	112
+#define IMX8QM_ESAI0_TX5_RX0	113
+#define IMX8QM_MCLK_IN0	114
+#define IMX8QM_MCLK_OUT0	115
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHC	116
+#define IMX8QM_SPI0_SCK	117
+#define IMX8QM_SPI0_SDO	118
+#define IMX8QM_SPI0_SDI	119
+#define IMX8QM_SPI0_CS0	120
+#define IMX8QM_SPI0_CS1	121
+#define IMX8QM_SPI2_SCK	122
+#define IMX8QM_SPI2_SDO	123
+#define IMX8QM_SPI2_SDI	124
+#define IMX8QM_SPI2_CS0	125
+#define IMX8QM_SPI2_CS1	126
+#define IMX8QM_SAI1_RXC	127
+#define IMX8QM_SAI1_RXD	128
+#define IMX8QM_SAI1_RXFS	129
+#define IMX8QM_SAI1_TXC	130
+#define IMX8QM_SAI1_TXD	131
+#define IMX8QM_SAI1_TXFS	132
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHT	133
+#define IMX8QM_ADC_IN7	134
+#define IMX8QM_ADC_IN6	135
+#define IMX8QM_ADC_IN5	136
+#define IMX8QM_ADC_IN4	137
+#define IMX8QM_ADC_IN3	138
+#define IMX8QM_ADC_IN2	139
+#define IMX8QM_ADC_IN1	140
+#define IMX8QM_ADC_IN0	141
+#define IMX8QM_MLB_SIG	142
+#define IMX8QM_MLB_CLK	143
+#define IMX8QM_MLB_DATA	144
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLHT	145
+#define IMX8QM_FLEXCAN0_RX	146
+#define IMX8QM_FLEXCAN0_TX	147
+#define IMX8QM_FLEXCAN1_RX	148
+#define IMX8QM_FLEXCAN1_TX	149
+#define IMX8QM_FLEXCAN2_RX	150
+#define IMX8QM_FLEXCAN2_TX	151
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOTHR	152
+#define IMX8QM_USB_SS3_TC0	153
+#define IMX8QM_USB_SS3_TC1	154
+#define IMX8QM_USB_SS3_TC2	155
+#define IMX8QM_USB_SS3_TC3	156
+#define IMX8QM_COMP_CTL_GPIO_3V3_USB3IO	157
+#define IMX8QM_USDHC1_RESET_B	158
+#define IMX8QM_USDHC1_VSELECT	159
+#define IMX8QM_USDHC2_RESET_B	160
+#define IMX8QM_USDHC2_VSELECT	161
+#define IMX8QM_USDHC2_WP	162
+#define IMX8QM_USDHC2_CD_B	163
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSELSEP	164
+#define IMX8QM_ENET0_MDIO	165
+#define IMX8QM_ENET0_MDC	166
+#define IMX8QM_ENET0_REFCLK_125M_25M	167
+#define IMX8QM_ENET1_REFCLK_125M_25M	168
+#define IMX8QM_ENET1_MDIO	169
+#define IMX8QM_ENET1_MDC	170
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOCT	171
+#define IMX8QM_QSPI1A_SS0_B	172
+#define IMX8QM_QSPI1A_SS1_B	173
+#define IMX8QM_QSPI1A_SCLK	174
+#define IMX8QM_QSPI1A_DQS	175
+#define IMX8QM_QSPI1A_DATA3	176
+#define IMX8QM_QSPI1A_DATA2	177
+#define IMX8QM_QSPI1A_DATA1	178
+#define IMX8QM_QSPI1A_DATA0	179
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI1	180
+#define IMX8QM_QSPI0A_DATA0	181
+#define IMX8QM_QSPI0A_DATA1	182
+#define IMX8QM_QSPI0A_DATA2	183
+#define IMX8QM_QSPI0A_DATA3	184
+#define IMX8QM_QSPI0A_DQS	185
+#define IMX8QM_QSPI0A_SS0_B	186
+#define IMX8QM_QSPI0A_SS1_B	187
+#define IMX8QM_QSPI0A_SCLK	188
+#define IMX8QM_QSPI0B_SCLK	189
+#define IMX8QM_QSPI0B_DATA0	190
+#define IMX8QM_QSPI0B_DATA1	191
+#define IMX8QM_QSPI0B_DATA2	192
+#define IMX8QM_QSPI0B_DATA3	193
+#define IMX8QM_QSPI0B_DQS	194
+#define IMX8QM_QSPI0B_SS0_B	195
+#define IMX8QM_QSPI0B_SS1_B	196
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI0	197
+#define IMX8QM_PCIE_CTRL0_CLKREQ_B	198
+#define IMX8QM_PCIE_CTRL0_WAKE_B	199
+#define IMX8QM_PCIE_CTRL0_PERST_B	200
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B	201
+#define IMX8QM_PCIE_CTRL1_WAKE_B	202
+#define IMX8QM_PCIE_CTRL1_PERST_B	203
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_PCIESEP	204
+#define IMX8QM_USB_HSIC0_DATA	205
+#define IMX8QM_USB_HSIC0_STROBE	206
+#define IMX8QM_CALIBRATION_0_HSIC	207
+#define IMX8QM_CALIBRATION_1_HSIC	208
+#define IMX8QM_EMMC0_CLK	209
+#define IMX8QM_EMMC0_CMD	210
+#define IMX8QM_EMMC0_DATA0	211
+#define IMX8QM_EMMC0_DATA1	212
+#define IMX8QM_EMMC0_DATA2	213
+#define IMX8QM_EMMC0_DATA3	214
+#define IMX8QM_EMMC0_DATA4	215
+#define IMX8QM_EMMC0_DATA5	216
+#define IMX8QM_EMMC0_DATA6	217
+#define IMX8QM_EMMC0_DATA7	218
+#define IMX8QM_EMMC0_STROBE	219
+#define IMX8QM_EMMC0_RESET_B	220
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_SD1FIX	221
+#define IMX8QM_USDHC1_CLK	222
+#define IMX8QM_USDHC1_CMD	223
+#define IMX8QM_USDHC1_DATA0	224
+#define IMX8QM_USDHC1_DATA1	225
+#define IMX8QM_CTL_NAND_RE_P_N	226
+#define IMX8QM_USDHC1_DATA2	227
+#define IMX8QM_USDHC1_DATA3	228
+#define IMX8QM_CTL_NAND_DQS_P_N	229
+#define IMX8QM_USDHC1_DATA4	230
+#define IMX8QM_USDHC1_DATA5	231
+#define IMX8QM_USDHC1_DATA6	232
+#define IMX8QM_USDHC1_DATA7	233
+#define IMX8QM_USDHC1_STROBE	234
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL2	235
+#define IMX8QM_USDHC2_CLK	236
+#define IMX8QM_USDHC2_CMD	237
+#define IMX8QM_USDHC2_DATA0	238
+#define IMX8QM_USDHC2_DATA1	239
+#define IMX8QM_USDHC2_DATA2	240
+#define IMX8QM_USDHC2_DATA3	241
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL3	242
+#define IMX8QM_ENET0_RGMII_TXC	243
+#define IMX8QM_ENET0_RGMII_TX_CTL	244
+#define IMX8QM_ENET0_RGMII_TXD0	245
+#define IMX8QM_ENET0_RGMII_TXD1	246
+#define IMX8QM_ENET0_RGMII_TXD2	247
+#define IMX8QM_ENET0_RGMII_TXD3	248
+#define IMX8QM_ENET0_RGMII_RXC	249
+#define IMX8QM_ENET0_RGMII_RX_CTL	250
+#define IMX8QM_ENET0_RGMII_RXD0	251
+#define IMX8QM_ENET0_RGMII_RXD1	252
+#define IMX8QM_ENET0_RGMII_RXD2	253
+#define IMX8QM_ENET0_RGMII_RXD3	254
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB	255
+#define IMX8QM_ENET1_RGMII_TXC	256
+#define IMX8QM_ENET1_RGMII_TX_CTL	257
+#define IMX8QM_ENET1_RGMII_TXD0	258
+#define IMX8QM_ENET1_RGMII_TXD1	259
+#define IMX8QM_ENET1_RGMII_TXD2	260
+#define IMX8QM_ENET1_RGMII_TXD3	261
+#define IMX8QM_ENET1_RGMII_RXC	262
+#define IMX8QM_ENET1_RGMII_RX_CTL	263
+#define IMX8QM_ENET1_RGMII_RXD0	264
+#define IMX8QM_ENET1_RGMII_RXD1	265
+#define IMX8QM_ENET1_RGMII_RXD2	266
+#define IMX8QM_ENET1_RGMII_RXD3	267
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA	268
+
+/*
+ * format: <pin_id mux_mode>
+ */
+#define IMX8QM_SIM0_CLK_DMA_SIM0_CLK	IMX8QM_SIM0_CLK 0
+#define IMX8QM_SIM0_CLK_LSIO_GPIO0_IO00	IMX8QM_SIM0_CLK 3
+#define IMX8QM_SIM0_RST_DMA_SIM0_RST	IMX8QM_SIM0_RST 0
+#define IMX8QM_SIM0_RST_LSIO_GPIO0_IO01	IMX8QM_SIM0_RST 3
+#define IMX8QM_SIM0_IO_DMA_SIM0_IO	IMX8QM_SIM0_IO 0
+#define IMX8QM_SIM0_IO_LSIO_GPIO0_IO02	IMX8QM_SIM0_IO 3
+#define IMX8QM_SIM0_PD_DMA_SIM0_PD	IMX8QM_SIM0_PD 0
+#define IMX8QM_SIM0_PD_DMA_I2C3_SCL	IMX8QM_SIM0_PD 1
+#define IMX8QM_SIM0_PD_LSIO_GPIO0_IO03	IMX8QM_SIM0_PD 3
+#define IMX8QM_SIM0_POWER_EN_DMA_SIM0_POWER_EN	IMX8QM_SIM0_POWER_EN 0
+#define IMX8QM_SIM0_POWER_EN_DMA_I2C3_SDA	IMX8QM_SIM0_POWER_EN 1
+#define IMX8QM_SIM0_POWER_EN_LSIO_GPIO0_IO04	IMX8QM_SIM0_POWER_EN 3
+#define IMX8QM_SIM0_GPIO0_00_DMA_SIM0_POWER_EN	IMX8QM_SIM0_GPIO0_00 0
+#define IMX8QM_SIM0_GPIO0_00_LSIO_GPIO0_IO05	IMX8QM_SIM0_GPIO0_00 3
+#define IMX8QM_M40_I2C0_SCL_M40_I2C0_SCL	IMX8QM_M40_I2C0_SCL 0
+#define IMX8QM_M40_I2C0_SCL_M40_UART0_RX	IMX8QM_M40_I2C0_SCL 1
+#define IMX8QM_M40_I2C0_SCL_M40_GPIO0_IO02	IMX8QM_M40_I2C0_SCL 2
+#define IMX8QM_M40_I2C0_SCL_LSIO_GPIO0_IO06	IMX8QM_M40_I2C0_SCL 3
+#define IMX8QM_M40_I2C0_SDA_M40_I2C0_SDA	IMX8QM_M40_I2C0_SDA 0
+#define IMX8QM_M40_I2C0_SDA_M40_UART0_TX	IMX8QM_M40_I2C0_SDA 1
+#define IMX8QM_M40_I2C0_SDA_M40_GPIO0_IO03	IMX8QM_M40_I2C0_SDA 2
+#define IMX8QM_M40_I2C0_SDA_LSIO_GPIO0_IO07	IMX8QM_M40_I2C0_SDA 3
+#define IMX8QM_M40_GPIO0_00_M40_GPIO0_IO00	IMX8QM_M40_GPIO0_00 0
+#define IMX8QM_M40_GPIO0_00_M40_TPM0_CH0	IMX8QM_M40_GPIO0_00 1
+#define IMX8QM_M40_GPIO0_00_DMA_UART4_RX	IMX8QM_M40_GPIO0_00 2
+#define IMX8QM_M40_GPIO0_00_LSIO_GPIO0_IO08	IMX8QM_M40_GPIO0_00 3
+#define IMX8QM_M40_GPIO0_01_M40_GPIO0_IO01	IMX8QM_M40_GPIO0_01 0
+#define IMX8QM_M40_GPIO0_01_M40_TPM0_CH1	IMX8QM_M40_GPIO0_01 1
+#define IMX8QM_M40_GPIO0_01_DMA_UART4_TX	IMX8QM_M40_GPIO0_01 2
+#define IMX8QM_M40_GPIO0_01_LSIO_GPIO0_IO09	IMX8QM_M40_GPIO0_01 3
+#define IMX8QM_M41_I2C0_SCL_M41_I2C0_SCL	IMX8QM_M41_I2C0_SCL 0
+#define IMX8QM_M41_I2C0_SCL_M41_UART0_RX	IMX8QM_M41_I2C0_SCL 1
+#define IMX8QM_M41_I2C0_SCL_M41_GPIO0_IO02	IMX8QM_M41_I2C0_SCL 2
+#define IMX8QM_M41_I2C0_SCL_LSIO_GPIO0_IO10	IMX8QM_M41_I2C0_SCL 3
+#define IMX8QM_M41_I2C0_SDA_M41_I2C0_SDA	IMX8QM_M41_I2C0_SDA 0
+#define IMX8QM_M41_I2C0_SDA_M41_UART0_TX	IMX8QM_M41_I2C0_SDA 1
+#define IMX8QM_M41_I2C0_SDA_M41_GPIO0_IO03	IMX8QM_M41_I2C0_SDA 2
+#define IMX8QM_M41_I2C0_SDA_LSIO_GPIO0_IO11	IMX8QM_M41_I2C0_SDA 3
+#define IMX8QM_M41_GPIO0_00_M41_GPIO0_IO00	IMX8QM_M41_GPIO0_00 0
+#define IMX8QM_M41_GPIO0_00_M41_TPM0_CH0	IMX8QM_M41_GPIO0_00 1
+#define IMX8QM_M41_GPIO0_00_DMA_UART3_RX	IMX8QM_M41_GPIO0_00 2
+#define IMX8QM_M41_GPIO0_00_LSIO_GPIO0_IO12	IMX8QM_M41_GPIO0_00 3
+#define IMX8QM_M41_GPIO0_01_M41_GPIO0_IO01	IMX8QM_M41_GPIO0_01 0
+#define IMX8QM_M41_GPIO0_01_M41_TPM0_CH1	IMX8QM_M41_GPIO0_01 1
+#define IMX8QM_M41_GPIO0_01_DMA_UART3_TX	IMX8QM_M41_GPIO0_01 2
+#define IMX8QM_M41_GPIO0_01_LSIO_GPIO0_IO13	IMX8QM_M41_GPIO0_01 3
+#define IMX8QM_GPT0_CLK_LSIO_GPT0_CLK	IMX8QM_GPT0_CLK 0
+#define IMX8QM_GPT0_CLK_DMA_I2C1_SCL	IMX8QM_GPT0_CLK 1
+#define IMX8QM_GPT0_CLK_LSIO_KPP0_COL4	IMX8QM_GPT0_CLK 2
+#define IMX8QM_GPT0_CLK_LSIO_GPIO0_IO14	IMX8QM_GPT0_CLK 3
+#define IMX8QM_GPT0_CAPTURE_LSIO_GPT0_CAPTURE	IMX8QM_GPT0_CAPTURE 0
+#define IMX8QM_GPT0_CAPTURE_DMA_I2C1_SDA	IMX8QM_GPT0_CAPTURE 1
+#define IMX8QM_GPT0_CAPTURE_LSIO_KPP0_COL5	IMX8QM_GPT0_CAPTURE 2
+#define IMX8QM_GPT0_CAPTURE_LSIO_GPIO0_IO15	IMX8QM_GPT0_CAPTURE 3
+#define IMX8QM_GPT0_COMPARE_LSIO_GPT0_COMPARE	IMX8QM_GPT0_COMPARE 0
+#define IMX8QM_GPT0_COMPARE_LSIO_PWM3_OUT	IMX8QM_GPT0_COMPARE 1
+#define IMX8QM_GPT0_COMPARE_LSIO_KPP0_COL6	IMX8QM_GPT0_COMPARE 2
+#define IMX8QM_GPT0_COMPARE_LSIO_GPIO0_IO16	IMX8QM_GPT0_COMPARE 3
+#define IMX8QM_GPT1_CLK_LSIO_GPT1_CLK	IMX8QM_GPT1_CLK 0
+#define IMX8QM_GPT1_CLK_DMA_I2C2_SCL	IMX8QM_GPT1_CLK 1
+#define IMX8QM_GPT1_CLK_LSIO_KPP0_COL7	IMX8QM_GPT1_CLK 2
+#define IMX8QM_GPT1_CLK_LSIO_GPIO0_IO17	IMX8QM_GPT1_CLK 3
+#define IMX8QM_GPT1_CAPTURE_LSIO_GPT1_CAPTURE	IMX8QM_GPT1_CAPTURE 0
+#define IMX8QM_GPT1_CAPTURE_DMA_I2C2_SDA	IMX8QM_GPT1_CAPTURE 1
+#define IMX8QM_GPT1_CAPTURE_LSIO_KPP0_ROW4	IMX8QM_GPT1_CAPTURE 2
+#define IMX8QM_GPT1_CAPTURE_LSIO_GPIO0_IO18	IMX8QM_GPT1_CAPTURE 3
+#define IMX8QM_GPT1_COMPARE_LSIO_GPT1_COMPARE	IMX8QM_GPT1_COMPARE 0
+#define IMX8QM_GPT1_COMPARE_LSIO_PWM2_OUT	IMX8QM_GPT1_COMPARE 1
+#define IMX8QM_GPT1_COMPARE_LSIO_KPP0_ROW5	IMX8QM_GPT1_COMPARE 2
+#define IMX8QM_GPT1_COMPARE_LSIO_GPIO0_IO19	IMX8QM_GPT1_COMPARE 3
+#define IMX8QM_UART0_RX_DMA_UART0_RX	IMX8QM_UART0_RX 0
+#define IMX8QM_UART0_RX_SCU_UART0_RX	IMX8QM_UART0_RX 1
+#define IMX8QM_UART0_RX_LSIO_GPIO0_IO20	IMX8QM_UART0_RX 3
+#define IMX8QM_UART0_TX_DMA_UART0_TX	IMX8QM_UART0_TX 0
+#define IMX8QM_UART0_TX_SCU_UART0_TX	IMX8QM_UART0_TX 1
+#define IMX8QM_UART0_TX_LSIO_GPIO0_IO21	IMX8QM_UART0_TX 3
+#define IMX8QM_UART0_RTS_B_DMA_UART0_RTS_B	IMX8QM_UART0_RTS_B 0
+#define IMX8QM_UART0_RTS_B_LSIO_PWM0_OUT	IMX8QM_UART0_RTS_B 1
+#define IMX8QM_UART0_RTS_B_DMA_UART2_RX	IMX8QM_UART0_RTS_B 2
+#define IMX8QM_UART0_RTS_B_LSIO_GPIO0_IO22	IMX8QM_UART0_RTS_B 3
+#define IMX8QM_UART0_CTS_B_DMA_UART0_CTS_B	IMX8QM_UART0_CTS_B 0
+#define IMX8QM_UART0_CTS_B_LSIO_PWM1_OUT	IMX8QM_UART0_CTS_B 1
+#define IMX8QM_UART0_CTS_B_DMA_UART2_TX	IMX8QM_UART0_CTS_B 2
+#define IMX8QM_UART0_CTS_B_LSIO_GPIO0_IO23	IMX8QM_UART0_CTS_B 3
+#define IMX8QM_UART1_TX_DMA_UART1_TX	IMX8QM_UART1_TX 0
+#define IMX8QM_UART1_TX_DMA_SPI3_SCK	IMX8QM_UART1_TX 1
+#define IMX8QM_UART1_TX_LSIO_GPIO0_IO24	IMX8QM_UART1_TX 3
+#define IMX8QM_UART1_RX_DMA_UART1_RX	IMX8QM_UART1_RX 0
+#define IMX8QM_UART1_RX_DMA_SPI3_SDO	IMX8QM_UART1_RX 1
+#define IMX8QM_UART1_RX_LSIO_GPIO0_IO25	IMX8QM_UART1_RX 3
+#define IMX8QM_UART1_RTS_B_DMA_UART1_RTS_B	IMX8QM_UART1_RTS_B 0
+#define IMX8QM_UART1_RTS_B_DMA_SPI3_SDI	IMX8QM_UART1_RTS_B 1
+#define IMX8QM_UART1_RTS_B_DMA_UART1_CTS_B	IMX8QM_UART1_RTS_B 2
+#define IMX8QM_UART1_RTS_B_LSIO_GPIO0_IO26	IMX8QM_UART1_RTS_B 3
+#define IMX8QM_UART1_CTS_B_DMA_UART1_CTS_B	IMX8QM_UART1_CTS_B 0
+#define IMX8QM_UART1_CTS_B_DMA_SPI3_CS0	IMX8QM_UART1_CTS_B 1
+#define IMX8QM_UART1_CTS_B_DMA_UART1_RTS_B	IMX8QM_UART1_CTS_B 2
+#define IMX8QM_UART1_CTS_B_LSIO_GPIO0_IO27	IMX8QM_UART1_CTS_B 3
+#define IMX8QM_SCU_PMIC_MEMC_ON_SCU_GPIO0_IOXX_PMIC_MEMC_ON	IMX8QM_SCU_PMIC_MEMC_ON 0
+#define IMX8QM_SCU_WDOG_OUT_SCU_WDOG0_WDOG_OUT	IMX8QM_SCU_WDOG_OUT 0
+#define IMX8QM_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA	IMX8QM_PMIC_I2C_SDA 0
+#define IMX8QM_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL	IMX8QM_PMIC_I2C_SCL 0
+#define IMX8QM_PMIC_EARLY_WARNING_SCU_PMIC_EARLY_WARNING	IMX8QM_PMIC_EARLY_WARNING 0
+#define IMX8QM_PMIC_INT_B_SCU_DIMX8QMMIC_INT_B	IMX8QM_PMIC_INT_B 0
+#define IMX8QM_SCU_GPIO0_00_SCU_GPIO0_IO00	IMX8QM_SCU_GPIO0_00 0
+#define IMX8QM_SCU_GPIO0_00_SCU_UART0_RX	IMX8QM_SCU_GPIO0_00 1
+#define IMX8QM_SCU_GPIO0_00_LSIO_GPIO0_IO28	IMX8QM_SCU_GPIO0_00 3
+#define IMX8QM_SCU_GPIO0_01_SCU_GPIO0_IO01	IMX8QM_SCU_GPIO0_01 0
+#define IMX8QM_SCU_GPIO0_01_SCU_UART0_TX	IMX8QM_SCU_GPIO0_01 1
+#define IMX8QM_SCU_GPIO0_01_LSIO_GPIO0_IO29	IMX8QM_SCU_GPIO0_01 3
+#define IMX8QM_SCU_GPIO0_02_SCU_GPIO0_IO02	IMX8QM_SCU_GPIO0_02 0
+#define IMX8QM_SCU_GPIO0_02_SCU_GPIO0_IOXX_PMIC_GPU0_ON	IMX8QM_SCU_GPIO0_02 1
+#define IMX8QM_SCU_GPIO0_02_LSIO_GPIO0_IO30	IMX8QM_SCU_GPIO0_02 3
+#define IMX8QM_SCU_GPIO0_03_SCU_GPIO0_IO03	IMX8QM_SCU_GPIO0_03 0
+#define IMX8QM_SCU_GPIO0_03_SCU_GPIO0_IOXX_PMIC_GPU1_ON	IMX8QM_SCU_GPIO0_03 1
+#define IMX8QM_SCU_GPIO0_03_LSIO_GPIO0_IO31	IMX8QM_SCU_GPIO0_03 3
+#define IMX8QM_SCU_GPIO0_04_SCU_GPIO0_IO04	IMX8QM_SCU_GPIO0_04 0
+#define IMX8QM_SCU_GPIO0_04_SCU_GPIO0_IOXX_PMIC_A72_ON	IMX8QM_SCU_GPIO0_04 1
+#define IMX8QM_SCU_GPIO0_04_LSIO_GPIO1_IO00	IMX8QM_SCU_GPIO0_04 3
+#define IMX8QM_SCU_GPIO0_05_SCU_GPIO0_IO05	IMX8QM_SCU_GPIO0_05 0
+#define IMX8QM_SCU_GPIO0_05_SCU_GPIO0_IOXX_PMIC_A53_ON	IMX8QM_SCU_GPIO0_05 1
+#define IMX8QM_SCU_GPIO0_05_LSIO_GPIO1_IO01	IMX8QM_SCU_GPIO0_05 3
+#define IMX8QM_SCU_GPIO0_06_SCU_GPIO0_IO06	IMX8QM_SCU_GPIO0_06 0
+#define IMX8QM_SCU_GPIO0_06_SCU_TPM0_CH0	IMX8QM_SCU_GPIO0_06 1
+#define IMX8QM_SCU_GPIO0_06_LSIO_GPIO1_IO02	IMX8QM_SCU_GPIO0_06 3
+#define IMX8QM_SCU_GPIO0_07_SCU_GPIO0_IO07	IMX8QM_SCU_GPIO0_07 0
+#define IMX8QM_SCU_GPIO0_07_SCU_TPM0_CH1	IMX8QM_SCU_GPIO0_07 1
+#define IMX8QM_SCU_GPIO0_07_SCU_DSC_RTC_CLOCK_OUTPUT_32K	IMX8QM_SCU_GPIO0_07 2
+#define IMX8QM_SCU_GPIO0_07_LSIO_GPIO1_IO03	IMX8QM_SCU_GPIO0_07 3
+#define IMX8QM_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0	IMX8QM_SCU_BOOT_MODE0 0
+#define IMX8QM_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1	IMX8QM_SCU_BOOT_MODE1 0
+#define IMX8QM_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2	IMX8QM_SCU_BOOT_MODE2 0
+#define IMX8QM_SCU_BOOT_MODE3_SCU_DSC_BOOT_MODE3	IMX8QM_SCU_BOOT_MODE3 0
+#define IMX8QM_SCU_BOOT_MODE4_SCU_DSC_BOOT_MODE4	IMX8QM_SCU_BOOT_MODE4 0
+#define IMX8QM_SCU_BOOT_MODE4_SCU_PMIC_I2C_SCL	IMX8QM_SCU_BOOT_MODE4 1
+#define IMX8QM_SCU_BOOT_MODE5_SCU_DSC_BOOT_MODE5	IMX8QM_SCU_BOOT_MODE5 0
+#define IMX8QM_SCU_BOOT_MODE5_SCU_PMIC_I2C_SDA	IMX8QM_SCU_BOOT_MODE5 1
+#define IMX8QM_LVDS0_GPIO00_LVDS0_GPIO0_IO00	IMX8QM_LVDS0_GPIO00 0
+#define IMX8QM_LVDS0_GPIO00_LVDS0_PWM0_OUT	IMX8QM_LVDS0_GPIO00 1
+#define IMX8QM_LVDS0_GPIO00_LSIO_GPIO1_IO04	IMX8QM_LVDS0_GPIO00 3
+#define IMX8QM_LVDS0_GPIO01_LVDS0_GPIO0_IO01	IMX8QM_LVDS0_GPIO01 0
+#define IMX8QM_LVDS0_GPIO01_LSIO_GPIO1_IO05	IMX8QM_LVDS0_GPIO01 3
+#define IMX8QM_LVDS0_I2C0_SCL_LVDS0_I2C0_SCL	IMX8QM_LVDS0_I2C0_SCL 0
+#define IMX8QM_LVDS0_I2C0_SCL_LVDS0_GPIO0_IO02	IMX8QM_LVDS0_I2C0_SCL 1
+#define IMX8QM_LVDS0_I2C0_SCL_LSIO_GPIO1_IO06	IMX8QM_LVDS0_I2C0_SCL 3
+#define IMX8QM_LVDS0_I2C0_SDA_LVDS0_I2C0_SDA	IMX8QM_LVDS0_I2C0_SDA 0
+#define IMX8QM_LVDS0_I2C0_SDA_LVDS0_GPIO0_IO03	IMX8QM_LVDS0_I2C0_SDA 1
+#define IMX8QM_LVDS0_I2C0_SDA_LSIO_GPIO1_IO07	IMX8QM_LVDS0_I2C0_SDA 3
+#define IMX8QM_LVDS0_I2C1_SCL_LVDS0_I2C1_SCL	IMX8QM_LVDS0_I2C1_SCL 0
+#define IMX8QM_LVDS0_I2C1_SCL_DMA_UART2_TX	IMX8QM_LVDS0_I2C1_SCL 1
+#define IMX8QM_LVDS0_I2C1_SCL_LSIO_GPIO1_IO08	IMX8QM_LVDS0_I2C1_SCL 3
+#define IMX8QM_LVDS0_I2C1_SDA_LVDS0_I2C1_SDA	IMX8QM_LVDS0_I2C1_SDA 0
+#define IMX8QM_LVDS0_I2C1_SDA_DMA_UART2_RX	IMX8QM_LVDS0_I2C1_SDA 1
+#define IMX8QM_LVDS0_I2C1_SDA_LSIO_GPIO1_IO09	IMX8QM_LVDS0_I2C1_SDA 3
+#define IMX8QM_LVDS1_GPIO00_LVDS1_GPIO0_IO00	IMX8QM_LVDS1_GPIO00 0
+#define IMX8QM_LVDS1_GPIO00_LVDS1_PWM0_OUT	IMX8QM_LVDS1_GPIO00 1
+#define IMX8QM_LVDS1_GPIO00_LSIO_GPIO1_IO10	IMX8QM_LVDS1_GPIO00 3
+#define IMX8QM_LVDS1_GPIO01_LVDS1_GPIO0_IO01	IMX8QM_LVDS1_GPIO01 0
+#define IMX8QM_LVDS1_GPIO01_LSIO_GPIO1_IO11	IMX8QM_LVDS1_GPIO01 3
+#define IMX8QM_LVDS1_I2C0_SCL_LVDS1_I2C0_SCL	IMX8QM_LVDS1_I2C0_SCL 0
+#define IMX8QM_LVDS1_I2C0_SCL_LVDS1_GPIO0_IO02	IMX8QM_LVDS1_I2C0_SCL 1
+#define IMX8QM_LVDS1_I2C0_SCL_LSIO_GPIO1_IO12	IMX8QM_LVDS1_I2C0_SCL 3
+#define IMX8QM_LVDS1_I2C0_SDA_LVDS1_I2C0_SDA	IMX8QM_LVDS1_I2C0_SDA 0
+#define IMX8QM_LVDS1_I2C0_SDA_LVDS1_GPIO0_IO03	IMX8QM_LVDS1_I2C0_SDA 1
+#define IMX8QM_LVDS1_I2C0_SDA_LSIO_GPIO1_IO13	IMX8QM_LVDS1_I2C0_SDA 3
+#define IMX8QM_LVDS1_I2C1_SCL_LVDS1_I2C1_SCL	IMX8QM_LVDS1_I2C1_SCL 0
+#define IMX8QM_LVDS1_I2C1_SCL_DMA_UART3_TX	IMX8QM_LVDS1_I2C1_SCL 1
+#define IMX8QM_LVDS1_I2C1_SCL_LSIO_GPIO1_IO14	IMX8QM_LVDS1_I2C1_SCL 3
+#define IMX8QM_LVDS1_I2C1_SDA_LVDS1_I2C1_SDA	IMX8QM_LVDS1_I2C1_SDA 0
+#define IMX8QM_LVDS1_I2C1_SDA_DMA_UART3_RX	IMX8QM_LVDS1_I2C1_SDA 1
+#define IMX8QM_LVDS1_I2C1_SDA_LSIO_GPIO1_IO15	IMX8QM_LVDS1_I2C1_SDA 3
+#define IMX8QM_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL	IMX8QM_MIPI_DSI0_I2C0_SCL 0
+#define IMX8QM_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO16	IMX8QM_MIPI_DSI0_I2C0_SCL 3
+#define IMX8QM_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA	IMX8QM_MIPI_DSI0_I2C0_SDA 0
+#define IMX8QM_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO17	IMX8QM_MIPI_DSI0_I2C0_SDA 3
+#define IMX8QM_MIPI_DSI0_GPIO0_00_MIPI_DSI0_GPIO0_IO00	IMX8QM_MIPI_DSI0_GPIO0_00 0
+#define IMX8QM_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT	IMX8QM_MIPI_DSI0_GPIO0_00 1
+#define IMX8QM_MIPI_DSI0_GPIO0_00_LSIO_GPIO1_IO18	IMX8QM_MIPI_DSI0_GPIO0_00 3
+#define IMX8QM_MIPI_DSI0_GPIO0_01_MIPI_DSI0_GPIO0_IO01	IMX8QM_MIPI_DSI0_GPIO0_01 0
+#define IMX8QM_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO19	IMX8QM_MIPI_DSI0_GPIO0_01 3
+#define IMX8QM_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL	IMX8QM_MIPI_DSI1_I2C0_SCL 0
+#define IMX8QM_MIPI_DSI1_I2C0_SCL_LSIO_GPIO1_IO20	IMX8QM_MIPI_DSI1_I2C0_SCL 3
+#define IMX8QM_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA	IMX8QM_MIPI_DSI1_I2C0_SDA 0
+#define IMX8QM_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO21	IMX8QM_MIPI_DSI1_I2C0_SDA 3
+#define IMX8QM_MIPI_DSI1_GPIO0_00_MIPI_DSI1_GPIO0_IO00	IMX8QM_MIPI_DSI1_GPIO0_00 0
+#define IMX8QM_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT	IMX8QM_MIPI_DSI1_GPIO0_00 1
+#define IMX8QM_MIPI_DSI1_GPIO0_00_LSIO_GPIO1_IO22	IMX8QM_MIPI_DSI1_GPIO0_00 3
+#define IMX8QM_MIPI_DSI1_GPIO0_01_MIPI_DSI1_GPIO0_IO01	IMX8QM_MIPI_DSI1_GPIO0_01 0
+#define IMX8QM_MIPI_DSI1_GPIO0_01_LSIO_GPIO1_IO23	IMX8QM_MIPI_DSI1_GPIO0_01 3
+#define IMX8QM_MIPI_CSI0_MCLK_OUT_MIPI_CSI0_ACM_MCLK_OUT	IMX8QM_MIPI_CSI0_MCLK_OUT 0
+#define IMX8QM_MIPI_CSI0_MCLK_OUT_LSIO_GPIO1_IO24	IMX8QM_MIPI_CSI0_MCLK_OUT 3
+#define IMX8QM_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_I2C0_SCL	IMX8QM_MIPI_CSI0_I2C0_SCL 0
+#define IMX8QM_MIPI_CSI0_I2C0_SCL_LSIO_GPIO1_IO25	IMX8QM_MIPI_CSI0_I2C0_SCL 3
+#define IMX8QM_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_I2C0_SDA	IMX8QM_MIPI_CSI0_I2C0_SDA 0
+#define IMX8QM_MIPI_CSI0_I2C0_SDA_LSIO_GPIO1_IO26	IMX8QM_MIPI_CSI0_I2C0_SDA 3
+#define IMX8QM_MIPI_CSI0_GPIO0_00_MIPI_CSI0_GPIO0_IO00	IMX8QM_MIPI_CSI0_GPIO0_00 0
+#define IMX8QM_MIPI_CSI0_GPIO0_00_DMA_I2C0_SCL	IMX8QM_MIPI_CSI0_GPIO0_00 1
+#define IMX8QM_MIPI_CSI0_GPIO0_00_MIPI_CSI1_I2C0_SCL	IMX8QM_MIPI_CSI0_GPIO0_00 2
+#define IMX8QM_MIPI_CSI0_GPIO0_00_LSIO_GPIO1_IO27	IMX8QM_MIPI_CSI0_GPIO0_00 3
+#define IMX8QM_MIPI_CSI0_GPIO0_01_MIPI_CSI0_GPIO0_IO01	IMX8QM_MIPI_CSI0_GPIO0_01 0
+#define IMX8QM_MIPI_CSI0_GPIO0_01_DMA_I2C0_SDA	IMX8QM_MIPI_CSI0_GPIO0_01 1
+#define IMX8QM_MIPI_CSI0_GPIO0_01_MIPI_CSI1_I2C0_SDA	IMX8QM_MIPI_CSI0_GPIO0_01 2
+#define IMX8QM_MIPI_CSI0_GPIO0_01_LSIO_GPIO1_IO28	IMX8QM_MIPI_CSI0_GPIO0_01 3
+#define IMX8QM_MIPI_CSI1_MCLK_OUT_MIPI_CSI1_ACM_MCLK_OUT	IMX8QM_MIPI_CSI1_MCLK_OUT 0
+#define IMX8QM_MIPI_CSI1_MCLK_OUT_LSIO_GPIO1_IO29	IMX8QM_MIPI_CSI1_MCLK_OUT 3
+#define IMX8QM_MIPI_CSI1_GPIO0_00_MIPI_CSI1_GPIO0_IO00	IMX8QM_MIPI_CSI1_GPIO0_00 0
+#define IMX8QM_MIPI_CSI1_GPIO0_00_DMA_UART4_RX	IMX8QM_MIPI_CSI1_GPIO0_00 1
+#define IMX8QM_MIPI_CSI1_GPIO0_00_LSIO_GPIO1_IO30	IMX8QM_MIPI_CSI1_GPIO0_00 3
+#define IMX8QM_MIPI_CSI1_GPIO0_01_MIPI_CSI1_GPIO0_IO01	IMX8QM_MIPI_CSI1_GPIO0_01 0
+#define IMX8QM_MIPI_CSI1_GPIO0_01_DMA_UART4_TX	IMX8QM_MIPI_CSI1_GPIO0_01 1
+#define IMX8QM_MIPI_CSI1_GPIO0_01_LSIO_GPIO1_IO31	IMX8QM_MIPI_CSI1_GPIO0_01 3
+#define IMX8QM_MIPI_CSI1_I2C0_SCL_MIPI_CSI1_I2C0_SCL	IMX8QM_MIPI_CSI1_I2C0_SCL 0
+#define IMX8QM_MIPI_CSI1_I2C0_SCL_LSIO_GPIO2_IO00	IMX8QM_MIPI_CSI1_I2C0_SCL 3
+#define IMX8QM_MIPI_CSI1_I2C0_SDA_MIPI_CSI1_I2C0_SDA	IMX8QM_MIPI_CSI1_I2C0_SDA 0
+#define IMX8QM_MIPI_CSI1_I2C0_SDA_LSIO_GPIO2_IO01	IMX8QM_MIPI_CSI1_I2C0_SDA 3
+#define IMX8QM_HDMI_TX0_TS_SCL_HDMI_TX0_I2C0_SCL	IMX8QM_HDMI_TX0_TS_SCL 0
+#define IMX8QM_HDMI_TX0_TS_SCL_DMA_I2C0_SCL	IMX8QM_HDMI_TX0_TS_SCL 1
+#define IMX8QM_HDMI_TX0_TS_SCL_LSIO_GPIO2_IO02	IMX8QM_HDMI_TX0_TS_SCL 3
+#define IMX8QM_HDMI_TX0_TS_SDA_HDMI_TX0_I2C0_SDA	IMX8QM_HDMI_TX0_TS_SDA 0
+#define IMX8QM_HDMI_TX0_TS_SDA_DMA_I2C0_SDA	IMX8QM_HDMI_TX0_TS_SDA 1
+#define IMX8QM_HDMI_TX0_TS_SDA_LSIO_GPIO2_IO03	IMX8QM_HDMI_TX0_TS_SDA 3
+#define IMX8QM_ESAI1_FSR_AUD_ESAI1_FSR	IMX8QM_ESAI1_FSR 0
+#define IMX8QM_ESAI1_FSR_LSIO_GPIO2_IO04	IMX8QM_ESAI1_FSR 3
+#define IMX8QM_ESAI1_FST_AUD_ESAI1_FST	IMX8QM_ESAI1_FST 0
+#define IMX8QM_ESAI1_FST_AUD_SPDIF0_EXT_CLK	IMX8QM_ESAI1_FST 1
+#define IMX8QM_ESAI1_FST_LSIO_GPIO2_IO05	IMX8QM_ESAI1_FST 3
+#define IMX8QM_ESAI1_SCKR_AUD_ESAI1_SCKR	IMX8QM_ESAI1_SCKR 0
+#define IMX8QM_ESAI1_SCKR_LSIO_GPIO2_IO06	IMX8QM_ESAI1_SCKR 3
+#define IMX8QM_ESAI1_SCKT_AUD_ESAI1_SCKT	IMX8QM_ESAI1_SCKT 0
+#define IMX8QM_ESAI1_SCKT_AUD_SAI2_RXC	IMX8QM_ESAI1_SCKT 1
+#define IMX8QM_ESAI1_SCKT_AUD_SPDIF0_EXT_CLK	IMX8QM_ESAI1_SCKT 2
+#define IMX8QM_ESAI1_SCKT_LSIO_GPIO2_IO07	IMX8QM_ESAI1_SCKT 3
+#define IMX8QM_ESAI1_TX0_AUD_ESAI1_TX0	IMX8QM_ESAI1_TX0 0
+#define IMX8QM_ESAI1_TX0_AUD_SAI2_RXD	IMX8QM_ESAI1_TX0 1
+#define IMX8QM_ESAI1_TX0_AUD_SPDIF0_RX	IMX8QM_ESAI1_TX0 2
+#define IMX8QM_ESAI1_TX0_LSIO_GPIO2_IO08	IMX8QM_ESAI1_TX0 3
+#define IMX8QM_ESAI1_TX1_AUD_ESAI1_TX1	IMX8QM_ESAI1_TX1 0
+#define IMX8QM_ESAI1_TX1_AUD_SAI2_RXFS	IMX8QM_ESAI1_TX1 1
+#define IMX8QM_ESAI1_TX1_AUD_SPDIF0_TX	IMX8QM_ESAI1_TX1 2
+#define IMX8QM_ESAI1_TX1_LSIO_GPIO2_IO09	IMX8QM_ESAI1_TX1 3
+#define IMX8QM_ESAI1_TX2_RX3_AUD_ESAI1_TX2_RX3	IMX8QM_ESAI1_TX2_RX3 0
+#define IMX8QM_ESAI1_TX2_RX3_AUD_SPDIF0_RX	IMX8QM_ESAI1_TX2_RX3 1
+#define IMX8QM_ESAI1_TX2_RX3_LSIO_GPIO2_IO10	IMX8QM_ESAI1_TX2_RX3 3
+#define IMX8QM_ESAI1_TX3_RX2_AUD_ESAI1_TX3_RX2	IMX8QM_ESAI1_TX3_RX2 0
+#define IMX8QM_ESAI1_TX3_RX2_AUD_SPDIF0_TX	IMX8QM_ESAI1_TX3_RX2 1
+#define IMX8QM_ESAI1_TX3_RX2_LSIO_GPIO2_IO11	IMX8QM_ESAI1_TX3_RX2 3
+#define IMX8QM_ESAI1_TX4_RX1_AUD_ESAI1_TX4_RX1	IMX8QM_ESAI1_TX4_RX1 0
+#define IMX8QM_ESAI1_TX4_RX1_LSIO_GPIO2_IO12	IMX8QM_ESAI1_TX4_RX1 3
+#define IMX8QM_ESAI1_TX5_RX0_AUD_ESAI1_TX5_RX0	IMX8QM_ESAI1_TX5_RX0 0
+#define IMX8QM_ESAI1_TX5_RX0_LSIO_GPIO2_IO13	IMX8QM_ESAI1_TX5_RX0 3
+#define IMX8QM_SPDIF0_RX_AUD_SPDIF0_RX	IMX8QM_SPDIF0_RX 0
+#define IMX8QM_SPDIF0_RX_AUD_MQS_R	IMX8QM_SPDIF0_RX 1
+#define IMX8QM_SPDIF0_RX_AUD_ACM_MCLK_IN1	IMX8QM_SPDIF0_RX 2
+#define IMX8QM_SPDIF0_RX_LSIO_GPIO2_IO14	IMX8QM_SPDIF0_RX 3
+#define IMX8QM_SPDIF0_TX_AUD_SPDIF0_TX	IMX8QM_SPDIF0_TX 0
+#define IMX8QM_SPDIF0_TX_AUD_MQS_L	IMX8QM_SPDIF0_TX 1
+#define IMX8QM_SPDIF0_TX_AUD_ACM_MCLK_OUT1	IMX8QM_SPDIF0_TX 2
+#define IMX8QM_SPDIF0_TX_LSIO_GPIO2_IO15	IMX8QM_SPDIF0_TX 3
+#define IMX8QM_SPDIF0_EXT_CLK_AUD_SPDIF0_EXT_CLK	IMX8QM_SPDIF0_EXT_CLK 0
+#define IMX8QM_SPDIF0_EXT_CLK_DMA_DMA0_REQ_IN0	IMX8QM_SPDIF0_EXT_CLK 1
+#define IMX8QM_SPDIF0_EXT_CLK_LSIO_GPIO2_IO16	IMX8QM_SPDIF0_EXT_CLK 3
+#define IMX8QM_SPI3_SCK_DMA_SPI3_SCK	IMX8QM_SPI3_SCK 0
+#define IMX8QM_SPI3_SCK_LSIO_GPIO2_IO17	IMX8QM_SPI3_SCK 3
+#define IMX8QM_SPI3_SDO_DMA_SPI3_SDO	IMX8QM_SPI3_SDO 0
+#define IMX8QM_SPI3_SDO_DMA_FTM_CH0	IMX8QM_SPI3_SDO 1
+#define IMX8QM_SPI3_SDO_LSIO_GPIO2_IO18	IMX8QM_SPI3_SDO 3
+#define IMX8QM_SPI3_SDI_DMA_SPI3_SDI	IMX8QM_SPI3_SDI 0
+#define IMX8QM_SPI3_SDI_DMA_FTM_CH1	IMX8QM_SPI3_SDI 1
+#define IMX8QM_SPI3_SDI_LSIO_GPIO2_IO19	IMX8QM_SPI3_SDI 3
+#define IMX8QM_SPI3_CS0_DMA_SPI3_CS0	IMX8QM_SPI3_CS0 0
+#define IMX8QM_SPI3_CS0_DMA_FTM_CH2	IMX8QM_SPI3_CS0 1
+#define IMX8QM_SPI3_CS0_LSIO_GPIO2_IO20	IMX8QM_SPI3_CS0 3
+#define IMX8QM_SPI3_CS1_DMA_SPI3_CS1	IMX8QM_SPI3_CS1 0
+#define IMX8QM_SPI3_CS1_LSIO_GPIO2_IO21	IMX8QM_SPI3_CS1 3
+#define IMX8QM_ESAI0_FSR_AUD_ESAI0_FSR	IMX8QM_ESAI0_FSR 0
+#define IMX8QM_ESAI0_FSR_LSIO_GPIO2_IO22	IMX8QM_ESAI0_FSR 3
+#define IMX8QM_ESAI0_FST_AUD_ESAI0_FST	IMX8QM_ESAI0_FST 0
+#define IMX8QM_ESAI0_FST_LSIO_GPIO2_IO23	IMX8QM_ESAI0_FST 3
+#define IMX8QM_ESAI0_SCKR_AUD_ESAI0_SCKR	IMX8QM_ESAI0_SCKR 0
+#define IMX8QM_ESAI0_SCKR_LSIO_GPIO2_IO24	IMX8QM_ESAI0_SCKR 3
+#define IMX8QM_ESAI0_SCKT_AUD_ESAI0_SCKT	IMX8QM_ESAI0_SCKT 0
+#define IMX8QM_ESAI0_SCKT_LSIO_GPIO2_IO25	IMX8QM_ESAI0_SCKT 3
+#define IMX8QM_ESAI0_TX0_AUD_ESAI0_TX0	IMX8QM_ESAI0_TX0 0
+#define IMX8QM_ESAI0_TX0_LSIO_GPIO2_IO26	IMX8QM_ESAI0_TX0 3
+#define IMX8QM_ESAI0_TX1_AUD_ESAI0_TX1	IMX8QM_ESAI0_TX1 0
+#define IMX8QM_ESAI0_TX1_LSIO_GPIO2_IO27	IMX8QM_ESAI0_TX1 3
+#define IMX8QM_ESAI0_TX2_RX3_AUD_ESAI0_TX2_RX3	IMX8QM_ESAI0_TX2_RX3 0
+#define IMX8QM_ESAI0_TX2_RX3_LSIO_GPIO2_IO28	IMX8QM_ESAI0_TX2_RX3 3
+#define IMX8QM_ESAI0_TX3_RX2_AUD_ESAI0_TX3_RX2	IMX8QM_ESAI0_TX3_RX2 0
+#define IMX8QM_ESAI0_TX3_RX2_LSIO_GPIO2_IO29	IMX8QM_ESAI0_TX3_RX2 3
+#define IMX8QM_ESAI0_TX4_RX1_AUD_ESAI0_TX4_RX1	IMX8QM_ESAI0_TX4_RX1 0
+#define IMX8QM_ESAI0_TX4_RX1_LSIO_GPIO2_IO30	IMX8QM_ESAI0_TX4_RX1 3
+#define IMX8QM_ESAI0_TX5_RX0_AUD_ESAI0_TX5_RX0	IMX8QM_ESAI0_TX5_RX0 0
+#define IMX8QM_ESAI0_TX5_RX0_LSIO_GPIO2_IO31	IMX8QM_ESAI0_TX5_RX0 3
+#define IMX8QM_MCLK_IN0_AUD_ACM_MCLK_IN0	IMX8QM_MCLK_IN0 0
+#define IMX8QM_MCLK_IN0_AUD_ESAI0_RX_HF_CLK	IMX8QM_MCLK_IN0 1
+#define IMX8QM_MCLK_IN0_AUD_ESAI1_RX_HF_CLK	IMX8QM_MCLK_IN0 2
+#define IMX8QM_MCLK_IN0_LSIO_GPIO3_IO00	IMX8QM_MCLK_IN0 3
+#define IMX8QM_MCLK_OUT0_AUD_ACM_MCLK_OUT0	IMX8QM_MCLK_OUT0 0
+#define IMX8QM_MCLK_OUT0_AUD_ESAI0_TX_HF_CLK	IMX8QM_MCLK_OUT0 1
+#define IMX8QM_MCLK_OUT0_AUD_ESAI1_TX_HF_CLK	IMX8QM_MCLK_OUT0 2
+#define IMX8QM_MCLK_OUT0_LSIO_GPIO3_IO01	IMX8QM_MCLK_OUT0 3
+#define IMX8QM_SPI0_SCK_DMA_SPI0_SCK	IMX8QM_SPI0_SCK 0
+#define IMX8QM_SPI0_SCK_AUD_SAI0_RXC	IMX8QM_SPI0_SCK 1
+#define IMX8QM_SPI0_SCK_LSIO_GPIO3_IO02	IMX8QM_SPI0_SCK 3
+#define IMX8QM_SPI0_SDO_DMA_SPI0_SDO	IMX8QM_SPI0_SDO 0
+#define IMX8QM_SPI0_SDO_AUD_SAI0_TXD	IMX8QM_SPI0_SDO 1
+#define IMX8QM_SPI0_SDO_LSIO_GPIO3_IO03	IMX8QM_SPI0_SDO 3
+#define IMX8QM_SPI0_SDI_DMA_SPI0_SDI	IMX8QM_SPI0_SDI 0
+#define IMX8QM_SPI0_SDI_AUD_SAI0_RXD	IMX8QM_SPI0_SDI 1
+#define IMX8QM_SPI0_SDI_LSIO_GPIO3_IO04	IMX8QM_SPI0_SDI 3
+#define IMX8QM_SPI0_CS0_DMA_SPI0_CS0	IMX8QM_SPI0_CS0 0
+#define IMX8QM_SPI0_CS0_AUD_SAI0_RXFS	IMX8QM_SPI0_CS0 1
+#define IMX8QM_SPI0_CS0_LSIO_GPIO3_IO05	IMX8QM_SPI0_CS0 3
+#define IMX8QM_SPI0_CS1_DMA_SPI0_CS1	IMX8QM_SPI0_CS1 0
+#define IMX8QM_SPI0_CS1_AUD_SAI0_TXC	IMX8QM_SPI0_CS1 1
+#define IMX8QM_SPI0_CS1_LSIO_GPIO3_IO06	IMX8QM_SPI0_CS1 3
+#define IMX8QM_SPI2_SCK_DMA_SPI2_SCK	IMX8QM_SPI2_SCK 0
+#define IMX8QM_SPI2_SCK_LSIO_GPIO3_IO07	IMX8QM_SPI2_SCK 3
+#define IMX8QM_SPI2_SDO_DMA_SPI2_SDO	IMX8QM_SPI2_SDO 0
+#define IMX8QM_SPI2_SDO_LSIO_GPIO3_IO08	IMX8QM_SPI2_SDO 3
+#define IMX8QM_SPI2_SDI_DMA_SPI2_SDI	IMX8QM_SPI2_SDI 0
+#define IMX8QM_SPI2_SDI_LSIO_GPIO3_IO09	IMX8QM_SPI2_SDI 3
+#define IMX8QM_SPI2_CS0_DMA_SPI2_CS0	IMX8QM_SPI2_CS0 0
+#define IMX8QM_SPI2_CS0_LSIO_GPIO3_IO10	IMX8QM_SPI2_CS0 3
+#define IMX8QM_SPI2_CS1_DMA_SPI2_CS1	IMX8QM_SPI2_CS1 0
+#define IMX8QM_SPI2_CS1_AUD_SAI0_TXFS	IMX8QM_SPI2_CS1 1
+#define IMX8QM_SPI2_CS1_LSIO_GPIO3_IO11	IMX8QM_SPI2_CS1 3
+#define IMX8QM_SAI1_RXC_AUD_SAI1_RXC	IMX8QM_SAI1_RXC 0
+#define IMX8QM_SAI1_RXC_AUD_SAI0_TXD	IMX8QM_SAI1_RXC 1
+#define IMX8QM_SAI1_RXC_LSIO_GPIO3_IO12	IMX8QM_SAI1_RXC 3
+#define IMX8QM_SAI1_RXD_AUD_SAI1_RXD	IMX8QM_SAI1_RXD 0
+#define IMX8QM_SAI1_RXD_AUD_SAI0_TXFS	IMX8QM_SAI1_RXD 1
+#define IMX8QM_SAI1_RXD_LSIO_GPIO3_IO13	IMX8QM_SAI1_RXD 3
+#define IMX8QM_SAI1_RXFS_AUD_SAI1_RXFS	IMX8QM_SAI1_RXFS 0
+#define IMX8QM_SAI1_RXFS_AUD_SAI0_RXD	IMX8QM_SAI1_RXFS 1
+#define IMX8QM_SAI1_RXFS_LSIO_GPIO3_IO14	IMX8QM_SAI1_RXFS 3
+#define IMX8QM_SAI1_TXC_AUD_SAI1_TXC	IMX8QM_SAI1_TXC 0
+#define IMX8QM_SAI1_TXC_AUD_SAI0_TXC	IMX8QM_SAI1_TXC 1
+#define IMX8QM_SAI1_TXC_LSIO_GPIO3_IO15	IMX8QM_SAI1_TXC 3
+#define IMX8QM_SAI1_TXD_AUD_SAI1_TXD	IMX8QM_SAI1_TXD 0
+#define IMX8QM_SAI1_TXD_AUD_SAI1_RXC	IMX8QM_SAI1_TXD 1
+#define IMX8QM_SAI1_TXD_LSIO_GPIO3_IO16	IMX8QM_SAI1_TXD 3
+#define IMX8QM_SAI1_TXFS_AUD_SAI1_TXFS	IMX8QM_SAI1_TXFS 0
+#define IMX8QM_SAI1_TXFS_AUD_SAI1_RXFS	IMX8QM_SAI1_TXFS 1
+#define IMX8QM_SAI1_TXFS_LSIO_GPIO3_IO17	IMX8QM_SAI1_TXFS 3
+#define IMX8QM_ADC_IN7_DMA_ADC1_IN3	IMX8QM_ADC_IN7 0
+#define IMX8QM_ADC_IN7_DMA_SPI1_CS1	IMX8QM_ADC_IN7 1
+#define IMX8QM_ADC_IN7_LSIO_KPP0_ROW3	IMX8QM_ADC_IN7 2
+#define IMX8QM_ADC_IN7_LSIO_GPIO3_IO25	IMX8QM_ADC_IN7 3
+#define IMX8QM_ADC_IN6_DMA_ADC1_IN2	IMX8QM_ADC_IN6 0
+#define IMX8QM_ADC_IN6_DMA_SPI1_CS0	IMX8QM_ADC_IN6 1
+#define IMX8QM_ADC_IN6_LSIO_KPP0_ROW2	IMX8QM_ADC_IN6 2
+#define IMX8QM_ADC_IN6_LSIO_GPIO3_IO24	IMX8QM_ADC_IN6 3
+#define IMX8QM_ADC_IN5_DMA_ADC1_IN1	IMX8QM_ADC_IN5 0
+#define IMX8QM_ADC_IN5_DMA_SPI1_SDI	IMX8QM_ADC_IN5 1
+#define IMX8QM_ADC_IN5_LSIO_KPP0_ROW1	IMX8QM_ADC_IN5 2
+#define IMX8QM_ADC_IN5_LSIO_GPIO3_IO23	IMX8QM_ADC_IN5 3
+#define IMX8QM_ADC_IN4_DMA_ADC1_IN0	IMX8QM_ADC_IN4 0
+#define IMX8QM_ADC_IN4_DMA_SPI1_SDO	IMX8QM_ADC_IN4 1
+#define IMX8QM_ADC_IN4_LSIO_KPP0_ROW0	IMX8QM_ADC_IN4 2
+#define IMX8QM_ADC_IN4_LSIO_GPIO3_IO22	IMX8QM_ADC_IN4 3
+#define IMX8QM_ADC_IN3_DMA_ADC0_IN3	IMX8QM_ADC_IN3 0
+#define IMX8QM_ADC_IN3_DMA_SPI1_SCK	IMX8QM_ADC_IN3 1
+#define IMX8QM_ADC_IN3_LSIO_KPP0_COL3	IMX8QM_ADC_IN3 2
+#define IMX8QM_ADC_IN3_LSIO_GPIO3_IO21	IMX8QM_ADC_IN3 3
+#define IMX8QM_ADC_IN2_DMA_ADC0_IN2	IMX8QM_ADC_IN2 0
+#define IMX8QM_ADC_IN2_LSIO_KPP0_COL2	IMX8QM_ADC_IN2 2
+#define IMX8QM_ADC_IN2_LSIO_GPIO3_IO20	IMX8QM_ADC_IN2 3
+#define IMX8QM_ADC_IN1_DMA_ADC0_IN1	IMX8QM_ADC_IN1 0
+#define IMX8QM_ADC_IN1_LSIO_KPP0_COL1	IMX8QM_ADC_IN1 2
+#define IMX8QM_ADC_IN1_LSIO_GPIO3_IO19	IMX8QM_ADC_IN1 3
+#define IMX8QM_ADC_IN0_DMA_ADC0_IN0	IMX8QM_ADC_IN0 0
+#define IMX8QM_ADC_IN0_LSIO_KPP0_COL0	IMX8QM_ADC_IN0 2
+#define IMX8QM_ADC_IN0_LSIO_GPIO3_IO18	IMX8QM_ADC_IN0 3
+#define IMX8QM_MLB_SIG_CONN_MLB_SIG	IMX8QM_MLB_SIG 0
+#define IMX8QM_MLB_SIG_AUD_SAI3_RXC	IMX8QM_MLB_SIG 1
+#define IMX8QM_MLB_SIG_LSIO_GPIO3_IO26	IMX8QM_MLB_SIG 3
+#define IMX8QM_MLB_CLK_CONN_MLB_CLK	IMX8QM_MLB_CLK 0
+#define IMX8QM_MLB_CLK_AUD_SAI3_RXFS	IMX8QM_MLB_CLK 1
+#define IMX8QM_MLB_CLK_LSIO_GPIO3_IO27	IMX8QM_MLB_CLK 3
+#define IMX8QM_MLB_DATA_CONN_MLB_DATA	IMX8QM_MLB_DATA 0
+#define IMX8QM_MLB_DATA_AUD_SAI3_RXD	IMX8QM_MLB_DATA 1
+#define IMX8QM_MLB_DATA_LSIO_GPIO3_IO28	IMX8QM_MLB_DATA 3
+#define IMX8QM_FLEXCAN0_RX_DMA_FLEXCAN0_RX	IMX8QM_FLEXCAN0_RX 0
+#define IMX8QM_FLEXCAN0_RX_LSIO_GPIO3_IO29	IMX8QM_FLEXCAN0_RX 3
+#define IMX8QM_FLEXCAN0_TX_DMA_FLEXCAN0_TX	IMX8QM_FLEXCAN0_TX 0
+#define IMX8QM_FLEXCAN0_TX_LSIO_GPIO3_IO30	IMX8QM_FLEXCAN0_TX 3
+#define IMX8QM_FLEXCAN1_RX_DMA_FLEXCAN1_RX	IMX8QM_FLEXCAN1_RX 0
+#define IMX8QM_FLEXCAN1_RX_LSIO_GPIO3_IO31	IMX8QM_FLEXCAN1_RX 3
+#define IMX8QM_FLEXCAN1_TX_DMA_FLEXCAN1_TX	IMX8QM_FLEXCAN1_TX 0
+#define IMX8QM_FLEXCAN1_TX_LSIO_GPIO4_IO00	IMX8QM_FLEXCAN1_TX 3
+#define IMX8QM_FLEXCAN2_RX_DMA_FLEXCAN2_RX	IMX8QM_FLEXCAN2_RX 0
+#define IMX8QM_FLEXCAN2_RX_LSIO_GPIO4_IO01	IMX8QM_FLEXCAN2_RX 3
+#define IMX8QM_FLEXCAN2_TX_DMA_FLEXCAN2_TX	IMX8QM_FLEXCAN2_TX 0
+#define IMX8QM_FLEXCAN2_TX_LSIO_GPIO4_IO02	IMX8QM_FLEXCAN2_TX 3
+#define IMX8QM_USB_SS3_TC0_DMA_I2C1_SCL	IMX8QM_USB_SS3_TC0 0
+#define IMX8QM_USB_SS3_TC0_CONN_USB_OTG1_PWR	IMX8QM_USB_SS3_TC0 1
+#define IMX8QM_USB_SS3_TC0_LSIO_GPIO4_IO03	IMX8QM_USB_SS3_TC0 3
+#define IMX8QM_USB_SS3_TC1_DMA_I2C1_SCL	IMX8QM_USB_SS3_TC1 0
+#define IMX8QM_USB_SS3_TC1_CONN_USB_OTG2_PWR	IMX8QM_USB_SS3_TC1 1
+#define IMX8QM_USB_SS3_TC1_LSIO_GPIO4_IO04	IMX8QM_USB_SS3_TC1 3
+#define IMX8QM_USB_SS3_TC2_DMA_I2C1_SDA	IMX8QM_USB_SS3_TC2 0
+#define IMX8QM_USB_SS3_TC2_CONN_USB_OTG1_OC	IMX8QM_USB_SS3_TC2 1
+#define IMX8QM_USB_SS3_TC2_LSIO_GPIO4_IO05	IMX8QM_USB_SS3_TC2 3
+#define IMX8QM_USB_SS3_TC3_DMA_I2C1_SDA	IMX8QM_USB_SS3_TC3 0
+#define IMX8QM_USB_SS3_TC3_CONN_USB_OTG2_OC	IMX8QM_USB_SS3_TC3 1
+#define IMX8QM_USB_SS3_TC3_LSIO_GPIO4_IO06	IMX8QM_USB_SS3_TC3 3
+#define IMX8QM_USDHC1_RESET_B_CONN_USDHC1_RESET_B	IMX8QM_USDHC1_RESET_B 0
+#define IMX8QM_USDHC1_RESET_B_LSIO_GPIO4_IO07	IMX8QM_USDHC1_RESET_B 3
+#define IMX8QM_USDHC1_VSELECT_CONN_USDHC1_VSELECT	IMX8QM_USDHC1_VSELECT 0
+#define IMX8QM_USDHC1_VSELECT_LSIO_GPIO4_IO08	IMX8QM_USDHC1_VSELECT 3
+#define IMX8QM_USDHC2_RESET_B_CONN_USDHC2_RESET_B	IMX8QM_USDHC2_RESET_B 0
+#define IMX8QM_USDHC2_RESET_B_LSIO_GPIO4_IO09	IMX8QM_USDHC2_RESET_B 3
+#define IMX8QM_USDHC2_VSELECT_CONN_USDHC2_VSELECT	IMX8QM_USDHC2_VSELECT 0
+#define IMX8QM_USDHC2_VSELECT_LSIO_GPIO4_IO10	IMX8QM_USDHC2_VSELECT 3
+#define IMX8QM_USDHC2_WP_CONN_USDHC2_WP	IMX8QM_USDHC2_WP 0
+#define IMX8QM_USDHC2_WP_LSIO_GPIO4_IO11	IMX8QM_USDHC2_WP 3
+#define IMX8QM_USDHC2_CD_B_CONN_USDHC2_CD_B	IMX8QM_USDHC2_CD_B 0
+#define IMX8QM_USDHC2_CD_B_LSIO_GPIO4_IO12	IMX8QM_USDHC2_CD_B 3
+#define IMX8QM_ENET0_MDIO_CONN_ENET0_MDIO	IMX8QM_ENET0_MDIO 0
+#define IMX8QM_ENET0_MDIO_DMA_I2C4_SDA	IMX8QM_ENET0_MDIO 1
+#define IMX8QM_ENET0_MDIO_LSIO_GPIO4_IO13	IMX8QM_ENET0_MDIO 3
+#define IMX8QM_ENET0_MDC_CONN_ENET0_MDC	IMX8QM_ENET0_MDC 0
+#define IMX8QM_ENET0_MDC_DMA_I2C4_SCL	IMX8QM_ENET0_MDC 1
+#define IMX8QM_ENET0_MDC_LSIO_GPIO4_IO14	IMX8QM_ENET0_MDC 3
+#define IMX8QM_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M	IMX8QM_ENET0_REFCLK_125M_25M 0
+#define IMX8QM_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS	IMX8QM_ENET0_REFCLK_125M_25M 1
+#define IMX8QM_ENET0_REFCLK_125M_25M_LSIO_GPIO4_IO15	IMX8QM_ENET0_REFCLK_125M_25M 3
+#define IMX8QM_ENET1_REFCLK_125M_25M_CONN_ENET1_REFCLK_125M_25M	IMX8QM_ENET1_REFCLK_125M_25M 0
+#define IMX8QM_ENET1_REFCLK_125M_25M_CONN_ENET1_PPS	IMX8QM_ENET1_REFCLK_125M_25M 1
+#define IMX8QM_ENET1_REFCLK_125M_25M_LSIO_GPIO4_IO16	IMX8QM_ENET1_REFCLK_125M_25M 3
+#define IMX8QM_ENET1_MDIO_CONN_ENET1_MDIO	IMX8QM_ENET1_MDIO 0
+#define IMX8QM_ENET1_MDIO_DMA_I2C4_SDA	IMX8QM_ENET1_MDIO 1
+#define IMX8QM_ENET1_MDIO_LSIO_GPIO4_IO17	IMX8QM_ENET1_MDIO 3
+#define IMX8QM_ENET1_MDC_CONN_ENET1_MDC	IMX8QM_ENET1_MDC 0
+#define IMX8QM_ENET1_MDC_DMA_I2C4_SCL	IMX8QM_ENET1_MDC 1
+#define IMX8QM_ENET1_MDC_LSIO_GPIO4_IO18	IMX8QM_ENET1_MDC 3
+#define IMX8QM_QSPI1A_SS0_B_LSIO_QSPI1A_SS0_B	IMX8QM_QSPI1A_SS0_B 0
+#define IMX8QM_QSPI1A_SS0_B_LSIO_GPIO4_IO19	IMX8QM_QSPI1A_SS0_B 3
+#define IMX8QM_QSPI1A_SS1_B_LSIO_QSPI1A_SS1_B	IMX8QM_QSPI1A_SS1_B 0
+#define IMX8QM_QSPI1A_SS1_B_LSIO_QSPI1A_SCLK2	IMX8QM_QSPI1A_SS1_B 1
+#define IMX8QM_QSPI1A_SS1_B_LSIO_GPIO4_IO20	IMX8QM_QSPI1A_SS1_B 3
+#define IMX8QM_QSPI1A_SCLK_LSIO_QSPI1A_SCLK	IMX8QM_QSPI1A_SCLK 0
+#define IMX8QM_QSPI1A_SCLK_LSIO_GPIO4_IO21	IMX8QM_QSPI1A_SCLK 3
+#define IMX8QM_QSPI1A_DQS_LSIO_QSPI1A_DQS	IMX8QM_QSPI1A_DQS 0
+#define IMX8QM_QSPI1A_DQS_LSIO_GPIO4_IO22	IMX8QM_QSPI1A_DQS 3
+#define IMX8QM_QSPI1A_DATA3_LSIO_QSPI1A_DATA3	IMX8QM_QSPI1A_DATA3 0
+#define IMX8QM_QSPI1A_DATA3_DMA_I2C1_SDA	IMX8QM_QSPI1A_DATA3 1
+#define IMX8QM_QSPI1A_DATA3_CONN_USB_OTG1_OC	IMX8QM_QSPI1A_DATA3 2
+#define IMX8QM_QSPI1A_DATA3_LSIO_GPIO4_IO23	IMX8QM_QSPI1A_DATA3 3
+#define IMX8QM_QSPI1A_DATA2_LSIO_QSPI1A_DATA2	IMX8QM_QSPI1A_DATA2 0
+#define IMX8QM_QSPI1A_DATA2_DMA_I2C1_SCL	IMX8QM_QSPI1A_DATA2 1
+#define IMX8QM_QSPI1A_DATA2_CONN_USB_OTG2_PWR	IMX8QM_QSPI1A_DATA2 2
+#define IMX8QM_QSPI1A_DATA2_LSIO_GPIO4_IO24	IMX8QM_QSPI1A_DATA2 3
+#define IMX8QM_QSPI1A_DATA1_LSIO_QSPI1A_DATA1	IMX8QM_QSPI1A_DATA1 0
+#define IMX8QM_QSPI1A_DATA1_DMA_I2C1_SDA	IMX8QM_QSPI1A_DATA1 1
+#define IMX8QM_QSPI1A_DATA1_CONN_USB_OTG2_OC	IMX8QM_QSPI1A_DATA1 2
+#define IMX8QM_QSPI1A_DATA1_LSIO_GPIO4_IO25	IMX8QM_QSPI1A_DATA1 3
+#define IMX8QM_QSPI1A_DATA0_LSIO_QSPI1A_DATA0	IMX8QM_QSPI1A_DATA0 0
+#define IMX8QM_QSPI1A_DATA0_LSIO_GPIO4_IO26	IMX8QM_QSPI1A_DATA0 3
+#define IMX8QM_QSPI0A_DATA0_LSIO_QSPI0A_DATA0	IMX8QM_QSPI0A_DATA0 0
+#define IMX8QM_QSPI0A_DATA1_LSIO_QSPI0A_DATA1	IMX8QM_QSPI0A_DATA1 0
+#define IMX8QM_QSPI0A_DATA2_LSIO_QSPI0A_DATA2	IMX8QM_QSPI0A_DATA2 0
+#define IMX8QM_QSPI0A_DATA3_LSIO_QSPI0A_DATA3	IMX8QM_QSPI0A_DATA3 0
+#define IMX8QM_QSPI0A_DQS_LSIO_QSPI0A_DQS	IMX8QM_QSPI0A_DQS 0
+#define IMX8QM_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B	IMX8QM_QSPI0A_SS0_B 0
+#define IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B	IMX8QM_QSPI0A_SS1_B 0
+#define IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SCLK2	IMX8QM_QSPI0A_SS1_B 1
+#define IMX8QM_QSPI0A_SCLK_LSIO_QSPI0A_SCLK	IMX8QM_QSPI0A_SCLK 0
+#define IMX8QM_QSPI0B_SCLK_LSIO_QSPI0B_SCLK	IMX8QM_QSPI0B_SCLK 0
+#define IMX8QM_QSPI0B_DATA0_LSIO_QSPI0B_DATA0	IMX8QM_QSPI0B_DATA0 0
+#define IMX8QM_QSPI0B_DATA1_LSIO_QSPI0B_DATA1	IMX8QM_QSPI0B_DATA1 0
+#define IMX8QM_QSPI0B_DATA2_LSIO_QSPI0B_DATA2	IMX8QM_QSPI0B_DATA2 0
+#define IMX8QM_QSPI0B_DATA3_LSIO_QSPI0B_DATA3	IMX8QM_QSPI0B_DATA3 0
+#define IMX8QM_QSPI0B_DQS_LSIO_QSPI0B_DQS	IMX8QM_QSPI0B_DQS 0
+#define IMX8QM_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B	IMX8QM_QSPI0B_SS0_B 0
+#define IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B	IMX8QM_QSPI0B_SS1_B 0
+#define IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SCLK2	IMX8QM_QSPI0B_SS1_B 1
+#define IMX8QM_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B	IMX8QM_PCIE_CTRL0_CLKREQ_B 0
+#define IMX8QM_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO27	IMX8QM_PCIE_CTRL0_CLKREQ_B 3
+#define IMX8QM_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B	IMX8QM_PCIE_CTRL0_WAKE_B 0
+#define IMX8QM_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO28	IMX8QM_PCIE_CTRL0_WAKE_B 3
+#define IMX8QM_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B	IMX8QM_PCIE_CTRL0_PERST_B 0
+#define IMX8QM_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO29	IMX8QM_PCIE_CTRL0_PERST_B 3
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_HSIO_PCIE1_CLKREQ_B	IMX8QM_PCIE_CTRL1_CLKREQ_B 0
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_DMA_I2C1_SDA	IMX8QM_PCIE_CTRL1_CLKREQ_B 1
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_CONN_USB_OTG2_OC	IMX8QM_PCIE_CTRL1_CLKREQ_B 2
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_LSIO_GPIO4_IO30	IMX8QM_PCIE_CTRL1_CLKREQ_B 3
+#define IMX8QM_PCIE_CTRL1_WAKE_B_HSIO_PCIE1_WAKE_B	IMX8QM_PCIE_CTRL1_WAKE_B 0
+#define IMX8QM_PCIE_CTRL1_WAKE_B_DMA_I2C1_SCL	IMX8QM_PCIE_CTRL1_WAKE_B 1
+#define IMX8QM_PCIE_CTRL1_WAKE_B_CONN_USB_OTG2_PWR	IMX8QM_PCIE_CTRL1_WAKE_B 2
+#define IMX8QM_PCIE_CTRL1_WAKE_B_LSIO_GPIO4_IO31	IMX8QM_PCIE_CTRL1_WAKE_B 3
+#define IMX8QM_PCIE_CTRL1_PERST_B_HSIO_PCIE1_PERST_B	IMX8QM_PCIE_CTRL1_PERST_B 0
+#define IMX8QM_PCIE_CTRL1_PERST_B_DMA_I2C1_SCL	IMX8QM_PCIE_CTRL1_PERST_B 1
+#define IMX8QM_PCIE_CTRL1_PERST_B_CONN_USB_OTG1_PWR	IMX8QM_PCIE_CTRL1_PERST_B 2
+#define IMX8QM_PCIE_CTRL1_PERST_B_LSIO_GPIO5_IO00	IMX8QM_PCIE_CTRL1_PERST_B 3
+#define IMX8QM_USB_HSIC0_DATA_CONN_USB_HSIC0_DATA	IMX8QM_USB_HSIC0_DATA 0
+#define IMX8QM_USB_HSIC0_DATA_DMA_I2C1_SDA	IMX8QM_USB_HSIC0_DATA 1
+#define IMX8QM_USB_HSIC0_DATA_LSIO_GPIO5_IO01	IMX8QM_USB_HSIC0_DATA 3
+#define IMX8QM_USB_HSIC0_STROBE_CONN_USB_HSIC0_STROBE	IMX8QM_USB_HSIC0_STROBE 0
+#define IMX8QM_USB_HSIC0_STROBE_DMA_I2C1_SCL	IMX8QM_USB_HSIC0_STROBE 1
+#define IMX8QM_USB_HSIC0_STROBE_LSIO_GPIO5_IO02	IMX8QM_USB_HSIC0_STROBE 3
+#define IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK	IMX8QM_EMMC0_CLK 0
+#define IMX8QM_EMMC0_CLK_CONN_NAND_READY_B	IMX8QM_EMMC0_CLK 1
+#define IMX8QM_EMMC0_CMD_CONN_EMMC0_CMD	IMX8QM_EMMC0_CMD 0
+#define IMX8QM_EMMC0_CMD_CONN_NAND_DQS	IMX8QM_EMMC0_CMD 1
+#define IMX8QM_EMMC0_CMD_AUD_MQS_R	IMX8QM_EMMC0_CMD 2
+#define IMX8QM_EMMC0_CMD_LSIO_GPIO5_IO03	IMX8QM_EMMC0_CMD 3
+#define IMX8QM_EMMC0_DATA0_CONN_EMMC0_DATA0	IMX8QM_EMMC0_DATA0 0
+#define IMX8QM_EMMC0_DATA0_CONN_NAND_DATA00	IMX8QM_EMMC0_DATA0 1
+#define IMX8QM_EMMC0_DATA0_LSIO_GPIO5_IO04	IMX8QM_EMMC0_DATA0 3
+#define IMX8QM_EMMC0_DATA1_CONN_EMMC0_DATA1	IMX8QM_EMMC0_DATA1 0
+#define IMX8QM_EMMC0_DATA1_CONN_NAND_DATA01	IMX8QM_EMMC0_DATA1 1
+#define IMX8QM_EMMC0_DATA1_LSIO_GPIO5_IO05	IMX8QM_EMMC0_DATA1 3
+#define IMX8QM_EMMC0_DATA2_CONN_EMMC0_DATA2	IMX8QM_EMMC0_DATA2 0
+#define IMX8QM_EMMC0_DATA2_CONN_NAND_DATA02	IMX8QM_EMMC0_DATA2 1
+#define IMX8QM_EMMC0_DATA2_LSIO_GPIO5_IO06	IMX8QM_EMMC0_DATA2 3
+#define IMX8QM_EMMC0_DATA3_CONN_EMMC0_DATA3	IMX8QM_EMMC0_DATA3 0
+#define IMX8QM_EMMC0_DATA3_CONN_NAND_DATA03	IMX8QM_EMMC0_DATA3 1
+#define IMX8QM_EMMC0_DATA3_LSIO_GPIO5_IO07	IMX8QM_EMMC0_DATA3 3
+#define IMX8QM_EMMC0_DATA4_CONN_EMMC0_DATA4	IMX8QM_EMMC0_DATA4 0
+#define IMX8QM_EMMC0_DATA4_CONN_NAND_DATA04	IMX8QM_EMMC0_DATA4 1
+#define IMX8QM_EMMC0_DATA4_LSIO_GPIO5_IO08	IMX8QM_EMMC0_DATA4 3
+#define IMX8QM_EMMC0_DATA5_CONN_EMMC0_DATA5	IMX8QM_EMMC0_DATA5 0
+#define IMX8QM_EMMC0_DATA5_CONN_NAND_DATA05	IMX8QM_EMMC0_DATA5 1
+#define IMX8QM_EMMC0_DATA5_LSIO_GPIO5_IO09	IMX8QM_EMMC0_DATA5 3
+#define IMX8QM_EMMC0_DATA6_CONN_EMMC0_DATA6	IMX8QM_EMMC0_DATA6 0
+#define IMX8QM_EMMC0_DATA6_CONN_NAND_DATA06	IMX8QM_EMMC0_DATA6 1
+#define IMX8QM_EMMC0_DATA6_LSIO_GPIO5_IO10	IMX8QM_EMMC0_DATA6 3
+#define IMX8QM_EMMC0_DATA7_CONN_EMMC0_DATA7	IMX8QM_EMMC0_DATA7 0
+#define IMX8QM_EMMC0_DATA7_CONN_NAND_DATA07	IMX8QM_EMMC0_DATA7 1
+#define IMX8QM_EMMC0_DATA7_LSIO_GPIO5_IO11	IMX8QM_EMMC0_DATA7 3
+#define IMX8QM_EMMC0_STROBE_CONN_EMMC0_STROBE	IMX8QM_EMMC0_STROBE 0
+#define IMX8QM_EMMC0_STROBE_CONN_NAND_CLE	IMX8QM_EMMC0_STROBE 1
+#define IMX8QM_EMMC0_STROBE_LSIO_GPIO5_IO12	IMX8QM_EMMC0_STROBE 3
+#define IMX8QM_EMMC0_RESET_B_CONN_EMMC0_RESET_B	IMX8QM_EMMC0_RESET_B 0
+#define IMX8QM_EMMC0_RESET_B_CONN_NAND_WP_B	IMX8QM_EMMC0_RESET_B 1
+#define IMX8QM_EMMC0_RESET_B_CONN_USDHC1_VSELECT	IMX8QM_EMMC0_RESET_B 2
+#define IMX8QM_EMMC0_RESET_B_LSIO_GPIO5_IO13	IMX8QM_EMMC0_RESET_B 3
+#define IMX8QM_USDHC1_CLK_CONN_USDHC1_CLK	IMX8QM_USDHC1_CLK 0
+#define IMX8QM_USDHC1_CLK_AUD_MQS_R	IMX8QM_USDHC1_CLK 1
+#define IMX8QM_USDHC1_CMD_CONN_USDHC1_CMD	IMX8QM_USDHC1_CMD 0
+#define IMX8QM_USDHC1_CMD_AUD_MQS_L	IMX8QM_USDHC1_CMD 1
+#define IMX8QM_USDHC1_CMD_LSIO_GPIO5_IO14	IMX8QM_USDHC1_CMD 3
+#define IMX8QM_USDHC1_DATA0_CONN_USDHC1_DATA0	IMX8QM_USDHC1_DATA0 0
+#define IMX8QM_USDHC1_DATA0_CONN_NAND_RE_N	IMX8QM_USDHC1_DATA0 1
+#define IMX8QM_USDHC1_DATA0_LSIO_GPIO5_IO15	IMX8QM_USDHC1_DATA0 3
+#define IMX8QM_USDHC1_DATA1_CONN_USDHC1_DATA1	IMX8QM_USDHC1_DATA1 0
+#define IMX8QM_USDHC1_DATA1_CONN_NAND_RE_P	IMX8QM_USDHC1_DATA1 1
+#define IMX8QM_USDHC1_DATA1_LSIO_GPIO5_IO16	IMX8QM_USDHC1_DATA1 3
+#define IMX8QM_USDHC1_DATA2_CONN_USDHC1_DATA2	IMX8QM_USDHC1_DATA2 0
+#define IMX8QM_USDHC1_DATA2_CONN_NAND_DQS_N	IMX8QM_USDHC1_DATA2 1
+#define IMX8QM_USDHC1_DATA2_LSIO_GPIO5_IO17	IMX8QM_USDHC1_DATA2 3
+#define IMX8QM_USDHC1_DATA3_CONN_USDHC1_DATA3	IMX8QM_USDHC1_DATA3 0
+#define IMX8QM_USDHC1_DATA3_CONN_NAND_DQS_P	IMX8QM_USDHC1_DATA3 1
+#define IMX8QM_USDHC1_DATA3_LSIO_GPIO5_IO18	IMX8QM_USDHC1_DATA3 3
+#define IMX8QM_USDHC1_DATA4_CONN_USDHC1_DATA4	IMX8QM_USDHC1_DATA4 0
+#define IMX8QM_USDHC1_DATA4_CONN_NAND_CE0_B	IMX8QM_USDHC1_DATA4 1
+#define IMX8QM_USDHC1_DATA4_AUD_MQS_R	IMX8QM_USDHC1_DATA4 2
+#define IMX8QM_USDHC1_DATA4_LSIO_GPIO5_IO19	IMX8QM_USDHC1_DATA4 3
+#define IMX8QM_USDHC1_DATA5_CONN_USDHC1_DATA5	IMX8QM_USDHC1_DATA5 0
+#define IMX8QM_USDHC1_DATA5_CONN_NAND_RE_B	IMX8QM_USDHC1_DATA5 1
+#define IMX8QM_USDHC1_DATA5_AUD_MQS_L	IMX8QM_USDHC1_DATA5 2
+#define IMX8QM_USDHC1_DATA5_LSIO_GPIO5_IO20	IMX8QM_USDHC1_DATA5 3
+#define IMX8QM_USDHC1_DATA6_CONN_USDHC1_DATA6	IMX8QM_USDHC1_DATA6 0
+#define IMX8QM_USDHC1_DATA6_CONN_NAND_WE_B	IMX8QM_USDHC1_DATA6 1
+#define IMX8QM_USDHC1_DATA6_CONN_USDHC1_WP	IMX8QM_USDHC1_DATA6 2
+#define IMX8QM_USDHC1_DATA6_LSIO_GPIO5_IO21	IMX8QM_USDHC1_DATA6 3
+#define IMX8QM_USDHC1_DATA7_CONN_USDHC1_DATA7	IMX8QM_USDHC1_DATA7 0
+#define IMX8QM_USDHC1_DATA7_CONN_NAND_ALE	IMX8QM_USDHC1_DATA7 1
+#define IMX8QM_USDHC1_DATA7_CONN_USDHC1_CD_B	IMX8QM_USDHC1_DATA7 2
+#define IMX8QM_USDHC1_DATA7_LSIO_GPIO5_IO22	IMX8QM_USDHC1_DATA7 3
+#define IMX8QM_USDHC1_STROBE_CONN_USDHC1_STROBE	IMX8QM_USDHC1_STROBE 0
+#define IMX8QM_USDHC1_STROBE_CONN_NAND_CE1_B	IMX8QM_USDHC1_STROBE 1
+#define IMX8QM_USDHC1_STROBE_CONN_USDHC1_RESET_B	IMX8QM_USDHC1_STROBE 2
+#define IMX8QM_USDHC1_STROBE_LSIO_GPIO5_IO23	IMX8QM_USDHC1_STROBE 3
+#define IMX8QM_USDHC2_CLK_CONN_USDHC2_CLK	IMX8QM_USDHC2_CLK 0
+#define IMX8QM_USDHC2_CLK_AUD_MQS_R	IMX8QM_USDHC2_CLK 1
+#define IMX8QM_USDHC2_CLK_LSIO_GPIO5_IO24	IMX8QM_USDHC2_CLK 3
+#define IMX8QM_USDHC2_CMD_CONN_USDHC2_CMD	IMX8QM_USDHC2_CMD 0
+#define IMX8QM_USDHC2_CMD_AUD_MQS_L	IMX8QM_USDHC2_CMD 1
+#define IMX8QM_USDHC2_CMD_LSIO_GPIO5_IO25	IMX8QM_USDHC2_CMD 3
+#define IMX8QM_USDHC2_DATA0_CONN_USDHC2_DATA0	IMX8QM_USDHC2_DATA0 0
+#define IMX8QM_USDHC2_DATA0_DMA_UART4_RX	IMX8QM_USDHC2_DATA0 1
+#define IMX8QM_USDHC2_DATA0_LSIO_GPIO5_IO26	IMX8QM_USDHC2_DATA0 3
+#define IMX8QM_USDHC2_DATA1_CONN_USDHC2_DATA1	IMX8QM_USDHC2_DATA1 0
+#define IMX8QM_USDHC2_DATA1_DMA_UART4_TX	IMX8QM_USDHC2_DATA1 1
+#define IMX8QM_USDHC2_DATA1_LSIO_GPIO5_IO27	IMX8QM_USDHC2_DATA1 3
+#define IMX8QM_USDHC2_DATA2_CONN_USDHC2_DATA2	IMX8QM_USDHC2_DATA2 0
+#define IMX8QM_USDHC2_DATA2_DMA_UART4_CTS_B	IMX8QM_USDHC2_DATA2 1
+#define IMX8QM_USDHC2_DATA2_LSIO_GPIO5_IO28	IMX8QM_USDHC2_DATA2 3
+#define IMX8QM_USDHC2_DATA3_CONN_USDHC2_DATA3	IMX8QM_USDHC2_DATA3 0
+#define IMX8QM_USDHC2_DATA3_DMA_UART4_RTS_B	IMX8QM_USDHC2_DATA3 1
+#define IMX8QM_USDHC2_DATA3_LSIO_GPIO5_IO29	IMX8QM_USDHC2_DATA3 3
+#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC	IMX8QM_ENET0_RGMII_TXC 0
+#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT	IMX8QM_ENET0_RGMII_TXC 1
+#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN	IMX8QM_ENET0_RGMII_TXC 2
+#define IMX8QM_ENET0_RGMII_TXC_LSIO_GPIO5_IO30	IMX8QM_ENET0_RGMII_TXC 3
+#define IMX8QM_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL	IMX8QM_ENET0_RGMII_TX_CTL 0
+#define IMX8QM_ENET0_RGMII_TX_CTL_LSIO_GPIO5_IO31	IMX8QM_ENET0_RGMII_TX_CTL 3
+#define IMX8QM_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0	IMX8QM_ENET0_RGMII_TXD0 0
+#define IMX8QM_ENET0_RGMII_TXD0_LSIO_GPIO6_IO00	IMX8QM_ENET0_RGMII_TXD0 3
+#define IMX8QM_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1	IMX8QM_ENET0_RGMII_TXD1 0
+#define IMX8QM_ENET0_RGMII_TXD1_LSIO_GPIO6_IO01	IMX8QM_ENET0_RGMII_TXD1 3
+#define IMX8QM_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2	IMX8QM_ENET0_RGMII_TXD2 0
+#define IMX8QM_ENET0_RGMII_TXD2_DMA_UART3_TX	IMX8QM_ENET0_RGMII_TXD2 1
+#define IMX8QM_ENET0_RGMII_TXD2_VPU_TSI_S1_VID	IMX8QM_ENET0_RGMII_TXD2 2
+#define IMX8QM_ENET0_RGMII_TXD2_LSIO_GPIO6_IO02	IMX8QM_ENET0_RGMII_TXD2 3
+#define IMX8QM_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3	IMX8QM_ENET0_RGMII_TXD3 0
+#define IMX8QM_ENET0_RGMII_TXD3_DMA_UART3_RTS_B	IMX8QM_ENET0_RGMII_TXD3 1
+#define IMX8QM_ENET0_RGMII_TXD3_VPU_TSI_S1_SYNC	IMX8QM_ENET0_RGMII_TXD3 2
+#define IMX8QM_ENET0_RGMII_TXD3_LSIO_GPIO6_IO03	IMX8QM_ENET0_RGMII_TXD3 3
+#define IMX8QM_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC	IMX8QM_ENET0_RGMII_RXC 0
+#define IMX8QM_ENET0_RGMII_RXC_DMA_UART3_CTS_B	IMX8QM_ENET0_RGMII_RXC 1
+#define IMX8QM_ENET0_RGMII_RXC_VPU_TSI_S1_DATA	IMX8QM_ENET0_RGMII_RXC 2
+#define IMX8QM_ENET0_RGMII_RXC_LSIO_GPIO6_IO04	IMX8QM_ENET0_RGMII_RXC 3
+#define IMX8QM_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL	IMX8QM_ENET0_RGMII_RX_CTL 0
+#define IMX8QM_ENET0_RGMII_RX_CTL_VPU_TSI_S0_VID	IMX8QM_ENET0_RGMII_RX_CTL 2
+#define IMX8QM_ENET0_RGMII_RX_CTL_LSIO_GPIO6_IO05	IMX8QM_ENET0_RGMII_RX_CTL 3
+#define IMX8QM_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0	IMX8QM_ENET0_RGMII_RXD0 0
+#define IMX8QM_ENET0_RGMII_RXD0_VPU_TSI_S0_SYNC	IMX8QM_ENET0_RGMII_RXD0 2
+#define IMX8QM_ENET0_RGMII_RXD0_LSIO_GPIO6_IO06	IMX8QM_ENET0_RGMII_RXD0 3
+#define IMX8QM_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1	IMX8QM_ENET0_RGMII_RXD1 0
+#define IMX8QM_ENET0_RGMII_RXD1_VPU_TSI_S0_DATA	IMX8QM_ENET0_RGMII_RXD1 2
+#define IMX8QM_ENET0_RGMII_RXD1_LSIO_GPIO6_IO07	IMX8QM_ENET0_RGMII_RXD1 3
+#define IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2	IMX8QM_ENET0_RGMII_RXD2 0
+#define IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER	IMX8QM_ENET0_RGMII_RXD2 1
+#define IMX8QM_ENET0_RGMII_RXD2_VPU_TSI_S0_CLK	IMX8QM_ENET0_RGMII_RXD2 2
+#define IMX8QM_ENET0_RGMII_RXD2_LSIO_GPIO6_IO08	IMX8QM_ENET0_RGMII_RXD2 3
+#define IMX8QM_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3	IMX8QM_ENET0_RGMII_RXD3 0
+#define IMX8QM_ENET0_RGMII_RXD3_DMA_UART3_RX	IMX8QM_ENET0_RGMII_RXD3 1
+#define IMX8QM_ENET0_RGMII_RXD3_VPU_TSI_S1_CLK	IMX8QM_ENET0_RGMII_RXD3 2
+#define IMX8QM_ENET0_RGMII_RXD3_LSIO_GPIO6_IO09	IMX8QM_ENET0_RGMII_RXD3 3
+#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RGMII_TXC	IMX8QM_ENET1_RGMII_TXC 0
+#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RCLK50M_OUT	IMX8QM_ENET1_RGMII_TXC 1
+#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RCLK50M_IN	IMX8QM_ENET1_RGMII_TXC 2
+#define IMX8QM_ENET1_RGMII_TXC_LSIO_GPIO6_IO10	IMX8QM_ENET1_RGMII_TXC 3
+#define IMX8QM_ENET1_RGMII_TX_CTL_CONN_ENET1_RGMII_TX_CTL	IMX8QM_ENET1_RGMII_TX_CTL 0
+#define IMX8QM_ENET1_RGMII_TX_CTL_LSIO_GPIO6_IO11	IMX8QM_ENET1_RGMII_TX_CTL 3
+#define IMX8QM_ENET1_RGMII_TXD0_CONN_ENET1_RGMII_TXD0	IMX8QM_ENET1_RGMII_TXD0 0
+#define IMX8QM_ENET1_RGMII_TXD0_LSIO_GPIO6_IO12	IMX8QM_ENET1_RGMII_TXD0 3
+#define IMX8QM_ENET1_RGMII_TXD1_CONN_ENET1_RGMII_TXD1	IMX8QM_ENET1_RGMII_TXD1 0
+#define IMX8QM_ENET1_RGMII_TXD1_LSIO_GPIO6_IO13	IMX8QM_ENET1_RGMII_TXD1 3
+#define IMX8QM_ENET1_RGMII_TXD2_CONN_ENET1_RGMII_TXD2	IMX8QM_ENET1_RGMII_TXD2 0
+#define IMX8QM_ENET1_RGMII_TXD2_DMA_UART3_TX	IMX8QM_ENET1_RGMII_TXD2 1
+#define IMX8QM_ENET1_RGMII_TXD2_VPU_TSI_S1_VID	IMX8QM_ENET1_RGMII_TXD2 2
+#define IMX8QM_ENET1_RGMII_TXD2_LSIO_GPIO6_IO14	IMX8QM_ENET1_RGMII_TXD2 3
+#define IMX8QM_ENET1_RGMII_TXD3_CONN_ENET1_RGMII_TXD3	IMX8QM_ENET1_RGMII_TXD3 0
+#define IMX8QM_ENET1_RGMII_TXD3_DMA_UART3_RTS_B	IMX8QM_ENET1_RGMII_TXD3 1
+#define IMX8QM_ENET1_RGMII_TXD3_VPU_TSI_S1_SYNC	IMX8QM_ENET1_RGMII_TXD3 2
+#define IMX8QM_ENET1_RGMII_TXD3_LSIO_GPIO6_IO15	IMX8QM_ENET1_RGMII_TXD3 3
+#define IMX8QM_ENET1_RGMII_RXC_CONN_ENET1_RGMII_RXC	IMX8QM_ENET1_RGMII_RXC 0
+#define IMX8QM_ENET1_RGMII_RXC_DMA_UART3_CTS_B	IMX8QM_ENET1_RGMII_RXC 1
+#define IMX8QM_ENET1_RGMII_RXC_VPU_TSI_S1_DATA	IMX8QM_ENET1_RGMII_RXC 2
+#define IMX8QM_ENET1_RGMII_RXC_LSIO_GPIO6_IO16	IMX8QM_ENET1_RGMII_RXC 3
+#define IMX8QM_ENET1_RGMII_RX_CTL_CONN_ENET1_RGMII_RX_CTL	IMX8QM_ENET1_RGMII_RX_CTL 0
+#define IMX8QM_ENET1_RGMII_RX_CTL_VPU_TSI_S0_VID	IMX8QM_ENET1_RGMII_RX_CTL 2
+#define IMX8QM_ENET1_RGMII_RX_CTL_LSIO_GPIO6_IO17	IMX8QM_ENET1_RGMII_RX_CTL 3
+#define IMX8QM_ENET1_RGMII_RXD0_CONN_ENET1_RGMII_RXD0	IMX8QM_ENET1_RGMII_RXD0 0
+#define IMX8QM_ENET1_RGMII_RXD0_VPU_TSI_S0_SYNC	IMX8QM_ENET1_RGMII_RXD0 2
+#define IMX8QM_ENET1_RGMII_RXD0_LSIO_GPIO6_IO18	IMX8QM_ENET1_RGMII_RXD0 3
+#define IMX8QM_ENET1_RGMII_RXD1_CONN_ENET1_RGMII_RXD1	IMX8QM_ENET1_RGMII_RXD1 0
+#define IMX8QM_ENET1_RGMII_RXD1_VPU_TSI_S0_DATA	IMX8QM_ENET1_RGMII_RXD1 2
+#define IMX8QM_ENET1_RGMII_RXD1_LSIO_GPIO6_IO19	IMX8QM_ENET1_RGMII_RXD1 3
+#define IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RGMII_RXD2	IMX8QM_ENET1_RGMII_RXD2 0
+#define IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RMII_RX_ER	IMX8QM_ENET1_RGMII_RXD2 1
+#define IMX8QM_ENET1_RGMII_RXD2_VPU_TSI_S0_CLK	IMX8QM_ENET1_RGMII_RXD2 2
+#define IMX8QM_ENET1_RGMII_RXD2_LSIO_GPIO6_IO20	IMX8QM_ENET1_RGMII_RXD2 3
+#define IMX8QM_ENET1_RGMII_RXD3_CONN_ENET1_RGMII_RXD3	IMX8QM_ENET1_RGMII_RXD3 0
+#define IMX8QM_ENET1_RGMII_RXD3_DMA_UART3_RX	IMX8QM_ENET1_RGMII_RXD3 1
+#define IMX8QM_ENET1_RGMII_RXD3_VPU_TSI_S1_CLK	IMX8QM_ENET1_RGMII_RXD3 2
+#define IMX8QM_ENET1_RGMII_RXD3_LSIO_GPIO6_IO21	IMX8QM_ENET1_RGMII_RXD3 3
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB_PAD	IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB 0
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA_PAD	IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA 0
+
+#endif /* _IMX8QM_PADS_H */
diff --git a/include/dt-bindings/pinctrl/pads-imx8qxp.h b/include/dt-bindings/pinctrl/pads-imx8qxp.h
new file mode 100644
index 0000000..fbfee7e
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pads-imx8qxp.h
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ */
+
+#ifndef _IMX8QXP_PADS_H
+#define _IMX8QXP_PADS_H
+
+/* pin id */
+#define IMX8QXP_PCIE_CTRL0_PERST_B	0
+#define IMX8QXP_PCIE_CTRL0_CLKREQ_B	1
+#define IMX8QXP_PCIE_CTRL0_WAKE_B	2
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_PCIESEP	3
+#define IMX8QXP_USB_SS3_TC0	4
+#define IMX8QXP_USB_SS3_TC1	5
+#define IMX8QXP_USB_SS3_TC2	6
+#define IMX8QXP_USB_SS3_TC3	7
+#define IMX8QXP_COMP_CTL_GPIO_3V3_USB3IO	8
+#define IMX8QXP_EMMC0_CLK	9
+#define IMX8QXP_EMMC0_CMD	10
+#define IMX8QXP_EMMC0_DATA0	11
+#define IMX8QXP_EMMC0_DATA1	12
+#define IMX8QXP_EMMC0_DATA2	13
+#define IMX8QXP_EMMC0_DATA3	14
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_SD1FIX0	15
+#define IMX8QXP_EMMC0_DATA4	16
+#define IMX8QXP_EMMC0_DATA5	17
+#define IMX8QXP_EMMC0_DATA6	18
+#define IMX8QXP_EMMC0_DATA7	19
+#define IMX8QXP_EMMC0_STROBE	20
+#define IMX8QXP_EMMC0_RESET_B	21
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_SD1FIX1	22
+#define IMX8QXP_USDHC1_RESET_B	23
+#define IMX8QXP_USDHC1_VSELECT	24
+#define IMX8QXP_CTL_NAND_RE_P_N	25
+#define IMX8QXP_USDHC1_WP	26
+#define IMX8QXP_USDHC1_CD_B	27
+#define IMX8QXP_CTL_NAND_DQS_P_N	28
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_VSELSEP	29
+#define IMX8QXP_USDHC1_CLK	30
+#define IMX8QXP_USDHC1_CMD	31
+#define IMX8QXP_USDHC1_DATA0	32
+#define IMX8QXP_USDHC1_DATA1	33
+#define IMX8QXP_USDHC1_DATA2	34
+#define IMX8QXP_USDHC1_DATA3	35
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_VSEL3	36
+#define IMX8QXP_ENET0_RGMII_TXC	37
+#define IMX8QXP_ENET0_RGMII_TX_CTL	38
+#define IMX8QXP_ENET0_RGMII_TXD0	39
+#define IMX8QXP_ENET0_RGMII_TXD1	40
+#define IMX8QXP_ENET0_RGMII_TXD2	41
+#define IMX8QXP_ENET0_RGMII_TXD3	42
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0	43
+#define IMX8QXP_ENET0_RGMII_RXC	44
+#define IMX8QXP_ENET0_RGMII_RX_CTL	45
+#define IMX8QXP_ENET0_RGMII_RXD0	46
+#define IMX8QXP_ENET0_RGMII_RXD1	47
+#define IMX8QXP_ENET0_RGMII_RXD2	48
+#define IMX8QXP_ENET0_RGMII_RXD3	49
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1	50
+#define IMX8QXP_ENET0_REFCLK_125M_25M	51
+#define IMX8QXP_ENET0_MDIO	52
+#define IMX8QXP_ENET0_MDC	53
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIOCT	54
+#define IMX8QXP_ESAI0_FSR	55
+#define IMX8QXP_ESAI0_FST	56
+#define IMX8QXP_ESAI0_SCKR	57
+#define IMX8QXP_ESAI0_SCKT	58
+#define IMX8QXP_ESAI0_TX0	59
+#define IMX8QXP_ESAI0_TX1	60
+#define IMX8QXP_ESAI0_TX2_RX3	61
+#define IMX8QXP_ESAI0_TX3_RX2	62
+#define IMX8QXP_ESAI0_TX4_RX1	63
+#define IMX8QXP_ESAI0_TX5_RX0	64
+#define IMX8QXP_SPDIF0_RX	65
+#define IMX8QXP_SPDIF0_TX	66
+#define IMX8QXP_SPDIF0_EXT_CLK	67
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHB	68
+#define IMX8QXP_SPI3_SCK	69
+#define IMX8QXP_SPI3_SDO	70
+#define IMX8QXP_SPI3_SDI	71
+#define IMX8QXP_SPI3_CS0	72
+#define IMX8QXP_SPI3_CS1	73
+#define IMX8QXP_MCLK_IN1	74
+#define IMX8QXP_MCLK_IN0	75
+#define IMX8QXP_MCLK_OUT0	76
+#define IMX8QXP_UART1_TX	77
+#define IMX8QXP_UART1_RX	78
+#define IMX8QXP_UART1_RTS_B	79
+#define IMX8QXP_UART1_CTS_B	80
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHK	81
+#define IMX8QXP_SAI0_TXD	82
+#define IMX8QXP_SAI0_TXC	83
+#define IMX8QXP_SAI0_RXD	84
+#define IMX8QXP_SAI0_TXFS	85
+#define IMX8QXP_SAI1_RXD	86
+#define IMX8QXP_SAI1_RXC	87
+#define IMX8QXP_SAI1_RXFS	88
+#define IMX8QXP_SPI2_CS0	89
+#define IMX8QXP_SPI2_SDO	90
+#define IMX8QXP_SPI2_SDI	91
+#define IMX8QXP_SPI2_SCK	92
+#define IMX8QXP_SPI0_SCK	93
+#define IMX8QXP_SPI0_SDI	94
+#define IMX8QXP_SPI0_SDO	95
+#define IMX8QXP_SPI0_CS1	96
+#define IMX8QXP_SPI0_CS0	97
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHT	98
+#define IMX8QXP_ADC_IN1	99
+#define IMX8QXP_ADC_IN0	100
+#define IMX8QXP_ADC_IN3	101
+#define IMX8QXP_ADC_IN2	102
+#define IMX8QXP_ADC_IN5	103
+#define IMX8QXP_ADC_IN4	104
+#define IMX8QXP_FLEXCAN0_RX	105
+#define IMX8QXP_FLEXCAN0_TX	106
+#define IMX8QXP_FLEXCAN1_RX	107
+#define IMX8QXP_FLEXCAN1_TX	108
+#define IMX8QXP_FLEXCAN2_RX	109
+#define IMX8QXP_FLEXCAN2_TX	110
+#define IMX8QXP_UART0_RX	111
+#define IMX8QXP_UART0_TX	112
+#define IMX8QXP_UART2_TX	113
+#define IMX8QXP_UART2_RX	114
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIOLH	115
+#define IMX8QXP_MIPI_DSI0_I2C0_SCL	116
+#define IMX8QXP_MIPI_DSI0_I2C0_SDA	117
+#define IMX8QXP_MIPI_DSI0_GPIO0_00	118
+#define IMX8QXP_MIPI_DSI0_GPIO0_01	119
+#define IMX8QXP_MIPI_DSI1_I2C0_SCL	120
+#define IMX8QXP_MIPI_DSI1_I2C0_SDA	121
+#define IMX8QXP_MIPI_DSI1_GPIO0_00	122
+#define IMX8QXP_MIPI_DSI1_GPIO0_01	123
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO	124
+#define IMX8QXP_JTAG_TRST_B	125
+#define IMX8QXP_PMIC_I2C_SCL	126
+#define IMX8QXP_PMIC_I2C_SDA	127
+#define IMX8QXP_PMIC_INT_B	128
+#define IMX8QXP_SCU_GPIO0_00	129
+#define IMX8QXP_SCU_GPIO0_01	130
+#define IMX8QXP_SCU_PMIC_STANDBY	131
+#define IMX8QXP_SCU_BOOT_MODE0	132
+#define IMX8QXP_SCU_BOOT_MODE1	133
+#define IMX8QXP_SCU_BOOT_MODE2	134
+#define IMX8QXP_SCU_BOOT_MODE3	135
+#define IMX8QXP_CSI_D00	136
+#define IMX8QXP_CSI_D01	137
+#define IMX8QXP_CSI_D02	138
+#define IMX8QXP_CSI_D03	139
+#define IMX8QXP_CSI_D04	140
+#define IMX8QXP_CSI_D05	141
+#define IMX8QXP_CSI_D06	142
+#define IMX8QXP_CSI_D07	143
+#define IMX8QXP_CSI_HSYNC	144
+#define IMX8QXP_CSI_VSYNC	145
+#define IMX8QXP_CSI_PCLK	146
+#define IMX8QXP_CSI_MCLK	147
+#define IMX8QXP_CSI_EN	148
+#define IMX8QXP_CSI_RESET	149
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHD	150
+#define IMX8QXP_MIPI_CSI0_MCLK_OUT	151
+#define IMX8QXP_MIPI_CSI0_I2C0_SCL	152
+#define IMX8QXP_MIPI_CSI0_I2C0_SDA	153
+#define IMX8QXP_MIPI_CSI0_GPIO0_01	154
+#define IMX8QXP_MIPI_CSI0_GPIO0_00	155
+#define IMX8QXP_QSPI0A_DATA0	156
+#define IMX8QXP_QSPI0A_DATA1	157
+#define IMX8QXP_QSPI0A_DATA2	158
+#define IMX8QXP_QSPI0A_DATA3	159
+#define IMX8QXP_QSPI0A_DQS	160
+#define IMX8QXP_QSPI0A_SS0_B	161
+#define IMX8QXP_QSPI0A_SS1_B	162
+#define IMX8QXP_QSPI0A_SCLK	163
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0A	164
+#define IMX8QXP_QSPI0B_SCLK	165
+#define IMX8QXP_QSPI0B_DATA0	166
+#define IMX8QXP_QSPI0B_DATA1	167
+#define IMX8QXP_QSPI0B_DATA2	168
+#define IMX8QXP_QSPI0B_DATA3	169
+#define IMX8QXP_QSPI0B_DQS	170
+#define IMX8QXP_QSPI0B_SS0_B	171
+#define IMX8QXP_QSPI0B_SS1_B	172
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0B	173
+
+/*
+ * format: <pin_id mux_mode>
+ */
+#define IMX8QXP_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B	IMX8QXP_PCIE_CTRL0_PERST_B 0
+#define IMX8QXP_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00	IMX8QXP_PCIE_CTRL0_PERST_B 4
+#define IMX8QXP_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B	IMX8QXP_PCIE_CTRL0_CLKREQ_B 0
+#define IMX8QXP_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01	IMX8QXP_PCIE_CTRL0_CLKREQ_B 4
+#define IMX8QXP_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B	IMX8QXP_PCIE_CTRL0_WAKE_B 0
+#define IMX8QXP_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02	IMX8QXP_PCIE_CTRL0_WAKE_B 4
+#define IMX8QXP_USB_SS3_TC0_ADMA_I2C1_SCL	IMX8QXP_USB_SS3_TC0 0
+#define IMX8QXP_USB_SS3_TC0_CONN_USB_OTG1_PWR	IMX8QXP_USB_SS3_TC0 1
+#define IMX8QXP_USB_SS3_TC0_CONN_USB_OTG2_PWR	IMX8QXP_USB_SS3_TC0 2
+#define IMX8QXP_USB_SS3_TC0_LSIO_GPIO4_IO03	IMX8QXP_USB_SS3_TC0 4
+#define IMX8QXP_USB_SS3_TC1_ADMA_I2C1_SCL	IMX8QXP_USB_SS3_TC1 0
+#define IMX8QXP_USB_SS3_TC1_CONN_USB_OTG2_PWR	IMX8QXP_USB_SS3_TC1 1
+#define IMX8QXP_USB_SS3_TC1_LSIO_GPIO4_IO04	IMX8QXP_USB_SS3_TC1 4
+#define IMX8QXP_USB_SS3_TC2_ADMA_I2C1_SDA	IMX8QXP_USB_SS3_TC2 0
+#define IMX8QXP_USB_SS3_TC2_CONN_USB_OTG1_OC	IMX8QXP_USB_SS3_TC2 1
+#define IMX8QXP_USB_SS3_TC2_CONN_USB_OTG2_OC	IMX8QXP_USB_SS3_TC2 2
+#define IMX8QXP_USB_SS3_TC2_LSIO_GPIO4_IO05	IMX8QXP_USB_SS3_TC2 4
+#define IMX8QXP_USB_SS3_TC3_ADMA_I2C1_SDA	IMX8QXP_USB_SS3_TC3 0
+#define IMX8QXP_USB_SS3_TC3_CONN_USB_OTG2_OC	IMX8QXP_USB_SS3_TC3 1
+#define IMX8QXP_USB_SS3_TC3_LSIO_GPIO4_IO06	IMX8QXP_USB_SS3_TC3 4
+#define IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK	IMX8QXP_EMMC0_CLK 0
+#define IMX8QXP_EMMC0_CLK_CONN_NAND_READY_B	IMX8QXP_EMMC0_CLK 1
+#define IMX8QXP_EMMC0_CLK_LSIO_GPIO4_IO07	IMX8QXP_EMMC0_CLK 4
+#define IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD	IMX8QXP_EMMC0_CMD 0
+#define IMX8QXP_EMMC0_CMD_CONN_NAND_DQS	IMX8QXP_EMMC0_CMD 1
+#define IMX8QXP_EMMC0_CMD_LSIO_GPIO4_IO08	IMX8QXP_EMMC0_CMD 4
+#define IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0	IMX8QXP_EMMC0_DATA0 0
+#define IMX8QXP_EMMC0_DATA0_CONN_NAND_DATA00	IMX8QXP_EMMC0_DATA0 1
+#define IMX8QXP_EMMC0_DATA0_LSIO_GPIO4_IO09	IMX8QXP_EMMC0_DATA0 4
+#define IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1	IMX8QXP_EMMC0_DATA1 0
+#define IMX8QXP_EMMC0_DATA1_CONN_NAND_DATA01	IMX8QXP_EMMC0_DATA1 1
+#define IMX8QXP_EMMC0_DATA1_LSIO_GPIO4_IO10	IMX8QXP_EMMC0_DATA1 4
+#define IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2	IMX8QXP_EMMC0_DATA2 0
+#define IMX8QXP_EMMC0_DATA2_CONN_NAND_DATA02	IMX8QXP_EMMC0_DATA2 1
+#define IMX8QXP_EMMC0_DATA2_LSIO_GPIO4_IO11	IMX8QXP_EMMC0_DATA2 4
+#define IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3	IMX8QXP_EMMC0_DATA3 0
+#define IMX8QXP_EMMC0_DATA3_CONN_NAND_DATA03	IMX8QXP_EMMC0_DATA3 1
+#define IMX8QXP_EMMC0_DATA3_LSIO_GPIO4_IO12	IMX8QXP_EMMC0_DATA3 4
+#define IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4	IMX8QXP_EMMC0_DATA4 0
+#define IMX8QXP_EMMC0_DATA4_CONN_NAND_DATA04	IMX8QXP_EMMC0_DATA4 1
+#define IMX8QXP_EMMC0_DATA4_CONN_EMMC0_WP	IMX8QXP_EMMC0_DATA4 3
+#define IMX8QXP_EMMC0_DATA4_LSIO_GPIO4_IO13	IMX8QXP_EMMC0_DATA4 4
+#define IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5	IMX8QXP_EMMC0_DATA5 0
+#define IMX8QXP_EMMC0_DATA5_CONN_NAND_DATA05	IMX8QXP_EMMC0_DATA5 1
+#define IMX8QXP_EMMC0_DATA5_CONN_EMMC0_VSELECT	IMX8QXP_EMMC0_DATA5 3
+#define IMX8QXP_EMMC0_DATA5_LSIO_GPIO4_IO14	IMX8QXP_EMMC0_DATA5 4
+#define IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6	IMX8QXP_EMMC0_DATA6 0
+#define IMX8QXP_EMMC0_DATA6_CONN_NAND_DATA06	IMX8QXP_EMMC0_DATA6 1
+#define IMX8QXP_EMMC0_DATA6_CONN_MLB_CLK	IMX8QXP_EMMC0_DATA6 3
+#define IMX8QXP_EMMC0_DATA6_LSIO_GPIO4_IO15	IMX8QXP_EMMC0_DATA6 4
+#define IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7	IMX8QXP_EMMC0_DATA7 0
+#define IMX8QXP_EMMC0_DATA7_CONN_NAND_DATA07	IMX8QXP_EMMC0_DATA7 1
+#define IMX8QXP_EMMC0_DATA7_CONN_MLB_SIG	IMX8QXP_EMMC0_DATA7 3
+#define IMX8QXP_EMMC0_DATA7_LSIO_GPIO4_IO16	IMX8QXP_EMMC0_DATA7 4
+#define IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE	IMX8QXP_EMMC0_STROBE 0
+#define IMX8QXP_EMMC0_STROBE_CONN_NAND_CLE	IMX8QXP_EMMC0_STROBE 1
+#define IMX8QXP_EMMC0_STROBE_CONN_MLB_DATA	IMX8QXP_EMMC0_STROBE 3
+#define IMX8QXP_EMMC0_STROBE_LSIO_GPIO4_IO17	IMX8QXP_EMMC0_STROBE 4
+#define IMX8QXP_EMMC0_RESET_B_CONN_EMMC0_RESET_B	IMX8QXP_EMMC0_RESET_B 0
+#define IMX8QXP_EMMC0_RESET_B_CONN_NAND_WP_B	IMX8QXP_EMMC0_RESET_B 1
+#define IMX8QXP_EMMC0_RESET_B_LSIO_GPIO4_IO18	IMX8QXP_EMMC0_RESET_B 4
+#define IMX8QXP_USDHC1_RESET_B_CONN_USDHC1_RESET_B	IMX8QXP_USDHC1_RESET_B 0
+#define IMX8QXP_USDHC1_RESET_B_CONN_NAND_RE_N	IMX8QXP_USDHC1_RESET_B 1
+#define IMX8QXP_USDHC1_RESET_B_ADMA_SPI2_SCK	IMX8QXP_USDHC1_RESET_B 2
+#define IMX8QXP_USDHC1_RESET_B_LSIO_GPIO4_IO19	IMX8QXP_USDHC1_RESET_B 4
+#define IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT	IMX8QXP_USDHC1_VSELECT 0
+#define IMX8QXP_USDHC1_VSELECT_CONN_NAND_RE_P	IMX8QXP_USDHC1_VSELECT 1
+#define IMX8QXP_USDHC1_VSELECT_ADMA_SPI2_SDO	IMX8QXP_USDHC1_VSELECT 2
+#define IMX8QXP_USDHC1_VSELECT_CONN_NAND_RE_B	IMX8QXP_USDHC1_VSELECT 3
+#define IMX8QXP_USDHC1_VSELECT_LSIO_GPIO4_IO20	IMX8QXP_USDHC1_VSELECT 4
+#define IMX8QXP_USDHC1_WP_CONN_USDHC1_WP	IMX8QXP_USDHC1_WP 0
+#define IMX8QXP_USDHC1_WP_CONN_NAND_DQS_N	IMX8QXP_USDHC1_WP 1
+#define IMX8QXP_USDHC1_WP_ADMA_SPI2_SDI	IMX8QXP_USDHC1_WP 2
+#define IMX8QXP_USDHC1_WP_LSIO_GPIO4_IO21	IMX8QXP_USDHC1_WP 4
+#define IMX8QXP_USDHC1_CD_B_CONN_USDHC1_CD_B	IMX8QXP_USDHC1_CD_B 0
+#define IMX8QXP_USDHC1_CD_B_CONN_NAND_DQS_P	IMX8QXP_USDHC1_CD_B 1
+#define IMX8QXP_USDHC1_CD_B_ADMA_SPI2_CS0	IMX8QXP_USDHC1_CD_B 2
+#define IMX8QXP_USDHC1_CD_B_CONN_NAND_DQS	IMX8QXP_USDHC1_CD_B 3
+#define IMX8QXP_USDHC1_CD_B_LSIO_GPIO4_IO22	IMX8QXP_USDHC1_CD_B 4
+#define IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK	IMX8QXP_USDHC1_CLK 0
+#define IMX8QXP_USDHC1_CLK_ADMA_UART3_RX	IMX8QXP_USDHC1_CLK 2
+#define IMX8QXP_USDHC1_CLK_LSIO_GPIO4_IO23	IMX8QXP_USDHC1_CLK 4
+#define IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD	IMX8QXP_USDHC1_CMD 0
+#define IMX8QXP_USDHC1_CMD_CONN_NAND_CE0_B	IMX8QXP_USDHC1_CMD 1
+#define IMX8QXP_USDHC1_CMD_ADMA_MQS_R	IMX8QXP_USDHC1_CMD 2
+#define IMX8QXP_USDHC1_CMD_LSIO_GPIO4_IO24	IMX8QXP_USDHC1_CMD 4
+#define IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0	IMX8QXP_USDHC1_DATA0 0
+#define IMX8QXP_USDHC1_DATA0_CONN_NAND_CE1_B	IMX8QXP_USDHC1_DATA0 1
+#define IMX8QXP_USDHC1_DATA0_ADMA_MQS_L	IMX8QXP_USDHC1_DATA0 2
+#define IMX8QXP_USDHC1_DATA0_LSIO_GPIO4_IO25	IMX8QXP_USDHC1_DATA0 4
+#define IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1	IMX8QXP_USDHC1_DATA1 0
+#define IMX8QXP_USDHC1_DATA1_CONN_NAND_RE_B	IMX8QXP_USDHC1_DATA1 1
+#define IMX8QXP_USDHC1_DATA1_ADMA_UART3_TX	IMX8QXP_USDHC1_DATA1 2
+#define IMX8QXP_USDHC1_DATA1_LSIO_GPIO4_IO26	IMX8QXP_USDHC1_DATA1 4
+#define IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2	IMX8QXP_USDHC1_DATA2 0
+#define IMX8QXP_USDHC1_DATA2_CONN_NAND_WE_B	IMX8QXP_USDHC1_DATA2 1
+#define IMX8QXP_USDHC1_DATA2_ADMA_UART3_CTS_B	IMX8QXP_USDHC1_DATA2 2
+#define IMX8QXP_USDHC1_DATA2_LSIO_GPIO4_IO27	IMX8QXP_USDHC1_DATA2 4
+#define IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3	IMX8QXP_USDHC1_DATA3 0
+#define IMX8QXP_USDHC1_DATA3_CONN_NAND_ALE	IMX8QXP_USDHC1_DATA3 1
+#define IMX8QXP_USDHC1_DATA3_ADMA_UART3_RTS_B	IMX8QXP_USDHC1_DATA3 2
+#define IMX8QXP_USDHC1_DATA3_LSIO_GPIO4_IO28	IMX8QXP_USDHC1_DATA3 4
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC	IMX8QXP_ENET0_RGMII_TXC 0
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT	IMX8QXP_ENET0_RGMII_TXC 1
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN	IMX8QXP_ENET0_RGMII_TXC 2
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_NAND_CE1_B	IMX8QXP_ENET0_RGMII_TXC 3
+#define IMX8QXP_ENET0_RGMII_TXC_LSIO_GPIO4_IO29	IMX8QXP_ENET0_RGMII_TXC 4
+#define IMX8QXP_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL	IMX8QXP_ENET0_RGMII_TX_CTL 0
+#define IMX8QXP_ENET0_RGMII_TX_CTL_CONN_USDHC1_RESET_B	IMX8QXP_ENET0_RGMII_TX_CTL 3
+#define IMX8QXP_ENET0_RGMII_TX_CTL_LSIO_GPIO4_IO30	IMX8QXP_ENET0_RGMII_TX_CTL 4
+#define IMX8QXP_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0	IMX8QXP_ENET0_RGMII_TXD0 0
+#define IMX8QXP_ENET0_RGMII_TXD0_CONN_USDHC1_VSELECT	IMX8QXP_ENET0_RGMII_TXD0 3
+#define IMX8QXP_ENET0_RGMII_TXD0_LSIO_GPIO4_IO31	IMX8QXP_ENET0_RGMII_TXD0 4
+#define IMX8QXP_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1	IMX8QXP_ENET0_RGMII_TXD1 0
+#define IMX8QXP_ENET0_RGMII_TXD1_CONN_USDHC1_WP	IMX8QXP_ENET0_RGMII_TXD1 3
+#define IMX8QXP_ENET0_RGMII_TXD1_LSIO_GPIO5_IO00	IMX8QXP_ENET0_RGMII_TXD1 4
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2	IMX8QXP_ENET0_RGMII_TXD2 0
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_MLB_CLK	IMX8QXP_ENET0_RGMII_TXD2 1
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_NAND_CE0_B	IMX8QXP_ENET0_RGMII_TXD2 2
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_USDHC1_CD_B	IMX8QXP_ENET0_RGMII_TXD2 3
+#define IMX8QXP_ENET0_RGMII_TXD2_LSIO_GPIO5_IO01	IMX8QXP_ENET0_RGMII_TXD2 4
+#define IMX8QXP_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3	IMX8QXP_ENET0_RGMII_TXD3 0
+#define IMX8QXP_ENET0_RGMII_TXD3_CONN_MLB_SIG	IMX8QXP_ENET0_RGMII_TXD3 1
+#define IMX8QXP_ENET0_RGMII_TXD3_CONN_NAND_RE_B	IMX8QXP_ENET0_RGMII_TXD3 2
+#define IMX8QXP_ENET0_RGMII_TXD3_LSIO_GPIO5_IO02	IMX8QXP_ENET0_RGMII_TXD3 4
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC	IMX8QXP_ENET0_RGMII_RXC 0
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_MLB_DATA	IMX8QXP_ENET0_RGMII_RXC 1
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_NAND_WE_B	IMX8QXP_ENET0_RGMII_RXC 2
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_USDHC1_CLK	IMX8QXP_ENET0_RGMII_RXC 3
+#define IMX8QXP_ENET0_RGMII_RXC_LSIO_GPIO5_IO03	IMX8QXP_ENET0_RGMII_RXC 4
+#define IMX8QXP_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL	IMX8QXP_ENET0_RGMII_RX_CTL 0
+#define IMX8QXP_ENET0_RGMII_RX_CTL_CONN_USDHC1_CMD	IMX8QXP_ENET0_RGMII_RX_CTL 3
+#define IMX8QXP_ENET0_RGMII_RX_CTL_LSIO_GPIO5_IO04	IMX8QXP_ENET0_RGMII_RX_CTL 4
+#define IMX8QXP_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0	IMX8QXP_ENET0_RGMII_RXD0 0
+#define IMX8QXP_ENET0_RGMII_RXD0_CONN_USDHC1_DATA0	IMX8QXP_ENET0_RGMII_RXD0 3
+#define IMX8QXP_ENET0_RGMII_RXD0_LSIO_GPIO5_IO05	IMX8QXP_ENET0_RGMII_RXD0 4
+#define IMX8QXP_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1	IMX8QXP_ENET0_RGMII_RXD1 0
+#define IMX8QXP_ENET0_RGMII_RXD1_CONN_USDHC1_DATA1	IMX8QXP_ENET0_RGMII_RXD1 3
+#define IMX8QXP_ENET0_RGMII_RXD1_LSIO_GPIO5_IO06	IMX8QXP_ENET0_RGMII_RXD1 4
+#define IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2	IMX8QXP_ENET0_RGMII_RXD2 0
+#define IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER	IMX8QXP_ENET0_RGMII_RXD2 1
+#define IMX8QXP_ENET0_RGMII_RXD2_CONN_USDHC1_DATA2	IMX8QXP_ENET0_RGMII_RXD2 3
+#define IMX8QXP_ENET0_RGMII_RXD2_LSIO_GPIO5_IO07	IMX8QXP_ENET0_RGMII_RXD2 4
+#define IMX8QXP_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3	IMX8QXP_ENET0_RGMII_RXD3 0
+#define IMX8QXP_ENET0_RGMII_RXD3_CONN_NAND_ALE	IMX8QXP_ENET0_RGMII_RXD3 2
+#define IMX8QXP_ENET0_RGMII_RXD3_CONN_USDHC1_DATA3	IMX8QXP_ENET0_RGMII_RXD3 3
+#define IMX8QXP_ENET0_RGMII_RXD3_LSIO_GPIO5_IO08	IMX8QXP_ENET0_RGMII_RXD3 4
+#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M	IMX8QXP_ENET0_REFCLK_125M_25M 0
+#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS	IMX8QXP_ENET0_REFCLK_125M_25M 1
+#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET1_PPS	IMX8QXP_ENET0_REFCLK_125M_25M 2
+#define IMX8QXP_ENET0_REFCLK_125M_25M_LSIO_GPIO5_IO09	IMX8QXP_ENET0_REFCLK_125M_25M 4
+#define IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO	IMX8QXP_ENET0_MDIO 0
+#define IMX8QXP_ENET0_MDIO_ADMA_I2C3_SDA	IMX8QXP_ENET0_MDIO 1
+#define IMX8QXP_ENET0_MDIO_CONN_ENET1_MDIO	IMX8QXP_ENET0_MDIO 2
+#define IMX8QXP_ENET0_MDIO_LSIO_GPIO5_IO10	IMX8QXP_ENET0_MDIO 4
+#define IMX8QXP_ENET0_MDC_CONN_ENET0_MDC	IMX8QXP_ENET0_MDC 0
+#define IMX8QXP_ENET0_MDC_ADMA_I2C3_SCL	IMX8QXP_ENET0_MDC 1
+#define IMX8QXP_ENET0_MDC_CONN_ENET1_MDC	IMX8QXP_ENET0_MDC 2
+#define IMX8QXP_ENET0_MDC_LSIO_GPIO5_IO11	IMX8QXP_ENET0_MDC 4
+#define IMX8QXP_ESAI0_FSR_ADMA_ESAI0_FSR	IMX8QXP_ESAI0_FSR 0
+#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RCLK50M_OUT	IMX8QXP_ESAI0_FSR 1
+#define IMX8QXP_ESAI0_FSR_ADMA_LCDIF_D00	IMX8QXP_ESAI0_FSR 2
+#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RGMII_TXC	IMX8QXP_ESAI0_FSR 3
+#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RCLK50M_IN	IMX8QXP_ESAI0_FSR 4
+#define IMX8QXP_ESAI0_FST_ADMA_ESAI0_FST	IMX8QXP_ESAI0_FST 0
+#define IMX8QXP_ESAI0_FST_CONN_MLB_CLK	IMX8QXP_ESAI0_FST 1
+#define IMX8QXP_ESAI0_FST_ADMA_LCDIF_D01	IMX8QXP_ESAI0_FST 2
+#define IMX8QXP_ESAI0_FST_CONN_ENET1_RGMII_TXD2	IMX8QXP_ESAI0_FST 3
+#define IMX8QXP_ESAI0_FST_LSIO_GPIO0_IO01	IMX8QXP_ESAI0_FST 4
+#define IMX8QXP_ESAI0_SCKR_ADMA_ESAI0_SCKR	IMX8QXP_ESAI0_SCKR 0
+#define IMX8QXP_ESAI0_SCKR_ADMA_LCDIF_D02	IMX8QXP_ESAI0_SCKR 2
+#define IMX8QXP_ESAI0_SCKR_CONN_ENET1_RGMII_TX_CTL	IMX8QXP_ESAI0_SCKR 3
+#define IMX8QXP_ESAI0_SCKR_LSIO_GPIO0_IO02	IMX8QXP_ESAI0_SCKR 4
+#define IMX8QXP_ESAI0_SCKT_ADMA_ESAI0_SCKT	IMX8QXP_ESAI0_SCKT 0
+#define IMX8QXP_ESAI0_SCKT_CONN_MLB_SIG	IMX8QXP_ESAI0_SCKT 1
+#define IMX8QXP_ESAI0_SCKT_ADMA_LCDIF_D03	IMX8QXP_ESAI0_SCKT 2
+#define IMX8QXP_ESAI0_SCKT_CONN_ENET1_RGMII_TXD3	IMX8QXP_ESAI0_SCKT 3
+#define IMX8QXP_ESAI0_SCKT_LSIO_GPIO0_IO03	IMX8QXP_ESAI0_SCKT 4
+#define IMX8QXP_ESAI0_TX0_ADMA_ESAI0_TX0	IMX8QXP_ESAI0_TX0 0
+#define IMX8QXP_ESAI0_TX0_CONN_MLB_DATA	IMX8QXP_ESAI0_TX0 1
+#define IMX8QXP_ESAI0_TX0_ADMA_LCDIF_D04	IMX8QXP_ESAI0_TX0 2
+#define IMX8QXP_ESAI0_TX0_CONN_ENET1_RGMII_RXC	IMX8QXP_ESAI0_TX0 3
+#define IMX8QXP_ESAI0_TX0_LSIO_GPIO0_IO04	IMX8QXP_ESAI0_TX0 4
+#define IMX8QXP_ESAI0_TX1_ADMA_ESAI0_TX1	IMX8QXP_ESAI0_TX1 0
+#define IMX8QXP_ESAI0_TX1_ADMA_LCDIF_D05	IMX8QXP_ESAI0_TX1 2
+#define IMX8QXP_ESAI0_TX1_CONN_ENET1_RGMII_RXD3	IMX8QXP_ESAI0_TX1 3
+#define IMX8QXP_ESAI0_TX1_LSIO_GPIO0_IO05	IMX8QXP_ESAI0_TX1 4
+#define IMX8QXP_ESAI0_TX2_RX3_ADMA_ESAI0_TX2_RX3	IMX8QXP_ESAI0_TX2_RX3 0
+#define IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RMII_RX_ER	IMX8QXP_ESAI0_TX2_RX3 1
+#define IMX8QXP_ESAI0_TX2_RX3_ADMA_LCDIF_D06	IMX8QXP_ESAI0_TX2_RX3 2
+#define IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RGMII_RXD2	IMX8QXP_ESAI0_TX2_RX3 3
+#define IMX8QXP_ESAI0_TX2_RX3_LSIO_GPIO0_IO06	IMX8QXP_ESAI0_TX2_RX3 4
+#define IMX8QXP_ESAI0_TX3_RX2_ADMA_ESAI0_TX3_RX2	IMX8QXP_ESAI0_TX3_RX2 0
+#define IMX8QXP_ESAI0_TX3_RX2_ADMA_LCDIF_D07	IMX8QXP_ESAI0_TX3_RX2 2
+#define IMX8QXP_ESAI0_TX3_RX2_CONN_ENET1_RGMII_RXD1	IMX8QXP_ESAI0_TX3_RX2 3
+#define IMX8QXP_ESAI0_TX3_RX2_LSIO_GPIO0_IO07	IMX8QXP_ESAI0_TX3_RX2 4
+#define IMX8QXP_ESAI0_TX4_RX1_ADMA_ESAI0_TX4_RX1	IMX8QXP_ESAI0_TX4_RX1 0
+#define IMX8QXP_ESAI0_TX4_RX1_ADMA_LCDIF_D08	IMX8QXP_ESAI0_TX4_RX1 2
+#define IMX8QXP_ESAI0_TX4_RX1_CONN_ENET1_RGMII_TXD0	IMX8QXP_ESAI0_TX4_RX1 3
+#define IMX8QXP_ESAI0_TX4_RX1_LSIO_GPIO0_IO08	IMX8QXP_ESAI0_TX4_RX1 4
+#define IMX8QXP_ESAI0_TX5_RX0_ADMA_ESAI0_TX5_RX0	IMX8QXP_ESAI0_TX5_RX0 0
+#define IMX8QXP_ESAI0_TX5_RX0_ADMA_LCDIF_D09	IMX8QXP_ESAI0_TX5_RX0 2
+#define IMX8QXP_ESAI0_TX5_RX0_CONN_ENET1_RGMII_TXD1	IMX8QXP_ESAI0_TX5_RX0 3
+#define IMX8QXP_ESAI0_TX5_RX0_LSIO_GPIO0_IO09	IMX8QXP_ESAI0_TX5_RX0 4
+#define IMX8QXP_SPDIF0_RX_ADMA_SPDIF0_RX	IMX8QXP_SPDIF0_RX 0
+#define IMX8QXP_SPDIF0_RX_ADMA_MQS_R	IMX8QXP_SPDIF0_RX 1
+#define IMX8QXP_SPDIF0_RX_ADMA_LCDIF_D10	IMX8QXP_SPDIF0_RX 2
+#define IMX8QXP_SPDIF0_RX_CONN_ENET1_RGMII_RXD0	IMX8QXP_SPDIF0_RX 3
+#define IMX8QXP_SPDIF0_RX_LSIO_GPIO0_IO10	IMX8QXP_SPDIF0_RX 4
+#define IMX8QXP_SPDIF0_TX_ADMA_SPDIF0_TX	IMX8QXP_SPDIF0_TX 0
+#define IMX8QXP_SPDIF0_TX_ADMA_MQS_L	IMX8QXP_SPDIF0_TX 1
+#define IMX8QXP_SPDIF0_TX_ADMA_LCDIF_D11	IMX8QXP_SPDIF0_TX 2
+#define IMX8QXP_SPDIF0_TX_CONN_ENET1_RGMII_RX_CTL	IMX8QXP_SPDIF0_TX 3
+#define IMX8QXP_SPDIF0_TX_LSIO_GPIO0_IO11	IMX8QXP_SPDIF0_TX 4
+#define
IMX8QXP_SPDIF0_EXT_CLK_ADMA_SPDIF0_EXT_CLK IMX8QXP_SPDIF0_EXT_CLK 0 +#define IMX8QXP_SPDIF0_EXT_CLK_ADMA_LCDIF_D12 IMX8QXP_SPDIF0_EXT_CLK 2 +#define IMX8QXP_SPDIF0_EXT_CLK_CONN_ENET1_REFCLK_125M_25M IMX8QXP_SPDIF0_EXT_CLK 3 +#define IMX8QXP_SPDIF0_EXT_CLK_LSIO_GPIO0_IO12 IMX8QXP_SPDIF0_EXT_CLK 4 +#define IMX8QXP_SPI3_SCK_ADMA_SPI3_SCK IMX8QXP_SPI3_SCK 0 +#define IMX8QXP_SPI3_SCK_ADMA_LCDIF_D13 IMX8QXP_SPI3_SCK 2 +#define IMX8QXP_SPI3_SCK_LSIO_GPIO0_IO13 IMX8QXP_SPI3_SCK 4 +#define IMX8QXP_SPI3_SDO_ADMA_SPI3_SDO IMX8QXP_SPI3_SDO 0 +#define IMX8QXP_SPI3_SDO_ADMA_LCDIF_D14 IMX8QXP_SPI3_SDO 2 +#define IMX8QXP_SPI3_SDO_LSIO_GPIO0_IO14 IMX8QXP_SPI3_SDO 4 +#define IMX8QXP_SPI3_SDI_ADMA_SPI3_SDI IMX8QXP_SPI3_SDI 0 +#define IMX8QXP_SPI3_SDI_ADMA_LCDIF_D15 IMX8QXP_SPI3_SDI 2 +#define IMX8QXP_SPI3_SDI_LSIO_GPIO0_IO15 IMX8QXP_SPI3_SDI 4 +#define IMX8QXP_SPI3_CS0_ADMA_SPI3_CS0 IMX8QXP_SPI3_CS0 0 +#define IMX8QXP_SPI3_CS0_ADMA_ACM_MCLK_OUT1 IMX8QXP_SPI3_CS0 1 +#define IMX8QXP_SPI3_CS0_ADMA_LCDIF_HSYNC IMX8QXP_SPI3_CS0 2 +#define IMX8QXP_SPI3_CS0_LSIO_GPIO0_IO16 IMX8QXP_SPI3_CS0 4 +#define IMX8QXP_SPI3_CS1_ADMA_SPI3_CS1 IMX8QXP_SPI3_CS1 0 +#define IMX8QXP_SPI3_CS1_ADMA_I2C3_SCL IMX8QXP_SPI3_CS1 1 +#define IMX8QXP_SPI3_CS1_ADMA_LCDIF_RESET IMX8QXP_SPI3_CS1 2 +#define IMX8QXP_SPI3_CS1_ADMA_SPI2_CS0 IMX8QXP_SPI3_CS1 3 +#define IMX8QXP_SPI3_CS1_ADMA_LCDIF_D16 IMX8QXP_SPI3_CS1 4 +#define IMX8QXP_MCLK_IN1_ADMA_ACM_MCLK_IN1 IMX8QXP_MCLK_IN1 0 +#define IMX8QXP_MCLK_IN1_ADMA_I2C3_SDA IMX8QXP_MCLK_IN1 1 +#define IMX8QXP_MCLK_IN1_ADMA_LCDIF_EN IMX8QXP_MCLK_IN1 2 +#define IMX8QXP_MCLK_IN1_ADMA_SPI2_SCK IMX8QXP_MCLK_IN1 3 +#define IMX8QXP_MCLK_IN1_ADMA_LCDIF_D17 IMX8QXP_MCLK_IN1 4 +#define IMX8QXP_MCLK_IN0_ADMA_ACM_MCLK_IN0 IMX8QXP_MCLK_IN0 0 +#define IMX8QXP_MCLK_IN0_ADMA_ESAI0_RX_HF_CLK IMX8QXP_MCLK_IN0 1 +#define IMX8QXP_MCLK_IN0_ADMA_LCDIF_VSYNC IMX8QXP_MCLK_IN0 2 +#define IMX8QXP_MCLK_IN0_ADMA_SPI2_SDI IMX8QXP_MCLK_IN0 3 +#define IMX8QXP_MCLK_IN0_LSIO_GPIO0_IO19 IMX8QXP_MCLK_IN0 4 +#define IMX8QXP_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 IMX8QXP_MCLK_OUT0 0 +#define IMX8QXP_MCLK_OUT0_ADMA_ESAI0_TX_HF_CLK IMX8QXP_MCLK_OUT0 1 +#define IMX8QXP_MCLK_OUT0_ADMA_LCDIF_CLK IMX8QXP_MCLK_OUT0 2 +#define IMX8QXP_MCLK_OUT0_ADMA_SPI2_SDO IMX8QXP_MCLK_OUT0 3 +#define IMX8QXP_MCLK_OUT0_LSIO_GPIO0_IO20 IMX8QXP_MCLK_OUT0 4 +#define IMX8QXP_UART1_TX_ADMA_UART1_TX IMX8QXP_UART1_TX 0 +#define IMX8QXP_UART1_TX_LSIO_PWM0_OUT IMX8QXP_UART1_TX 1 +#define IMX8QXP_UART1_TX_LSIO_GPT0_CAPTURE IMX8QXP_UART1_TX 2 +#define IMX8QXP_UART1_TX_LSIO_GPIO0_IO21 IMX8QXP_UART1_TX 4 +#define IMX8QXP_UART1_RX_ADMA_UART1_RX IMX8QXP_UART1_RX 0 +#define IMX8QXP_UART1_RX_LSIO_PWM1_OUT IMX8QXP_UART1_RX 1 +#define IMX8QXP_UART1_RX_LSIO_GPT0_COMPARE IMX8QXP_UART1_RX 2 +#define IMX8QXP_UART1_RX_LSIO_GPT1_CLK IMX8QXP_UART1_RX 3 +#define IMX8QXP_UART1_RX_LSIO_GPIO0_IO22 IMX8QXP_UART1_RX 4 +#define IMX8QXP_UART1_RTS_B_ADMA_UART1_RTS_B IMX8QXP_UART1_RTS_B 0 +#define IMX8QXP_UART1_RTS_B_LSIO_PWM2_OUT IMX8QXP_UART1_RTS_B 1 +#define IMX8QXP_UART1_RTS_B_ADMA_LCDIF_D16 IMX8QXP_UART1_RTS_B 2 +#define IMX8QXP_UART1_RTS_B_LSIO_GPT1_CAPTURE IMX8QXP_UART1_RTS_B 3 +#define IMX8QXP_UART1_RTS_B_LSIO_GPT0_CLK IMX8QXP_UART1_RTS_B 4 +#define IMX8QXP_UART1_CTS_B_ADMA_UART1_CTS_B IMX8QXP_UART1_CTS_B 0 +#define IMX8QXP_UART1_CTS_B_LSIO_PWM3_OUT IMX8QXP_UART1_CTS_B 1 +#define IMX8QXP_UART1_CTS_B_ADMA_LCDIF_D17 IMX8QXP_UART1_CTS_B 2 +#define IMX8QXP_UART1_CTS_B_LSIO_GPT1_COMPARE IMX8QXP_UART1_CTS_B 3 +#define IMX8QXP_UART1_CTS_B_LSIO_GPIO0_IO24 IMX8QXP_UART1_CTS_B 4 +#define 
IMX8QXP_SAI0_TXD_ADMA_SAI0_TXD IMX8QXP_SAI0_TXD 0 +#define IMX8QXP_SAI0_TXD_ADMA_SAI1_RXC IMX8QXP_SAI0_TXD 1 +#define IMX8QXP_SAI0_TXD_ADMA_SPI1_SDO IMX8QXP_SAI0_TXD 2 +#define IMX8QXP_SAI0_TXD_ADMA_LCDIF_D18 IMX8QXP_SAI0_TXD 3 +#define IMX8QXP_SAI0_TXD_LSIO_GPIO0_IO25 IMX8QXP_SAI0_TXD 4 +#define IMX8QXP_SAI0_TXC_ADMA_SAI0_TXC IMX8QXP_SAI0_TXC 0 +#define IMX8QXP_SAI0_TXC_ADMA_SAI1_TXD IMX8QXP_SAI0_TXC 1 +#define IMX8QXP_SAI0_TXC_ADMA_SPI1_SDI IMX8QXP_SAI0_TXC 2 +#define IMX8QXP_SAI0_TXC_ADMA_LCDIF_D19 IMX8QXP_SAI0_TXC 3 +#define IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26 IMX8QXP_SAI0_TXC 4 +#define IMX8QXP_SAI0_RXD_ADMA_SAI0_RXD IMX8QXP_SAI0_RXD 0 +#define IMX8QXP_SAI0_RXD_ADMA_SAI1_RXFS IMX8QXP_SAI0_RXD 1 +#define IMX8QXP_SAI0_RXD_ADMA_SPI1_CS0 IMX8QXP_SAI0_RXD 2 +#define IMX8QXP_SAI0_RXD_ADMA_LCDIF_D20 IMX8QXP_SAI0_RXD 3 +#define IMX8QXP_SAI0_RXD_LSIO_GPIO0_IO27 IMX8QXP_SAI0_RXD 4 +#define IMX8QXP_SAI0_TXFS_ADMA_SAI0_TXFS IMX8QXP_SAI0_TXFS 0 +#define IMX8QXP_SAI0_TXFS_ADMA_SPI2_CS1 IMX8QXP_SAI0_TXFS 1 +#define IMX8QXP_SAI0_TXFS_ADMA_SPI1_SCK IMX8QXP_SAI0_TXFS 2 +#define IMX8QXP_SAI0_TXFS_LSIO_GPIO0_IO28 IMX8QXP_SAI0_TXFS 4 +#define IMX8QXP_SAI1_RXD_ADMA_SAI1_RXD IMX8QXP_SAI1_RXD 0 +#define IMX8QXP_SAI1_RXD_ADMA_SAI0_RXFS IMX8QXP_SAI1_RXD 1 +#define IMX8QXP_SAI1_RXD_ADMA_SPI1_CS1 IMX8QXP_SAI1_RXD 2 +#define IMX8QXP_SAI1_RXD_ADMA_LCDIF_D21 IMX8QXP_SAI1_RXD 3 +#define IMX8QXP_SAI1_RXD_LSIO_GPIO0_IO29 IMX8QXP_SAI1_RXD 4 +#define IMX8QXP_SAI1_RXC_ADMA_SAI1_RXC IMX8QXP_SAI1_RXC 0 +#define IMX8QXP_SAI1_RXC_ADMA_SAI1_TXC IMX8QXP_SAI1_RXC 1 +#define IMX8QXP_SAI1_RXC_ADMA_LCDIF_D22 IMX8QXP_SAI1_RXC 3 +#define IMX8QXP_SAI1_RXC_LSIO_GPIO0_IO30 IMX8QXP_SAI1_RXC 4 +#define IMX8QXP_SAI1_RXFS_ADMA_SAI1_RXFS IMX8QXP_SAI1_RXFS 0 +#define IMX8QXP_SAI1_RXFS_ADMA_SAI1_TXFS IMX8QXP_SAI1_RXFS 1 +#define IMX8QXP_SAI1_RXFS_ADMA_LCDIF_D23 IMX8QXP_SAI1_RXFS 3 +#define IMX8QXP_SAI1_RXFS_LSIO_GPIO0_IO31 IMX8QXP_SAI1_RXFS 4 +#define IMX8QXP_SPI2_CS0_ADMA_SPI2_CS0 IMX8QXP_SPI2_CS0 0 +#define IMX8QXP_SPI2_CS0_LSIO_GPIO1_IO00 IMX8QXP_SPI2_CS0 4 +#define IMX8QXP_SPI2_SDO_ADMA_SPI2_SDO IMX8QXP_SPI2_SDO 0 +#define IMX8QXP_SPI2_SDO_LSIO_GPIO1_IO01 IMX8QXP_SPI2_SDO 4 +#define IMX8QXP_SPI2_SDI_ADMA_SPI2_SDI IMX8QXP_SPI2_SDI 0 +#define IMX8QXP_SPI2_SDI_LSIO_GPIO1_IO02 IMX8QXP_SPI2_SDI 4 +#define IMX8QXP_SPI2_SCK_ADMA_SPI2_SCK IMX8QXP_SPI2_SCK 0 +#define IMX8QXP_SPI2_SCK_LSIO_GPIO1_IO03 IMX8QXP_SPI2_SCK 4 +#define IMX8QXP_SPI0_SCK_ADMA_SPI0_SCK IMX8QXP_SPI0_SCK 0 +#define IMX8QXP_SPI0_SCK_ADMA_SAI0_TXC IMX8QXP_SPI0_SCK 1 +#define IMX8QXP_SPI0_SCK_M40_I2C0_SCL IMX8QXP_SPI0_SCK 2 +#define IMX8QXP_SPI0_SCK_M40_GPIO0_IO00 IMX8QXP_SPI0_SCK 3 +#define IMX8QXP_SPI0_SCK_LSIO_GPIO1_IO04 IMX8QXP_SPI0_SCK 4 +#define IMX8QXP_SPI0_SDI_ADMA_SPI0_SDI IMX8QXP_SPI0_SDI 0 +#define IMX8QXP_SPI0_SDI_ADMA_SAI0_TXD IMX8QXP_SPI0_SDI 1 +#define IMX8QXP_SPI0_SDI_M40_TPM0_CH0 IMX8QXP_SPI0_SDI 2 +#define IMX8QXP_SPI0_SDI_M40_GPIO0_IO02 IMX8QXP_SPI0_SDI 3 +#define IMX8QXP_SPI0_SDI_LSIO_GPIO1_IO05 IMX8QXP_SPI0_SDI 4 +#define IMX8QXP_SPI0_SDO_ADMA_SPI0_SDO IMX8QXP_SPI0_SDO 0 +#define IMX8QXP_SPI0_SDO_ADMA_SAI0_TXFS IMX8QXP_SPI0_SDO 1 +#define IMX8QXP_SPI0_SDO_M40_I2C0_SDA IMX8QXP_SPI0_SDO 2 +#define IMX8QXP_SPI0_SDO_M40_GPIO0_IO01 IMX8QXP_SPI0_SDO 3 +#define IMX8QXP_SPI0_SDO_LSIO_GPIO1_IO06 IMX8QXP_SPI0_SDO 4 +#define IMX8QXP_SPI0_CS1_ADMA_SPI0_CS1 IMX8QXP_SPI0_CS1 0 +#define IMX8QXP_SPI0_CS1_ADMA_SAI0_RXC IMX8QXP_SPI0_CS1 1 +#define IMX8QXP_SPI0_CS1_ADMA_SAI1_TXD IMX8QXP_SPI0_CS1 2 +#define IMX8QXP_SPI0_CS1_ADMA_LCD_PWM0_OUT IMX8QXP_SPI0_CS1 3 +#define 
IMX8QXP_SPI0_CS1_LSIO_GPIO1_IO07 IMX8QXP_SPI0_CS1 4 +#define IMX8QXP_SPI0_CS0_ADMA_SPI0_CS0 IMX8QXP_SPI0_CS0 0 +#define IMX8QXP_SPI0_CS0_ADMA_SAI0_RXD IMX8QXP_SPI0_CS0 1 +#define IMX8QXP_SPI0_CS0_M40_TPM0_CH1 IMX8QXP_SPI0_CS0 2 +#define IMX8QXP_SPI0_CS0_M40_GPIO0_IO03 IMX8QXP_SPI0_CS0 3 +#define IMX8QXP_SPI0_CS0_LSIO_GPIO1_IO08 IMX8QXP_SPI0_CS0 4 +#define IMX8QXP_ADC_IN1_ADMA_ADC_IN1 IMX8QXP_ADC_IN1 0 +#define IMX8QXP_ADC_IN1_M40_I2C0_SDA IMX8QXP_ADC_IN1 1 +#define IMX8QXP_ADC_IN1_M40_GPIO0_IO01 IMX8QXP_ADC_IN1 2 +#define IMX8QXP_ADC_IN1_LSIO_GPIO1_IO09 IMX8QXP_ADC_IN1 4 +#define IMX8QXP_ADC_IN0_ADMA_ADC_IN0 IMX8QXP_ADC_IN0 0 +#define IMX8QXP_ADC_IN0_M40_I2C0_SCL IMX8QXP_ADC_IN0 1 +#define IMX8QXP_ADC_IN0_M40_GPIO0_IO00 IMX8QXP_ADC_IN0 2 +#define IMX8QXP_ADC_IN0_LSIO_GPIO1_IO10 IMX8QXP_ADC_IN0 4 +#define IMX8QXP_ADC_IN3_ADMA_ADC_IN3 IMX8QXP_ADC_IN3 0 +#define IMX8QXP_ADC_IN3_M40_UART0_TX IMX8QXP_ADC_IN3 1 +#define IMX8QXP_ADC_IN3_M40_GPIO0_IO03 IMX8QXP_ADC_IN3 2 +#define IMX8QXP_ADC_IN3_ADMA_ACM_MCLK_OUT0 IMX8QXP_ADC_IN3 3 +#define IMX8QXP_ADC_IN3_LSIO_GPIO1_IO11 IMX8QXP_ADC_IN3 4 +#define IMX8QXP_ADC_IN2_ADMA_ADC_IN2 IMX8QXP_ADC_IN2 0 +#define IMX8QXP_ADC_IN2_M40_UART0_RX IMX8QXP_ADC_IN2 1 +#define IMX8QXP_ADC_IN2_M40_GPIO0_IO02 IMX8QXP_ADC_IN2 2 +#define IMX8QXP_ADC_IN2_ADMA_ACM_MCLK_IN0 IMX8QXP_ADC_IN2 3 +#define IMX8QXP_ADC_IN2_LSIO_GPIO1_IO12 IMX8QXP_ADC_IN2 4 +#define IMX8QXP_ADC_IN5_ADMA_ADC_IN5 IMX8QXP_ADC_IN5 0 +#define IMX8QXP_ADC_IN5_M40_TPM0_CH1 IMX8QXP_ADC_IN5 1 +#define IMX8QXP_ADC_IN5_M40_GPIO0_IO05 IMX8QXP_ADC_IN5 2 +#define IMX8QXP_ADC_IN5_LSIO_GPIO1_IO13 IMX8QXP_ADC_IN5 4 +#define IMX8QXP_ADC_IN4_ADMA_ADC_IN4 IMX8QXP_ADC_IN4 0 +#define IMX8QXP_ADC_IN4_M40_TPM0_CH0 IMX8QXP_ADC_IN4 1 +#define IMX8QXP_ADC_IN4_M40_GPIO0_IO04 IMX8QXP_ADC_IN4 2 +#define IMX8QXP_ADC_IN4_LSIO_GPIO1_IO14 IMX8QXP_ADC_IN4 4 +#define IMX8QXP_FLEXCAN0_RX_ADMA_FLEXCAN0_RX IMX8QXP_FLEXCAN0_RX 0 +#define IMX8QXP_FLEXCAN0_RX_ADMA_SAI2_RXC IMX8QXP_FLEXCAN0_RX 1 +#define IMX8QXP_FLEXCAN0_RX_ADMA_UART0_RTS_B IMX8QXP_FLEXCAN0_RX 2 +#define IMX8QXP_FLEXCAN0_RX_ADMA_SAI1_TXC IMX8QXP_FLEXCAN0_RX 3 +#define IMX8QXP_FLEXCAN0_RX_LSIO_GPIO1_IO15 IMX8QXP_FLEXCAN0_RX 4 +#define IMX8QXP_FLEXCAN0_TX_ADMA_FLEXCAN0_TX IMX8QXP_FLEXCAN0_TX 0 +#define IMX8QXP_FLEXCAN0_TX_ADMA_SAI2_RXD IMX8QXP_FLEXCAN0_TX 1 +#define IMX8QXP_FLEXCAN0_TX_ADMA_UART0_CTS_B IMX8QXP_FLEXCAN0_TX 2 +#define IMX8QXP_FLEXCAN0_TX_ADMA_SAI1_TXFS IMX8QXP_FLEXCAN0_TX 3 +#define IMX8QXP_FLEXCAN0_TX_LSIO_GPIO1_IO16 IMX8QXP_FLEXCAN0_TX 4 +#define IMX8QXP_FLEXCAN1_RX_ADMA_FLEXCAN1_RX IMX8QXP_FLEXCAN1_RX 0 +#define IMX8QXP_FLEXCAN1_RX_ADMA_SAI2_RXFS IMX8QXP_FLEXCAN1_RX 1 +#define IMX8QXP_FLEXCAN1_RX_ADMA_FTM_CH2 IMX8QXP_FLEXCAN1_RX 2 +#define IMX8QXP_FLEXCAN1_RX_ADMA_SAI1_TXD IMX8QXP_FLEXCAN1_RX 3 +#define IMX8QXP_FLEXCAN1_RX_LSIO_GPIO1_IO17 IMX8QXP_FLEXCAN1_RX 4 +#define IMX8QXP_FLEXCAN1_TX_ADMA_FLEXCAN1_TX IMX8QXP_FLEXCAN1_TX 0 +#define IMX8QXP_FLEXCAN1_TX_ADMA_SAI3_RXC IMX8QXP_FLEXCAN1_TX 1 +#define IMX8QXP_FLEXCAN1_TX_ADMA_DMA0_REQ_IN0 IMX8QXP_FLEXCAN1_TX 2 +#define IMX8QXP_FLEXCAN1_TX_ADMA_SAI1_RXD IMX8QXP_FLEXCAN1_TX 3 +#define IMX8QXP_FLEXCAN1_TX_LSIO_GPIO1_IO18 IMX8QXP_FLEXCAN1_TX 4 +#define IMX8QXP_FLEXCAN2_RX_ADMA_FLEXCAN2_RX IMX8QXP_FLEXCAN2_RX 0 +#define IMX8QXP_FLEXCAN2_RX_ADMA_SAI3_RXD IMX8QXP_FLEXCAN2_RX 1 +#define IMX8QXP_FLEXCAN2_RX_ADMA_UART3_RX IMX8QXP_FLEXCAN2_RX 2 +#define IMX8QXP_FLEXCAN2_RX_ADMA_SAI1_RXFS IMX8QXP_FLEXCAN2_RX 3 +#define IMX8QXP_FLEXCAN2_RX_LSIO_GPIO1_IO19 IMX8QXP_FLEXCAN2_RX 4 +#define IMX8QXP_FLEXCAN2_TX_ADMA_FLEXCAN2_TX 
IMX8QXP_FLEXCAN2_TX 0 +#define IMX8QXP_FLEXCAN2_TX_ADMA_SAI3_RXFS IMX8QXP_FLEXCAN2_TX 1 +#define IMX8QXP_FLEXCAN2_TX_ADMA_UART3_TX IMX8QXP_FLEXCAN2_TX 2 +#define IMX8QXP_FLEXCAN2_TX_ADMA_SAI1_RXC IMX8QXP_FLEXCAN2_TX 3 +#define IMX8QXP_FLEXCAN2_TX_LSIO_GPIO1_IO20 IMX8QXP_FLEXCAN2_TX 4 +#define IMX8QXP_UART0_RX_ADMA_UART0_RX IMX8QXP_UART0_RX 0 +#define IMX8QXP_UART0_RX_ADMA_MQS_R IMX8QXP_UART0_RX 1 +#define IMX8QXP_UART0_RX_ADMA_FLEXCAN0_RX IMX8QXP_UART0_RX 2 +#define IMX8QXP_UART0_RX_LSIO_GPIO1_IO21 IMX8QXP_UART0_RX 4 +#define IMX8QXP_UART0_TX_ADMA_UART0_TX IMX8QXP_UART0_TX 0 +#define IMX8QXP_UART0_TX_ADMA_MQS_L IMX8QXP_UART0_TX 1 +#define IMX8QXP_UART0_TX_ADMA_FLEXCAN0_TX IMX8QXP_UART0_TX 2 +#define IMX8QXP_UART0_TX_LSIO_GPIO1_IO22 IMX8QXP_UART0_TX 4 +#define IMX8QXP_UART2_TX_ADMA_UART2_TX IMX8QXP_UART2_TX 0 +#define IMX8QXP_UART2_TX_ADMA_FTM_CH1 IMX8QXP_UART2_TX 1 +#define IMX8QXP_UART2_TX_ADMA_FLEXCAN1_TX IMX8QXP_UART2_TX 2 +#define IMX8QXP_UART2_TX_LSIO_GPIO1_IO23 IMX8QXP_UART2_TX 4 +#define IMX8QXP_UART2_RX_ADMA_UART2_RX IMX8QXP_UART2_RX 0 +#define IMX8QXP_UART2_RX_ADMA_FTM_CH0 IMX8QXP_UART2_RX 1 +#define IMX8QXP_UART2_RX_ADMA_FLEXCAN1_RX IMX8QXP_UART2_RX 2 +#define IMX8QXP_UART2_RX_LSIO_GPIO1_IO24 IMX8QXP_UART2_RX 4 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL IMX8QXP_MIPI_DSI0_I2C0_SCL 0 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI1_GPIO0_IO02 IMX8QXP_MIPI_DSI0_I2C0_SCL 1 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO25 IMX8QXP_MIPI_DSI0_I2C0_SCL 4 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA IMX8QXP_MIPI_DSI0_I2C0_SDA 0 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI1_GPIO0_IO03 IMX8QXP_MIPI_DSI0_I2C0_SDA 1 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO26 IMX8QXP_MIPI_DSI0_I2C0_SDA 4 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_GPIO0_IO00 IMX8QXP_MIPI_DSI0_GPIO0_00 0 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_ADMA_I2C1_SCL IMX8QXP_MIPI_DSI0_GPIO0_00 1 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT IMX8QXP_MIPI_DSI0_GPIO0_00 2 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_LSIO_GPIO1_IO27 IMX8QXP_MIPI_DSI0_GPIO0_00 4 +#define IMX8QXP_MIPI_DSI0_GPIO0_01_MIPI_DSI0_GPIO0_IO01 IMX8QXP_MIPI_DSI0_GPIO0_01 0 +#define IMX8QXP_MIPI_DSI0_GPIO0_01_ADMA_I2C1_SDA IMX8QXP_MIPI_DSI0_GPIO0_01 1 +#define IMX8QXP_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO28 IMX8QXP_MIPI_DSI0_GPIO0_01 4 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL IMX8QXP_MIPI_DSI1_I2C0_SCL 0 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL_MIPI_DSI0_GPIO0_IO02 IMX8QXP_MIPI_DSI1_I2C0_SCL 1 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL_LSIO_GPIO1_IO29 IMX8QXP_MIPI_DSI1_I2C0_SCL 4 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA IMX8QXP_MIPI_DSI1_I2C0_SDA 0 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA_MIPI_DSI0_GPIO0_IO03 IMX8QXP_MIPI_DSI1_I2C0_SDA 1 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO30 IMX8QXP_MIPI_DSI1_I2C0_SDA 4 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_GPIO0_IO00 IMX8QXP_MIPI_DSI1_GPIO0_00 0 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_ADMA_I2C2_SCL IMX8QXP_MIPI_DSI1_GPIO0_00 1 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT IMX8QXP_MIPI_DSI1_GPIO0_00 2 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_LSIO_GPIO1_IO31 IMX8QXP_MIPI_DSI1_GPIO0_00 4 +#define IMX8QXP_MIPI_DSI1_GPIO0_01_MIPI_DSI1_GPIO0_IO01 IMX8QXP_MIPI_DSI1_GPIO0_01 0 +#define IMX8QXP_MIPI_DSI1_GPIO0_01_ADMA_I2C2_SDA IMX8QXP_MIPI_DSI1_GPIO0_01 1 +#define IMX8QXP_MIPI_DSI1_GPIO0_01_LSIO_GPIO2_IO00 IMX8QXP_MIPI_DSI1_GPIO0_01 4 +#define IMX8QXP_JTAG_TRST_B_SCU_JTAG_TRST_B IMX8QXP_JTAG_TRST_B 0 +#define IMX8QXP_JTAG_TRST_B_SCU_WDOG0_WDOG_OUT IMX8QXP_JTAG_TRST_B 1 +#define 
IMX8QXP_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8QXP_PMIC_I2C_SCL 0 +#define IMX8QXP_PMIC_I2C_SCL_SCU_GPIO0_IOXX_PMIC_A35_ON IMX8QXP_PMIC_I2C_SCL 1 +#define IMX8QXP_PMIC_I2C_SCL_LSIO_GPIO2_IO01 IMX8QXP_PMIC_I2C_SCL 4 +#define IMX8QXP_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8QXP_PMIC_I2C_SDA 0 +#define IMX8QXP_PMIC_I2C_SDA_SCU_GPIO0_IOXX_PMIC_GPU_ON IMX8QXP_PMIC_I2C_SDA 1 +#define IMX8QXP_PMIC_I2C_SDA_LSIO_GPIO2_IO02 IMX8QXP_PMIC_I2C_SDA 4 +#define IMX8QXP_PMIC_INT_B_SCU_DIMX8QXPMIC_INT_B IMX8QXP_PMIC_INT_B 0 +#define IMX8QXP_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8QXP_SCU_GPIO0_00 0 +#define IMX8QXP_SCU_GPIO0_00_SCU_UART0_RX IMX8QXP_SCU_GPIO0_00 1 +#define IMX8QXP_SCU_GPIO0_00_M40_UART0_RX IMX8QXP_SCU_GPIO0_00 2 +#define IMX8QXP_SCU_GPIO0_00_ADMA_UART3_RX IMX8QXP_SCU_GPIO0_00 3 +#define IMX8QXP_SCU_GPIO0_00_LSIO_GPIO2_IO03 IMX8QXP_SCU_GPIO0_00 4 +#define IMX8QXP_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8QXP_SCU_GPIO0_01 0 +#define IMX8QXP_SCU_GPIO0_01_SCU_UART0_TX IMX8QXP_SCU_GPIO0_01 1 +#define IMX8QXP_SCU_GPIO0_01_M40_UART0_TX IMX8QXP_SCU_GPIO0_01 2 +#define IMX8QXP_SCU_GPIO0_01_ADMA_UART3_TX IMX8QXP_SCU_GPIO0_01 3 +#define IMX8QXP_SCU_GPIO0_01_SCU_WDOG0_WDOG_OUT IMX8QXP_SCU_GPIO0_01 4 +#define IMX8QXP_SCU_PMIC_STANDBY_SCU_DIMX8QXPMIC_STANDBY IMX8QXP_SCU_PMIC_STANDBY 0 +#define IMX8QXP_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8QXP_SCU_BOOT_MODE0 0 +#define IMX8QXP_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8QXP_SCU_BOOT_MODE1 0 +#define IMX8QXP_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8QXP_SCU_BOOT_MODE2 0 +#define IMX8QXP_SCU_BOOT_MODE2_SCU_PMIC_I2C_SDA IMX8QXP_SCU_BOOT_MODE2 1 +#define IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_BOOT_MODE3 IMX8QXP_SCU_BOOT_MODE3 0 +#define IMX8QXP_SCU_BOOT_MODE3_SCU_PMIC_I2C_SCL IMX8QXP_SCU_BOOT_MODE3 1 +#define IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8QXP_SCU_BOOT_MODE3 3 +#define IMX8QXP_CSI_D00_CI_PI_D02 IMX8QXP_CSI_D00 0 +#define IMX8QXP_CSI_D00_ADMA_SAI0_RXC IMX8QXP_CSI_D00 2 +#define IMX8QXP_CSI_D01_CI_PI_D03 IMX8QXP_CSI_D01 0 +#define IMX8QXP_CSI_D01_ADMA_SAI0_RXD IMX8QXP_CSI_D01 2 +#define IMX8QXP_CSI_D02_CI_PI_D04 IMX8QXP_CSI_D02 0 +#define IMX8QXP_CSI_D02_ADMA_SAI0_RXFS IMX8QXP_CSI_D02 2 +#define IMX8QXP_CSI_D03_CI_PI_D05 IMX8QXP_CSI_D03 0 +#define IMX8QXP_CSI_D03_ADMA_SAI2_RXC IMX8QXP_CSI_D03 2 +#define IMX8QXP_CSI_D04_CI_PI_D06 IMX8QXP_CSI_D04 0 +#define IMX8QXP_CSI_D04_ADMA_SAI2_RXD IMX8QXP_CSI_D04 2 +#define IMX8QXP_CSI_D05_CI_PI_D07 IMX8QXP_CSI_D05 0 +#define IMX8QXP_CSI_D05_ADMA_SAI2_RXFS IMX8QXP_CSI_D05 2 +#define IMX8QXP_CSI_D06_CI_PI_D08 IMX8QXP_CSI_D06 0 +#define IMX8QXP_CSI_D06_ADMA_SAI3_RXC IMX8QXP_CSI_D06 2 +#define IMX8QXP_CSI_D07_CI_PI_D09 IMX8QXP_CSI_D07 0 +#define IMX8QXP_CSI_D07_ADMA_SAI3_RXD IMX8QXP_CSI_D07 2 +#define IMX8QXP_CSI_HSYNC_CI_PI_HSYNC IMX8QXP_CSI_HSYNC 0 +#define IMX8QXP_CSI_HSYNC_CI_PI_D00 IMX8QXP_CSI_HSYNC 1 +#define IMX8QXP_CSI_HSYNC_ADMA_SAI3_RXFS IMX8QXP_CSI_HSYNC 2 +#define IMX8QXP_CSI_VSYNC_CI_PI_VSYNC IMX8QXP_CSI_VSYNC 0 +#define IMX8QXP_CSI_VSYNC_CI_PI_D01 IMX8QXP_CSI_VSYNC 1 +#define IMX8QXP_CSI_PCLK_CI_PI_PCLK IMX8QXP_CSI_PCLK 0 +#define IMX8QXP_CSI_PCLK_MIPI_CSI0_I2C0_SCL IMX8QXP_CSI_PCLK 1 +#define IMX8QXP_CSI_PCLK_ADMA_SPI1_SCK IMX8QXP_CSI_PCLK 3 +#define IMX8QXP_CSI_PCLK_LSIO_GPIO3_IO00 IMX8QXP_CSI_PCLK 4 +#define IMX8QXP_CSI_MCLK_CI_PI_MCLK IMX8QXP_CSI_MCLK 0 +#define IMX8QXP_CSI_MCLK_MIPI_CSI0_I2C0_SDA IMX8QXP_CSI_MCLK 1 +#define IMX8QXP_CSI_MCLK_ADMA_SPI1_SDO IMX8QXP_CSI_MCLK 3 +#define IMX8QXP_CSI_MCLK_LSIO_GPIO3_IO01 IMX8QXP_CSI_MCLK 4 +#define IMX8QXP_CSI_EN_CI_PI_EN IMX8QXP_CSI_EN 0 +#define IMX8QXP_CSI_EN_CI_PI_I2C_SCL 
IMX8QXP_CSI_EN 1 +#define IMX8QXP_CSI_EN_ADMA_I2C3_SCL IMX8QXP_CSI_EN 2 +#define IMX8QXP_CSI_EN_ADMA_SPI1_SDI IMX8QXP_CSI_EN 3 +#define IMX8QXP_CSI_EN_LSIO_GPIO3_IO02 IMX8QXP_CSI_EN 4 +#define IMX8QXP_CSI_RESET_CI_PI_RESET IMX8QXP_CSI_RESET 0 +#define IMX8QXP_CSI_RESET_CI_PI_I2C_SDA IMX8QXP_CSI_RESET 1 +#define IMX8QXP_CSI_RESET_ADMA_I2C3_SDA IMX8QXP_CSI_RESET 2 +#define IMX8QXP_CSI_RESET_ADMA_SPI1_CS0 IMX8QXP_CSI_RESET 3 +#define IMX8QXP_CSI_RESET_LSIO_GPIO3_IO03 IMX8QXP_CSI_RESET 4 +#define IMX8QXP_MIPI_CSI0_MCLK_OUT_MIPI_CSI0_ACM_MCLK_OUT IMX8QXP_MIPI_CSI0_MCLK_OUT 0 +#define IMX8QXP_MIPI_CSI0_MCLK_OUT_LSIO_GPIO3_IO04 IMX8QXP_MIPI_CSI0_MCLK_OUT 4 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_I2C0_SCL IMX8QXP_MIPI_CSI0_I2C0_SCL 0 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_GPIO0_IO02 IMX8QXP_MIPI_CSI0_I2C0_SCL 1 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL_LSIO_GPIO3_IO05 IMX8QXP_MIPI_CSI0_I2C0_SCL 4 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_I2C0_SDA IMX8QXP_MIPI_CSI0_I2C0_SDA 0 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_GPIO0_IO03 IMX8QXP_MIPI_CSI0_I2C0_SDA 1 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA_LSIO_GPIO3_IO06 IMX8QXP_MIPI_CSI0_I2C0_SDA 4 +#define IMX8QXP_MIPI_CSI0_GPIO0_01_MIPI_CSI0_GPIO0_IO01 IMX8QXP_MIPI_CSI0_GPIO0_01 0 +#define IMX8QXP_MIPI_CSI0_GPIO0_01_ADMA_I2C0_SDA IMX8QXP_MIPI_CSI0_GPIO0_01 1 +#define IMX8QXP_MIPI_CSI0_GPIO0_01_LSIO_GPIO3_IO07 IMX8QXP_MIPI_CSI0_GPIO0_01 4 +#define IMX8QXP_MIPI_CSI0_GPIO0_00_MIPI_CSI0_GPIO0_IO00 IMX8QXP_MIPI_CSI0_GPIO0_00 0 +#define IMX8QXP_MIPI_CSI0_GPIO0_00_ADMA_I2C0_SCL IMX8QXP_MIPI_CSI0_GPIO0_00 1 +#define IMX8QXP_MIPI_CSI0_GPIO0_00_LSIO_GPIO3_IO08 IMX8QXP_MIPI_CSI0_GPIO0_00 4 +#define IMX8QXP_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8QXP_QSPI0A_DATA0 0 +#define IMX8QXP_QSPI0A_DATA0_LSIO_GPIO3_IO09 IMX8QXP_QSPI0A_DATA0 4 +#define IMX8QXP_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8QXP_QSPI0A_DATA1 0 +#define IMX8QXP_QSPI0A_DATA1_LSIO_GPIO3_IO10 IMX8QXP_QSPI0A_DATA1 4 +#define IMX8QXP_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8QXP_QSPI0A_DATA2 0 +#define IMX8QXP_QSPI0A_DATA2_LSIO_GPIO3_IO11 IMX8QXP_QSPI0A_DATA2 4 +#define IMX8QXP_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8QXP_QSPI0A_DATA3 0 +#define IMX8QXP_QSPI0A_DATA3_LSIO_GPIO3_IO12 IMX8QXP_QSPI0A_DATA3 4 +#define IMX8QXP_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8QXP_QSPI0A_DQS 0 +#define IMX8QXP_QSPI0A_DQS_LSIO_GPIO3_IO13 IMX8QXP_QSPI0A_DQS 4 +#define IMX8QXP_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8QXP_QSPI0A_SS0_B 0 +#define IMX8QXP_QSPI0A_SS0_B_LSIO_GPIO3_IO14 IMX8QXP_QSPI0A_SS0_B 4 +#define IMX8QXP_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B IMX8QXP_QSPI0A_SS1_B 0 +#define IMX8QXP_QSPI0A_SS1_B_LSIO_GPIO3_IO15 IMX8QXP_QSPI0A_SS1_B 4 +#define IMX8QXP_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8QXP_QSPI0A_SCLK 0 +#define IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16 IMX8QXP_QSPI0A_SCLK 4 +#define IMX8QXP_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8QXP_QSPI0B_SCLK 0 +#define IMX8QXP_QSPI0B_SCLK_LSIO_QSPI1A_SCLK IMX8QXP_QSPI0B_SCLK 1 +#define IMX8QXP_QSPI0B_SCLK_LSIO_KPP0_COL0 IMX8QXP_QSPI0B_SCLK 2 +#define IMX8QXP_QSPI0B_SCLK_LSIO_GPIO3_IO17 IMX8QXP_QSPI0B_SCLK 4 +#define IMX8QXP_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8QXP_QSPI0B_DATA0 0 +#define IMX8QXP_QSPI0B_DATA0_LSIO_QSPI1A_DATA0 IMX8QXP_QSPI0B_DATA0 1 +#define IMX8QXP_QSPI0B_DATA0_LSIO_KPP0_COL1 IMX8QXP_QSPI0B_DATA0 2 +#define IMX8QXP_QSPI0B_DATA0_LSIO_GPIO3_IO18 IMX8QXP_QSPI0B_DATA0 4 +#define IMX8QXP_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8QXP_QSPI0B_DATA1 0 +#define IMX8QXP_QSPI0B_DATA1_LSIO_QSPI1A_DATA1 IMX8QXP_QSPI0B_DATA1 1 +#define IMX8QXP_QSPI0B_DATA1_LSIO_KPP0_COL2 IMX8QXP_QSPI0B_DATA1 2 +#define 
IMX8QXP_QSPI0B_DATA1_LSIO_GPIO3_IO19 IMX8QXP_QSPI0B_DATA1 4
+#define IMX8QXP_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8QXP_QSPI0B_DATA2 0
+#define IMX8QXP_QSPI0B_DATA2_LSIO_QSPI1A_DATA2 IMX8QXP_QSPI0B_DATA2 1
+#define IMX8QXP_QSPI0B_DATA2_LSIO_KPP0_COL3 IMX8QXP_QSPI0B_DATA2 2
+#define IMX8QXP_QSPI0B_DATA2_LSIO_GPIO3_IO20 IMX8QXP_QSPI0B_DATA2 4
+#define IMX8QXP_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8QXP_QSPI0B_DATA3 0
+#define IMX8QXP_QSPI0B_DATA3_LSIO_QSPI1A_DATA3 IMX8QXP_QSPI0B_DATA3 1
+#define IMX8QXP_QSPI0B_DATA3_LSIO_KPP0_ROW0 IMX8QXP_QSPI0B_DATA3 2
+#define IMX8QXP_QSPI0B_DATA3_LSIO_GPIO3_IO21 IMX8QXP_QSPI0B_DATA3 4
+#define IMX8QXP_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8QXP_QSPI0B_DQS 0
+#define IMX8QXP_QSPI0B_DQS_LSIO_QSPI1A_DQS IMX8QXP_QSPI0B_DQS 1
+#define IMX8QXP_QSPI0B_DQS_LSIO_KPP0_ROW1 IMX8QXP_QSPI0B_DQS 2
+#define IMX8QXP_QSPI0B_DQS_LSIO_GPIO3_IO22 IMX8QXP_QSPI0B_DQS 4
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8QXP_QSPI0B_SS0_B 0
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI1A_SS0_B IMX8QXP_QSPI0B_SS0_B 1
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_KPP0_ROW2 IMX8QXP_QSPI0B_SS0_B 2
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_GPIO3_IO23 IMX8QXP_QSPI0B_SS0_B 4
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B IMX8QXP_QSPI0B_SS1_B 0
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI1A_SS1_B IMX8QXP_QSPI0B_SS1_B 1
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_KPP0_ROW3 IMX8QXP_QSPI0B_SS1_B 2
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_GPIO3_IO24 IMX8QXP_QSPI0B_SS1_B 4
+
+#endif /* _IMX8QXP_PADS_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h b/include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h
new file mode 100644
index 0000000..20f4340
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * pinctrl-tegra-io-pad.h: Tegra I/O pad source voltage configuration constants
+ * pinctrl bindings.
+ *
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Aapo Vienamo
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_TEGRA_IO_PAD_H
+#define _DT_BINDINGS_PINCTRL_TEGRA_IO_PAD_H
+
+/* Voltage levels of the I/O pad's source rail */
+#define TEGRA_IO_PAD_VOLTAGE_1V8 0
+#define TEGRA_IO_PAD_VOLTAGE_3V3 1
+
+#endif
diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h b/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h
new file mode 100644
index 0000000..ac63c39
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-tegra-xusb.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H
+#define _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H 1
+
+#define TEGRA_XUSB_PADCTL_PCIE 0
+#define TEGRA_XUSB_PADCTL_SATA 1
+
+#endif /* _DT_BINDINGS_PINCTRL_TEGRA_XUSB_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra.h b/include/dt-bindings/pinctrl/pinctrl-tegra.h
new file mode 100644
index 0000000..d9b18bf
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-tegra.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides constants for Tegra pinctrl bindings.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_TEGRA_H
+#define _DT_BINDINGS_PINCTRL_TEGRA_H
+
+/*
+ * Enable/disable for different dt properties. This is applicable for
+ * properties nvidia,enable-input, nvidia,tristate, nvidia,open-drain,
+ * nvidia,lock, nvidia,rcv-sel, nvidia,high-speed-mode, nvidia,schmitt.
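+ *
+ * As a hedged illustration only (the node and pin-group names below are
+ * hypothetical, not taken from this patch), a board DTS entry using
+ * these constants might look like:
+ *
+ *	some-pins {
+ *		nvidia,pins = "some_pin_group";
+ *		nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ *		nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ *		nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ *	};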
+ */ +#define TEGRA_PIN_DISABLE 0 +#define TEGRA_PIN_ENABLE 1 + +#define TEGRA_PIN_PULL_NONE 0 +#define TEGRA_PIN_PULL_DOWN 1 +#define TEGRA_PIN_PULL_UP 2 + +/* Low power mode driver */ +#define TEGRA_PIN_LP_DRIVE_DIV_8 0 +#define TEGRA_PIN_LP_DRIVE_DIV_4 1 +#define TEGRA_PIN_LP_DRIVE_DIV_2 2 +#define TEGRA_PIN_LP_DRIVE_DIV_1 3 + +/* Rising/Falling slew rate */ +#define TEGRA_PIN_SLEW_RATE_FASTEST 0 +#define TEGRA_PIN_SLEW_RATE_FAST 1 +#define TEGRA_PIN_SLEW_RATE_SLOW 2 +#define TEGRA_PIN_SLEW_RATE_SLOWEST 3 + +#endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h new file mode 100644 index 0000000..e5df5ce --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the Qualcomm PMIC GPIO binding. + */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H + +#define PMIC_GPIO_PULL_UP_30 0 +#define PMIC_GPIO_PULL_UP_1P5 1 +#define PMIC_GPIO_PULL_UP_31P5 2 +#define PMIC_GPIO_PULL_UP_1P5_30 3 + +#define PMIC_GPIO_STRENGTH_NO 0 +#define PMIC_GPIO_STRENGTH_HIGH 1 +#define PMIC_GPIO_STRENGTH_MED 2 +#define PMIC_GPIO_STRENGTH_LOW 3 + +/* + * Note: PM8018 GPIO3 and GPIO4 are supporting + * only S3 and L2 options (1.8V) + */ +#define PM8018_GPIO_L6 0 +#define PM8018_GPIO_L5 1 +#define PM8018_GPIO_S3 2 +#define PM8018_GPIO_L14 3 +#define PM8018_GPIO_L2 4 +#define PM8018_GPIO_L4 5 +#define PM8018_GPIO_VDD 6 + +/* + * Note: PM8038 GPIO7 and GPIO8 are supporting + * only L11 and L4 options (1.8V) + */ +#define PM8038_GPIO_VPH 0 +#define PM8038_GPIO_BB 1 +#define PM8038_GPIO_L11 2 +#define PM8038_GPIO_L15 3 +#define PM8038_GPIO_L4 4 +#define PM8038_GPIO_L3 5 +#define PM8038_GPIO_L17 6 + +#define PM8058_GPIO_VPH 0 +#define PM8058_GPIO_BB 1 +#define PM8058_GPIO_S3 2 +#define PM8058_GPIO_L3 3 +#define PM8058_GPIO_L7 4 +#define PM8058_GPIO_L6 5 +#define PM8058_GPIO_L5 6 +#define PM8058_GPIO_L2 7 + +/* + * Note: PM8916 GPIO1 and GPIO2 are supporting + * only L2(1.15V) and L5(1.8V) options + */ +#define PM8916_GPIO_VPH 0 +#define PM8916_GPIO_L2 2 +#define PM8916_GPIO_L5 3 + +#define PM8917_GPIO_VPH 0 +#define PM8917_GPIO_S4 2 +#define PM8917_GPIO_L15 3 +#define PM8917_GPIO_L4 4 +#define PM8917_GPIO_L3 5 +#define PM8917_GPIO_L17 6 + +#define PM8921_GPIO_VPH 0 +#define PM8921_GPIO_BB 1 +#define PM8921_GPIO_S4 2 +#define PM8921_GPIO_L15 3 +#define PM8921_GPIO_L4 4 +#define PM8921_GPIO_L3 5 +#define PM8921_GPIO_L17 6 + +/* + * Note: PM8941 gpios from 15 to 18 are supporting + * only S3 and L6 options (1.8V) + */ +#define PM8941_GPIO_VPH 0 +#define PM8941_GPIO_L1 1 +#define PM8941_GPIO_S3 2 +#define PM8941_GPIO_L6 3 + +/* + * Note: PMA8084 gpios from 15 to 18 are supporting + * only S4 and L6 options (1.8V) + */ +#define PMA8084_GPIO_VPH 0 +#define PMA8084_GPIO_L1 1 +#define PMA8084_GPIO_S4 2 +#define PMA8084_GPIO_L6 3 + +#define PM8994_GPIO_VPH 0 +#define PM8994_GPIO_S4 2 +#define PM8994_GPIO_L12 3 + +/* To be used with "function" */ +#define PMIC_GPIO_FUNC_NORMAL "normal" +#define PMIC_GPIO_FUNC_PAIRED "paired" +#define PMIC_GPIO_FUNC_FUNC1 "func1" +#define PMIC_GPIO_FUNC_FUNC2 "func2" +#define PMIC_GPIO_FUNC_FUNC3 "func3" +#define PMIC_GPIO_FUNC_FUNC4 "func4" +#define PMIC_GPIO_FUNC_DTEST1 "dtest1" +#define PMIC_GPIO_FUNC_DTEST2 "dtest2" +#define PMIC_GPIO_FUNC_DTEST3 "dtest3" +#define PMIC_GPIO_FUNC_DTEST4 "dtest4" + +#define PM8038_GPIO1_2_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO3_5V_BOOST_EN 
PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO4_SSBI_ALT_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO5_6_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO10_11_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_7_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO9_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_12_KYPD_DRV PMIC_GPIO_FUNC_FUNC2 + +#define PM8058_GPIO7_8_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO7_8_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO9_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO24_26_LPG_DRV PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO33_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO34_35_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO36_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UPL_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UART_M_RX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO38_39_CLK_32KHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1 + +#define PM8916_GPIO1_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8916_GPIO1_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PM8916_GPIO2_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8916_GPIO2_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PM8916_GPIO3_KEYP_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8916_GPIO4_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 + +#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8917_GPIO25_26_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_MP3_CLK PMIC_GPIO_FUNC_FUNC2 + +#define PM8941_GPIO9_14_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO23_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO23_26_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO31_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_3D PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 + +#define PMA8084_GPIO4_5_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO7_10_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO5_14_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO19_21_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO22_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 + +#endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h new file mode 100644 index 0000000..32e66ee --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the Qualcomm PMIC's + * Multi-Purpose Pin binding. 
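+ *
+ * A minimal usage sketch (the node name, pin label and supply constant
+ * are illustrative assumptions, not taken from this patch):
+ *
+ *	mpp-state {
+ *		pins = "mpp5";
+ *		function = PMIC_MPP_FUNC_NORMAL;
+ *		power-source = <PM8941_MPP_S3>;
+ *	};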
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H + +/* power-source */ + +/* Digital Input/Output: level [PM8058] */ +#define PM8058_MPP_VPH 0 +#define PM8058_MPP_S3 1 +#define PM8058_MPP_L2 2 +#define PM8058_MPP_L3 3 + +/* Digital Input/Output: level [PM8901] */ +#define PM8901_MPP_MSMIO 0 +#define PM8901_MPP_DIG 1 +#define PM8901_MPP_L5 2 +#define PM8901_MPP_S4 3 +#define PM8901_MPP_VPH 4 + +/* Digital Input/Output: level [PM8921] */ +#define PM8921_MPP_S4 1 +#define PM8921_MPP_L15 3 +#define PM8921_MPP_L17 4 +#define PM8921_MPP_VPH 7 + +/* Digital Input/Output: level [PM8821] */ +#define PM8821_MPP_1P8 0 +#define PM8821_MPP_VPH 7 + +/* Digital Input/Output: level [PM8018] */ +#define PM8018_MPP_L4 0 +#define PM8018_MPP_L14 1 +#define PM8018_MPP_S3 2 +#define PM8018_MPP_L6 3 +#define PM8018_MPP_L2 4 +#define PM8018_MPP_L5 5 +#define PM8018_MPP_VPH 7 + +/* Digital Input/Output: level [PM8038] */ +#define PM8038_MPP_L20 0 +#define PM8038_MPP_L11 1 +#define PM8038_MPP_L5 2 +#define PM8038_MPP_L15 3 +#define PM8038_MPP_L17 4 +#define PM8038_MPP_VPH 7 + +#define PM8841_MPP_VPH 0 +#define PM8841_MPP_S3 2 + +#define PM8916_MPP_VPH 0 +#define PM8916_MPP_L2 2 +#define PM8916_MPP_L5 3 + +#define PM8941_MPP_VPH 0 +#define PM8941_MPP_L1 1 +#define PM8941_MPP_S3 2 +#define PM8941_MPP_L6 3 + +#define PMA8084_MPP_VPH 0 +#define PMA8084_MPP_L1 1 +#define PMA8084_MPP_S4 2 +#define PMA8084_MPP_L6 3 + +#define PM8994_MPP_VPH 0 +/* Only supported for MPP_05-MPP_08 */ +#define PM8994_MPP_L19 1 +#define PM8994_MPP_S4 2 +#define PM8994_MPP_L12 3 + +/* + * Analog Input - Set the source for analog input. + * To be used with "qcom,amux-route" property + */ +#define PMIC_MPP_AMUX_ROUTE_CH5 0 +#define PMIC_MPP_AMUX_ROUTE_CH6 1 +#define PMIC_MPP_AMUX_ROUTE_CH7 2 +#define PMIC_MPP_AMUX_ROUTE_CH8 3 +#define PMIC_MPP_AMUX_ROUTE_ABUS1 4 +#define PMIC_MPP_AMUX_ROUTE_ABUS2 5 +#define PMIC_MPP_AMUX_ROUTE_ABUS3 6 +#define PMIC_MPP_AMUX_ROUTE_ABUS4 7 + +/* Analog Output: level */ +#define PMIC_MPP_AOUT_LVL_1V25 0 +#define PMIC_MPP_AOUT_LVL_1V25_2 1 +#define PMIC_MPP_AOUT_LVL_0V625 2 +#define PMIC_MPP_AOUT_LVL_0V3125 3 +#define PMIC_MPP_AOUT_LVL_MPP 4 +#define PMIC_MPP_AOUT_LVL_ABUS1 5 +#define PMIC_MPP_AOUT_LVL_ABUS2 6 +#define PMIC_MPP_AOUT_LVL_ABUS3 7 + +/* To be used with "function" */ +#define PMIC_MPP_FUNC_NORMAL "normal" +#define PMIC_MPP_FUNC_PAIRED "paired" +#define PMIC_MPP_FUNC_DTEST1 "dtest1" +#define PMIC_MPP_FUNC_DTEST2 "dtest2" +#define PMIC_MPP_FUNC_DTEST3 "dtest3" +#define PMIC_MPP_FUNC_DTEST4 "dtest4" + +#endif diff --git a/include/dt-bindings/pinctrl/r7s72100-pinctrl.h b/include/dt-bindings/pinctrl/r7s72100-pinctrl.h new file mode 100644 index 0000000..cdb9502 --- /dev/null +++ b/include/dt-bindings/pinctrl/r7s72100-pinctrl.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Defines macros and constants for Renesas RZ/A1 pin controller pin + * muxing functions. 
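+ *
+ * The RZA1_PINMUX() macro below packs a bank, a position and an
+ * alternate function into one value; for example, RZA1_PINMUX(3, 0, 2)
+ * encodes pin P3_0 (linear index 3 * 16 + 0 = 48) with function 2 in
+ * the upper 16 bits, i.e. 0x20030 (values chosen only to illustrate
+ * the encoding).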
+ */
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H
+#define __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H
+
+#define RZA1_PINS_PER_PORT 16
+
+/*
+ * Create the pin index from its bank and position numbers and store in
+ * the upper 16 bits the alternate function identifier
+ */
+#define RZA1_PINMUX(b, p, f) ((b) * RZA1_PINS_PER_PORT + (p) | (f << 16))
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H */
diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
new file mode 100644
index 0000000..2d0c23e
--- /dev/null
+++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Defines macros and constants for Renesas RZ/A2 pin controller pin
+ * muxing functions.
+ */
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H
+#define __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H
+
+#define RZA2_PINS_PER_PORT 8
+
+/* Port names as labeled in the Hardware Manual */
+#define PORT0 0
+#define PORT1 1
+#define PORT2 2
+#define PORT3 3
+#define PORT4 4
+#define PORT5 5
+#define PORT6 6
+#define PORT7 7
+#define PORT8 8
+#define PORT9 9
+#define PORTA 10
+#define PORTB 11
+#define PORTC 12
+#define PORTD 13
+#define PORTE 14
+#define PORTF 15
+#define PORTG 16
+#define PORTH 17
+/* No I */
+#define PORTJ 18
+#define PORTK 19
+#define PORTL 20
+#define PORTM 21 /* Pins PM_0/1 are labeled JP_0/1 in HW manual */
+
+/*
+ * Create the pin index from its bank and position numbers and store in
+ * the upper 16 bits the alternate function identifier
+ */
+#define RZA2_PINMUX(b, p, f) ((b) * RZA2_PINS_PER_PORT + (p) | (f << 16))
+
+/*
+ * Convert a port and pin label to its global pin index
+ */
+ #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
new file mode 100644
index 0000000..dc5c1c7
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Header providing constants for Rockchip pinctrl bindings.
+ *
+ * Copyright (c) 2013 MundoReader S.L.
+ * Author: Heiko Stuebner
+ */
+
+#ifndef __DT_BINDINGS_ROCKCHIP_PINCTRL_H__
+#define __DT_BINDINGS_ROCKCHIP_PINCTRL_H__
+
+#define RK_GPIO0 0
+#define RK_GPIO1 1
+#define RK_GPIO2 2
+#define RK_GPIO3 3
+#define RK_GPIO4 4
+#define RK_GPIO6 6
+
+#define RK_PA0 0
+#define RK_PA1 1
+#define RK_PA2 2
+#define RK_PA3 3
+#define RK_PA4 4
+#define RK_PA5 5
+#define RK_PA6 6
+#define RK_PA7 7
+#define RK_PB0 8
+#define RK_PB1 9
+#define RK_PB2 10
+#define RK_PB3 11
+#define RK_PB4 12
+#define RK_PB5 13
+#define RK_PB6 14
+#define RK_PB7 15
+#define RK_PC0 16
+#define RK_PC1 17
+#define RK_PC2 18
+#define RK_PC3 19
+#define RK_PC4 20
+#define RK_PC5 21
+#define RK_PC6 22
+#define RK_PC7 23
+#define RK_PD0 24
+#define RK_PD1 25
+#define RK_PD2 26
+#define RK_PD3 27
+#define RK_PD4 28
+#define RK_PD5 29
+#define RK_PD6 30
+#define RK_PD7 31
+
+#define RK_FUNC_GPIO 0
+#define RK_FUNC_1 1
+#define RK_FUNC_2 2
+#define RK_FUNC_3 3
+#define RK_FUNC_4 4
+
+#endif
diff --git a/include/dt-bindings/pinctrl/rzn1-pinctrl.h b/include/dt-bindings/pinctrl/rzn1-pinctrl.h
new file mode 100644
index 0000000..21d6cc4
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzn1-pinctrl.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Defines macros and constants for Renesas RZ/N1 pin controller pin
+ * muxing functions.
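+ *
+ * The RZN1_PINMUX() macro below stores the mux function in bits 8 and
+ * up and the GPIO number in the low byte; for example,
+ * RZN1_PINMUX(8, RZN1_FUNC_UART0) evaluates to (28 << 8) | 8 = 0x1c08
+ * (an illustrative pairing, not one validated against a board).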
+ */ +#ifndef __DT_BINDINGS_RZN1_PINCTRL_H +#define __DT_BINDINGS_RZN1_PINCTRL_H + +#define RZN1_PINMUX(_gpio, _func) \ + (((_func) << 8) | (_gpio)) + +/* + * Given the different levels of muxing on the SoC, it was decided to + * 'linearize' them into one numerical space. So mux level 1, 2 and the MDIO + * muxes are all represented by one single value. + * + * You can derive the hardware value pretty easily too, as + * 0...9 are Level 1 + * 10...71 are Level 2. The Level 2 mux will be set to this + * value - RZN1_FUNC_L2_OFFSET, and the Level 1 mux will be + * set accordingly. + * 72...103 are for the 2 MDIO muxes. + */ +#define RZN1_FUNC_HIGHZ 0 +#define RZN1_FUNC_0L 1 +#define RZN1_FUNC_CLK_ETH_MII_RGMII_RMII 2 +#define RZN1_FUNC_CLK_ETH_NAND 3 +#define RZN1_FUNC_QSPI 4 +#define RZN1_FUNC_SDIO 5 +#define RZN1_FUNC_LCD 6 +#define RZN1_FUNC_LCD_E 7 +#define RZN1_FUNC_MSEBIM 8 +#define RZN1_FUNC_MSEBIS 9 +#define RZN1_FUNC_L2_OFFSET 10 /* I'm Special */ + +#define RZN1_FUNC_HIGHZ1 (RZN1_FUNC_L2_OFFSET + 0) +#define RZN1_FUNC_ETHERCAT (RZN1_FUNC_L2_OFFSET + 1) +#define RZN1_FUNC_SERCOS3 (RZN1_FUNC_L2_OFFSET + 2) +#define RZN1_FUNC_SDIO_E (RZN1_FUNC_L2_OFFSET + 3) +#define RZN1_FUNC_ETH_MDIO (RZN1_FUNC_L2_OFFSET + 4) +#define RZN1_FUNC_ETH_MDIO_E1 (RZN1_FUNC_L2_OFFSET + 5) +#define RZN1_FUNC_USB (RZN1_FUNC_L2_OFFSET + 6) +#define RZN1_FUNC_MSEBIM_E (RZN1_FUNC_L2_OFFSET + 7) +#define RZN1_FUNC_MSEBIS_E (RZN1_FUNC_L2_OFFSET + 8) +#define RZN1_FUNC_RSV (RZN1_FUNC_L2_OFFSET + 9) +#define RZN1_FUNC_RSV_E (RZN1_FUNC_L2_OFFSET + 10) +#define RZN1_FUNC_RSV_E1 (RZN1_FUNC_L2_OFFSET + 11) +#define RZN1_FUNC_UART0_I (RZN1_FUNC_L2_OFFSET + 12) +#define RZN1_FUNC_UART0_I_E (RZN1_FUNC_L2_OFFSET + 13) +#define RZN1_FUNC_UART1_I (RZN1_FUNC_L2_OFFSET + 14) +#define RZN1_FUNC_UART1_I_E (RZN1_FUNC_L2_OFFSET + 15) +#define RZN1_FUNC_UART2_I (RZN1_FUNC_L2_OFFSET + 16) +#define RZN1_FUNC_UART2_I_E (RZN1_FUNC_L2_OFFSET + 17) +#define RZN1_FUNC_UART0 (RZN1_FUNC_L2_OFFSET + 18) +#define RZN1_FUNC_UART0_E (RZN1_FUNC_L2_OFFSET + 19) +#define RZN1_FUNC_UART1 (RZN1_FUNC_L2_OFFSET + 20) +#define RZN1_FUNC_UART1_E (RZN1_FUNC_L2_OFFSET + 21) +#define RZN1_FUNC_UART2 (RZN1_FUNC_L2_OFFSET + 22) +#define RZN1_FUNC_UART2_E (RZN1_FUNC_L2_OFFSET + 23) +#define RZN1_FUNC_UART3 (RZN1_FUNC_L2_OFFSET + 24) +#define RZN1_FUNC_UART3_E (RZN1_FUNC_L2_OFFSET + 25) +#define RZN1_FUNC_UART4 (RZN1_FUNC_L2_OFFSET + 26) +#define RZN1_FUNC_UART4_E (RZN1_FUNC_L2_OFFSET + 27) +#define RZN1_FUNC_UART5 (RZN1_FUNC_L2_OFFSET + 28) +#define RZN1_FUNC_UART5_E (RZN1_FUNC_L2_OFFSET + 29) +#define RZN1_FUNC_UART6 (RZN1_FUNC_L2_OFFSET + 30) +#define RZN1_FUNC_UART6_E (RZN1_FUNC_L2_OFFSET + 31) +#define RZN1_FUNC_UART7 (RZN1_FUNC_L2_OFFSET + 32) +#define RZN1_FUNC_UART7_E (RZN1_FUNC_L2_OFFSET + 33) +#define RZN1_FUNC_SPI0_M (RZN1_FUNC_L2_OFFSET + 34) +#define RZN1_FUNC_SPI0_M_E (RZN1_FUNC_L2_OFFSET + 35) +#define RZN1_FUNC_SPI1_M (RZN1_FUNC_L2_OFFSET + 36) +#define RZN1_FUNC_SPI1_M_E (RZN1_FUNC_L2_OFFSET + 37) +#define RZN1_FUNC_SPI2_M (RZN1_FUNC_L2_OFFSET + 38) +#define RZN1_FUNC_SPI2_M_E (RZN1_FUNC_L2_OFFSET + 39) +#define RZN1_FUNC_SPI3_M (RZN1_FUNC_L2_OFFSET + 40) +#define RZN1_FUNC_SPI3_M_E (RZN1_FUNC_L2_OFFSET + 41) +#define RZN1_FUNC_SPI4_S (RZN1_FUNC_L2_OFFSET + 42) +#define RZN1_FUNC_SPI4_S_E (RZN1_FUNC_L2_OFFSET + 43) +#define RZN1_FUNC_SPI5_S (RZN1_FUNC_L2_OFFSET + 44) +#define RZN1_FUNC_SPI5_S_E (RZN1_FUNC_L2_OFFSET + 45) +#define RZN1_FUNC_SGPIO0_M (RZN1_FUNC_L2_OFFSET + 46) +#define RZN1_FUNC_SGPIO1_M (RZN1_FUNC_L2_OFFSET + 47) +#define 
RZN1_FUNC_GPIO (RZN1_FUNC_L2_OFFSET + 48) +#define RZN1_FUNC_CAN (RZN1_FUNC_L2_OFFSET + 49) +#define RZN1_FUNC_I2C (RZN1_FUNC_L2_OFFSET + 50) +#define RZN1_FUNC_SAFE (RZN1_FUNC_L2_OFFSET + 51) +#define RZN1_FUNC_PTO_PWM (RZN1_FUNC_L2_OFFSET + 52) +#define RZN1_FUNC_PTO_PWM1 (RZN1_FUNC_L2_OFFSET + 53) +#define RZN1_FUNC_PTO_PWM2 (RZN1_FUNC_L2_OFFSET + 54) +#define RZN1_FUNC_PTO_PWM3 (RZN1_FUNC_L2_OFFSET + 55) +#define RZN1_FUNC_PTO_PWM4 (RZN1_FUNC_L2_OFFSET + 56) +#define RZN1_FUNC_DELTA_SIGMA (RZN1_FUNC_L2_OFFSET + 57) +#define RZN1_FUNC_SGPIO2_M (RZN1_FUNC_L2_OFFSET + 58) +#define RZN1_FUNC_SGPIO3_M (RZN1_FUNC_L2_OFFSET + 59) +#define RZN1_FUNC_SGPIO4_S (RZN1_FUNC_L2_OFFSET + 60) +#define RZN1_FUNC_MAC_MTIP_SWITCH (RZN1_FUNC_L2_OFFSET + 61) + +#define RZN1_FUNC_MDIO_OFFSET (RZN1_FUNC_L2_OFFSET + 62) + +/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO function */ +#define RZN1_FUNC_MDIO0_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 0) +#define RZN1_FUNC_MDIO0_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 1) +#define RZN1_FUNC_MDIO0_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 2) +#define RZN1_FUNC_MDIO0_ECAT (RZN1_FUNC_MDIO_OFFSET + 3) +#define RZN1_FUNC_MDIO0_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 4) +#define RZN1_FUNC_MDIO0_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 5) +#define RZN1_FUNC_MDIO0_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 6) +#define RZN1_FUNC_MDIO0_SWITCH (RZN1_FUNC_MDIO_OFFSET + 7) +/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */ +#define RZN1_FUNC_MDIO0_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 8) +#define RZN1_FUNC_MDIO0_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 9) +#define RZN1_FUNC_MDIO0_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 10) +#define RZN1_FUNC_MDIO0_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 11) +#define RZN1_FUNC_MDIO0_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 12) +#define RZN1_FUNC_MDIO0_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 13) +#define RZN1_FUNC_MDIO0_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 14) +#define RZN1_FUNC_MDIO0_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 15) + +/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO function */ +#define RZN1_FUNC_MDIO1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 16) +#define RZN1_FUNC_MDIO1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 17) +#define RZN1_FUNC_MDIO1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 18) +#define RZN1_FUNC_MDIO1_ECAT (RZN1_FUNC_MDIO_OFFSET + 19) +#define RZN1_FUNC_MDIO1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 20) +#define RZN1_FUNC_MDIO1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 21) +#define RZN1_FUNC_MDIO1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 22) +#define RZN1_FUNC_MDIO1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 23) +/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */ +#define RZN1_FUNC_MDIO1_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 24) +#define RZN1_FUNC_MDIO1_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 25) +#define RZN1_FUNC_MDIO1_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 26) +#define RZN1_FUNC_MDIO1_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 27) +#define RZN1_FUNC_MDIO1_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 28) +#define RZN1_FUNC_MDIO1_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 29) +#define RZN1_FUNC_MDIO1_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 30) +#define RZN1_FUNC_MDIO1_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 31) + +#define RZN1_FUNC_MAX (RZN1_FUNC_MDIO_OFFSET + 32) + +#endif /* __DT_BINDINGS_RZN1_PINCTRL_H */ diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h new file mode 100644 index 0000000..b183250 --- /dev/null +++ b/include/dt-bindings/pinctrl/samsung.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Samsung's Exynos pinctrl bindings + * + * Copyright (c) 2016 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * Author: Krzysztof Kozlowski + */ + +#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__ +#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__ + +#define EXYNOS_PIN_PULL_NONE 0 +#define EXYNOS_PIN_PULL_DOWN 1 +#define EXYNOS_PIN_PULL_UP 3 + +#define S3C64XX_PIN_PULL_NONE 0 +#define S3C64XX_PIN_PULL_DOWN 1 +#define S3C64XX_PIN_PULL_UP 2 + +/* Pin function in power down mode */ +#define EXYNOS_PIN_PDN_OUT0 0 +#define EXYNOS_PIN_PDN_OUT1 1 +#define EXYNOS_PIN_PDN_INPUT 2 +#define EXYNOS_PIN_PDN_PREV 3 + +/* Drive strengths for Exynos3250, Exynos4 (all) and Exynos5250 */ +#define EXYNOS4_PIN_DRV_LV1 0 +#define EXYNOS4_PIN_DRV_LV2 2 +#define EXYNOS4_PIN_DRV_LV3 1 +#define EXYNOS4_PIN_DRV_LV4 3 + +/* Drive strengths for Exynos5260 */ +#define EXYNOS5260_PIN_DRV_LV1 0 +#define EXYNOS5260_PIN_DRV_LV2 1 +#define EXYNOS5260_PIN_DRV_LV4 2 +#define EXYNOS5260_PIN_DRV_LV6 3 + +/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */ +#define EXYNOS5420_PIN_DRV_LV1 0 +#define EXYNOS5420_PIN_DRV_LV2 1 +#define EXYNOS5420_PIN_DRV_LV3 2 +#define EXYNOS5420_PIN_DRV_LV4 3 + +/* Drive strengths for Exynos5433 */ +#define EXYNOS5433_PIN_DRV_FAST_SR1 0 +#define EXYNOS5433_PIN_DRV_FAST_SR2 1 +#define EXYNOS5433_PIN_DRV_FAST_SR3 2 +#define EXYNOS5433_PIN_DRV_FAST_SR4 3 +#define EXYNOS5433_PIN_DRV_FAST_SR5 4 +#define EXYNOS5433_PIN_DRV_FAST_SR6 5 +#define EXYNOS5433_PIN_DRV_SLOW_SR1 8 +#define EXYNOS5433_PIN_DRV_SLOW_SR2 9 +#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa +#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb +#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc +#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf + +#define EXYNOS_PIN_FUNC_INPUT 0 +#define EXYNOS_PIN_FUNC_OUTPUT 1 +#define EXYNOS_PIN_FUNC_2 2 +#define EXYNOS_PIN_FUNC_3 3 +#define EXYNOS_PIN_FUNC_4 4 +#define EXYNOS_PIN_FUNC_5 5 +#define EXYNOS_PIN_FUNC_6 6 +#define EXYNOS_PIN_FUNC_EINT 0xf +#define EXYNOS_PIN_FUNC_F EXYNOS_PIN_FUNC_EINT + +/* Drive strengths for Exynos7 FSYS1 block */ +#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 +#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 +#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 +#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 +#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 +#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 + +#endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h new file mode 100644 index 0000000..e6fb8ad --- /dev/null +++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (C) STMicroelectronics 2017 - All Rights Reserved + * Author: Torgue Alexandre for STMicroelectronics. 
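+ *
+ * The STM32_PINMUX() macro below packs a bank letter, a line number and
+ * a mode into a single cell; for example, STM32_PINMUX('A', 9, AF7)
+ * encodes pin PA9 with alternate function 7 as (9 << 8) | 0x8 = 0x908
+ * (an illustrative example; actual AF assignments depend on the SoC).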
+ */ + +#ifndef _DT_BINDINGS_STM32_PINFUNC_H +#define _DT_BINDINGS_STM32_PINFUNC_H + +/* define PIN modes */ +#define GPIO 0x0 +#define AF0 0x1 +#define AF1 0x2 +#define AF2 0x3 +#define AF3 0x4 +#define AF4 0x5 +#define AF5 0x6 +#define AF6 0x7 +#define AF7 0x8 +#define AF8 0x9 +#define AF9 0xa +#define AF10 0xb +#define AF11 0xc +#define AF12 0xd +#define AF13 0xe +#define AF14 0xf +#define AF15 0x10 +#define ANALOG 0x11 + +/* define Pins number*/ +#define PIN_NO(port, line) (((port) - 'A') * 0x10 + (line)) + +#define STM32_PINMUX(port, line, mode) (((PIN_NO(port, line)) << 8) | (mode)) + +/* package information */ +#define STM32MP_PKG_AA 0x1 +#define STM32MP_PKG_AB 0x2 +#define STM32MP_PKG_AC 0x4 +#define STM32MP_PKG_AD 0x8 + +#endif /* _DT_BINDINGS_STM32_PINFUNC_H */ + diff --git a/include/dt-bindings/pinctrl/sun4i-a10.h b/include/dt-bindings/pinctrl/sun4i-a10.h new file mode 100644 index 0000000..f7553c1 --- /dev/null +++ b/include/dt-bindings/pinctrl/sun4i-a10.h @@ -0,0 +1,62 @@ +/* + * Copyright 2014 Maxime Ripard + * + * Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this file; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ +#define __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ + +#define SUN4I_PINCTRL_10_MA 0 +#define SUN4I_PINCTRL_20_MA 1 +#define SUN4I_PINCTRL_30_MA 2 +#define SUN4I_PINCTRL_40_MA 3 + +#define SUN4I_PINCTRL_NO_PULL 0 +#define SUN4I_PINCTRL_PULL_UP 1 +#define SUN4I_PINCTRL_PULL_DOWN 2 + +#endif /* __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ */ diff --git a/include/dt-bindings/power/imx7-power.h b/include/dt-bindings/power/imx7-power.h new file mode 100644 index 0000000..597c1aa --- /dev/null +++ b/include/dt-bindings/power/imx7-power.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Impinj + */ + +#ifndef __DT_BINDINGS_IMX7_POWER_H__ +#define __DT_BINDINGS_IMX7_POWER_H__ + +#define IMX7_POWER_DOMAIN_MIPI_PHY 0 +#define IMX7_POWER_DOMAIN_PCIE_PHY 1 +#define IMX7_POWER_DOMAIN_USB_HSIC_PHY 2 + +#endif diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h new file mode 100644 index 0000000..8a513bd --- /dev/null +++ b/include/dt-bindings/power/imx8mq-power.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2018 Pengutronix, Lucas Stach + */ + +#ifndef __DT_BINDINGS_IMX8MQ_POWER_H__ +#define __DT_BINDINGS_IMX8MQ_POWER_H__ + +#define IMX8M_POWER_DOMAIN_MIPI 0 +#define IMX8M_POWER_DOMAIN_PCIE1 1 +#define IMX8M_POWER_DOMAIN_USB_OTG1 2 +#define IMX8M_POWER_DOMAIN_USB_OTG2 3 +#define IMX8M_POWER_DOMAIN_DDR1 4 +#define IMX8M_POWER_DOMAIN_GPU 5 +#define IMX8M_POWER_DOMAIN_VPU 6 +#define IMX8M_POWER_DOMAIN_DISP 7 +#define IMX8M_POWER_DOMAIN_MIPI_CSI1 8 +#define IMX8M_POWER_DOMAIN_MIPI_CSI2 9 +#define IMX8M_POWER_DOMAIN_PCIE2 10 + +#endif diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h new file mode 100644 index 0000000..bb5e67a --- /dev/null +++ b/include/dt-bindings/power/meson-g12a-power.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2019 BayLibre, SAS + * Author: Neil Armstrong + */ + +#ifndef _DT_BINDINGS_MESON_G12A_POWER_H +#define _DT_BINDINGS_MESON_G12A_POWER_H + +#define PWRC_G12A_VPU_ID 0 +#define PWRC_G12A_ETH_ID 1 + +#endif diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h new file mode 100644 index 0000000..a020ab0 --- /dev/null +++ b/include/dt-bindings/power/meson-sm1-power.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2019 BayLibre, SAS + * Author: Neil Armstrong + */ + +#ifndef _DT_BINDINGS_MESON_SM1_POWER_H +#define _DT_BINDINGS_MESON_SM1_POWER_H + +#define PWRC_SM1_VPU_ID 0 +#define PWRC_SM1_NNA_ID 1 +#define PWRC_SM1_USB_ID 2 +#define PWRC_SM1_PCIE_ID 3 +#define PWRC_SM1_GE2D_ID 4 +#define PWRC_SM1_AUDIO_ID 5 +#define PWRC_SM1_ETH_ID 6 + +#endif diff --git a/include/dt-bindings/power/mt2701-power.h b/include/dt-bindings/power/mt2701-power.h new file mode 100644 index 0000000..09e16f8 --- /dev/null +++ b/include/dt-bindings/power/mt2701-power.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 MediaTek Inc. 
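+ *
+ * These indices are consumed through the standard power-domains
+ * property; a consumer sketch (the scpsys label is a placeholder):
+ *
+ *   power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;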
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT2701_POWER_H
+#define _DT_BINDINGS_POWER_MT2701_POWER_H
+
+#define MT2701_POWER_DOMAIN_CONN 0
+#define MT2701_POWER_DOMAIN_DISP 1
+#define MT2701_POWER_DOMAIN_MFG 2
+#define MT2701_POWER_DOMAIN_VDEC 3
+#define MT2701_POWER_DOMAIN_ISP 4
+#define MT2701_POWER_DOMAIN_BDP 5
+#define MT2701_POWER_DOMAIN_ETH 6
+#define MT2701_POWER_DOMAIN_HIF 7
+#define MT2701_POWER_DOMAIN_IFR_MSC 8
+
+#endif /* _DT_BINDINGS_POWER_MT2701_POWER_H */
diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h
new file mode 100644
index 0000000..95bdb1c
--- /dev/null
+++ b/include/dt-bindings/power/mt2712-power.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT2712_POWER_H
+#define _DT_BINDINGS_POWER_MT2712_POWER_H
+
+#define MT2712_POWER_DOMAIN_MM 0
+#define MT2712_POWER_DOMAIN_VDEC 1
+#define MT2712_POWER_DOMAIN_VENC 2
+#define MT2712_POWER_DOMAIN_ISP 3
+#define MT2712_POWER_DOMAIN_AUDIO 4
+#define MT2712_POWER_DOMAIN_USB 5
+#define MT2712_POWER_DOMAIN_USB2 6
+#define MT2712_POWER_DOMAIN_MFG 7
+#define MT2712_POWER_DOMAIN_MFG_SC1 8
+#define MT2712_POWER_DOMAIN_MFG_SC2 9
+#define MT2712_POWER_DOMAIN_MFG_SC3 10
+
+#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */
diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h
new file mode 100644
index 0000000..a60c1d8
--- /dev/null
+++ b/include/dt-bindings/power/mt6797-power.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Mars.C
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT6797_POWER_H
+#define _DT_BINDINGS_POWER_MT6797_POWER_H
+
+#define MT6797_POWER_DOMAIN_VDEC 0
+#define MT6797_POWER_DOMAIN_VENC 1
+#define MT6797_POWER_DOMAIN_ISP 2
+#define MT6797_POWER_DOMAIN_MM 3
+#define MT6797_POWER_DOMAIN_AUDIO 4
+#define MT6797_POWER_DOMAIN_MFG_ASYNC 5
+#define MT6797_POWER_DOMAIN_MFG 6
+#define MT6797_POWER_DOMAIN_MFG_CORE0 7
+#define MT6797_POWER_DOMAIN_MFG_CORE1 8
+#define MT6797_POWER_DOMAIN_MFG_CORE2 9
+#define MT6797_POWER_DOMAIN_MFG_CORE3 10
+#define MT6797_POWER_DOMAIN_MJC 11
+
+#endif /* _DT_BINDINGS_POWER_MT6797_POWER_H */
diff --git a/include/dt-bindings/power/mt7622-power.h b/include/dt-bindings/power/mt7622-power.h
new file mode 100644
index 0000000..ffad81a
--- /dev/null
+++ b/include/dt-bindings/power/mt7622-power.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */ + +#ifndef _DT_BINDINGS_POWER_MT7622_POWER_H +#define _DT_BINDINGS_POWER_MT7622_POWER_H + +#define MT7622_POWER_DOMAIN_ETHSYS 0 +#define MT7622_POWER_DOMAIN_HIF0 1 +#define MT7622_POWER_DOMAIN_HIF1 2 +#define MT7622_POWER_DOMAIN_WB 3 + +#endif /* _DT_BINDINGS_POWER_MT7622_POWER_H */ diff --git a/include/dt-bindings/power/mt7623a-power.h b/include/dt-bindings/power/mt7623a-power.h new file mode 100644 index 0000000..2544822 --- /dev/null +++ b/include/dt-bindings/power/mt7623a-power.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_POWER_MT7623A_POWER_H +#define _DT_BINDINGS_POWER_MT7623A_POWER_H + +#define MT7623A_POWER_DOMAIN_CONN 0 +#define MT7623A_POWER_DOMAIN_ETH 1 +#define MT7623A_POWER_DOMAIN_HIF 2 +#define MT7623A_POWER_DOMAIN_IFR_MSC 3 + +#endif /* _DT_BINDINGS_POWER_MT7623A_POWER_H */ diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h new file mode 100644 index 0000000..ef4a7f9 --- /dev/null +++ b/include/dt-bindings/power/mt8173-power.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H +#define _DT_BINDINGS_POWER_MT8173_POWER_H + +#define MT8173_POWER_DOMAIN_VDEC 0 +#define MT8173_POWER_DOMAIN_VENC 1 +#define MT8173_POWER_DOMAIN_ISP 2 +#define MT8173_POWER_DOMAIN_MM 3 +#define MT8173_POWER_DOMAIN_VENC_LT 4 +#define MT8173_POWER_DOMAIN_AUDIO 5 +#define MT8173_POWER_DOMAIN_USB 6 +#define MT8173_POWER_DOMAIN_MFG_ASYNC 7 +#define MT8173_POWER_DOMAIN_MFG_2D 8 +#define MT8173_POWER_DOMAIN_MFG 9 + +#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */ diff --git a/include/dt-bindings/power/owl-s500-powergate.h b/include/dt-bindings/power/owl-s500-powergate.h new file mode 100644 index 0000000..0a1c451 --- /dev/null +++ b/include/dt-bindings/power/owl-s500-powergate.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2017 Andreas Färber + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ +#ifndef DT_BINDINGS_POWER_OWL_S500_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S500_POWERGATE_H + +#define S500_PD_VDE 0 +#define S500_PD_VCE_SI 1 +#define S500_PD_USB2_1 2 +#define S500_PD_CPU2 3 +#define S500_PD_CPU3 4 +#define S500_PD_DMA 5 +#define S500_PD_DS 6 +#define S500_PD_USB3 7 +#define S500_PD_USB2_0 8 + +#endif diff --git a/include/dt-bindings/power/owl-s700-powergate.h b/include/dt-bindings/power/owl-s700-powergate.h new file mode 100644 index 0000000..4cf1aef --- /dev/null +++ b/include/dt-bindings/power/owl-s700-powergate.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Actions Semi S700 SPS + * + * Copyright (c) 2017 Andreas Färber + */ +#ifndef DT_BINDINGS_POWER_OWL_S700_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S700_POWERGATE_H + +#define S700_PD_VDE 0 +#define S700_PD_VCE_SI 1 +#define S700_PD_USB2_1 2 +#define S700_PD_HDE 3 +#define S700_PD_DMA 4 +#define S700_PD_DS 5 +#define S700_PD_USB3 6 +#define S700_PD_USB2_0 7 + +#endif diff --git a/include/dt-bindings/power/owl-s900-powergate.h b/include/dt-bindings/power/owl-s900-powergate.h new file mode 100644 index 0000000..d939bd9 --- /dev/null +++ b/include/dt-bindings/power/owl-s900-powergate.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */ +/* + * Actions Semi S900 SPS + * + * Copyright (c) 2018 Linaro Ltd. 
+ */ +#ifndef DT_BINDINGS_POWER_OWL_S900_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S900_POWERGATE_H + +#define S900_PD_GPU_B 0 +#define S900_PD_VCE 1 +#define S900_PD_SENSOR 2 +#define S900_PD_VDE 3 +#define S900_PD_HDE 4 +#define S900_PD_USB3 5 +#define S900_PD_DDR0 6 +#define S900_PD_DDR1 7 +#define S900_PD_DE 8 +#define S900_PD_NAND 9 +#define S900_PD_USB2_H0 10 +#define S900_PD_USB2_H1 11 + +#endif diff --git a/include/dt-bindings/power/px30-power.h b/include/dt-bindings/power/px30-power.h new file mode 100644 index 0000000..30917a9 --- /dev/null +++ b/include/dt-bindings/power/px30-power.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_PX30_POWER_H__ +#define __DT_BINDINGS_POWER_PX30_POWER_H__ + +/* VD_CORE */ +#define PX30_PD_A35_0 0 +#define PX30_PD_A35_1 1 +#define PX30_PD_A35_2 2 +#define PX30_PD_A35_3 3 +#define PX30_PD_SCU 4 + +/* VD_LOGIC */ +#define PX30_PD_USB 5 +#define PX30_PD_DDR 6 +#define PX30_PD_SDCARD 7 +#define PX30_PD_CRYPTO 8 +#define PX30_PD_GMAC 9 +#define PX30_PD_MMC_NAND 10 +#define PX30_PD_VPU 11 +#define PX30_PD_VO 12 +#define PX30_PD_VI 13 +#define PX30_PD_GPU 14 + +/* VD_PMU */ +#define PX30_PD_PMU 15 + +#endif diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h new file mode 100644 index 0000000..ec336d3 --- /dev/null +++ b/include/dt-bindings/power/qcom-aoss-qmp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Linaro Ltd. */ + +#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H +#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H + +#define AOSS_QMP_LS_CDSP 0 +#define AOSS_QMP_LS_LPASS 1 +#define AOSS_QMP_LS_MODEM 2 +#define AOSS_QMP_LS_SLPI 3 +#define AOSS_QMP_LS_SPSS 4 +#define AOSS_QMP_LS_VENUS 5 + +#endif diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h new file mode 100644 index 0000000..93e36d0 --- /dev/null +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
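+ *
+ * A consumer sketch (the rpmhpd label is a placeholder): a device binds
+ * to a domain with
+ *
+ *   power-domains = <&rpmhpd SDM845_CX>;
+ *
+ * while the RPMH_REGULATOR_LEVEL_* and RPM_SMD_LEVEL_* values below are
+ * the raw corner numbers an OPP table of such a domain would carry,
+ * e.g. opp-level = <RPMH_REGULATOR_LEVEL_SVS>;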
 */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+
+/* SDM845 Power Domain Indexes */
+#define SDM845_EBI 0
+#define SDM845_MX 1
+#define SDM845_MX_AO 2
+#define SDM845_CX 3
+#define SDM845_CX_AO 4
+#define SDM845_LMX 5
+#define SDM845_LCX 6
+#define SDM845_GFX 7
+#define SDM845_MSS 8
+
+/* SDM845 Power Domain performance levels */
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_NOM 256
+#define RPMH_REGULATOR_LEVEL_NOM_L1 320
+#define RPMH_REGULATOR_LEVEL_NOM_L2 336
+#define RPMH_REGULATOR_LEVEL_TURBO 384
+#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+
+/* MSM8996 Power Domain Indexes */
+#define MSM8996_VDDCX 0
+#define MSM8996_VDDCX_AO 1
+#define MSM8996_VDDCX_VFC 2
+#define MSM8996_VDDMX 3
+#define MSM8996_VDDMX_AO 4
+#define MSM8996_VDDSSCX 5
+#define MSM8996_VDDSSCX_VFC 6
+
+/* MSM8998 Power Domain Indexes */
+#define MSM8998_VDDCX 0
+#define MSM8998_VDDCX_AO 1
+#define MSM8998_VDDCX_VFL 2
+#define MSM8998_VDDMX 3
+#define MSM8998_VDDMX_AO 4
+#define MSM8998_VDDMX_VFL 5
+#define MSM8998_SSCCX 6
+#define MSM8998_SSCCX_VFL 7
+#define MSM8998_SSCMX 8
+#define MSM8998_SSCMX_VFL 9
+
+/* QCS404 Power Domains */
+#define QCS404_VDDMX 0
+#define QCS404_VDDMX_AO 1
+#define QCS404_VDDMX_VFL 2
+#define QCS404_LPICX 3
+#define QCS404_LPICX_VFL 4
+#define QCS404_LPIMX 5
+#define QCS404_LPIMX_VFL 6
+
+/* RPM SMD Power Domain performance levels */
+#define RPM_SMD_LEVEL_RETENTION 16
+#define RPM_SMD_LEVEL_RETENTION_PLUS 32
+#define RPM_SMD_LEVEL_MIN_SVS 48
+#define RPM_SMD_LEVEL_LOW_SVS 64
+#define RPM_SMD_LEVEL_SVS 128
+#define RPM_SMD_LEVEL_SVS_PLUS 192
+#define RPM_SMD_LEVEL_NOM 256
+#define RPM_SMD_LEVEL_NOM_PLUS 320
+#define RPM_SMD_LEVEL_TURBO 384
+#define RPM_SMD_LEVEL_TURBO_NO_CPR 416
+#define RPM_SMD_LEVEL_BINNING 512
+
+#endif
diff --git a/include/dt-bindings/power/r8a7743-sysc.h b/include/dt-bindings/power/r8a7743-sysc.h
new file mode 100644
index 0000000..1b86393
--- /dev/null
+++ b/include/dt-bindings/power/r8a7743-sysc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7743_PD_CA15_CPU0 0
+#define R8A7743_PD_CA15_CPU1 1
+#define R8A7743_PD_CA15_SCU 12
+#define R8A7743_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7743_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7743_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7744-sysc.h b/include/dt-bindings/power/r8a7744-sysc.h
new file mode 100644
index 0000000..8b65297
--- /dev/null
+++ b/include/dt-bindings/power/r8a7744-sysc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7744_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7744_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ *
+ * Note that RZ/G1N is identical to RZ/G1M w.r.t. power domains.
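+ *
+ * For example, BIT(R8A7744_PD_CA15_SCU) would select the SYSCISR bit
+ * for the Cortex-A15 SCU power area (illustrative; the SYSC driver
+ * performs this mapping internally).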
+ */ + +#define R8A7744_PD_CA15_CPU0 0 +#define R8A7744_PD_CA15_CPU1 1 +#define R8A7744_PD_CA15_SCU 12 +#define R8A7744_PD_SGX 20 + +/* Always-on power area */ +#define R8A7744_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7744_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7745-sysc.h b/include/dt-bindings/power/r8a7745-sysc.h new file mode 100644 index 0000000..725ad35 --- /dev/null +++ b/include/dt-bindings/power/r8a7745-sysc.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Cogent Embedded Inc. + */ +#ifndef __DT_BINDINGS_POWER_R8A7745_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7745_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7745_PD_CA7_CPU0 5 +#define R8A7745_PD_CA7_CPU1 6 +#define R8A7745_PD_SGX 20 +#define R8A7745_PD_CA7_SCU 21 + +/* Always-on power area */ +#define R8A7745_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7745_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77470-sysc.h b/include/dt-bindings/power/r8a77470-sysc.h new file mode 100644 index 0000000..8bf4db1 --- /dev/null +++ b/include/dt-bindings/power/r8a77470-sysc.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A77470_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77470_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77470_PD_CA7_CPU0 5 +#define R8A77470_PD_CA7_CPU1 6 +#define R8A77470_PD_SGX 20 +#define R8A77470_PD_CA7_SCU 21 + +/* Always-on power area */ +#define R8A77470_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77470_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a774a1-sysc.h b/include/dt-bindings/power/r8a774a1-sysc.h new file mode 100644 index 0000000..580f431 --- /dev/null +++ b/include/dt-bindings/power/r8a774a1-sysc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A774A1_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A774A1_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A774A1_PD_CA57_CPU0 0 +#define R8A774A1_PD_CA57_CPU1 1 +#define R8A774A1_PD_CA53_CPU0 5 +#define R8A774A1_PD_CA53_CPU1 6 +#define R8A774A1_PD_CA53_CPU2 7 +#define R8A774A1_PD_CA53_CPU3 8 +#define R8A774A1_PD_CA57_SCU 12 +#define R8A774A1_PD_A3VC 14 +#define R8A774A1_PD_3DG_A 17 +#define R8A774A1_PD_3DG_B 18 +#define R8A774A1_PD_CA53_SCU 21 +#define R8A774A1_PD_A2VC0 25 +#define R8A774A1_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A774A1_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A774A1_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a774c0-sysc.h b/include/dt-bindings/power/r8a774c0-sysc.h new file mode 100644 index 0000000..9922d4c --- /dev/null +++ b/include/dt-bindings/power/r8a774c0-sysc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_POWER_R8A774C0_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A774C0_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A774C0_PD_CA53_CPU0 5 +#define R8A774C0_PD_CA53_CPU1 6 +#define R8A774C0_PD_A3VC 14 +#define R8A774C0_PD_3DG_A 17 +#define R8A774C0_PD_3DG_B 18 +#define R8A774C0_PD_CA53_SCU 21 +#define R8A774C0_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A774C0_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A774C0_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7779-sysc.h b/include/dt-bindings/power/r8a7779-sysc.h new file mode 100644 index 0000000..c4f528b --- /dev/null +++ b/include/dt-bindings/power/r8a7779-sysc.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7779_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7779_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7779_PD_ARM1 1 +#define R8A7779_PD_ARM2 2 +#define R8A7779_PD_ARM3 3 +#define R8A7779_PD_SGX 20 +#define R8A7779_PD_VDP 21 +#define R8A7779_PD_IMP 24 + +/* Always-on power area */ +#define R8A7779_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7779_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7790-sysc.h b/include/dt-bindings/power/r8a7790-sysc.h new file mode 100644 index 0000000..bcb4905 --- /dev/null +++ b/include/dt-bindings/power/r8a7790-sysc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7790_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7790_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7790_PD_CA15_CPU0 0 +#define R8A7790_PD_CA15_CPU1 1 +#define R8A7790_PD_CA15_CPU2 2 +#define R8A7790_PD_CA15_CPU3 3 +#define R8A7790_PD_CA7_CPU0 5 +#define R8A7790_PD_CA7_CPU1 6 +#define R8A7790_PD_CA7_CPU2 7 +#define R8A7790_PD_CA7_CPU3 8 +#define R8A7790_PD_CA15_SCU 12 +#define R8A7790_PD_SH_4A 16 +#define R8A7790_PD_RGX 20 +#define R8A7790_PD_CA7_SCU 21 +#define R8A7790_PD_IMP 24 + +/* Always-on power area */ +#define R8A7790_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7790_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7791-sysc.h b/include/dt-bindings/power/r8a7791-sysc.h new file mode 100644 index 0000000..1d20fae --- /dev/null +++ b/include/dt-bindings/power/r8a7791-sysc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7791_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7791_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A7791_PD_CA15_CPU0 0 +#define R8A7791_PD_CA15_CPU1 1 +#define R8A7791_PD_CA15_SCU 12 +#define R8A7791_PD_SH_4A 16 +#define R8A7791_PD_SGX 20 + +/* Always-on power area */ +#define R8A7791_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7791_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7792-sysc.h b/include/dt-bindings/power/r8a7792-sysc.h new file mode 100644 index 0000000..dd3a466 --- /dev/null +++ b/include/dt-bindings/power/r8a7792-sysc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Cogent Embedded Inc. + */ +#ifndef __DT_BINDINGS_POWER_R8A7792_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7792_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7792_PD_CA15_CPU0 0 +#define R8A7792_PD_CA15_CPU1 1 +#define R8A7792_PD_CA15_SCU 12 +#define R8A7792_PD_SGX 20 +#define R8A7792_PD_IMP 24 + +/* Always-on power area */ +#define R8A7792_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7792_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7793-sysc.h b/include/dt-bindings/power/r8a7793-sysc.h new file mode 100644 index 0000000..056998c --- /dev/null +++ b/include/dt-bindings/power/r8a7793-sysc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7793_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7793_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + * + * Note that R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. + */ + +#define R8A7793_PD_CA15_CPU0 0 +#define R8A7793_PD_CA15_CPU1 1 +#define R8A7793_PD_CA15_SCU 12 +#define R8A7793_PD_SH_4A 16 +#define R8A7793_PD_SGX 20 + +/* Always-on power area */ +#define R8A7793_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7793_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7794-sysc.h b/include/dt-bindings/power/r8a7794-sysc.h new file mode 100644 index 0000000..4d6c708 --- /dev/null +++ b/include/dt-bindings/power/r8a7794-sysc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7794_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7794_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7794_PD_CA7_CPU0 5 +#define R8A7794_PD_CA7_CPU1 6 +#define R8A7794_PD_SH_4A 16 +#define R8A7794_PD_SGX 20 +#define R8A7794_PD_CA7_SCU 21 + +/* Always-on power area */ +#define R8A7794_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7794_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h new file mode 100644 index 0000000..eea6ad6 --- /dev/null +++ b/include/dt-bindings/power/r8a7795-sysc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7795_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7795_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A7795_PD_CA57_CPU0 0 +#define R8A7795_PD_CA57_CPU1 1 +#define R8A7795_PD_CA57_CPU2 2 +#define R8A7795_PD_CA57_CPU3 3 +#define R8A7795_PD_CA53_CPU0 5 +#define R8A7795_PD_CA53_CPU1 6 +#define R8A7795_PD_CA53_CPU2 7 +#define R8A7795_PD_CA53_CPU3 8 +#define R8A7795_PD_A3VP 9 +#define R8A7795_PD_CA57_SCU 12 +#define R8A7795_PD_CR7 13 +#define R8A7795_PD_A3VC 14 +#define R8A7795_PD_3DG_A 17 +#define R8A7795_PD_3DG_B 18 +#define R8A7795_PD_3DG_C 19 +#define R8A7795_PD_3DG_D 20 +#define R8A7795_PD_CA53_SCU 21 +#define R8A7795_PD_3DG_E 22 +#define R8A7795_PD_A3IR 24 +#define R8A7795_PD_A2VC0 25 /* ES1.x only */ +#define R8A7795_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A7795_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7795_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a7796-sysc.h b/include/dt-bindings/power/r8a7796-sysc.h new file mode 100644 index 0000000..7e6fc06 --- /dev/null +++ b/include/dt-bindings/power/r8a7796-sysc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A7796_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A7796_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A7796_PD_CA57_CPU0 0 +#define R8A7796_PD_CA57_CPU1 1 +#define R8A7796_PD_CA53_CPU0 5 +#define R8A7796_PD_CA53_CPU1 6 +#define R8A7796_PD_CA53_CPU2 7 +#define R8A7796_PD_CA53_CPU3 8 +#define R8A7796_PD_CA57_SCU 12 +#define R8A7796_PD_CR7 13 +#define R8A7796_PD_A3VC 14 +#define R8A7796_PD_3DG_A 17 +#define R8A7796_PD_3DG_B 18 +#define R8A7796_PD_CA53_SCU 21 +#define R8A7796_PD_A3IR 24 +#define R8A7796_PD_A2VC0 25 +#define R8A7796_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A7796_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A7796_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77965-sysc.h b/include/dt-bindings/power/r8a77965-sysc.h new file mode 100644 index 0000000..de82d8a --- /dev/null +++ b/include/dt-bindings/power/r8a77965-sysc.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Jacopo Mondi + * Copyright (C) 2016 Glider bvba + */ + +#ifndef __DT_BINDINGS_POWER_R8A77965_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77965_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77965_PD_CA57_CPU0 0 +#define R8A77965_PD_CA57_CPU1 1 +#define R8A77965_PD_A3VP 9 +#define R8A77965_PD_CA57_SCU 12 +#define R8A77965_PD_CR7 13 +#define R8A77965_PD_A3VC 14 +#define R8A77965_PD_3DG_A 17 +#define R8A77965_PD_3DG_B 18 +#define R8A77965_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A77965_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77965_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77970-sysc.h b/include/dt-bindings/power/r8a77970-sysc.h new file mode 100644 index 0000000..9dcdbd5 --- /dev/null +++ b/include/dt-bindings/power/r8a77970-sysc.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Cogent Embedded Inc. 
+ */ +#ifndef __DT_BINDINGS_POWER_R8A77970_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77970_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77970_PD_CA53_CPU0 5 +#define R8A77970_PD_CA53_CPU1 6 +#define R8A77970_PD_CA53_SCU 21 +#define R8A77970_PD_A2IR0 23 +#define R8A77970_PD_A3IR 24 +#define R8A77970_PD_A2IR1 27 +#define R8A77970_PD_A2DP 28 +#define R8A77970_PD_A2CN 29 +#define R8A77970_PD_A2SC0 30 +#define R8A77970_PD_A2SC1 31 + +/* Always-on power area */ +#define R8A77970_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77970_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h new file mode 100644 index 0000000..e12c858 --- /dev/null +++ b/include/dt-bindings/power/r8a77980-sysc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018 Cogent Embedded, Inc. + */ +#ifndef __DT_BINDINGS_POWER_R8A77980_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77980_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77980_PD_A2SC2 0 +#define R8A77980_PD_A2SC3 1 +#define R8A77980_PD_A2SC4 2 +#define R8A77980_PD_A2DP0 3 +#define R8A77980_PD_A2DP1 4 +#define R8A77980_PD_CA53_CPU0 5 +#define R8A77980_PD_CA53_CPU1 6 +#define R8A77980_PD_CA53_CPU2 7 +#define R8A77980_PD_CA53_CPU3 8 +#define R8A77980_PD_A2CN 10 +#define R8A77980_PD_A3VIP0 11 +#define R8A77980_PD_A2IR5 12 +#define R8A77980_PD_CR7 13 +#define R8A77980_PD_A2IR4 15 +#define R8A77980_PD_CA53_SCU 21 +#define R8A77980_PD_A2IR0 23 +#define R8A77980_PD_A3IR 24 +#define R8A77980_PD_A3VIP1 25 +#define R8A77980_PD_A3VIP2 26 +#define R8A77980_PD_A2IR1 27 +#define R8A77980_PD_A2IR2 28 +#define R8A77980_PD_A2IR3 29 +#define R8A77980_PD_A2SC0 30 +#define R8A77980_PD_A2SC1 31 + +/* Always-on power area */ +#define R8A77980_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77980_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77990-sysc.h b/include/dt-bindings/power/r8a77990-sysc.h new file mode 100644 index 0000000..944d85b --- /dev/null +++ b/include/dt-bindings/power/r8a77990-sysc.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A77990_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77990_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A77990_PD_CA53_CPU0 5 +#define R8A77990_PD_CA53_CPU1 6 +#define R8A77990_PD_CR7 13 +#define R8A77990_PD_A3VC 14 +#define R8A77990_PD_3DG_A 17 +#define R8A77990_PD_3DG_B 18 +#define R8A77990_PD_CA53_SCU 21 +#define R8A77990_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A77990_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77990_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77995-sysc.h b/include/dt-bindings/power/r8a77995-sysc.h new file mode 100644 index 0000000..f2b3550 --- /dev/null +++ b/include/dt-bindings/power/r8a77995-sysc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Glider bvba + */ +#ifndef __DT_BINDINGS_POWER_R8A77995_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77995_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77995_PD_CA53_CPU0 5 +#define R8A77995_PD_CA53_SCU 21 + +/* Always-on power area */ +#define R8A77995_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77995_SYSC_H__ */ diff --git a/include/dt-bindings/power/raspberrypi-power.h b/include/dt-bindings/power/raspberrypi-power.h new file mode 100644 index 0000000..3575f9f --- /dev/null +++ b/include/dt-bindings/power/raspberrypi-power.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2015 Broadcom + */ + +#ifndef _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H +#define _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H + +/* These power domain indices are the firmware interface's indices + * minus one. + */ +#define RPI_POWER_DOMAIN_I2C0 0 +#define RPI_POWER_DOMAIN_I2C1 1 +#define RPI_POWER_DOMAIN_I2C2 2 +#define RPI_POWER_DOMAIN_VIDEO_SCALER 3 +#define RPI_POWER_DOMAIN_VPU1 4 +#define RPI_POWER_DOMAIN_HDMI 5 +#define RPI_POWER_DOMAIN_USB 6 +#define RPI_POWER_DOMAIN_VEC 7 +#define RPI_POWER_DOMAIN_JPEG 8 +#define RPI_POWER_DOMAIN_H264 9 +#define RPI_POWER_DOMAIN_V3D 10 +#define RPI_POWER_DOMAIN_ISP 11 +#define RPI_POWER_DOMAIN_UNICAM0 12 +#define RPI_POWER_DOMAIN_UNICAM1 13 +#define RPI_POWER_DOMAIN_CCP2RX 14 +#define RPI_POWER_DOMAIN_CSI2 15 +#define RPI_POWER_DOMAIN_CPI 16 +#define RPI_POWER_DOMAIN_DSI0 17 +#define RPI_POWER_DOMAIN_DSI1 18 +#define RPI_POWER_DOMAIN_TRANSPOSER 19 +#define RPI_POWER_DOMAIN_CCP2TX 20 +#define RPI_POWER_DOMAIN_CDP 21 +#define RPI_POWER_DOMAIN_ARM 22 + +#define RPI_POWER_DOMAIN_COUNT 23 + +#endif /* _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H */ diff --git a/include/dt-bindings/power/rk3036-power.h b/include/dt-bindings/power/rk3036-power.h new file mode 100644 index 0000000..0bc6b5d --- /dev/null +++ b/include/dt-bindings/power/rk3036-power.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3036_POWER_H__ +#define __DT_BINDINGS_POWER_RK3036_POWER_H__ + +#define RK3036_PD_MSCH 0 +#define RK3036_PD_CORE 1 +#define RK3036_PD_PERI 2 +#define RK3036_PD_VIO 3 +#define RK3036_PD_VPU 4 +#define RK3036_PD_GPU 5 +#define RK3036_PD_SYS 6 + +#endif diff --git a/include/dt-bindings/power/rk3066-power.h b/include/dt-bindings/power/rk3066-power.h new file mode 100644 index 0000000..acf9f31 --- /dev/null +++ b/include/dt-bindings/power/rk3066-power.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3066_POWER_H__ +#define __DT_BINDINGS_POWER_RK3066_POWER_H__ + +/* VD_CORE */ +#define RK3066_PD_A9_0 0 +#define RK3066_PD_A9_1 1 +#define RK3066_PD_DBG 
4 +#define RK3066_PD_SCU 5 + +/* VD_LOGIC */ +#define RK3066_PD_VIDEO 6 +#define RK3066_PD_VIO 7 +#define RK3066_PD_GPU 8 +#define RK3066_PD_PERI 9 +#define RK3066_PD_CPU 10 +#define RK3066_PD_ALIVE 11 + +/* VD_PMU */ +#define RK3066_PD_RTC 12 + +#endif diff --git a/include/dt-bindings/power/rk3128-power.h b/include/dt-bindings/power/rk3128-power.h new file mode 100644 index 0000000..c051dc3 --- /dev/null +++ b/include/dt-bindings/power/rk3128-power.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3128_POWER_H__ +#define __DT_BINDINGS_POWER_RK3128_POWER_H__ + +/* VD_CORE */ +#define RK3128_PD_CORE 0 + +/* VD_LOGIC */ +#define RK3128_PD_VIO 1 +#define RK3128_PD_VIDEO 2 +#define RK3128_PD_GPU 3 +#define RK3128_PD_MSCH 4 + +#endif diff --git a/include/dt-bindings/power/rk3188-power.h b/include/dt-bindings/power/rk3188-power.h new file mode 100644 index 0000000..93d23df --- /dev/null +++ b/include/dt-bindings/power/rk3188-power.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3188_POWER_H__ +#define __DT_BINDINGS_POWER_RK3188_POWER_H__ + +/* VD_CORE */ +#define RK3188_PD_A9_0 0 +#define RK3188_PD_A9_1 1 +#define RK3188_PD_A9_2 2 +#define RK3188_PD_A9_3 3 +#define RK3188_PD_DBG 4 +#define RK3188_PD_SCU 5 + +/* VD_LOGIC */ +#define RK3188_PD_VIDEO 6 +#define RK3188_PD_VIO 7 +#define RK3188_PD_GPU 8 +#define RK3188_PD_PERI 9 +#define RK3188_PD_CPU 10 +#define RK3188_PD_ALIVE 11 + +/* VD_PMU */ +#define RK3188_PD_RTC 12 + +#endif diff --git a/include/dt-bindings/power/rk3228-power.h b/include/dt-bindings/power/rk3228-power.h new file mode 100644 index 0000000..6a8dc1b --- /dev/null +++ b/include/dt-bindings/power/rk3228-power.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3228_POWER_H__ +#define __DT_BINDINGS_POWER_RK3228_POWER_H__ + +/** + * RK3228 idle id Summary. + */ + +#define RK3228_PD_CORE 0 +#define RK3228_PD_MSCH 1 +#define RK3228_PD_BUS 2 +#define RK3228_PD_SYS 3 +#define RK3228_PD_VIO 4 +#define RK3228_PD_VOP 5 +#define RK3228_PD_VPU 6 +#define RK3228_PD_RKVDEC 7 +#define RK3228_PD_GPU 8 +#define RK3228_PD_PERI 9 +#define RK3228_PD_GMAC 10 + +#endif diff --git a/include/dt-bindings/power/rk3288-power.h b/include/dt-bindings/power/rk3288-power.h new file mode 100644 index 0000000..f710b56 --- /dev/null +++ b/include/dt-bindings/power/rk3288-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3288_POWER_H__ +#define __DT_BINDINGS_POWER_RK3288_POWER_H__ + +/** + * RK3288 Power Domain and Voltage Domain Summary. + */ + +/* VD_CORE */ +#define RK3288_PD_A17_0 0 +#define RK3288_PD_A17_1 1 +#define RK3288_PD_A17_2 2 +#define RK3288_PD_A17_3 3 +#define RK3288_PD_SCU 4 +#define RK3288_PD_DEBUG 5 +#define RK3288_PD_MEM 6 + +/* VD_LOGIC */ +#define RK3288_PD_BUS 7 +#define RK3288_PD_PERI 8 +#define RK3288_PD_VIO 9 +#define RK3288_PD_ALIVE 10 +#define RK3288_PD_HEVC 11 +#define RK3288_PD_VIDEO 12 + +/* VD_GPU */ +#define RK3288_PD_GPU 13 + +/* VD_PMU */ +#define RK3288_PD_PMU 14 + +#endif diff --git a/include/dt-bindings/power/rk3328-power.h b/include/dt-bindings/power/rk3328-power.h new file mode 100644 index 0000000..02e3d7f --- /dev/null +++ b/include/dt-bindings/power/rk3328-power.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3328_POWER_H__ +#define __DT_BINDINGS_POWER_RK3328_POWER_H__ + +/** + * RK3328 idle id Summary. 
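+ *
+ * On the provider side these ids typically appear as the reg value of
+ * the power-domain subnodes of the PMU power controller; a sketch:
+ *
+ *   power-domain@RK3328_PD_HEVC {
+ *           reg = <RK3328_PD_HEVC>;
+ *   };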
+ */ +#define RK3328_PD_CORE 0 +#define RK3328_PD_GPU 1 +#define RK3328_PD_BUS 2 +#define RK3328_PD_MSCH 3 +#define RK3328_PD_PERI 4 +#define RK3328_PD_VIDEO 5 +#define RK3328_PD_HEVC 6 +#define RK3328_PD_SYS 7 +#define RK3328_PD_VPU 8 +#define RK3328_PD_VIO 9 + +#endif diff --git a/include/dt-bindings/power/rk3366-power.h b/include/dt-bindings/power/rk3366-power.h new file mode 100644 index 0000000..223a3dc --- /dev/null +++ b/include/dt-bindings/power/rk3366-power.h @@ -0,0 +1,24 @@ +#ifndef __DT_BINDINGS_POWER_RK3366_POWER_H__ +#define __DT_BINDINGS_POWER_RK3366_POWER_H__ + +/* VD_CORE */ +#define RK3366_PD_A53_0 0 +#define RK3366_PD_A53_1 1 +#define RK3366_PD_A53_2 2 +#define RK3366_PD_A53_3 3 + +/* VD_LOGIC */ +#define RK3366_PD_BUS 4 +#define RK3366_PD_PERI 5 +#define RK3366_PD_VIO 6 +#define RK3366_PD_VIDEO 7 +#define RK3366_PD_RKVDEC 8 +#define RK3366_PD_WIFIBT 9 +#define RK3366_PD_VPU 10 +#define RK3366_PD_GPU 11 +#define RK3366_PD_ALIVE 12 + +/* VD_PMU */ +#define RK3366_PD_PMU 13 + +#endif diff --git a/include/dt-bindings/power/rk3368-power.h b/include/dt-bindings/power/rk3368-power.h new file mode 100644 index 0000000..5e602db --- /dev/null +++ b/include/dt-bindings/power/rk3368-power.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3368_POWER_H__ +#define __DT_BINDINGS_POWER_RK3368_POWER_H__ + +/* VD_CORE */ +#define RK3368_PD_A53_L0 0 +#define RK3368_PD_A53_L1 1 +#define RK3368_PD_A53_L2 2 +#define RK3368_PD_A53_L3 3 +#define RK3368_PD_SCU_L 4 +#define RK3368_PD_A53_B0 5 +#define RK3368_PD_A53_B1 6 +#define RK3368_PD_A53_B2 7 +#define RK3368_PD_A53_B3 8 +#define RK3368_PD_SCU_B 9 + +/* VD_LOGIC */ +#define RK3368_PD_BUS 10 +#define RK3368_PD_PERI 11 +#define RK3368_PD_VIO 12 +#define RK3368_PD_ALIVE 13 +#define RK3368_PD_VIDEO 14 +#define RK3368_PD_GPU_0 15 +#define RK3368_PD_GPU_1 16 + +/* VD_PMU */ +#define RK3368_PD_PMU 17 + +#endif diff --git a/include/dt-bindings/power/rk3399-power.h b/include/dt-bindings/power/rk3399-power.h new file mode 100644 index 0000000..aedd8b1 --- /dev/null +++ b/include/dt-bindings/power/rk3399-power.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__ +#define __DT_BINDINGS_POWER_RK3399_POWER_H__ + +/* VD_CORE_L */ +#define RK3399_PD_A53_L0 0 +#define RK3399_PD_A53_L1 1 +#define RK3399_PD_A53_L2 2 +#define RK3399_PD_A53_L3 3 +#define RK3399_PD_SCU_L 4 + +/* VD_CORE_B */ +#define RK3399_PD_A72_B0 5 +#define RK3399_PD_A72_B1 6 +#define RK3399_PD_SCU_B 7 + +/* VD_LOGIC */ +#define RK3399_PD_TCPD0 8 +#define RK3399_PD_TCPD1 9 +#define RK3399_PD_CCI 10 +#define RK3399_PD_CCI0 11 +#define RK3399_PD_CCI1 12 +#define RK3399_PD_PERILP 13 +#define RK3399_PD_PERIHP 14 +#define RK3399_PD_VIO 15 +#define RK3399_PD_VO 16 +#define RK3399_PD_VOPB 17 +#define RK3399_PD_VOPL 18 +#define RK3399_PD_ISP0 19 +#define RK3399_PD_ISP1 20 +#define RK3399_PD_HDCP 21 +#define RK3399_PD_GMAC 22 +#define RK3399_PD_EMMC 23 +#define RK3399_PD_USB3 24 +#define RK3399_PD_EDP 25 +#define RK3399_PD_GIC 26 +#define RK3399_PD_SD 27 +#define RK3399_PD_SDIOAUDIO 28 +#define RK3399_PD_ALIVE 29 + +/* VD_CENTER */ +#define RK3399_PD_CENTER 30 +#define RK3399_PD_VCODEC 31 +#define RK3399_PD_VDU 32 +#define RK3399_PD_RGA 33 +#define RK3399_PD_IEP 34 + +/* VD_GPU */ +#define RK3399_PD_GPU 35 + +/* VD_PMU */ +#define RK3399_PD_PMU 36 + +#endif diff --git a/include/dt-bindings/power/tegra186-powergate.h b/include/dt-bindings/power/tegra186-powergate.h new file mode 100644 index 0000000..31fd3f9 --- 
/dev/null
+++ b/include/dt-bindings/power/tegra186-powergate.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+#define _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+
+#define TEGRA186_POWER_DOMAIN_AUD 0
+#define TEGRA186_POWER_DOMAIN_DFD 1
+#define TEGRA186_POWER_DOMAIN_DISP 2
+#define TEGRA186_POWER_DOMAIN_DISPB 3
+#define TEGRA186_POWER_DOMAIN_DISPC 4
+#define TEGRA186_POWER_DOMAIN_ISPA 5
+#define TEGRA186_POWER_DOMAIN_NVDEC 6
+#define TEGRA186_POWER_DOMAIN_NVJPG 7
+#define TEGRA186_POWER_DOMAIN_MPE 8
+#define TEGRA186_POWER_DOMAIN_PCX 9
+#define TEGRA186_POWER_DOMAIN_SAX 10
+#define TEGRA186_POWER_DOMAIN_VE 11
+#define TEGRA186_POWER_DOMAIN_VIC 12
+#define TEGRA186_POWER_DOMAIN_XUSBA 13
+#define TEGRA186_POWER_DOMAIN_XUSBB 14
+#define TEGRA186_POWER_DOMAIN_XUSBC 15
+#define TEGRA186_POWER_DOMAIN_GPU 43
+#define TEGRA186_POWER_DOMAIN_MAX 44
+
+#endif
diff --git a/include/dt-bindings/power/tegra194-powergate.h b/include/dt-bindings/power/tegra194-powergate.h
new file mode 100644
index 0000000..8225374
--- /dev/null
+++ b/include/dt-bindings/power/tegra194-powergate.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_POWERGATE_T194_H_
+#define __ABI_MACH_T194_POWERGATE_T194_H_
+
+#define TEGRA194_POWER_DOMAIN_AUD 1
+#define TEGRA194_POWER_DOMAIN_DISP 2
+#define TEGRA194_POWER_DOMAIN_DISPB 3
+#define TEGRA194_POWER_DOMAIN_DISPC 4
+#define TEGRA194_POWER_DOMAIN_ISPA 5
+#define TEGRA194_POWER_DOMAIN_NVDECA 6
+#define TEGRA194_POWER_DOMAIN_NVJPG 7
+#define TEGRA194_POWER_DOMAIN_NVENCA 8
+#define TEGRA194_POWER_DOMAIN_NVENCB 9
+#define TEGRA194_POWER_DOMAIN_NVDECB 10
+#define TEGRA194_POWER_DOMAIN_SAX 11
+#define TEGRA194_POWER_DOMAIN_VE 12
+#define TEGRA194_POWER_DOMAIN_VIC 13
+#define TEGRA194_POWER_DOMAIN_XUSBA 14
+#define TEGRA194_POWER_DOMAIN_XUSBB 15
+#define TEGRA194_POWER_DOMAIN_XUSBC 16
+#define TEGRA194_POWER_DOMAIN_PCIEX8A 17
+#define TEGRA194_POWER_DOMAIN_PCIEX4A 18
+#define TEGRA194_POWER_DOMAIN_PCIEX1A 19
+#define TEGRA194_POWER_DOMAIN_PCIEX8B 21
+#define TEGRA194_POWER_DOMAIN_PVAA 22
+#define TEGRA194_POWER_DOMAIN_PVAB 23
+#define TEGRA194_POWER_DOMAIN_DLAA 24
+#define TEGRA194_POWER_DOMAIN_DLAB 25
+#define TEGRA194_POWER_DOMAIN_CV 26
+#define TEGRA194_POWER_DOMAIN_GPU 27
+#define TEGRA194_POWER_DOMAIN_MAX 28
+
+#endif
diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h
new file mode 100644
index 0000000..0d9a412
--- /dev/null
+++ b/include/dt-bindings/power/xlnx-zynqmp-power.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
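+ *
+ * These node ids are passed to the platform management firmware; a
+ * consumer sketch (the zynqmp_firmware label is an assumed name):
+ *
+ *   power-domains = <&zynqmp_firmware PD_UART_0>;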
+ */
+
+#ifndef _DT_BINDINGS_ZYNQMP_POWER_H
+#define _DT_BINDINGS_ZYNQMP_POWER_H
+
+#define PD_USB_0 22
+#define PD_USB_1 23
+#define PD_TTC_0 24
+#define PD_TTC_1 25
+#define PD_TTC_2 26
+#define PD_TTC_3 27
+#define PD_SATA 28
+#define PD_ETH_0 29
+#define PD_ETH_1 30
+#define PD_ETH_2 31
+#define PD_ETH_3 32
+#define PD_UART_0 33
+#define PD_UART_1 34
+#define PD_SPI_0 35
+#define PD_SPI_1 36
+#define PD_I2C_0 37
+#define PD_I2C_1 38
+#define PD_SD_0 39
+#define PD_SD_1 40
+#define PD_DP 41
+#define PD_GDMA 42
+#define PD_ADMA 43
+#define PD_NAND 44
+#define PD_QSPI 45
+#define PD_GPIO 46
+#define PD_CAN_0 47
+#define PD_CAN_1 48
+#define PD_GPU 58
+#define PD_PCIE 59
+
+#endif
diff --git a/include/dt-bindings/pwm/pwm.h b/include/dt-bindings/pwm/pwm.h
new file mode 100644
index 0000000..ab9a077
--- /dev/null
+++ b/include/dt-bindings/pwm/pwm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for most PWM bindings.
+ *
+ * Most PWM bindings can include a flags cell as part of the PWM specifier.
+ * In most cases, the format of the flags cell uses the standard values
+ * defined in this header.
+ */
+
+#ifndef _DT_BINDINGS_PWM_PWM_H
+#define _DT_BINDINGS_PWM_PWM_H
+
+#define PWM_POLARITY_INVERTED (1 << 0)
+
+#endif
diff --git a/include/dt-bindings/regulator/active-semi,8865-regulator.h b/include/dt-bindings/regulator/active-semi,8865-regulator.h
new file mode 100644
index 0000000..15473db
--- /dev/null
+++ b/include/dt-bindings/regulator/active-semi,8865-regulator.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device Tree binding constants for the ACT8865 PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_ACT8865_H
+#define _DT_BINDINGS_REGULATOR_ACT8865_H
+
+/*
+ * These constants should be used to specify regulator modes in device tree for
+ * ACT8865 regulators as follows:
+ * ACT8865_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it
+ * specifies the usage of fixed-frequency
+ * PWM.
+ *
+ * ACT8865_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it
+ * specifies the usage of normal mode.
+ *
+ * ACT8865_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators; it specifies
+ * the usage of proprietary power-saving
+ * mode.
+ */
+
+#define ACT8865_REGULATOR_MODE_FIXED 1
+#define ACT8865_REGULATOR_MODE_NORMAL 2
+#define ACT8865_REGULATOR_MODE_LOWPOWER 3
+
+#endif
diff --git a/include/dt-bindings/regulator/active-semi,8945a-regulator.h b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
new file mode 100644
index 0000000..9bdba5e
--- /dev/null
+++ b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Microchip Technology, Inc. All rights reserved.
+ *
+ * Device Tree binding constants for the ACT8945A PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_ACT8945A_H
+#define _DT_BINDINGS_REGULATOR_ACT8945A_H
+
+/*
+ * These constants should be used to specify regulator modes in device tree for
+ * ACT8945A regulators as follows:
+ * ACT8945A_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it
+ * specifies the usage of fixed-frequency
+ * PWM.
+ *
+ * ACT8945A_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it
+ * specifies the usage of normal mode.
+ *
+ * ACT8945A_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators; it specifies
+ * the usage of proprietary power-saving
+ * mode.
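+ *
+ * A regulator node selects one of these through the standard
+ * regulator-initial-mode property; a sketch:
+ *
+ *   regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;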
+ */ + +#define ACT8945A_REGULATOR_MODE_FIXED 1 +#define ACT8945A_REGULATOR_MODE_NORMAL 2 +#define ACT8945A_REGULATOR_MODE_LOWPOWER 3 + +#endif diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h new file mode 100644 index 0000000..d0baba1 --- /dev/null +++ b/include/dt-bindings/regulator/maxim,max77802.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2014 Google, Inc + * + * Device Tree binding constants for the Maxim 77802 PMIC regulators + */ + +#ifndef _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H +#define _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H + +/* Regulator operating modes */ +#define MAX77802_OPMODE_LP 1 +#define MAX77802_OPMODE_NORMAL 3 + +#endif /* _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H */ diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h new file mode 100644 index 0000000..86713dc --- /dev/null +++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ + +#ifndef __QCOM_RPMH_REGULATOR_H +#define __QCOM_RPMH_REGULATOR_H + +/* + * These mode constants may be used to specify modes for various RPMh regulator + * device tree properties (e.g. regulator-initial-mode). Each type of regulator + * supports a subset of the possible modes. + * + * %RPMH_REGULATOR_MODE_RET: Retention mode in which only an extremely small + * load current is allowed. This mode is supported + * by LDO and SMPS type regulators. + * %RPMH_REGULATOR_MODE_LPM: Low power mode in which a small load current is + * allowed. This mode corresponds to PFM for SMPS + * and BOB type regulators. This mode is supported + * by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type + * regulators. + * %RPMH_REGULATOR_MODE_AUTO: Auto mode in which the regulator hardware + * automatically switches between LPM and HPM based + * upon the real-time load current. This mode is + * supported by HFSMPS, BOB, and PMIC4 FTSMPS type + * regulators. + * %RPMH_REGULATOR_MODE_HPM: High power mode in which the full rated current + * of the regulator is allowed. This mode + * corresponds to PWM for SMPS and BOB type + * regulators. This mode is supported by all types + * of regulators. + */ +#define RPMH_REGULATOR_MODE_RET 0 +#define RPMH_REGULATOR_MODE_LPM 1 +#define RPMH_REGULATOR_MODE_AUTO 2 +#define RPMH_REGULATOR_MODE_HPM 3 + +#endif diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h new file mode 100644 index 0000000..8804e34 --- /dev/null +++ b/include/dt-bindings/reset-controller/mt8183-resets.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Yong Liang + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8183 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8183 + +/* INFRACFG AO resets */ +#define MT8183_INFRACFG_AO_THERM_SW_RST 0 +#define MT8183_INFRACFG_AO_USB_TOP_SW_RST 1 +#define MT8183_INFRACFG_AO_MM_IOMMU_SW_RST 3 +#define MT8183_INFRACFG_AO_MSDC3_SW_RST 4 +#define MT8183_INFRACFG_AO_MSDC2_SW_RST 5 +#define MT8183_INFRACFG_AO_MSDC1_SW_RST 6 +#define MT8183_INFRACFG_AO_MSDC0_SW_RST 7 +#define MT8183_INFRACFG_AO_APDMA_SW_RST 9 +#define MT8183_INFRACFG_AO_MIMP_D_SW_RST 10 +#define MT8183_INFRACFG_AO_BTIF_SW_RST 12 +#define MT8183_INFRACFG_AO_DISP_PWM_SW_RST 14 +#define MT8183_INFRACFG_AO_AUXADC_SW_RST 15 + +#define MT8183_INFRACFG_AO_IRTX_SW_RST 32 +#define MT8183_INFRACFG_AO_SPI0_SW_RST 33 +#define MT8183_INFRACFG_AO_I2C0_SW_RST 34 +#define MT8183_INFRACFG_AO_I2C1_SW_RST 35 +#define MT8183_INFRACFG_AO_I2C2_SW_RST 36 +#define MT8183_INFRACFG_AO_I2C3_SW_RST 37 +#define MT8183_INFRACFG_AO_UART0_SW_RST 38 +#define MT8183_INFRACFG_AO_UART1_SW_RST 39 +#define MT8183_INFRACFG_AO_UART2_SW_RST 40 +#define MT8183_INFRACFG_AO_PWM_SW_RST 41 +#define MT8183_INFRACFG_AO_SPI1_SW_RST 42 +#define MT8183_INFRACFG_AO_I2C4_SW_RST 43 +#define MT8183_INFRACFG_AO_DVFSP_SW_RST 44 +#define MT8183_INFRACFG_AO_SPI2_SW_RST 45 +#define MT8183_INFRACFG_AO_SPI3_SW_RST 46 +#define MT8183_INFRACFG_AO_UFSHCI_SW_RST 47 + +#define MT8183_INFRACFG_AO_PMIC_WRAP_SW_RST 64 +#define MT8183_INFRACFG_AO_SPM_SW_RST 65 +#define MT8183_INFRACFG_AO_USBSIF_SW_RST 66 +#define MT8183_INFRACFG_AO_KP_SW_RST 68 +#define MT8183_INFRACFG_AO_APXGPT_SW_RST 69 +#define MT8183_INFRACFG_AO_CLDMA_AO_SW_RST 70 +#define MT8183_INFRACFG_AO_UNIPRO_UFS_SW_RST 71 +#define MT8183_INFRACFG_AO_DX_CC_SW_RST 72 +#define MT8183_INFRACFG_AO_UFSPHY_SW_RST 73 + +#define MT8183_INFRACFG_AO_DX_CC_SEC_SW_RST 96 +#define MT8183_INFRACFG_AO_GCE_SW_RST 97 +#define MT8183_INFRACFG_AO_CLDMA_SW_RST 98 +#define MT8183_INFRACFG_AO_TRNG_SW_RST 99 +#define MT8183_INFRACFG_AO_AP_MD_CCIF_1_SW_RST 103 +#define MT8183_INFRACFG_AO_AP_MD_CCIF_SW_RST 104 +#define MT8183_INFRACFG_AO_I2C1_IMM_SW_RST 105 +#define MT8183_INFRACFG_AO_I2C1_ARB_SW_RST 106 +#define MT8183_INFRACFG_AO_I2C2_IMM_SW_RST 107 +#define MT8183_INFRACFG_AO_I2C2_ARB_SW_RST 108 +#define MT8183_INFRACFG_AO_I2C5_SW_RST 109 +#define MT8183_INFRACFG_AO_I2C5_IMM_SW_RST 110 +#define MT8183_INFRACFG_AO_I2C5_ARB_SW_RST 111 +#define MT8183_INFRACFG_AO_SPI4_SW_RST 112 +#define MT8183_INFRACFG_AO_SPI5_SW_RST 113 +#define MT8183_INFRACFG_AO_INFRA2MFGAXI_CBIP_CLAS_SW_RST 114 +#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M0_CBIP_GLAS_OUT_SW_RST 115 +#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M1_CBIP_GLAS_OUT_SW_RST 116 +#define MT8183_INFRACFG_AO_UFS_AES_SW_RST 117 +#define MT8183_INFRACFG_AO_CCU_I2C_IRQ_SW_RST 118 +#define MT8183_INFRACFG_AO_CCU_I2C_DMA_SW_RST 119 +#define MT8183_INFRACFG_AO_I2C6_SW_RST 120 +#define MT8183_INFRACFG_AO_CCU_GALS_SW_RST 121 +#define MT8183_INFRACFG_AO_IPU_GALS_SW_RST 122 +#define MT8183_INFRACFG_AO_CONN2AP_GALS_SW_RST 123 +#define MT8183_INFRACFG_AO_AP_MD_CCIF2_SW_RST 124 +#define MT8183_INFRACFG_AO_AP_MD_CCIF3_SW_RST 125 +#define MT8183_INFRACFG_AO_I2C7_SW_RST 126 +#define MT8183_INFRACFG_AO_I2C8_SW_RST 127 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */ diff --git a/include/dt-bindings/reset/actions,s700-reset.h b/include/dt-bindings/reset/actions,s700-reset.h new file mode 100644 index 0000000..5e3b16b --- /dev/null +++ b/include/dt-bindings/reset/actions,s700-reset.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR 
MIT) +// +// Device Tree binding constants for Actions Semi S700 Reset Management Unit +// +// Copyright (c) 2018 Linaro Ltd. + +#ifndef __DT_BINDINGS_ACTIONS_S700_RESET_H +#define __DT_BINDINGS_ACTIONS_S700_RESET_H + +#define RESET_AUDIO 0 +#define RESET_CSI 1 +#define RESET_DE 2 +#define RESET_DSI 3 +#define RESET_GPIO 4 +#define RESET_I2C0 5 +#define RESET_I2C1 6 +#define RESET_I2C2 7 +#define RESET_I2C3 8 +#define RESET_KEY 9 +#define RESET_LCD0 10 +#define RESET_SI 11 +#define RESET_SPI0 12 +#define RESET_SPI1 13 +#define RESET_SPI2 14 +#define RESET_SPI3 15 +#define RESET_UART0 16 +#define RESET_UART1 17 +#define RESET_UART2 18 +#define RESET_UART3 19 +#define RESET_UART4 20 +#define RESET_UART5 21 +#define RESET_UART6 22 + +#endif /* __DT_BINDINGS_ACTIONS_S700_RESET_H */ diff --git a/include/dt-bindings/reset/actions,s900-reset.h b/include/dt-bindings/reset/actions,s900-reset.h new file mode 100644 index 0000000..42c19d0 --- /dev/null +++ b/include/dt-bindings/reset/actions,s900-reset.h @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) +// +// Device Tree binding constants for Actions Semi S900 Reset Management Unit +// +// Copyright (c) 2018 Linaro Ltd. + +#ifndef __DT_BINDINGS_ACTIONS_S900_RESET_H +#define __DT_BINDINGS_ACTIONS_S900_RESET_H + +#define RESET_CHIPID 0 +#define RESET_CPU_SCNT 1 +#define RESET_SRAMI 2 +#define RESET_DDR_CTL_PHY 3 +#define RESET_DMAC 4 +#define RESET_GPIO 5 +#define RESET_BISP_AXI 6 +#define RESET_CSI0 7 +#define RESET_CSI1 8 +#define RESET_DE 9 +#define RESET_DSI 10 +#define RESET_GPU3D_PA 11 +#define RESET_GPU3D_PB 12 +#define RESET_HDE 13 +#define RESET_I2C0 14 +#define RESET_I2C1 15 +#define RESET_I2C2 16 +#define RESET_I2C3 17 +#define RESET_I2C4 18 +#define RESET_I2C5 19 +#define RESET_IMX 20 +#define RESET_NANDC0 21 +#define RESET_NANDC1 22 +#define RESET_SD0 23 +#define RESET_SD1 24 +#define RESET_SD2 25 +#define RESET_SD3 26 +#define RESET_SPI0 27 +#define RESET_SPI1 28 +#define RESET_SPI2 29 +#define RESET_SPI3 30 +#define RESET_UART0 31 +#define RESET_UART1 32 +#define RESET_UART2 33 +#define RESET_UART3 34 +#define RESET_UART4 35 +#define RESET_UART5 36 +#define RESET_UART6 37 +#define RESET_HDMI 38 +#define RESET_LVDS 39 +#define RESET_EDP 40 +#define RESET_USB2HUB 41 +#define RESET_USB2HSIC 42 +#define RESET_USB3 43 +#define RESET_PCM1 44 +#define RESET_AUDIO 45 +#define RESET_PCM0 46 +#define RESET_SE 47 +#define RESET_GIC 48 +#define RESET_DDR_CTL_PHY_AXI 49 +#define RESET_CMU_DDR 50 +#define RESET_DMM 51 +#define RESET_HDCP2TX 52 +#define RESET_ETHERNET 53 + +#endif /* __DT_BINDINGS_ACTIONS_S900_RESET_H */ diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10.h b/include/dt-bindings/reset/altr,rst-mgr-a10.h new file mode 100644 index 0000000..5d8a494 --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-a10.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, Steffen Trumtrar + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H + +/* MPUMODRST */ +#define CPU0_RESET 0 +#define CPU1_RESET 1 +#define WDS_RESET 2 +#define SCUPER_RESET 3 + +/* PER0MODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define EMAC2_RESET 34 +#define USB0_RESET 35 +#define USB1_RESET 36 +#define NAND_RESET 37 +#define QSPI_RESET 38 +#define SDMMC_RESET 39 +#define EMAC0_OCP_RESET 40 +#define EMAC1_OCP_RESET 41 +#define EMAC2_OCP_RESET 42 +#define USB0_OCP_RESET 43 +#define USB1_OCP_RESET 44 +#define NAND_OCP_RESET 45 +#define 
QSPI_OCP_RESET 46 +#define SDMMC_OCP_RESET 47 +#define DMA_RESET 48 +#define SPIM0_RESET 49 +#define SPIM1_RESET 50 +#define SPIS0_RESET 51 +#define SPIS1_RESET 52 +#define DMA_OCP_RESET 53 +#define EMAC_PTP_RESET 54 +/* 55 is empty*/ +#define DMAIF0_RESET 56 +#define DMAIF1_RESET 57 +#define DMAIF2_RESET 58 +#define DMAIF3_RESET 59 +#define DMAIF4_RESET 60 +#define DMAIF5_RESET 61 +#define DMAIF6_RESET 62 +#define DMAIF7_RESET 63 + +/* PER1MODRST */ +#define L4WD0_RESET 64 +#define L4WD1_RESET 65 +#define L4SYSTIMER0_RESET 66 +#define L4SYSTIMER1_RESET 67 +#define SPTIMER0_RESET 68 +#define SPTIMER1_RESET 69 +/* 70-71 is reserved */ +#define I2C0_RESET 72 +#define I2C1_RESET 73 +#define I2C2_RESET 74 +#define I2C3_RESET 75 +#define I2C4_RESET 76 +/* 77-79 is reserved */ +#define UART0_RESET 80 +#define UART1_RESET 81 +/* 82-87 is reserved */ +#define GPIO0_RESET 88 +#define GPIO1_RESET 89 +#define GPIO2_RESET 90 + +/* BRGMODRST */ +#define HPS2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2HPS_RESET 98 +#define F2SSDRAM0_RESET 99 +#define F2SSDRAM1_RESET 100 +#define F2SSDRAM2_RESET 101 +#define DDRSCH_RESET 102 + +/* SYSMODRST*/ +#define ROM_RESET 128 +#define OCRAM_RESET 129 +/* 130 is reserved */ +#define FPGAMGR_RESET 131 +#define S2F_RESET 132 +#define SYSDBG_RESET 133 +#define OCRAM_OCP_RESET 134 + +/* COLDMODRST */ +#define CLKMGRCOLD_RESET 160 +/* 161-162 is reserved */ +#define S2FCOLD_RESET 163 +#define TIMESTAMPCOLD_RESET 164 +#define TAPCOLD_RESET 165 +#define HMCCOLD_RESET 166 +#define IOMGRCOLD_RESET 167 + +/* NRSTMODRST */ +#define NRSTPINOE_RESET 192 + +/* DBGMODRST */ +#define DBG_RESET 224 +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10sr.h b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h new file mode 100644 index 0000000..09a15ea --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright Intel Corporation (C) 2017. All Rights Reserved + * + * Reset binding definitions for Altera Arria10 MAX5 System Resource Chip + * + * Adapted from altr,rst-mgr-a10.h + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H + +/* Peripheral PHY resets */ +#define A10SR_RESET_ENET_HPS 0 +#define A10SR_RESET_PCIE 1 +#define A10SR_RESET_FILE 2 +#define A10SR_RESET_BQSPI 3 +#define A10SR_RESET_USB 4 + +#define A10SR_RESET_NUM 5 + +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h new file mode 100644 index 0000000..70ea3a0 --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Intel Corporation. All rights reserved + * Copyright (C) 2016 Altera Corporation. 
All rights reserved + * + * derived from Steffen Trumtrar's "altr,rst-mgr-a10.h" + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_S10_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_S10_H + +/* MPUMODRST */ +#define CPU0_RESET 0 +#define CPU1_RESET 1 +#define CPU2_RESET 2 +#define CPU3_RESET 3 + +/* PER0MODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define EMAC2_RESET 34 +#define USB0_RESET 35 +#define USB1_RESET 36 +#define NAND_RESET 37 +/* 38 is empty */ +#define SDMMC_RESET 39 +#define EMAC0_OCP_RESET 40 +#define EMAC1_OCP_RESET 41 +#define EMAC2_OCP_RESET 42 +#define USB0_OCP_RESET 43 +#define USB1_OCP_RESET 44 +#define NAND_OCP_RESET 45 +/* 46 is empty */ +#define SDMMC_OCP_RESET 47 +#define DMA_RESET 48 +#define SPIM0_RESET 49 +#define SPIM1_RESET 50 +#define SPIS0_RESET 51 +#define SPIS1_RESET 52 +#define DMA_OCP_RESET 53 +#define EMAC_PTP_RESET 54 +/* 55 is empty*/ +#define DMAIF0_RESET 56 +#define DMAIF1_RESET 57 +#define DMAIF2_RESET 58 +#define DMAIF3_RESET 59 +#define DMAIF4_RESET 60 +#define DMAIF5_RESET 61 +#define DMAIF6_RESET 62 +#define DMAIF7_RESET 63 + +/* PER1MODRST */ +#define WATCHDOG0_RESET 64 +#define WATCHDOG1_RESET 65 +#define WATCHDOG2_RESET 66 +#define WATCHDOG3_RESET 67 +#define L4SYSTIMER0_RESET 68 +#define L4SYSTIMER1_RESET 69 +#define SPTIMER0_RESET 70 +#define SPTIMER1_RESET 71 +#define I2C0_RESET 72 +#define I2C1_RESET 73 +#define I2C2_RESET 74 +#define I2C3_RESET 75 +#define I2C4_RESET 76 +/* 77-79 is empty */ +#define UART0_RESET 80 +#define UART1_RESET 81 +/* 82-87 is empty */ +#define GPIO0_RESET 88 +#define GPIO1_RESET 89 + +/* BRGMODRST */ +#define SOC2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2SOC_RESET 98 +#define F2SSDRAM0_RESET 99 +#define F2SSDRAM1_RESET 100 +#define F2SSDRAM2_RESET 101 +#define DDRSCH_RESET 102 + +/* COLDMODRST */ +#define CPUPO0_RESET 160 +#define CPUPO1_RESET 161 +#define CPUPO2_RESET 162 +#define CPUPO3_RESET 163 +/* 164-167 is empty */ +#define L2_RESET 168 + +/* DBGMODRST */ +#define DBG_RESET 224 +#define CSDAP_RESET 225 + +/* TAPMODRST */ +#define TAP_RESET 256 + +#endif diff --git a/include/dt-bindings/reset/altr,rst-mgr.h b/include/dt-bindings/reset/altr,rst-mgr.h new file mode 100644 index 0000000..9b6ce14 --- /dev/null +++ b/include/dt-bindings/reset/altr,rst-mgr.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, Steffen Trumtrar + */ + +#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_H +#define _DT_BINDINGS_RESET_ALTR_RST_MGR_H + +/* MPUMODRST */ +#define CPU0_RESET 0 +#define CPU1_RESET 1 +#define WDS_RESET 2 +#define SCUPER_RESET 3 +#define L2_RESET 4 + +/* PERMODRST */ +#define EMAC0_RESET 32 +#define EMAC1_RESET 33 +#define USB0_RESET 34 +#define USB1_RESET 35 +#define NAND_RESET 36 +#define QSPI_RESET 37 +#define L4WD0_RESET 38 +#define L4WD1_RESET 39 +#define OSC1TIMER0_RESET 40 +#define OSC1TIMER1_RESET 41 +#define SPTIMER0_RESET 42 +#define SPTIMER1_RESET 43 +#define I2C0_RESET 44 +#define I2C1_RESET 45 +#define I2C2_RESET 46 +#define I2C3_RESET 47 +#define UART0_RESET 48 +#define UART1_RESET 49 +#define SPIM0_RESET 50 +#define SPIM1_RESET 51 +#define SPIS0_RESET 52 +#define SPIS1_RESET 53 +#define SDMMC_RESET 54 +#define CAN0_RESET 55 +#define CAN1_RESET 56 +#define GPIO0_RESET 57 +#define GPIO1_RESET 58 +#define GPIO2_RESET 59 +#define DMA_RESET 60 +#define SDR_RESET 61 + +/* PER2MODRST */ +#define DMAIF0_RESET 64 +#define DMAIF1_RESET 65 +#define DMAIF2_RESET 66 +#define DMAIF3_RESET 67 +#define DMAIF4_RESET 68 +#define DMAIF5_RESET 69 +#define 
DMAIF6_RESET 70 +#define DMAIF7_RESET 71 + +/* BRGMODRST */ +#define HPS2FPGA_RESET 96 +#define LWHPS2FPGA_RESET 97 +#define FPGA2HPS_RESET 98 + +/* MISCMODRST*/ +#define ROM_RESET 128 +#define OCRAM_RESET 129 +#define SYSMGR_RESET 130 +#define SYSMGRCOLD_RESET 131 +#define FPGAMGR_RESET 132 +#define ACPIDMAP_RESET 133 +#define S2F_RESET 134 +#define S2FCOLD_RESET 135 +#define NRSTPIN_RESET 136 +#define TIMESTAMPCOLD_RESET 137 +#define CLKMGRCOLD_RESET 138 +#define SCANMGR_RESET 139 +#define FRZCTRLCOLD_RESET 140 +#define SYSDBG_RESET 141 +#define DBG_RESET 142 +#define TAPCOLD_RESET 143 +#define SDRCOLD_RESET 144 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h new file mode 100644 index 0000000..05c3636 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * + * Copyright (c) 2018 Baylibre SAS. + * Author: Jerome Brunet + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H + +#define AXG_ARB_TODDR_A 0 +#define AXG_ARB_TODDR_B 1 +#define AXG_ARB_TODDR_C 2 +#define AXG_ARB_FRDDR_A 3 +#define AXG_ARB_FRDDR_B 4 +#define AXG_ARB_FRDDR_C 5 + +#endif /* _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H */ diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h new file mode 100644 index 0000000..0f2e0fe --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Copyright (c) 2017 Amlogic, inc. + * Author: Yixun Lan + * + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_PCIE_A 1 +#define RESET_PCIE_B 2 +#define RESET_DDR_TOP 3 +/* 4 */ +#define RESET_VIU 5 +#define RESET_PCIE_PHY 6 +#define RESET_PCIE_APB 7 +/* 8 */ +/* 9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +/* 12 */ +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18-21 */ +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +/* 28-31 */ +/* RESET1 */ +/* 32 */ +/* 33 */ +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +/* 37 */ +#define RESET_AHB_SRAM 38 +/* 39 */ +/* 40 */ +#define RESET_DMA 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +/* 44 */ +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61-63 */ +/* RESET2 */ +/* 64 */ +/* 65 */ +#define RESET_AUDIO 66 +/* 67 */ +#define RESET_MIPI_HOST 68 +#define RESET_AUDIO_LOCKER 69 +#define RESET_GE2D 70 +/* 71-76 */ +#define RESET_AO_CPU_RESET 77 +/* 78-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +/* 97-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +#define RESET_MIPI_PHY 130 +/* 131-140 */ +#define RESET_VENCL 
141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 +/* 194 */ +/* 195 */ +#define RESET_PERIPHS_I2C_MASTER_0 196 +/* 197-200 */ +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +/* 203-204 */ +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_I2C_MASTER_3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +#define RESET_DMC_VPU_PIPEL 233 +/* 234-255 */ + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h new file mode 100644 index 0000000..14b78da --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 BayLibre, SAS. + * Author: Jerome Brunet + * + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_G12A_AUDIO_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_G12A_AUDIO_RESET_H + +#define AUD_RESET_PDM 0 +#define AUD_RESET_TDMIN_A 1 +#define AUD_RESET_TDMIN_B 2 +#define AUD_RESET_TDMIN_C 3 +#define AUD_RESET_TDMIN_LB 4 +#define AUD_RESET_LOOPBACK 5 +#define AUD_RESET_TODDR_A 6 +#define AUD_RESET_TODDR_B 7 +#define AUD_RESET_TODDR_C 8 +#define AUD_RESET_FRDDR_A 9 +#define AUD_RESET_FRDDR_B 10 +#define AUD_RESET_FRDDR_C 11 +#define AUD_RESET_TDMOUT_A 12 +#define AUD_RESET_TDMOUT_B 13 +#define AUD_RESET_TDMOUT_C 14 +#define AUD_RESET_SPDIFOUT 15 +#define AUD_RESET_SPDIFOUT_B 16 +#define AUD_RESET_SPDIFIN 17 +#define AUD_RESET_EQDRC 18 +#define AUD_RESET_RESAMPLE 19 +#define AUD_RESET_DDRARB 20 +#define AUD_RESET_POWDET 21 +#define AUD_RESET_TORAM 22 +#define AUD_RESET_TOACODEC 23 +#define AUD_RESET_TOHDMITX 24 +#define AUD_RESET_CLKTREE 25 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h new file mode 100644 index 0000000..6d487c5 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) 2019 BayLibre, SAS. 
+ * Author: Jerome Brunet + * + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +/* 1 */ +#define RESET_DOS 2 +/* 3-4 */ +#define RESET_VIU 5 +#define RESET_AFIFO 6 +#define RESET_VID_PLL_DIV 7 +/* 8-9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +#define RESET_PCIE_CTRL_A 12 +#define RESET_VCBUS 13 +#define RESET_PCIE_PHY 14 +#define RESET_PCIE_APB 15 +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18 */ +#define RESET_HDMITX_CAPB3 19 +#define RESET_DVALIN_CAPB3 20 +#define RESET_DOS_CAPB3 21 +/* 22 */ +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +/* 27-31 */ +/* RESET1 */ +/* 32 */ +#define RESET_DEMUX 33 +#define RESET_USB 34 +#define RESET_DDR 35 +/* 36 */ +#define RESET_BT656 37 +#define RESET_AHB_SRAM 38 +/* 39 */ +#define RESET_PARSER 40 +/* 41 */ +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +#define RESET_SD_EMMC_A 44 +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +/* 47 */ +#define RESET_USB_PHY20 48 +#define RESET_USB_PHY21 49 +/* 50-60 */ +#define RESET_AUDIO_CODEC 61 +/* 62-63 */ +/* RESET2 */ +/* 64 */ +#define RESET_AUDIO 65 +#define RESET_HDMITX_PHY 66 +/* 67 */ +#define RESET_MIPI_DSI_HOST 68 +#define RESET_ALOCKER 69 +#define RESET_GE2D 70 +#define RESET_PARSER_REG 71 +#define RESET_PARSER_FETCH 72 +#define RESET_CTL 73 +#define RESET_PARSER_TOP 74 +/* 75-77 */ +#define RESET_DVALIN 78 +#define RESET_HDMITX 79 +/* 80-95 */ +/* RESET3 */ +/* 96-104 */ +#define RESET_DEMUX_TOP 105 +#define RESET_DEMUX_DES_PL 106 +#define RESET_DEMUX_S2P_0 107 +#define RESET_DEMUX_S2P_1 108 +#define RESET_DEMUX_0 109 +#define RESET_DEMUX_1 110 +#define RESET_DEMUX_2 111 +/* 112-127 */ +/* RESET4 */ +/* 128-129 */ +#define RESET_MIPI_DSI_PHY 130 +/* 131-132 */ +#define RESET_RDMA 133 +#define RESET_VENCI 134 +#define RESET_VENCP 135 +/* 136 */ +#define RESET_VDAC 137 +/* 138-139 */ +#define RESET_VDI6 140 +#define RESET_VENCL 141 +#define RESET_I2C_M1 142 +#define RESET_I2C_M2 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_GEN 192 +#define RESET_SPICC0 193 +#define RESET_SC 194 +#define RESET_SANA_3 195 +#define RESET_I2C_M0 196 +#define RESET_TS_PLL 197 +#define RESET_SPICC1 198 +#define RESET_STREAM 199 +#define RESET_TS_CPU 200 +#define RESET_UART0 201 +#define RESET_UART1_2 202 +#define RESET_ASYNC0 203 +#define RESET_ASYNC1 204 +#define RESET_SPIFC0 205 +#define RESET_I2C_M3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +#define RESET_TS_GPU 228 +#define RESET_DEVICE_MMC_ARB 229 +#define RESET_DVALIN_DMC_PIPL 230 +#define RESET_VID_LOCK 231 +#define RESET_NIC_DMC_PIPL 232 +#define RESET_DMC_VPU_PIPL 233 +#define RESET_GE2D_DMC_PIPL 234 +#define RESET_HCODEC_DMC_PIPL 235 +#define RESET_WAVE420_DMC_PIPL 236 +#define RESET_HEVCF_DMC_PIPL 237 +/* 238-255 */ + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h new file mode 100644 index 0000000..ea50586 --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2016 BayLibre, SAS. 
+ * Author: Neil Armstrong + */ +#ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +/* 1 */ +#define RESET_DOS_RESET 2 +#define RESET_DDR_TOP 3 +#define RESET_DCU_RESET 4 +#define RESET_VIU 5 +#define RESET_AIU 6 +#define RESET_VID_PLL_DIV 7 +/* 8 */ +#define RESET_PMUX 9 +#define RESET_VENC 10 +#define RESET_ASSIST 11 +#define RESET_AFIFO2 12 +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +#define RESET_NAND_CAPB3 18 +#define RESET_HDMITX_CAPB3 19 +#define RESET_MALI_CAPB3 20 +#define RESET_DOS_CAPB3 21 +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +#define RESET_MIPI_0 28 +#define RESET_MIPI_1 29 +#define RESET_MIPI_2 30 +#define RESET_MIPI_3 31 +/* RESET1 */ +#define RESET_CPPM 32 +#define RESET_DEMUX 33 +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +#define RESET_BT656 37 +#define RESET_AHB_SRAM 38 +/* 39 */ +#define RESET_PARSER 40 +#define RESET_BLKMV 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +#define RESET_SD_EMMC_A 44 +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61 */ +/* 62 */ +/* 63 */ +/* RESET2 */ +#define RESET_VD_RMEM 64 +#define RESET_AUDIN 65 +#define RESET_HDMI_TX 66 +/* 67 */ +/* 68 */ +/* 69 */ +#define RESET_GE2D 70 +#define RESET_PARSER_REG 71 +#define RESET_PARSER_FETCH 72 +#define RESET_PARSER_CTL 73 +#define RESET_PARSER_TOP 74 +/* 75 */ +/* 76 */ +#define RESET_AO_CPU_RESET 77 +#define RESET_MALI 78 +#define RESET_HDMI_SYSTEM_RESET 79 +/* 80-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +#define RESET_SYS_CPU 97 +#define RESET_EFUSE 98 +#define RESET_SYS_CPU_BVCI 99 +#define RESET_AIFIFO 100 +#define RESET_TVFE 101 +#define RESET_AHB_BRIDGE_CNTL 102 +/* 103 */ +#define RESET_AUDIO_DAC 104 +#define RESET_DEMUX_TOP 105 +#define RESET_DEMUX_DES 106 +#define RESET_DEMUX_S2P_0 107 +#define RESET_DEMUX_S2P_1 108 +#define RESET_DEMUX_RESET_0 109 +#define RESET_DEMUX_RESET_1 110 +#define RESET_DEMUX_RESET_2 111 +/* 112-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +/* 130 */ +/* 131 */ +#define RESET_DVIN_RESET 132 +#define RESET_RDMA 133 +#define RESET_VENCI 134 +#define RESET_VENCP 135 +/* 136 */ +#define RESET_VDAC 137 +#define RESET_RTC 138 +/* 139 */ +#define RESET_VDI6 140 +#define RESET_VENCL 141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 +#define RESET_PERIPHS_SMART_CARD 194 +#define RESET_PERIPHS_SAR_ADC 195 +#define RESET_PERIPHS_I2C_MASTER_0 196 +#define RESET_SANA 197 +/* 198 */ +#define RESET_PERIPHS_STREAM_INTERFACE 199 +#define RESET_PERIPHS_SDIO 200 +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1_2 202 +#define RESET_PERIPHS_ASYNC_0 203 +#define RESET_PERIPHS_ASYNC_1 204 +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_SDHC 206 +#define RESET_UART_SLIP 207 +/* 208-223 */ 
+/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +/* 233-255 */ + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h b/include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h new file mode 100644 index 0000000..1f1b56e --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson8b-clkc-reset.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2017 Martin Blumenstingl . + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON8B_CLKC_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON8B_CLKC_RESET_H + +#define CLKC_RESET_L2_CACHE_SOFT_RESET 0 +#define CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET 1 +#define CLKC_RESET_SCU_SOFT_RESET 2 +#define CLKC_RESET_CPU0_SOFT_RESET 3 +#define CLKC_RESET_CPU1_SOFT_RESET 4 +#define CLKC_RESET_CPU2_SOFT_RESET 5 +#define CLKC_RESET_CPU3_SOFT_RESET 6 +#define CLKC_RESET_A5_GLOBAL_RESET 7 +#define CLKC_RESET_A5_AXI_SOFT_RESET 8 +#define CLKC_RESET_A5_ABP_SOFT_RESET 9 +#define CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET 10 +#define CLKC_RESET_VID_CLK_CNTL_SOFT_RESET 11 +#define CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST 12 +#define CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE 13 +#define CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST 14 +#define CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE 15 + +#endif /* _DT_BINDINGS_AMLOGIC_MESON8B_CLKC_RESET_H */ diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h new file mode 100644 index 0000000..fbc524a --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2016 BayLibre, SAS. 
+ * Author: Neil Armstrong + */ +#ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_VLD 1 +#define RESET_IQIDCT 2 +#define RESET_MC 3 +/* 4 */ +#define RESET_VIU 5 +#define RESET_AIU 6 +#define RESET_MCPU 7 +#define RESET_CCPU 8 +#define RESET_PMUX 9 +#define RESET_VENC 10 +#define RESET_ASSIST 11 +#define RESET_AFIFO2 12 +#define RESET_MDEC 13 +#define RESET_VLD_PART 14 +#define RESET_VIFIFO 15 +/* 16-31 */ +/* RESET1 */ +/* 32 */ +#define RESET_DEMUX 33 +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_VDAC_1 36 +#define RESET_BT656 37 +#define RESET_AHB_SRAM 38 +#define RESET_AHB_BRIDGE 39 +#define RESET_PARSER 40 +#define RESET_BLKMV 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +#define RESET_ABUF 44 +#define RESET_AHB_DATA 45 +#define RESET_AHB_CNTL 46 +#define RESET_ROM_BOOT 47 +/* 48-63 */ +/* RESET2 */ +#define RESET_VD_RMEM 64 +#define RESET_AUDIN 65 +#define RESET_DBLK 66 +#define RESET_PIC_DC 67 +#define RESET_PSC 68 +#define RESET_NAND 69 +#define RESET_GE2D 70 +#define RESET_PARSER_REG 71 +#define RESET_PARSER_FETCH 72 +#define RESET_PARSER_CTL 73 +#define RESET_PARSER_TOP 74 +#define RESET_HDMI_APB 75 +#define RESET_AUDIO_APB 76 +#define RESET_MEDIA_CPU 77 +#define RESET_MALI 78 +#define RESET_HDMI_SYSTEM_RESET 79 +/* 80-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +#define RESET_SYS_CPU_0 97 +#define RESET_EFUSE 98 +#define RESET_SYS_CPU_BVCI 99 +#define RESET_AIFIFO 100 +#define RESET_AUDIO_PLL_MODULATOR 101 +#define RESET_AHB_BRIDGE_CNTL 102 +#define RESET_SYS_CPU_1 103 +#define RESET_AUDIO_DAC 104 +#define RESET_DEMUX_TOP 105 +#define RESET_DEMUX_DES 106 +#define RESET_DEMUX_S2P_0 107 +#define RESET_DEMUX_S2P_1 108 +#define RESET_DEMUX_RESET_0 109 +#define RESET_DEMUX_RESET_1 110 +#define RESET_DEMUX_RESET_2 111 +/* 112-127 */ +/* RESET4 */ +#define RESET_PL310 128 +#define RESET_A5_APB 129 +#define RESET_A5_AXI 130 +#define RESET_A5 131 +#define RESET_DVIN 132 +#define RESET_RDMA 133 +#define RESET_VENCI 134 +#define RESET_VENCP 135 +#define RESET_VENCT 136 +#define RESET_VDAC_4 137 +#define RESET_RTC 138 +#define RESET_A5_DEBUG 139 +#define RESET_VDI6 140 +#define RESET_VENCL 141 +/* 142-159 */ +/* RESET5 */ +#define RESET_DDR_PLL 160 +#define RESET_MISC_PLL 161 +#define RESET_SYS_PLL 162 +#define RESET_HPLL_PLL 163 +#define RESET_AUDIO_PLL 164 +#define RESET_VID2_PLL 165 +/* 166-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_IR_REMOTE 193 +#define RESET_PERIPHS_SMART_CARD 194 +#define RESET_PERIPHS_SAR_ADC 195 +#define RESET_PERIPHS_I2C_MASTER_0 196 +#define RESET_PERIPHS_I2C_MASTER_1 197 +#define RESET_PERIPHS_I2C_SLAVE 198 +#define RESET_PERIPHS_STREAM_INTERFACE 199 +#define RESET_PERIPHS_SDIO 200 +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +#define RESET_PERIPHS_ASYNC_0 203 +#define RESET_PERIPHS_ASYNC_1 204 +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_SPI_1 206 +#define RESET_PERIPHS_LED_PWM 207 +/* 208-223 */ +/* RESET7 */ +/* 224-255 */ + +#endif diff --git a/include/dt-bindings/reset/axg-aoclkc.h b/include/dt-bindings/reset/axg-aoclkc.h new file mode 100644 index 0000000..d342c0b --- /dev/null +++ b/include/dt-bindings/reset/axg-aoclkc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2016 BayLibre, SAS + * Author: Neil Armstrong + * + * Copyright (c) 2018 Amlogic, inc. 
+ * Author: Qiufang Dai + */ + +#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK +#define DT_BINDINGS_RESET_AMLOGIC_MESON_AXG_AOCLK + +#define RESET_AO_REMOTE 0 +#define RESET_AO_I2C_MASTER 1 +#define RESET_AO_I2C_SLAVE 2 +#define RESET_AO_UART1 3 +#define RESET_AO_UART2 4 +#define RESET_AO_IR_BLASTER 5 + +#endif diff --git a/include/dt-bindings/reset/bitmain,bm1880-reset.h b/include/dt-bindings/reset/bitmain,bm1880-reset.h new file mode 100644 index 0000000..4c0de52 --- /dev/null +++ b/include/dt-bindings/reset/bitmain,bm1880-reset.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2018 Bitmain Ltd. + * Copyright (c) 2019 Linaro Ltd. + */ + +#ifndef _DT_BINDINGS_BM1880_RESET_H +#define _DT_BINDINGS_BM1880_RESET_H + +#define BM1880_RST_MAIN_AP 0 +#define BM1880_RST_SECOND_AP 1 +#define BM1880_RST_DDR 2 +#define BM1880_RST_VIDEO 3 +#define BM1880_RST_JPEG 4 +#define BM1880_RST_VPP 5 +#define BM1880_RST_GDMA 6 +#define BM1880_RST_AXI_SRAM 7 +#define BM1880_RST_TPU 8 +#define BM1880_RST_USB 9 +#define BM1880_RST_ETH0 10 +#define BM1880_RST_ETH1 11 +#define BM1880_RST_NAND 12 +#define BM1880_RST_EMMC 13 +#define BM1880_RST_SD 14 +#define BM1880_RST_SDMA 15 +#define BM1880_RST_I2S0 16 +#define BM1880_RST_I2S1 17 +#define BM1880_RST_UART0_1_CLK 18 +#define BM1880_RST_UART0_1_ACLK 19 +#define BM1880_RST_UART2_3_CLK 20 +#define BM1880_RST_UART2_3_ACLK 21 +#define BM1880_RST_MINER 22 +#define BM1880_RST_I2C0 23 +#define BM1880_RST_I2C1 24 +#define BM1880_RST_I2C2 25 +#define BM1880_RST_I2C3 26 +#define BM1880_RST_I2C4 27 +#define BM1880_RST_PWM0 28 +#define BM1880_RST_PWM1 29 +#define BM1880_RST_PWM2 30 +#define BM1880_RST_PWM3 31 +#define BM1880_RST_SPI 32 +#define BM1880_RST_GPIO0 33 +#define BM1880_RST_GPIO1 34 +#define BM1880_RST_GPIO2 35 +#define BM1880_RST_EFUSE 36 +#define BM1880_RST_WDT 37 +#define BM1880_RST_AHB_ROM 38 +#define BM1880_RST_SPIC 39 + +#endif /* _DT_BINDINGS_BM1880_RESET_H */ diff --git a/include/dt-bindings/reset/cortina,gemini-reset.h b/include/dt-bindings/reset/cortina,gemini-reset.h new file mode 100644 index 0000000..f48aff2 --- /dev/null +++ b/include/dt-bindings/reset/cortina,gemini-reset.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_RESET_CORTINA_GEMINI_H +#define _DT_BINDINGS_RESET_CORTINA_GEMINI_H + +#define GEMINI_RESET_DRAM 0 +#define GEMINI_RESET_FLASH 1 +#define GEMINI_RESET_IDE 2 +#define GEMINI_RESET_RAID 3 +#define GEMINI_RESET_SECURITY 4 +#define GEMINI_RESET_GMAC0 5 +#define GEMINI_RESET_GMAC1 6 +#define GEMINI_RESET_PCI 7 +#define GEMINI_RESET_USB0 8 +#define GEMINI_RESET_USB1 9 +#define GEMINI_RESET_DMAC 10 +#define GEMINI_RESET_APB 11 +#define GEMINI_RESET_LPC 12 +#define GEMINI_RESET_LCD 13 +#define GEMINI_RESET_INTCON0 14 +#define GEMINI_RESET_INTCON1 15 +#define GEMINI_RESET_RTC 16 +#define GEMINI_RESET_TIMER 17 +#define GEMINI_RESET_UART 18 +#define GEMINI_RESET_SSP 19 +#define GEMINI_RESET_GPIO0 20 +#define GEMINI_RESET_GPIO1 21 +#define GEMINI_RESET_GPIO2 22 +#define GEMINI_RESET_WDOG 23 +#define GEMINI_RESET_EXTERN 24 +#define GEMINI_RESET_CIR 25 +#define GEMINI_RESET_SATA0 26 +#define GEMINI_RESET_SATA1 27 +#define GEMINI_RESET_TVC 28 +#define GEMINI_RESET_CPU1 30 +#define GEMINI_RESET_GLOBAL 31 + +#endif diff --git a/include/dt-bindings/reset/g12a-aoclkc.h b/include/dt-bindings/reset/g12a-aoclkc.h new file mode 100644 index 0000000..bd2e233 --- /dev/null +++ b/include/dt-bindings/reset/g12a-aoclkc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + 
* Copyright (c) 2016 BayLibre, SAS + * Author: Neil Armstrong + */ + +#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK +#define DT_BINDINGS_RESET_AMLOGIC_MESON_G12A_AOCLK + +#define RESET_AO_IR_IN 0 +#define RESET_AO_UART 1 +#define RESET_AO_I2C_M 2 +#define RESET_AO_I2C_S 3 +#define RESET_AO_SAR_ADC 4 +#define RESET_AO_UART2 5 +#define RESET_AO_IR_OUT 6 + +#endif diff --git a/include/dt-bindings/reset/gxbb-aoclkc.h b/include/dt-bindings/reset/gxbb-aoclkc.h new file mode 100644 index 0000000..9e3fd60 --- /dev/null +++ b/include/dt-bindings/reset/gxbb-aoclkc.h @@ -0,0 +1,66 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK +#define DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK + +#define RESET_AO_REMOTE 0 +#define RESET_AO_I2C_MASTER 1 +#define RESET_AO_I2C_SLAVE 2 +#define RESET_AO_UART1 3 +#define RESET_AO_UART2 4 +#define RESET_AO_IR_BLASTER 5 + +#endif diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h new file mode 100644 index 0000000..63aff7d --- /dev/null +++ b/include/dt-bindings/reset/hisi,hi6220-resets.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * This header provides index for the reset controller + * based on hi6220 SoC. + */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_HI6220 +#define _DT_BINDINGS_RESET_CONTROLLER_HI6220 + +#define PERIPH_RSTDIS0_MMC0 0x000 +#define PERIPH_RSTDIS0_MMC1 0x001 +#define PERIPH_RSTDIS0_MMC2 0x002 +#define PERIPH_RSTDIS0_NANDC 0x003 +#define PERIPH_RSTDIS0_USBOTG_BUS 0x004 +#define PERIPH_RSTDIS0_POR_PICOPHY 0x005 +#define PERIPH_RSTDIS0_USBOTG 0x006 +#define PERIPH_RSTDIS0_USBOTG_32K 0x007 +#define PERIPH_RSTDIS1_HIFI 0x100 +#define PERIPH_RSTDIS1_DIGACODEC 0x105 +#define PERIPH_RSTEN2_IPF 0x200 +#define PERIPH_RSTEN2_SOCP 0x201 +#define PERIPH_RSTEN2_DMAC 0x202 +#define PERIPH_RSTEN2_SECENG 0x203 +#define PERIPH_RSTEN2_ABB 0x204 +#define PERIPH_RSTEN2_HPM0 0x205 +#define PERIPH_RSTEN2_HPM1 0x206 +#define PERIPH_RSTEN2_HPM2 0x207 +#define PERIPH_RSTEN2_HPM3 0x208 +#define PERIPH_RSTEN3_CSSYS 0x300 +#define PERIPH_RSTEN3_I2C0 0x301 +#define PERIPH_RSTEN3_I2C1 0x302 +#define PERIPH_RSTEN3_I2C2 0x303 +#define PERIPH_RSTEN3_I2C3 0x304 +#define PERIPH_RSTEN3_UART1 0x305 +#define PERIPH_RSTEN3_UART2 0x306 +#define PERIPH_RSTEN3_UART3 0x307 +#define PERIPH_RSTEN3_UART4 0x308 +#define PERIPH_RSTEN3_SSP 0x309 +#define PERIPH_RSTEN3_PWM 0x30a +#define PERIPH_RSTEN3_BLPWM 0x30b +#define PERIPH_RSTEN3_TSENSOR 0x30c +#define PERIPH_RSTEN3_DAPB 0x312 +#define PERIPH_RSTEN3_HKADC 0x313 +#define PERIPH_RSTEN3_CODEC_SSI 0x314 +#define PERIPH_RSTEN3_PMUSSI1 0x316 +#define PERIPH_RSTEN8_RS0 0x400 +#define PERIPH_RSTEN8_RS2 0x401 +#define PERIPH_RSTEN8_RS3 0x402 +#define PERIPH_RSTEN8_MS0 0x403 +#define PERIPH_RSTEN8_MS2 0x405 +#define PERIPH_RSTEN8_XG2RAM0 0x406 +#define PERIPH_RSTEN8_X2SRAM_TZMA 0x407 +#define PERIPH_RSTEN8_SRAM 0x408 +#define PERIPH_RSTEN8_HARQ 0x40a +#define PERIPH_RSTEN8_DDRC 0x40c +#define PERIPH_RSTEN8_DDRC_APB 0x40d +#define PERIPH_RSTEN8_DDRPACK_APB 0x40e +#define PERIPH_RSTEN8_DDRT 0x411 +#define PERIPH_RSDIST9_CARM_DAP 0x500 +#define PERIPH_RSDIST9_CARM_ATB 0x501 +#define PERIPH_RSDIST9_CARM_LBUS 0x502 +#define PERIPH_RSDIST9_CARM_POR 0x503 +#define PERIPH_RSDIST9_CARM_CORE 0x504 +#define PERIPH_RSDIST9_CARM_DBG 0x505 +#define PERIPH_RSDIST9_CARM_L2 0x506 +#define PERIPH_RSDIST9_CARM_SOCDBG 0x507 +#define PERIPH_RSDIST9_CARM_ETM 0x508 + +#define MEDIA_G3D 0 +#define MEDIA_CODEC_VPU 2 +#define MEDIA_CODEC_JPEG 3 +#define MEDIA_ISP 4 +#define MEDIA_ADE 5 +#define MEDIA_MMU 6 +#define MEDIA_XG2RAM1 7 + +#define AO_G3D 1 +#define AO_CODECISP 2 +#define AO_MCPU 4 +#define AO_BBPHARQMEM 5 +#define AO_HIFI 8 +#define AO_ACPUSCUL2C 12 + +#endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/ diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h new file mode 100644 index 0000000..a5b35b4 --- /dev/null +++ b/include/dt-bindings/reset/imx7-reset.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Impinj, Inc. 
+ * + * Author: Andrey Smirnov + */ + +#ifndef DT_BINDING_RESET_IMX7_H +#define DT_BINDING_RESET_IMX7_H + +#define IMX7_RESET_A7_CORE_POR_RESET0 0 +#define IMX7_RESET_A7_CORE_POR_RESET1 1 +#define IMX7_RESET_A7_CORE_RESET0 2 +#define IMX7_RESET_A7_CORE_RESET1 3 +#define IMX7_RESET_A7_DBG_RESET0 4 +#define IMX7_RESET_A7_DBG_RESET1 5 +#define IMX7_RESET_A7_ETM_RESET0 6 +#define IMX7_RESET_A7_ETM_RESET1 7 +#define IMX7_RESET_A7_SOC_DBG_RESET 8 +#define IMX7_RESET_A7_L2RESET 9 +#define IMX7_RESET_SW_M4C_RST 10 +#define IMX7_RESET_SW_M4P_RST 11 +#define IMX7_RESET_EIM_RST 12 +#define IMX7_RESET_HSICPHY_PORT_RST 13 +#define IMX7_RESET_USBPHY1_POR 14 +#define IMX7_RESET_USBPHY1_PORT_RST 15 +#define IMX7_RESET_USBPHY2_POR 16 +#define IMX7_RESET_USBPHY2_PORT_RST 17 +#define IMX7_RESET_MIPI_PHY_MRST 18 +#define IMX7_RESET_MIPI_PHY_SRST 19 + +/* + * IMX7_RESET_PCIEPHY is a logical reset line combining PCIEPHY_BTN + * and PCIEPHY_G_RST + */ +#define IMX7_RESET_PCIEPHY 20 +#define IMX7_RESET_PCIEPHY_PERST 21 + +/* + * IMX7_RESET_PCIE_CTRL_APPS_EN is not strictly a reset line, but it + * can be used to inhibit the PCIe LTSSM, so, in a way, it can be + * thought of as one + */ +#define IMX7_RESET_PCIE_CTRL_APPS_EN 22 +#define IMX7_RESET_DDRC_PRST 23 +#define IMX7_RESET_DDRC_CORE_RST 24 + +#define IMX7_RESET_PCIE_CTRL_APPS_TURNOFF 25 + +#define IMX7_RESET_NUM 26 + +#endif + diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h new file mode 100644 index 0000000..9a30108 --- /dev/null +++ b/include/dt-bindings/reset/imx8mq-reset.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Zodiac Inflight Innovations + * + * Author: Andrey Smirnov + */ + +#ifndef DT_BINDING_RESET_IMX8MQ_H +#define DT_BINDING_RESET_IMX8MQ_H + +#define IMX8MQ_RESET_A53_CORE_POR_RESET0 0 +#define IMX8MQ_RESET_A53_CORE_POR_RESET1 1 +#define IMX8MQ_RESET_A53_CORE_POR_RESET2 2 +#define IMX8MQ_RESET_A53_CORE_POR_RESET3 3 +#define IMX8MQ_RESET_A53_CORE_RESET0 4 +#define IMX8MQ_RESET_A53_CORE_RESET1 5 +#define IMX8MQ_RESET_A53_CORE_RESET2 6 +#define IMX8MQ_RESET_A53_CORE_RESET3 7 +#define IMX8MQ_RESET_A53_DBG_RESET0 8 +#define IMX8MQ_RESET_A53_DBG_RESET1 9 +#define IMX8MQ_RESET_A53_DBG_RESET2 10 +#define IMX8MQ_RESET_A53_DBG_RESET3 11 +#define IMX8MQ_RESET_A53_ETM_RESET0 12 +#define IMX8MQ_RESET_A53_ETM_RESET1 13 +#define IMX8MQ_RESET_A53_ETM_RESET2 14 +#define IMX8MQ_RESET_A53_ETM_RESET3 15 +#define IMX8MQ_RESET_A53_SOC_DBG_RESET 16 +#define IMX8MQ_RESET_A53_L2RESET 17 +#define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST 18 +#define IMX8MQ_RESET_OTG1_PHY_RESET 19 +#define IMX8MQ_RESET_OTG2_PHY_RESET 20 +#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 +#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 +#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 +#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 +#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 +#define IMX8MQ_RESET_PCIEPHY 26 +#define IMX8MQ_RESET_PCIEPHY_PERST 27 +#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 +#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 +#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_DISP_RESET 31 +#define IMX8MQ_RESET_GPU_RESET 32 +#define IMX8MQ_RESET_VPU_RESET 33 +#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */ +#define 
IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_DDRC1_PRST 44 +#define IMX8MQ_RESET_DDRC1_CORE_RESET 45 +#define IMX8MQ_RESET_DDRC1_PHY_RESET 46 +#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */ + +#define IMX8MQ_RESET_NUM 50 + +#endif diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h new file mode 100644 index 0000000..91e4200 --- /dev/null +++ b/include/dt-bindings/reset/mt2701-resets.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek, Shunli Wang + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2701 +#define _DT_BINDINGS_RESET_CONTROLLER_MT2701 + +/* INFRACFG resets */ +#define MT2701_INFRA_EMI_REG_RST 0 +#define MT2701_INFRA_DRAMC0_A0_RST 1 +#define MT2701_INFRA_FHCTL_RST 2 +#define MT2701_INFRA_APCIRQ_EINT_RST 3 +#define MT2701_INFRA_APXGPT_RST 4 +#define MT2701_INFRA_SCPSYS_RST 5 +#define MT2701_INFRA_KP_RST 6 +#define MT2701_INFRA_PMIC_WRAP_RST 7 +#define MT2701_INFRA_MIPI_RST 8 +#define MT2701_INFRA_IRRX_RST 9 +#define MT2701_INFRA_CEC_RST 10 +#define MT2701_INFRA_EMI_RST 32 +#define MT2701_INFRA_DRAMC0_RST 34 +#define MT2701_INFRA_TRNG_RST 37 +#define MT2701_INFRA_SYSIRQ_RST 38 + +/* PERICFG resets */ +#define MT2701_PERI_UART0_SW_RST 0 +#define MT2701_PERI_UART1_SW_RST 1 +#define MT2701_PERI_UART2_SW_RST 2 +#define MT2701_PERI_UART3_SW_RST 3 +#define MT2701_PERI_GCPU_SW_RST 5 +#define MT2701_PERI_BTIF_SW_RST 6 +#define MT2701_PERI_PWM_SW_RST 8 +#define MT2701_PERI_AUXADC_SW_RST 10 +#define MT2701_PERI_DMA_SW_RST 11 +#define MT2701_PERI_NFI_SW_RST 14 +#define MT2701_PERI_NLI_SW_RST 15 +#define MT2701_PERI_THERM_SW_RST 16 +#define MT2701_PERI_MSDC2_SW_RST 17 +#define MT2701_PERI_MSDC0_SW_RST 19 +#define MT2701_PERI_MSDC1_SW_RST 20 +#define MT2701_PERI_I2C0_SW_RST 22 +#define MT2701_PERI_I2C1_SW_RST 23 +#define MT2701_PERI_I2C2_SW_RST 24 +#define MT2701_PERI_I2C3_SW_RST 25 +#define MT2701_PERI_USB_SW_RST 28 +#define MT2701_PERI_ETH_SW_RST 29 +#define MT2701_PERI_SPI0_SW_RST 33 + +/* TOPRGU resets */ +#define MT2701_TOPRGU_INFRA_RST 0 +#define MT2701_TOPRGU_MM_RST 1 +#define MT2701_TOPRGU_MFG_RST 2 +#define MT2701_TOPRGU_ETHDMA_RST 3 +#define MT2701_TOPRGU_VDEC_RST 4 +#define MT2701_TOPRGU_VENC_IMG_RST 5 +#define MT2701_TOPRGU_DDRPHY_RST 6 +#define MT2701_TOPRGU_MD_RST 7 +#define MT2701_TOPRGU_INFRA_AO_RST 8 +#define MT2701_TOPRGU_CONN_RST 9 +#define MT2701_TOPRGU_APMIXED_RST 10 +#define MT2701_TOPRGU_HIFSYS_RST 11 +#define MT2701_TOPRGU_CONN_MCU_RST 12 +#define MT2701_TOPRGU_BDP_DISP_RST 13 + +/* HIFSYS resets */ +#define MT2701_HIFSYS_UHOST0_RST 3 +#define MT2701_HIFSYS_UHOST1_RST 4 +#define MT2701_HIFSYS_UPHY0_RST 21 +#define MT2701_HIFSYS_UPHY1_RST 22 +#define MT2701_HIFSYS_PCIE0_RST 24 +#define MT2701_HIFSYS_PCIE1_RST 25 +#define MT2701_HIFSYS_PCIE2_RST 26 + +/* ETHSYS resets */ +#define MT2701_ETHSYS_SYS_RST 0 +#define MT2701_ETHSYS_MCM_RST 2 +#define MT2701_ETHSYS_FE_RST 6 +#define 
MT2701_ETHSYS_GMAC_RST 23 +#define MT2701_ETHSYS_PPE_RST 31 + +/* G3DSYS resets */ +#define MT2701_G3DSYS_CORE_RST 0 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */ diff --git a/include/dt-bindings/reset/mt7622-reset.h b/include/dt-bindings/reset/mt7622-reset.h new file mode 100644 index 0000000..da0d1ae --- /dev/null +++ b/include/dt-bindings/reset/mt7622-reset.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Sean Wang + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7622 +#define _DT_BINDINGS_RESET_CONTROLLER_MT7622 + +/* INFRACFG resets */ +#define MT7622_INFRA_EMI_REG_RST 0 +#define MT7622_INFRA_DRAMC0_A0_RST 1 +#define MT7622_INFRA_APCIRQ_EINT_RST 3 +#define MT7622_INFRA_APXGPT_RST 4 +#define MT7622_INFRA_SCPSYS_RST 5 +#define MT7622_INFRA_PMIC_WRAP_RST 7 +#define MT7622_INFRA_IRRX_RST 9 +#define MT7622_INFRA_EMI_RST 16 +#define MT7622_INFRA_WED0_RST 17 +#define MT7622_INFRA_DRAMC_RST 18 +#define MT7622_INFRA_CCI_INTF_RST 19 +#define MT7622_INFRA_TRNG_RST 21 +#define MT7622_INFRA_SYSIRQ_RST 22 +#define MT7622_INFRA_WED1_RST 25 + +/* PERICFG Subsystem resets */ +#define MT7622_PERI_UART0_SW_RST 0 +#define MT7622_PERI_UART1_SW_RST 1 +#define MT7622_PERI_UART2_SW_RST 2 +#define MT7622_PERI_UART3_SW_RST 3 +#define MT7622_PERI_UART4_SW_RST 4 +#define MT7622_PERI_BTIF_SW_RST 6 +#define MT7622_PERI_PWM_SW_RST 8 +#define MT7622_PERI_AUXADC_SW_RST 10 +#define MT7622_PERI_DMA_SW_RST 11 +#define MT7622_PERI_IRTX_SW_RST 13 +#define MT7622_PERI_NFI_SW_RST 14 +#define MT7622_PERI_THERM_SW_RST 16 +#define MT7622_PERI_MSDC0_SW_RST 19 +#define MT7622_PERI_MSDC1_SW_RST 20 +#define MT7622_PERI_I2C0_SW_RST 22 +#define MT7622_PERI_I2C1_SW_RST 23 +#define MT7622_PERI_I2C2_SW_RST 24 +#define MT7622_PERI_SPI0_SW_RST 33 +#define MT7622_PERI_SPI1_SW_RST 34 +#define MT7622_PERI_FLASHIF_SW_RST 36 + +/* TOPRGU resets */ +#define MT7622_TOPRGU_INFRA_RST 0 +#define MT7622_TOPRGU_ETHDMA_RST 1 +#define MT7622_TOPRGU_DDRPHY_RST 6 +#define MT7622_TOPRGU_INFRA_AO_RST 8 +#define MT7622_TOPRGU_CONN_RST 9 +#define MT7622_TOPRGU_APMIXED_RST 10 +#define MT7622_TOPRGU_CONN_MCU_RST 12 + +/* PCIe/SATA Subsystem resets */ +#define MT7622_SATA_PHY_REG_RST 12 +#define MT7622_SATA_PHY_SW_RST 13 +#define MT7622_SATA_AXI_BUS_RST 15 +#define MT7622_PCIE1_CORE_RST 19 +#define MT7622_PCIE1_MMIO_RST 20 +#define MT7622_PCIE1_HRST 21 +#define MT7622_PCIE1_USER_RST 22 +#define MT7622_PCIE1_PIPE_RST 23 +#define MT7622_PCIE0_CORE_RST 27 +#define MT7622_PCIE0_MMIO_RST 28 +#define MT7622_PCIE0_HRST 29 +#define MT7622_PCIE0_USER_RST 30 +#define MT7622_PCIE0_PIPE_RST 31 + +/* SSUSB Subsystem resets */ +#define MT7622_SSUSB_PHY_PWR_RST 3 +#define MT7622_SSUSB_MAC_PWR_RST 4 + +/* ETHSYS Subsystem resets */ +#define MT7622_ETHSYS_SYS_RST 0 +#define MT7622_ETHSYS_MCM_RST 2 +#define MT7622_ETHSYS_HSDMA_RST 5 +#define MT7622_ETHSYS_FE_RST 6 +#define MT7622_ETHSYS_GMAC_RST 23 +#define MT7622_ETHSYS_EPHY_RST 24 +#define MT7622_ETHSYS_CRYPTO_RST 29 +#define MT7622_ETHSYS_PPE_RST 31 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7622 */ diff --git a/include/dt-bindings/reset/mt7629-resets.h b/include/dt-bindings/reset/mt7629-resets.h new file mode 100644 index 0000000..6bb8573 --- /dev/null +++ b/include/dt-bindings/reset/mt7629-resets.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. 
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7629 +#define _DT_BINDINGS_RESET_CONTROLLER_MT7629 + +/* INFRACFG resets */ +#define MT7629_INFRA_EMI_MPU_RST 0 +#define MT7629_INFRA_UART5_RST 2 +#define MT7629_INFRA_CIRQ_EINT_RST 3 +#define MT7629_INFRA_APXGPT_RST 4 +#define MT7629_INFRA_SCPSYS_RST 5 +#define MT7629_INFRA_KP_RST 6 +#define MT7629_INFRA_SPI1_RST 7 +#define MT7629_INFRA_SPI4_RST 8 +#define MT7629_INFRA_SYSTIMER_RST 9 +#define MT7629_INFRA_IRRX_RST 10 +#define MT7629_INFRA_AO_BUS_RST 16 +#define MT7629_INFRA_EMI_RST 32 +#define MT7629_INFRA_APMIXED_RST 35 +#define MT7629_INFRA_MIPI_RST 36 +#define MT7629_INFRA_TRNG_RST 37 +#define MT7629_INFRA_SYSCIRQ_RST 38 +#define MT7629_INFRA_MIPI_CSI_RST 39 +#define MT7629_INFRA_GCE_FAXI_RST 40 +#define MT7629_INFRA_I2C_SRAM_RST 41 +#define MT7629_INFRA_IOMMU_RST 47 + +/* PERICFG resets */ +#define MT7629_PERI_UART0_SW_RST 0 +#define MT7629_PERI_UART1_SW_RST 1 +#define MT7629_PERI_UART2_SW_RST 2 +#define MT7629_PERI_BTIF_SW_RST 6 +#define MT7629_PERI_PWN_SW_RST 8 +#define MT7629_PERI_DMA_SW_RST 11 +#define MT7629_PERI_NFI_SW_RST 14 +#define MT7629_PERI_I2C0_SW_RST 22 +#define MT7629_PERI_SPI0_SW_RST 33 +#define MT7629_PERI_SPI1_SW_RST 34 +#define MT7629_PERI_FLASHIF_SW_RST 36 + +/* PCIe Subsystem resets */ +#define MT7629_PCIE1_CORE_RST 19 +#define MT7629_PCIE1_MMIO_RST 20 +#define MT7629_PCIE1_HRST 21 +#define MT7629_PCIE1_USER_RST 22 +#define MT7629_PCIE1_PIPE_RST 23 +#define MT7629_PCIE0_CORE_RST 27 +#define MT7629_PCIE0_MMIO_RST 28 +#define MT7629_PCIE0_HRST 29 +#define MT7629_PCIE0_USER_RST 30 +#define MT7629_PCIE0_PIPE_RST 31 + +/* SSUSB Subsystem resets */ +#define MT7629_SSUSB_PHY_PWR_RST 3 +#define MT7629_SSUSB_MAC_PWR_RST 4 + +/* ETH Subsystem resets */ +#define MT7629_ETHSYS_SYS_RST 0 +#define MT7629_ETHSYS_MCM_RST 2 +#define MT7629_ETHSYS_HSDMA_RST 5 +#define MT7629_ETHSYS_FE_RST 6 +#define MT7629_ETHSYS_ESW_RST 16 +#define MT7629_ETHSYS_GMAC_RST 23 +#define MT7629_ETHSYS_EPHY_RST 24 +#define MT7629_ETHSYS_CRYPTO_RST 29 +#define MT7629_ETHSYS_PPE_RST 31 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7629 */ diff --git a/include/dt-bindings/reset/mt8135-resets.h b/include/dt-bindings/reset/mt8135-resets.h new file mode 100644 index 0000000..8c060d0 --- /dev/null +++ b/include/dt-bindings/reset/mt8135-resets.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Flora Fu, MediaTek + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8135 + +/* INFRACFG resets */ +#define MT8135_INFRA_EMI_REG_RST 0 +#define MT8135_INFRA_DRAMC0_A0_RST 1 +#define MT8135_INFRA_CCIF0_RST 2 +#define MT8135_INFRA_APCIRQ_EINT_RST 3 +#define MT8135_INFRA_APXGPT_RST 4 +#define MT8135_INFRA_SCPSYS_RST 5 +#define MT8135_INFRA_CCIF1_RST 6 +#define MT8135_INFRA_PMIC_WRAP_RST 7 +#define MT8135_INFRA_KP_RST 8 +#define MT8135_INFRA_EMI_RST 32 +#define MT8135_INFRA_DRAMC0_RST 34 +#define MT8135_INFRA_SMI_RST 35 +#define MT8135_INFRA_M4U_RST 36 + +/* PERICFG resets */ +#define MT8135_PERI_UART0_SW_RST 0 +#define MT8135_PERI_UART1_SW_RST 1 +#define MT8135_PERI_UART2_SW_RST 2 +#define MT8135_PERI_UART3_SW_RST 3 +#define MT8135_PERI_IRDA_SW_RST 4 +#define MT8135_PERI_PTP_SW_RST 5 +#define MT8135_PERI_AP_HIF_SW_RST 6 +#define MT8135_PERI_GPCU_SW_RST 7 +#define MT8135_PERI_MD_HIF_SW_RST 8 +#define MT8135_PERI_NLI_SW_RST 9 +#define MT8135_PERI_AUXADC_SW_RST 10 +#define MT8135_PERI_DMA_SW_RST 11 +#define MT8135_PERI_NFI_SW_RST 14 +#define MT8135_PERI_PWM_SW_RST 15 +#define MT8135_PERI_THERM_SW_RST 16 +#define MT8135_PERI_MSDC0_SW_RST 17 +#define MT8135_PERI_MSDC1_SW_RST 18 +#define MT8135_PERI_MSDC2_SW_RST 19 +#define MT8135_PERI_MSDC3_SW_RST 20 +#define MT8135_PERI_I2C0_SW_RST 22 +#define MT8135_PERI_I2C1_SW_RST 23 +#define MT8135_PERI_I2C2_SW_RST 24 +#define MT8135_PERI_I2C3_SW_RST 25 +#define MT8135_PERI_I2C4_SW_RST 26 +#define MT8135_PERI_I2C5_SW_RST 27 +#define MT8135_PERI_I2C6_SW_RST 28 +#define MT8135_PERI_USB_SW_RST 29 +#define MT8135_PERI_SPI1_SW_RST 33 +#define MT8135_PERI_PWRAP_BRIDGE_SW_RST 34 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8135 */ diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h new file mode 100644 index 0000000..ba8636e --- /dev/null +++ b/include/dt-bindings/reset/mt8173-resets.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Flora Fu, MediaTek + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8173 + +/* INFRACFG resets */ +#define MT8173_INFRA_EMI_REG_RST 0 +#define MT8173_INFRA_DRAMC0_A0_RST 1 +#define MT8173_INFRA_APCIRQ_EINT_RST 3 +#define MT8173_INFRA_APXGPT_RST 4 +#define MT8173_INFRA_SCPSYS_RST 5 +#define MT8173_INFRA_KP_RST 6 +#define MT8173_INFRA_PMIC_WRAP_RST 7 +#define MT8173_INFRA_MPIP_RST 8 +#define MT8173_INFRA_CEC_RST 9 +#define MT8173_INFRA_EMI_RST 32 +#define MT8173_INFRA_DRAMC0_RST 34 +#define MT8173_INFRA_APMIXEDSYS_RST 35 +#define MT8173_INFRA_MIPI_DSI_RST 36 +#define MT8173_INFRA_TRNG_RST 37 +#define MT8173_INFRA_SYSIRQ_RST 38 +#define MT8173_INFRA_MIPI_CSI_RST 39 +#define MT8173_INFRA_GCE_FAXI_RST 40 +#define MT8173_INFRA_MMIOMMURST 47 + + +/* PERICFG resets */ +#define MT8173_PERI_UART0_SW_RST 0 +#define MT8173_PERI_UART1_SW_RST 1 +#define MT8173_PERI_UART2_SW_RST 2 +#define MT8173_PERI_UART3_SW_RST 3 +#define MT8173_PERI_IRRX_SW_RST 4 +#define MT8173_PERI_PWM_SW_RST 8 +#define MT8173_PERI_AUXADC_SW_RST 10 +#define MT8173_PERI_DMA_SW_RST 11 +#define MT8173_PERI_I2C6_SW_RST 13 +#define MT8173_PERI_NFI_SW_RST 14 +#define MT8173_PERI_THERM_SW_RST 16 +#define MT8173_PERI_MSDC2_SW_RST 17 +#define MT8173_PERI_MSDC3_SW_RST 18 +#define MT8173_PERI_MSDC0_SW_RST 19 +#define MT8173_PERI_MSDC1_SW_RST 20 +#define MT8173_PERI_I2C0_SW_RST 22 +#define MT8173_PERI_I2C1_SW_RST 23 +#define MT8173_PERI_I2C2_SW_RST 24 +#define MT8173_PERI_I2C3_SW_RST 25 +#define MT8173_PERI_I2C4_SW_RST 26 +#define MT8173_PERI_HDMI_SW_RST 29 +#define MT8173_PERI_SPI0_SW_RST 33 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8173 */ diff --git a/include/dt-bindings/reset/oxsemi,ox810se.h b/include/dt-bindings/reset/oxsemi,ox810se.h new file mode 100644 index 0000000..e943187 --- /dev/null +++ b/include/dt-bindings/reset/oxsemi,ox810se.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Neil Armstrong + */ + +#ifndef DT_RESET_OXSEMI_OX810SE_H +#define DT_RESET_OXSEMI_OX810SE_H + +#define RESET_ARM 0 +#define RESET_COPRO 1 +/* Reserved 2 */ +/* Reserved 3 */ +#define RESET_USBHS 4 +#define RESET_USBHSPHY 5 +#define RESET_MAC 6 +#define RESET_PCI 7 +#define RESET_DMA 8 +#define RESET_DPE 9 +#define RESET_DDR 10 +#define RESET_SATA 11 +#define RESET_SATA_LINK 12 +#define RESET_SATA_PHY 13 + /* Reserved 14 */ +#define RESET_NAND 15 +#define RESET_GPIO 16 +#define RESET_UART1 17 +#define RESET_UART2 18 +#define RESET_MISC 19 +#define RESET_I2S 20 +#define RESET_AHB_MON 21 +#define RESET_UART3 22 +#define RESET_UART4 23 +#define RESET_SGDMA 24 +/* Reserved 25 */ +/* Reserved 26 */ +/* Reserved 27 */ +/* Reserved 28 */ +/* Reserved 29 */ +/* Reserved 30 */ +#define RESET_BUS 31 + +#endif /* DT_RESET_OXSEMI_OX810SE_H */ diff --git a/include/dt-bindings/reset/oxsemi,ox820.h b/include/dt-bindings/reset/oxsemi,ox820.h new file mode 100644 index 0000000..54b58e0 --- /dev/null +++ b/include/dt-bindings/reset/oxsemi,ox820.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Neil Armstrong + */ + +#ifndef DT_RESET_OXSEMI_OX820_H +#define DT_RESET_OXSEMI_OX820_H + +#define RESET_SCU 0 +#define RESET_LEON 1 +#define RESET_ARM0 2 +#define RESET_ARM1 3 +#define RESET_USBHS 4 +#define RESET_USBPHYA 5 +#define RESET_MAC 6 +#define RESET_PCIEA 7 +#define RESET_SGDMA 8 +#define RESET_CIPHER 9 +#define RESET_DDR 10 +#define RESET_SATA 11 +#define RESET_SATA_LINK 12 +#define RESET_SATA_PHY 13 +#define RESET_PCIEPHY 14 
+#define RESET_NAND 15 +#define RESET_GPIO 16 +#define RESET_UART1 17 +#define RESET_UART2 18 +#define RESET_MISC 19 +#define RESET_I2S 20 +#define RESET_SD 21 +#define RESET_MAC_2 22 +#define RESET_PCIEB 23 +#define RESET_VIDEO 24 +#define RESET_DDR_PHY 25 +#define RESET_USBPHYB 26 +#define RESET_USBDEV 27 +/* Reserved 28 */ +#define RESET_ARMDBG 29 +#define RESET_PLLA 30 +#define RESET_PLLB 31 + +#endif /* DT_RESET_OXSEMI_OX820_H */ diff --git a/include/dt-bindings/reset/pistachio-resets.h b/include/dt-bindings/reset/pistachio-resets.h new file mode 100644 index 0000000..5bb4dd0 --- /dev/null +++ b/include/dt-bindings/reset/pistachio-resets.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the reset controller + * present in the Pistachio SoC + */ + +#ifndef _PISTACHIO_RESETS_H +#define _PISTACHIO_RESETS_H + +#define PISTACHIO_RESET_I2C0 0 +#define PISTACHIO_RESET_I2C1 1 +#define PISTACHIO_RESET_I2C2 2 +#define PISTACHIO_RESET_I2C3 3 +#define PISTACHIO_RESET_I2S_IN 4 +#define PISTACHIO_RESET_PRL_OUT 5 +#define PISTACHIO_RESET_SPDIF_OUT 6 +#define PISTACHIO_RESET_SPI 7 +#define PISTACHIO_RESET_PWM_PDM 8 +#define PISTACHIO_RESET_UART0 9 +#define PISTACHIO_RESET_UART1 10 +#define PISTACHIO_RESET_QSPI 11 +#define PISTACHIO_RESET_MDC 12 +#define PISTACHIO_RESET_SDHOST 13 +#define PISTACHIO_RESET_ETHERNET 14 +#define PISTACHIO_RESET_IR 15 +#define PISTACHIO_RESET_HASH 16 +#define PISTACHIO_RESET_TIMER 17 +#define PISTACHIO_RESET_I2S_OUT 18 +#define PISTACHIO_RESET_SPDIF_IN 19 +#define PISTACHIO_RESET_EVT 20 +#define PISTACHIO_RESET_USB_H 21 +#define PISTACHIO_RESET_USB_PR 22 +#define PISTACHIO_RESET_USB_PHY_PR 23 +#define PISTACHIO_RESET_USB_PHY_PON 24 +#define PISTACHIO_RESET_MAX 24 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-apq8084.h b/include/dt-bindings/reset/qcom,gcc-apq8084.h new file mode 100644 index 0000000..e76be38 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-apq8084.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_RESET_APQ_GCC_8084_H +#define _DT_BINDINGS_RESET_APQ_GCC_8084_H + +#define GCC_SYSTEM_NOC_BCR 0 +#define GCC_CONFIG_NOC_BCR 1 +#define GCC_PERIPH_NOC_BCR 2 +#define GCC_IMEM_BCR 3 +#define GCC_MMSS_BCR 4 +#define GCC_QDSS_BCR 5 +#define GCC_USB_30_BCR 6 +#define GCC_USB3_PHY_BCR 7 +#define GCC_USB_HS_HSIC_BCR 8 +#define GCC_USB_HS_BCR 9 +#define GCC_USB2A_PHY_BCR 10 +#define GCC_USB2B_PHY_BCR 11 +#define GCC_SDCC1_BCR 12 +#define GCC_SDCC2_BCR 13 +#define GCC_SDCC3_BCR 14 +#define GCC_SDCC4_BCR 15 +#define GCC_BLSP1_BCR 16 +#define GCC_BLSP1_QUP1_BCR 17 +#define GCC_BLSP1_UART1_BCR 18 +#define GCC_BLSP1_QUP2_BCR 19 +#define GCC_BLSP1_UART2_BCR 20 +#define GCC_BLSP1_QUP3_BCR 21 +#define GCC_BLSP1_UART3_BCR 22 +#define GCC_BLSP1_QUP4_BCR 23 +#define GCC_BLSP1_UART4_BCR 24 +#define GCC_BLSP1_QUP5_BCR 25 +#define GCC_BLSP1_UART5_BCR 26 +#define GCC_BLSP1_QUP6_BCR 27 +#define GCC_BLSP1_UART6_BCR 28 +#define GCC_BLSP2_BCR 29 +#define GCC_BLSP2_QUP1_BCR 30 +#define GCC_BLSP2_UART1_BCR 31 +#define GCC_BLSP2_QUP2_BCR 32 +#define GCC_BLSP2_UART2_BCR 33 +#define GCC_BLSP2_QUP3_BCR 34 +#define GCC_BLSP2_UART3_BCR 35 +#define GCC_BLSP2_QUP4_BCR 36 +#define GCC_BLSP2_UART4_BCR 37 +#define GCC_BLSP2_QUP5_BCR 38 +#define GCC_BLSP2_UART5_BCR 39 +#define GCC_BLSP2_QUP6_BCR 40 +#define GCC_BLSP2_UART6_BCR 41 +#define GCC_PDM_BCR 42 +#define GCC_PRNG_BCR 43 +#define GCC_BAM_DMA_BCR 44 +#define GCC_TSIF_BCR 45 +#define GCC_TCSR_BCR 46 +#define GCC_BOOT_ROM_BCR 47 +#define GCC_MSG_RAM_BCR 48 +#define GCC_TLMM_BCR 49 +#define GCC_MPM_BCR 50 +#define GCC_MPM_AHB_RESET 51 +#define GCC_MPM_NON_AHB_RESET 52 +#define GCC_SEC_CTRL_BCR 53 +#define GCC_SPMI_BCR 54 +#define GCC_SPDM_BCR 55 +#define GCC_CE1_BCR 56 +#define GCC_CE2_BCR 57 +#define GCC_BIMC_BCR 58 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 59 +#define GCC_SNOC_BUS_TIMEOUT2_BCR 60 +#define GCC_PNOC_BUS_TIMEOUT0_BCR 61 +#define GCC_PNOC_BUS_TIMEOUT1_BCR 62 +#define GCC_PNOC_BUS_TIMEOUT2_BCR 63 +#define GCC_PNOC_BUS_TIMEOUT3_BCR 64 +#define GCC_PNOC_BUS_TIMEOUT4_BCR 65 +#define GCC_CNOC_BUS_TIMEOUT0_BCR 66 +#define GCC_CNOC_BUS_TIMEOUT1_BCR 67 +#define GCC_CNOC_BUS_TIMEOUT2_BCR 68 +#define GCC_CNOC_BUS_TIMEOUT3_BCR 69 +#define GCC_CNOC_BUS_TIMEOUT4_BCR 70 +#define GCC_CNOC_BUS_TIMEOUT5_BCR 71 +#define GCC_CNOC_BUS_TIMEOUT6_BCR 72 +#define GCC_DEHR_BCR 73 +#define GCC_RBCPR_BCR 74 +#define GCC_MSS_RESTART 75 +#define GCC_LPASS_RESTART 76 +#define GCC_WCSS_RESTART 77 +#define GCC_VENUS_RESTART 78 +#define GCC_COPSS_SMMU_BCR 79 +#define GCC_SPSS_BCR 80 +#define GCC_PCIE_0_BCR 81 +#define GCC_PCIE_0_PHY_BCR 82 +#define GCC_PCIE_1_BCR 83 +#define GCC_PCIE_1_PHY_BCR 84 +#define GCC_USB_30_SEC_BCR 85 +#define GCC_USB3_SEC_PHY_BCR 86 +#define GCC_SATA_BCR 87 +#define GCC_CE3_BCR 88 +#define GCC_UFS_BCR 89 +#define GCC_USB30_PHY_COM_BCR 90 + +#endif diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h new file mode 100644 index 0000000..26b6f92 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_806X_H
+#define _DT_BINDINGS_RESET_IPQ_806X_H
+
+#define QDSS_STM_RESET 0
+#define AFAB_SMPSS_S_RESET 1
+#define AFAB_SMPSS_M1_RESET 2
+#define AFAB_SMPSS_M0_RESET 3
+#define AFAB_EBI1_CH0_RESET 4
+#define AFAB_EBI1_CH1_RESET 5
+#define SFAB_ADM0_M0_RESET 6
+#define SFAB_ADM0_M1_RESET 7
+#define SFAB_ADM0_M2_RESET 8
+#define ADM0_C2_RESET 9
+#define ADM0_C1_RESET 10
+#define ADM0_C0_RESET 11
+#define ADM0_PBUS_RESET 12
+#define ADM0_RESET 13
+#define QDSS_CLKS_SW_RESET 14
+#define QDSS_POR_RESET 15
+#define QDSS_TSCTR_RESET 16
+#define QDSS_HRESET_RESET 17
+#define QDSS_AXI_RESET 18
+#define QDSS_DBG_RESET 19
+#define SFAB_PCIE_M_RESET 20
+#define SFAB_PCIE_S_RESET 21
+#define PCIE_EXT_RESET 22
+#define PCIE_PHY_RESET 23
+#define PCIE_PCI_RESET 24
+#define PCIE_POR_RESET 25
+#define PCIE_HCLK_RESET 26
+#define PCIE_ACLK_RESET 27
+#define SFAB_LPASS_RESET 28
+#define SFAB_AFAB_M_RESET 29
+#define AFAB_SFAB_M0_RESET 30
+#define AFAB_SFAB_M1_RESET 31
+#define SFAB_SATA_S_RESET 32
+#define SFAB_DFAB_M_RESET 33
+#define DFAB_SFAB_M_RESET 34
+#define DFAB_SWAY0_RESET 35
+#define DFAB_SWAY1_RESET 36
+#define DFAB_ARB0_RESET 37
+#define DFAB_ARB1_RESET 38
+#define PPSS_PROC_RESET 39
+#define PPSS_RESET 40
+#define DMA_BAM_RESET 41
+#define SPS_TIC_H_RESET 42
+#define SFAB_CFPB_M_RESET 43
+#define SFAB_CFPB_S_RESET 44
+#define TSIF_H_RESET 45
+#define CE1_H_RESET 46
+#define CE1_CORE_RESET 47
+#define CE1_SLEEP_RESET 48
+#define CE2_H_RESET 49
+#define CE2_CORE_RESET 50
+#define SFAB_SFPB_M_RESET 51
+#define SFAB_SFPB_S_RESET 52
+#define RPM_PROC_RESET 53
+#define PMIC_SSBI2_RESET 54
+#define SDC1_RESET 55
+#define SDC2_RESET 56
+#define SDC3_RESET 57
+#define SDC4_RESET 58
+#define USB_HS1_RESET 59
+#define USB_HSIC_RESET 60
+#define USB_FS1_XCVR_RESET 61
+#define USB_FS1_RESET 62
+#define GSBI1_RESET 63
+#define GSBI2_RESET 64
+#define GSBI3_RESET 65
+#define GSBI4_RESET 66
+#define GSBI5_RESET 67
+#define GSBI6_RESET 68
+#define GSBI7_RESET 69
+#define SPDM_RESET 70
+#define SEC_CTRL_RESET 71
+#define TLMM_H_RESET 72
+#define SFAB_SATA_M_RESET 73
+#define SATA_RESET 74
+#define TSSC_RESET 75
+#define PDM_RESET 76
+#define MPM_H_RESET 77
+#define MPM_RESET 78
+#define SFAB_SMPSS_S_RESET 79
+#define PRNG_RESET 80
+#define SFAB_CE3_M_RESET 81
+#define SFAB_CE3_S_RESET 82
+#define CE3_SLEEP_RESET 83
+#define PCIE_1_M_RESET 84
+#define PCIE_1_S_RESET 85
+#define PCIE_1_EXT_RESET 86
+#define PCIE_1_PHY_RESET 87
+#define PCIE_1_PCI_RESET 88
+#define PCIE_1_POR_RESET 89
+#define PCIE_1_HCLK_RESET 90
+#define PCIE_1_ACLK_RESET 91
+#define PCIE_2_M_RESET 92
+#define PCIE_2_S_RESET 93
+#define PCIE_2_EXT_RESET 94
+#define PCIE_2_PHY_RESET 95
+#define PCIE_2_PCI_RESET 96
+#define PCIE_2_POR_RESET 97
+#define PCIE_2_HCLK_RESET 98
+#define PCIE_2_ACLK_RESET 99
+#define SFAB_USB30_S_RESET 100
+#define SFAB_USB30_M_RESET 101
+#define USB30_0_PORT2_HS_PHY_RESET 102
+#define USB30_0_MASTER_RESET 103
+#define USB30_0_SLEEP_RESET 104
+#define USB30_0_UTMI_PHY_RESET 105
+#define USB30_0_POWERON_RESET 106
+#define USB30_0_PHY_RESET 107
+#define USB30_1_MASTER_RESET 108
+#define USB30_1_SLEEP_RESET 109
+#define USB30_1_UTMI_PHY_RESET 110
+#define USB30_1_POWERON_RESET 111
+#define USB30_1_PHY_RESET 112
+#define NSSFB0_RESET 113
+#define NSSFB1_RESET 114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET 115
+#define UBI32_CORE1_CLAMP_RESET 116
+#define UBI32_CORE1_AHB_RESET 117
+#define UBI32_CORE1_AXI_RESET 118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET 119
+#define UBI32_CORE2_CLAMP_RESET 120
+#define UBI32_CORE2_AHB_RESET 121
+#define UBI32_CORE2_AXI_RESET 122
+#define GMAC_CORE1_RESET 123
+#define GMAC_CORE2_RESET 124
+#define GMAC_CORE3_RESET 125
+#define GMAC_CORE4_RESET 126
+#define GMAC_AHB_RESET 127
+#define NSS_CH0_RST_RX_CLK_N_RESET 128
+#define NSS_CH0_RST_TX_CLK_N_RESET 129
+#define NSS_CH0_RST_RX_125M_N_RESET 130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET 131
+#define NSS_CH0_RST_TX_125M_N_RESET 132
+#define NSS_CH1_RST_RX_CLK_N_RESET 133
+#define NSS_CH1_RST_TX_CLK_N_RESET 134
+#define NSS_CH1_RST_RX_125M_N_RESET 135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET 136
+#define NSS_CH1_RST_TX_125M_N_RESET 137
+#define NSS_CH2_RST_RX_CLK_N_RESET 138
+#define NSS_CH2_RST_TX_CLK_N_RESET 139
+#define NSS_CH2_RST_RX_125M_N_RESET 140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET 141
+#define NSS_CH2_RST_TX_125M_N_RESET 142
+#define NSS_CH3_RST_RX_CLK_N_RESET 143
+#define NSS_CH3_RST_TX_CLK_N_RESET 144
+#define NSS_CH3_RST_RX_125M_N_RESET 145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET 146
+#define NSS_CH3_RST_TX_125M_N_RESET 147
+#define NSS_RST_RX_250M_125M_N_RESET 148
+#define NSS_RST_TX_250M_125M_N_RESET 149
+#define NSS_QSGMII_TXPI_RST_N_RESET 150
+#define NSS_QSGMII_CDR_RST_N_RESET 151
+#define NSS_SGMII2_CDR_RST_N_RESET 152
+#define NSS_SGMII3_CDR_RST_N_RESET 153
+#define NSS_CAL_PRBS_RST_N_RESET 154
+#define NSS_LCKDT_RST_N_RESET 155
+#define NSS_SRDS_N_RESET 156
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-mdm9615.h b/include/dt-bindings/reset/qcom,gcc-mdm9615.h
new file mode 100644
index 0000000..5faf02d
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-mdm9615.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong
+ */
+
+#ifndef _DT_BINDINGS_RESET_GCC_MDM9615_H
+#define _DT_BINDINGS_RESET_GCC_MDM9615_H
+
+#define SFAB_MSS_Q6_SW_RESET 0
+#define SFAB_MSS_Q6_FW_RESET 1
+#define QDSS_STM_RESET 2
+#define AFAB_SMPSS_S_RESET 3
+#define AFAB_SMPSS_M1_RESET 4
+#define AFAB_SMPSS_M0_RESET 5
+#define AFAB_EBI1_CH0_RESET 6
+#define AFAB_EBI1_CH1_RESET 7
+#define SFAB_ADM0_M0_RESET 8
+#define SFAB_ADM0_M1_RESET 9
+#define SFAB_ADM0_M2_RESET 10
+#define ADM0_C2_RESET 11
+#define ADM0_C1_RESET 12
+#define ADM0_C0_RESET 13
+#define ADM0_PBUS_RESET 14
+#define ADM0_RESET 15
+#define QDSS_CLKS_SW_RESET 16
+#define QDSS_POR_RESET 17
+#define QDSS_TSCTR_RESET 18
+#define QDSS_HRESET_RESET 19
+#define QDSS_AXI_RESET 20
+#define QDSS_DBG_RESET 21
+#define PCIE_A_RESET 22
+#define PCIE_AUX_RESET 23
+#define PCIE_H_RESET 24
+#define SFAB_PCIE_M_RESET 25
+#define SFAB_PCIE_S_RESET 26
+#define SFAB_MSS_M_RESET 27
+#define SFAB_USB3_M_RESET 28
+#define SFAB_RIVA_M_RESET 29
+#define SFAB_LPASS_RESET 30
+#define SFAB_AFAB_M_RESET 31
+#define AFAB_SFAB_M0_RESET 32
+#define AFAB_SFAB_M1_RESET 33
+#define SFAB_SATA_S_RESET 34
+#define SFAB_DFAB_M_RESET 35
+#define DFAB_SFAB_M_RESET 36
+#define DFAB_SWAY0_RESET 37
+#define DFAB_SWAY1_RESET 38
+#define DFAB_ARB0_RESET 39
+#define DFAB_ARB1_RESET 40
+#define PPSS_PROC_RESET 41
+#define PPSS_RESET 42
+#define DMA_BAM_RESET 43
+#define SPS_TIC_H_RESET 44
+#define SLIMBUS_H_RESET 45
+#define SFAB_CFPB_M_RESET 46
+#define SFAB_CFPB_S_RESET 47
+#define TSIF_H_RESET 48
+#define CE1_H_RESET 49
+#define CE1_CORE_RESET 50
+#define CE1_SLEEP_RESET 51
+#define CE2_H_RESET 52
+#define CE2_CORE_RESET 53
+#define SFAB_SFPB_M_RESET 54
+#define SFAB_SFPB_S_RESET 55
+#define RPM_PROC_RESET 56
+#define PMIC_SSBI2_RESET 57
+#define SDC1_RESET 58
+#define SDC2_RESET 59
+#define SDC3_RESET 60
+#define SDC4_RESET 61
+#define SDC5_RESET 62
+#define DFAB_A2_RESET 63
+#define USB_HS1_RESET 64
+#define USB_HSIC_RESET 65
+#define USB_FS1_XCVR_RESET 66
+#define USB_FS1_RESET 67
+#define USB_FS2_XCVR_RESET 68
+#define USB_FS2_RESET 69
+#define GSBI1_RESET 70
+#define GSBI2_RESET 71
+#define GSBI3_RESET 72
+#define GSBI4_RESET 73
+#define GSBI5_RESET 74
+#define GSBI6_RESET 75
+#define GSBI7_RESET 76
+#define GSBI8_RESET 77
+#define GSBI9_RESET 78
+#define GSBI10_RESET 79
+#define GSBI11_RESET 80
+#define GSBI12_RESET 81
+#define SPDM_RESET 82
+#define TLMM_H_RESET 83
+#define SFAB_MSS_S_RESET 84
+#define MSS_SLP_RESET 85
+#define MSS_Q6SW_JTAG_RESET 86
+#define MSS_Q6FW_JTAG_RESET 87
+#define MSS_RESET 88
+#define SATA_H_RESET 89
+#define SATA_RXOOB_RESET 90
+#define SATA_PMALIVE_RESET 91
+#define SATA_SFAB_M_RESET 92
+#define TSSC_RESET 93
+#define PDM_RESET 94
+#define MPM_H_RESET 95
+#define MPM_RESET 96
+#define SFAB_SMPSS_S_RESET 97
+#define PRNG_RESET 98
+#define RIVA_RESET 99
+#define USB_HS3_RESET 100
+#define USB_HS4_RESET 101
+#define CE3_RESET 102
+#define PCIE_EXT_PCI_RESET 103
+#define PCIE_PHY_RESET 104
+#define PCIE_PCI_RESET 105
+#define PCIE_POR_RESET 106
+#define PCIE_HCLK_RESET 107
+#define PCIE_ACLK_RESET 108
+#define CE3_H_RESET 109
+#define SFAB_CE3_M_RESET 110
+#define SFAB_CE3_S_RESET 111
+#define SATA_RESET 112
+#define CE3_SLEEP_RESET 113
+#define GSS_SLP_RESET 114
+#define GSS_RESET 115
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8660.h b/include/dt-bindings/reset/qcom,gcc-msm8660.h
new file mode 100644
index 0000000..f6d2b3c
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8660.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8660_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8660_H
+
+#define AFAB_CORE_RESET 0
+#define SCSS_SYS_RESET 1
+#define SCSS_SYS_POR_RESET 2
+#define AFAB_SMPSS_S_RESET 3
+#define AFAB_SMPSS_M1_RESET 4
+#define AFAB_SMPSS_M0_RESET 5
+#define AFAB_EBI1_S_RESET 6
+#define SFAB_CORE_RESET 7
+#define SFAB_ADM0_M0_RESET 8
+#define SFAB_ADM0_M1_RESET 9
+#define SFAB_ADM0_M2_RESET 10
+#define ADM0_C2_RESET 11
+#define ADM0_C1_RESET 12
+#define ADM0_C0_RESET 13
+#define ADM0_PBUS_RESET 14
+#define ADM0_RESET 15
+#define SFAB_ADM1_M0_RESET 16
+#define SFAB_ADM1_M1_RESET 17
+#define SFAB_ADM1_M2_RESET 18
+#define MMFAB_ADM1_M3_RESET 19
+#define ADM1_C3_RESET 20
+#define ADM1_C2_RESET 21
+#define ADM1_C1_RESET 22
+#define ADM1_C0_RESET 23
+#define ADM1_PBUS_RESET 24
+#define ADM1_RESET 25
+#define IMEM0_RESET 26
+#define SFAB_LPASS_Q6_RESET 27
+#define SFAB_AFAB_M_RESET 28
+#define AFAB_SFAB_M0_RESET 29
+#define AFAB_SFAB_M1_RESET 30
+#define DFAB_CORE_RESET 31
+#define SFAB_DFAB_M_RESET 32
+#define DFAB_SFAB_M_RESET 33
+#define DFAB_SWAY0_RESET 34
+#define DFAB_SWAY1_RESET 35
+#define DFAB_ARB0_RESET 36
+#define DFAB_ARB1_RESET 37
+#define PPSS_PROC_RESET 38
+#define PPSS_RESET 39
+#define PMEM_RESET 40
+#define DMA_BAM_RESET 41
+#define SIC_RESET 42
+#define SPS_TIC_RESET 43
+#define CFBP0_RESET 44
+#define CFBP1_RESET 45
+#define CFBP2_RESET 46
+#define EBI2_RESET 47
+#define SFAB_CFPB_M_RESET 48
+#define CFPB_MASTER_RESET 49
+#define SFAB_CFPB_S_RESET 50
+#define CFPB_SPLITTER_RESET 51
+#define TSIF_RESET 52
+#define CE1_RESET 53
+#define CE2_RESET 54
+#define SFAB_SFPB_M_RESET 55
+#define SFAB_SFPB_S_RESET 56
+#define RPM_PROC_RESET 57
+#define RPM_BUS_RESET 58
+#define RPM_MSG_RAM_RESET 59
+#define PMIC_ARB0_RESET 60
+#define PMIC_ARB1_RESET 61
+#define PMIC_SSBI2_RESET 62
+#define SDC1_RESET 63
+#define SDC2_RESET 64
+#define SDC3_RESET 65
+#define SDC4_RESET 66
+#define SDC5_RESET 67
+#define USB_HS1_RESET 68
+#define USB_HS2_XCVR_RESET 69
+#define USB_HS2_RESET 70
+#define USB_FS1_XCVR_RESET 71
+#define USB_FS1_RESET 72
+#define USB_FS2_XCVR_RESET 73
+#define USB_FS2_RESET 74
+#define GSBI1_RESET 75
+#define GSBI2_RESET 76
+#define GSBI3_RESET 77
+#define GSBI4_RESET 78
+#define GSBI5_RESET 79
+#define GSBI6_RESET 80
+#define GSBI7_RESET 81
+#define GSBI8_RESET 82
+#define GSBI9_RESET 83
+#define GSBI10_RESET 84
+#define GSBI11_RESET 85
+#define GSBI12_RESET 86
+#define SPDM_RESET 87
+#define SEC_CTRL_RESET 88
+#define TLMM_H_RESET 89
+#define TLMM_RESET 90
+#define MARM_PWRON_RESET 91
+#define MARM_RESET 92
+#define MAHB1_RESET 93
+#define SFAB_MSS_S_RESET 94
+#define MAHB2_RESET 95
+#define MODEM_SW_AHB_RESET 96
+#define MODEM_RESET 97
+#define SFAB_MSS_MDM1_RESET 98
+#define SFAB_MSS_MDM0_RESET 99
+#define MSS_SLP_RESET 100
+#define MSS_MARM_SAW_RESET 101
+#define MSS_WDOG_RESET 102
+#define TSSC_RESET 103
+#define PDM_RESET 104
+#define SCSS_CORE0_RESET 105
+#define SCSS_CORE0_POR_RESET 106
+#define SCSS_CORE1_RESET 107
+#define SCSS_CORE1_POR_RESET 108
+#define MPM_RESET 109
+#define EBI1_1X_DIV_RESET 110
+#define EBI1_RESET 111
+#define SFAB_SMPSS_S_RESET 112
+#define USB_PHY0_RESET 113
+#define USB_PHY1_RESET 114
+#define PRNG_RESET 115
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8916.h b/include/dt-bindings/reset/qcom,gcc-msm8916.h
new file mode 100644
index 0000000..1f9be10
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8916.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2015 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8916_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+
+#endif
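Each *_BCR index above names one block-control reset inside the MSM8916 global clock controller; a consumer node simply pairs the GCC phandle with the constant. A sketch, with the "gcc" label and the node shape assumed for illustration:

    #include <dt-bindings/reset/qcom,gcc-msm8916.h>

    usb {
            /* "gcc" is an assumed label for the MSM8916 GCC node */
            resets = <&gcc GCC_USB_HS_BCR>;
            reset-names = "core";
    };
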
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8960.h b/include/dt-bindings/reset/qcom,gcc-msm8960.h
new file mode 100644
index 0000000..c7ebae7
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8960.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8960_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8960_H
+
+#define SFAB_MSS_Q6_SW_RESET 0
+#define SFAB_MSS_Q6_FW_RESET 1
+#define QDSS_STM_RESET 2
+#define AFAB_SMPSS_S_RESET 3
+#define AFAB_SMPSS_M1_RESET 4
+#define AFAB_SMPSS_M0_RESET 5
+#define AFAB_EBI1_CH0_RESET 6
+#define AFAB_EBI1_CH1_RESET 7
+#define SFAB_ADM0_M0_RESET 8
+#define SFAB_ADM0_M1_RESET 9
+#define SFAB_ADM0_M2_RESET 10
+#define ADM0_C2_RESET 11
+#define ADM0_C1_RESET 12
+#define ADM0_C0_RESET 13
+#define ADM0_PBUS_RESET 14
+#define ADM0_RESET 15
+#define QDSS_CLKS_SW_RESET 16
+#define QDSS_POR_RESET 17
+#define QDSS_TSCTR_RESET 18
+#define QDSS_HRESET_RESET 19
+#define QDSS_AXI_RESET 20
+#define QDSS_DBG_RESET 21
+#define PCIE_A_RESET 22
+#define PCIE_AUX_RESET 23
+#define PCIE_H_RESET 24
+#define SFAB_PCIE_M_RESET 25
+#define SFAB_PCIE_S_RESET 26
+#define SFAB_MSS_M_RESET 27
+#define SFAB_USB3_M_RESET 28
+#define SFAB_RIVA_M_RESET 29
+#define SFAB_LPASS_RESET 30
+#define SFAB_AFAB_M_RESET 31
+#define AFAB_SFAB_M0_RESET 32
+#define AFAB_SFAB_M1_RESET 33
+#define SFAB_SATA_S_RESET 34
+#define SFAB_DFAB_M_RESET 35
+#define DFAB_SFAB_M_RESET 36
+#define DFAB_SWAY0_RESET 37
+#define DFAB_SWAY1_RESET 38
+#define DFAB_ARB0_RESET 39
+#define DFAB_ARB1_RESET 40
+#define PPSS_PROC_RESET 41
+#define PPSS_RESET 42
+#define DMA_BAM_RESET 43
+#define SPS_TIC_H_RESET 44
+#define SLIMBUS_H_RESET 45
+#define SFAB_CFPB_M_RESET 46
+#define SFAB_CFPB_S_RESET 47
+#define TSIF_H_RESET 48
+#define CE1_H_RESET 49
+#define CE1_CORE_RESET 50
+#define CE1_SLEEP_RESET 51
+#define CE2_H_RESET 52
+#define CE2_CORE_RESET 53
+#define SFAB_SFPB_M_RESET 54
+#define SFAB_SFPB_S_RESET 55
+#define RPM_PROC_RESET 56
+#define PMIC_SSBI2_RESET 57
+#define SDC1_RESET 58
+#define SDC2_RESET 59
+#define SDC3_RESET 60
+#define SDC4_RESET 61
+#define SDC5_RESET 62
+#define DFAB_A2_RESET 63
+#define USB_HS1_RESET 64
+#define USB_HSIC_RESET 65
+#define USB_FS1_XCVR_RESET 66
+#define USB_FS1_RESET 67
+#define USB_FS2_XCVR_RESET 68
+#define USB_FS2_RESET 69
+#define GSBI1_RESET 70
+#define GSBI2_RESET 71
+#define GSBI3_RESET 72
+#define GSBI4_RESET 73
+#define GSBI5_RESET 74
+#define GSBI6_RESET 75
+#define GSBI7_RESET 76
+#define GSBI8_RESET 77
+#define GSBI9_RESET 78
+#define GSBI10_RESET 79
+#define GSBI11_RESET 80
+#define GSBI12_RESET 81
+#define SPDM_RESET 82
+#define TLMM_H_RESET 83
+#define SFAB_MSS_S_RESET 84
+#define MSS_SLP_RESET 85
+#define MSS_Q6SW_JTAG_RESET 86
+#define MSS_Q6FW_JTAG_RESET 87
+#define MSS_RESET 88
+#define SATA_H_RESET 89
+#define SATA_RXOOB_RESET 90
+#define SATA_PMALIVE_RESET 91
+#define SATA_SFAB_M_RESET 92
+#define TSSC_RESET 93
+#define PDM_RESET 94
+#define MPM_H_RESET 95
+#define MPM_RESET 96
+#define SFAB_SMPSS_S_RESET 97
+#define PRNG_RESET 98
+#define RIVA_RESET 99
+#define USB_HS3_RESET 100
+#define USB_HS4_RESET 101
+#define CE3_RESET 102
+#define PCIE_EXT_PCI_RESET 103
+#define PCIE_PHY_RESET 104
+#define PCIE_PCI_RESET 105
+#define PCIE_POR_RESET 106
+#define PCIE_HCLK_RESET 107
+#define PCIE_ACLK_RESET 108
+#define CE3_H_RESET 109
+#define SFAB_CE3_M_RESET 110
+#define SFAB_CE3_S_RESET 111
+#define SATA_RESET 112
+#define CE3_SLEEP_RESET 113
+#define GSS_SLP_RESET 114
+#define GSS_RESET 115
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8974.h b/include/dt-bindings/reset/qcom,gcc-msm8974.h
new file mode 100644
index 0000000..23777e5
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8974.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8974_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8974_H
+
+#define GCC_SYSTEM_NOC_BCR 0
+#define GCC_CONFIG_NOC_BCR 1
+#define GCC_PERIPH_NOC_BCR 2
+#define GCC_IMEM_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_QDSS_BCR 5
+#define GCC_USB_30_BCR 6
+#define GCC_USB3_PHY_BCR 7
+#define GCC_USB_HS_HSIC_BCR 8
+#define GCC_USB_HS_BCR 9
+#define GCC_USB2A_PHY_BCR 10
+#define GCC_USB2B_PHY_BCR 11
+#define GCC_SDCC1_BCR 12
+#define GCC_SDCC2_BCR 13
+#define GCC_SDCC3_BCR 14
+#define GCC_SDCC4_BCR 15
+#define GCC_BLSP1_BCR 16
+#define GCC_BLSP1_QUP1_BCR 17
+#define GCC_BLSP1_UART1_BCR 18
+#define GCC_BLSP1_QUP2_BCR 19
+#define GCC_BLSP1_UART2_BCR 20
+#define GCC_BLSP1_QUP3_BCR 21
+#define GCC_BLSP1_UART3_BCR 22
+#define GCC_BLSP1_QUP4_BCR 23
+#define GCC_BLSP1_UART4_BCR 24
+#define GCC_BLSP1_QUP5_BCR 25
+#define GCC_BLSP1_UART5_BCR 26
+#define GCC_BLSP1_QUP6_BCR 27
+#define GCC_BLSP1_UART6_BCR 28
+#define GCC_BLSP2_BCR 29
+#define GCC_BLSP2_QUP1_BCR 30
+#define GCC_BLSP2_UART1_BCR 31
+#define GCC_BLSP2_QUP2_BCR 32
+#define GCC_BLSP2_UART2_BCR 33
+#define GCC_BLSP2_QUP3_BCR 34
+#define GCC_BLSP2_UART3_BCR 35
+#define GCC_BLSP2_QUP4_BCR 36
+#define GCC_BLSP2_UART4_BCR 37
+#define GCC_BLSP2_QUP5_BCR 38
+#define GCC_BLSP2_UART5_BCR 39
+#define GCC_BLSP2_QUP6_BCR 40
+#define GCC_BLSP2_UART6_BCR 41
+#define GCC_PDM_BCR 42
+#define GCC_BAM_DMA_BCR 43
+#define GCC_TSIF_BCR 44
+#define GCC_TCSR_BCR 45
+#define GCC_BOOT_ROM_BCR 46
+#define GCC_MSG_RAM_BCR 47
+#define GCC_TLMM_BCR 48
+#define GCC_MPM_BCR 49
+#define GCC_SEC_CTRL_BCR 50
+#define GCC_SPMI_BCR 51
+#define GCC_SPDM_BCR 52
+#define GCC_CE1_BCR 53
+#define GCC_CE2_BCR 54
+#define GCC_BIMC_BCR 55
+#define GCC_MPM_NON_AHB_RESET 56
+#define GCC_MPM_AHB_RESET 57
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_SNOC_BUS_TIMEOUT2_BCR 59
+#define GCC_PNOC_BUS_TIMEOUT0_BCR 60
+#define GCC_PNOC_BUS_TIMEOUT1_BCR 61
+#define GCC_PNOC_BUS_TIMEOUT2_BCR 62
+#define GCC_PNOC_BUS_TIMEOUT3_BCR 63
+#define GCC_PNOC_BUS_TIMEOUT4_BCR 64
+#define GCC_CNOC_BUS_TIMEOUT0_BCR 65
+#define GCC_CNOC_BUS_TIMEOUT1_BCR 66
+#define GCC_CNOC_BUS_TIMEOUT2_BCR 67
+#define GCC_CNOC_BUS_TIMEOUT3_BCR 68
+#define GCC_CNOC_BUS_TIMEOUT4_BCR 69
+#define GCC_CNOC_BUS_TIMEOUT5_BCR 70
+#define GCC_CNOC_BUS_TIMEOUT6_BCR 71
+#define GCC_DEHR_BCR 72
+#define GCC_RBCPR_BCR 73
+#define GCC_MSS_RESTART 74
+#define GCC_LPASS_RESTART 75
+#define GCC_WCSS_RESTART 76
+#define GCC_VENUS_RESTART 77
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,mmcc-apq8084.h b/include/dt-bindings/reset/qcom,mmcc-apq8084.h
new file mode 100644
index 0000000..faaeb40
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,mmcc-apq8084.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_APQ_MMCC_8084_H
+#define _DT_BINDINGS_RESET_APQ_MMCC_8084_H
+
+#define MMSS_SPDM_RESET 0
+#define MMSS_SPDM_RM_RESET 1
+#define VENUS0_RESET 2
+#define VPU_RESET 3
+#define MDSS_RESET 4
+#define AVSYNC_RESET 5
+#define CAMSS_PHY0_RESET 6
+#define CAMSS_PHY1_RESET 7
+#define CAMSS_PHY2_RESET 8
+#define CAMSS_CSI0_RESET 9
+#define CAMSS_CSI0PHY_RESET 10
+#define CAMSS_CSI0RDI_RESET 11
+#define CAMSS_CSI0PIX_RESET 12
+#define CAMSS_CSI1_RESET 13
+#define CAMSS_CSI1PHY_RESET 14
+#define CAMSS_CSI1RDI_RESET 15
+#define CAMSS_CSI1PIX_RESET 16
+#define CAMSS_CSI2_RESET 17
+#define CAMSS_CSI2PHY_RESET 18
+#define CAMSS_CSI2RDI_RESET 19
+#define CAMSS_CSI2PIX_RESET 20
+#define CAMSS_CSI3_RESET 21
+#define CAMSS_CSI3PHY_RESET 22
+#define CAMSS_CSI3RDI_RESET 23
+#define CAMSS_CSI3PIX_RESET 24
+#define CAMSS_ISPIF_RESET 25
+#define CAMSS_CCI_RESET 26
+#define CAMSS_MCLK0_RESET 27
+#define CAMSS_MCLK1_RESET 28
+#define CAMSS_MCLK2_RESET 29
+#define CAMSS_MCLK3_RESET 30
+#define CAMSS_GP0_RESET 31
+#define CAMSS_GP1_RESET 32
+#define CAMSS_TOP_RESET 33
+#define CAMSS_AHB_RESET 34
+#define CAMSS_MICRO_RESET 35
+#define CAMSS_JPEG_RESET 36
+#define CAMSS_VFE_RESET 37
+#define CAMSS_CSI_VFE0_RESET 38
+#define CAMSS_CSI_VFE1_RESET 39
+#define OXILI_RESET 40
+#define OXILICX_RESET 41
+#define OCMEMCX_RESET 42
+#define MMSS_RBCRP_RESET 43
+#define MMSSNOCAHB_RESET 44
+#define MMSSNOCAXI_RESET 45
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8960.h b/include/dt-bindings/reset/qcom,mmcc-msm8960.h
new file mode 100644
index 0000000..eb4186a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,mmcc-msm8960.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_MMCC_8960_H
+#define _DT_BINDINGS_RESET_MSM_MMCC_8960_H
+
+#define VPE_AXI_RESET 0
+#define IJPEG_AXI_RESET 1
+#define MDP_AXI_RESET 2
+#define VFE_AXI_RESET 3
+#define SP_AXI_RESET 4
+#define VCODEC_AXI_RESET 5
+#define ROT_AXI_RESET 6
+#define VCODEC_AXI_A_RESET 7
+#define VCODEC_AXI_B_RESET 8
+#define FAB_S3_AXI_RESET 9
+#define FAB_S2_AXI_RESET 10
+#define FAB_S1_AXI_RESET 11
+#define FAB_S0_AXI_RESET 12
+#define SMMU_GFX3D_AHB_RESET 13
+#define SMMU_VPE_AHB_RESET 14
+#define SMMU_VFE_AHB_RESET 15
+#define SMMU_ROT_AHB_RESET 16
+#define SMMU_VCODEC_B_AHB_RESET 17
+#define SMMU_VCODEC_A_AHB_RESET 18
+#define SMMU_MDP1_AHB_RESET 19
+#define SMMU_MDP0_AHB_RESET 20
+#define SMMU_JPEGD_AHB_RESET 21
+#define SMMU_IJPEG_AHB_RESET 22
+#define SMMU_GFX2D0_AHB_RESET 23
+#define SMMU_GFX2D1_AHB_RESET 24
+#define APU_AHB_RESET 25
+#define CSI_AHB_RESET 26
+#define TV_ENC_AHB_RESET 27
+#define VPE_AHB_RESET 28
+#define FABRIC_AHB_RESET 29
+#define GFX2D0_AHB_RESET 30
+#define GFX2D1_AHB_RESET 31
+#define GFX3D_AHB_RESET 32
+#define HDMI_AHB_RESET 33
+#define MMSS_IMEM_AHB_RESET 34
+#define IJPEG_AHB_RESET 35
+#define DSI_M_AHB_RESET 36
+#define DSI_S_AHB_RESET 37
+#define JPEGD_AHB_RESET 38
+#define MDP_AHB_RESET 39
+#define ROT_AHB_RESET 40
+#define VCODEC_AHB_RESET 41
+#define VFE_AHB_RESET 42
+#define DSI2_M_AHB_RESET 43
+#define DSI2_S_AHB_RESET 44
+#define CSIPHY2_RESET 45
+#define CSI_PIX1_RESET 46
+#define CSIPHY0_RESET 47
+#define CSIPHY1_RESET 48
+#define DSI2_RESET 49
+#define VFE_CSI_RESET 50
+#define MDP_RESET 51
+#define AMP_RESET 52
+#define JPEGD_RESET 53
+#define CSI1_RESET 54
+#define VPE_RESET 55
+#define MMSS_FABRIC_RESET 56
+#define VFE_RESET 57
+#define GFX2D0_RESET 58
+#define GFX2D1_RESET 59
+#define GFX3D_RESET 60
+#define HDMI_RESET 61
+#define MMSS_IMEM_RESET 62
+#define IJPEG_RESET 63
+#define CSI0_RESET 64
+#define DSI_RESET 65
+#define VCODEC_RESET 66
+#define MDP_TV_RESET 67
+#define MDP_VSYNC_RESET 68
+#define ROT_RESET 69
+#define TV_HDMI_RESET 70
+#define TV_ENC_RESET 71
+#define CSI2_RESET 72
+#define CSI_RDI1_RESET 73
+#define CSI_RDI2_RESET 74
+#define GFX3D_AXI_RESET 75
+#define VCAP_AXI_RESET 76
+#define SMMU_VCAP_AHB_RESET 77
+#define VCAP_AHB_RESET 78
+#define CSI_RDI_RESET 79
+#define CSI_PIX_RESET 80
+#define VCAP_NPL_RESET 81
+#define VCAP_RESET 82
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,mmcc-msm8974.h b/include/dt-bindings/reset/qcom,mmcc-msm8974.h
new file mode 100644
index 0000000..d61b077
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,mmcc-msm8974.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_MMCC_8974_H
+#define _DT_BINDINGS_RESET_MSM_MMCC_8974_H
+
+#define SPDM_RESET 0
+#define SPDM_RM_RESET 1
+#define VENUS0_RESET 2
+#define MDSS_RESET 3
+#define CAMSS_PHY0_RESET 4
+#define CAMSS_PHY1_RESET 5
+#define CAMSS_PHY2_RESET 6
+#define CAMSS_CSI0_RESET 7
+#define CAMSS_CSI0PHY_RESET 8
+#define CAMSS_CSI0RDI_RESET 9
+#define CAMSS_CSI0PIX_RESET 10
+#define CAMSS_CSI1_RESET 11
+#define CAMSS_CSI1PHY_RESET 12
+#define CAMSS_CSI1RDI_RESET 13
+#define CAMSS_CSI1PIX_RESET 14
+#define CAMSS_CSI2_RESET 15
+#define CAMSS_CSI2PHY_RESET 16
+#define CAMSS_CSI2RDI_RESET 17
+#define CAMSS_CSI2PIX_RESET 18
+#define CAMSS_CSI3_RESET 19
+#define CAMSS_CSI3PHY_RESET 20
+#define CAMSS_CSI3RDI_RESET 21
+#define CAMSS_CSI3PIX_RESET 22
+#define CAMSS_ISPIF_RESET 23
+#define CAMSS_CCI_RESET 24
+#define CAMSS_MCLK0_RESET 25
+#define CAMSS_MCLK1_RESET 26
+#define CAMSS_MCLK2_RESET 27
+#define CAMSS_MCLK3_RESET 28
+#define CAMSS_GP0_RESET 29
+#define CAMSS_GP1_RESET 30
+#define CAMSS_TOP_RESET 31
+#define CAMSS_MICRO_RESET 32
+#define CAMSS_JPEG_RESET 33
+#define CAMSS_VFE_RESET 34
+#define CAMSS_CSI_VFE0_RESET 35
+#define CAMSS_CSI_VFE1_RESET 36
+#define OXILI_RESET 37
+#define OXILICX_RESET 38
+#define OCMEMCX_RESET 39
+#define MMSS_RBCRP_RESET 40
+#define MMSSNOCAHB_RESET 41
+#define MMSSNOCAXI_RESET 42
+#define OCMEMNOC_RESET 43
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-aoss.h b/include/dt-bindings/reset/qcom,sdm845-aoss.h
new file mode 100644
index 0000000..476c5fc
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sdm845-aoss.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_AOSS_SDM_845_H
+#define _DT_BINDINGS_RESET_AOSS_SDM_845_H
+
+#define AOSS_CC_MSS_RESTART 0
+#define AOSS_CC_CAMSS_RESTART 1
+#define AOSS_CC_VENUS_RESTART 2
+#define AOSS_CC_GPU_RESTART 3
+#define AOSS_CC_DISPSS_RESTART 4
+#define AOSS_CC_WCSS_RESTART 5
+#define AOSS_CC_LPASS_RESTART 6
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-pdc.h b/include/dt-bindings/reset/qcom,sdm845-pdc.h
new file mode 100644
index 0000000..53c37f9
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sdm845-pdc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_PDC_SDM_845_H
+#define _DT_BINDINGS_RESET_PDC_SDM_845_H
+
+#define PDC_APPS_SYNC_RESET 0
+#define PDC_SP_SYNC_RESET 1
+#define PDC_AUDIO_SYNC_RESET 2
+#define PDC_SENSORS_SYNC_RESET 3
+#define PDC_AOP_SYNC_RESET 4
+#define PDC_DEBUG_SYNC_RESET 5
+#define PDC_GPU_SYNC_RESET 6
+#define PDC_DISPLAY_SYNC_RESET 7
+#define PDC_COMPUTE_SYNC_RESET 8
+#define PDC_MODEM_SYNC_RESET 9
+
+#endif
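The SDM845 AOSS and PDC lines above are typically consumed together by remoteproc-style nodes, taking one line from each controller. A sketch under assumed controller labels ("aoss_reset" and "pdc_reset" are not defined by this commit):

    #include <dt-bindings/reset/qcom,sdm845-aoss.h>
    #include <dt-bindings/reset/qcom,sdm845-pdc.h>

    remoteproc-mss {
            /* "aoss_reset" and "pdc_reset" are assumed labels */
            resets = <&aoss_reset AOSS_CC_MSS_RESTART>,
                     <&pdc_reset PDC_MODEM_SYNC_RESET>;
            reset-names = "mss_restart", "pdc_reset";
    };
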
diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h
new file mode 100644
index 0000000..e1a643e
--- /dev/null
+++ b/include/dt-bindings/reset/snps,hsdk-reset.h
@@ -0,0 +1,17 @@
+/**
+ * This header provides the index for the HSDK reset controller.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
+#define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
+
+#define HSDK_APB_RESET 0
+#define HSDK_AXI_RESET 1
+#define HSDK_ETH_RESET 2
+#define HSDK_USB_RESET 3
+#define HSDK_SDIO_RESET 4
+#define HSDK_HDMI_RESET 5
+#define HSDK_GFX_RESET 6
+#define HSDK_DMAC_RESET 7
+#define HSDK_EBI_RESET 8
+
+#endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/
diff --git a/include/dt-bindings/reset/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h
new file mode 100644
index 0000000..f2a2c4f
--- /dev/null
+++ b/include/dt-bindings/reset/stih407-resets.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the reset controller
+ * based peripheral powerdown requests on the STMicroelectronics
+ * STiH407 SoC.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH407
+#define _DT_BINDINGS_RESET_CONTROLLER_STIH407
+
+/* Powerdown requests control 0 */
+#define STIH407_EMISS_POWERDOWN 0
+#define STIH407_NAND_POWERDOWN 1
+
+/* Synp GMAC PowerDown */
+#define STIH407_ETH1_POWERDOWN 2
+
+/* Powerdown requests control 1 */
+#define STIH407_USB3_POWERDOWN 3
+#define STIH407_USB2_PORT1_POWERDOWN 4
+#define STIH407_USB2_PORT0_POWERDOWN 5
+#define STIH407_PCIE1_POWERDOWN 6
+#define STIH407_PCIE0_POWERDOWN 7
+#define STIH407_SATA1_POWERDOWN 8
+#define STIH407_SATA0_POWERDOWN 9
+
+/* Reset defines */
+#define STIH407_ETH1_SOFTRESET 0
+#define STIH407_MMC1_SOFTRESET 1
+#define STIH407_PICOPHY_SOFTRESET 2
+#define STIH407_IRB_SOFTRESET 3
+#define STIH407_PCIE0_SOFTRESET 4
+#define STIH407_PCIE1_SOFTRESET 5
+#define STIH407_SATA0_SOFTRESET 6
+#define STIH407_SATA1_SOFTRESET 7
+#define STIH407_MIPHY0_SOFTRESET 8
+#define STIH407_MIPHY1_SOFTRESET 9
+#define STIH407_MIPHY2_SOFTRESET 10
+#define STIH407_SATA0_PWR_SOFTRESET 11
+#define STIH407_SATA1_PWR_SOFTRESET 12
+#define STIH407_DELTA_SOFTRESET 13
+#define STIH407_BLITTER_SOFTRESET 14
+#define STIH407_HDTVOUT_SOFTRESET 15
+#define STIH407_HDQVDP_SOFTRESET 16
+#define STIH407_VDP_AUX_SOFTRESET 17
+#define STIH407_COMPO_SOFTRESET 18
+#define STIH407_HDMI_TX_PHY_SOFTRESET 19
+#define STIH407_JPEG_DEC_SOFTRESET 20
+#define STIH407_VP8_DEC_SOFTRESET 21
+#define STIH407_GPU_SOFTRESET 22
+#define STIH407_HVA_SOFTRESET 23
+#define STIH407_ERAM_HVA_SOFTRESET 24
+#define STIH407_LPM_SOFTRESET 25
+#define STIH407_KEYSCAN_SOFTRESET 26
+#define STIH407_USB2_PORT0_SOFTRESET 27
+#define STIH407_USB2_PORT1_SOFTRESET 28
+#define STIH407_ST231_AUD_SOFTRESET 29
+#define STIH407_ST231_DMU_SOFTRESET 30
+#define STIH407_ST231_GP0_SOFTRESET 31
+#define STIH407_ST231_GP1_SOFTRESET 32
+
+/* Picophy reset defines */
+#define STIH407_PICOPHY0_RESET 0
+#define STIH407_PICOPHY1_RESET 1
+#define STIH407_PICOPHY2_RESET 2
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH407 */
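Note that the STiH407 header defines three independent index spaces (powerdown, softreset, picophy), so the same number may appear in more than one list; the controller phandle disambiguates. For example, a USB port would take one powerdown request and one softreset (the controller labels are assumed here, not part of this commit):

    #include <dt-bindings/reset/stih407-resets.h>

    usb-port0 {
            /* "powerdown" and "softreset" are assumed controller labels */
            resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
                     <&softreset STIH407_USB2_PORT0_SOFTRESET>;
            reset-names = "power", "softreset";
    };
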
diff --git a/include/dt-bindings/reset/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h
new file mode 100644
index 0000000..96f7831
--- /dev/null
+++ b/include/dt-bindings/reset/stih415-resets.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the reset controller
+ * based peripheral powerdown requests on the STMicroelectronics
+ * STiH415 SoC.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH415
+#define _DT_BINDINGS_RESET_CONTROLLER_STIH415
+
+#define STIH415_EMISS_POWERDOWN 0
+#define STIH415_NAND_POWERDOWN 1
+#define STIH415_KEYSCAN_POWERDOWN 2
+#define STIH415_USB0_POWERDOWN 3
+#define STIH415_USB1_POWERDOWN 4
+#define STIH415_USB2_POWERDOWN 5
+#define STIH415_SATA0_POWERDOWN 6
+#define STIH415_SATA1_POWERDOWN 7
+#define STIH415_PCIE_POWERDOWN 8
+
+#define STIH415_ETH0_SOFTRESET 0
+#define STIH415_ETH1_SOFTRESET 1
+#define STIH415_IRB_SOFTRESET 2
+#define STIH415_USB0_SOFTRESET 3
+#define STIH415_USB1_SOFTRESET 4
+#define STIH415_USB2_SOFTRESET 5
+#define STIH415_KEYSCAN_SOFTRESET 6
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH415 */
diff --git a/include/dt-bindings/reset/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h
new file mode 100644
index 0000000..f682c90
--- /dev/null
+++ b/include/dt-bindings/reset/stih416-resets.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the reset controller
+ * based peripheral powerdown requests on the STMicroelectronics
+ * STiH416 SoC.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH416
+#define _DT_BINDINGS_RESET_CONTROLLER_STIH416
+
+#define STIH416_EMISS_POWERDOWN 0
+#define STIH416_NAND_POWERDOWN 1
+#define STIH416_KEYSCAN_POWERDOWN 2
+#define STIH416_USB0_POWERDOWN 3
+#define STIH416_USB1_POWERDOWN 4
+#define STIH416_USB2_POWERDOWN 5
+#define STIH416_USB3_POWERDOWN 6
+#define STIH416_SATA0_POWERDOWN 7
+#define STIH416_SATA1_POWERDOWN 8
+#define STIH416_PCIE0_POWERDOWN 9
+#define STIH416_PCIE1_POWERDOWN 10
+
+#define STIH416_ETH0_SOFTRESET 0
+#define STIH416_ETH1_SOFTRESET 1
+#define STIH416_IRB_SOFTRESET 2
+#define STIH416_USB0_SOFTRESET 3
+#define STIH416_USB1_SOFTRESET 4
+#define STIH416_USB2_SOFTRESET 5
+#define STIH416_USB3_SOFTRESET 6
+#define STIH416_SATA0_SOFTRESET 7
+#define STIH416_SATA1_SOFTRESET 8
+#define STIH416_PCIE0_SOFTRESET 9
+#define STIH416_PCIE1_SOFTRESET 10
+#define STIH416_AUD_DAC_SOFTRESET 11
+#define STIH416_HDTVOUT_SOFTRESET 12
+#define STIH416_VTAC_M_RX_SOFTRESET 13
+#define STIH416_VTAC_A_RX_SOFTRESET 14
+#define STIH416_SYNC_HD_SOFTRESET 15
+#define STIH416_SYNC_SD_SOFTRESET 16
+#define STIH416_BLITTER_SOFTRESET 17
+#define STIH416_GPU_SOFTRESET 18
+#define STIH416_VTAC_M_TX_SOFTRESET 19
+#define STIH416_VTAC_A_TX_SOFTRESET 20
+#define STIH416_VTG_AUX_SOFTRESET 21
+#define STIH416_JPEG_DEC_SOFTRESET 22
+#define STIH416_HVA_SOFTRESET 23
+#define STIH416_COMPO_M_SOFTRESET 24
+#define STIH416_COMPO_A_SOFTRESET 25
+#define STIH416_VP8_DEC_SOFTRESET 26
+#define STIH416_VTG_MAIN_SOFTRESET 27
+#define STIH416_KEYSCAN_SOFTRESET 28
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH416 */
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
new file mode 100644
index 0000000..f0c3aae
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP1_RESET_H_
+#define _DT_BINDINGS_STM32MP1_RESET_H_
+
+#define LTDC_R 3072
+#define DSI_R 3076
+#define DDRPERFM_R 3080
+#define USBPHY_R 3088
+#define SPI6_R 3136
+#define I2C4_R 3138
+#define I2C6_R 3139
+#define USART1_R 3140
+#define STGEN_R 3156
+#define GPIOZ_R 3200
+#define CRYP1_R 3204
+#define HASH1_R 3205
+#define RNG1_R 3206
+#define AXIM_R 3216
+#define GPU_R 3269
+#define ETHMAC_R 3274
+#define FMC_R 3276
+#define QSPI_R 3278
+#define SDMMC1_R 3280
+#define SDMMC2_R 3281
+#define CRC1_R 3284
+#define USBH_R 3288
+#define MDMA_R 3328
+#define MCU_R 8225
+#define TIM2_R 19456
+#define TIM3_R 19457
+#define TIM4_R 19458
+#define TIM5_R 19459
+#define TIM6_R 19460
+#define TIM7_R 19461
+#define TIM12_R 19462
+#define TIM13_R 19463
+#define TIM14_R 19464
+#define LPTIM1_R 19465
+#define SPI2_R 19467
+#define SPI3_R 19468
+#define USART2_R 19470
+#define USART3_R 19471
+#define UART4_R 19472
+#define UART5_R 19473
+#define UART7_R 19474
+#define UART8_R 19475
+#define I2C1_R 19477
+#define I2C2_R 19478
+#define I2C3_R 19479
+#define I2C5_R 19480
+#define SPDIF_R 19482
+#define CEC_R 19483
+#define DAC12_R 19485
+#define MDIO_R 19847
+#define TIM1_R 19520
+#define TIM8_R 19521
+#define TIM15_R 19522
+#define TIM16_R 19523
+#define TIM17_R 19524
+#define SPI1_R 19528
+#define SPI4_R 19529
+#define SPI5_R 19530
+#define USART6_R 19533
+#define SAI1_R 19536
+#define SAI2_R 19537
+#define SAI3_R 19538
+#define DFSDM_R 19540
+#define FDCAN_R 19544
+#define LPTIM2_R 19584
+#define LPTIM3_R 19585
+#define LPTIM4_R 19586
+#define LPTIM5_R 19587
+#define SAI4_R 19592
+#define SYSCFG_R 19595
+#define VREF_R 19597
+#define TMPSENS_R 19600
+#define PMBCTRL_R 19601
+#define DMA1_R 19648
+#define DMA2_R 19649
+#define DMAMUX_R 19650
+#define ADC12_R 19653
+#define USBO_R 19656
+#define SDMMC3_R 19664
+#define CAMITF_R 19712
+#define CRYP2_R 19716
+#define HASH2_R 19717
+#define RNG2_R 19718
+#define CRC2_R 19719
+#define HSEM_R 19723
+#define MBOX_R 19724
+#define GPIOA_R 19776
+#define GPIOB_R 19777
+#define GPIOC_R 19778
+#define GPIOD_R 19779
+#define GPIOE_R 19780
+#define GPIOF_R 19781
+#define GPIOG_R 19782
+#define GPIOH_R 19783
+#define GPIOI_R 19784
+#define GPIOJ_R 19785
+#define GPIOK_R 19786
+
+#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */
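Unlike the other headers in this series, the STM32MP1 values are not a dense 0..N index: each ID appears to encode an RCC register byte offset and a bit position as offset * 8 + bit, which is why the values jump in blocks of 32 (this reading is an inference from the numbers themselves, not stated in the commit). For example:

    #include <dt-bindings/reset/stm32mp1-resets.h>

    /*
     * TIM2_R = 19456 = 0x980 * 8 + 0, i.e. bit 0 of the RCC reset
     * register at byte offset 0x980 (assumed encoding).
     */
    timer2 {
            resets = <&rcc TIM2_R>; /* "rcc" label assumed */
    };
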
diff --git a/include/dt-bindings/reset/sun4i-a10-ccu.h b/include/dt-bindings/reset/sun4i-a10-ccu.h
new file mode 100644
index 0000000..5f4480b
--- /dev/null
+++ b/include/dt-bindings/reset/sun4i-a10-ccu.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 Priit Laes
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN4I_A10_H
+#define _DT_BINDINGS_RST_SUN4I_A10_H
+
+#define RST_USB_PHY0 1
+#define RST_USB_PHY1 2
+#define RST_USB_PHY2 3
+#define RST_GPS 4
+#define RST_DE_BE0 5
+#define RST_DE_BE1 6
+#define RST_DE_FE0 7
+#define RST_DE_FE1 8
+#define RST_DE_MP 9
+#define RST_TVE0 10
+#define RST_TCON0 11
+#define RST_TVE1 12
+#define RST_TCON1 13
+#define RST_CSI0 14
+#define RST_CSI1 15
+#define RST_VE 16
+#define RST_ACE 17
+#define RST_LVDS 18
+#define RST_GPU 19
+#define RST_HDMI_H 20
+#define RST_HDMI_SYS 21
+#define RST_HDMI_AUDIO_DMA 22
+
+#endif /* DT_BINDINGS_RST_SUN4I_A10_H */
diff --git a/include/dt-bindings/reset/sun50i-a64-ccu.h b/include/dt-bindings/reset/sun50i-a64-ccu.h
new file mode 100644
index 0000000..db60b29
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-a64-ccu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_A64_H_
+#define _DT_BINDINGS_RST_SUN50I_A64_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_HSIC 2
+#define RST_DRAM 3
+#define RST_MBUS 4
+#define RST_BUS_MIPI_DSI 5
+#define RST_BUS_CE 6
+#define RST_BUS_DMA 7
+#define RST_BUS_MMC0 8
+#define RST_BUS_MMC1 9
+#define RST_BUS_MMC2 10
+#define RST_BUS_NAND 11
+#define RST_BUS_DRAM 12
+#define RST_BUS_EMAC 13
+#define RST_BUS_TS 14
+#define RST_BUS_HSTIMER 15
+#define RST_BUS_SPI0 16
+#define RST_BUS_SPI1 17
+#define RST_BUS_OTG 18
+#define RST_BUS_EHCI0 19
+#define RST_BUS_EHCI1 20
+#define RST_BUS_OHCI0 21
+#define RST_BUS_OHCI1 22
+#define RST_BUS_VE 23
+#define RST_BUS_TCON0 24
+#define RST_BUS_TCON1 25
+#define RST_BUS_DEINTERLACE 26
+#define RST_BUS_CSI 27
+#define RST_BUS_HDMI0 28
+#define RST_BUS_HDMI1 29
+#define RST_BUS_DE 30
+#define RST_BUS_GPU 31
+#define RST_BUS_MSGBOX 32
+#define RST_BUS_SPINLOCK 33
+#define RST_BUS_DBG 34
+#define RST_BUS_LVDS 35
+#define RST_BUS_CODEC 36
+#define RST_BUS_SPDIF 37
+#define RST_BUS_THS 38
+#define RST_BUS_I2S0 39
+#define RST_BUS_I2S1 40
+#define RST_BUS_I2S2 41
+#define RST_BUS_I2C0 42
+#define RST_BUS_I2C1 43
+#define RST_BUS_I2C2 44
+#define RST_BUS_SCR 45
+#define RST_BUS_UART0 46
+#define RST_BUS_UART1 47
+#define RST_BUS_UART2 48
+#define RST_BUS_UART3 49
+#define RST_BUS_UART4 50
+
+#endif /* _DT_BINDINGS_RST_SUN50I_A64_H_ */
diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h
new file mode 100644
index 0000000..81106f4
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h6-ccu.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/*
+ * Copyright (C) 2017 Icenowy Zheng
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN50I_H6_H_
+#define _DT_BINDINGS_RESET_SUN50I_H6_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DEINTERLACE 2
+#define RST_BUS_GPU 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_EMCE 6
+#define RST_BUS_VP9 7
+#define RST_BUS_DMA 8
+#define RST_BUS_MSGBOX 9
+#define RST_BUS_SPINLOCK 10
+#define RST_BUS_HSTIMER 11
+#define RST_BUS_DBG 12
+#define RST_BUS_PSI 13
+#define RST_BUS_PWM 14
+#define RST_BUS_IOMMU 15
+#define RST_BUS_DRAM 16
+#define RST_BUS_NAND 17
+#define RST_BUS_MMC0 18
+#define RST_BUS_MMC1 19
+#define RST_BUS_MMC2 20
+#define RST_BUS_UART0 21
+#define RST_BUS_UART1 22
+#define RST_BUS_UART2 23
+#define RST_BUS_UART3 24
+#define RST_BUS_I2C0 25
+#define RST_BUS_I2C1 26
+#define RST_BUS_I2C2 27
+#define RST_BUS_I2C3 28
+#define RST_BUS_SCR0 29
+#define RST_BUS_SCR1 30
+#define RST_BUS_SPI0 31
+#define RST_BUS_SPI1 32
+#define RST_BUS_EMAC 33
+#define RST_BUS_TS 34
+#define RST_BUS_IR_TX 35
+#define RST_BUS_THS 36
+#define RST_BUS_I2S0 37
+#define RST_BUS_I2S1 38
+#define RST_BUS_I2S2 39
+#define RST_BUS_I2S3 40
+#define RST_BUS_SPDIF 41
+#define RST_BUS_DMIC 42
+#define RST_BUS_AUDIO_HUB 43
+#define RST_USB_PHY0 44
+#define RST_USB_PHY1 45
+#define RST_USB_PHY3 46
+#define RST_USB_HSIC 47
+#define RST_BUS_OHCI0 48
+#define RST_BUS_OHCI3 49
+#define RST_BUS_EHCI0 50
+#define RST_BUS_XHCI 51
+#define RST_BUS_EHCI3 52
+#define RST_BUS_OTG 53
+#define RST_BUS_PCIE 54
+#define RST_PCIE_POWERUP 55
+#define RST_BUS_HDMI 56
+#define RST_BUS_HDMI_SUB 57
+#define RST_BUS_TCON_TOP 58
+#define RST_BUS_TCON_LCD0 59
+#define RST_BUS_TCON_TV0 60
+#define RST_BUS_CSI 61
+#define RST_BUS_HDCP 62
+
+#endif /* _DT_BINDINGS_RESET_SUN50I_H6_H_ */
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
new file mode 100644
index 0000000..01c84db
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/*
+ * Copyright (C) 2016 Icenowy Zheng
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_
+
+#define RST_R_APB1_TIMER 0
+#define RST_R_APB1_TWD 1
+#define RST_R_APB1_PWM 2
+#define RST_R_APB2_UART 3
+#define RST_R_APB2_I2C 4
+#define RST_R_APB1_IR 5
+#define RST_R_APB1_W1 6
+
+#endif /* _DT_BINDINGS_RST_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun5i-ccu.h b/include/dt-bindings/reset/sun5i-ccu.h
new file mode 100644
index 0000000..40cc22a
--- /dev/null
+++ b/include/dt-bindings/reset/sun5i-ccu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016 Maxime Ripard
+ *
+ * Maxime Ripard
+ */
+
+#ifndef _RST_SUN5I_H_
+#define _RST_SUN5I_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_GPS 2
+#define RST_DE_BE 3
+#define RST_DE_FE 4
+#define RST_TVE 5
+#define RST_LCD 6
+#define RST_CSI 7
+#define RST_VE 8
+#define RST_GPU 9
+#define RST_IEP 10
+
+#endif /* _RST_SUN5I_H_ */
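All of the Allwinner CCU headers in this series use one flat index space per SoC; a consumer simply pairs the CCU phandle with the constant. A sketch against the A64 header above (the "ccu" label and node are assumed for illustration):

    #include <dt-bindings/reset/sun50i-a64-ccu.h>

    serial0 {
            /* "ccu" is an assumed label for the A64 clock-control unit */
            resets = <&ccu RST_BUS_UART0>;
    };
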
diff --git a/include/dt-bindings/reset/sun6i-a31-ccu.h b/include/dt-bindings/reset/sun6i-a31-ccu.h
new file mode 100644
index 0000000..fbff365
--- /dev/null
+++ b/include/dt-bindings/reset/sun6i-a31-ccu.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN6I_A31_H_
+#define _DT_BINDINGS_RST_SUN6I_A31_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_PHY2 2
+
+#define RST_AHB1_MIPI_DSI 3
+#define RST_AHB1_SS 4
+#define RST_AHB1_DMA 5
+#define RST_AHB1_MMC0 6
+#define RST_AHB1_MMC1 7
+#define RST_AHB1_MMC2 8
+#define RST_AHB1_MMC3 9
+#define RST_AHB1_NAND1 10
+#define RST_AHB1_NAND0 11
+#define RST_AHB1_SDRAM 12
+#define RST_AHB1_EMAC 13
+#define RST_AHB1_TS 14
+#define RST_AHB1_HSTIMER 15
+#define RST_AHB1_SPI0 16
+#define RST_AHB1_SPI1 17
+#define RST_AHB1_SPI2 18
+#define RST_AHB1_SPI3 19
+#define RST_AHB1_OTG 20
+#define RST_AHB1_EHCI0 21
+#define RST_AHB1_EHCI1 22
+#define RST_AHB1_OHCI0 23
+#define RST_AHB1_OHCI1 24
+#define RST_AHB1_OHCI2 25
+#define RST_AHB1_VE 26
+#define RST_AHB1_LCD0 27
+#define RST_AHB1_LCD1 28
+#define RST_AHB1_CSI 29
+#define RST_AHB1_HDMI 30
+#define RST_AHB1_BE0 31
+#define RST_AHB1_BE1 32
+#define RST_AHB1_FE0 33
+#define RST_AHB1_FE1 34
+#define RST_AHB1_MP 35
+#define RST_AHB1_GPU 36
+#define RST_AHB1_DEU0 37
+#define RST_AHB1_DEU1 38
+#define RST_AHB1_DRC0 39
+#define RST_AHB1_DRC1 40
+#define RST_AHB1_LVDS 41
+
+#define RST_APB1_CODEC 42
+#define RST_APB1_SPDIF 43
+#define RST_APB1_DIGITAL_MIC 44
+#define RST_APB1_DAUDIO0 45
+#define RST_APB1_DAUDIO1 46
+#define RST_APB2_I2C0 47
+#define RST_APB2_I2C1 48
+#define RST_APB2_I2C2 49
+#define RST_APB2_I2C3 50
+#define RST_APB2_UART0 51
+#define RST_APB2_UART1 52
+#define RST_APB2_UART2 53
+#define RST_APB2_UART3 54
+#define RST_APB2_UART4 55
+#define RST_APB2_UART5 56
+
+#endif /* _DT_BINDINGS_RST_SUN6I_A31_H_ */
diff --git a/include/dt-bindings/reset/sun8i-a23-a33-ccu.h b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h
new file mode 100644
index 0000000..6121f2b
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN8I_A23_A33_H_
+#define _DT_BINDINGS_RST_SUN8I_A23_A33_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_HSIC 2
+#define RST_MBUS 3
+#define RST_BUS_MIPI_DSI 4
+#define RST_BUS_SS 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MMC0 7
+#define RST_BUS_MMC1 8
+#define RST_BUS_MMC2 9
+#define RST_BUS_NAND 10
+#define RST_BUS_DRAM 11
+#define RST_BUS_HSTIMER 12
+#define RST_BUS_SPI0 13
+#define RST_BUS_SPI1 14
+#define RST_BUS_OTG 15
+#define RST_BUS_EHCI 16
+#define RST_BUS_OHCI 17
+#define RST_BUS_VE 18
+#define RST_BUS_LCD 19
+#define RST_BUS_CSI 20
+#define RST_BUS_DE_BE 21
+#define RST_BUS_DE_FE 22
+#define RST_BUS_GPU 23
+#define RST_BUS_MSGBOX 24
+#define RST_BUS_SPINLOCK 25
+#define RST_BUS_DRC 26
+#define RST_BUS_SAT 27
+#define RST_BUS_LVDS 28
+#define RST_BUS_CODEC 29
+#define RST_BUS_I2S0 30
+#define RST_BUS_I2S1 31
+#define RST_BUS_I2C0 32
+#define RST_BUS_I2C1 33
+#define RST_BUS_I2C2 34
+#define RST_BUS_UART0 35
+#define RST_BUS_UART1 36
+#define RST_BUS_UART2 37
+#define RST_BUS_UART3 38
+#define RST_BUS_UART4 39
+
+#endif /* _DT_BINDINGS_RST_SUN8I_A23_A33_H_ */
diff --git a/include/dt-bindings/reset/sun8i-a83t-ccu.h b/include/dt-bindings/reset/sun8i-a83t-ccu.h
new file mode 100644
index 0000000..784f6e1
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-a83t-ccu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 Chen-Yu Tsai
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ +#define _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_HSIC 2 + +#define RST_DRAM 3 +#define RST_MBUS 4 + +#define RST_BUS_MIPI_DSI 5 +#define RST_BUS_SS 6 +#define RST_BUS_DMA 7 +#define RST_BUS_MMC0 8 +#define RST_BUS_MMC1 9 +#define RST_BUS_MMC2 10 +#define RST_BUS_NAND 11 +#define RST_BUS_DRAM 12 +#define RST_BUS_EMAC 13 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_SPI1 16 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_EHCI1 19 +#define RST_BUS_OHCI0 20 + +#define RST_BUS_VE 21 +#define RST_BUS_TCON0 22 +#define RST_BUS_TCON1 23 +#define RST_BUS_CSI 24 +#define RST_BUS_HDMI0 25 +#define RST_BUS_HDMI1 26 +#define RST_BUS_DE 27 +#define RST_BUS_GPU 28 +#define RST_BUS_MSGBOX 29 +#define RST_BUS_SPINLOCK 30 + +#define RST_BUS_LVDS 31 + +#define RST_BUS_SPDIF 32 +#define RST_BUS_I2S0 33 +#define RST_BUS_I2S1 34 +#define RST_BUS_I2S2 35 +#define RST_BUS_TDM 36 + +#define RST_BUS_I2C0 37 +#define RST_BUS_I2C1 38 +#define RST_BUS_I2C2 39 +#define RST_BUS_UART0 40 +#define RST_BUS_UART1 41 +#define RST_BUS_UART2 42 +#define RST_BUS_UART3 43 +#define RST_BUS_UART4 44 + +#endif /* _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h new file mode 100644 index 0000000..1c36a6a --- /dev/null +++ b/include/dt-bindings/reset/sun8i-de2.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * SPDX-License-Identifier: (GPL-2.0+ OR MIT) + */ + +#ifndef _DT_BINDINGS_RESET_SUN8I_DE2_H_ +#define _DT_BINDINGS_RESET_SUN8I_DE2_H_ + +#define RST_MIXER0 0 +#define RST_MIXER1 1 +#define RST_WB 2 +#define RST_ROT 3 + +#endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */ diff --git a/include/dt-bindings/reset/sun8i-h3-ccu.h b/include/dt-bindings/reset/sun8i-h3-ccu.h new file mode 100644 index 0000000..484c2a2 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-h3-ccu.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_H3_H_ +#define _DT_BINDINGS_RST_SUN8I_H3_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_USB_PHY2 2 +#define RST_USB_PHY3 3 + +#define RST_MBUS 4 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_NAND 10 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_TS 13 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_SPI1 16 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_EHCI1 19 +#define RST_BUS_EHCI2 20 +#define RST_BUS_EHCI3 21 +#define RST_BUS_OHCI0 22 +#define RST_BUS_OHCI1 23 +#define RST_BUS_OHCI2 24 +#define RST_BUS_OHCI3 25 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_TCON1 28 +#define RST_BUS_DEINTERLACE 29 +#define RST_BUS_CSI 30 +#define RST_BUS_TVE 31 +#define RST_BUS_HDMI0 32 +#define RST_BUS_HDMI1 33 +#define RST_BUS_DE 34 +#define RST_BUS_GPU 35 +#define RST_BUS_MSGBOX 36 +#define RST_BUS_SPINLOCK 37 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_SPDIF 41 +#define RST_BUS_THS 42 +#define RST_BUS_I2S0 43 +#define RST_BUS_I2S1 44 +#define RST_BUS_I2S2 45 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_I2C2 48 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 +#define RST_BUS_UART3 52 +#define RST_BUS_SCR0 53 + +/* New resets imported in H5 */ +#define RST_BUS_SCR1 54 + +#endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/sun8i-r-ccu.h b/include/dt-bindings/reset/sun8i-r-ccu.h new file mode 100644 index 0000000..4ba64f3 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-r-ccu.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_R_CCU_H_ +#define _DT_BINDINGS_RST_SUN8I_R_CCU_H_ + +#define RST_APB0_IR 0 +#define RST_APB0_TIMER 1 +#define RST_APB0_RSB 2 +#define RST_APB0_UART 3 +/* 4 is reserved for RST_APB0_W1 on A31 */ +#define RST_APB0_I2C 5 + +#endif /* _DT_BINDINGS_RST_SUN8I_R_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun8i-r40-ccu.h b/include/dt-bindings/reset/sun8i-r40-ccu.h new file mode 100644 index 0000000..c5ebcf6 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-r40-ccu.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2017 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN8I_R40_H_
+#define _DT_BINDINGS_RST_SUN8I_R40_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_PHY2 2
+
+#define RST_DRAM 3
+#define RST_MBUS 4
+
+#define RST_BUS_MIPI_DSI 5
+#define RST_BUS_CE 6
+#define RST_BUS_DMA 7
+#define RST_BUS_MMC0 8
+#define RST_BUS_MMC1 9
+#define RST_BUS_MMC2 10
+#define RST_BUS_MMC3 11
+#define RST_BUS_NAND 12
+#define RST_BUS_DRAM 13
+#define RST_BUS_EMAC 14
+#define RST_BUS_TS 15
+#define RST_BUS_HSTIMER 16
+#define RST_BUS_SPI0 17
+#define RST_BUS_SPI1 18
+#define RST_BUS_SPI2 19
+#define RST_BUS_SPI3 20
+#define RST_BUS_SATA 21
+#define RST_BUS_OTG 22
+#define RST_BUS_EHCI0 23
+#define RST_BUS_EHCI1 24
+#define RST_BUS_EHCI2 25
+#define RST_BUS_OHCI0 26
+#define RST_BUS_OHCI1 27
+#define RST_BUS_OHCI2 28
+#define RST_BUS_VE 29
+#define RST_BUS_MP 30
+#define RST_BUS_DEINTERLACE 31
+#define RST_BUS_CSI0 32
+#define RST_BUS_CSI1 33
+#define RST_BUS_HDMI0 34
+#define RST_BUS_HDMI1 35
+#define RST_BUS_DE 36
+#define RST_BUS_TVE0 37
+#define RST_BUS_TVE1 38
+#define RST_BUS_TVE_TOP 39
+#define RST_BUS_GMAC 40
+#define RST_BUS_GPU 41
+#define RST_BUS_TVD0 42
+#define RST_BUS_TVD1 43
+#define RST_BUS_TVD2 44
+#define RST_BUS_TVD3 45
+#define RST_BUS_TVD_TOP 46
+#define RST_BUS_TCON_LCD0 47
+#define RST_BUS_TCON_LCD1 48
+#define RST_BUS_TCON_TV0 49
+#define RST_BUS_TCON_TV1 50
+#define RST_BUS_TCON_TOP 51
+#define RST_BUS_DBG 52
+#define RST_BUS_LVDS 53
+#define RST_BUS_CODEC 54
+#define RST_BUS_SPDIF 55
+#define RST_BUS_AC97 56
+#define RST_BUS_IR0 57
+#define RST_BUS_IR1 58
+#define RST_BUS_THS 59
+#define RST_BUS_KEYPAD 60
+#define RST_BUS_I2S0 61
+#define RST_BUS_I2S1 62
+#define RST_BUS_I2S2 63
+#define RST_BUS_I2C0 64
+#define RST_BUS_I2C1 65
+#define RST_BUS_I2C2 66
+#define RST_BUS_I2C3 67
+#define RST_BUS_CAN 68
+#define RST_BUS_SCR 69
+#define RST_BUS_PS20 70
+#define RST_BUS_PS21 71
+#define RST_BUS_I2C4 72
+#define RST_BUS_UART0 73
+#define RST_BUS_UART1 74
+#define RST_BUS_UART2 75
+#define RST_BUS_UART3 76
+#define RST_BUS_UART4 77
+#define RST_BUS_UART5 78
+#define RST_BUS_UART6 79
+#define RST_BUS_UART7 80
+
+#endif /* _DT_BINDINGS_RST_SUN8I_R40_H_ */
diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h
new file mode 100644
index 0000000..b679017
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 Icenowy Zheng
+ *
+ * Based on sun8i-h3-ccu.h, which is
+ * Copyright (C) 2016 Maxime Ripard
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN8I_V3S_H_
+#define _DT_BINDINGS_RST_SUN8I_V3S_H_
+
+#define RST_USB_PHY0 0
+
+#define RST_MBUS 1
+
+#define RST_BUS_CE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MMC0 7
+#define RST_BUS_MMC1 8
+#define RST_BUS_MMC2 9
+#define RST_BUS_DRAM 11
+#define RST_BUS_EMAC 12
+#define RST_BUS_HSTIMER 14
+#define RST_BUS_SPI0 15
+#define RST_BUS_OTG 17
+#define RST_BUS_EHCI0 18
+#define RST_BUS_OHCI0 22
+#define RST_BUS_VE 26
+#define RST_BUS_TCON0 27
+#define RST_BUS_CSI 30
+#define RST_BUS_DE 34
+#define RST_BUS_DBG 38
+#define RST_BUS_EPHY 39
+#define RST_BUS_CODEC 40
+#define RST_BUS_I2C0 46
+#define RST_BUS_I2C1 47
+#define RST_BUS_UART0 49
+#define RST_BUS_UART1 50
+#define RST_BUS_UART2 51
+
+/* Reset lines not available on V3s */
+#define RST_BUS_I2S0 52
+
+#endif /* _DT_BINDINGS_RST_SUN8I_V3S_H_ */
diff --git a/include/dt-bindings/reset/sun9i-a80-ccu.h b/include/dt-bindings/reset/sun9i-a80-ccu.h
new file mode 100644
index 0000000..4b8df4b
--- /dev/null
+++ b/include/dt-bindings/reset/sun9i-a80-ccu.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ + +#define RST_BUS_FD 0 +#define RST_BUS_VE 1 +#define RST_BUS_GPU_CTRL 2 +#define RST_BUS_SS 3 +#define RST_BUS_MMC 4 +#define RST_BUS_NAND0 5 +#define RST_BUS_NAND1 6 +#define RST_BUS_SDRAM 7 +#define RST_BUS_SATA 8 +#define RST_BUS_TS 9 +#define RST_BUS_SPI0 10 +#define RST_BUS_SPI1 11 +#define RST_BUS_SPI2 12 +#define RST_BUS_SPI3 13 + +#define RST_BUS_OTG 14 +#define RST_BUS_OTG_PHY 15 +#define RST_BUS_MIPI_HSI 16 +#define RST_BUS_GMAC 17 +#define RST_BUS_MSGBOX 18 +#define RST_BUS_SPINLOCK 19 +#define RST_BUS_HSTIMER 20 +#define RST_BUS_DMA 21 + +#define RST_BUS_LCD0 22 +#define RST_BUS_LCD1 23 +#define RST_BUS_EDP 24 +#define RST_BUS_LVDS 25 +#define RST_BUS_CSI 26 +#define RST_BUS_HDMI0 27 +#define RST_BUS_HDMI1 28 +#define RST_BUS_DE 29 +#define RST_BUS_MP 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MIPI_DSI 32 + +#define RST_BUS_SPDIF 33 +#define RST_BUS_AC97 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_LRADC 37 +#define RST_BUS_GPADC 38 +#define RST_BUS_CIR_TX 39 + +#define RST_BUS_I2C0 40 +#define RST_BUS_I2C1 41 +#define RST_BUS_I2C2 42 +#define RST_BUS_I2C3 43 +#define RST_BUS_I2C4 44 +#define RST_BUS_UART0 45 +#define RST_BUS_UART1 46 +#define RST_BUS_UART2 47 +#define RST_BUS_UART3 48 +#define RST_BUS_UART4 49 +#define RST_BUS_UART5 50 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-de.h b/include/dt-bindings/reset/sun9i-a80-de.h new file mode 100644 index 0000000..2050727 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-de.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ + +#define RST_FE0 0 +#define RST_FE1 1 +#define RST_FE2 2 +#define RST_DEU0 3 +#define RST_DEU1 4 +#define RST_BE0 5 +#define RST_BE1 6 +#define RST_BE2 7 +#define RST_DRC0 8 +#define RST_DRC1 9 +#define RST_MERGE 10 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-usb.h b/include/dt-bindings/reset/sun9i-a80-usb.h new file mode 100644 index 0000000..ee49286 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-usb.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ + +#define RST_USB0_HCI 0 +#define RST_USB1_HCI 1 +#define RST_USB2_HCI 2 + +#define RST_USB0_PHY 3 +#define RST_USB1_HSIC 4 +#define RST_USB1_PHY 5 +#define RST_USB2_HSIC 6 +#define RST_USB2_PHY 7 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/reset/suniv-ccu-f1c100s.h b/include/dt-bindings/reset/suniv-ccu-f1c100s.h new file mode 100644 index 0000000..6a4b438 --- /dev/null +++ b/include/dt-bindings/reset/suniv-ccu-f1c100s.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) + * + * Copyright (C) 2018 Icenowy Zheng + * + */ + +#ifndef _DT_BINDINGS_RST_SUNIV_F1C100S_H_ +#define _DT_BINDINGS_RST_SUNIV_F1C100S_H_ + +#define RST_USB_PHY0 0 +#define RST_BUS_DMA 1 +#define RST_BUS_MMC0 2 +#define RST_BUS_MMC1 3 +#define RST_BUS_DRAM 4 +#define RST_BUS_SPI0 5 +#define RST_BUS_SPI1 6 +#define RST_BUS_OTG 7 +#define RST_BUS_VE 8 +#define RST_BUS_LCD 9 +#define RST_BUS_DEINTERLACE 10 +#define RST_BUS_CSI 11 +#define RST_BUS_TVD 12 +#define RST_BUS_TVE 13 +#define RST_BUS_DE_BE 14 +#define RST_BUS_DE_FE 15 +#define RST_BUS_CODEC 16 +#define RST_BUS_SPDIF 17 +#define RST_BUS_IR 18 +#define RST_BUS_RSB 19 +#define RST_BUS_I2S0 20 +#define RST_BUS_I2C0 21 +#define RST_BUS_I2C1 22 +#define RST_BUS_I2C2 23 +#define RST_BUS_UART0 24 +#define RST_BUS_UART1 25 +#define RST_BUS_UART2 26 + +#endif /* _DT_BINDINGS_RST_SUNIV_F1C100S_H_ */ diff --git a/include/dt-bindings/reset/tegra124-car.h b/include/dt-bindings/reset/tegra124-car.h new file mode 100644 index 0000000..97d2f3d --- /dev/null +++ b/include/dt-bindings/reset/tegra124-car.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides Tegra124-specific constants for binding + * nvidia,tegra124-car. + */ + +#ifndef _DT_BINDINGS_RESET_TEGRA124_CAR_H +#define _DT_BINDINGS_RESET_TEGRA124_CAR_H + +#define TEGRA124_RESET(x) (6 * 32 + (x)) +#define TEGRA124_RST_DFLL_DVCO TEGRA124_RESET(0) + +#endif /* _DT_BINDINGS_RESET_TEGRA124_CAR_H */ diff --git a/include/dt-bindings/reset/tegra186-reset.h b/include/dt-bindings/reset/tegra186-reset.h new file mode 100644 index 0000000..3c60e3e --- /dev/null +++ b/include/dt-bindings/reset/tegra186-reset.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef _ABI_MACH_T186_RESET_T186_H_ +#define _ABI_MACH_T186_RESET_T186_H_ + + +#define TEGRA186_RESET_ACTMON 0 +#define TEGRA186_RESET_AFI 1 +#define TEGRA186_RESET_CEC 2 +#define TEGRA186_RESET_CSITE 3 +#define TEGRA186_RESET_DP2 4 +#define TEGRA186_RESET_DPAUX 5 +#define TEGRA186_RESET_DSI 6 +#define TEGRA186_RESET_DSIB 7 +#define TEGRA186_RESET_DTV 8 +#define TEGRA186_RESET_DVFS 9 +#define TEGRA186_RESET_ENTROPY 10 +#define TEGRA186_RESET_EXTPERIPH1 11 +#define TEGRA186_RESET_EXTPERIPH2 12 +#define TEGRA186_RESET_EXTPERIPH3 13 +#define TEGRA186_RESET_GPU 14 +#define TEGRA186_RESET_HDA 15 +#define TEGRA186_RESET_HDA2CODEC_2X 16 +#define TEGRA186_RESET_HDA2HDMICODEC 17 +#define TEGRA186_RESET_HOST1X 18 +#define TEGRA186_RESET_I2C1 19 +#define TEGRA186_RESET_I2C2 20 +#define TEGRA186_RESET_I2C3 21 +#define TEGRA186_RESET_I2C4 22 +#define TEGRA186_RESET_I2C5 23 +#define TEGRA186_RESET_I2C6 24 +#define TEGRA186_RESET_ISP 25 +#define TEGRA186_RESET_KFUSE 26 +#define TEGRA186_RESET_LA 27 +#define TEGRA186_RESET_MIPI_CAL 28 +#define TEGRA186_RESET_PCIE 29 +#define TEGRA186_RESET_PCIEXCLK 30 +#define TEGRA186_RESET_SATA 31 +#define TEGRA186_RESET_SATACOLD 32 +#define TEGRA186_RESET_SDMMC1 33 +#define TEGRA186_RESET_SDMMC2 34 +#define TEGRA186_RESET_SDMMC3 35 +#define TEGRA186_RESET_SDMMC4 36 +#define TEGRA186_RESET_SE 37 +#define TEGRA186_RESET_SOC_THERM 38 +#define TEGRA186_RESET_SOR0 39 +#define TEGRA186_RESET_SPI1 40 +#define TEGRA186_RESET_SPI2 41 +#define TEGRA186_RESET_SPI3 42 +#define TEGRA186_RESET_SPI4 43 +#define TEGRA186_RESET_TMR 44 +#define TEGRA186_RESET_TRIG_SYS 45 +#define TEGRA186_RESET_TSEC 46 +#define TEGRA186_RESET_UARTA 47 +#define TEGRA186_RESET_UARTB 48 +#define TEGRA186_RESET_UARTC 49 +#define TEGRA186_RESET_UARTD 50 +#define TEGRA186_RESET_VI 51 +#define TEGRA186_RESET_VIC 52 +#define TEGRA186_RESET_XUSB_DEV 53 +#define TEGRA186_RESET_XUSB_HOST 54 +#define TEGRA186_RESET_XUSB_PADCTL 55 +#define TEGRA186_RESET_XUSB_SS 56 +#define TEGRA186_RESET_AON_APB 57 +#define TEGRA186_RESET_AXI_CBB 58 +#define TEGRA186_RESET_BPMP_APB 59 +#define TEGRA186_RESET_CAN1 60 +#define TEGRA186_RESET_CAN2 61 +#define TEGRA186_RESET_DMIC5 62 +#define TEGRA186_RESET_DSIC 63 +#define TEGRA186_RESET_DSID 64 +#define TEGRA186_RESET_EMC_EMC 65 +#define TEGRA186_RESET_EMC_MEM 66 +#define TEGRA186_RESET_EMCSB_EMC 67 +#define TEGRA186_RESET_EMCSB_MEM 68 +#define TEGRA186_RESET_EQOS 69 +#define TEGRA186_RESET_GPCDMA 70 +#define TEGRA186_RESET_GPIO_CTL0 71 +#define TEGRA186_RESET_GPIO_CTL1 72 +#define TEGRA186_RESET_GPIO_CTL2 73 +#define TEGRA186_RESET_GPIO_CTL3 74 +#define TEGRA186_RESET_GPIO_CTL4 75 +#define TEGRA186_RESET_GPIO_CTL5 76 +#define TEGRA186_RESET_I2C10 77 +#define TEGRA186_RESET_I2C12 78 +#define TEGRA186_RESET_I2C13 79 +#define TEGRA186_RESET_I2C14 80 +#define TEGRA186_RESET_I2C7 81 +#define TEGRA186_RESET_I2C8 82 +#define TEGRA186_RESET_I2C9 83 +#define TEGRA186_RESET_JTAG2AXI 84 +#define TEGRA186_RESET_MPHY_IOBIST 85 +#define TEGRA186_RESET_MPHY_L0_RX 86 +#define TEGRA186_RESET_MPHY_L0_TX 87 +#define TEGRA186_RESET_NVCSI 88 +#define TEGRA186_RESET_NVDISPLAY0_HEAD0 89 +#define TEGRA186_RESET_NVDISPLAY0_HEAD1 90 +#define TEGRA186_RESET_NVDISPLAY0_HEAD2 91 +#define TEGRA186_RESET_NVDISPLAY0_MISC 92 +#define TEGRA186_RESET_NVDISPLAY0_WGRP0 93 +#define TEGRA186_RESET_NVDISPLAY0_WGRP1 94 +#define TEGRA186_RESET_NVDISPLAY0_WGRP2 95 +#define TEGRA186_RESET_NVDISPLAY0_WGRP3 96 +#define TEGRA186_RESET_NVDISPLAY0_WGRP4 97 +#define TEGRA186_RESET_NVDISPLAY0_WGRP5 98 +#define 
TEGRA186_RESET_PWM1 99 +#define TEGRA186_RESET_PWM2 100 +#define TEGRA186_RESET_PWM3 101 +#define TEGRA186_RESET_PWM4 102 +#define TEGRA186_RESET_PWM5 103 +#define TEGRA186_RESET_PWM6 104 +#define TEGRA186_RESET_PWM7 105 +#define TEGRA186_RESET_PWM8 106 +#define TEGRA186_RESET_SCE_APB 107 +#define TEGRA186_RESET_SOR1 108 +#define TEGRA186_RESET_TACH 109 +#define TEGRA186_RESET_TSC 110 +#define TEGRA186_RESET_UARTF 111 +#define TEGRA186_RESET_UARTG 112 +#define TEGRA186_RESET_UFSHC 113 +#define TEGRA186_RESET_UFSHC_AXI_M 114 +#define TEGRA186_RESET_UPHY 115 +#define TEGRA186_RESET_ADSP 116 +#define TEGRA186_RESET_ADSPDBG 117 +#define TEGRA186_RESET_ADSPINTF 118 +#define TEGRA186_RESET_ADSPNEON 119 +#define TEGRA186_RESET_ADSPPERIPH 120 +#define TEGRA186_RESET_ADSPSCU 121 +#define TEGRA186_RESET_ADSPWDT 122 +#define TEGRA186_RESET_APE 123 +#define TEGRA186_RESET_DPAUX1 124 +#define TEGRA186_RESET_NVDEC 125 +#define TEGRA186_RESET_NVENC 126 +#define TEGRA186_RESET_NVJPG 127 +#define TEGRA186_RESET_PEX_USB_UPHY 128 +#define TEGRA186_RESET_QSPI 129 +#define TEGRA186_RESET_TSECB 130 +#define TEGRA186_RESET_VI_I2C 131 +#define TEGRA186_RESET_UARTE 132 +#define TEGRA186_RESET_TOP_GTE 133 +#define TEGRA186_RESET_SHSP 134 +#define TEGRA186_RESET_PEX_USB_UPHY_L5 135 +#define TEGRA186_RESET_PEX_USB_UPHY_L4 136 +#define TEGRA186_RESET_PEX_USB_UPHY_L3 137 +#define TEGRA186_RESET_PEX_USB_UPHY_L2 138 +#define TEGRA186_RESET_PEX_USB_UPHY_L1 139 +#define TEGRA186_RESET_PEX_USB_UPHY_L0 140 +#define TEGRA186_RESET_PEX_USB_UPHY_PLL1 141 +#define TEGRA186_RESET_PEX_USB_UPHY_PLL0 142 +#define TEGRA186_RESET_TSCTNVI 143 +#define TEGRA186_RESET_EXTPERIPH4 144 +#define TEGRA186_RESET_DSIPADCTL 145 +#define TEGRA186_RESET_AUD_MCLK 146 +#define TEGRA186_RESET_MPHY_CLK_CTL 147 +#define TEGRA186_RESET_MPHY_L1_RX 148 +#define TEGRA186_RESET_MPHY_L1_TX 149 +#define TEGRA186_RESET_UFSHC_LP 150 +#define TEGRA186_RESET_BPMP_NIC 151 +#define TEGRA186_RESET_BPMP_NSYSPORESET 152 +#define TEGRA186_RESET_BPMP_NRESET 153 +#define TEGRA186_RESET_BPMP_DBGRESETN 154 +#define TEGRA186_RESET_BPMP_PRESETDBGN 155 +#define TEGRA186_RESET_BPMP_PM 156 +#define TEGRA186_RESET_BPMP_CVC 157 +#define TEGRA186_RESET_BPMP_DMA 158 +#define TEGRA186_RESET_BPMP_HSP 159 +#define TEGRA186_RESET_TSCTNBPMP 160 +#define TEGRA186_RESET_BPMP_TKE 161 +#define TEGRA186_RESET_BPMP_GTE 162 +#define TEGRA186_RESET_BPMP_PM_ACTMON 163 +#define TEGRA186_RESET_AON_NIC 164 +#define TEGRA186_RESET_AON_NSYSPORESET 165 +#define TEGRA186_RESET_AON_NRESET 166 +#define TEGRA186_RESET_AON_DBGRESETN 167 +#define TEGRA186_RESET_AON_PRESETDBGN 168 +#define TEGRA186_RESET_AON_ACTMON 169 +#define TEGRA186_RESET_AOPM 170 +#define TEGRA186_RESET_AOVC 171 +#define TEGRA186_RESET_AON_DMA 172 +#define TEGRA186_RESET_AON_GPIO 173 +#define TEGRA186_RESET_AON_HSP 174 +#define TEGRA186_RESET_TSCTNAON 175 +#define TEGRA186_RESET_AON_TKE 176 +#define TEGRA186_RESET_AON_GTE 177 +#define TEGRA186_RESET_SCE_NIC 178 +#define TEGRA186_RESET_SCE_NSYSPORESET 179 +#define TEGRA186_RESET_SCE_NRESET 180 +#define TEGRA186_RESET_SCE_DBGRESETN 181 +#define TEGRA186_RESET_SCE_PRESETDBGN 182 +#define TEGRA186_RESET_SCE_ACTMON 183 +#define TEGRA186_RESET_SCE_PM 184 +#define TEGRA186_RESET_SCE_DMA 185 +#define TEGRA186_RESET_SCE_HSP 186 +#define TEGRA186_RESET_TSCTNSCE 187 +#define TEGRA186_RESET_SCE_TKE 188 +#define TEGRA186_RESET_SCE_GTE 189 +#define TEGRA186_RESET_SCE_CFG 190 +#define TEGRA186_RESET_ADSP_ALL 191 +/** @brief controls the power up/down sequence of UFSHC PSW partition. 
Controls LP_PWR_READY, LP_ISOL_EN, and LP_RESET_N signals */ +#define TEGRA186_RESET_UFSHC_LP_SEQ 192 +#define TEGRA186_RESET_SIZE 193 + +#endif diff --git a/include/dt-bindings/reset/tegra194-reset.h b/include/dt-bindings/reset/tegra194-reset.h new file mode 100644 index 0000000..473afaa --- /dev/null +++ b/include/dt-bindings/reset/tegra194-reset.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __ABI_MACH_T194_RESET_H +#define __ABI_MACH_T194_RESET_H + +#define TEGRA194_RESET_ACTMON 1 +#define TEGRA194_RESET_ADSP_ALL 2 +#define TEGRA194_RESET_AFI 3 +#define TEGRA194_RESET_CAN1 4 +#define TEGRA194_RESET_CAN2 5 +#define TEGRA194_RESET_DLA0 6 +#define TEGRA194_RESET_DLA1 7 +#define TEGRA194_RESET_DPAUX 8 +#define TEGRA194_RESET_DPAUX1 9 +#define TEGRA194_RESET_DPAUX2 10 +#define TEGRA194_RESET_DPAUX3 11 +#define TEGRA194_RESET_EQOS 17 +#define TEGRA194_RESET_GPCDMA 18 +#define TEGRA194_RESET_GPU 19 +#define TEGRA194_RESET_HDA 20 +#define TEGRA194_RESET_HDA2CODEC_2X 21 +#define TEGRA194_RESET_HDA2HDMICODEC 22 +#define TEGRA194_RESET_HOST1X 23 +#define TEGRA194_RESET_I2C1 24 +#define TEGRA194_RESET_I2C10 25 +#define TEGRA194_RESET_RSVD_26 26 +#define TEGRA194_RESET_RSVD_27 27 +#define TEGRA194_RESET_RSVD_28 28 +#define TEGRA194_RESET_I2C2 29 +#define TEGRA194_RESET_I2C3 30 +#define TEGRA194_RESET_I2C4 31 +#define TEGRA194_RESET_I2C6 32 +#define TEGRA194_RESET_I2C7 33 +#define TEGRA194_RESET_I2C8 34 +#define TEGRA194_RESET_I2C9 35 +#define TEGRA194_RESET_ISP 36 +#define TEGRA194_RESET_MIPI_CAL 37 +#define TEGRA194_RESET_MPHY_CLK_CTL 38 +#define TEGRA194_RESET_MPHY_L0_RX 39 +#define TEGRA194_RESET_MPHY_L0_TX 40 +#define TEGRA194_RESET_MPHY_L1_RX 41 +#define TEGRA194_RESET_MPHY_L1_TX 42 +#define TEGRA194_RESET_NVCSI 43 +#define TEGRA194_RESET_NVDEC 44 +#define TEGRA194_RESET_NVDISPLAY0_HEAD0 45 +#define TEGRA194_RESET_NVDISPLAY0_HEAD1 46 +#define TEGRA194_RESET_NVDISPLAY0_HEAD2 47 +#define TEGRA194_RESET_NVDISPLAY0_HEAD3 48 +#define TEGRA194_RESET_NVDISPLAY0_MISC 49 +#define TEGRA194_RESET_NVDISPLAY0_WGRP0 50 +#define TEGRA194_RESET_NVDISPLAY0_WGRP1 51 +#define TEGRA194_RESET_NVDISPLAY0_WGRP2 52 +#define TEGRA194_RESET_NVDISPLAY0_WGRP3 53 +#define TEGRA194_RESET_NVDISPLAY0_WGRP4 54 +#define TEGRA194_RESET_NVDISPLAY0_WGRP5 55 +#define TEGRA194_RESET_RSVD_56 56 +#define TEGRA194_RESET_RSVD_57 57 +#define TEGRA194_RESET_RSVD_58 58 +#define TEGRA194_RESET_NVENC 59 +#define TEGRA194_RESET_NVENC1 60 +#define TEGRA194_RESET_NVJPG 61 +#define TEGRA194_RESET_PCIE 62 +#define TEGRA194_RESET_PCIEXCLK 63 +#define TEGRA194_RESET_RSVD_64 64 +#define TEGRA194_RESET_RSVD_65 65 +#define TEGRA194_RESET_PVA0_ALL 66 +#define TEGRA194_RESET_PVA1_ALL 67 +#define TEGRA194_RESET_PWM1 68 +#define TEGRA194_RESET_PWM2 69 +#define TEGRA194_RESET_PWM3 70 +#define TEGRA194_RESET_PWM4 71 +#define TEGRA194_RESET_PWM5 72 +#define TEGRA194_RESET_PWM6 73 +#define TEGRA194_RESET_PWM7 74 +#define TEGRA194_RESET_PWM8 75 +#define TEGRA194_RESET_QSPI0 76 +#define TEGRA194_RESET_QSPI1 77 +#define TEGRA194_RESET_SATA 78 +#define TEGRA194_RESET_SATACOLD 79 +#define TEGRA194_RESET_SCE_ALL 80 +#define TEGRA194_RESET_RCE_ALL 81 +#define TEGRA194_RESET_SDMMC1 82 +#define TEGRA194_RESET_RSVD_83 83 +#define TEGRA194_RESET_SDMMC3 84 +#define TEGRA194_RESET_SDMMC4 85 +#define TEGRA194_RESET_SE 86 +#define TEGRA194_RESET_SOR0 87 +#define TEGRA194_RESET_SOR1 88 +#define TEGRA194_RESET_SOR2 89 +#define TEGRA194_RESET_SOR3 90 +#define TEGRA194_RESET_SPI1 91 
+#define TEGRA194_RESET_SPI2 92 +#define TEGRA194_RESET_SPI3 93 +#define TEGRA194_RESET_SPI4 94 +#define TEGRA194_RESET_TACH 95 +#define TEGRA194_RESET_RSVD_96 96 +#define TEGRA194_RESET_TSCTNVI 97 +#define TEGRA194_RESET_TSEC 98 +#define TEGRA194_RESET_TSECB 99 +#define TEGRA194_RESET_UARTA 100 +#define TEGRA194_RESET_UARTB 101 +#define TEGRA194_RESET_UARTC 102 +#define TEGRA194_RESET_UARTD 103 +#define TEGRA194_RESET_UARTE 104 +#define TEGRA194_RESET_UARTF 105 +#define TEGRA194_RESET_UARTG 106 +#define TEGRA194_RESET_UARTH 107 +#define TEGRA194_RESET_UFSHC 108 +#define TEGRA194_RESET_UFSHC_AXI_M 109 +#define TEGRA194_RESET_UFSHC_LP_SEQ 110 +#define TEGRA194_RESET_RSVD_111 111 +#define TEGRA194_RESET_VI 112 +#define TEGRA194_RESET_VIC 113 +#define TEGRA194_RESET_XUSB_PADCTL 114 +#define TEGRA194_RESET_NVDEC1 115 +#define TEGRA194_RESET_PEX0_CORE_0 116 +#define TEGRA194_RESET_PEX0_CORE_1 117 +#define TEGRA194_RESET_PEX0_CORE_2 118 +#define TEGRA194_RESET_PEX0_CORE_3 119 +#define TEGRA194_RESET_PEX0_CORE_4 120 +#define TEGRA194_RESET_PEX0_CORE_0_APB 121 +#define TEGRA194_RESET_PEX0_CORE_1_APB 122 +#define TEGRA194_RESET_PEX0_CORE_2_APB 123 +#define TEGRA194_RESET_PEX0_CORE_3_APB 124 +#define TEGRA194_RESET_PEX0_CORE_4_APB 125 +#define TEGRA194_RESET_PEX0_COMMON_APB 126 +#define TEGRA194_RESET_PEX1_CORE_5 129 +#define TEGRA194_RESET_PEX1_CORE_5_APB 130 +#define TEGRA194_RESET_CVNAS 131 +#define TEGRA194_RESET_CVNAS_FCM 132 +#define TEGRA194_RESET_DMIC5 144 +#define TEGRA194_RESET_APE 145 +#define TEGRA194_RESET_PEX_USB_UPHY 146 +#define TEGRA194_RESET_PEX_USB_UPHY_L0 147 +#define TEGRA194_RESET_PEX_USB_UPHY_L1 148 +#define TEGRA194_RESET_PEX_USB_UPHY_L2 149 +#define TEGRA194_RESET_PEX_USB_UPHY_L3 150 +#define TEGRA194_RESET_PEX_USB_UPHY_L4 151 +#define TEGRA194_RESET_PEX_USB_UPHY_L5 152 +#define TEGRA194_RESET_PEX_USB_UPHY_L6 153 +#define TEGRA194_RESET_PEX_USB_UPHY_L7 154 +#define TEGRA194_RESET_PEX_USB_UPHY_L8 155 +#define TEGRA194_RESET_PEX_USB_UPHY_L9 156 +#define TEGRA194_RESET_PEX_USB_UPHY_L10 157 +#define TEGRA194_RESET_PEX_USB_UPHY_L11 158 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL0 159 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL1 160 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL2 161 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL3 162 + +#endif diff --git a/include/dt-bindings/reset/tegra210-car.h b/include/dt-bindings/reset/tegra210-car.h new file mode 100644 index 0000000..9dc84ec --- /dev/null +++ b/include/dt-bindings/reset/tegra210-car.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides Tegra210-specific constants for binding + * nvidia,tegra210-car. 
+ */
+
+#ifndef _DT_BINDINGS_RESET_TEGRA210_CAR_H
+#define _DT_BINDINGS_RESET_TEGRA210_CAR_H
+
+#define TEGRA210_RESET(x) (7 * 32 + (x))
+#define TEGRA210_RST_DFLL_DVCO TEGRA210_RESET(0)
+#define TEGRA210_RST_ADSP TEGRA210_RESET(1)
+
+#endif /* _DT_BINDINGS_RESET_TEGRA210_CAR_H */
diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h
new file mode 100644
index 0000000..6d696d2
--- /dev/null
+++ b/include/dt-bindings/reset/ti-syscon.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * TI Syscon Reset definitions
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__
+#define __DT_BINDINGS_RESET_TI_SYSCON_H__
+
+/*
+ * The reset does not support the feature and corresponding
+ * values are not valid
+ */
+#define ASSERT_NONE (1 << 0)
+#define DEASSERT_NONE (1 << 1)
+#define STATUS_NONE (1 << 2)
+
+/* When set, this function is activated by setting (vs clearing) this bit */
+#define ASSERT_SET (1 << 3)
+#define DEASSERT_SET (1 << 4)
+#define STATUS_SET (1 << 5)
+
+/* The following are the inverse of the above and are added for consistency */
+#define ASSERT_CLEAR (0 << 3)
+#define DEASSERT_CLEAR (0 << 4)
+#define STATUS_CLEAR (0 << 5)
+
+#endif
diff --git a/include/dt-bindings/reset/xlnx-zynqmp-resets.h b/include/dt-bindings/reset/xlnx-zynqmp-resets.h
new file mode 100644
index 0000000..d44525b
--- /dev/null
+++ b/include/dt-bindings/reset/xlnx-zynqmp-resets.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_ZYNQMP_RESETS_H
+#define _DT_BINDINGS_ZYNQMP_RESETS_H
+
+#define ZYNQMP_RESET_PCIE_CFG 0
+#define ZYNQMP_RESET_PCIE_BRIDGE 1
+#define ZYNQMP_RESET_PCIE_CTRL 2
+#define ZYNQMP_RESET_DP 3
+#define ZYNQMP_RESET_SWDT_CRF 4
+#define ZYNQMP_RESET_AFI_FM5 5
+#define ZYNQMP_RESET_AFI_FM4 6
+#define ZYNQMP_RESET_AFI_FM3 7
+#define ZYNQMP_RESET_AFI_FM2 8
+#define ZYNQMP_RESET_AFI_FM1 9
+#define ZYNQMP_RESET_AFI_FM0 10
+#define ZYNQMP_RESET_GDMA 11
+#define ZYNQMP_RESET_GPU_PP1 12
+#define ZYNQMP_RESET_GPU_PP0 13
+#define ZYNQMP_RESET_GPU 14
+#define ZYNQMP_RESET_GT 15
+#define ZYNQMP_RESET_SATA 16
+#define ZYNQMP_RESET_ACPU3_PWRON 17
+#define ZYNQMP_RESET_ACPU2_PWRON 18
+#define ZYNQMP_RESET_ACPU1_PWRON 19
+#define ZYNQMP_RESET_ACPU0_PWRON 20
+#define ZYNQMP_RESET_APU_L2 21
+#define ZYNQMP_RESET_ACPU3 22
+#define ZYNQMP_RESET_ACPU2 23
+#define ZYNQMP_RESET_ACPU1 24
+#define ZYNQMP_RESET_ACPU0 25
+#define ZYNQMP_RESET_DDR 26
+#define ZYNQMP_RESET_APM_FPD 27
+#define ZYNQMP_RESET_SOFT 28
+#define ZYNQMP_RESET_GEM0 29
+#define ZYNQMP_RESET_GEM1 30
+#define ZYNQMP_RESET_GEM2 31
+#define ZYNQMP_RESET_GEM3 32
+#define ZYNQMP_RESET_QSPI 33
+#define ZYNQMP_RESET_UART0 34
+#define ZYNQMP_RESET_UART1 35
+#define ZYNQMP_RESET_SPI0 36
+#define ZYNQMP_RESET_SPI1 37
+#define ZYNQMP_RESET_SDIO0 38
+#define ZYNQMP_RESET_SDIO1 39
+#define ZYNQMP_RESET_CAN0 40
+#define ZYNQMP_RESET_CAN1 41
+#define ZYNQMP_RESET_I2C0 42
+#define ZYNQMP_RESET_I2C1 43
+#define ZYNQMP_RESET_TTC0 44
+#define ZYNQMP_RESET_TTC1 45
+#define ZYNQMP_RESET_TTC2 46
+#define ZYNQMP_RESET_TTC3 47
+#define ZYNQMP_RESET_SWDT_CRL 48
+#define ZYNQMP_RESET_NAND 49
+#define ZYNQMP_RESET_ADMA 50
+#define ZYNQMP_RESET_GPIO 51
+#define ZYNQMP_RESET_IOU_CC 52
+#define ZYNQMP_RESET_TIMESTAMP 53
+#define ZYNQMP_RESET_RPU_R50 54
+#define ZYNQMP_RESET_RPU_R51 55
+#define ZYNQMP_RESET_RPU_AMBA 56
+#define ZYNQMP_RESET_OCM 57
+#define ZYNQMP_RESET_RPU_PGE 58
+#define ZYNQMP_RESET_USB0_CORERESET 59
+#define ZYNQMP_RESET_USB1_CORERESET 60
+#define ZYNQMP_RESET_USB0_HIBERRESET 61
+#define ZYNQMP_RESET_USB1_HIBERRESET 62
+#define ZYNQMP_RESET_USB0_APB 63
+#define ZYNQMP_RESET_USB1_APB 64
+#define ZYNQMP_RESET_IPI 65
+#define ZYNQMP_RESET_APM_LPD 66
+#define ZYNQMP_RESET_RTC 67
+#define ZYNQMP_RESET_SYSMON 68
+#define ZYNQMP_RESET_AFI_FM6 69
+#define ZYNQMP_RESET_LPD_SWDT 70
+#define ZYNQMP_RESET_FPD 71
+#define ZYNQMP_RESET_RPU_DBG1 72
+#define ZYNQMP_RESET_RPU_DBG0 73
+#define ZYNQMP_RESET_DBG_LPD 74
+#define ZYNQMP_RESET_DBG_FPD 75
+#define ZYNQMP_RESET_APLL 76
+#define ZYNQMP_RESET_DPLL 77
+#define ZYNQMP_RESET_VPLL 78
+#define ZYNQMP_RESET_IOPLL 79
+#define ZYNQMP_RESET_RPLL 80
+#define ZYNQMP_RESET_GPO3_PL_0 81
+#define ZYNQMP_RESET_GPO3_PL_1 82
+#define ZYNQMP_RESET_GPO3_PL_2 83
+#define ZYNQMP_RESET_GPO3_PL_3 84
+#define ZYNQMP_RESET_GPO3_PL_4 85
+#define ZYNQMP_RESET_GPO3_PL_5 86
+#define ZYNQMP_RESET_GPO3_PL_6 87
+#define ZYNQMP_RESET_GPO3_PL_7 88
+#define ZYNQMP_RESET_GPO3_PL_8 89
+#define ZYNQMP_RESET_GPO3_PL_9 90
+#define ZYNQMP_RESET_GPO3_PL_10 91
+#define ZYNQMP_RESET_GPO3_PL_11 92
+#define ZYNQMP_RESET_GPO3_PL_12 93
+#define ZYNQMP_RESET_GPO3_PL_13 94
+#define ZYNQMP_RESET_GPO3_PL_14 95
+#define ZYNQMP_RESET_GPO3_PL_15 96
+#define ZYNQMP_RESET_GPO3_PL_16 97
+#define ZYNQMP_RESET_GPO3_PL_17 98
+#define ZYNQMP_RESET_GPO3_PL_18 99
+#define ZYNQMP_RESET_GPO3_PL_19 100
+#define ZYNQMP_RESET_GPO3_PL_20 101
+#define ZYNQMP_RESET_GPO3_PL_21 102
+#define ZYNQMP_RESET_GPO3_PL_22 103
+#define ZYNQMP_RESET_GPO3_PL_23 104
+#define ZYNQMP_RESET_GPO3_PL_24 105
+#define ZYNQMP_RESET_GPO3_PL_25 106
+#define ZYNQMP_RESET_GPO3_PL_26 107
+#define ZYNQMP_RESET_GPO3_PL_27 108
+#define ZYNQMP_RESET_GPO3_PL_28 109
+#define ZYNQMP_RESET_GPO3_PL_29 110
+#define ZYNQMP_RESET_GPO3_PL_30 111
+#define ZYNQMP_RESET_GPO3_PL_31 112
+#define ZYNQMP_RESET_RPU_LS 113
+#define ZYNQMP_RESET_PS_ONLY 114
+#define ZYNQMP_RESET_PL 115
+#define ZYNQMP_RESET_PS_PL0 116
+#define ZYNQMP_RESET_PS_PL1 117
+#define ZYNQMP_RESET_PS_PL2 118
+#define ZYNQMP_RESET_PS_PL3 119
+
+#endif
diff --git a/include/dt-bindings/soc/bcm2835-pm.h b/include/dt-bindings/soc/bcm2835-pm.h
new file mode 100644
index 0000000..153d75b
--- /dev/null
+++ b/include/dt-bindings/soc/bcm2835-pm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef _DT_BINDINGS_ARM_BCM2835_PM_H
+#define _DT_BINDINGS_ARM_BCM2835_PM_H
+
+#define BCM2835_POWER_DOMAIN_GRAFX 0
+#define BCM2835_POWER_DOMAIN_GRAFX_V3D 1
+#define BCM2835_POWER_DOMAIN_IMAGE 2
+#define BCM2835_POWER_DOMAIN_IMAGE_PERI 3
+#define BCM2835_POWER_DOMAIN_IMAGE_ISP 4
+#define BCM2835_POWER_DOMAIN_IMAGE_H264 5
+#define BCM2835_POWER_DOMAIN_USB 6
+#define BCM2835_POWER_DOMAIN_DSI0 7
+#define BCM2835_POWER_DOMAIN_DSI1 8
+#define BCM2835_POWER_DOMAIN_CAM0 9
+#define BCM2835_POWER_DOMAIN_CAM1 10
+#define BCM2835_POWER_DOMAIN_CCP2TX 11
+#define BCM2835_POWER_DOMAIN_HDMI 12
+
+#define BCM2835_POWER_DOMAIN_COUNT 13
+
+#define BCM2835_RESET_V3D 0
+#define BCM2835_RESET_ISP 1
+#define BCM2835_RESET_H264 2
+
+#define BCM2835_RESET_COUNT 3
+
+#endif /* _DT_BINDINGS_ARM_BCM2835_PM_H */
diff --git a/include/dt-bindings/soc/qcom,apr.h b/include/dt-bindings/soc/qcom,apr.h
new file mode 100644
index 0000000..0063624
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,apr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_QCOM_APR_H
+#define __DT_BINDINGS_QCOM_APR_H
+
+/* Domain IDs */
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+
+/* ADSP service IDs */
+#define APR_SVC_ADSP_CORE 0x3
+#define APR_SVC_AFE 0x4
+#define APR_SVC_VSM 0x5
+#define APR_SVC_VPM 0x6
+#define APR_SVC_ASM 0x7
+#define APR_SVC_ADM 0x8
+#define APR_SVC_ADSP_MVM 0x09
+#define APR_SVC_ADSP_CVS 0x0A
+#define APR_SVC_ADSP_CVP 0x0B
+#define APR_SVC_USM 0x0C
+#define APR_SVC_LSM 0x0D
+#define APR_SVC_VIDC 0x16
+#define APR_SVC_MAX 0x17
+
+#endif /* __DT_BINDINGS_QCOM_APR_H */
diff --git a/include/dt-bindings/soc/qcom,gsbi.h b/include/dt-bindings/soc/qcom,gsbi.h
new file mode 100644
index 0000000..c00ab8c
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,gsbi.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+#ifndef __DT_BINDINGS_QCOM_GSBI_H
+#define __DT_BINDINGS_QCOM_GSBI_H
+
+#define GSBI_PROT_IDLE 0
+#define GSBI_PROT_I2C_UIM 1
+#define GSBI_PROT_I2C 2
+#define GSBI_PROT_SPI 3
+#define GSBI_PROT_UART_W_FC 4
+#define GSBI_PROT_UIM 5
+#define GSBI_PROT_I2C_UART 6
+
+#define GSBI_CRCI_QUP 0
+#define GSBI_CRCI_UART 1
+
+#endif
diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h
new file mode 100644
index 0000000..868f998
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_QCOM_RPMH_RSC_H__
+#define __DT_QCOM_RPMH_RSC_H__
+
+#define SLEEP_TCS 0
+#define WAKE_TCS 1
+#define ACTIVE_TCS 2
+#define CONTROL_TCS 3
+
+#endif /* __DT_QCOM_RPMH_RSC_H__ */
diff --git a/include/dt-bindings/soc/rockchip,boot-mode.h b/include/dt-bindings/soc/rockchip,boot-mode.h
new file mode 100644
index 0000000..4b0914c
--- /dev/null
+++ b/include/dt-bindings/soc/rockchip,boot-mode.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ROCKCHIP_BOOT_MODE_H
+#define __ROCKCHIP_BOOT_MODE_H
+
+/* high 24 bits are the tag, low 8 bits are the type */
+#define REBOOT_FLAG 0x5242C300
+/* normal boot */
+#define BOOT_NORMAL (REBOOT_FLAG + 0)
+/* enter bootloader rockusb mode */
+#define BOOT_BL_DOWNLOAD (REBOOT_FLAG + 1)
+/* enter recovery */
+#define BOOT_RECOVERY (REBOOT_FLAG + 3)
+/* enter fastboot mode */
+#define BOOT_FASTBOOT (REBOOT_FLAG + 9)
+
+#endif
diff --git a/include/dt-bindings/soc/ti,sci_pm_domain.h b/include/dt-bindings/soc/ti,sci_pm_domain.h
new file mode 100644
index 0000000..8f2a736
--- /dev/null
+++ b/include/dt-bindings/soc/ti,sci_pm_domain.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_TI_SCI_PM_DOMAIN_H
+#define __DT_BINDINGS_TI_SCI_PM_DOMAIN_H
+
+#define TI_SCI_PD_EXCLUSIVE 1
+#define TI_SCI_PD_SHARED 0
+
+#endif /* __DT_BINDINGS_TI_SCI_PM_DOMAIN_H */
diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h
new file mode 100644
index 0000000..df04470
--- /dev/null
+++ b/include/dt-bindings/soc/zte,pm_domains.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ * + * Author: Baoyou Xie + */ + +#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H +#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H + +#define DM_ZX296718_SAPPU 0 +#define DM_ZX296718_VDE 1 /* g1v6 */ +#define DM_ZX296718_VCE 2 /* h1v6 */ +#define DM_ZX296718_HDE 3 /* g2v2 */ +#define DM_ZX296718_VIU 4 +#define DM_ZX296718_USB20 5 +#define DM_ZX296718_USB21 6 +#define DM_ZX296718_USB30 7 +#define DM_ZX296718_HSIC 8 +#define DM_ZX296718_GMAC 9 +#define DM_ZX296718_TS 10 +#define DM_ZX296718_VOU 11 + +#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */ diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h new file mode 100644 index 0000000..3c3e16c --- /dev/null +++ b/include/dt-bindings/sound/apq8016-lpass.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_APQ8016_LPASS_H +#define __DT_APQ8016_LPASS_H + +#define MI2S_PRIMARY 0 +#define MI2S_SECONDARY 1 +#define MI2S_TERTIARY 2 +#define MI2S_QUATERNARY 3 + +#endif /* __DT_APQ8016_LPASS_H */ diff --git a/include/dt-bindings/sound/audio-jack-events.h b/include/dt-bindings/sound/audio-jack-events.h new file mode 100644 index 0000000..1b29b29 --- /dev/null +++ b/include/dt-bindings/sound/audio-jack-events.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __AUDIO_JACK_EVENTS_H +#define __AUDIO_JACK_EVENTS_H + +#define JACK_HEADPHONE 1 +#define JACK_MICROPHONE 2 +#define JACK_LINEOUT 3 +#define JACK_LINEIN 4 + +#endif /* __AUDIO_JACK_EVENTS_H */ diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h new file mode 100644 index 0000000..7549d50 --- /dev/null +++ b/include/dt-bindings/sound/cs35l32.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_CS35L32_H +#define __DT_CS35L32_H + +#define CS35L32_BOOST_MGR_AUTO 0 +#define CS35L32_BOOST_MGR_AUTO_AUDIO 1 +#define CS35L32_BOOST_MGR_BYPASS 2 +#define CS35L32_BOOST_MGR_FIXED 3 + +#define CS35L32_DATA_CFG_LR_VP 0 +#define CS35L32_DATA_CFG_LR_STAT 1 +#define CS35L32_DATA_CFG_LR 2 +#define CS35L32_DATA_CFG_LR_VPSTAT 3 + +#define CS35L32_BATT_THRESH_3_1V 0 +#define CS35L32_BATT_THRESH_3_2V 1 +#define CS35L32_BATT_THRESH_3_3V 2 +#define CS35L32_BATT_THRESH_3_4V 3 + +#define CS35L32_BATT_RECOV_3_1V 0 +#define CS35L32_BATT_RECOV_3_2V 1 +#define CS35L32_BATT_RECOV_3_3V 2 +#define CS35L32_BATT_RECOV_3_4V 3 +#define CS35L32_BATT_RECOV_3_5V 4 +#define CS35L32_BATT_RECOV_3_6V 5 + +#endif /* __DT_CS35L32_H */ diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h new file mode 100644 index 0000000..f25d83c --- /dev/null +++ b/include/dt-bindings/sound/cs42l42.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cs42l42.h -- CS42L42 ALSA SoC audio driver DT bindings header + * + * Copyright 2016 Cirrus Logic, Inc. 
+ * + * Author: James Schulman + * Author: Brian Austin + * Author: Michael White + */ + +#ifndef __DT_CS42L42_H +#define __DT_CS42L42_H + +/* HPOUT Load Capacity */ +#define CS42L42_HPOUT_LOAD_1NF 0 +#define CS42L42_HPOUT_LOAD_10NF 1 + +/* HPOUT Clamp to GND Override */ +#define CS42L42_HPOUT_CLAMP_EN 0 +#define CS42L42_HPOUT_CLAMP_DIS 1 + +/* Tip Sense Inversion */ +#define CS42L42_TS_INV_DIS 0 +#define CS42L42_TS_INV_EN 1 + +/* Tip Sense Debounce */ +#define CS42L42_TS_DBNCE_0 0 +#define CS42L42_TS_DBNCE_125 1 +#define CS42L42_TS_DBNCE_250 2 +#define CS42L42_TS_DBNCE_500 3 +#define CS42L42_TS_DBNCE_750 4 +#define CS42L42_TS_DBNCE_1000 5 +#define CS42L42_TS_DBNCE_1250 6 +#define CS42L42_TS_DBNCE_1500 7 + +/* Button Press Software Debounce Times */ +#define CS42L42_BTN_DET_INIT_DBNCE_MIN 0 +#define CS42L42_BTN_DET_INIT_DBNCE_DEFAULT 100 +#define CS42L42_BTN_DET_INIT_DBNCE_MAX 200 + +#define CS42L42_BTN_DET_EVENT_DBNCE_MIN 0 +#define CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT 10 +#define CS42L42_BTN_DET_EVENT_DBNCE_MAX 20 + +/* Button Detect Level Sensitivities */ +#define CS42L42_NUM_BIASES 4 + +#define CS42L42_HS_DET_LEVEL_15 0x0F +#define CS42L42_HS_DET_LEVEL_8 0x08 +#define CS42L42_HS_DET_LEVEL_4 0x04 +#define CS42L42_HS_DET_LEVEL_1 0x01 + +#define CS42L42_HS_DET_LEVEL_MIN 0 +#define CS42L42_HS_DET_LEVEL_MAX 0x3F + +/* HS Bias Ramp Rate */ + +#define CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL 0 +#define CS42L42_HSBIAS_RAMP_FAST 1 +#define CS42L42_HSBIAS_RAMP_SLOW 2 +#define CS42L42_HSBIAS_RAMP_SLOWEST 3 + +#define CS42L42_HSBIAS_RAMP_TIME0 10 +#define CS42L42_HSBIAS_RAMP_TIME1 40 +#define CS42L42_HSBIAS_RAMP_TIME2 90 +#define CS42L42_HSBIAS_RAMP_TIME3 170 + +#endif /* __DT_CS42L42_H */ diff --git a/include/dt-bindings/sound/fsl-imx-audmux.h b/include/dt-bindings/sound/fsl-imx-audmux.h new file mode 100644 index 0000000..15f138b --- /dev/null +++ b/include/dt-bindings/sound/fsl-imx-audmux.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_FSL_IMX_AUDMUX_H +#define __DT_FSL_IMX_AUDMUX_H + +#define MX27_AUDMUX_HPCR1_SSI0 0 +#define MX27_AUDMUX_HPCR2_SSI1 1 +#define MX27_AUDMUX_HPCR3_SSI_PINS_4 2 +#define MX27_AUDMUX_PPCR1_SSI_PINS_1 3 +#define MX27_AUDMUX_PPCR2_SSI_PINS_2 4 +#define MX27_AUDMUX_PPCR3_SSI_PINS_3 5 + +#define MX31_AUDMUX_PORT1_SSI0 0 +#define MX31_AUDMUX_PORT2_SSI1 1 +#define MX31_AUDMUX_PORT3_SSI_PINS_3 2 +#define MX31_AUDMUX_PORT4_SSI_PINS_4 3 +#define MX31_AUDMUX_PORT5_SSI_PINS_5 4 +#define MX31_AUDMUX_PORT6_SSI_PINS_6 5 +#define MX31_AUDMUX_PORT7_SSI_PINS_7 6 + +#define MX51_AUDMUX_PORT1_SSI0 0 +#define MX51_AUDMUX_PORT2_SSI1 1 +#define MX51_AUDMUX_PORT3 2 +#define MX51_AUDMUX_PORT4 3 +#define MX51_AUDMUX_PORT5 4 +#define MX51_AUDMUX_PORT6 5 +#define MX51_AUDMUX_PORT7 6 + +/* + * TFCSEL/RFCSEL (i.MX27) or TFSEL/TCSEL/RFSEL/RCSEL (i.MX31/51/53/6Q) + * can be sourced from Rx/Tx. 
+ */ +#define IMX_AUDMUX_RXFS 0x8 +#define IMX_AUDMUX_RXCLK 0x8 + +/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */ +#define IMX_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff) +#define IMX_AUDMUX_V1_PCR_INMEN (1 << 8) +#define IMX_AUDMUX_V1_PCR_TXRXEN (1 << 10) +#define IMX_AUDMUX_V1_PCR_SYN (1 << 12) +#define IMX_AUDMUX_V1_PCR_RXDSEL(x) (((x) & 0x7) << 13) +#define IMX_AUDMUX_V1_PCR_RFCSEL(x) (((x) & 0xf) << 20) +#define IMX_AUDMUX_V1_PCR_RCLKDIR (1 << 24) +#define IMX_AUDMUX_V1_PCR_RFSDIR (1 << 25) +#define IMX_AUDMUX_V1_PCR_TFCSEL(x) (((x) & 0xf) << 26) +#define IMX_AUDMUX_V1_PCR_TCLKDIR (1 << 30) +#define IMX_AUDMUX_V1_PCR_TFSDIR (1 << 31) + +/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */ +#define IMX_AUDMUX_V2_PTCR_TFSDIR (1 << 31) +#define IMX_AUDMUX_V2_PTCR_TFSEL(x) (((x) & 0xf) << 27) +#define IMX_AUDMUX_V2_PTCR_TCLKDIR (1 << 26) +#define IMX_AUDMUX_V2_PTCR_TCSEL(x) (((x) & 0xf) << 22) +#define IMX_AUDMUX_V2_PTCR_RFSDIR (1 << 21) +#define IMX_AUDMUX_V2_PTCR_RFSEL(x) (((x) & 0xf) << 17) +#define IMX_AUDMUX_V2_PTCR_RCLKDIR (1 << 16) +#define IMX_AUDMUX_V2_PTCR_RCSEL(x) (((x) & 0xf) << 12) +#define IMX_AUDMUX_V2_PTCR_SYN (1 << 11) + +#define IMX_AUDMUX_V2_PDCR_RXDSEL(x) (((x) & 0x7) << 13) +#define IMX_AUDMUX_V2_PDCR_TXRXEN (1 << 12) +#define IMX_AUDMUX_V2_PDCR_MODE(x) (((x) & 0x3) << 8) +#define IMX_AUDMUX_V2_PDCR_INMMASK(x) ((x) & 0xff) + +#endif /* __DT_FSL_IMX_AUDMUX_H */ diff --git a/include/dt-bindings/sound/madera.h b/include/dt-bindings/sound/madera.h new file mode 100644 index 0000000..d0096d5 --- /dev/null +++ b/include/dt-bindings/sound/madera.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Device Tree defines for Madera codecs + * + * Copyright (C) 2016-2017 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
+ */ + +#ifndef DT_BINDINGS_SOUND_MADERA_H +#define DT_BINDINGS_SOUND_MADERA_H + +#define MADERA_INMODE_DIFF 0 +#define MADERA_INMODE_SE 1 +#define MADERA_INMODE_DMIC 2 + +#define MADERA_DMIC_REF_MICVDD 0 +#define MADERA_DMIC_REF_MICBIAS1 1 +#define MADERA_DMIC_REF_MICBIAS2 2 +#define MADERA_DMIC_REF_MICBIAS3 3 + +#define CS47L35_DMIC_REF_MICBIAS1B 1 +#define CS47L35_DMIC_REF_MICBIAS2A 2 +#define CS47L35_DMIC_REF_MICBIAS2B 3 + +#endif diff --git a/include/dt-bindings/sound/meson-g12a-tohdmitx.h b/include/dt-bindings/sound/meson-g12a-tohdmitx.h new file mode 100644 index 0000000..c5e1f48 --- /dev/null +++ b/include/dt-bindings/sound/meson-g12a-tohdmitx.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_MESON_G12A_TOHDMITX_H +#define __DT_MESON_G12A_TOHDMITX_H + +#define TOHDMITX_I2S_IN_A 0 +#define TOHDMITX_I2S_IN_B 1 +#define TOHDMITX_I2S_IN_C 2 +#define TOHDMITX_I2S_OUT 3 +#define TOHDMITX_SPDIF_IN_A 4 +#define TOHDMITX_SPDIF_IN_B 5 +#define TOHDMITX_SPDIF_OUT 6 + +#endif /* __DT_MESON_G12A_TOHDMITX_H */ diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h new file mode 100644 index 0000000..1df06f8 --- /dev/null +++ b/include/dt-bindings/sound/qcom,q6afe.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_Q6_AFE_H__ +#define __DT_BINDINGS_Q6_AFE_H__ + +/* Audio Front End (AFE) virtual ports IDs */ +#define HDMI_RX 1 +#define SLIMBUS_0_RX 2 +#define SLIMBUS_0_TX 3 +#define SLIMBUS_1_RX 4 +#define SLIMBUS_1_TX 5 +#define SLIMBUS_2_RX 6 +#define SLIMBUS_2_TX 7 +#define SLIMBUS_3_RX 8 +#define SLIMBUS_3_TX 9 +#define SLIMBUS_4_RX 10 +#define SLIMBUS_4_TX 11 +#define SLIMBUS_5_RX 12 +#define SLIMBUS_5_TX 13 +#define SLIMBUS_6_RX 14 +#define SLIMBUS_6_TX 15 +#define PRIMARY_MI2S_RX 16 +#define PRIMARY_MI2S_TX 17 +#define SECONDARY_MI2S_RX 18 +#define SECONDARY_MI2S_TX 19 +#define TERTIARY_MI2S_RX 20 +#define TERTIARY_MI2S_TX 21 +#define QUATERNARY_MI2S_RX 22 +#define QUATERNARY_MI2S_TX 23 +#define PRIMARY_TDM_RX_0 24 +#define PRIMARY_TDM_TX_0 25 +#define PRIMARY_TDM_RX_1 26 +#define PRIMARY_TDM_TX_1 27 +#define PRIMARY_TDM_RX_2 28 +#define PRIMARY_TDM_TX_2 29 +#define PRIMARY_TDM_RX_3 30 +#define PRIMARY_TDM_TX_3 31 +#define PRIMARY_TDM_RX_4 32 +#define PRIMARY_TDM_TX_4 33 +#define PRIMARY_TDM_RX_5 34 +#define PRIMARY_TDM_TX_5 35 +#define PRIMARY_TDM_RX_6 36 +#define PRIMARY_TDM_TX_6 37 +#define PRIMARY_TDM_RX_7 38 +#define PRIMARY_TDM_TX_7 39 +#define SECONDARY_TDM_RX_0 40 +#define SECONDARY_TDM_TX_0 41 +#define SECONDARY_TDM_RX_1 42 +#define SECONDARY_TDM_TX_1 43 +#define SECONDARY_TDM_RX_2 44 +#define SECONDARY_TDM_TX_2 45 +#define SECONDARY_TDM_RX_3 46 +#define SECONDARY_TDM_TX_3 47 +#define SECONDARY_TDM_RX_4 48 +#define SECONDARY_TDM_TX_4 49 +#define SECONDARY_TDM_RX_5 50 +#define SECONDARY_TDM_TX_5 51 +#define SECONDARY_TDM_RX_6 52 +#define SECONDARY_TDM_TX_6 53 +#define SECONDARY_TDM_RX_7 54 +#define SECONDARY_TDM_TX_7 55 +#define TERTIARY_TDM_RX_0 56 +#define TERTIARY_TDM_TX_0 57 +#define TERTIARY_TDM_RX_1 58 +#define TERTIARY_TDM_TX_1 59 +#define TERTIARY_TDM_RX_2 60 +#define TERTIARY_TDM_TX_2 61 +#define TERTIARY_TDM_RX_3 62 +#define TERTIARY_TDM_TX_3 63 +#define TERTIARY_TDM_RX_4 64 +#define TERTIARY_TDM_TX_4 65 +#define TERTIARY_TDM_RX_5 66 +#define TERTIARY_TDM_TX_5 67 +#define TERTIARY_TDM_RX_6 68 +#define TERTIARY_TDM_TX_6 69 +#define TERTIARY_TDM_RX_7 70 +#define TERTIARY_TDM_TX_7 71 +#define QUATERNARY_TDM_RX_0 72 +#define QUATERNARY_TDM_TX_0 73 +#define QUATERNARY_TDM_RX_1 74 
+#define QUATERNARY_TDM_TX_1 75 +#define QUATERNARY_TDM_RX_2 76 +#define QUATERNARY_TDM_TX_2 77 +#define QUATERNARY_TDM_RX_3 78 +#define QUATERNARY_TDM_TX_3 79 +#define QUATERNARY_TDM_RX_4 80 +#define QUATERNARY_TDM_TX_4 81 +#define QUATERNARY_TDM_RX_5 82 +#define QUATERNARY_TDM_TX_5 83 +#define QUATERNARY_TDM_RX_6 84 +#define QUATERNARY_TDM_TX_6 85 +#define QUATERNARY_TDM_RX_7 86 +#define QUATERNARY_TDM_TX_7 87 +#define QUINARY_TDM_RX_0 88 +#define QUINARY_TDM_TX_0 89 +#define QUINARY_TDM_RX_1 90 +#define QUINARY_TDM_TX_1 91 +#define QUINARY_TDM_RX_2 92 +#define QUINARY_TDM_TX_2 93 +#define QUINARY_TDM_RX_3 94 +#define QUINARY_TDM_TX_3 95 +#define QUINARY_TDM_RX_4 96 +#define QUINARY_TDM_TX_4 97 +#define QUINARY_TDM_RX_5 98 +#define QUINARY_TDM_TX_5 99 +#define QUINARY_TDM_RX_6 100 +#define QUINARY_TDM_TX_6 101 +#define QUINARY_TDM_RX_7 102 +#define QUINARY_TDM_TX_7 103 +#define DISPLAY_PORT_RX 104 + +#endif /* __DT_BINDINGS_Q6_AFE_H__ */ + diff --git a/include/dt-bindings/sound/qcom,q6asm.h b/include/dt-bindings/sound/qcom,q6asm.h new file mode 100644 index 0000000..1eb77d8 --- /dev/null +++ b/include/dt-bindings/sound/qcom,q6asm.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_Q6_ASM_H__ +#define __DT_BINDINGS_Q6_ASM_H__ + +#define MSM_FRONTEND_DAI_MULTIMEDIA1 0 +#define MSM_FRONTEND_DAI_MULTIMEDIA2 1 +#define MSM_FRONTEND_DAI_MULTIMEDIA3 2 +#define MSM_FRONTEND_DAI_MULTIMEDIA4 3 +#define MSM_FRONTEND_DAI_MULTIMEDIA5 4 +#define MSM_FRONTEND_DAI_MULTIMEDIA6 5 +#define MSM_FRONTEND_DAI_MULTIMEDIA7 6 +#define MSM_FRONTEND_DAI_MULTIMEDIA8 7 +#define MSM_FRONTEND_DAI_MULTIMEDIA9 8 +#define MSM_FRONTEND_DAI_MULTIMEDIA10 9 +#define MSM_FRONTEND_DAI_MULTIMEDIA11 10 +#define MSM_FRONTEND_DAI_MULTIMEDIA12 11 +#define MSM_FRONTEND_DAI_MULTIMEDIA13 12 +#define MSM_FRONTEND_DAI_MULTIMEDIA14 13 +#define MSM_FRONTEND_DAI_MULTIMEDIA15 14 +#define MSM_FRONTEND_DAI_MULTIMEDIA16 15 + +#endif /* __DT_BINDINGS_Q6_ASM_H__ */ diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h new file mode 100644 index 0000000..154c9b4 --- /dev/null +++ b/include/dt-bindings/sound/rt5640.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_RT5640_H +#define __DT_RT5640_H + +#define RT5640_DMIC1_DATA_PIN_NONE 0 +#define RT5640_DMIC1_DATA_PIN_IN1P 1 +#define RT5640_DMIC1_DATA_PIN_GPIO3 2 + +#define RT5640_DMIC2_DATA_PIN_NONE 0 +#define RT5640_DMIC2_DATA_PIN_IN1N 1 +#define RT5640_DMIC2_DATA_PIN_GPIO4 2 + +#define RT5640_JD_SRC_GPIO1 1 +#define RT5640_JD_SRC_JD1_IN4P 2 +#define RT5640_JD_SRC_JD2_IN4N 3 +#define RT5640_JD_SRC_GPIO2 4 +#define RT5640_JD_SRC_GPIO3 5 +#define RT5640_JD_SRC_GPIO4 6 + +#define RT5640_OVCD_SF_0P5 0 +#define RT5640_OVCD_SF_0P75 1 +#define RT5640_OVCD_SF_1P0 2 +#define RT5640_OVCD_SF_1P5 3 + +#endif /* __DT_RT5640_H */ diff --git a/include/dt-bindings/sound/rt5651.h b/include/dt-bindings/sound/rt5651.h new file mode 100644 index 0000000..2f2dac9 --- /dev/null +++ b/include/dt-bindings/sound/rt5651.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_RT5651_H +#define __DT_RT5651_H + +#define RT5651_JD_NULL 0 +#define RT5651_JD1_1 1 +#define RT5651_JD1_2 2 +#define RT5651_JD2 3 + +#define RT5651_OVCD_SF_0P5 0 +#define RT5651_OVCD_SF_0P75 1 +#define RT5651_OVCD_SF_1P0 2 +#define RT5651_OVCD_SF_1P5 3 + +#endif /* __DT_RT5651_H */ diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h new file mode 100644 index 0000000..77545f1 --- /dev/null +++ 
b/include/dt-bindings/sound/samsung-i2s.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_SAMSUNG_I2S_H +#define _DT_BINDINGS_SAMSUNG_I2S_H + +#define CLK_I2S_CDCLK 0 +#define CLK_I2S_RCLK_SRC 1 +#define CLK_I2S_RCLK_PSR 2 + +#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */ diff --git a/include/dt-bindings/sound/tas2552.h b/include/dt-bindings/sound/tas2552.h new file mode 100644 index 0000000..0daeb83 --- /dev/null +++ b/include/dt-bindings/sound/tas2552.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_TAS2552_H +#define __DT_TAS2552_H + +#define TAS2552_PLL_CLKIN (0) +#define TAS2552_PDM_CLK (1) +#define TAS2552_CLK_TARGET_MASK (1) + +#define TAS2552_PLL_CLKIN_MCLK ((0 << 1) | TAS2552_PLL_CLKIN) +#define TAS2552_PLL_CLKIN_BCLK ((1 << 1) | TAS2552_PLL_CLKIN) +#define TAS2552_PLL_CLKIN_IVCLKIN ((2 << 1) | TAS2552_PLL_CLKIN) +#define TAS2552_PLL_CLKIN_1_8_FIXED ((3 << 1) | TAS2552_PLL_CLKIN) + +#define TAS2552_PDM_CLK_PLL ((0 << 1) | TAS2552_PDM_CLK) +#define TAS2552_PDM_CLK_IVCLKIN ((1 << 1) | TAS2552_PDM_CLK) +#define TAS2552_PDM_CLK_BCLK ((2 << 1) | TAS2552_PDM_CLK) +#define TAS2552_PDM_CLK_MCLK ((3 << 1) | TAS2552_PDM_CLK) + +#endif /* __DT_TAS2552_H */ diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h new file mode 100644 index 0000000..c6895a1 --- /dev/null +++ b/include/dt-bindings/sound/tlv320aic31xx-micbias.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_TLV320AIC31XX_MICBIAS_H +#define __DT_TLV320AIC31XX_MICBIAS_H + +#define MICBIAS_2_0V 1 +#define MICBIAS_2_5V 2 +#define MICBIAS_AVDDV 3 + +#endif /* __DT_TLV320AIC31XX_MICBIAS_H */ diff --git a/include/dt-bindings/spmi/spmi.h b/include/dt-bindings/spmi/spmi.h new file mode 100644 index 0000000..ad4a434 --- /dev/null +++ b/include/dt-bindings/spmi/spmi.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2013, The Linux Foundation. All rights reserved. + */ +#ifndef __DT_BINDINGS_SPMI_H +#define __DT_BINDINGS_SPMI_H + +#define SPMI_USID 0 +#define SPMI_GSID 1 + +#endif diff --git a/include/dt-bindings/thermal/lm90.h b/include/dt-bindings/thermal/lm90.h new file mode 100644 index 0000000..eed91a1 --- /dev/null +++ b/include/dt-bindings/thermal/lm90.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the LM90 thermal bindings. + */ + +#ifndef _DT_BINDINGS_THERMAL_LM90_H_ +#define _DT_BINDINGS_THERMAL_LM90_H_ + +#define LM90_LOCAL_TEMPERATURE 0 +#define LM90_REMOTE_TEMPERATURE 1 +#define LM90_REMOTE2_TEMPERATURE 2 + +#endif diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h new file mode 100644 index 0000000..444c7bd --- /dev/null +++ b/include/dt-bindings/thermal/tegra124-soctherm.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for binding nvidia,tegra124-soctherm. 
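+ *
+ * These constants are meant to be used as cell arguments to the sensor
+ * phandle, e.g. (an illustrative device-tree fragment):
+ *
+ * thermal-sensors = <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;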
+ */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H +#define _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H + +#define TEGRA124_SOCTHERM_SENSOR_CPU 0 +#define TEGRA124_SOCTHERM_SENSOR_MEM 1 +#define TEGRA124_SOCTHERM_SENSOR_GPU 2 +#define TEGRA124_SOCTHERM_SENSOR_PLLX 3 +#define TEGRA124_SOCTHERM_SENSOR_NUM 4 + +#define TEGRA_SOCTHERM_THROT_LEVEL_NONE 0 +#define TEGRA_SOCTHERM_THROT_LEVEL_LOW 1 +#define TEGRA_SOCTHERM_THROT_LEVEL_MED 2 +#define TEGRA_SOCTHERM_THROT_LEVEL_HIGH 3 + +#endif diff --git a/include/dt-bindings/thermal/tegra186-bpmp-thermal.h b/include/dt-bindings/thermal/tegra186-bpmp-thermal.h new file mode 100644 index 0000000..a96b8fa --- /dev/null +++ b/include/dt-bindings/thermal/tegra186-bpmp-thermal.h @@ -0,0 +1,14 @@ +/* + * This header provides constants for binding nvidia,tegra186-bpmp-thermal. + */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA186_BPMP_THERMAL_H +#define _DT_BINDINGS_THERMAL_TEGRA186_BPMP_THERMAL_H + +#define TEGRA186_BPMP_THERMAL_ZONE_CPU 2 +#define TEGRA186_BPMP_THERMAL_ZONE_GPU 3 +#define TEGRA186_BPMP_THERMAL_ZONE_AUX 4 +#define TEGRA186_BPMP_THERMAL_ZONE_PLLX 5 +#define TEGRA186_BPMP_THERMAL_ZONE_AO 6 + +#endif diff --git a/include/dt-bindings/thermal/tegra194-bpmp-thermal.h b/include/dt-bindings/thermal/tegra194-bpmp-thermal.h new file mode 100644 index 0000000..aa7fb08 --- /dev/null +++ b/include/dt-bindings/thermal/tegra194-bpmp-thermal.h @@ -0,0 +1,15 @@ +/* + * This header provides constants for binding nvidia,tegra194-bpmp-thermal. + */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA194_BPMP_THERMAL_H +#define _DT_BINDINGS_THERMAL_TEGRA194_BPMP_THERMAL_H + +#define TEGRA194_BPMP_THERMAL_ZONE_CPU 2 +#define TEGRA194_BPMP_THERMAL_ZONE_GPU 3 +#define TEGRA194_BPMP_THERMAL_ZONE_AUX 4 +#define TEGRA194_BPMP_THERMAL_ZONE_PLLX 5 +#define TEGRA194_BPMP_THERMAL_ZONE_AO 6 +#define TEGRA194_BPMP_THERMAL_ZONE_TJ_MAX 7 + +#endif diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h new file mode 100644 index 0000000..bc7babb --- /dev/null +++ b/include/dt-bindings/thermal/thermal.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides constants for most thermal bindings. 
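+ *
+ * For example, a cooling map commonly passes THERMAL_NO_LIMIT for both
+ * state bounds to leave the cooling range up to the governor (an
+ * illustrative device-tree fragment):
+ *
+ * cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;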
+ * + * Copyright (C) 2013 Texas Instruments + * Eduardo Valentin + */ + +#ifndef _DT_BINDINGS_THERMAL_THERMAL_H +#define _DT_BINDINGS_THERMAL_THERMAL_H + +/* On cooling devices upper and lower limits */ +#define THERMAL_NO_LIMIT (~0) + +#endif + diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h new file mode 100644 index 0000000..642e4e7 --- /dev/null +++ b/include/dt-bindings/thermal/thermal_exynos.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions + * + * Copyright (C) 2014 Samsung Electronics + * Lukasz Majewski + */ + +#ifndef _EXYNOS_THERMAL_TMU_DT_H +#define _EXYNOS_THERMAL_TMU_DT_H + +#define TYPE_ONE_POINT_TRIMMING 0 +#define TYPE_ONE_POINT_TRIMMING_25 1 +#define TYPE_ONE_POINT_TRIMMING_85 2 +#define TYPE_TWO_POINT_TRIMMING 3 +#define TYPE_NONE 4 + +#endif /* _EXYNOS_THERMAL_TMU_DT_H */ diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h new file mode 100644 index 0000000..985f2bb --- /dev/null +++ b/include/dt-bindings/usb/pd.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_POWER_DELIVERY_H +#define __DT_POWER_DELIVERY_H + +/* Power delivery Power Data Object definitions */ +#define PDO_TYPE_FIXED 0 +#define PDO_TYPE_BATT 1 +#define PDO_TYPE_VAR 2 +#define PDO_TYPE_APDO 3 + +#define PDO_TYPE_SHIFT 30 +#define PDO_TYPE_MASK 0x3 + +#define PDO_TYPE(t) ((t) << PDO_TYPE_SHIFT) + +#define PDO_VOLT_MASK 0x3ff +#define PDO_CURR_MASK 0x3ff +#define PDO_PWR_MASK 0x3ff + +#define PDO_FIXED_DUAL_ROLE (1 << 29) /* Power role swap supported */ +#define PDO_FIXED_SUSPEND (1 << 28) /* USB Suspend supported (Source) */ +#define PDO_FIXED_HIGHER_CAP (1 << 28) /* Requires more than vSafe5V (Sink) */ +#define PDO_FIXED_EXTPOWER (1 << 27) /* Externally powered */ +#define PDO_FIXED_USB_COMM (1 << 26) /* USB communications capable */ +#define PDO_FIXED_DATA_SWAP (1 << 25) /* Data role swap supported */ +#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ + +#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT) +#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT) + +#define PDO_FIXED(mv, ma, flags) \ + (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ + PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) + +#define VSAFE5V 5000 /* mv units */ + +#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ +#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ + +#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MIN_VOLT_SHIFT) +#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MAX_VOLT_SHIFT) +#define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT) + +#define PDO_BATT(min_mv, max_mv, max_mw) \ + (PDO_TYPE(PDO_TYPE_BATT) | PDO_BATT_MIN_VOLT(min_mv) | \ + PDO_BATT_MAX_VOLT(max_mv) | PDO_BATT_MAX_POWER(max_mw)) + +#define PDO_VAR_MAX_VOLT_SHIFT 20 /* 50mV units */ +#define PDO_VAR_MIN_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_VAR_MAX_CURR_SHIFT 0 /* 10mA units */ + +#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MIN_VOLT_SHIFT) +#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MAX_VOLT_SHIFT) +#define PDO_VAR_MAX_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_VAR_MAX_CURR_SHIFT) + +#define PDO_VAR(min_mv, max_mv, max_ma) \ + (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \ + 
PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma)) + +#define APDO_TYPE_PPS 0 + +#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */ +#define PDO_APDO_TYPE_MASK 0x3 + +#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT) + +#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */ +#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */ +#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */ + +#define PDO_PPS_APDO_VOLT_MASK 0xff +#define PDO_PPS_APDO_CURR_MASK 0x7f + +#define PDO_PPS_APDO_MIN_VOLT(mv) \ + ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT) +#define PDO_PPS_APDO_MAX_VOLT(mv) \ + ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT) +#define PDO_PPS_APDO_MAX_CURR(ma) \ + ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT) + +#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \ + (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \ + PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \ + PDO_PPS_APDO_MAX_CURR(max_ma)) + + #endif /* __DT_POWER_DELIVERY_H */ diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h new file mode 100644 index 0000000..8a21d6a --- /dev/null +++ b/include/keys/asymmetric-parser.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Asymmetric public-key cryptography data parser + * + * See Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_ASYMMETRIC_PARSER_H +#define _KEYS_ASYMMETRIC_PARSER_H + +/* + * Key data parser. Called during key instantiation. + */ +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + + /* Attempt to parse a key from the data blob passed to add_key() or + * keyctl_instantiate(). Should also generate a proposed description + * that the caller can optionally use for the key. + * + * Return EBADMSG if not recognised. + */ + int (*parse)(struct key_preparsed_payload *prep); +}; + +extern int register_asymmetric_key_parser(struct asymmetric_key_parser *); +extern void unregister_asymmetric_key_parser(struct asymmetric_key_parser *); + +#endif /* _KEYS_ASYMMETRIC_PARSER_H */ diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h new file mode 100644 index 0000000..2140781 --- /dev/null +++ b/include/keys/asymmetric-subtype.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Asymmetric public-key cryptography key subtype + * + * See Documentation/crypto/asymmetric-keys.txt + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_ASYMMETRIC_SUBTYPE_H +#define _KEYS_ASYMMETRIC_SUBTYPE_H + +#include +#include + +struct kernel_pkey_query; +struct kernel_pkey_params; +struct public_key_signature; + +/* + * Keys of this type declare a subtype that indicates the handlers and + * capabilities. 
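+ *
+ * The in-kernel public-key code, for instance, registers a "public_key"
+ * subtype that it attaches to keys parsed from X.509 certificates (named
+ * here for illustration; it is defined elsewhere in the crypto tree).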
+ */
+struct asymmetric_key_subtype {
+ struct module *owner;
+ const char *name;
+ unsigned short name_len; /* length of name */
+
+ /* Describe a key of this subtype for /proc/keys */
+ void (*describe)(const struct key *key, struct seq_file *m);
+
+ /* Destroy a key of this subtype */
+ void (*destroy)(void *payload_crypto, void *payload_auth);
+
+ int (*query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+
+ /* Encrypt/decrypt/sign data */
+ int (*eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+
+ /* Verify the signature on a key of this subtype (optional) */
+ int (*verify_signature)(const struct key *key,
+ const struct public_key_signature *sig);
+};
+
+/**
+ * asymmetric_key_subtype - Get the subtype from an asymmetric key
+ * @key: The key of interest.
+ *
+ * Retrieves and returns the subtype pointer of the asymmetric key from the
+ * type-specific data attached to the key.
+ */
+static inline
+struct asymmetric_key_subtype *asymmetric_key_subtype(const struct key *key)
+{
+ return key->payload.data[asym_subtype];
+}
+
+#endif /* _KEYS_ASYMMETRIC_SUBTYPE_H */
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
new file mode 100644
index 0000000..91cfd9b
--- /dev/null
+++ b/include/keys/asymmetric-type.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Asymmetric Public-key cryptography key type interface
+ *
+ * See Documentation/crypto/asymmetric-keys.txt
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _KEYS_ASYMMETRIC_TYPE_H
+#define _KEYS_ASYMMETRIC_TYPE_H
+
+#include
+#include
+
+extern struct key_type key_type_asymmetric;
+
+/*
+ * The key payload is four words. The asymmetric-type key uses them as
+ * follows:
+ */
+enum asymmetric_payload_bits {
+ asym_crypto, /* The data representing the key */
+ asym_subtype, /* Pointer to an asymmetric_key_subtype struct */
+ asym_key_ids, /* Pointer to an asymmetric_key_ids struct */
+ asym_auth /* The key's authorisation (signature, parent key ID) */
+};
+
+/*
+ * Identifiers for an asymmetric key ID. We have three ways of looking up a
+ * key derived from an X.509 certificate:
+ *
+ * (1) Serial Number & Issuer. Non-optional. This is the only valid way to
+ * map a PKCS#7 signature to an X.509 certificate.
+ *
+ * (2) Issuer & Subject Unique IDs. Optional. These were the original way to
+ * match X.509 certificates, but have fallen into disuse in favour of (3).
+ *
+ * (3) Auth & Subject Key Identifiers. Optional. SKIDs are only provided on
+ * CA keys that are intended to sign other keys, so don't appear in end
+ * user certificates unless forced.
+ *
+ * We could also support a PGP key identifier, which is just a SHA1 sum of the
+ * public key and certain parameters, but since we don't support PGP keys at
+ * the moment, we shall ignore those.
+ *
+ * What we actually do is provide a place where binary identifiers can be
+ * stashed and then compare against them when checking for an id match.
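+ *
+ * A keyring search can then name a stashed identifier directly, e.g. a
+ * description of the form "id:01020304" requests a match against the
+ * hex-encoded tail of a key ID (an illustrative lookup; the value shown
+ * is hypothetical).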
+ */ +struct asymmetric_key_id { + unsigned short len; + unsigned char data[]; +}; + +struct asymmetric_key_ids { + void *id[2]; +}; + +extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1, + const struct asymmetric_key_id *kid2); + +extern bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1, + const struct asymmetric_key_id *kid2); + +extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1, + size_t len_1, + const void *val_2, + size_t len_2); +static inline +const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) +{ + return key->payload.data[asym_key_ids]; +} + +extern struct key *find_asymmetric_key(struct key *keyring, + const struct asymmetric_key_id *id_0, + const struct asymmetric_key_id *id_1, + bool partial); + +/* + * The payload is at the discretion of the subtype. + */ + +#endif /* _KEYS_ASYMMETRIC_TYPE_H */ diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h new file mode 100644 index 0000000..3fee04f --- /dev/null +++ b/include/keys/big_key-type.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Big capacity key type. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_BIG_KEY_TYPE_H +#define _KEYS_BIG_KEY_TYPE_H + +#include + +extern struct key_type key_type_big_key; + +extern int big_key_preparse(struct key_preparsed_payload *prep); +extern void big_key_free_preparse(struct key_preparsed_payload *prep); +extern void big_key_revoke(struct key *key); +extern void big_key_destroy(struct key *key); +extern void big_key_describe(const struct key *big_key, struct seq_file *m); +extern long big_key_read(const struct key *key, char *buffer, size_t buflen); + +#endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/ceph-type.h b/include/keys/ceph-type.h new file mode 100644 index 0000000..aa6d3e0 --- /dev/null +++ b/include/keys/ceph-type.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _KEYS_CEPH_TYPE_H +#define _KEYS_CEPH_TYPE_H + +#include + +extern struct key_type key_type_ceph; + +#endif diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h new file mode 100644 index 0000000..218ca22 --- /dev/null +++ b/include/keys/dns_resolver-type.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* DNS resolver key type + * + * Copyright (C) 2010 Wang Lei. All Rights Reserved. 
+ * Written by Wang Lei (wang840925@gmail.com) + */ + +#ifndef _KEYS_DNS_RESOLVER_TYPE_H +#define _KEYS_DNS_RESOLVER_TYPE_H + +#include + +extern struct key_type key_type_dns_resolver; + +extern int request_dns_resolver_key(const char *description, + const char *callout_info, + char **data); + +#endif /* _KEYS_DNS_RESOLVER_TYPE_H */ diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h new file mode 100644 index 0000000..9e9ccb2 --- /dev/null +++ b/include/keys/encrypted-type.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2010 IBM Corporation + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Mimi Zohar + * Roberto Sassu + */ + +#ifndef _KEYS_ENCRYPTED_TYPE_H +#define _KEYS_ENCRYPTED_TYPE_H + +#include +#include + +struct encrypted_key_payload { + struct rcu_head rcu; + char *format; /* datablob: format */ + char *master_desc; /* datablob: master key name */ + char *datalen; /* datablob: decrypted key length */ + u8 *iv; /* datablob: iv */ + u8 *encrypted_data; /* datablob: encrypted data */ + unsigned short datablob_len; /* length of datablob */ + unsigned short decrypted_datalen; /* decrypted data length */ + unsigned short payload_datalen; /* payload data length */ + unsigned short encrypted_key_format; /* encrypted key format */ + u8 *decrypted_data; /* decrypted data */ + u8 payload_data[0]; /* payload data + datablob + hmac */ +}; + +extern struct key_type key_type_encrypted; + +#endif /* _KEYS_ENCRYPTED_TYPE_H */ diff --git a/include/keys/keyring-type.h b/include/keys/keyring-type.h new file mode 100644 index 0000000..1dc8386 --- /dev/null +++ b/include/keys/keyring-type.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Keyring key type + * + * Copyright (C) 2008, 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_KEYRING_TYPE_H +#define _KEYS_KEYRING_TYPE_H + +#include +#include + +#endif /* _KEYS_KEYRING_TYPE_H */ diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h new file mode 100644 index 0000000..36b89a9 --- /dev/null +++ b/include/keys/request_key_auth-type.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* request_key authorisation token key type + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H +#define _KEYS_REQUEST_KEY_AUTH_TYPE_H + +#include + +/* + * Authorisation record for request_key(). + */ +struct request_key_auth { + struct rcu_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +} __randomize_layout; + +static inline struct request_key_auth *get_request_key_auth(const struct key *key) +{ + return key->payload.data[0]; +} + + +#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */ diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h new file mode 100644 index 0000000..a183278 --- /dev/null +++ b/include/keys/rxrpc-type.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* RxRPC key type + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_RXRPC_TYPE_H +#define _KEYS_RXRPC_TYPE_H + +#include + +/* + * key type for AF_RXRPC keys + */ +extern struct key_type key_type_rxrpc; + +extern struct key *rxrpc_get_null_key(const char *); + +/* + * RxRPC key for Kerberos IV (type-2 security) + */ +struct rxkad_key { + u32 vice_id; + u32 start; /* time at which ticket starts */ + u32 expiry; /* time at which ticket expires */ + u32 kvno; /* key version number */ + u8 primary_flag; /* T if key for primary cell for this user */ + u16 ticket_len; /* length of ticket[] */ + u8 session_key[8]; /* DES session key */ + u8 ticket[0]; /* the encrypted ticket */ +}; + +/* + * Kerberos 5 principal + * name/name/name@realm + */ +struct krb5_principal { + u8 n_name_parts; /* N of parts of the name part of the principal */ + char **name_parts; /* parts of the name part of the principal */ + char *realm; /* parts of the realm part of the principal */ +}; + +/* + * Kerberos 5 tagged data + */ +struct krb5_tagged_data { + /* for tag value, see /usr/include/krb5/krb5.h + * - KRB5_AUTHDATA_* for auth data + * - + */ + s32 tag; + u32 data_len; + u8 *data; +}; + +/* + * RxRPC key for Kerberos V (type-5 security) + */ +struct rxk5_key { + u64 authtime; /* time at which auth token generated */ + u64 starttime; /* time at which auth token starts */ + u64 endtime; /* time at which auth token expired */ + u64 renew_till; /* time to which auth token can be renewed */ + s32 is_skey; /* T if ticket is encrypted in another ticket's + * skey */ + s32 flags; /* mask of TKT_FLG_* bits (krb5/krb5.h) */ + struct krb5_principal client; /* client principal name */ + struct krb5_principal server; /* server principal name */ + u16 ticket_len; /* length of ticket */ + u16 ticket2_len; /* length of second ticket */ + u8 n_authdata; /* number of authorisation data elements */ + u8 n_addresses; /* number of addresses */ + struct krb5_tagged_data session; /* session data; tag is enctype */ + struct krb5_tagged_data *addresses; /* addresses */ + u8 *ticket; /* krb5 ticket */ + u8 *ticket2; /* second krb5 ticket, if related to ticket (via + * DUPLICATE-SKEY or ENC-TKT-IN-SKEY) */ + struct krb5_tagged_data *authdata; /* authorisation data */ +}; + +/* + * list of tokens attached to an rxrpc key + */ +struct rxrpc_key_token { + u16 security_index; /* RxRPC header security index */ + struct rxrpc_key_token *next; /* the next token in the list */ + union { + struct rxkad_key *kad; + struct rxk5_key *k5; + }; +}; + +/* + * structure of raw payloads passed to add_key() or instantiate key + */ +struct rxrpc_key_data_v1 { + u16 security_index; + u16 ticket_length; + u32 expiry; /* time_t */ + u32 kvno; + u8 session_key[8]; + u8 ticket[0]; +}; + +/* + * AF_RXRPC key payload derived from XDR format + * - based on openafs-1.4.10/src/auth/afs_token.xg + */ +#define AFSTOKEN_LENGTH_MAX 16384 /* max payload size */ +#define AFSTOKEN_STRING_MAX 256 /* max small string length */ +#define AFSTOKEN_DATA_MAX 64 /* max small data length */ +#define AFSTOKEN_CELL_MAX 64 /* max cellname length */ +#define AFSTOKEN_MAX 8 /* max tokens per payload */ +#define AFSTOKEN_BDATALN_MAX 16384 /* max big data length */ +#define AFSTOKEN_RK_TIX_MAX 12000 /* max RxKAD ticket size */ +#define AFSTOKEN_GK_KEY_MAX 64 /* max GSSAPI key size */ +#define AFSTOKEN_GK_TOKEN_MAX 16384 /* max GSSAPI token size */ +#define AFSTOKEN_K5_COMPONENTS_MAX 16 /* max K5 components */ +#define AFSTOKEN_K5_NAME_MAX 128 /* max K5 name length */ +#define 
AFSTOKEN_K5_REALM_MAX 64 /* max K5 realm name length */ +#define AFSTOKEN_K5_TIX_MAX 16384 /* max K5 ticket size */ +#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */ +#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */ + +/* + * Truncate a time64_t to the range from 1970 to 2106 as in the network + * protocol. + */ +static inline u32 rxrpc_time64_to_u32(time64_t time) +{ + if (time < 0) + return 0; + + if (time > UINT_MAX) + return UINT_MAX; + + return (u32)time; +} + +/* + * Extend u32 back to time64_t using the same 1970-2106 range. + */ +static inline time64_t rxrpc_u32_to_time64(u32 time) +{ + return (time64_t)time; +} + +#endif /* _KEYS_RXRPC_TYPE_H */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h new file mode 100644 index 0000000..c1a96fd --- /dev/null +++ b/include/keys/system_keyring.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* System keyring containing trusted public keys. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _KEYS_SYSTEM_KEYRING_H +#define _KEYS_SYSTEM_KEYRING_H + +#include + +#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING + +extern int restrict_link_by_builtin_trusted(struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); + +#else +#define restrict_link_by_builtin_trusted restrict_link_reject +#endif + +#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING +extern int restrict_link_by_builtin_and_secondary_trusted( + struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); +#else +#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted +#endif + +#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING +extern int mark_hash_blacklisted(const char *hash); +extern int is_hash_blacklisted(const u8 *hash, size_t hash_len, + const char *type); +#else +static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len, + const char *type) +{ + return 0; +} +#endif + +#ifdef CONFIG_IMA_BLACKLIST_KEYRING +extern struct key *ima_blacklist_keyring; + +static inline struct key *get_ima_blacklist_keyring(void) +{ + return ima_blacklist_keyring; +} +#else +static inline struct key *get_ima_blacklist_keyring(void) +{ + return NULL; +} +#endif /* CONFIG_IMA_BLACKLIST_KEYRING */ + +#if defined(CONFIG_INTEGRITY_PLATFORM_KEYRING) && \ + defined(CONFIG_SYSTEM_TRUSTED_KEYRING) +extern void __init set_platform_trusted_keys(struct key *keyring); +#else +static inline void set_platform_trusted_keys(struct key *keyring) +{ +} +#endif + +#endif /* _KEYS_SYSTEM_KEYRING_H */ diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h new file mode 100644 index 0000000..a94c03a --- /dev/null +++ b/include/keys/trusted-type.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2010 IBM Corporation + * Author: David Safford + */ + +#ifndef _KEYS_TRUSTED_TYPE_H +#define _KEYS_TRUSTED_TYPE_H + +#include +#include +#include + +#define MIN_KEY_SIZE 32 +#define MAX_KEY_SIZE 128 +#define MAX_BLOB_SIZE 512 +#define MAX_PCRINFO_SIZE 64 +#define MAX_DIGEST_SIZE 64 + +struct trusted_key_payload { + struct rcu_head rcu; + unsigned int key_len; + unsigned int blob_len; + unsigned char migratable; + unsigned char key[MAX_KEY_SIZE + 1]; + unsigned char blob[MAX_BLOB_SIZE]; +}; + +struct trusted_key_options { + uint16_t keytype; + uint32_t keyhandle; + unsigned char 
keyauth[TPM_DIGEST_SIZE]; + unsigned char blobauth[TPM_DIGEST_SIZE]; + uint32_t pcrinfo_len; + unsigned char pcrinfo[MAX_PCRINFO_SIZE]; + int pcrlock; + uint32_t hash; + uint32_t policydigest_len; + unsigned char policydigest[MAX_DIGEST_SIZE]; + uint32_t policyhandle; +}; + +extern struct key_type key_type_trusted; + +#endif /* _KEYS_TRUSTED_TYPE_H */ diff --git a/include/keys/trusted.h b/include/keys/trusted.h new file mode 100644 index 0000000..0071298 --- /dev/null +++ b/include/keys/trusted.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TRUSTED_KEY_H +#define __TRUSTED_KEY_H + +/* implementation specific TPM constants */ +#define MAX_BUF_SIZE 1024 +#define TPM_GETRANDOM_SIZE 14 +#define TPM_OSAP_SIZE 36 +#define TPM_OIAP_SIZE 10 +#define TPM_SEAL_SIZE 87 +#define TPM_UNSEAL_SIZE 104 +#define TPM_SIZE_OFFSET 2 +#define TPM_RETURN_OFFSET 6 +#define TPM_DATA_OFFSET 10 + +#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset])) +#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset]) +#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset])) + +struct tpm_buf { + int len; + unsigned char data[MAX_BUF_SIZE]; +}; + +#define INIT_BUF(tb) (tb->len = 0) + +struct osapsess { + uint32_t handle; + unsigned char secret[SHA1_DIGEST_SIZE]; + unsigned char enonce[TPM_NONCE_SIZE]; +}; + +/* discrete values, but have to store in uint16_t for TPM use */ +enum { + SEAL_keytype = 1, + SRK_keytype = 4 +}; + +int TSS_authhmac(unsigned char *digest, const unsigned char *key, + unsigned int keylen, unsigned char *h1, + unsigned char *h2, unsigned int h3, ...); +int TSS_checkhmac1(unsigned char *buffer, + const uint32_t command, + const unsigned char *ononce, + const unsigned char *key, + unsigned int keylen, ...); + +int trusted_tpm_send(unsigned char *cmd, size_t buflen); +int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce); + +#define TPM_DEBUG 0 + +#if TPM_DEBUG +static inline void dump_options(struct trusted_key_options *o) +{ + pr_info("trusted_key: sealing key type %d\n", o->keytype); + pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle); + pr_info("trusted_key: pcrlock %d\n", o->pcrlock); + pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len); + print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE, + 16, 1, o->pcrinfo, o->pcrinfo_len, 0); +} + +static inline void dump_payload(struct trusted_key_payload *p) +{ + pr_info("trusted_key: key_len %d\n", p->key_len); + print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE, + 16, 1, p->key, p->key_len, 0); + pr_info("trusted_key: bloblen %d\n", p->blob_len); + print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE, + 16, 1, p->blob, p->blob_len, 0); + pr_info("trusted_key: migratable %d\n", p->migratable); +} + +static inline void dump_sess(struct osapsess *s) +{ + print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE, + 16, 1, &s->handle, 4, 0); + pr_info("trusted-key: secret:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, + 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0); + pr_info("trusted-key: enonce:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, + 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0); +} + +static inline void dump_tpm_buf(unsigned char *buf) +{ + int len; + + pr_info("\ntrusted-key: tpm buffer\n"); + len = LOAD32(buf, TPM_SIZE_OFFSET); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0); +} +#else +static inline void dump_options(struct trusted_key_options *o) +{ +} + +static inline void dump_payload(struct trusted_key_payload *p) +{ +} 
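+
+/* In the !TPM_DEBUG case, these helpers and the two below are empty
+ * inline stubs, so all the debug dump call sites compile away. */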
+
+static inline void dump_sess(struct osapsess *s)
+{
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+}
+#endif
+
+static inline void store8(struct tpm_buf *buf, const unsigned char value)
+{
+ buf->data[buf->len++] = value;
+}
+
+static inline void store16(struct tpm_buf *buf, const uint16_t value)
+{
+ *(uint16_t *) & buf->data[buf->len] = htons(value);
+ buf->len += sizeof value;
+}
+
+static inline void store32(struct tpm_buf *buf, const uint32_t value)
+{
+ *(uint32_t *) & buf->data[buf->len] = htonl(value);
+ buf->len += sizeof value;
+}
+
+static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
+ const int len)
+{
+ memcpy(buf->data + buf->len, in, len);
+ buf->len += len;
+}
+#endif
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
new file mode 100644
index 0000000..be61fcd
--- /dev/null
+++ b/include/keys/user-type.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* user-type.h: User-defined key type
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _KEYS_USER_TYPE_H
+#define _KEYS_USER_TYPE_H
+
+#include
+#include
+
+#ifdef CONFIG_KEYS
+
+/*****************************************************************************/
+/*
+ * the payload for a key of type "user" or "logon"
+ * - once filled in and attached to a key:
+ * - the payload struct is invariant and may not be changed, only replaced
+ * - the payload must be read with RCU procedures or with the key semaphore
+ * held
+ * - the payload may only be replaced with the key semaphore write-locked
+ * - the key's data length is the size of the actual data, not including the
+ * payload wrapper
+ */
+struct user_key_payload {
+ struct rcu_head rcu; /* RCU destructor */
+ unsigned short datalen; /* length of this data */
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
+};
+
+extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
+
+struct key_preparsed_payload;
+
+extern int user_preparse(struct key_preparsed_payload *prep);
+extern void user_free_preparse(struct key_preparsed_payload *prep);
+extern int user_update(struct key *key, struct key_preparsed_payload *prep);
+extern void user_revoke(struct key *key);
+extern void user_destroy(struct key *key);
+extern void user_describe(const struct key *user, struct seq_file *m);
+extern long user_read(const struct key *key, char *buffer, size_t buflen);
+
+static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key)
+{
+ return (struct user_key_payload *)dereference_key_rcu(key);
+}
+
+static inline struct user_key_payload *user_key_payload_locked(const struct key *key)
+{
+ return (struct user_key_payload *)dereference_key_locked((struct key *)key);
+}
+
+#endif /* CONFIG_KEYS */
+
+#endif /* _KEYS_USER_TYPE_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
new file mode 100644
index 0000000..d120e6c
--- /dev/null
+++ b/include/kvm/arm_arch_timer.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier + */ + +#ifndef __ASM_ARM_KVM_ARCH_TIMER_H +#define __ASM_ARM_KVM_ARCH_TIMER_H + +#include +#include + +enum kvm_arch_timers { + TIMER_PTIMER, + TIMER_VTIMER, + NR_KVM_TIMERS +}; + +enum kvm_arch_timer_regs { + TIMER_REG_CNT, + TIMER_REG_CVAL, + TIMER_REG_TVAL, + TIMER_REG_CTL, +}; + +struct arch_timer_context { + struct kvm_vcpu *vcpu; + + /* Registers: control register, timer value */ + u32 cnt_ctl; + u64 cnt_cval; + + /* Timer IRQ */ + struct kvm_irq_level irq; + + /* Virtual offset */ + u64 cntvoff; + + /* Emulated Timer (may be unused) */ + struct hrtimer hrtimer; + + /* + * We have multiple paths which can save/restore the timer state onto + * the hardware, so we need some way of keeping track of where the + * latest state is. + */ + bool loaded; + + /* Duplicated state from arch_timer.c for convenience */ + u32 host_timer_irq; + u32 host_timer_irq_flags; +}; + +struct timer_map { + struct arch_timer_context *direct_vtimer; + struct arch_timer_context *direct_ptimer; + struct arch_timer_context *emul_ptimer; +}; + +struct arch_timer_cpu { + struct arch_timer_context timers[NR_KVM_TIMERS]; + + /* Background timer used when the guest is not running */ + struct hrtimer bg_timer; + + /* Is the timer enabled */ + bool enabled; +}; + +int kvm_timer_hyp_init(bool); +int kvm_timer_enable(struct kvm_vcpu *vcpu); +int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); +bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); +void kvm_timer_update_run(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); + +u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); +int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); + +int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); + +bool kvm_timer_is_pending(struct kvm_vcpu *vcpu); + +u64 kvm_phys_timer_read(void); + +void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); + +void kvm_timer_init_vhe(void); + +bool kvm_arch_timer_get_input_level(int vintid); + +#define vcpu_timer(v) (&(v)->arch.timer_cpu) +#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)]) +#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER]) +#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER]) + +#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers) + +u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, + enum kvm_arch_timers tmr, + enum kvm_arch_timer_regs treg); +void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, + enum kvm_arch_timers tmr, + enum kvm_arch_timer_regs treg, + u64 val); + +#endif diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h new file mode 100644 index 0000000..6db0304 --- /dev/null +++ b/include/kvm/arm_pmu.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Linaro Ltd. 
+ * Author: Shannon Zhao + */ + +#ifndef __ASM_ARM_KVM_PMU_H +#define __ASM_ARM_KVM_PMU_H + +#include +#include + +#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) +#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1) + +#ifdef CONFIG_KVM_ARM_PMU + +struct kvm_pmc { + u8 idx; /* index into the pmu->pmc array */ + struct perf_event *perf_event; +}; + +struct kvm_pmu { + int irq_num; + struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; + DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS); + bool ready; + bool created; + bool irq_level; +}; + +#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); +bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu); +void kvm_pmu_update_run(struct kvm_vcpu *vcpu); +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, + u64 select_idx); +bool kvm_arm_support_pmu_v3(void); +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); +#else +struct kvm_pmu { +}; + +#define kvm_arm_pmu_v3_ready(v) (false) +#define kvm_arm_pmu_irq_initialized(v) (false) +static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx) +{ + return 0; +} +static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx, u64 val) {} +static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} +static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) +{ + return false; +} +static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, + u64 data, u64 select_idx) {} +static inline bool kvm_arm_support_pmu_v3(void) { return false; } +static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct 
kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) +{ + return 0; +} +#endif + +#endif diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h new file mode 100644 index 0000000..632e78b --- /dev/null +++ b/include/kvm/arm_psci.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier + */ + +#ifndef __KVM_ARM_PSCI_H__ +#define __KVM_ARM_PSCI_H__ + +#include +#include + +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) + +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 + +/* + * We need the KVM pointer independently from the vcpu as we can call + * this from HYP, and need to apply kern_hyp_va on it... + */ +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +{ + /* + * Our PSCI implementation stays the same across versions from + * v0.2 onward, only adding the few mandatory functions (such + * as FEATURES with 1.0) that are required by newer + * revisions. It is thus safe to return the latest, unless + * userspace has instructed us otherwise. + */ + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { + if (vcpu->kvm->arch.psci_version) + return vcpu->kvm->arch.psci_version; + + return KVM_ARM_PSCI_LATEST; + } + + return KVM_ARM_PSCI_0_1; +} + + +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); + +struct kvm_one_reg; + +int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); + +#endif /* __KVM_ARM_PSCI_H__ */ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h new file mode 100644 index 0000000..af4f09c --- /dev/null +++ b/include/kvm/arm_vgic.h @@ -0,0 +1,402 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015, 2016 ARM Ltd. 
+ */
+#ifndef __KVM_ARM_VGIC_H
+#define __KVM_ARM_VGIC_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#define VGIC_V3_MAX_CPUS 512
+#define VGIC_V2_MAX_CPUS 8
+#define VGIC_NR_IRQS_LEGACY 256
+#define VGIC_NR_SGIS 16
+#define VGIC_NR_PPIS 16
+#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
+#define VGIC_MAX_SPI 1019
+#define VGIC_MAX_RESERVED 1023
+#define VGIC_MIN_LPI 8192
+#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
+
+#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
+#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
+ (irq) <= VGIC_MAX_SPI)
+
+enum vgic_type {
+ VGIC_V2, /* Good ol' GICv2 */
+ VGIC_V3, /* New fancy GICv3 */
+};
+
+/* same for all guests, as depending only on the _host's_ GIC model */
+struct vgic_global {
+ /* type of the host GIC */
+ enum vgic_type type;
+
+ /* Physical address of vgic virtual cpu interface */
+ phys_addr_t vcpu_base;
+
+ /* GICV mapping, kernel VA */
+ void __iomem *vcpu_base_va;
+ /* GICV mapping, HYP VA */
+ void __iomem *vcpu_hyp_va;
+
+ /* virtual control interface mapping, kernel VA */
+ void __iomem *vctrl_base;
+ /* virtual control interface mapping, HYP VA */
+ void __iomem *vctrl_hyp;
+
+ /* Number of implemented list registers */
+ int nr_lr;
+
+ /* Maintenance IRQ number */
+ unsigned int maint_irq;
+
+ /* maximum number of VCPUs allowed (GICv2 limits us to 8) */
+ int max_gic_vcpus;
+
+ /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+ bool can_emulate_gicv2;
+
+ /* Hardware has GICv4? */
+ bool has_gicv4;
+
+ /* GIC system register CPU interface */
+ struct static_key_false gicv3_cpuif;
+
+ u32 ich_vtr_el2;
+};
+
+extern struct vgic_global kvm_vgic_global_state;
+
+#define VGIC_V2_MAX_LRS (1 << 6)
+#define VGIC_V3_MAX_LRS 16
+#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
+
+enum vgic_irq_config {
+ VGIC_CONFIG_EDGE = 0,
+ VGIC_CONFIG_LEVEL
+};
+
+struct vgic_irq {
+ raw_spinlock_t irq_lock; /* Protects the content of the struct */
+ struct list_head lpi_list; /* Used to link all LPIs together */
+ struct list_head ap_list;
+
+ struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
+ * SPIs and LPIs: The VCPU whose ap_list
+ * this is queued on.
+ */
+
+ struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should
+ * be sent to, as a result of the
+ * targets reg (v2) or the
+ * affinity reg (v3).
+ */
+
+ u32 intid; /* Guest visible INTID */
+ bool line_level; /* Level only */
+ bool pending_latch; /* The pending latch state used to calculate
+ * the pending state for both level
+ * and edge triggered IRQs. */
+ bool active; /* not used for LPIs */
+ bool enabled;
+ bool hw; /* Tied to HW IRQ */
+ struct kref refcount; /* Used for LPIs */
+ u32 hwintid; /* HW INTID number */
+ unsigned int host_irq; /* linux irq corresponding to hwintid */
+ union {
+ u8 targets; /* GICv2 target VCPUs mask */
+ u32 mpidr; /* GICv3 target VCPU */
+ };
+ u8 source; /* GICv2 SGIs only */
+ u8 active_source; /* GICv2 SGIs only */
+ u8 priority;
+ u8 group; /* 0 == group 0, 1 == group 1 */
+ enum vgic_irq_config config; /* Level or edge */
+
+ /*
+ * Callback function pointer to in-kernel devices that can tell us the
+ * state of the input level of mapped level-triggered IRQ faster than
+ * peeking into the physical GIC.
+ *
+ * Always called in non-preemptible section and the functions can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
+ * IRQs.
+ */ + bool (*get_input_level)(int vintid); + + void *owner; /* Opaque pointer to reserve an interrupt + for in-kernel devices. */ +}; + +struct vgic_register_region; +struct vgic_its; + +enum iodev_type { + IODEV_CPUIF, + IODEV_DIST, + IODEV_REDIST, + IODEV_ITS +}; + +struct vgic_io_device { + gpa_t base_addr; + union { + struct kvm_vcpu *redist_vcpu; + struct vgic_its *its; + }; + const struct vgic_register_region *regions; + enum iodev_type iodev_type; + int nr_regions; + struct kvm_io_device dev; +}; + +struct vgic_its { + /* The base address of the ITS control register frame */ + gpa_t vgic_its_base; + + bool enabled; + struct vgic_io_device iodev; + struct kvm_device *dev; + + /* These registers correspond to GITS_BASER{0,1} */ + u64 baser_device_table; + u64 baser_coll_table; + + /* Protects the command queue */ + struct mutex cmd_lock; + u64 cbaser; + u32 creadr; + u32 cwriter; + + /* migration ABI revision in use */ + u32 abi_rev; + + /* Protects the device and collection lists */ + struct mutex its_lock; + struct list_head device_list; + struct list_head collection_list; +}; + +struct vgic_state_iter; + +struct vgic_redist_region { + u32 index; + gpa_t base; + u32 count; /* number of redistributors or 0 if single region */ + u32 free_index; /* index of the next free redistributor */ + struct list_head list; +}; + +struct vgic_dist { + bool in_kernel; + bool ready; + bool initialized; + + /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ + u32 vgic_model; + + /* Implementation revision as reported in the GICD_IIDR */ + u32 implementation_rev; + + /* Userspace can write to GICv2 IGROUPR */ + bool v2_groups_user_writable; + + /* Do injected MSIs require an additional device ID? */ + bool msis_require_devid; + + int nr_spis; + + /* base addresses in guest physical address space: */ + gpa_t vgic_dist_base; /* distributor */ + union { + /* either a GICv2 CPU interface */ + gpa_t vgic_cpu_base; + /* or a number of GICv3 redistributor regions */ + struct list_head rd_regions; + }; + + /* distributor enabled */ + bool enabled; + + struct vgic_irq *spis; + + struct vgic_io_device dist_iodev; + + bool has_its; + + /* + * Contains the attributes and gpa of the LPI configuration table. + * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share + * one address across all redistributors. + * GICv3 spec: 6.1.2 "LPI Configuration tables" + */ + u64 propbaser; + + /* Protects the lpi_list and the count value below. */ + raw_spinlock_t lpi_list_lock; + struct list_head lpi_list_head; + int lpi_list_count; + + /* LPI translation cache */ + struct list_head lpi_translation_cache; + + /* used by vgic-debug */ + struct vgic_state_iter *iter; + + /* + * GICv4 ITS per-VM data, containing the IRQ domain, the VPE + * array, the property table pointer as well as allocation + * data. This essentially ties the Linux IRQ core and ITS + * together, and avoids leaking KVM's data structures anywhere + * else. + */ + struct its_vm its_vm; +}; + +struct vgic_v2_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_apr; + u32 vgic_lr[VGIC_V2_MAX_LRS]; +}; + +struct vgic_v3_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_sre; /* Restored only, change ignored */ + u32 vgic_ap0r[4]; + u32 vgic_ap1r[4]; + u64 vgic_lr[VGIC_V3_MAX_LRS]; + + /* + * GICv4 ITS per-VPE data, containing the doorbell IRQ, the + * pending table pointer, the its_vm pointer and a few other + * HW specific things. As for the its_vm structure, this is + * linking the Linux IRQ subsystem and the ITS together. 
+ */ + struct its_vpe its_vpe; +}; + +struct vgic_cpu { + /* CPU vif control registers for world switch */ + union { + struct vgic_v2_cpu_if vgic_v2; + struct vgic_v3_cpu_if vgic_v3; + }; + + unsigned int used_lrs; + struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; + + raw_spinlock_t ap_list_lock; /* Protects the ap_list */ + + /* + * List of IRQs that this VCPU should consider because they are either + * Active or Pending (hence the name; AP list), or because they recently + * were one of the two and need to be migrated off this list to another + * VCPU. + */ + struct list_head ap_list_head; + + /* + * Members below are used with GICv3 emulation only and represent + * parts of the redistributor. + */ + struct vgic_io_device rd_iodev; + struct vgic_redist_region *rdreg; + + /* Contains the attributes and gpa of the LPI pending tables. */ + u64 pendbaser; + + bool lpis_enabled; + + /* Cache guest priority bits */ + u32 num_pri_bits; + + /* Cache guest interrupt ID bits */ + u32 num_id_bits; +}; + +extern struct static_key_false vgic_v2_cpuif_trap; +extern struct static_key_false vgic_v3_cpuif_trap; + +int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); +void kvm_vgic_early_init(struct kvm *kvm); +int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); +int kvm_vgic_create(struct kvm *kvm, u32 type); +void kvm_vgic_destroy(struct kvm *kvm); +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_vgic_map_resources(struct kvm *kvm); +int kvm_vgic_hyp_init(void); +void kvm_vgic_init_cpu_hardware(void); + +int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level, void *owner); +int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, + u32 vintid, bool (*get_input_level)(int vindid)); +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); + +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); + +void kvm_vgic_load(struct kvm_vcpu *vcpu); +void kvm_vgic_put(struct kvm_vcpu *vcpu); +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); + +#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) +#define vgic_initialized(k) ((k)->arch.vgic.initialized) +#define vgic_ready(k) ((k)->arch.vgic.ready) +#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \ + ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) + +bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); +void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); +void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); + +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1); + +/** + * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW + * + * The host's GIC naturally limits the maximum amount of VCPUs a guest + * can use. 
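+ *
+ * A hedged usage sketch (nr_vcpus is an assumed VMM-side value):
+ *
+ *	if (nr_vcpus > kvm_vgic_get_max_vcpus())
+ *		return -E2BIG;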
+ */
+static inline int kvm_vgic_get_max_vcpus(void)
+{
+	return kvm_vgic_global_state.max_gic_vcpus;
+}
+
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
+/**
+ * kvm_vgic_setup_default_irq_routing:
+ * Setup a default flat gsi routing table mapping all SPIs
+ */
+int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
+
+int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
+
+struct kvm_kernel_irq_routing_entry;
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
+			       struct kvm_kernel_irq_routing_entry *irq_entry);
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+				 struct kvm_kernel_irq_routing_entry *irq_entry);
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_ARM_VGIC_H */
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
new file mode 100644
index 0000000..d75fc43
--- /dev/null
+++ b/include/kvm/iodev.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __KVM_IODEV_H__
+#define __KVM_IODEV_H__
+
+#include <linux/kvm_types.h>
+#include <asm/errno.h>
+
+struct kvm_io_device;
+struct kvm_vcpu;
+
+/**
+ * kvm_io_device_ops are called under kvm slots_lock.
+ * read and write handlers return 0 if the transaction has been handled,
+ * or non-zero to have it passed to the next device.
+ **/
+struct kvm_io_device_ops {
+	int (*read)(struct kvm_vcpu *vcpu,
+		    struct kvm_io_device *this,
+		    gpa_t addr,
+		    int len,
+		    void *val);
+	int (*write)(struct kvm_vcpu *vcpu,
+		     struct kvm_io_device *this,
+		     gpa_t addr,
+		     int len,
+		     const void *val);
+	void (*destructor)(struct kvm_io_device *this);
+};
+
+
+struct kvm_io_device {
+	const struct kvm_io_device_ops *ops;
+};
+
+static inline void kvm_iodevice_init(struct kvm_io_device *dev,
+				     const struct kvm_io_device_ops *ops)
+{
+	dev->ops = ops;
+}
+
+static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu,
+				    struct kvm_io_device *dev, gpa_t addr,
+				    int l, void *v)
+{
+	return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v)
+				: -EOPNOTSUPP;
+}
+
+static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
+				     struct kvm_io_device *dev, gpa_t addr,
+				     int l, const void *v)
+{
+	return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
+				 : -EOPNOTSUPP;
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+	if (dev->ops->destructor)
+		dev->ops->destructor(dev);
+}
+
+#endif /* __KVM_IODEV_H__ */
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
new file mode 100644
index 0000000..9c777d2
--- /dev/null
+++ b/include/linux/8250_pci.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for PCI support.
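+ *
+ * A hedged example of a board entry built from the flags and struct
+ * defined below (all values illustrative, not taken from a real card):
+ *
+ *	static const struct pciserial_board example_board = {
+ *		.flags		= FL_BASE0,
+ *		.num_ports	= 4,
+ *		.base_baud	= 115200,
+ *		.uart_offset	= 8,
+ *	};
+ *
+ * A driver would then hand it to pciserial_init_ports(dev, &example_board).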
+ */ +#define FL_BASE_MASK 0x0007 +#define FL_BASE0 0x0000 +#define FL_BASE1 0x0001 +#define FL_BASE2 0x0002 +#define FL_BASE3 0x0003 +#define FL_BASE4 0x0004 +#define FL_GET_BASE(x) (x & FL_BASE_MASK) + +/* Use successive BARs (PCI base address registers), + else use offset into some specified BAR */ +#define FL_BASE_BARS 0x0008 + +/* do not assign an irq */ +#define FL_NOIRQ 0x0080 + +/* Use the Base address register size to cap number of ports */ +#define FL_REGION_SZ_CAP 0x0100 + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct serial_private; + +struct serial_private * +pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board); +void pciserial_remove_ports(struct serial_private *priv); +void pciserial_suspend_ports(struct serial_private *priv); +void pciserial_resume_ports(struct serial_private *priv); diff --git a/include/linux/a.out.h b/include/linux/a.out.h new file mode 100644 index 0000000..600cf45 --- /dev/null +++ b/include/linux/a.out.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __A_OUT_GNU_H__ +#define __A_OUT_GNU_H__ + +#include + +#ifndef __ASSEMBLY__ +#ifdef linux +#include +#if defined(__i386__) || defined(__mc68000__) +#else +#ifndef SEGMENT_SIZE +#define SEGMENT_SIZE PAGE_SIZE +#endif +#endif +#endif +#endif /*__ASSEMBLY__ */ +#endif /* __A_OUT_GNU_H__ */ diff --git a/include/linux/acct.h b/include/linux/acct.h new file mode 100644 index 0000000..bc70e81 --- /dev/null +++ b/include/linux/acct.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BSD Process Accounting for Linux - Definitions + * + * Author: Marco van Wieringen (mvw@planets.elm.net) + * + * This header file contains the definitions needed to implement + * BSD-style process accounting. The kernel accounting code and all + * user-level programs that try to do something useful with the + * process accounting log must include this file. + * + * Copyright (C) 1995 - 1997 Marco van Wieringen - ELM Consultancy B.V. + * + */ +#ifndef _LINUX_ACCT_H +#define _LINUX_ACCT_H + +#include + + + +#ifdef CONFIG_BSD_PROCESS_ACCT +struct pid_namespace; +extern int acct_parm[]; /* for sysctl */ +extern void acct_collect(long exitcode, int group_dead); +extern void acct_process(void); +extern void acct_exit_ns(struct pid_namespace *); +#else +#define acct_collect(x,y) do { } while (0) +#define acct_process() do { } while (0) +#define acct_exit_ns(ns) do { } while (0) +#endif + +/* + * ACCT_VERSION numbers as yet defined: + * 0: old format (until 2.6.7) with 16 bit uid/gid + * 1: extended variant (binary compatible on M68K) + * 2: extended variant (binary compatible on everything except M68K) + * 3: new binary incompatible format (64 bytes) + * 4: new binary incompatible format (128 bytes) + * 5: new binary incompatible format (128 bytes, second half) + * + */ + +#undef ACCT_VERSION +#undef AHZ + +#ifdef CONFIG_BSD_PROCESS_ACCT_V3 +#define ACCT_VERSION 3 +#define AHZ 100 +typedef struct acct_v3 acct_t; +#else +#ifdef CONFIG_M68K +#define ACCT_VERSION 1 +#else +#define ACCT_VERSION 2 +#endif +#define AHZ (USER_HZ) +typedef struct acct acct_t; +#endif + +#include +/* + * Yet another set of HZ to *HZ helper functions. + * See for the original. 
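+ *
+ * For example, with HZ == 250 and AHZ == 100, jiffies_to_AHZ(250)
+ * returns 100, i.e. one second of jiffies expressed in AHZ ticks.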
+ */ + +static inline u32 jiffies_to_AHZ(unsigned long x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0 +# if HZ < AHZ + return x * (AHZ / HZ); +# else + return x / (HZ / AHZ); +# endif +#else + u64 tmp = (u64)x * TICK_NSEC; + do_div(tmp, (NSEC_PER_SEC / AHZ)); + return (long)tmp; +#endif +} + +static inline u64 nsec_to_AHZ(u64 x) +{ +#if (NSEC_PER_SEC % AHZ) == 0 + do_div(x, (NSEC_PER_SEC / AHZ)); +#elif (AHZ % 512) == 0 + x *= AHZ/512; + do_div(x, (NSEC_PER_SEC / 512)); +#else + /* + * max relative error 5.7e-8 (1.8s per year) for AHZ <= 1024, + * overflow after 64.99 years. + * exact for AHZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... + */ + x *= 9; + do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ/2)) + / AHZ)); +#endif + return x; +} + +#endif /* _LINUX_ACCT_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h new file mode 100644 index 0000000..ce29a01 --- /dev/null +++ b/include/linux/acpi.h @@ -0,0 +1,1307 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * acpi.h - ACPI Interface + * + * Copyright (C) 2001 Paul Diefenbaugh + */ + +#ifndef _LINUX_ACPI_H +#define _LINUX_ACPI_H + +#include +#include /* for struct resource */ +#include +#include +#include +#include +#include + +#ifndef _LINUX +#define _LINUX +#endif +#include + +#ifdef CONFIG_ACPI + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static inline acpi_handle acpi_device_handle(struct acpi_device *adev) +{ + return adev ? adev->handle : NULL; +} + +#define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) +#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ + acpi_fwnode_handle(adev) : NULL) +#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +#define ACPI_HANDLE_FWNODE(fwnode) \ + acpi_device_handle(to_acpi_device_node(fwnode)) + +static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) +{ + struct fwnode_handle *fwnode; + + fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); + if (!fwnode) + return NULL; + + fwnode->ops = &acpi_static_fwnode_ops; + + return fwnode; +} + +static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) +{ + if (WARN_ON(!is_acpi_static_node(fwnode))) + return; + + kfree(fwnode); +} + +/** + * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with + * the PCI-defined class-code information + * + * @_cls : the class, subclass, prog-if triple for this device + * @_msk : the class mask for this device + * + * This macro is used to create a struct acpi_device_id that matches a + * specific PCI class. The .id and .driver_data fields will be left + * initialized with the default value. 
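+ *
+ * A hedged example (class values illustrative): an entry matching
+ * AHCI-mode SATA controllers (PCI class 01, subclass 06, prog-if 01)
+ * would look like
+ *
+ *	{ ACPI_DEVICE_CLASS(0x010601, 0xffffff) },
+ *
+ * in a driver's struct acpi_device_id table.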
+ */ +#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk), + +static inline bool has_acpi_companion(struct device *dev) +{ + return is_acpi_device_node(dev->fwnode); +} + +static inline void acpi_preset_companion(struct device *dev, + struct acpi_device *parent, u64 addr) +{ + ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); +} + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return dev_name(&adev->dev); +} + +struct device *acpi_get_first_physical_node(struct acpi_device *adev); + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC, + ACPI_IRQ_MODEL_IOSAPIC, + ACPI_IRQ_MODEL_PLATFORM, + ACPI_IRQ_MODEL_GIC, + ACPI_IRQ_MODEL_COUNT +}; + +extern enum acpi_irq_model_id acpi_irq_model; + +enum acpi_interrupt_id { + ACPI_INTERRUPT_PMI = 1, + ACPI_INTERRUPT_INIT, + ACPI_INTERRUPT_CPEI, + ACPI_INTERRUPT_COUNT +}; + +#define ACPI_SPACE_MEM 0 + +enum acpi_address_range_id { + ACPI_ADDRESS_RANGE_MEMORY = 1, + ACPI_ADDRESS_RANGE_RESERVED = 2, + ACPI_ADDRESS_RANGE_ACPI = 3, + ACPI_ADDRESS_RANGE_NVS = 4, + ACPI_ADDRESS_RANGE_COUNT +}; + + +/* Table Handlers */ +union acpi_subtable_headers { + struct acpi_subtable_header common; + struct acpi_hmat_structure hmat; +}; + +typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); + +typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header, + const unsigned long end); + +/* Debugger support */ + +struct acpi_debugger_ops { + int (*create_thread)(acpi_osd_exec_callback function, void *context); + ssize_t (*write_log)(const char *msg); + ssize_t (*read_cmd)(char *buffer, size_t length); + int (*wait_command_ready)(bool single_step, char *buffer, size_t length); + int (*notify_command_complete)(void); +}; + +struct acpi_debugger { + const struct acpi_debugger_ops *ops; + struct module *owner; + struct mutex lock; +}; + +#ifdef CONFIG_ACPI_DEBUGGER +int __init acpi_debugger_init(void); +int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops); +void acpi_unregister_debugger(const struct acpi_debugger_ops *ops); +int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context); +ssize_t acpi_debugger_write_log(const char *msg); +ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length); +int acpi_debugger_wait_command_ready(void); +int acpi_debugger_notify_command_complete(void); +#else +static inline int acpi_debugger_init(void) +{ + return -ENODEV; +} + +static inline int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops) +{ + return -ENODEV; +} + +static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) +{ +} + +static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, + void *context) +{ + return -ENODEV; +} + +static inline int acpi_debugger_write_log(const char *msg) +{ + return -ENODEV; +} + +static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) +{ + return -ENODEV; +} + +static inline int acpi_debugger_wait_command_ready(void) +{ + return -ENODEV; +} + +static inline int acpi_debugger_notify_command_complete(void) +{ + return -ENODEV; +} +#endif + +#define BAD_MADT_ENTRY(entry, end) ( \ + (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ + ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + int count; +}; + +void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); +void 
__acpi_unmap_table(void __iomem *map, unsigned long size); +int early_acpi_boot_init(void); +int acpi_boot_init (void); +void acpi_boot_table_init (void); +int acpi_mps_check (void); +int acpi_numa_init (void); + +int acpi_table_init (void); +int acpi_table_parse(char *id, acpi_tbl_table_handler handler); +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, + struct acpi_subtable_proc *proc, int proc_num, + unsigned int max_entries); +int acpi_table_parse_madt(enum acpi_madt_type id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int acpi_parse_mcfg (struct acpi_table_header *header); +void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); + +/* the following numa functions are architecture-dependent */ +void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) +void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + +void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + +int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); + +#ifndef PHYS_CPUID_INVALID +typedef u32 phys_cpuid_t; +#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) +#endif + +static inline bool invalid_logical_cpuid(u32 cpuid) +{ + return (int)cpuid < 0; +} + +static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) +{ + return phys_id == PHYS_CPUID_INVALID; +} + +/* Validate the processor object's proc_id */ +bool acpi_duplicate_processor_id(int proc_id); + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +/* Arch dependent functions for cpu hotplug support */ +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, + int *pcpu); +int acpi_unmap_cpu(int cpu); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); +#endif + +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); +int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); +void acpi_irq_stats_init(void); +extern u32 acpi_irq_handled; +extern u32 acpi_irq_not_handled; +extern unsigned int acpi_sci_irq; +extern bool acpi_no_s5; +#define INVALID_ACPI_IRQ ((unsigned)-1) +static inline bool acpi_sci_irq_valid(void) +{ + return acpi_sci_irq != INVALID_ACPI_IRQ; +} + +extern int sbf_port; +extern unsigned long acpi_realmode_flags; + +int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); +int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); +int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); + +void acpi_set_irq_model(enum acpi_irq_model_id model, + struct fwnode_handle *fwnode); + +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data); + +#ifdef CONFIG_X86_IO_APIC +extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); +#else +static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 
+{ + return -1; +} +#endif +/* + * This function undoes the effect of one call to acpi_register_gsi(). + * If this matches the last registration, any IRQ resources for gsi + * are freed. + */ +void acpi_unregister_gsi (u32 gsi); + +struct pci_dev; + +int acpi_pci_irq_enable (struct pci_dev *dev); +void acpi_penalize_isa_irq(int irq, int active); +bool acpi_isa_irq_available(int irq); +#ifdef CONFIG_PCI +void acpi_penalize_sci_irq(int irq, int trigger, int polarity); +#else +static inline void acpi_penalize_sci_irq(int irq, int trigger, + int polarity) +{ +} +#endif +void acpi_pci_irq_disable (struct pci_dev *dev); + +extern int ec_read(u8 addr, u8 *val); +extern int ec_write(u8 addr, u8 val); +extern int ec_transaction(u8 command, + const u8 *wdata, unsigned wdata_len, + u8 *rdata, unsigned rdata_len); +extern acpi_handle ec_get_handle(void); + +extern bool acpi_is_pnp_device(struct acpi_device *); + +#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) + +typedef void (*wmi_notify_handler) (u32 value, void *context); + +extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, + u32 method_id, + const struct acpi_buffer *in, + struct acpi_buffer *out); +extern acpi_status wmi_query_block(const char *guid, u8 instance, + struct acpi_buffer *out); +extern acpi_status wmi_set_block(const char *guid, u8 instance, + const struct acpi_buffer *in); +extern acpi_status wmi_install_notify_handler(const char *guid, + wmi_notify_handler handler, void *data); +extern acpi_status wmi_remove_notify_handler(const char *guid); +extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); +extern bool wmi_has_guid(const char *guid); +extern char *wmi_get_acpi_device_uid(const char *guid); + +#endif /* CONFIG_ACPI_WMI */ + +#define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 +#define ACPI_VIDEO_DEVICE_POSTING 0x0002 +#define ACPI_VIDEO_ROM_AVAILABLE 0x0004 +#define ACPI_VIDEO_BACKLIGHT 0x0008 +#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 +#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 +#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 +#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 +#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 +#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 +#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 +#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 + +extern char acpi_video_backlight_string[]; +extern long acpi_is_video_device(acpi_handle handle); +extern int acpi_blacklisted(void); +extern void acpi_osi_setup(char *str); +extern bool acpi_osi_is_win8(void); + +#ifdef CONFIG_ACPI_NUMA +int acpi_map_pxm_to_online_node(int pxm); +int acpi_map_pxm_to_node(int pxm); +int acpi_get_node(acpi_handle handle); +#else +static inline int acpi_map_pxm_to_online_node(int pxm) +{ + return 0; +} +static inline int acpi_map_pxm_to_node(int pxm) +{ + return 0; +} +static inline int acpi_get_node(acpi_handle handle) +{ + return 0; +} +#endif +extern int acpi_paddr_to_node(u64 start_addr, u64 size); + +extern int pnpacpi_disabled; + +#define PXM_INVAL (-1) + +bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); +bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); +bool acpi_dev_resource_address_space(struct acpi_resource *ares, + struct resource_win *win); +bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, + struct resource_win *win); +unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); +unsigned int acpi_dev_get_irq_type(int triggering, int 
polarity); +bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, + struct resource *res); + +void acpi_dev_free_resource_list(struct list_head *list); +int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, + int (*preproc)(struct acpi_resource *, void *), + void *preproc_data); +int acpi_dev_get_dma_resources(struct acpi_device *adev, + struct list_head *list); +int acpi_dev_filter_resource_type(struct acpi_resource *ares, + unsigned long types); + +static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, + void *arg) +{ + return acpi_dev_filter_resource_type(ares, (unsigned long)arg); +} + +struct acpi_device *acpi_resource_consumer(struct resource *res); + +int acpi_check_resource_conflict(const struct resource *res); + +int acpi_check_region(resource_size_t start, resource_size_t n, + const char *name); + +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level); + +int acpi_resources_are_enforced(void); + +#ifdef CONFIG_HIBERNATION +void __init acpi_no_s4_hw_signature(void); +#endif + +#ifdef CONFIG_PM_SLEEP +void __init acpi_old_suspend_ordering(void); +void __init acpi_nvs_nosave(void); +void __init acpi_nvs_nosave_s3(void); +void __init acpi_sleep_no_blacklist(void); +#endif /* CONFIG_PM_SLEEP */ + +int acpi_register_wakeup_handler( + int wake_irq, bool (*wakeup)(void *context), void *context); +void acpi_unregister_wakeup_handler( + bool (*wakeup)(void *context), void *context); + +struct acpi_osc_context { + char *uuid_str; /* UUID string */ + int rev; + struct acpi_buffer cap; /* list of DWORD capabilities */ + struct acpi_buffer ret; /* free by caller if success */ +}; + +acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); + +/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */ +#define OSC_QUERY_DWORD 0 /* DWORD 1 */ +#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ +#define OSC_CONTROL_DWORD 2 /* DWORD 3 */ + +/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ +#define OSC_QUERY_ENABLE 0x00000001 /* input */ +#define OSC_REQUEST_ERROR 0x00000002 /* return */ +#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ +#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ +#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ + +/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ +#define OSC_SB_PAD_SUPPORT 0x00000001 +#define OSC_SB_PPC_OST_SUPPORT 0x00000002 +#define OSC_SB_PR3_SUPPORT 0x00000004 +#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 +#define OSC_SB_APEI_SUPPORT 0x00000010 +#define OSC_SB_CPC_SUPPORT 0x00000020 +#define OSC_SB_CPCV2_SUPPORT 0x00000040 +#define OSC_SB_PCLPI_SUPPORT 0x00000080 +#define OSC_SB_OSLPI_SUPPORT 0x00000100 +#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 + +extern bool osc_sb_apei_support_acked; +extern bool osc_pc_lpi_support_confirmed; + +/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ +#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 +#define OSC_PCI_ASPM_SUPPORT 0x00000002 +#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 +#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 +#define OSC_PCI_MSI_SUPPORT 0x00000010 +#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 +#define OSC_PCI_SUPPORT_MASKS 0x0000011f + +/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ +#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 +#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 +#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 +#define 
OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 +#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 +#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 +#define OSC_PCI_CONTROL_MASKS 0x0000003f + +#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 +#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 +#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 +#define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 +#define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A +#define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B +#define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C +#define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D +#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E +#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F + +extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, + u32 *mask, u32 req); + +/* Enable _OST when all relevant hotplug operations are enabled */ +#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ + defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ + defined(CONFIG_ACPI_CONTAINER) +#define ACPI_HOTPLUG_OST +#endif + +/* _OST Source Event Code (OSPM Action) */ +#define ACPI_OST_EC_OSPM_SHUTDOWN 0x100 +#define ACPI_OST_EC_OSPM_EJECT 0x103 +#define ACPI_OST_EC_OSPM_INSERTION 0x200 + +/* _OST General Processing Status Code */ +#define ACPI_OST_SC_SUCCESS 0x0 +#define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 +#define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 + +/* _OST OS Shutdown Processing (0x100) Status Code */ +#define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 +#define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 +#define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 +#define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 + +/* _OST Ejection Request (0x3, 0x103) Status Code */ +#define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 +#define ACPI_OST_SC_DEVICE_IN_USE 0x81 +#define ACPI_OST_SC_DEVICE_BUSY 0x82 +#define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 +#define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 + +/* _OST Insertion Request (0x200) Status Code */ +#define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 +#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 +#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 + +enum acpi_predicate { + all_versions, + less_than_or_equal, + equal, + greater_than_or_equal, +}; + +/* Table must be terminted by a NULL entry */ +struct acpi_platform_list { + char oem_id[ACPI_OEM_ID_SIZE+1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1]; + u32 oem_revision; + char *table; + enum acpi_predicate pred; + char *reason; + u32 data; +}; +int acpi_match_platform_list(const struct acpi_platform_list *plat); + +extern void acpi_early_init(void); +extern void acpi_subsystem_init(void); +extern void arch_post_acpi_subsys_init(void); + +extern int acpi_nvs_register(__u64 start, __u64 size); + +extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), + void *data); + +const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, + const struct device *dev); + +const void *acpi_device_get_match_data(const struct device *dev); +extern bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv); +int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); +int acpi_device_modalias(struct device *, char *, int); +void acpi_walk_dep_device_list(acpi_handle handle); + +struct platform_device *acpi_create_platform_device(struct acpi_device *, + struct property_entry *); +#define ACPI_PTR(_ptr) (_ptr) + +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = true; +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ + 
adev->flags.visited = false; +} + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE, +}; + +int acpi_reconfig_notifier_register(struct notifier_block *nb); +int acpi_reconfig_notifier_unregister(struct notifier_block *nb); + +#ifdef CONFIG_ACPI_GTDT +int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); +int acpi_gtdt_map_ppi(int type); +bool acpi_gtdt_c3stop(int type); +int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); +#endif + +#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER +static inline void acpi_arch_set_root_pointer(u64 addr) +{ +} +#endif + +#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER +static inline u64 acpi_arch_get_root_pointer(void) +{ + return 0; +} +#endif + +#else /* !CONFIG_ACPI */ + +#define acpi_disabled 1 + +#define ACPI_COMPANION(dev) (NULL) +#define ACPI_COMPANION_SET(dev, adev) do { } while (0) +#define ACPI_HANDLE(dev) (NULL) +#define ACPI_HANDLE_FWNODE(fwnode) (NULL) +#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), + +struct fwnode_handle; + +static inline bool acpi_dev_found(const char *hid) +{ + return false; +} + +static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) +{ + return false; +} + +static inline struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) +{ + return NULL; +} + +static inline void acpi_dev_put(struct acpi_device *adev) {} + +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, + const char *name) +{ + return false; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return NULL; +} + +static inline bool has_acpi_companion(struct device *dev) +{ + return false; +} + +static inline void acpi_preset_companion(struct device *dev, + struct acpi_device *parent, u64 addr) +{ +} + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return NULL; +} + +static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) +{ + return NULL; +} + +static inline void acpi_early_init(void) { } +static inline void acpi_subsystem_init(void) { } + +static inline int early_acpi_boot_init(void) +{ + return 0; +} +static inline int acpi_boot_init(void) +{ + return 0; +} + +static inline void acpi_boot_table_init(void) +{ + return; +} + +static inline int acpi_mps_check(void) +{ + return 0; +} + +static inline int acpi_check_resource_conflict(struct resource *res) +{ + return 0; +} + +static inline int acpi_check_region(resource_size_t start, resource_size_t n, + const char *name) +{ + return 0; +} + +struct acpi_table_header; +static inline int acpi_table_parse(char *id, + int (*handler)(struct acpi_table_header *)) +{ + return -ENODEV; +} + +static inline int acpi_nvs_register(__u64 start, __u64 size) +{ + return 0; +} + +static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), + void *data) +{ + return 0; +} + +struct acpi_device_id; + +static inline const struct acpi_device_id *acpi_match_device( + const 
struct acpi_device_id *ids, const struct device *dev) +{ + return NULL; +} + +static inline const void *acpi_device_get_match_data(const struct device *dev) +{ + return NULL; +} + +static inline bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv) +{ + return false; +} + +static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, + const guid_t *guid, + int rev, int func, + union acpi_object *argv4) +{ + return NULL; +} + +static inline int acpi_device_uevent_modalias(struct device *dev, + struct kobj_uevent_env *env) +{ + return -ENODEV; +} + +static inline int acpi_device_modalias(struct device *dev, + char *buf, int size) +{ + return -ENODEV; +} + +static inline bool acpi_dma_supported(struct acpi_device *adev) +{ + return false; +} + +static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) +{ + return DEV_DMA_NOT_SUPPORTED; +} + +static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr, + u64 *offset, u64 *size) +{ + return -ENODEV; +} + +static inline int acpi_dma_configure(struct device *dev, + enum dev_dma_attr attr) +{ + return 0; +} + +#define ACPI_PTR(_ptr) (NULL) + +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ +} + +static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline struct acpi_device *acpi_resource_consumer(struct resource *res) +{ + return NULL; +} + +#endif /* !CONFIG_ACPI */ + +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +int acpi_ioapic_add(acpi_handle root); +#else +static inline int acpi_ioapic_add(acpi_handle root) { return 0; } +#endif + +#ifdef CONFIG_ACPI +void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, + u32 pm1a_ctrl, u32 pm1b_ctrl)); + +acpi_status acpi_os_prepare_sleep(u8 sleep_state, + u32 pm1a_control, u32 pm1b_control); + +void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, + u32 val_a, u32 val_b)); + +acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, + u32 val_a, u32 val_b); + +#ifdef CONFIG_X86 +void arch_reserve_mem_area(acpi_physical_address addr, size_t size); +#else +static inline void arch_reserve_mem_area(acpi_physical_address addr, + size_t size) +{ +} +#endif /* CONFIG_X86 */ +#else +#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_PM) +int acpi_dev_suspend(struct device *dev, bool wakeup); +int acpi_dev_resume(struct device *dev); +int acpi_subsys_runtime_suspend(struct device *dev); +int acpi_subsys_runtime_resume(struct device *dev); +int acpi_dev_pm_attach(struct device *dev, bool power_on); +#else +static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } +static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } +static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } +static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } +static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) +{ + return 0; +} +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) +int acpi_subsys_prepare(struct device *dev); +void acpi_subsys_complete(struct device *dev); +int acpi_subsys_suspend_late(struct device *dev); +int acpi_subsys_suspend_noirq(struct device *dev); +int 
acpi_subsys_suspend(struct device *dev);
+int acpi_subsys_freeze(struct device *dev);
+int acpi_subsys_poweroff(struct device *dev);
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
+#else
+static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
+static inline void acpi_subsys_complete(struct device *dev) {}
+static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
+static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+static inline void acpi_ec_mark_gpe_for_wake(void) {}
+static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
+#endif
+
+#ifdef CONFIG_ACPI
+__printf(3, 4)
+void acpi_handle_printk(const char *level, acpi_handle handle,
+			const char *fmt, ...);
+#else	/* !CONFIG_ACPI */
+static inline __printf(3, 4) void
+acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+#endif	/* !CONFIG_ACPI */
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
+__printf(3, 4)
+void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
+#endif
+
+/*
+ * acpi_handle_<level>: Print message with ACPI prefix and object path
+ *
+ * These interfaces acquire the global namespace mutex to obtain an object
+ * path.  In interrupt context, it shows the object path as <n/a>.
+ */
+#define acpi_handle_emerg(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_alert(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_crit(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_err(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_warn(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_notice(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_info(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__)
+
+#if defined(DEBUG)
+#define acpi_handle_debug(handle, fmt, ...)				\
+	acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
+#else
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define acpi_handle_debug(handle, fmt, ...)				\
+	_dynamic_func_call(fmt, __acpi_handle_debug,			\
+			   handle, pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define acpi_handle_debug(handle, fmt, ...)
\ +({ \ + if (0) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ + 0; \ +}) +#endif +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) +bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio); +int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); +#else +static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio) +{ + return false; +} +static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) +{ + return -ENXIO; +} +#endif + +/* Device properties */ + +#ifdef CONFIG_ACPI +int acpi_dev_get_property(const struct acpi_device *adev, const char *name, + acpi_object_type type, const union acpi_object **obj); +int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct fwnode_reference_args *args); + +static inline int acpi_node_get_property_reference( + const struct fwnode_handle *fwnode, + const char *name, size_t index, + struct fwnode_reference_args *args) +{ + return __acpi_node_get_property_reference(fwnode, name, index, + NR_FWNODE_REFERENCE_ARGS, args); +} + +static inline bool acpi_dev_has_props(const struct acpi_device *adev) +{ + return !list_empty(&adev->data.properties); +} + +struct acpi_device_properties * +acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, + const union acpi_object *properties); + +int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, + void **valptr); +int acpi_dev_prop_read_single(struct acpi_device *adev, + const char *propname, enum dev_prop_type proptype, + void *val); +int acpi_node_prop_read(const struct fwnode_handle *fwnode, + const char *propname, enum dev_prop_type proptype, + void *val, size_t nval); +int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); + +struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, + struct fwnode_handle *child); +struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode); + +struct acpi_probe_entry; +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, + struct acpi_probe_entry *); + +#define ACPI_TABLE_ID_LEN 5 + +/** + * struct acpi_probe_entry - boot-time probing entry + * @id: ACPI table name + * @type: Optional subtable type to match + * (if @id contains subtables) + * @subtable_valid: Optional callback to check the validity of + * the subtable + * @probe_table: Callback to the driver being probed when table + * match is successful + * @probe_subtbl: Callback to the driver being probed when table and + * subtable match (and optional callback is successful) + * @driver_data: Sideband data provided back to the driver + */ +struct acpi_probe_entry { + __u8 id[ACPI_TABLE_ID_LEN]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ + __used __section(__##table##_acpi_probe_table) \ + = { \ + .id = table_id, \ + .type = subtable, \ + .subtable_valid = valid, \ + .probe_table = (acpi_tbl_table_handler)fn, \ + .driver_data = data, \ + } + +#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table +#define 
ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end + +int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); + +#define acpi_probe_device_table(t) \ + ({ \ + extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ + ACPI_PROBE_TABLE_END(t); \ + __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ + (&ACPI_PROBE_TABLE_END(t) - \ + &ACPI_PROBE_TABLE(t))); \ + }) +#else +static inline int acpi_dev_get_property(struct acpi_device *adev, + const char *name, acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} + +static inline int +__acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct fwnode_reference_args *args) +{ + return -ENXIO; +} + +static inline int +acpi_node_get_property_reference(const struct fwnode_handle *fwnode, + const char *name, size_t index, + struct fwnode_reference_args *args) +{ + return -ENXIO; +} + +static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_get(const struct acpi_device *adev, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read_single(const struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val) +{ + return -ENXIO; +} + +static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read(const struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline struct fwnode_handle * +acpi_get_next_subnode(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + return NULL; +} + +static inline struct fwnode_handle * +acpi_node_get_parent(const struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct fwnode_handle * +acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + return ERR_PTR(-ENXIO); +} + +static inline int +acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_handle **remote, + struct fwnode_handle **port, + struct fwnode_handle **endpoint) +{ + return -ENXIO; +} + +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const void * __acpi_table_##name[] \ + __attribute__((unused)) \ + = { (void *) table_id, \ + (void *) subtable, \ + (void *) valid, \ + (void *) fn, \ + (void *) data } + +#define acpi_probe_device_table(t) ({ int __r = 0; __r;}) +#endif + +#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + +#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) +extern bool acpi_has_watchdog(void); +#else +static inline bool acpi_has_watchdog(void) { return false; } +#endif + +#ifdef CONFIG_ACPI_SPCR_TABLE +extern bool qdf2400_e44_present; +int acpi_parse_spcr(bool enable_earlycon, bool enable_console); +#else +static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI) +int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res); +#else +static inline +int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res) +{ + return -EINVAL; +} +#endif + +#ifdef 
CONFIG_ACPI_LPIT +int lpit_read_residency_count_address(u64 *address); +#else +static inline int lpit_read_residency_count_address(u64 *address) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_ACPI_PPTT +int acpi_pptt_cpu_is_thread(unsigned int cpu); +int find_acpi_cpu_topology(unsigned int cpu, int level); +int find_acpi_cpu_topology_package(unsigned int cpu); +int find_acpi_cpu_topology_hetero_id(unsigned int cpu); +int find_acpi_cpu_cache_topology(unsigned int cpu, int level); +#else +static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_topology(unsigned int cpu, int level) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_topology_package(unsigned int cpu) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) +{ + return -EINVAL; +} +static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_ACPI +extern int acpi_platform_notify(struct device *dev, enum kobject_action action); +#else +static inline int +acpi_platform_notify(struct device *dev, enum kobject_action action) +{ + return 0; +} +#endif + +#endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h new file mode 100644 index 0000000..72cedb9 --- /dev/null +++ b/include/linux/acpi_dma.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ACPI helpers for DMA request / controller + * + * Based on of_dma.h + * + * Copyright (C) 2013, Intel Corporation + * Author: Andy Shevchenko + */ + +#ifndef __LINUX_ACPI_DMA_H +#define __LINUX_ACPI_DMA_H + +#include +#include +#include +#include + +/** + * struct acpi_dma_spec - slave device DMA resources + * @chan_id: channel unique id + * @slave_id: request line unique id + * @dev: struct device of the DMA controller to be used in the filter + * function + */ +struct acpi_dma_spec { + int chan_id; + int slave_id; + struct device *dev; +}; + +/** + * struct acpi_dma - representation of the registered DMAC + * @dma_controllers: linked list node + * @dev: struct device of this controller + * @acpi_dma_xlate: callback function to find a suitable channel + * @data: private data used by a callback function + * @base_request_line: first supported request line (CSRT) + * @end_request_line: last supported request line (CSRT) + */ +struct acpi_dma { + struct list_head dma_controllers; + struct device *dev; + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *); + void *data; + unsigned short base_request_line; + unsigned short end_request_line; +}; + +/* Used with acpi_dma_simple_xlate() */ +struct acpi_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +#ifdef CONFIG_DMA_ACPI + +int acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data); +int acpi_dma_controller_free(struct device *dev); +int devm_acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data); +void devm_acpi_dma_controller_free(struct device *dev); + +struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, + size_t index); +struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, + const char *name); + +struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma); +#else + +static inline int 
acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + return -ENODEV; +} +static inline int acpi_dma_controller_free(struct device *dev) +{ + return -ENODEV; +} +static inline int devm_acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + return -ENODEV; +} +static inline void devm_acpi_dma_controller_free(struct device *dev) +{ +} + +static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( + struct device *dev, size_t index) +{ + return ERR_PTR(-ENODEV); +} +static inline struct dma_chan *acpi_dma_request_slave_chan_by_name( + struct device *dev, const char *name) +{ + return ERR_PTR(-ENODEV); +} + +#define acpi_dma_simple_xlate NULL + +#endif + +#define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index + +#endif /* __LINUX_ACPI_DMA_H */ diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h new file mode 100644 index 0000000..8e7e2ec --- /dev/null +++ b/include/linux/acpi_iort.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016, Semihalf + * Author: Tomasz Nowicki + */ + +#ifndef __ACPI_IORT_H__ +#define __ACPI_IORT_H__ + +#include +#include +#include + +#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) +#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) + +/* + * PMCG model identifiers for use in smmu pmu driver. Please note + * that this is purely for the use of software and has nothing to + * do with hardware or with IORT specification. + */ +#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */ +#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */ + +int iort_register_domain_token(int trans_id, phys_addr_t base, + struct fwnode_handle *fw_node); +void iort_deregister_domain_token(int trans_id); +struct fwnode_handle *iort_find_domain_token(int trans_id); +#ifdef CONFIG_ACPI_IORT +void acpi_iort_init(void); +u32 iort_msi_map_rid(struct device *dev, u32 req_id); +struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +void acpi_configure_pmsi_domain(struct device *dev); +int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); +/* IOMMU interface */ +void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); +const struct iommu_ops *iort_iommu_configure(struct device *dev); +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); +#else +static inline void acpi_iort_init(void) { } +static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) +{ return req_id; } +static inline struct irq_domain *iort_get_device_domain(struct device *dev, + u32 req_id) +{ return NULL; } +static inline void acpi_configure_pmsi_domain(struct device *dev) { } +/* IOMMU interface */ +static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, + u64 *size) { } +static inline const struct iommu_ops *iort_iommu_configure( + struct device *dev) +{ return NULL; } +static inline +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) +{ return 0; } +#endif + +#endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h new file mode 100644 index 0000000..50d88bf --- /dev/null +++ b/include/linux/acpi_pmtmr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ACPI_PMTMR_H_ +#define _ACPI_PMTMR_H_ + +#include + +/* Number of PMTMR ticks expected 
during calibration run */ +#define PMTMR_TICKS_PER_SEC 3579545 + +/* limit it to 24 bits */ +#define ACPI_PM_MASK CLOCKSOURCE_MASK(24) + +/* Overrun value */ +#define ACPI_PM_OVRRUN (1<<24) + +#ifdef CONFIG_X86_PM_TIMER + +extern u32 acpi_pm_read_verified(void); +extern u32 pmtmr_ioport; + +static inline u32 acpi_pm_read_early(void) +{ + if (!pmtmr_ioport) + return 0; + /* mask the output to 24 bits */ + return acpi_pm_read_verified() & ACPI_PM_MASK; +} + +#else + +static inline u32 acpi_pm_read_early(void) +{ + return 0; +} + +#endif + +#endif + diff --git a/include/linux/adb.h b/include/linux/adb.h new file mode 100644 index 0000000..f6306fc --- /dev/null +++ b/include/linux/adb.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for ADB (Apple Desktop Bus) support. + */ +#ifndef __ADB_H +#define __ADB_H + +#include + + +struct adb_request { + unsigned char data[32]; + int nbytes; + unsigned char reply[32]; + int reply_len; + unsigned char reply_expected; + unsigned char sent; + unsigned char complete; + void (*done)(struct adb_request *); + void *arg; + struct adb_request *next; +}; + +struct adb_ids { + int nids; + unsigned char id[16]; +}; + +/* Structure which encapsulates a low-level ADB driver */ + +struct adb_driver { + char name[16]; + int (*probe)(void); + int (*init)(void); + int (*send_request)(struct adb_request *req, int sync); + int (*autopoll)(int devs); + void (*poll)(void); + int (*reset_bus)(void); +}; + +/* Values for adb_request flags */ +#define ADBREQ_REPLY 1 /* expect reply */ +#define ADBREQ_SYNC 2 /* poll until done */ +#define ADBREQ_NOSEND 4 /* build the request, but don't send it */ + +/* Messages sent thru the client_list notifier. You should NOT stop + the operation, at least not with this version */ +enum adb_message { + ADB_MSG_POWERDOWN, /* Currently called before sleep only */ + ADB_MSG_PRE_RESET, /* Called before resetting the bus */ + ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ +}; +extern struct blocking_notifier_head adb_client_list; + +int adb_request(struct adb_request *req, void (*done)(struct adb_request *), + int flags, int nbytes, ...); +int adb_register(int default_id,int handler_id,struct adb_ids *ids, + void (*handler)(unsigned char *, int, int)); +int adb_unregister(int index); +void adb_poll(void); +void adb_input(unsigned char *, int, int); +int adb_reset_bus(void); + +int adb_try_handler_change(int address, int new_id); +int adb_get_infos(int address, int *original_address, int *handler_id); + +#endif /* __ADB_H */ diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h new file mode 100644 index 0000000..4836e38 --- /dev/null +++ b/include/linux/adfs_fs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ADFS_FS_H +#define _ADFS_FS_H + +#include + +/* + * Calculate the boot block checksum on an ADFS drive. Note that this will + * appear to be correct if the sector contains all zeros, so also check that + * the disk size is non-zero!!! + */ +static inline int adfs_checkbblk(unsigned char *ptr) +{ + unsigned int result = 0; + unsigned char *p = ptr + 511; + + do { + result = (result & 0xff) + (result >> 8); + result = result + *--p; + } while (p != ptr); + + return (result & 0xff) != ptr[511]; +} +#endif diff --git a/include/linux/adxl.h b/include/linux/adxl.h new file mode 100644 index 0000000..2a629ac --- /dev/null +++ b/include/linux/adxl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Address translation interface via ACPI DSM. 
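+ *
+ * A hedged usage sketch (the size of the values buffer and the way the
+ * component count is discovered are assumptions, not defined here):
+ *
+ *	const char * const *names = adxl_get_component_names();
+ *	u64 values[16];
+ *
+ *	if (!adxl_decode(addr, values))
+ *		pr_info("%s = %llx\n", names[0], values[0]);
+ *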
+ * Copyright (C) 2018 Intel Corporation + */ + +#ifndef _LINUX_ADXL_H +#define _LINUX_ADXL_H + +const char * const *adxl_get_component_names(void); +int adxl_decode(u64 addr, u64 component_values[]); + +#endif /* _LINUX_ADXL_H */ diff --git a/include/linux/aer.h b/include/linux/aer.h new file mode 100644 index 0000000..514bffa --- /dev/null +++ b/include/linux/aer.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2006 Intel Corp. + * Tom Long Nguyen (tom.l.nguyen@intel.com) + * Zhang Yanmin (yanmin.zhang@intel.com) + */ + +#ifndef _AER_H_ +#define _AER_H_ + +#include +#include + +#define AER_NONFATAL 0 +#define AER_FATAL 1 +#define AER_CORRECTABLE 2 +#define DPC_FATAL 3 + +struct pci_dev; + +struct aer_header_log_regs { + unsigned int dw0; + unsigned int dw1; + unsigned int dw2; + unsigned int dw3; +}; + +struct aer_capability_regs { + u32 header; + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + struct aer_header_log_regs header_log; + u32 root_command; + u32 root_status; + u16 cor_err_source; + u16 uncor_err_source; +}; + +#if defined(CONFIG_PCIEAER) +/* PCIe port driver needs this function to enable AER */ +int pci_enable_pcie_error_reporting(struct pci_dev *dev); +int pci_disable_pcie_error_reporting(struct pci_dev *dev); +int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); +#else +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) +{ + return -EINVAL; +} +#endif + +void cper_print_aer(struct pci_dev *dev, int aer_severity, + struct aer_capability_regs *aer); +int cper_severity_to_aer(int cper_severity); +void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, + int severity, struct aer_capability_regs *aer_regs); +#endif //_AER_H_ + diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h new file mode 100644 index 0000000..eaf6cd7 --- /dev/null +++ b/include/linux/agp_backend.h @@ -0,0 +1,109 @@ +/* + * AGPGART backend specific includes. Not for userspace consumption. + * + * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2002-2003 Dave Jones + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AGP_BACKEND_H +#define _AGP_BACKEND_H 1 + +#include + +enum chipset_type { + NOT_SUPPORTED, + SUPPORTED, +}; + +struct agp_version { + u16 major; + u16 minor; +}; + +struct agp_kern_info { + struct agp_version version; + struct pci_dev *device; + enum chipset_type chipset; + unsigned long mode; + unsigned long aper_base; + size_t aper_size; + int max_memory; /* In pages */ + int current_memory; + bool cant_use_aperture; + unsigned long page_mask; + const struct vm_operations_struct *vm_ops; +}; + +/* + * The agp_memory structure has information about the block of agp memory + * allocated. A caller may manipulate the next and prev pointers to link + * each allocated item into a list. These pointers are ignored by the backend. + * Everything else should never be written to, but the caller may read any of + * the items to determine the status of this block of agp memory. + */ + +struct agp_bridge_data; + +struct agp_memory { + struct agp_memory *next; + struct agp_memory *prev; + struct agp_bridge_data *bridge; + struct page **pages; + size_t page_count; + int key; + int num_scratch_pages; + off_t pg_start; + u32 type; + u32 physical; + bool is_bound; + bool is_flushed; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; + /* DMA-mapped addresses */ + struct scatterlist *sg_list; + int num_sg; +}; + +#define AGP_NORMAL_MEMORY 0 + +#define AGP_USER_TYPES (1 << 16) +#define AGP_USER_MEMORY (AGP_USER_TYPES) +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) + +extern struct agp_bridge_data *agp_bridge; +extern struct list_head agp_bridges; + +extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *); + +extern void agp_free_memory(struct agp_memory *); +extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32); +extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); +extern int agp_bind_memory(struct agp_memory *, off_t); +extern int agp_unbind_memory(struct agp_memory *); +extern void agp_enable(struct agp_bridge_data *, u32); +extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); +extern void agp_backend_release(struct agp_bridge_data *); + +#endif /* _AGP_BACKEND_H */ diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h new file mode 100644 index 0000000..c6b61ca --- /dev/null +++ b/include/linux/agpgart.h @@ -0,0 +1,130 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _AGP_H +#define _AGP_H 1 + +#include +#include +#include + +#define AGPGART_MINOR 175 + +struct agp_info { + struct agp_version version; /* version of the driver */ + u32 bridge_id; /* bridge vendor/device */ + u32 agp_mode; /* mode info of bridge */ + unsigned long aper_base;/* base of aperture */ + size_t aper_size; /* size of aperture */ + size_t pg_total; /* max pages (swap + system) */ + size_t pg_system; /* max pages (system) */ + size_t pg_used; /* current pages used */ +}; + +struct agp_setup { + u32 agp_mode; /* mode info of bridge */ +}; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... + */ +struct agp_segment { + off_t pg_start; /* starting page to populate */ + size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ +}; + +struct agp_segment_priv { + off_t pg_start; + size_t pg_count; + pgprot_t prot; +}; + +struct agp_region { + pid_t pid; /* pid of process */ + size_t seg_count; /* number of segments */ + struct agp_segment *seg_list; +}; + +struct agp_allocate { + int key; /* tag of allocation */ + size_t pg_count; /* number of pages */ + u32 type; /* 0 == normal, other devspec */ + u32 physical; /* device specific (some devices + * need a phys address of the + * actual page behind the gatt + * table) */ +}; + +struct agp_bind { + int key; /* tag of allocation */ + off_t pg_start; /* starting page to populate */ +}; + +struct agp_unbind { + int key; /* tag of allocation */ + u32 priority; /* priority for paging out */ +}; + +struct agp_client { + struct agp_client *next; + struct agp_client *prev; + pid_t pid; + int num_segments; + struct agp_segment_priv **segments; +}; + +struct agp_controller { + struct agp_controller *next; + struct agp_controller *prev; + pid_t pid; + int num_clients; + struct agp_memory *pool; + struct agp_client *clients; +}; + +#define AGP_FF_ALLOW_CLIENT 0 +#define AGP_FF_ALLOW_CONTROLLER 1 +#define AGP_FF_IS_CLIENT 2 +#define AGP_FF_IS_CONTROLLER 3 +#define AGP_FF_IS_VALID 4 + +struct agp_file_private { + struct agp_file_private *next; + struct agp_file_private *prev; + pid_t my_pid; + unsigned long access_flags; /* long req'd for set_bit --RR */ +}; + +struct agp_front_data { + struct mutex agp_mutex; + struct agp_controller *current_controller; + struct agp_controller *controllers; + struct agp_file_private *file_priv_list; + bool used_by_controller; + bool backend_acquired; +}; + +#endif /* _AGP_H */ diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h new file mode 100644 index 0000000..230c871 --- /dev/null +++ b/include/linux/ahci-remap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_AHCI_REMAP_H +#define _LINUX_AHCI_REMAP_H + +#include + +#define AHCI_VSCAP 0xa4 +#define AHCI_REMAP_CAP 0x800 + +/* device class code */ +#define AHCI_REMAP_N_DCC 0x880 + +/* remap-device base relative to ahci-bar */ +#define AHCI_REMAP_N_OFFSET SZ_16K +#define AHCI_REMAP_N_SIZE SZ_16K + +#define AHCI_MAX_REMAP 3 + +static inline unsigned int ahci_remap_dcc(int i) +{ + 
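+	/* class-code slots for remapped devices are 0x80 apart */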
return AHCI_REMAP_N_DCC + i * 0x80; +} + +static inline unsigned int ahci_remap_base(int i) +{ + return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE; +} + +#endif /* _LINUX_AHCI_REMAP_H */ diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h new file mode 100644 index 0000000..49e5383 --- /dev/null +++ b/include/linux/ahci_platform.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AHCI SATA platform driver + * + * Copyright 2004-2005 Red Hat, Inc. + * Jeff Garzik + * Copyright 2010 MontaVista Software, LLC. + * Anton Vorontsov + */ + +#ifndef _AHCI_PLATFORM_H +#define _AHCI_PLATFORM_H + +#include + +struct device; +struct ata_port_info; +struct ahci_host_priv; +struct platform_device; +struct scsi_host_template; + +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); +int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); +void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); +int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); +void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv); +int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); +void ahci_platform_disable_resources(struct ahci_host_priv *hpriv); +struct ahci_host_priv *ahci_platform_get_resources( + struct platform_device *pdev, unsigned int flags); +int ahci_platform_init_host(struct platform_device *pdev, + struct ahci_host_priv *hpriv, + const struct ata_port_info *pi_template, + struct scsi_host_template *sht); + +void ahci_platform_shutdown(struct platform_device *pdev); + +int ahci_platform_suspend_host(struct device *dev); +int ahci_platform_resume_host(struct device *dev); +int ahci_platform_suspend(struct device *dev); +int ahci_platform_resume(struct device *dev); + +#define AHCI_PLATFORM_GET_RESETS 0x01 + +#endif /* _AHCI_PLATFORM_H */ diff --git a/include/linux/aio.h b/include/linux/aio.h new file mode 100644 index 0000000..b83e68d --- /dev/null +++ b/include/linux/aio.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX__AIO_H +#define __LINUX__AIO_H + +#include + +struct kioctx; +struct kiocb; +struct mm_struct; + +typedef int (kiocb_cancel_fn)(struct kiocb *); + +/* prototypes */ +#ifdef CONFIG_AIO +extern void exit_aio(struct mm_struct *mm); +void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); +#else +static inline void exit_aio(struct mm_struct *mm) { } +static inline void kiocb_set_cancel_fn(struct kiocb *req, + kiocb_cancel_fn *cancel) { } +#endif /* CONFIG_AIO */ + +/* for sysctl: */ +extern unsigned long aio_nr; +extern unsigned long aio_max_nr; + +#endif /* __LINUX__AIO_H */ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h new file mode 100644 index 0000000..74748e3 --- /dev/null +++ b/include/linux/alarmtimer.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ALARMTIMER_H +#define _LINUX_ALARMTIMER_H + +#include +#include +#include + +struct rtc_device; + +enum alarmtimer_type { + ALARM_REALTIME, + ALARM_BOOTTIME, + + /* Supported types end here */ + ALARM_NUMTYPE, + + /* Used for tracing information. No usable types. 
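+ * They must not be passed to alarm_init().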
+ */
+	ALARM_REALTIME_FREEZER,
+	ALARM_BOOTTIME_FREEZER,
+};
+
+enum alarmtimer_restart {
+	ALARMTIMER_NORESTART,
+	ALARMTIMER_RESTART,
+};
+
+
+#define ALARMTIMER_STATE_INACTIVE	0x00
+#define ALARMTIMER_STATE_ENQUEUED	0x01
+
+/**
+ * struct alarm - Alarm timer structure
+ * @node:	timerqueue node for adding to the event list; this value
+ *		also includes the expiration time.
+ * @timer:	hrtimer used to schedule events while running
+ * @function:	Function pointer to be executed when the timer fires.
+ * @type:	Alarm type (BOOTTIME/REALTIME).
+ * @state:	Flag that represents whether the alarm is set to fire or not.
+ * @data:	Internal data value.
+ */
+struct alarm {
+	struct timerqueue_node	node;
+	struct hrtimer		timer;
+	enum alarmtimer_restart	(*function)(struct alarm *, ktime_t now);
+	enum alarmtimer_type	type;
+	int			state;
+	void			*data;
+};
+
+void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
+		enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_restart(struct alarm *alarm);
+int alarm_try_to_cancel(struct alarm *alarm);
+int alarm_cancel(struct alarm *alarm);
+
+u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
+ktime_t alarm_expires_remaining(const struct alarm *alarm);
+
+/* Provide a way to access the rtc device being used by alarmtimers */
+struct rtc_device *alarmtimer_get_rtcdev(void);
+
+#endif
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
new file mode 100644
index 0000000..4416df5
--- /dev/null
+++ b/include/linux/alcor_pci.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Oleksij Rempel
+ *
+ * Driver for Alcor Micro AU6601 and AU6621 controllers
+ */
+
+#ifndef __ALCOR_PCI_H
+#define __ALCOR_PCI_H
+
+#define ALCOR_SD_CARD	0
+#define ALCOR_MS_CARD	1
+
+#define DRV_NAME_ALCOR_PCI_SDMMC	"alcor_sdmmc"
+#define DRV_NAME_ALCOR_PCI_MS		"alcor_ms"
+
+#define PCI_ID_ALCOR_MICRO		0x1AEA
+#define PCI_ID_AU6601			0x6601
+#define PCI_ID_AU6621			0x6621
+
+#define MHZ_TO_HZ(freq)			((freq) * 1000 * 1000)
+
+#define AU6601_BASE_CLOCK		31000000
+#define AU6601_MIN_CLOCK		150000
+#define AU6601_MAX_CLOCK		208000000
+#define AU6601_MAX_DMA_SEGMENTS		64
+#define AU6601_MAX_PIO_SEGMENTS		1
+#define AU6601_MAX_DMA_BLOCK_SIZE	0x1000
+#define AU6601_MAX_PIO_BLOCK_SIZE	0x200
+#define AU6601_MAX_DMA_BLOCKS		1
+#define AU6601_DMA_LOCAL_SEGMENTS	1
+
+/* registers spotted by reverse engineering, but still
+ * with unknown functionality:
+ * 0x10 - ADMA phy address. AU6621 only?
+ * 0x51 - LED ctrl?
+ * 0x52 - unknown
+ * 0x61 - LED related? Always toggled BIT0
+ * 0x63 - Same as 0x61?
+ * 0x77 - unknown
+ */
+
+/* SDMA phy address. Higher than 0x0800.0000?
+ * The au6601 and au6621 have different DMA engines with different issues.
+ * For example, the au6621 engine is triggered by an address change; no other
+ * interaction is needed. This means that if we get two buffers with the same
+ * address, the engine will stall.
+ */
+#define AU6601_REG_SDMA_ADDR		0x00
+#define AU6601_SDMA_MASK		0xffffffff
+
+#define AU6601_DMA_BOUNDARY		0x05
+#define AU6621_DMA_PAGE_CNT		0x05
+/* PIO */
+#define AU6601_REG_BUFFER		0x08
+/* ADMA ctrl? AU6621 only. */
+#define AU6621_DMA_CTRL			0x0c
+#define AU6621_DMA_ENABLE		BIT(0)
+/* CMD index */
+#define AU6601_REG_CMD_OPCODE		0x23
+/* CMD parameter */
+#define AU6601_REG_CMD_ARG		0x24
+/* CMD response 4x4 Bytes */
+#define AU6601_REG_CMD_RSP0		0x30
+#define AU6601_REG_CMD_RSP1		0x34
+#define AU6601_REG_CMD_RSP2		0x38
+#define AU6601_REG_CMD_RSP3		0x3C
+/* default timeout set to 125: 125 * 40ms = 5 sec;
+ * how exactly it is calculated is unclear.
+ */
+#define AU6601_TIME_OUT_CTRL		0x69
+/* Block size for SDMA or PIO */
+#define AU6601_REG_BLOCK_SIZE		0x6c
+/* Some power related reg, used together with AU6601_OUTPUT_ENABLE */
+#define AU6601_POWER_CONTROL		0x70
+
+/* PLL ctrl */
+#define AU6601_CLK_SELECT		0x72
+#define	AU6601_CLK_OVER_CLK		0x80
+#define	AU6601_CLK_384_MHZ		0x30
+#define	AU6601_CLK_125_MHZ		0x20
+#define	AU6601_CLK_48_MHZ		0x10
+#define	AU6601_CLK_EXT_PLL		0x04
+#define	AU6601_CLK_X2_MODE		0x02
+#define	AU6601_CLK_ENABLE		0x01
+#define	AU6601_CLK_31_25_MHZ		0x00
+
+#define AU6601_CLK_DIVIDER		0x73
+
+#define AU6601_INTERFACE_MODE_CTRL	0x74
+#define	AU6601_DLINK_MODE		0x80
+#define	AU6601_INTERRUPT_DELAY_TIME	0x40
+#define	AU6601_SIGNAL_REQ_CTRL		0x30
+#define AU6601_MS_CARD_WP		BIT(3)
+#define AU6601_SD_CARD_WP		BIT(0)
+
+/* same register values are used for:
+ *  - AU6601_OUTPUT_ENABLE
+ *  - AU6601_POWER_CONTROL
+ */
+#define AU6601_ACTIVE_CTRL		0x75
+#define AU6601_XD_CARD			BIT(4)
+/* AU6601_MS_CARD_ACTIVE - will activate the MS card section? */
+#define AU6601_MS_CARD			BIT(3)
+#define AU6601_SD_CARD			BIT(0)
+
+/* card slot state. It should automatically detect the type of
+ * the card
+ */
+#define AU6601_DETECT_STATUS		0x76
+#define AU6601_DETECT_EN		BIT(7)
+#define AU6601_MS_DETECTED		BIT(3)
+#define AU6601_SD_DETECTED		BIT(0)
+#define AU6601_DETECT_STATUS_M		0xf
+
+#define AU6601_REG_SW_RESET		0x79
+#define AU6601_BUF_CTRL_RESET		BIT(7)
+#define AU6601_RESET_DATA		BIT(3)
+#define AU6601_RESET_CMD		BIT(0)
+
+#define AU6601_OUTPUT_ENABLE		0x7a
+
+#define AU6601_PAD_DRIVE0		0x7b
+#define AU6601_PAD_DRIVE1		0x7c
+#define AU6601_PAD_DRIVE2		0x7d
+/* read EEPROM? */
+#define AU6601_FUNCTION			0x7f
+
+#define AU6601_CMD_XFER_CTRL		0x81
+#define	AU6601_CMD_17_BYTE_CRC		0xc0
+#define	AU6601_CMD_6_BYTE_WO_CRC	0x80
+#define	AU6601_CMD_6_BYTE_CRC		0x40
+#define	AU6601_CMD_START_XFER		0x20
+#define	AU6601_CMD_STOP_WAIT_RDY	0x10
+#define	AU6601_CMD_NO_RESP		0x00
+
+#define AU6601_REG_BUS_CTRL		0x82
+#define	AU6601_BUS_WIDTH_4BIT		0x20
+#define	AU6601_BUS_WIDTH_8BIT		0x10
+#define	AU6601_BUS_WIDTH_1BIT		0x00
+
+#define AU6601_DATA_XFER_CTRL		0x83
+#define	AU6601_DATA_WRITE		BIT(7)
+#define	AU6601_DATA_DMA_MODE		BIT(6)
+#define	AU6601_DATA_START_XFER		BIT(0)
+
+#define AU6601_DATA_PIN_STATE		0x84
+#define	AU6601_BUS_STAT_CMD		BIT(15)
+/* BIT(4) - BIT(7) are permanently 1.
+ * May be reserved or not attached DAT4-DAT7 + */ +#define AU6601_BUS_STAT_DAT3 BIT(3) +#define AU6601_BUS_STAT_DAT2 BIT(2) +#define AU6601_BUS_STAT_DAT1 BIT(1) +#define AU6601_BUS_STAT_DAT0 BIT(0) +#define AU6601_BUS_STAT_DAT_MASK 0xf + +#define AU6601_OPT 0x85 +#define AU6601_OPT_CMD_LINE_LEVEL 0x80 +#define AU6601_OPT_NCRC_16_CLK BIT(4) +#define AU6601_OPT_CMD_NWT BIT(3) +#define AU6601_OPT_STOP_CLK BIT(2) +#define AU6601_OPT_DDR_MODE BIT(1) +#define AU6601_OPT_SD_18V BIT(0) + +#define AU6601_CLK_DELAY 0x86 +#define AU6601_CLK_DATA_POSITIVE_EDGE 0x80 +#define AU6601_CLK_CMD_POSITIVE_EDGE 0x40 +#define AU6601_CLK_POSITIVE_EDGE_ALL (AU6601_CLK_CMD_POSITIVE_EDGE \ + | AU6601_CLK_DATA_POSITIVE_EDGE) + + +#define AU6601_REG_INT_STATUS 0x90 +#define AU6601_REG_INT_ENABLE 0x94 +#define AU6601_INT_DATA_END_BIT_ERR BIT(22) +#define AU6601_INT_DATA_CRC_ERR BIT(21) +#define AU6601_INT_DATA_TIMEOUT_ERR BIT(20) +#define AU6601_INT_CMD_INDEX_ERR BIT(19) +#define AU6601_INT_CMD_END_BIT_ERR BIT(18) +#define AU6601_INT_CMD_CRC_ERR BIT(17) +#define AU6601_INT_CMD_TIMEOUT_ERR BIT(16) +#define AU6601_INT_ERROR BIT(15) +#define AU6601_INT_OVER_CURRENT_ERR BIT(8) +#define AU6601_INT_CARD_INSERT BIT(7) +#define AU6601_INT_CARD_REMOVE BIT(6) +#define AU6601_INT_READ_BUF_RDY BIT(5) +#define AU6601_INT_WRITE_BUF_RDY BIT(4) +#define AU6601_INT_DMA_END BIT(3) +#define AU6601_INT_DATA_END BIT(1) +#define AU6601_INT_CMD_END BIT(0) + +#define AU6601_INT_NORMAL_MASK 0x00007FFF +#define AU6601_INT_ERROR_MASK 0xFFFF8000 + +#define AU6601_INT_CMD_MASK (AU6601_INT_CMD_END | \ + AU6601_INT_CMD_TIMEOUT_ERR | AU6601_INT_CMD_CRC_ERR | \ + AU6601_INT_CMD_END_BIT_ERR | AU6601_INT_CMD_INDEX_ERR) +#define AU6601_INT_DATA_MASK (AU6601_INT_DATA_END | AU6601_INT_DMA_END | \ + AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY | \ + AU6601_INT_DATA_TIMEOUT_ERR | AU6601_INT_DATA_CRC_ERR | \ + AU6601_INT_DATA_END_BIT_ERR) +#define AU6601_INT_ALL_MASK ((u32)-1) + +/* MS_CARD mode registers */ + +#define AU6601_MS_STATUS 0xa0 + +#define AU6601_MS_BUS_MODE_CTRL 0xa1 +#define AU6601_MS_BUS_8BIT_MODE 0x03 +#define AU6601_MS_BUS_4BIT_MODE 0x01 +#define AU6601_MS_BUS_1BIT_MODE 0x00 + +#define AU6601_MS_TPC_CMD 0xa2 +#define AU6601_MS_TPC_READ_PAGE_DATA 0x02 +#define AU6601_MS_TPC_READ_REG 0x04 +#define AU6601_MS_TPC_GET_INT 0x07 +#define AU6601_MS_TPC_WRITE_PAGE_DATA 0x0D +#define AU6601_MS_TPC_WRITE_REG 0x0B +#define AU6601_MS_TPC_SET_RW_REG_ADRS 0x08 +#define AU6601_MS_TPC_SET_CMD 0x0E +#define AU6601_MS_TPC_EX_SET_CMD 0x09 +#define AU6601_MS_TPC_READ_SHORT_DATA 0x03 +#define AU6601_MS_TPC_WRITE_SHORT_DATA 0x0C + +#define AU6601_MS_TRANSFER_MODE 0xa3 +#define AU6601_MS_XFER_INT_TIMEOUT_CHK BIT(2) +#define AU6601_MS_XFER_DMA_ENABLE BIT(1) +#define AU6601_MS_XFER_START BIT(0) + +#define AU6601_MS_DATA_PIN_STATE 0xa4 + +#define AU6601_MS_INT_STATUS 0xb0 +#define AU6601_MS_INT_ENABLE 0xb4 +#define AU6601_MS_INT_OVER_CURRENT_ERROR BIT(23) +#define AU6601_MS_INT_DATA_CRC_ERROR BIT(21) +#define AU6601_MS_INT_INT_TIMEOUT BIT(20) +#define AU6601_MS_INT_INT_RESP_ERROR BIT(19) +#define AU6601_MS_INT_CED_ERROR BIT(18) +#define AU6601_MS_INT_TPC_TIMEOUT BIT(16) +#define AU6601_MS_INT_ERROR BIT(15) +#define AU6601_MS_INT_CARD_INSERT BIT(7) +#define AU6601_MS_INT_CARD_REMOVE BIT(6) +#define AU6601_MS_INT_BUF_READ_RDY BIT(5) +#define AU6601_MS_INT_BUF_WRITE_RDY BIT(4) +#define AU6601_MS_INT_DMA_END BIT(3) +#define AU6601_MS_INT_TPC_END BIT(1) + +#define AU6601_MS_INT_DATA_MASK 0x00000038 +#define AU6601_MS_INT_TPC_MASK 0x003d8002 +#define 
AU6601_MS_INT_TPC_ERROR 0x003d0000 + +#define ALCOR_PCIE_LINK_CTRL_OFFSET 0x10 +#define ALCOR_PCIE_LINK_CAP_OFFSET 0x0c +#define ALCOR_CAP_START_OFFSET 0x34 + +struct alcor_dev_cfg { + u8 dma; +}; + +struct alcor_pci_priv { + struct pci_dev *pdev; + struct pci_dev *parent_pdev; + struct device *dev; + void __iomem *iobase; + unsigned int irq; + + unsigned long id; /* idr id */ + + struct alcor_dev_cfg *cfg; + + /* PCI ASPM related vars */ + int pdev_cap_off; + u8 pdev_aspm_cap; + int parent_cap_off; + u8 parent_aspm_cap; + u8 ext_config_dev_aspm; +}; + +void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr); +void alcor_write16(struct alcor_pci_priv *priv, u16 val, unsigned int addr); +void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr); +void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr); +u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr); +u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr); +u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr); +#endif diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h new file mode 100644 index 0000000..527a142 --- /dev/null +++ b/include/linux/altera_jtaguart.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * altera_jtaguart.h -- Altera JTAG UART driver defines. + */ + +#ifndef __ALTJUART_H +#define __ALTJUART_H + +#define ALTERA_JTAGUART_MAJOR 204 +#define ALTERA_JTAGUART_MINOR 186 + +struct altera_jtaguart_platform_uart { + unsigned long mapbase; /* Physical address base */ + unsigned int irq; /* Interrupt vector */ +}; + +#endif /* __ALTJUART_H */ diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h new file mode 100644 index 0000000..3eb73b8 --- /dev/null +++ b/include/linux/altera_uart.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * altera_uart.h -- Altera UART driver defines. + */ + +#ifndef __ALTUART_H +#define __ALTUART_H + +struct altera_uart_platform_uart { + unsigned long mapbase; /* Physical address base */ + unsigned int irq; /* Interrupt vector */ + unsigned int uartclk; /* UART clock rate */ + unsigned int bus_shift; /* Bus shift (address stride) */ +}; + +#endif /* __ALTUART_H */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h new file mode 100644 index 0000000..26f0ecf --- /dev/null +++ b/include/linux/amba/bus.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/include/amba/bus.h + * + * This device type deals with ARM PrimeCells and anything else that + * presents a proper CID (0xB105F00D) at the end of the I/O register + * region or that is derived from a PrimeCell. + * + * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. + */ +#ifndef ASMARM_AMBA_H +#define ASMARM_AMBA_H + +#include +#include +#include +#include +#include +#include + +#define AMBA_NR_IRQS 9 +#define AMBA_CID 0xb105f00d +#define CORESIGHT_CID 0xb105900d + +/* + * CoreSight Architecture specification updates the ID specification + * for components on the AMBA bus. (ARM IHI 0029E) + * + * Bits 15:12 of the CID are the device class. + * + * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above) + * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above) + * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support + * at present. + * Class 0x2-0x8,0xA and 0xD-0xD are presently reserved. 
+ * + * Remaining CID bits stay as 0xb105-00d + */ + +/** + * Class 0x9 components use additional values to form a Unique Component + * Identifier (UCI), where peripheral ID values are identical for different + * components. Passed to the amba bus code from the component driver via + * the amba_id->data pointer. + * @devarch : coresight devarch register value + * @devarch_mask: mask bits used for matching. 0 indicates UCI not used. + * @devtype : coresight device type value + * @data : additional driver data. As we have usurped the original + * pointer some devices may still need additional data + */ +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +/* define offsets for registers used by UCI */ +#define UCI_REG_DEVTYPE_OFFSET 0xFCC +#define UCI_REG_DEVARCH_OFFSET 0xFBC + +struct clk; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + unsigned int periphid; + unsigned int cid; + struct amba_cs_uci_id uci; + unsigned int irq[AMBA_NR_IRQS]; + char *driver_override; +}; + +struct amba_driver { + struct device_driver drv; + int (*probe)(struct amba_device *, const struct amba_id *); + int (*remove)(struct amba_device *); + void (*shutdown)(struct amba_device *); + const struct amba_id *id_table; +}; + +/* + * Constants for the designer field of the Peripheral ID register. When bit 7 + * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code. + */ +enum amba_vendor { + AMBA_VENDOR_ARM = 0x41, + AMBA_VENDOR_ST = 0x80, + AMBA_VENDOR_QCOM = 0x51, + AMBA_VENDOR_LSI = 0xb6, + AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */ +}; + +/* This is used to generate pseudo-ID for AMBA device */ +#define AMBA_LINUX_ID(conf, rev, part) \ + (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \ + AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff)) + +extern struct bus_type amba_bustype; + +#define to_amba_device(d) container_of(d, struct amba_device, dev) + +#define amba_get_drvdata(d) dev_get_drvdata(&d->dev) +#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) + +int amba_driver_register(struct amba_driver *); +void amba_driver_unregister(struct amba_driver *); +struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); +void amba_device_put(struct amba_device *); +int amba_device_add(struct amba_device *, struct resource *); +int amba_device_register(struct amba_device *, struct resource *); +struct amba_device *amba_apb_device_add(struct device *parent, const char *name, + resource_size_t base, size_t size, + int irq1, int irq2, void *pdata, + unsigned int periphid); +struct amba_device *amba_ahb_device_add(struct device *parent, const char *name, + resource_size_t base, size_t size, + int irq1, int irq2, void *pdata, + unsigned int periphid); +struct amba_device * +amba_apb_device_add_res(struct device *parent, const char *name, + resource_size_t base, size_t size, int irq1, + int irq2, void *pdata, unsigned int periphid, + struct resource *resbase); +struct amba_device * +amba_ahb_device_add_res(struct device *parent, const char *name, + resource_size_t base, size_t size, int irq1, + int irq2, void *pdata, unsigned int periphid, + struct resource *resbase); +void amba_device_unregister(struct amba_device *); +struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); +int amba_request_regions(struct amba_device *, const char *); +void amba_release_regions(struct amba_device *); + +static inline int 
amba_pclk_enable(struct amba_device *dev) +{ + return clk_enable(dev->pclk); +} + +static inline void amba_pclk_disable(struct amba_device *dev) +{ + clk_disable(dev->pclk); +} + +static inline int amba_pclk_prepare(struct amba_device *dev) +{ + return clk_prepare(dev->pclk); +} + +static inline void amba_pclk_unprepare(struct amba_device *dev) +{ + clk_unprepare(dev->pclk); +} + +/* Some drivers don't use the struct amba_device */ +#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) +#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) +#define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) +#define AMBA_PART_BITS(a) ((a) & 0xfff) + +#define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) +#define amba_rev(d) AMBA_REV_BITS((d)->periphid) +#define amba_manf(d) AMBA_MANF_BITS((d)->periphid) +#define amba_part(d) AMBA_PART_BITS((d)->periphid) + +#define __AMBA_DEV(busid, data, mask) \ + { \ + .coherent_dma_mask = mask, \ + .init_name = busid, \ + .platform_data = data, \ + } + +/* + * APB devices do not themselves have the ability to address memory, + * so DMA masks should be zero (much like USB peripheral devices.) + * The DMA controller DMA masks should be used instead (much like + * USB host controllers in conventional PCs.) + */ +#define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \ +struct amba_device name##_device = { \ + .dev = __AMBA_DEV(busid, data, 0), \ + .res = DEFINE_RES_MEM(base, SZ_4K), \ + .irq = irqs, \ + .periphid = id, \ +} + +/* + * AHB devices are DMA capable, so set their DMA masks + */ +#define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \ +struct amba_device name##_device = { \ + .dev = __AMBA_DEV(busid, data, ~0ULL), \ + .res = DEFINE_RES_MEM(base, SZ_4K), \ + .irq = irqs, \ + .periphid = id, \ +} + +/* + * module_amba_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. Each + * module may only use this macro once, and calling it replaces module_init() + * and module_exit() + */ +#define module_amba_driver(__amba_drv) \ + module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) + +/* + * builtin_amba_driver() - Helper macro for drivers that don't do anything + * special in driver initcall. This eliminates a lot of boilerplate. Each + * driver may only use this macro once, and calling it replaces the instance + * device_initcall(). + */ +#define builtin_amba_driver(__amba_drv) \ + builtin_driver(__amba_drv, amba_driver_register) + +#endif diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h new file mode 100644 index 0000000..421b0fa --- /dev/null +++ b/include/linux/amba/clcd-regs.h @@ -0,0 +1,87 @@ +/* + * David A Rusling + * + * Copyright (C) 2001 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#ifndef AMBA_CLCD_REGS_H +#define AMBA_CLCD_REGS_H + +/* + * CLCD Controller Internal Register addresses + */ +#define CLCD_TIM0 0x00000000 +#define CLCD_TIM1 0x00000004 +#define CLCD_TIM2 0x00000008 +#define CLCD_TIM3 0x0000000c +#define CLCD_UBAS 0x00000010 +#define CLCD_LBAS 0x00000014 + +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + +#define CLCD_PALL 0x00000200 +#define CLCD_PALETTE 0x00000200 + +#define TIM2_PCD_LO_MASK GENMASK(4, 0) +#define TIM2_PCD_LO_BITS 5 +#define TIM2_CLKSEL (1 << 5) +#define TIM2_ACB_MASK GENMASK(10, 6) +#define TIM2_IVS (1 << 11) +#define TIM2_IHS (1 << 12) +#define TIM2_IPC (1 << 13) +#define TIM2_IOE (1 << 14) +#define TIM2_BCD (1 << 26) +#define TIM2_PCD_HI_MASK GENMASK(31, 27) +#define TIM2_PCD_HI_BITS 5 +#define TIM2_PCD_HI_SHIFT 27 + +#define CNTL_LCDEN (1 << 0) +#define CNTL_LCDBPP1 (0 << 1) +#define CNTL_LCDBPP2 (1 << 1) +#define CNTL_LCDBPP4 (2 << 1) +#define CNTL_LCDBPP8 (3 << 1) +#define CNTL_LCDBPP16 (4 << 1) +#define CNTL_LCDBPP16_565 (6 << 1) +#define CNTL_LCDBPP16_444 (7 << 1) +#define CNTL_LCDBPP24 (5 << 1) +#define CNTL_LCDBW (1 << 4) +#define CNTL_LCDTFT (1 << 5) +#define CNTL_LCDMONO8 (1 << 6) +#define CNTL_LCDDUAL (1 << 7) +#define CNTL_BGR (1 << 8) +#define CNTL_BEBO (1 << 9) +#define CNTL_BEPO (1 << 10) +#define CNTL_LCDPWR (1 << 11) +#define CNTL_LCDVCOMP(x) ((x) << 12) +#define CNTL_LDMAFIFOTIME (1 << 15) +#define CNTL_WATERMARK (1 << 16) + +/* ST Microelectronics variant bits */ +#define CNTL_ST_1XBPP_444 0x0 +#define CNTL_ST_1XBPP_5551 (1 << 17) +#define CNTL_ST_1XBPP_565 (1 << 18) +#define CNTL_ST_CDWID_12 0x0 +#define CNTL_ST_CDWID_16 (1 << 19) +#define CNTL_ST_CDWID_18 (1 << 20) +#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20)) +#define CNTL_ST_CEAEN (1 << 21) +#define CNTL_ST_LCDBPP24_PACKED (6 << 1) + +#endif /* AMBA_CLCD_REGS_H */ diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h new file mode 100644 index 0000000..b6e0cbe --- /dev/null +++ b/include/linux/amba/clcd.h @@ -0,0 +1,290 @@ +/* + * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel. + * + * David A Rusling + * + * Copyright (C) 2001 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
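+ *
+ * Besides the board and panel data structures, this header provides the
+ * clcdfb_decode() and clcdfb_check() helpers shared by the CLCD drivers.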
+ */ +#include +#include + +enum { + /* individual formats */ + CLCD_CAP_RGB444 = (1 << 0), + CLCD_CAP_RGB5551 = (1 << 1), + CLCD_CAP_RGB565 = (1 << 2), + CLCD_CAP_RGB888 = (1 << 3), + CLCD_CAP_BGR444 = (1 << 4), + CLCD_CAP_BGR5551 = (1 << 5), + CLCD_CAP_BGR565 = (1 << 6), + CLCD_CAP_BGR888 = (1 << 7), + + /* connection layouts */ + CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444, + CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551, + CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565, + CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888, + + /* red/blue ordering */ + CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 | + CLCD_CAP_RGB565 | CLCD_CAP_RGB888, + CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 | + CLCD_CAP_BGR565 | CLCD_CAP_BGR888, + + CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB, +}; + +struct backlight_device; + +struct clcd_panel { + struct fb_videomode mode; + signed short width; /* width in mm */ + signed short height; /* height in mm */ + u32 tim2; + u32 tim3; + u32 cntl; + u32 caps; + unsigned int bpp:8, + fixedtimings:1, + grayscale:1; + unsigned int connector; + struct backlight_device *backlight; + /* + * If the B/R lines are switched between the CLCD + * and the panel we need to know this and not try to + * compensate with the BGR bit in the control register. + */ + bool bgr_connection; +}; + +struct clcd_regs { + u32 tim0; + u32 tim1; + u32 tim2; + u32 tim3; + u32 cntl; + unsigned long pixclock; +}; + +struct clcd_fb; + +/* + * the board-type specific routines + */ +struct clcd_board { + const char *name; + + /* + * Optional. Hardware capability flags. + */ + u32 caps; + + /* + * Optional. Check whether the var structure is acceptable + * for this display. + */ + int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var); + + /* + * Compulsory. Decode fb->fb.var into regs->*. In the case of + * fixed timing, set regs->* to the register values required. + */ + void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs); + + /* + * Optional. Disable any extra display hardware. + */ + void (*disable)(struct clcd_fb *); + + /* + * Optional. Enable any extra display hardware. + */ + void (*enable)(struct clcd_fb *); + + /* + * Setup platform specific parts of CLCD driver + */ + int (*setup)(struct clcd_fb *); + + /* + * mmap the framebuffer memory + */ + int (*mmap)(struct clcd_fb *, struct vm_area_struct *); + + /* + * Remove platform specific parts of CLCD driver + */ + void (*remove)(struct clcd_fb *); +}; + +struct amba_device; +struct clk; + +/* this data structure describes each frame buffer device we find */ +struct clcd_fb { + struct fb_info fb; + struct amba_device *dev; + struct clk *clk; + struct clcd_panel *panel; + struct clcd_board *board; + void *board_data; + void __iomem *regs; + u16 off_ienb; + u16 off_cntl; + u32 clcd_cntl; + u32 cmap[16]; + bool clk_enabled; +}; + +static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) +{ + struct fb_var_screeninfo *var = &fb->fb.var; + u32 val, cpl; + + /* + * Program the CLCD controller registers and start the CLCD + */ + val = ((var->xres / 16) - 1) << 2; + val |= (var->hsync_len - 1) << 8; + val |= (var->right_margin - 1) << 16; + val |= (var->left_margin - 1) << 24; + regs->tim0 = val; + + val = var->yres; + if (fb->panel->cntl & CNTL_LCDDUAL) + val /= 2; + val -= 1; + val |= (var->vsync_len - 1) << 10; + val |= var->lower_margin << 16; + val |= var->upper_margin << 24; + regs->tim1 = val; + + val = fb->panel->tim2; + val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 
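+		/* no inversion if active-high sync was requested, else set TIM2_IHS */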
0 : TIM2_IHS; + val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS; + + cpl = var->xres_virtual; + if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */ + /* / 1 */; + else if (!var->grayscale) /* STN color */ + cpl = cpl * 8 / 3; + else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */ + cpl /= 8; + else /* STN monochrome, 4bit */ + cpl /= 4; + + regs->tim2 = val | ((cpl - 1) << 16); + + regs->tim3 = fb->panel->tim3; + + val = fb->panel->cntl; + if (var->grayscale) + val |= CNTL_LCDBW; + + if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) { + /* + * if board and panel supply capabilities, we can support + * changing BGR/RGB depending on supplied parameters. Here + * we switch to what the framebuffer is providing if need + * be, so if the framebuffer is BGR but the display connection + * is RGB (first case) we switch it around. Vice versa mutatis + * mutandis if the framebuffer is RGB but the display connection + * is BGR, we flip it around. + */ + if (var->red.offset == 0) + val &= ~CNTL_BGR; + else + val |= CNTL_BGR; + if (fb->panel->bgr_connection) + val ^= CNTL_BGR; + } + + switch (var->bits_per_pixel) { + case 1: + val |= CNTL_LCDBPP1; + break; + case 2: + val |= CNTL_LCDBPP2; + break; + case 4: + val |= CNTL_LCDBPP4; + break; + case 8: + val |= CNTL_LCDBPP8; + break; + case 16: + /* + * PL110 cannot choose between 5551 and 565 modes in its + * control register. It is possible to use 565 with + * custom external wiring. + */ + if (amba_part(fb->dev) == 0x110 || + var->green.length == 5) + val |= CNTL_LCDBPP16; + else if (var->green.length == 6) + val |= CNTL_LCDBPP16_565; + else + val |= CNTL_LCDBPP16_444; + break; + case 32: + val |= CNTL_LCDBPP24; + break; + } + + regs->cntl = val; + regs->pixclock = var->pixclock; +} + +static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var) +{ + var->xres_virtual = var->xres = (var->xres + 15) & ~15; + var->yres_virtual = var->yres = (var->yres + 1) & ~1; + +#define CHECK(e,l,h) (var->e < l || var->e > h) + if (CHECK(right_margin, (5+1), 256) || /* back porch */ + CHECK(left_margin, (5+1), 256) || /* front porch */ + CHECK(hsync_len, (5+1), 256) || + var->xres > 4096 || + var->lower_margin > 255 || /* back porch */ + var->upper_margin > 255 || /* front porch */ + var->vsync_len > 32 || + var->yres > 1024) + return -EINVAL; +#undef CHECK + + /* single panel mode: PCD = max(PCD, 1) */ + /* dual panel mode: PCD = max(PCD, 5) */ + + /* + * You can't change the grayscale setting, and + * we can only do non-interlaced video. + */ + if (var->grayscale != fb->fb.var.grayscale || + (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) + return -EINVAL; + +#define CHECK(e) (var->e != fb->fb.var.e) + if (fb->panel->fixedtimings && + (CHECK(xres) || + CHECK(yres) || + CHECK(bits_per_pixel) || + CHECK(pixclock) || + CHECK(left_margin) || + CHECK(right_margin) || + CHECK(upper_margin) || + CHECK(lower_margin) || + CHECK(hsync_len) || + CHECK(vsync_len) || + CHECK(sync))) + return -EINVAL; +#undef CHECK + + var->nonstd = 0; + var->accel_flags = 0; + + return 0; +} diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h new file mode 100644 index 0000000..94dd727 --- /dev/null +++ b/include/linux/amba/kmi.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * linux/include/asm-arm/hardware/amba_kmi.h + * + * Internal header file for AMBA KMI ports + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. 
+ * + * --------------------------------------------------------------------------- + * From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical + * Reference Manual - ARM DDI 0143B - see http://www.arm.com/ + * --------------------------------------------------------------------------- + */ +#ifndef ASM_ARM_HARDWARE_AMBA_KMI_H +#define ASM_ARM_HARDWARE_AMBA_KMI_H + +/* + * KMI control register: + * KMICR_TYPE 0 = PS2/AT mode, 1 = No line control bit mode + * KMICR_RXINTREN 1 = enable RX interrupts + * KMICR_TXINTREN 1 = enable TX interrupts + * KMICR_EN 1 = enable KMI + * KMICR_FD 1 = force KMI data low + * KMICR_FC 1 = force KMI clock low + */ +#define KMICR (KMI_BASE + 0x00) +#define KMICR_TYPE (1 << 5) +#define KMICR_RXINTREN (1 << 4) +#define KMICR_TXINTREN (1 << 3) +#define KMICR_EN (1 << 2) +#define KMICR_FD (1 << 1) +#define KMICR_FC (1 << 0) + +/* + * KMI status register: + * KMISTAT_TXEMPTY 1 = transmitter register empty + * KMISTAT_TXBUSY 1 = currently sending data + * KMISTAT_RXFULL 1 = receiver register ready to be read + * KMISTAT_RXBUSY 1 = currently receiving data + * KMISTAT_RXPARITY parity of last databyte received + * KMISTAT_IC current level of KMI clock input + * KMISTAT_ID current level of KMI data input + */ +#define KMISTAT (KMI_BASE + 0x04) +#define KMISTAT_TXEMPTY (1 << 6) +#define KMISTAT_TXBUSY (1 << 5) +#define KMISTAT_RXFULL (1 << 4) +#define KMISTAT_RXBUSY (1 << 3) +#define KMISTAT_RXPARITY (1 << 2) +#define KMISTAT_IC (1 << 1) +#define KMISTAT_ID (1 << 0) + +/* + * KMI data register + */ +#define KMIDATA (KMI_BASE + 0x08) + +/* + * KMI clock divisor: to generate 8MHz internal clock + * div = (ref / 8MHz) - 1; 0 <= div <= 15 + */ +#define KMICLKDIV (KMI_BASE + 0x0c) + +/* + * KMI interrupt register: + * KMIIR_TXINTR 1 = transmit interrupt asserted + * KMIIR_RXINTR 1 = receive interrupt asserted + */ +#define KMIIR (KMI_BASE + 0x10) +#define KMIIR_TXINTR (1 << 1) +#define KMIIR_RXINTR (1 << 0) + +/* + * The size of the KMI primecell + */ +#define KMI_SIZE (0x100) + +#endif diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h new file mode 100644 index 0000000..c92ebc3 --- /dev/null +++ b/include/linux/amba/mmci.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/amba/mmci.h + */ +#ifndef AMBA_MMCI_H +#define AMBA_MMCI_H + +#include + +/** + * struct mmci_platform_data - platform configuration for the MMCI + * (also known as PL180) block. + * @ocr_mask: available voltages on the 4 pins from the block, this + * is ignored if a regulator is used, see the MMC_VDD_* masks in + * mmc/host.h + * @ios_handler: a callback function to act on specfic ios changes, + * used for example to control a levelshifter + * mask into a value to be binary (or set some other custom bits + * in MMCIPWR) or:ed and written into the MMCIPWR register of the + * block. May also control external power based on the power_mode. 
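+ *	For example (sketch; my_board_set_level_shifter() is hypothetical):
+ *
+ *	static int my_ios_handler(struct device *dev, struct mmc_ios *ios)
+ *	{
+ *		return my_board_set_level_shifter(ios->power_mode);
+ *	}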
+ * @status: if no GPIO line was given to the block, this function will
+ *	be called to determine whether a card is present in the MMC slot
+ *	or not
+ */
+struct mmci_platform_data {
+	unsigned int ocr_mask;
+	int (*ios_handler)(struct device *, struct mmc_ios *);
+	unsigned int (*status)(struct device *);
+};
+
+#endif
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
new file mode 100644
index 0000000..131b27c
--- /dev/null
+++ b/include/linux/amba/pl022.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/linux/amba/pl022.h
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij
+ *
+ * Initial version inspired by:
+ *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ *	Sachin Verma
+ */
+
+#ifndef _SSP_PL022_H
+#define _SSP_PL022_H
+
+#include
+
+/**
+ * enum ssp_loopback - whether the SSP is in loopback mode or not
+ */
+enum ssp_loopback {
+	LOOPBACK_DISABLED,
+	LOOPBACK_ENABLED
+};
+
+/**
+ * enum ssp_interface - interfaces allowed for this SSP Controller
+ * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface
+ * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial
+ *	interface
+ * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire
+ *	interface
+ * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810
+ *	&STn8815 only)
+ */
+enum ssp_interface {
+	SSP_INTERFACE_MOTOROLA_SPI,
+	SSP_INTERFACE_TI_SYNC_SERIAL,
+	SSP_INTERFACE_NATIONAL_MICROWIRE,
+	SSP_INTERFACE_UNIDIRECTIONAL
+};
+
+/**
+ * enum ssp_hierarchy - whether SSP is configured as Master or Slave
+ */
+enum ssp_hierarchy {
+	SSP_MASTER,
+	SSP_SLAVE
+};
+
+/**
+ * struct ssp_clock_params - clock parameters, to set the SSP clock at a
+ * desired freq
+ */
+struct ssp_clock_params {
+	u8 cpsdvsr;	/* value from 2 to 254 (even only!) */
+	u8 scr;		/* value from 0 to 255 */
+};
+
+/**
+ * enum ssp_rx_endian - endianness of Rx FIFO data;
+ * this feature is only available in the ST version of the PL022
+ */
+enum ssp_rx_endian {
+	SSP_RX_MSB,
+	SSP_RX_LSB
+};
+
+/**
+ * enum ssp_tx_endian - endianness of Tx FIFO data
+ */
+enum ssp_tx_endian {
+	SSP_TX_MSB,
+	SSP_TX_LSB
+};
+
+/**
+ * enum ssp_data_size - number of bits in one data element
+ */
+enum ssp_data_size {
+	SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6,
+	SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9,
+	SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12,
+	SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15,
+	SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18,
+	SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21,
+	SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24,
+	SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27,
+	SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30,
+	SSP_DATA_BITS_31, SSP_DATA_BITS_32
+};
+
+/**
+ * enum ssp_mode - SSP mode of operation (communication modes)
+ */
+enum ssp_mode {
+	INTERRUPT_TRANSFER,
+	POLLING_TRANSFER,
+	DMA_TRANSFER
+};
+
+/**
+ * enum ssp_rx_level_trig - receive FIFO watermark level which triggers
+ * the IT: the interrupt fires when _N_ or more elements are in the RX FIFO.
+ */
+enum ssp_rx_level_trig {
+	SSP_RX_1_OR_MORE_ELEM,
+	SSP_RX_4_OR_MORE_ELEM,
+	SSP_RX_8_OR_MORE_ELEM,
+	SSP_RX_16_OR_MORE_ELEM,
+	SSP_RX_32_OR_MORE_ELEM
+};
+
+/**
+ * enum ssp_tx_level_trig - transmit FIFO watermark level which triggers
+ * the IT: the interrupt fires when there are _N_ or more empty locations
+ * in the TX FIFO.
+ */
+enum ssp_tx_level_trig {
+	SSP_TX_1_OR_MORE_EMPTY_LOC,
+	SSP_TX_4_OR_MORE_EMPTY_LOC,
+	SSP_TX_8_OR_MORE_EMPTY_LOC,
+	SSP_TX_16_OR_MORE_EMPTY_LOC,
+	SSP_TX_32_OR_MORE_EMPTY_LOC
+};
+
+/**
+ * enum ssp_spi_clk_phase - clock phase (Motorola SPI interface only)
+ * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity)
+ * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity)
+ */
+enum ssp_spi_clk_phase {
+	SSP_CLK_FIRST_EDGE,
+	SSP_CLK_SECOND_EDGE
+};
+
+/**
+ * enum ssp_spi_clk_pol - clock polarity (Motorola SPI interface only)
+ * @SSP_CLK_POL_IDLE_LOW: Low inactive level
+ * @SSP_CLK_POL_IDLE_HIGH: High inactive level
+ */
+enum ssp_spi_clk_pol {
+	SSP_CLK_POL_IDLE_LOW,
+	SSP_CLK_POL_IDLE_HIGH
+};
+
+/**
+ * enum ssp_microwire_ctrl_len - Microwire control lengths: command size
+ * in Microwire format
+ */
+enum ssp_microwire_ctrl_len {
+	SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6,
+	SSP_BITS_7, SSP_BITS_8, SSP_BITS_9,
+	SSP_BITS_10, SSP_BITS_11, SSP_BITS_12,
+	SSP_BITS_13, SSP_BITS_14, SSP_BITS_15,
+	SSP_BITS_16, SSP_BITS_17, SSP_BITS_18,
+	SSP_BITS_19, SSP_BITS_20, SSP_BITS_21,
+	SSP_BITS_22, SSP_BITS_23, SSP_BITS_24,
+	SSP_BITS_25, SSP_BITS_26, SSP_BITS_27,
+	SSP_BITS_28, SSP_BITS_29, SSP_BITS_30,
+	SSP_BITS_31, SSP_BITS_32
+};
+
+/**
+ * enum ssp_microwire_wait_state - Microwire wait state
+ * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit
+ * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit
+ */
+enum ssp_microwire_wait_state {
+	SSP_MWIRE_WAIT_ZERO,
+	SSP_MWIRE_WAIT_ONE
+};
+
+/**
+ * enum ssp_duplex - whether Full/Half Duplex on microwire, only
+ * available in the ST Micro variant.
+ * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional,
+ *	SSPRXD not used
+ * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is
+ *	an input.
+ */
+enum ssp_duplex {
+	SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+	SSP_MICROWIRE_CHANNEL_HALF_DUPLEX
+};
+
+/**
+ * enum ssp_clkdelay - an optional clock delay on the feedback clock,
+ * only available in the ST Micro PL023 variant.
+ * @SSP_FEEDBACK_CLK_DELAY_NONE: no delay, the data coming in from the
+ *	slave is sampled directly
+ * @SSP_FEEDBACK_CLK_DELAY_1T: the incoming slave data is sampled with
+ *	a delay of T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_2T: ditto, with a delay of 2T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_3T: ditto, with a delay of 3T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_4T: ditto, with a delay of 4T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_5T: ditto, with a delay of 5T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_6T: ditto, with a delay of 6T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_7T: ditto, with a delay of 7T-dt
+ */
+enum ssp_clkdelay {
+	SSP_FEEDBACK_CLK_DELAY_NONE,
+	SSP_FEEDBACK_CLK_DELAY_1T,
+	SSP_FEEDBACK_CLK_DELAY_2T,
+	SSP_FEEDBACK_CLK_DELAY_3T,
+	SSP_FEEDBACK_CLK_DELAY_4T,
+	SSP_FEEDBACK_CLK_DELAY_5T,
+	SSP_FEEDBACK_CLK_DELAY_6T,
+	SSP_FEEDBACK_CLK_DELAY_7T
+};
+
+/**
+ * enum ssp_chip_select - chip select/deselect commands
+ */
+enum ssp_chip_select {
+	SSP_CHIP_SELECT,
+	SSP_CHIP_DESELECT
+};
+
+
+struct dma_chan;
+/**
+ * struct pl022_ssp_controller - device.platform_data for SPI controller devices.
+ * @bus_id: identifier for this bus
+ * @num_chipselect: chipselects are used to distinguish individual
+ *	SPI slaves, and are numbered from zero to num_chipselect - 1.
+ *	Each slave has a chipselect signal, but it's common that not
+ *	every chipselect is connected to a slave.
+ * @enable_dma: if true enables DMA driven transfers.
+ * @dma_rx_param: parameter to locate an RX DMA channel.
+ * @dma_tx_param: parameter to locate a TX DMA channel.
+ * @autosuspend_delay: delay in ms following transfer completion before the
+ *	runtime power management system suspends the device. A setting of 0
+ *	indicates no delay and the device will be suspended immediately.
+ * @rt: indicates the controller should run the message pump with realtime
+ *	priority to minimise the transfer latency on the bus.
+ * @chipselects: list of chip select gpios
+ */
+struct pl022_ssp_controller {
+	u16 bus_id;
+	u8 num_chipselect;
+	u8 enable_dma:1;
+	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+	void *dma_rx_param;
+	void *dma_tx_param;
+	int autosuspend_delay;
+	bool rt;
+	int *chipselects;
+};
+
+/**
+ * struct pl022_config_chip - spi_board_info.controller_data for SPI
+ * slave devices, copied to spi_device.controller_data.
+ *
+ * @iface: Interface type (Motorola, TI, Microwire, Universal)
+ * @hierarchy: sets whether interface is master or slave
+ * @slave_tx_disable: SSPTXD is disconnected (in slave mode only)
+ * @clk_freq: Tune freq parameters of SSP (when in master mode)
+ * @com_mode: communication mode: polling, interrupt or DMA
+ * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode)
+ * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode)
+ * @ctrl_len: Microwire interface: control length
+ * @wait_state: Microwire interface: wait state
+ * @duplex: Microwire interface: full/half duplex
+ * @clkdelay: on the PL023 variant, the delay in feedback clock cycles
+ *	before sampling the incoming line
+ * @cs_control: function pointer to a board-specific function used to
+ *	assert/deassert an I/O port to control HW generation of the
+ *	device's chip-select.
+ */
+struct pl022_config_chip {
+	enum ssp_interface iface;
+	enum ssp_hierarchy hierarchy;
+	bool slave_tx_disable;
+	struct ssp_clock_params clk_freq;
+	enum ssp_mode com_mode;
+	enum ssp_rx_level_trig rx_lev_trig;
+	enum ssp_tx_level_trig tx_lev_trig;
+	enum ssp_microwire_ctrl_len ctrl_len;
+	enum ssp_microwire_wait_state wait_state;
+	enum ssp_duplex duplex;
+	enum ssp_clkdelay clkdelay;
+	void (*cs_control) (u32 control);
+};
+
+#endif /* _SSP_PL022_H */
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
new file mode 100644
index 0000000..e192d54
--- /dev/null
+++ b/include/linux/amba/pl080.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* include/linux/amba/pl080.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks
+ *
+ * ARM PrimeCell PL080 DMA controller
+*/
+
+/* Note, there are some Samsung updates to this controller block which
+ * make it not entirely compatible with the PL080 specification from
+ * ARM. When in doubt, check the Samsung documentation first.
+ *
+ * The Samsung defines are PL080S, and add an extra control register,
+ * the ability to move more than 2^11 counts of data and some extra
+ * OneNAND features.
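+ * (The PL080S additions appear below as the PL080S_CH_CONTROL2 and
+ * PL080S_CH_CONFIG register offsets and the wider
+ * PL080S_CONTROL_TRANSFER_SIZE_MASK.)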
+*/ + +#ifndef ASM_PL080_H +#define ASM_PL080_H + +#define PL080_INT_STATUS (0x00) +#define PL080_TC_STATUS (0x04) +#define PL080_TC_CLEAR (0x08) +#define PL080_ERR_STATUS (0x0C) +#define PL080_ERR_CLEAR (0x10) +#define PL080_RAW_TC_STATUS (0x14) +#define PL080_RAW_ERR_STATUS (0x18) +#define PL080_EN_CHAN (0x1c) +#define PL080_SOFT_BREQ (0x20) +#define PL080_SOFT_SREQ (0x24) +#define PL080_SOFT_LBREQ (0x28) +#define PL080_SOFT_LSREQ (0x2C) + +#define PL080_CONFIG (0x30) +#define PL080_CONFIG_M2_BE BIT(2) +#define PL080_CONFIG_M1_BE BIT(1) +#define PL080_CONFIG_ENABLE BIT(0) + +#define PL080_SYNC (0x34) + +/* The Faraday Technology FTDMAC020 variant registers */ +#define FTDMAC020_CH_BUSY (0x20) +/* Identical to PL080_CONFIG */ +#define FTDMAC020_CSR (0x24) +/* Identical to PL080_SYNC */ +#define FTDMAC020_SYNC (0x2C) +#define FTDMAC020_REVISION (0x30) +#define FTDMAC020_FEATURE (0x34) + +/* Per channel configuration registers */ +#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20))) +#define PL080_CH_SRC_ADDR (0x00) +#define PL080_CH_DST_ADDR (0x04) +#define PL080_CH_LLI (0x08) +#define PL080_CH_CONTROL (0x0C) +#define PL080_CH_CONFIG (0x10) +#define PL080S_CH_CONTROL2 (0x10) +#define PL080S_CH_CONFIG (0x14) +/* The Faraday FTDMAC020 derivative shuffles the registers around */ +#define FTDMAC020_CH_CSR (0x00) +#define FTDMAC020_CH_CFG (0x04) +#define FTDMAC020_CH_SRC_ADDR (0x08) +#define FTDMAC020_CH_DST_ADDR (0x0C) +#define FTDMAC020_CH_LLP (0x10) +#define FTDMAC020_CH_SIZE (0x14) + +#define PL080_LLI_ADDR_MASK GENMASK(31, 2) +#define PL080_LLI_ADDR_SHIFT (2) +#define PL080_LLI_LM_AHB2 BIT(0) + +#define PL080_CONTROL_TC_IRQ_EN BIT(31) +#define PL080_CONTROL_PROT_MASK GENMASK(30, 28) +#define PL080_CONTROL_PROT_SHIFT (28) +#define PL080_CONTROL_PROT_CACHE BIT(30) +#define PL080_CONTROL_PROT_BUFF BIT(29) +#define PL080_CONTROL_PROT_SYS BIT(28) +#define PL080_CONTROL_DST_INCR BIT(27) +#define PL080_CONTROL_SRC_INCR BIT(26) +#define PL080_CONTROL_DST_AHB2 BIT(25) +#define PL080_CONTROL_SRC_AHB2 BIT(24) +#define PL080_CONTROL_DWIDTH_MASK GENMASK(23, 21) +#define PL080_CONTROL_DWIDTH_SHIFT (21) +#define PL080_CONTROL_SWIDTH_MASK GENMASK(20, 18) +#define PL080_CONTROL_SWIDTH_SHIFT (18) +#define PL080_CONTROL_DB_SIZE_MASK GENMASK(17, 15) +#define PL080_CONTROL_DB_SIZE_SHIFT (15) +#define PL080_CONTROL_SB_SIZE_MASK GENMASK(14, 12) +#define PL080_CONTROL_SB_SIZE_SHIFT (12) +#define PL080_CONTROL_TRANSFER_SIZE_MASK GENMASK(11, 0) +#define PL080S_CONTROL_TRANSFER_SIZE_MASK GENMASK(24, 0) +#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0) + +#define PL080_BSIZE_1 (0x0) +#define PL080_BSIZE_4 (0x1) +#define PL080_BSIZE_8 (0x2) +#define PL080_BSIZE_16 (0x3) +#define PL080_BSIZE_32 (0x4) +#define PL080_BSIZE_64 (0x5) +#define PL080_BSIZE_128 (0x6) +#define PL080_BSIZE_256 (0x7) + +#define PL080_WIDTH_8BIT (0x0) +#define PL080_WIDTH_16BIT (0x1) +#define PL080_WIDTH_32BIT (0x2) + +#define PL080N_CONFIG_ITPROT BIT(20) +#define PL080N_CONFIG_SECPROT BIT(19) +#define PL080_CONFIG_HALT BIT(18) +#define PL080_CONFIG_ACTIVE BIT(17) /* RO */ +#define PL080_CONFIG_LOCK BIT(16) +#define PL080_CONFIG_TC_IRQ_MASK BIT(15) +#define PL080_CONFIG_ERR_IRQ_MASK BIT(14) +#define PL080_CONFIG_FLOW_CONTROL_MASK GENMASK(13, 11) +#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11) +#define PL080_CONFIG_DST_SEL_MASK GENMASK(9, 6) +#define PL080_CONFIG_DST_SEL_SHIFT (6) +#define PL080_CONFIG_SRC_SEL_MASK GENMASK(4, 1) +#define PL080_CONFIG_SRC_SEL_SHIFT (1) +#define PL080_CONFIG_ENABLE BIT(0) + +#define PL080_FLOW_MEM2MEM (0x0) +#define 
PL080_FLOW_MEM2PER (0x1) +#define PL080_FLOW_PER2MEM (0x2) +#define PL080_FLOW_SRC2DST (0x3) +#define PL080_FLOW_SRC2DST_DST (0x4) +#define PL080_FLOW_MEM2PER_PER (0x5) +#define PL080_FLOW_PER2MEM_PER (0x6) +#define PL080_FLOW_SRC2DST_SRC (0x7) + +#define FTDMAC020_CH_CSR_TC_MSK BIT(31) +/* Later versions have a threshold in bits 24..26, */ +#define FTDMAC020_CH_CSR_FIFOTH_MSK GENMASK(26, 24) +#define FTDMAC020_CH_CSR_FIFOTH_SHIFT (24) +#define FTDMAC020_CH_CSR_CHPR1_MSK GENMASK(23, 22) +#define FTDMAC020_CH_CSR_PROT3 BIT(21) +#define FTDMAC020_CH_CSR_PROT2 BIT(20) +#define FTDMAC020_CH_CSR_PROT1 BIT(19) +#define FTDMAC020_CH_CSR_SRC_SIZE_MSK GENMASK(18, 16) +#define FTDMAC020_CH_CSR_SRC_SIZE_SHIFT (16) +#define FTDMAC020_CH_CSR_ABT BIT(15) +#define FTDMAC020_CH_CSR_SRC_WIDTH_MSK GENMASK(13, 11) +#define FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT (11) +#define FTDMAC020_CH_CSR_DST_WIDTH_MSK GENMASK(10, 8) +#define FTDMAC020_CH_CSR_DST_WIDTH_SHIFT (8) +#define FTDMAC020_CH_CSR_MODE BIT(7) +/* 00 = increase, 01 = decrease, 10 = fix */ +#define FTDMAC020_CH_CSR_SRCAD_CTL_MSK GENMASK(6, 5) +#define FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT (5) +#define FTDMAC020_CH_CSR_DSTAD_CTL_MSK GENMASK(4, 3) +#define FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT (3) +#define FTDMAC020_CH_CSR_SRC_SEL BIT(2) +#define FTDMAC020_CH_CSR_DST_SEL BIT(1) +#define FTDMAC020_CH_CSR_EN BIT(0) + +/* FIFO threshold setting */ +#define FTDMAC020_CH_CSR_FIFOTH_1 (0x0) +#define FTDMAC020_CH_CSR_FIFOTH_2 (0x1) +#define FTDMAC020_CH_CSR_FIFOTH_4 (0x2) +#define FTDMAC020_CH_CSR_FIFOTH_8 (0x3) +#define FTDMAC020_CH_CSR_FIFOTH_16 (0x4) +/* The FTDMAC020 supports 64bit wide transfers */ +#define FTDMAC020_WIDTH_64BIT (0x3) +/* Address can be increased, decreased or fixed */ +#define FTDMAC020_CH_CSR_SRCAD_CTL_INC (0x0) +#define FTDMAC020_CH_CSR_SRCAD_CTL_DEC (0x1) +#define FTDMAC020_CH_CSR_SRCAD_CTL_FIXED (0x2) + +#define FTDMAC020_CH_CFG_LLP_CNT_MASK GENMASK(19, 16) +#define FTDMAC020_CH_CFG_LLP_CNT_SHIFT (16) +#define FTDMAC020_CH_CFG_BUSY BIT(8) +#define FTDMAC020_CH_CFG_INT_ABT_MASK BIT(2) +#define FTDMAC020_CH_CFG_INT_ERR_MASK BIT(1) +#define FTDMAC020_CH_CFG_INT_TC_MASK BIT(0) + +/* Inside the LLIs, the applicable CSR fields are mapped differently */ +#define FTDMAC020_LLI_TC_MSK BIT(28) +#define FTDMAC020_LLI_SRC_WIDTH_MSK GENMASK(27, 25) +#define FTDMAC020_LLI_SRC_WIDTH_SHIFT (25) +#define FTDMAC020_LLI_DST_WIDTH_MSK GENMASK(24, 22) +#define FTDMAC020_LLI_DST_WIDTH_SHIFT (22) +#define FTDMAC020_LLI_SRCAD_CTL_MSK GENMASK(21, 20) +#define FTDMAC020_LLI_SRCAD_CTL_SHIFT (20) +#define FTDMAC020_LLI_DSTAD_CTL_MSK GENMASK(19, 18) +#define FTDMAC020_LLI_DSTAD_CTL_SHIFT (18) +#define FTDMAC020_LLI_SRC_SEL BIT(17) +#define FTDMAC020_LLI_DST_SEL BIT(16) +#define FTDMAC020_LLI_TRANSFER_SIZE_MASK GENMASK(11, 0) +#define FTDMAC020_LLI_TRANSFER_SIZE_SHIFT (0) + +#define FTDMAC020_CFG_LLP_CNT_MASK GENMASK(19, 16) +#define FTDMAC020_CFG_LLP_CNT_SHIFT (16) +#define FTDMAC020_CFG_BUSY BIT(8) +#define FTDMAC020_CFG_INT_ABT_MSK BIT(2) +#define FTDMAC020_CFG_INT_ERR_MSK BIT(1) +#define FTDMAC020_CFG_INT_TC_MSK BIT(0) + +/* DMA linked list chain structure */ + +struct pl080_lli { + u32 src_addr; + u32 dst_addr; + u32 next_lli; + u32 control0; +}; + +struct pl080s_lli { + u32 src_addr; + u32 dst_addr; + u32 next_lli; + u32 control0; + u32 control1; +}; + +#endif /* ASM_PL080_H */ diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h new file mode 100644 index 0000000..3100e0d --- /dev/null +++ b/include/linux/amba/pl08x.h @@ -0,0 +1,130 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
+ *
+ * Copyright (C) 2005 ARM Ltd
+ * Copyright (C) 2010 ST-Ericsson SA
+ *
+ * pl08x information required by platform code
+ *
+ * Please credit ARM.com
+ * Documentation: ARM DDI 0196D
+ */
+
+#ifndef AMBA_PL08X_H
+#define AMBA_PL08X_H
+
+/* We need sizes of structs from this header */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+struct pl08x_driver_data;
+struct pl08x_phy_chan;
+struct pl08x_txd;
+
+/* Bitmasks for selecting AHB ports for DMA transfers */
+enum {
+	PL08X_AHB1 = (1 << 0),
+	PL08X_AHB2 = (1 << 1)
+};
+
+/**
+ * struct pl08x_channel_data - data structure to pass info between
+ * platform and PL08x driver regarding channel configuration
+ * @bus_id: name of this device channel, not just a device name since
+ * devices may have more than one channel e.g. "foo_tx"
+ * @min_signal: the minimum DMA signal number to be muxed in for this
+ * channel (for platforms supporting muxed signals). If you have
+ * static assignments, make sure this is set to the assigned signal
+ * number; the PL08x has 16 possible signals, numbered 0 thru 15, so
+ * when these are not enough they often get muxed (in hardware),
+ * disabling simultaneous use of the same channel for two devices.
+ * @max_signal: the maximum DMA signal number to be muxed in for
+ * the channel. Set to the same as min_signal for
+ * devices with static assignments
+ * @muxval: a number usually used to poke into some mux register to
+ * mux in the signal to this channel
+ * @addr: source/target address in physical memory for this DMA channel,
+ * can be the address of a FIFO register for burst requests for example.
+ * This can be left undefined if the PrimeCell API is used for configuring
+ * this.
+ * @single: the device connected to this channel will request single DMA
+ * transfers, not bursts. (Bursts are default.)
+ * @periph_buses: the device connected to this channel is accessible via
+ * these buses (use PL08X_AHB1 | PL08X_AHB2).
+ */
+struct pl08x_channel_data {
+	const char *bus_id;
+	int min_signal;
+	int max_signal;
+	u32 muxval;
+	dma_addr_t addr;
+	bool single;
+	u8 periph_buses;
+};
+
+enum pl08x_burst_size {
+	PL08X_BURST_SZ_1,
+	PL08X_BURST_SZ_4,
+	PL08X_BURST_SZ_8,
+	PL08X_BURST_SZ_16,
+	PL08X_BURST_SZ_32,
+	PL08X_BURST_SZ_64,
+	PL08X_BURST_SZ_128,
+	PL08X_BURST_SZ_256,
+};
+
+enum pl08x_bus_width {
+	PL08X_BUS_WIDTH_8_BITS,
+	PL08X_BUS_WIDTH_16_BITS,
+	PL08X_BUS_WIDTH_32_BITS,
+};
+
+/**
+ * struct pl08x_platform_data - the platform configuration for the PL08x
+ * PrimeCells.
+ * @slave_channels: the channels defined for the different devices on the
+ * platform, all inclusive, including multiplexed channels. The available
+ * physical channels will be multiplexed around these signals as they are
+ * requested, just enumerate all possible channels.
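+ *
+ * (Editorial illustration, not part of the original header: a board with
+ * a hypothetical "foo" UART might enumerate its channels as
+ *
+ *	static struct pl08x_channel_data foo_channels[] = {
+ *		{ .bus_id = "foo_tx", .min_signal = 0, .max_signal = 0,
+ *		  .periph_buses = PL08X_AHB1 },
+ *		{ .bus_id = "foo_rx", .min_signal = 1, .max_signal = 1,
+ *		  .periph_buses = PL08X_AHB1 },
+ *	};
+ *
+ * and point this member at that array.)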
+ * @num_slave_channels: number of elements in the slave channel array + * @memcpy_burst_size: the appropriate burst size for memcpy operations + * @memcpy_bus_width: memory bus width + * @memcpy_prot_buff: whether memcpy DMA is bufferable + * @memcpy_prot_cache: whether memcpy DMA is cacheable + * @get_xfer_signal: request a physical signal to be used for a DMA transfer + * immediately: if there is some multiplexing or similar blocking the use + * of the channel the transfer can be denied by returning less than zero, + * else it returns the allocated signal number + * @put_xfer_signal: indicate to the platform that this physical signal is not + * running any DMA transfer and multiplexing can be recycled + * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 + * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 + * @slave_map: DMA slave matching table + * @slave_map_len: number of elements in @slave_map + */ +struct pl08x_platform_data { + struct pl08x_channel_data *slave_channels; + unsigned int num_slave_channels; + enum pl08x_burst_size memcpy_burst_size; + enum pl08x_bus_width memcpy_bus_width; + bool memcpy_prot_buff; + bool memcpy_prot_cache; + int (*get_xfer_signal)(const struct pl08x_channel_data *); + void (*put_xfer_signal)(const struct pl08x_channel_data *, int); + u8 lli_buses; + u8 mem_buses; + const struct dma_slave_map *slave_map; + int slave_map_len; +}; + +#ifdef CONFIG_AMBA_PL08X +bool pl08x_filter_id(struct dma_chan *chan, void *chan_id); +#else +static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) +{ + return false; +} +#endif + +#endif /* AMBA_PL08X_H */ diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h new file mode 100644 index 0000000..b17166e --- /dev/null +++ b/include/linux/amba/pl093.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* linux/amba/pl093.h + * + * Copyright (c) 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks + * + * AMBA PL093 SSMC (synchronous static memory controller) + * See DDI0236.pdf (r0p4) for more details +*/ + +#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */ + +/* Offsets for SMBxxxxRy registers */ + +#define SMBIDCYR (0x00) +#define SMBWSTRDR (0x04) +#define SMBWSTWRR (0x08) +#define SMBWSTOENR (0x0C) +#define SMBWSTWENR (0x10) +#define SMBCR (0x14) +#define SMBSR (0x18) +#define SMBWSTBRDR (0x1C) + +/* Masks for SMB registers */ +#define IDCY_MASK (0xf) +#define WSTRD_MASK (0xf) +#define WSTWR_MASK (0xf) +#define WSTOEN_MASK (0xf) +#define WSTWEN_MASK (0xf) + +/* Notes from datasheet: + * WSTOEN <= WSTRD + * WSTWEN <= WSTWR + * + * WSTOEN is not used with nWAIT + */ + +/* SMBCR bit definitions */ +#define SMBCR_BIWRITEEN (1 << 21) +#define SMBCR_ADDRVALIDWRITEEN (1 << 20) +#define SMBCR_SYNCWRITE (1 << 17) +#define SMBCR_BMWRITE (1 << 16) +#define SMBCR_WRAPREAD (1 << 14) +#define SMBCR_BIREADEN (1 << 13) +#define SMBCR_ADDRVALIDREADEN (1 << 12) +#define SMBCR_SYNCREAD (1 << 9) +#define SMBCR_BMREAD (1 << 8) +#define SMBCR_SMBLSPOL (1 << 6) +#define SMBCR_WP (1 << 3) +#define SMBCR_WAITEN (1 << 2) +#define SMBCR_WAITPOL (1 << 1) +#define SMBCR_RBLE (1 << 0) + +#define SMBCR_BURSTLENWRITE_MASK (3 << 18) +#define SMBCR_BURSTLENWRITE_4 (0 << 18) +#define SMBCR_BURSTLENWRITE_8 (1 << 18) +#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18) +#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18) + +#define SMBCR_BURSTLENREAD_MASK (3 << 10) +#define SMBCR_BURSTLENREAD_4 (0 << 10) +#define 
SMBCR_BURSTLENREAD_8 (1 << 10)
+#define SMBCR_BURSTLENREAD_16 (2 << 10)
+#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10)
+
+#define SMBCR_MW_MASK (3 << 4)
+#define SMBCR_MW_8BIT (0 << 4)
+#define SMBCR_MW_16BIT (1 << 4)
+#define SMBCR_MW_M32BIT (2 << 4)
+
+/* SSMC status registers */
+#define SSMCCSR (0x200)
+#define SSMCCR (0x204)
+#define SSMCITCR (0x208)
+#define SSMCITIP (0x20C)
+#define SSMCITIOP (0x210)
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
new file mode 100644
index 0000000..a1307b5
--- /dev/null
+++ b/include/linux/amba/serial.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/include/asm-arm/hardware/serial_amba.h
+ *
+ * Internal header file for AMBA serial ports
+ *
+ * Copyright (C) ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ */
+#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+
+#include <uapi/linux/serial_core.h>
+
+/* -------------------------------------------------------------------------------
+ * From AMBA UART (PL010) Block Specification
+ * -------------------------------------------------------------------------------
+ * UART Register Offsets.
+ */
+#define UART01x_DR 0x00 /* Data read or written from the interface. */
+#define UART01x_RSR 0x04 /* Receive status register (Read). */
+#define UART01x_ECR 0x04 /* Error clear register (Write). */
+#define UART010_LCRH 0x08 /* Line control register, high byte. */
+#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
+#define UART010_LCRM 0x0C /* Line control register, middle byte. */
+#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
+#define UART010_LCRL 0x10 /* Line control register, low byte. */
+#define UART010_CR 0x14 /* Control register. */
+#define UART01x_FR 0x18 /* Flag register (Read only). */
+#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
+#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
+#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */
+#define UART01x_ILPR 0x20 /* IrDA low power counter register. */
+#define UART011_IBRD 0x24 /* Integer baud rate divisor register. */
+#define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */
+#define UART011_LCRH 0x2c /* Line control register. */
+#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */
+#define UART011_CR 0x30 /* Control register. */
+#define UART011_IFLS 0x34 /* Interrupt fifo level select. */
+#define UART011_IMSC 0x38 /* Interrupt mask. */
+#define UART011_RIS 0x3c /* Raw interrupt status. */
+#define UART011_MIS 0x40 /* Masked interrupt status. */
+#define UART011_ICR 0x44 /* Interrupt clear register. */
+#define UART011_DMACR 0x48 /* DMA control register. */
+#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
+#define ST_UART011_XON1 0x54 /* XON1 register. */
+#define ST_UART011_XON2 0x58 /* XON2 register. */
+#define ST_UART011_XOFF1 0x5C /* XOFF1 register. */
+#define ST_UART011_XOFF2 0x60 /* XOFF2 register. */
+#define ST_UART011_ITCR 0x80 /* Integration test control register. */
+#define ST_UART011_ITIP 0x84 /* Integration test input register. */
+#define ST_UART011_ABCR 0x100 /* Autobaud control register. */
+#define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */
+
+/*
+ * ZTE UART register offsets. This UART has a radically different address
+ * allocation from the ARM and ST variants, so we list all registers here.
+ * We assume unlisted registers do not exist.
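+ *
+ * (Editorial illustration: a driver accesses these offsets relative to the
+ * mapped base exactly as it would the standard offsets, e.g. with "base"
+ * an assumed ioremap()ed register base:
+ *
+ *	u32 fr = readl(base + ZX_UART011_FR);
+ *
+ * only the offset differs from the ARM/ST UART011_FR case.)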
+ */ +#define ZX_UART011_DR 0x04 +#define ZX_UART011_FR 0x14 +#define ZX_UART011_IBRD 0x24 +#define ZX_UART011_FBRD 0x28 +#define ZX_UART011_LCRH 0x30 +#define ZX_UART011_CR 0x34 +#define ZX_UART011_IFLS 0x38 +#define ZX_UART011_IMSC 0x40 +#define ZX_UART011_RIS 0x44 +#define ZX_UART011_MIS 0x48 +#define ZX_UART011_ICR 0x4c +#define ZX_UART011_DMACR 0x50 + +#define UART011_DR_OE (1 << 11) +#define UART011_DR_BE (1 << 10) +#define UART011_DR_PE (1 << 9) +#define UART011_DR_FE (1 << 8) + +#define UART01x_RSR_OE 0x08 +#define UART01x_RSR_BE 0x04 +#define UART01x_RSR_PE 0x02 +#define UART01x_RSR_FE 0x01 + +#define UART011_FR_RI 0x100 +#define UART011_FR_TXFE 0x080 +#define UART011_FR_RXFF 0x040 +#define UART01x_FR_TXFF 0x020 +#define UART01x_FR_RXFE 0x010 +#define UART01x_FR_BUSY 0x008 +#define UART01x_FR_DCD 0x004 +#define UART01x_FR_DSR 0x002 +#define UART01x_FR_CTS 0x001 +#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY) + +/* + * Some bits of Flag Register on ZTE device have different position from + * standard ones. + */ +#define ZX_UART01x_FR_BUSY 0x100 +#define ZX_UART01x_FR_DSR 0x008 +#define ZX_UART01x_FR_CTS 0x002 +#define ZX_UART011_FR_RI 0x001 + +#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */ +#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */ +#define UART011_CR_OUT2 0x2000 /* OUT2 */ +#define UART011_CR_OUT1 0x1000 /* OUT1 */ +#define UART011_CR_RTS 0x0800 /* RTS */ +#define UART011_CR_DTR 0x0400 /* DTR */ +#define UART011_CR_RXE 0x0200 /* receive enable */ +#define UART011_CR_TXE 0x0100 /* transmit enable */ +#define UART011_CR_LBE 0x0080 /* loopback enable */ +#define UART010_CR_RTIE 0x0040 +#define UART010_CR_TIE 0x0020 +#define UART010_CR_RIE 0x0010 +#define UART010_CR_MSIE 0x0008 +#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */ +#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */ +#define UART01x_CR_SIREN 0x0002 /* SIR enable */ +#define UART01x_CR_UARTEN 0x0001 /* UART enable */ + +#define UART011_LCRH_SPS 0x80 +#define UART01x_LCRH_WLEN_8 0x60 +#define UART01x_LCRH_WLEN_7 0x40 +#define UART01x_LCRH_WLEN_6 0x20 +#define UART01x_LCRH_WLEN_5 0x00 +#define UART01x_LCRH_FEN 0x10 +#define UART01x_LCRH_STP2 0x08 +#define UART01x_LCRH_EPS 0x04 +#define UART01x_LCRH_PEN 0x02 +#define UART01x_LCRH_BRK 0x01 + +#define ST_UART011_DMAWM_RX_1 (0 << 3) +#define ST_UART011_DMAWM_RX_2 (1 << 3) +#define ST_UART011_DMAWM_RX_4 (2 << 3) +#define ST_UART011_DMAWM_RX_8 (3 << 3) +#define ST_UART011_DMAWM_RX_16 (4 << 3) +#define ST_UART011_DMAWM_RX_32 (5 << 3) +#define ST_UART011_DMAWM_RX_48 (6 << 3) +#define ST_UART011_DMAWM_TX_1 0 +#define ST_UART011_DMAWM_TX_2 1 +#define ST_UART011_DMAWM_TX_4 2 +#define ST_UART011_DMAWM_TX_8 3 +#define ST_UART011_DMAWM_TX_16 4 +#define ST_UART011_DMAWM_TX_32 5 +#define ST_UART011_DMAWM_TX_48 6 + +#define UART010_IIR_RTIS 0x08 +#define UART010_IIR_TIS 0x04 +#define UART010_IIR_RIS 0x02 +#define UART010_IIR_MIS 0x01 + +#define UART011_IFLS_RX1_8 (0 << 3) +#define UART011_IFLS_RX2_8 (1 << 3) +#define UART011_IFLS_RX4_8 (2 << 3) +#define UART011_IFLS_RX6_8 (3 << 3) +#define UART011_IFLS_RX7_8 (4 << 3) +#define UART011_IFLS_TX1_8 (0 << 0) +#define UART011_IFLS_TX2_8 (1 << 0) +#define UART011_IFLS_TX4_8 (2 << 0) +#define UART011_IFLS_TX6_8 (3 << 0) +#define UART011_IFLS_TX7_8 (4 << 0) +/* special values for ST vendor with deeper fifo */ +#define UART011_IFLS_RX_HALF (5 << 3) +#define UART011_IFLS_TX_HALF (5 << 0) + +#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */ +#define UART011_BEIM (1 << 
9) /* break error interrupt mask */ +#define UART011_PEIM (1 << 8) /* parity error interrupt mask */ +#define UART011_FEIM (1 << 7) /* framing error interrupt mask */ +#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */ +#define UART011_TXIM (1 << 5) /* transmit interrupt mask */ +#define UART011_RXIM (1 << 4) /* receive interrupt mask */ +#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */ +#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */ +#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */ +#define UART011_RIMIM (1 << 0) /* RI interrupt mask */ + +#define UART011_OEIS (1 << 10) /* overrun error interrupt status */ +#define UART011_BEIS (1 << 9) /* break error interrupt status */ +#define UART011_PEIS (1 << 8) /* parity error interrupt status */ +#define UART011_FEIS (1 << 7) /* framing error interrupt status */ +#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */ +#define UART011_TXIS (1 << 5) /* transmit interrupt status */ +#define UART011_RXIS (1 << 4) /* receive interrupt status */ +#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */ +#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */ +#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */ +#define UART011_RIMIS (1 << 0) /* RI interrupt status */ + +#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */ +#define UART011_BEIC (1 << 9) /* break error interrupt clear */ +#define UART011_PEIC (1 << 8) /* parity error interrupt clear */ +#define UART011_FEIC (1 << 7) /* framing error interrupt clear */ +#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */ +#define UART011_TXIC (1 << 5) /* transmit interrupt clear */ +#define UART011_RXIC (1 << 4) /* receive interrupt clear */ +#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */ +#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */ +#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */ +#define UART011_RIMIC (1 << 0) /* RI interrupt clear */ + +#define UART011_DMAONERR (1 << 2) /* disable dma on error */ +#define UART011_TXDMAE (1 << 1) /* enable transmit dma */ +#define UART011_RXDMAE (1 << 0) /* enable receive dma */ + +#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE) +#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) + +#ifndef __ASSEMBLY__ +struct amba_device; /* in uncompress this is included but amba/bus.h is not */ +struct amba_pl010_data { + void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); +}; + +struct dma_chan; +struct amba_pl011_data { + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + void *dma_rx_param; + void *dma_tx_param; + bool dma_rx_poll_enable; + unsigned int dma_rx_poll_rate; + unsigned int dma_rx_poll_timeout; + void (*init) (void); + void (*exit) (void); +}; +#endif + +#endif diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h new file mode 100644 index 0000000..58fe9e8 --- /dev/null +++ b/include/linux/amba/sp810.h @@ -0,0 +1,62 @@ +/* + * ARM PrimeXsys System Controller SP810 header file + * + * Copyright (C) 2009 ST Microelectronics + * Viresh Kumar + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __AMBA_SP810_H +#define __AMBA_SP810_H + +#include + +/* sysctl registers offset */ +#define SCCTRL 0x000 +#define SCSYSSTAT 0x004 +#define SCIMCTRL 0x008 +#define SCIMSTAT 0x00C +#define SCXTALCTRL 0x010 +#define SCPLLCTRL 0x014 +#define SCPLLFCTRL 0x018 +#define SCPERCTRL0 0x01C +#define SCPERCTRL1 0x020 +#define SCPEREN 0x024 +#define SCPERDIS 0x028 +#define SCPERCLKEN 0x02C +#define SCPERSTAT 0x030 +#define SCSYSID0 0xEE0 +#define SCSYSID1 0xEE4 +#define SCSYSID2 0xEE8 +#define SCSYSID3 0xEEC +#define SCITCR 0xF00 +#define SCITIR0 0xF04 +#define SCITIR1 0xF08 +#define SCITOR 0xF0C +#define SCCNTCTRL 0xF10 +#define SCCNTDATA 0xF14 +#define SCCNTSTEP 0xF18 +#define SCPERIPHID0 0xFE0 +#define SCPERIPHID1 0xFE4 +#define SCPERIPHID2 0xFE8 +#define SCPERIPHID3 0xFEC +#define SCPCELLID0 0xFF0 +#define SCPCELLID1 0xFF4 +#define SCPCELLID2 0xFF8 +#define SCPCELLID3 0xFFC + +#define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2)) + +static inline void sysctl_soft_reset(void __iomem *base) +{ + /* switch to slow mode */ + writel(0x2, base + SCCTRL); + + /* writing any value to SCSYSSTAT reg will reset system */ + writel(0, base + SCSYSSTAT); +} + +#endif /* __AMBA_SP810_H */ diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h new file mode 100644 index 0000000..21e950e --- /dev/null +++ b/include/linux/amd-iommu.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel + * Leo Duran + */ + +#ifndef _ASM_X86_AMD_IOMMU_H +#define _ASM_X86_AMD_IOMMU_H + +#include + +/* + * This is mainly used to communicate information back-and-forth + * between SVM and IOMMU for setting up and tearing down posted + * interrupt + */ +struct amd_iommu_pi_data { + u32 ga_tag; + u32 prev_ga_tag; + u64 base; + bool is_guest_mode; + struct vcpu_data *vcpu_data; + void *ir_data; +}; + +#ifdef CONFIG_AMD_IOMMU + +struct task_struct; +struct pci_dev; + +extern int amd_iommu_detect(void); +extern int amd_iommu_init_hardware(void); + +/** + * amd_iommu_enable_device_erratum() - Enable erratum workaround for device + * in the IOMMUv2 driver + * @pdev: The PCI device the workaround is necessary for + * @erratum: The erratum workaround to enable + * + * The function needs to be called before amd_iommu_init_device(). + * Possible values for the erratum number are for now: + * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI + * is enabled + * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI + * requests to one + */ +#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 +#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 + +extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); + +/** + * amd_iommu_init_device() - Init device for use with IOMMUv2 driver + * @pdev: The PCI device to initialize + * @pasids: Number of PASIDs to support for this device + * + * This function does all setup for the device pdev so that it can be + * used with IOMMUv2. + * Returns 0 on success or negative value on error. 
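+ *
+ * (Editorial sketch of the expected call sequence, with 16 PASIDs as an
+ * assumed figure:
+ *
+ *	ret = amd_iommu_init_device(pdev, 16);
+ *	if (ret)
+ *		return ret;
+ *
+ * Teardown is via amd_iommu_free_device(pdev), declared below.)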
+ */ +extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids); + +/** + * amd_iommu_free_device() - Free all IOMMUv2 related device resources + * and disable IOMMUv2 usage for this device + * @pdev: The PCI device to disable IOMMUv2 usage for' + */ +extern void amd_iommu_free_device(struct pci_dev *pdev); + +/** + * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device + * @pdev: The PCI device to bind the task to + * @pasid: The PASID on the device the task should be bound to + * @task: the task to bind + * + * The function returns 0 on success or a negative value on error. + */ +extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, + struct task_struct *task); + +/** + * amd_iommu_unbind_pasid() - Unbind a PASID from its task on + * a device + * @pdev: The device of the PASID + * @pasid: The PASID to unbind + * + * When this function returns the device is no longer using the PASID + * and the PASID is no longer bound to its task. + */ +extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid); + +/** + * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed + * PRI requests + * @pdev: The PCI device the call-back should be registered for + * @cb: The call-back function + * + * The IOMMUv2 driver invokes this call-back when it is unable to + * successfully handle a PRI request. The device driver can then decide + * which PRI response the device should see. Possible return values for + * the call-back are: + * + * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device + * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device + * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device, + * the device is required to disable + * PRI when it receives this response + * + * The function returns 0 on success or negative value on error. + */ +#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0 +#define AMD_IOMMU_INV_PRI_RSP_INVALID 1 +#define AMD_IOMMU_INV_PRI_RSP_FAIL 2 + +typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, + int pasid, + unsigned long address, + u16); + +extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, + amd_iommu_invalid_ppr_cb cb); + +#define PPR_FAULT_EXEC (1 << 1) +#define PPR_FAULT_READ (1 << 2) +#define PPR_FAULT_WRITE (1 << 5) +#define PPR_FAULT_USER (1 << 6) +#define PPR_FAULT_RSVD (1 << 7) +#define PPR_FAULT_GN (1 << 8) + +/** + * amd_iommu_device_info() - Get information about IOMMUv2 support of a + * PCI device + * @pdev: PCI device to query information from + * @info: A pointer to an amd_iommu_device_info structure which will contain + * the information about the PCI device + * + * Returns 0 on success, negative value on error + */ + +#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ +#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ +#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ +#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution + on memory pages */ +#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request + super-user privileges */ + +struct amd_iommu_device_info { + int max_pasids; + u32 flags; +}; + +extern int amd_iommu_device_info(struct pci_dev *pdev, + struct amd_iommu_device_info *info); + +/** + * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating + * a pasid context. 
This call-back is + * invoked when the IOMMUv2 driver needs to + * invalidate a PASID context, for example + * because the task that is bound to that + * context is about to exit. + * + * @pdev: The PCI device the call-back should be registered for + * @cb: The call-back function + */ + +typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid); + +extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, + amd_iommu_invalidate_ctx cb); +#else /* CONFIG_AMD_IOMMU */ + +static inline int amd_iommu_detect(void) { return -ENODEV; } + +#endif /* CONFIG_AMD_IOMMU */ + +#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) + +/* IOMMU AVIC Function */ +extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); + +extern int +amd_iommu_update_ga(int cpu, bool is_run, void *data); + +extern int amd_iommu_activate_guest_mode(void *data); +extern int amd_iommu_deactivate_guest_mode(void *data); + +#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ + +static inline int +amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) +{ + return 0; +} + +static inline int +amd_iommu_update_ga(int cpu, bool is_run, void *data) +{ + return 0; +} + +static inline int amd_iommu_activate_guest_mode(void *data) +{ + return 0; +} + +static inline int amd_iommu_deactivate_guest_mode(void *data) +{ + return 0; +} +#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ + +#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h new file mode 100644 index 0000000..d0d7d96 --- /dev/null +++ b/include/linux/anon_inodes.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/anon_inodes.h + * + * Copyright (C) 2007 Davide Libenzi + * + */ + +#ifndef _LINUX_ANON_INODES_H +#define _LINUX_ANON_INODES_H + +struct file_operations; + +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags); +int anon_inode_getfd(const char *name, const struct file_operations *fops, + void *priv, int flags); + +#endif /* _LINUX_ANON_INODES_H */ + diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h new file mode 100644 index 0000000..94c0369 --- /dev/null +++ b/include/linux/apm-emulation.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* -*- linux-c -*- + * + * (C) 2003 zecke@handhelds.org + * + * based on arch/arm/kernel/apm.c + * factor out the information needed by architectures to provide + * apm status + */ +#ifndef __LINUX_APM_EMULATION_H +#define __LINUX_APM_EMULATION_H + +#include + +/* + * This structure gets filled in by the machine specific 'get_power_status' + * implementation. Any fields which are not set default to a safe value. 
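+ *
+ * (Editorial sketch, not part of the original header: a machine-specific
+ * hook, here the hypothetical myboard_get_power_status(), might fill it
+ * in as
+ *
+ *	static void myboard_get_power_status(struct apm_power_info *info)
+ *	{
+ *		info->ac_line_status = APM_AC_ONLINE;
+ *		info->battery_status = APM_BATTERY_STATUS_CHARGING;
+ *		info->battery_life = 80;
+ *		info->units = APM_UNITS_MINS;
+ *	}
+ *
+ * and be installed through the apm_get_power_status pointer below.)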
+ */ +struct apm_power_info { + unsigned char ac_line_status; +#define APM_AC_OFFLINE 0 +#define APM_AC_ONLINE 1 +#define APM_AC_BACKUP 2 +#define APM_AC_UNKNOWN 0xff + + unsigned char battery_status; +#define APM_BATTERY_STATUS_HIGH 0 +#define APM_BATTERY_STATUS_LOW 1 +#define APM_BATTERY_STATUS_CRITICAL 2 +#define APM_BATTERY_STATUS_CHARGING 3 +#define APM_BATTERY_STATUS_NOT_PRESENT 4 +#define APM_BATTERY_STATUS_UNKNOWN 0xff + + unsigned char battery_flag; +#define APM_BATTERY_FLAG_HIGH (1 << 0) +#define APM_BATTERY_FLAG_LOW (1 << 1) +#define APM_BATTERY_FLAG_CRITICAL (1 << 2) +#define APM_BATTERY_FLAG_CHARGING (1 << 3) +#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7) +#define APM_BATTERY_FLAG_UNKNOWN 0xff + + int battery_life; + int time; + int units; +#define APM_UNITS_MINS 0 +#define APM_UNITS_SECS 1 +#define APM_UNITS_UNKNOWN -1 + +}; + +/* + * This allows machines to provide their own "apm get power status" function. + */ +extern void (*apm_get_power_status)(struct apm_power_info *); + +/* + * Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND) + */ +void apm_queue_event(apm_event_t event); + +#endif /* __LINUX_APM_EMULATION_H */ diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h new file mode 100644 index 0000000..7554192 --- /dev/null +++ b/include/linux/apm_bios.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Include file for the interface to an APM BIOS + * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) + */ +#ifndef _LINUX_APM_H +#define _LINUX_APM_H + +#include + + +#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8) +#define APM_CS_16 (APM_CS + 8) +#define APM_DS (APM_CS_16 + 8) + +/* Results of APM Installation Check */ +#define APM_16_BIT_SUPPORT 0x0001 +#define APM_32_BIT_SUPPORT 0x0002 +#define APM_IDLE_SLOWS_CLOCK 0x0004 +#define APM_BIOS_DISABLED 0x0008 +#define APM_BIOS_DISENGAGED 0x0010 + +/* + * Data for APM that is persistent across module unload/load + */ +struct apm_info { + struct apm_bios_info bios; + unsigned short connection_version; + int get_power_status_broken; + int get_power_status_swabinminutes; + int allow_ints; + int forbid_idle; + int realmode_power_off; + int disabled; +}; + +/* + * The APM function codes + */ +#define APM_FUNC_INST_CHECK 0x5300 +#define APM_FUNC_REAL_CONN 0x5301 +#define APM_FUNC_16BIT_CONN 0x5302 +#define APM_FUNC_32BIT_CONN 0x5303 +#define APM_FUNC_DISCONN 0x5304 +#define APM_FUNC_IDLE 0x5305 +#define APM_FUNC_BUSY 0x5306 +#define APM_FUNC_SET_STATE 0x5307 +#define APM_FUNC_ENABLE_PM 0x5308 +#define APM_FUNC_RESTORE_BIOS 0x5309 +#define APM_FUNC_GET_STATUS 0x530a +#define APM_FUNC_GET_EVENT 0x530b +#define APM_FUNC_GET_STATE 0x530c +#define APM_FUNC_ENABLE_DEV_PM 0x530d +#define APM_FUNC_VERSION 0x530e +#define APM_FUNC_ENGAGE_PM 0x530f +#define APM_FUNC_GET_CAP 0x5310 +#define APM_FUNC_RESUME_TIMER 0x5311 +#define APM_FUNC_RESUME_ON_RING 0x5312 +#define APM_FUNC_TIMER 0x5313 + +/* + * Function code for APM_FUNC_RESUME_TIMER + */ +#define APM_FUNC_DISABLE_TIMER 0 +#define APM_FUNC_GET_TIMER 1 +#define APM_FUNC_SET_TIMER 2 + +/* + * Function code for APM_FUNC_RESUME_ON_RING + */ +#define APM_FUNC_DISABLE_RING 0 +#define APM_FUNC_ENABLE_RING 1 +#define APM_FUNC_GET_RING 2 + +/* + * Function code for APM_FUNC_TIMER_STATUS + */ +#define APM_FUNC_TIMER_DISABLE 0 +#define APM_FUNC_TIMER_ENABLE 1 +#define APM_FUNC_TIMER_GET 2 + +/* + * in arch/i386/kernel/setup.c + */ +extern struct apm_info apm_info; + +/* + * This is the "All Devices" ID communicated to the BIOS + */ +#define 
APM_DEVICE_BALL ((apm_info.connection_version > 0x0100) ? \ + APM_DEVICE_ALL : APM_DEVICE_OLD_ALL) +#endif /* LINUX_APM_H */ diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h new file mode 100644 index 0000000..ddb10aa --- /dev/null +++ b/include/linux/apple-gmux.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro + * Copyright (C) 2015 Lukas Wunner + */ + +#ifndef LINUX_APPLE_GMUX_H +#define LINUX_APPLE_GMUX_H + +#include + +#define GMUX_ACPI_HID "APP000B" + +#if IS_ENABLED(CONFIG_APPLE_GMUX) + +/** + * apple_gmux_present() - detect if gmux is built into the machine + * + * Drivers may use this to activate quirks specific to dual GPU MacBook Pros + * and Mac Pros, e.g. for deferred probing, runtime pm and backlight. + * + * Return: %true if gmux is present and the kernel was configured + * with CONFIG_APPLE_GMUX, %false otherwise. + */ +static inline bool apple_gmux_present(void) +{ + return acpi_dev_found(GMUX_ACPI_HID); +} + +#else /* !CONFIG_APPLE_GMUX */ + +static inline bool apple_gmux_present(void) +{ + return false; +} + +#endif /* !CONFIG_APPLE_GMUX */ + +#endif /* LINUX_APPLE_GMUX_H */ diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h new file mode 100644 index 0000000..445af2e --- /dev/null +++ b/include/linux/apple_bl.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * apple_bl exported symbols + */ + +#ifndef _LINUX_APPLE_BL_H +#define _LINUX_APPLE_BL_H + +#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE) + +extern int apple_bl_register(void); +extern void apple_bl_unregister(void); + +#else /* !CONFIG_BACKLIGHT_APPLE */ + +static inline int apple_bl_register(void) +{ + return 0; +} + +static inline void apple_bl_unregister(void) +{ +} + +#endif /* !CONFIG_BACKLIGHT_APPLE */ + +#endif /* _LINUX_APPLE_BL_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h new file mode 100644 index 0000000..42f2b51 --- /dev/null +++ b/include/linux/arch_topology.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/arch_topology.h - arch specific cpu topology information + */ +#ifndef _LINUX_ARCH_TOPOLOGY_H_ +#define _LINUX_ARCH_TOPOLOGY_H_ + +#include +#include + +void topology_normalize_cpu_scale(void); +int topology_update_cpu_topology(void); + +struct device_node; +bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); + +DECLARE_PER_CPU(unsigned long, cpu_scale); + +struct sched_domain; +static inline +unsigned long topology_get_cpu_scale(int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); + +DECLARE_PER_CPU(unsigned long, freq_scale); + +static inline +unsigned long topology_get_freq_scale(int cpu) +{ + return per_cpu(freq_scale, cpu); +} + +struct cpu_topology { + int thread_id; + int core_id; + int package_id; + int llc_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t llc_sibling; +}; + +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) +void init_cpu_topology(void); +void 
store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); +void update_siblings_masks(unsigned int cpu); +void remove_cpu_topology(unsigned int cpuid); +void reset_cpu_topology(void); +#endif + +#endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h new file mode 100644 index 0000000..d0e4420 --- /dev/null +++ b/include/linux/arm-cci.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CCI cache coherent interconnect support + * + * Copyright (C) 2013 ARM Ltd. + */ + +#ifndef __LINUX_ARM_CCI_H +#define __LINUX_ARM_CCI_H + +#include +#include + +#include + +struct device_node; + +#ifdef CONFIG_ARM_CCI +extern bool cci_probed(void); +#else +static inline bool cci_probed(void) { return false; } +#endif + +#ifdef CONFIG_ARM_CCI400_PORT_CTRL +extern int cci_ace_get_port(struct device_node *dn); +extern int cci_disable_port_by_cpu(u64 mpidr); +extern int __cci_control_port_by_device(struct device_node *dn, bool enable); +extern int __cci_control_port_by_index(u32 port, bool enable); +#else +static inline int cci_ace_get_port(struct device_node *dn) +{ + return -ENODEV; +} +static inline int cci_disable_port_by_cpu(u64 mpidr) { return -ENODEV; } +static inline int __cci_control_port_by_device(struct device_node *dn, + bool enable) +{ + return -ENODEV; +} +static inline int __cci_control_port_by_index(u32 port, bool enable) +{ + return -ENODEV; +} +#endif + +#define cci_disable_port_by_device(dev) \ + __cci_control_port_by_device(dev, false) +#define cci_enable_port_by_device(dev) \ + __cci_control_port_by_device(dev, true) +#define cci_disable_port_by_index(dev) \ + __cci_control_port_by_index(dev, false) +#define cci_enable_port_by_index(dev) \ + __cci_control_port_by_index(dev, true) + +#endif diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 0000000..080012a --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, Linaro Limited + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 
+#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +#define ARM_SMCCC_QUIRK_NONE 0 +#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ + +#define ARM_SMCCC_VERSION_1_0 0x10000 +#define ARM_SMCCC_VERSION_1_1 0x10001 + +#define ARM_SMCCC_VERSION_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0) + +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 1) + +#define ARM_SMCCC_ARCH_WORKAROUND_1 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x8000) + +#define ARM_SMCCC_ARCH_WORKAROUND_2 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x7fff) + +#ifndef __ASSEMBLY__ + +#include +#include +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * struct arm_smccc_quirk - Contains quirk information + * @id: quirk identification + * @state: quirk specific information + * @a6: Qualcomm quirk entry for returning post-smc call contents of a6 + */ +struct arm_smccc_quirk { + int id; + union { + unsigned long a6; + } state; +}; + +/** + * __arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. + * + * This function is used to make SMC calls following SMC Calling Convention. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. An optional + * quirk structure provides vendor specific behavior. + */ +asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); + +/** + * __arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. An + * optional quirk structure provides vendor specific behavior. + */ +asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); + +#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL) + +#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__) + +#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL) + +#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) + +/* SMCCC v1.1 implementation madness follows */ +#ifdef CONFIG_ARM64 + +#define SMCCC_SMC_INST "smc #0" +#define SMCCC_HVC_INST "hvc #0" + +#elif defined(CONFIG_ARM) +#include +#include + +#define SMCCC_SMC_INST __SMC(0) +#define SMCCC_HVC_INST __HVC(0) + +#endif + +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) 
x + +#define __count_args(...) \ + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __constraint_write_0 \ + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_1 \ + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_2 \ + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) +#define __constraint_write_3 \ + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) +#define __constraint_write_4 __constraint_write_3 +#define __constraint_write_5 __constraint_write_4 +#define __constraint_write_6 __constraint_write_5 +#define __constraint_write_7 __constraint_write_6 + +#define __constraint_read_0 +#define __constraint_read_1 +#define __constraint_read_2 +#define __constraint_read_3 +#define __constraint_read_4 "r" (r4) +#define __constraint_read_5 __constraint_read_4, "r" (r5) +#define __constraint_read_6 __constraint_read_5, "r" (r6) +#define __constraint_read_7 __constraint_read_6, "r" (r7) + +#define __declare_arg_0(a0, res) \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_1(a0, a1, res) \ + typeof(a1) __a1 = a1; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_2(a0, a1, a2, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") + +#define __declare_arg_3(a0, a1, a2, a3, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + typeof(a3) __a3 = a3; \ + struct arm_smccc_res *___res = res; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") = __a3 + +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + typeof(a4) __a4 = a4; \ + __declare_arg_3(a0, a1, a2, a3, res); \ + register unsigned long r4 asm("r4") = __a4 + +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + typeof(a5) __a5 = a5; \ + __declare_arg_4(a0, a1, a2, a3, a4, res); \ + register unsigned long r5 asm("r5") = __a5 + +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + typeof(a6) __a6 = a6; \ + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ + register unsigned long r6 asm("r6") = __a6 + +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + typeof(a7) __a7 = a7; \ + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ + register unsigned long r7 asm("r7") = __a7 + +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) + +#define ___constraints(count) \ + : __constraint_write_ ## count \ + : __constraint_read_ ## count \ + : "memory" +#define __constraints(count) ___constraints(count) + +/* + * We have an output list that is not necessarily used, and GCC feels + * entitled to optimise the whole sequence away. "volatile" is what + * makes it stick. + */ +#define __arm_smccc_1_1(inst, ...) 
\ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm volatile(inst "\n" \ + __constraints(__count_args(__VA_ARGS__))); \ + if (___res) \ + *___res = (typeof(*___res)){r0, r1, r2, r3}; \ + } while (0) + +/* + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make SMC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction if not NULL. + */ +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) + +/* + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make HVC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the HVC instruction. The return values are updated with the content + * from register 0 to 3 on return from the HVC instruction if not NULL. + */ +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) + +/* Return codes defined in ARM DEN 0070A */ +#define SMCCC_RET_SUCCESS 0 +#define SMCCC_RET_NOT_SUPPORTED -1 +#define SMCCC_RET_NOT_REQUIRED -2 + +#endif /*__ASSEMBLY__*/ +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h new file mode 100644 index 0000000..3305ea7 --- /dev/null +++ b/include/linux/arm_sdei.h @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 Arm Ltd. +#ifndef __LINUX_ARM_SDEI_H +#define __LINUX_ARM_SDEI_H + +#include + +enum sdei_conduit_types { + CONDUIT_INVALID = 0, + CONDUIT_SMC, + CONDUIT_HVC, +}; + +#include + +#ifdef CONFIG_ARM_SDE_INTERFACE +#include +#endif + +/* Arch code should override this to set the entry point from firmware... */ +#ifndef sdei_arch_get_entry_point +#define sdei_arch_get_entry_point(conduit) (0) +#endif + +/* + * When an event occurs sdei_event_handler() will call a user-provided callback + * like this in NMI context on the CPU that received the event. + */ +typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); + +/* + * Register your callback to claim an event. The event must be described + * by firmware. + */ +int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg); + +/* + * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling + * it until it succeeds. + */ +int sdei_event_unregister(u32 event_num); + +int sdei_event_enable(u32 event_num); +int sdei_event_disable(u32 event_num); + +/* GHES register/unregister helpers */ +int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, + sdei_event_callback *critical_cb); +int sdei_unregister_ghes(struct ghes *ghes); + +#ifdef CONFIG_ARM_SDE_INTERFACE +/* For use by arch code when CPU hotplug notifiers are not appropriate. 
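+ *
+ * (Editorial example: a hypothetical CPU-offline path might do
+ *
+ *	if (sdei_mask_local_cpu())
+ *		pr_warn("SDEI: failed to mask events on this CPU\n");
+ *
+ * and call sdei_unmask_local_cpu() again when the CPU comes back up.)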
*/ +int sdei_mask_local_cpu(void); +int sdei_unmask_local_cpu(void); +#else +static inline int sdei_mask_local_cpu(void) { return 0; } +static inline int sdei_unmask_local_cpu(void) { return 0; } +#endif /* CONFIG_ARM_SDE_INTERFACE */ + + +/* + * This struct represents an event that has been registered. The driver + * maintains a list of all events, and which ones are registered. (Private + * events have one entry in the list, but are registered on each CPU). + * A pointer to this struct is passed to firmware, and back to the event + * handler. The event handler can then use this to invoke the registered + * callback, without having to walk the list. + * + * For CPU private events, this structure is per-cpu. + */ +struct sdei_registered_event { + /* For use by arch code: */ + struct pt_regs interrupted_regs; + + sdei_event_callback *callback; + void *callback_arg; + u32 event_num; + u8 priority; +}; + +/* The arch code entry point should then call this when an event arrives. */ +int notrace sdei_event_handler(struct pt_regs *regs, + struct sdei_registered_event *arg); + +/* arch code may use this to retrieve the extra registers. */ +int sdei_api_event_context(u32 query, u64 *result); + +#endif /* __LINUX_ARM_SDEI_H */ diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h new file mode 100644 index 0000000..57bb54f --- /dev/null +++ b/include/linux/armada-37xx-rwtm-mailbox.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rWTM BIU Mailbox driver for Armada 37xx + * + * Author: Marek Behun + */ + +#ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ +#define _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ + +#include + +struct armada_37xx_rwtm_tx_msg { + u16 command; + u32 args[16]; +}; + +struct armada_37xx_rwtm_rx_msg { + u32 retval; + u32 status[16]; +}; + +#endif /* _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ */ diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h new file mode 100644 index 0000000..4cc4020 --- /dev/null +++ b/include/linux/ascii85.h @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2008 Intel Corporation + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + */ + +#ifndef _ASCII85_H_ +#define _ASCII85_H_ + +#include + +#define ASCII85_BUFSZ 6 + +static inline long +ascii85_encode_len(long len) +{ + return DIV_ROUND_UP(len, 4); +} + +static inline const char * +ascii85_encode(u32 in, char *out) +{ + int i; + + if (in == 0) + return "z"; + + out[5] = '\0'; + for (i = 5; i--; ) { + out[i] = '!' + in % 85; + in /= 85; + } + + return out; +} + +#endif diff --git a/include/linux/asn1.h b/include/linux/asn1.h new file mode 100644 index 0000000..a4d0bdd --- /dev/null +++ b/include/linux/asn1.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* ASN.1 BER/DER/CER encoding definitions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_ASN1_H +#define _LINUX_ASN1_H + +/* Class */ +enum asn1_class { + ASN1_UNIV = 0, /* Universal */ + ASN1_APPL = 1, /* Application */ + ASN1_CONT = 2, /* Context */ + ASN1_PRIV = 3 /* Private */ +}; +#define ASN1_CLASS_BITS 0xc0 + + +enum asn1_method { + ASN1_PRIM = 0, /* Primitive */ + ASN1_CONS = 1 /* Constructed */ +}; +#define ASN1_CONS_BIT 0x20 + +/* Tag */ +enum asn1_tag { + ASN1_EOC = 0, /* End Of Contents or N/A */ + ASN1_BOOL = 1, /* Boolean */ + ASN1_INT = 2, /* Integer */ + ASN1_BTS = 3, /* Bit String */ + ASN1_OTS = 4, /* Octet String */ + ASN1_NULL = 5, /* Null */ + ASN1_OID = 6, /* Object Identifier */ + ASN1_ODE = 7, /* Object Description */ + ASN1_EXT = 8, /* External */ + ASN1_REAL = 9, /* Real float */ + ASN1_ENUM = 10, /* Enumerated */ + ASN1_EPDV = 11, /* Embedded PDV */ + ASN1_UTF8STR = 12, /* UTF8 String */ + ASN1_RELOID = 13, /* Relative OID */ + /* 14 - Reserved */ + /* 15 - Reserved */ + ASN1_SEQ = 16, /* Sequence and Sequence of */ + ASN1_SET = 17, /* Set and Set of */ + ASN1_NUMSTR = 18, /* Numerical String */ + ASN1_PRNSTR = 19, /* Printable String */ + ASN1_TEXSTR = 20, /* T61 String / Teletext String */ + ASN1_VIDSTR = 21, /* Videotex String */ + ASN1_IA5STR = 22, /* IA5 String */ + ASN1_UNITIM = 23, /* Universal Time */ + ASN1_GENTIM = 24, /* General Time */ + ASN1_GRASTR = 25, /* Graphic String */ + ASN1_VISSTR = 26, /* Visible String */ + ASN1_GENSTR = 27, /* General String */ + ASN1_UNISTR = 28, /* Universal String */ + ASN1_CHRSTR = 29, /* Character String */ + ASN1_BMPSTR = 30, /* BMP String */ + ASN1_LONG_TAG = 31 /* Long form tag */ +}; + +#define ASN1_INDEFINITE_LENGTH 0x80 + +#endif /* _LINUX_ASN1_H */ diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h new file mode 100644 index 0000000..b383619 --- /dev/null +++ b/include/linux/asn1_ber_bytecode.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* ASN.1 BER/DER/CER parsing state machine internal definitions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_ASN1_BER_BYTECODE_H +#define _LINUX_ASN1_BER_BYTECODE_H + +#ifdef __KERNEL__ +#include +#endif +#include + +typedef int (*asn1_action_t)(void *context, + size_t hdrlen, /* In case of ANY type */ + unsigned char tag, /* In case of ANY type */ + const void *value, size_t vlen); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + /* The tag-matching ops come first and the odd-numbered slots + * are for OR_SKIP ops. 
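+ *
+ * (Editorial note: the flag bits compose, e.g. ASN1_OP_MATCH_ACT_OR_SKIP
+ * (0x03) is ASN1_OP_MATCH | ASN1_OP_MATCH__ACT | ASN1_OP_MATCH__SKIP.)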
+ */ +#define ASN1_OP_MATCH__SKIP 0x01 +#define ASN1_OP_MATCH__ACT 0x02 +#define ASN1_OP_MATCH__JUMP 0x04 +#define ASN1_OP_MATCH__ANY 0x08 +#define ASN1_OP_MATCH__COND 0x10 + + ASN1_OP_MATCH = 0x00, + ASN1_OP_MATCH_OR_SKIP = 0x01, + ASN1_OP_MATCH_ACT = 0x02, + ASN1_OP_MATCH_ACT_OR_SKIP = 0x03, + ASN1_OP_MATCH_JUMP = 0x04, + ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, + ASN1_OP_MATCH_ANY = 0x08, + ASN1_OP_MATCH_ANY_OR_SKIP = 0x09, + ASN1_OP_MATCH_ANY_ACT = 0x0a, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b, + /* Everything before here matches unconditionally */ + + ASN1_OP_COND_MATCH_OR_SKIP = 0x11, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, + ASN1_OP_COND_MATCH_ANY = 0x18, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19, + ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b, + + /* Everything before here will want a tag from the data */ +#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP + + /* These are here to help fill up space */ + ASN1_OP_COND_FAIL = 0x1c, + ASN1_OP_COMPLETE = 0x1d, + ASN1_OP_ACT = 0x1e, + ASN1_OP_MAYBE_ACT = 0x1f, + + /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ + ASN1_OP_END_SEQ = 0x20, + ASN1_OP_END_SET = 0x21, + ASN1_OP_END_SEQ_OF = 0x22, + ASN1_OP_END_SET_OF = 0x23, + ASN1_OP_END_SEQ_ACT = 0x24, + ASN1_OP_END_SET_ACT = 0x25, + ASN1_OP_END_SEQ_OF_ACT = 0x26, + ASN1_OP_END_SET_OF_ACT = 0x27, +#define ASN1_OP_END__SET 0x01 +#define ASN1_OP_END__OF 0x02 +#define ASN1_OP_END__ACT 0x04 + + ASN1_OP_RETURN = 0x28, + + ASN1_OP__NR +}; + +#define _tag(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | ASN1_##TAG) +#define _tagn(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | TAG) +#define _jump_target(N) (N) +#define _action(N) (N) + +#endif /* _LINUX_ASN1_BER_BYTECODE_H */ diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h new file mode 100644 index 0000000..83f9c6e --- /dev/null +++ b/include/linux/asn1_decoder.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* ASN.1 decoder + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_ASN1_DECODER_H +#define _LINUX_ASN1_DECODER_H + +#include + +struct asn1_decoder; + +extern int asn1_ber_decoder(const struct asn1_decoder *decoder, + void *context, + const unsigned char *data, + size_t datalen); + +#endif /* _LINUX_ASN1_DECODER_H */ diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h new file mode 100644 index 0000000..8b3f230 --- /dev/null +++ b/include/linux/assoc_array.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Generic associative array implementation. + * + * See Documentation/core-api/assoc_array.rst for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_ASSOC_ARRAY_H +#define _LINUX_ASSOC_ARRAY_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include + +#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */ + +/* + * Generic associative array. + */ +struct assoc_array { + struct assoc_array_ptr *root; /* The node at the root of the tree */ + unsigned long nr_leaves_on_tree; +}; + +/* + * Operations on objects and index keys for use by array manipulation routines. 
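+ *
+ * (Editorial sketch, not part of the original header: a user whose index
+ * key is a single unsigned long might implement the first method as
+ *
+ *	static unsigned long my_get_key_chunk(const void *index_key, int level)
+ *	{
+ *		return *(const unsigned long *)index_key >> level;
+ *	}
+ *
+ * where my_get_key_chunk() is a hypothetical name and @level counts bits.)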
+ */ +struct assoc_array_ops { + /* Method to get a chunk of an index key from caller-supplied data */ + unsigned long (*get_key_chunk)(const void *index_key, int level); + + /* Method to get a piece of an object's index key */ + unsigned long (*get_object_key_chunk)(const void *object, int level); + + /* Is this the object we're looking for? */ + bool (*compare_object)(const void *object, const void *index_key); + + /* How different is an object from an index key, to a bit position in + * their keys? (or -1 if they're the same) + */ + int (*diff_objects)(const void *object, const void *index_key); + + /* Method to free an object. */ + void (*free_object)(void *object); +}; + +/* + * Access and manipulation functions. + */ +struct assoc_array_edit; + +static inline void assoc_array_init(struct assoc_array *array) +{ + array->root = NULL; + array->nr_leaves_on_tree = 0; +} + +extern int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data); +extern void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object); +extern void assoc_array_insert_set_object(struct assoc_array_edit *edit, + void *object); +extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern void assoc_array_apply_edit(struct assoc_array_edit *edit); +extern void assoc_array_cancel_edit(struct assoc_array_edit *edit); +extern int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data); + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_H */ diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h new file mode 100644 index 0000000..dca733e --- /dev/null +++ b/include/linux/assoc_array_priv.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Private definitions for the generic associative array implementation. + * + * See Documentation/core-api/assoc_array.rst for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_ASSOC_ARRAY_PRIV_H +#define _LINUX_ASSOC_ARRAY_PRIV_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include + +#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */ +#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1) +#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT)) +#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1) +#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1) +#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG)) + +/* + * Undefined type representing a pointer with type information in the bottom + * two bits. + */ +struct assoc_array_ptr; + +/* + * An N-way node in the tree. + * + * Each slot contains one of four things: + * + * (1) Nothing (NULL). + * + * (2) A leaf object (pointer types 0). + * + * (3) A next-level node (pointer type 1, subtype 0). + * + * (4) A shortcut (pointer type 1, subtype 1). 
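+ *
+ * (Editorial arithmetic note: with ASSOC_ARRAY_FAN_OUT = 16, each level
+ * consumes ilog2(16) = 4 bits of index key, so a 64-bit key chunk
+ * (BITS_PER_LONG on 64-bit machines) covers 16 levels of the tree.)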
+ * + * The tree is optimised for search-by-ID, but permits reasonable iteration + * also. + * + * The tree is navigated by constructing an index key consisting of an array of + * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size. + * + * The segments correspond to levels of the tree (the first segment is used at + * level 0, the second at level 1, etc.). + */ +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT]; + unsigned long nr_leaves_on_branch; +}; + +/* + * A shortcut through the index space out to where a collection of nodes/leaves + * with the same IDs live. + */ +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + unsigned long index_key[]; +}; + +/* + * Preallocation cache. + */ +struct assoc_array_edit { + struct rcu_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1]; +}; + +/* + * Internal tree member pointers are marked in the bottom one or two bits to + * indicate what type they are so that we don't have to look behind every + * pointer to see what it points to. + * + * We provide functions to test type annotations and to create and translate + * the annotated pointers. 
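 *
 * For example (editorial note, not part of the original header): converting
 * a node to an annotated pointer just sets bit 0, and the helpers below test
 * and undo that annotation:
 *
 *	struct assoc_array_ptr *p = assoc_array_node_to_ptr(node);
 *	(the value of p is the node's address with bit 0 set)
 *	if (assoc_array_ptr_is_meta(p) && assoc_array_ptr_is_node(p))
 *		node = assoc_array_ptr_to_node(p);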
+ */
+#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
+#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL	/* Points to leaf (or nowhere) */
+#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL	/* Points to node or shortcut */
+#define ASSOC_ARRAY_PTR_SUBTYPE_MASK	0x2UL
+#define ASSOC_ARRAY_PTR_NODE_SUBTYPE	0x0UL
+#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
+
+static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
+{
+	return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
+{
+	return !assoc_array_ptr_is_meta(x);
+}
+static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
+{
+	return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
+{
+	return !assoc_array_ptr_is_shortcut(x);
+}
+
+static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
+{
+	return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+
+static inline
+unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
+{
+	return (unsigned long)x &
+		~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+static inline
+struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
+{
+	return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
+}
+static inline
+struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
+{
+	return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
+}
+
+static inline
+struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
+{
+	return (struct assoc_array_ptr *)((unsigned long)p | t);
+}
+static inline
+struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
+{
+	return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
+{
+	return __assoc_array_x_to_ptr(
+		p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
+{
+	return __assoc_array_x_to_ptr(
+		p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
+}
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
diff --git a/include/linux/async.h b/include/linux/async.h
new file mode 100644
index 0000000..0a17cd2
--- /dev/null
+++ b/include/linux/async.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * async.h: Asynchronous function calls for boot performance
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ */
+#ifndef __ASYNC_H__
+#define __ASYNC_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/numa.h>
+#include <linux/device.h>
+
+typedef u64 async_cookie_t;
+typedef void (*async_func_t) (void *data, async_cookie_t cookie);
+struct async_domain {
+	struct list_head pending;
+	unsigned registered:1;
+};
+
+/*
+ * domain participates in global async_synchronize_full
+ */
+#define ASYNC_DOMAIN(_name) \
+	struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
+				      .registered = 1 }
+
+/*
+ * domain is free to go out of scope as soon as all pending work is
+ * complete, this domain does not participate in async_synchronize_full
+ */
+#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
+	struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
+				      .registered = 0 }
+
+async_cookie_t async_schedule_node(async_func_t func, void *data,
+				   int node);
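
/*
 * Usage sketch (editorial addition, not part of the original header; the
 * function and variable names are hypothetical).  Work is scheduled into a
 * private, exclusive domain so that only that domain needs to be flushed:
 *
 *	static ASYNC_DOMAIN_EXCLUSIVE(my_domain);
 *
 *	static void my_worker(void *data, async_cookie_t cookie)
 *	{
 *		(do the slow work here)
 *	}
 *
 *	async_schedule_domain(my_worker, my_data, &my_domain);
 *	async_synchronize_full_domain(&my_domain);
 */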
+async_cookie_t async_schedule_node_domain(async_func_t func, void *data, + int node, + struct async_domain *domain); + +/** + * async_schedule - schedule a function for asynchronous execution + * @func: function to execute asynchronously + * @data: data pointer to pass to the function + * + * Returns an async_cookie_t that may be used for checkpointing later. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t async_schedule(async_func_t func, void *data) +{ + return async_schedule_node(func, data, NUMA_NO_NODE); +} + +/** + * async_schedule_domain - schedule a function for asynchronous execution within a certain domain + * @func: function to execute asynchronously + * @data: data pointer to pass to the function + * @domain: the domain + * + * Returns an async_cookie_t that may be used for checkpointing later. + * @domain may be used in the async_synchronize_*_domain() functions to + * wait within a certain synchronization domain rather than globally. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t +async_schedule_domain(async_func_t func, void *data, + struct async_domain *domain) +{ + return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain); +} + +/** + * async_schedule_dev - A device specific version of async_schedule + * @func: function to execute asynchronously + * @dev: device argument to be passed to function + * + * Returns an async_cookie_t that may be used for checkpointing later. + * @dev is used as both the argument for the function and to provide NUMA + * context for where to run the function. By doing this we can try to + * provide for the best possible outcome by operating on the device on the + * CPUs closest to the device. + * Note: This function may be called from atomic or non-atomic contexts. + */ +static inline async_cookie_t +async_schedule_dev(async_func_t func, struct device *dev) +{ + return async_schedule_node(func, dev, dev_to_node(dev)); +} + +/** + * async_schedule_dev_domain - A device specific version of async_schedule_domain + * @func: function to execute asynchronously + * @dev: device argument to be passed to function + * @domain: the domain + * + * Returns an async_cookie_t that may be used for checkpointing later. + * @dev is used as both the argument for the function and to provide NUMA + * context for where to run the function. By doing this we can try to + * provide for the best possible outcome by operating on the device on the + * CPUs closest to the device. + * @domain may be used in the async_synchronize_*_domain() functions to + * wait within a certain synchronization domain rather than globally. + * Note: This function may be called from atomic or non-atomic contexts. 
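 *
 * Usage sketch (editorial, names hypothetical):
 *
 *	cookie = async_schedule_dev_domain(my_probe_fn, dev, &probe_domain);
 *	(... other work ...)
 *	async_synchronize_cookie_domain(cookie, &probe_domain);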
+ */
+static inline async_cookie_t
+async_schedule_dev_domain(async_func_t func, struct device *dev,
+			  struct async_domain *domain)
+{
+	return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
+}
+
+void async_unregister_domain(struct async_domain *domain);
+extern void async_synchronize_full(void);
+extern void async_synchronize_full_domain(struct async_domain *domain);
+extern void async_synchronize_cookie(async_cookie_t cookie);
+extern void async_synchronize_cookie_domain(async_cookie_t cookie,
+					    struct async_domain *domain);
+extern bool current_is_async(void);
+#endif
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
new file mode 100644
index 0000000..75e582b
--- /dev/null
+++ b/include/linux/async_tx.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006, Intel Corporation.
+ */
+#ifndef _ASYNC_TX_H_
+#define _ASYNC_TX_H_
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
+/**
+ * dma_chan_ref - object used to manage dma channels received from the
+ * dmaengine core.
+ * @chan - the channel being tracked
+ * @node - node for the channel to be placed on async_tx_master_list
+ * @rcu - for list_del_rcu
+ * @count - number of times this channel is listed in the pool
+ *	(for channels with multiple capabilities)
+ */
+struct dma_chan_ref {
+	struct dma_chan *chan;
+	struct list_head node;
+	struct rcu_head rcu;
+	atomic_t count;
+};
+
+/**
+ * async_tx_flags - modifiers for the async_* calls
+ * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
+ * destination address is not a source.  The asynchronous case handles this
+ * implicitly, the synchronous case needs to zero the destination block.
+ * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
+ * also one of the source addresses.  In the synchronous case the destination
+ * address is an implied source, whereas in the asynchronous case it must be
+ * listed as a source.  The destination address must be the first address in
+ * the source array.
+ * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
+ * dependency chain
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ * chain uses this operation's result as an input
+ * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
+ * input data.  Required for rmw case.
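 *
 * For example (editorial sketch, not from the original header): an XOR whose
 * result feeds the next operation in the chain would be submitted with
 * ASYNC_TX_FENCE set:
 *
 *	init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
 *	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);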
+ */ +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = (1 << 0), + ASYNC_TX_XOR_DROP_DST = (1 << 1), + ASYNC_TX_ACK = (1 << 2), + ASYNC_TX_FENCE = (1 << 3), + ASYNC_TX_PQ_XOR_DST = (1 << 4), +}; + +/** + * struct async_submit_ctl - async_tx submission/completion modifiers + * @flags: submission modifiers + * @depend_tx: parent dependency of the current operation being submitted + * @cb_fn: callback routine to run at operation completion + * @cb_param: parameter for the callback routine + * @scribble: caller provided space for dma/page address conversions + */ +struct async_submit_ctl { + enum async_tx_flags flags; + struct dma_async_tx_descriptor *depend_tx; + dma_async_tx_callback cb_fn; + void *cb_param; + void *scribble; +}; + +#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH) +#define async_tx_issue_pending_all dma_issue_pending_all + +/** + * async_tx_issue_pending - send pending descriptor to the hardware channel + * @tx: descriptor handle to retrieve hardware context + * + * Note: any dependent operations will have already been issued by + * async_tx_channel_switch, or (in the case of no channel switch) will + * be already pending on this channel. + */ +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + if (likely(tx)) { + struct dma_chan *chan = tx->chan; + struct dma_device *dma = chan->device; + + dma->device_issue_pending(chan); + } +} +#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL +#include +#else +#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ + __async_tx_find_channel(dep, type) +struct dma_chan * +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type); +#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ +#else +static inline void async_tx_issue_pending_all(void) +{ + do { } while (0); +} + +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + do { } while (0); +} + +static inline struct dma_chan * +async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type, struct page **dst, + int dst_count, struct page **src, int src_count, + size_t len) +{ + return NULL; +} +#endif + +/** + * async_tx_sync_epilog - actions to take if an operation is run synchronously + * @cb_fn: function to call when the transaction completes + * @cb_fn_param: parameter to pass to the callback routine + */ +static inline void +async_tx_sync_epilog(struct async_submit_ctl *submit) +{ + if (submit->cb_fn) + submit->cb_fn(submit->cb_param); +} + +typedef union { + unsigned long addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +static inline void +init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, + struct dma_async_tx_descriptor *tx, + dma_async_tx_callback cb_fn, void *cb_param, + addr_conv_t *scribble) +{ + args->flags = flags; + args->depend_tx = tx; + args->cb_fn = cb_fn; + args->cb_param = cb_param; + args->scribble = scribble; +} + +void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_xor(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, enum sum_check_flags *result, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_memcpy(struct 
page *dest, struct page *src, unsigned int dest_offset,
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+		   size_t len, enum sum_check_flags *pqres, struct page *spare,
+		   struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+			struct page **ptrs, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+			struct page **ptrs, struct async_submit_ctl *submit);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
+#endif /* _ASYNC_TX_H_ */
diff --git a/include/linux/ata.h b/include/linux/ata.h
new file mode 100644
index 0000000..6e67ade
--- /dev/null
+++ b/include/linux/ata.h
@@ -0,0 +1,1140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/driver-api/libata.rst
+ *
+ * Hardware documentation available from http://www.t13.org/
+ */
+
+#ifndef __LINUX_ATA_H__
+#define __LINUX_ATA_H__
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* defines only for the constants which don't work well as enums */
+#define ATA_DMA_BOUNDARY	0xffffUL
+#define ATA_DMA_MASK		0xffffffffULL
+
+enum {
+	/* various global constants */
+	ATA_MAX_DEVICES		= 2,	/* per bus/port */
+	ATA_MAX_PRD		= 256,	/* we could make these 256/256 */
+	ATA_SECT_SIZE		= 512,
+	ATA_MAX_SECTORS_128	= 128,
+	ATA_MAX_SECTORS		= 256,
+	ATA_MAX_SECTORS_1024	= 1024,
+	ATA_MAX_SECTORS_LBA48	= 65535,/* avoid count to be 0000h */
+	ATA_MAX_SECTORS_TAPE	= 65535,
+	ATA_MAX_TRIM_RNUM	= 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
+
+	ATA_ID_WORDS		= 256,
+	ATA_ID_CONFIG		= 0,
+	ATA_ID_CYLS		= 1,
+	ATA_ID_HEADS		= 3,
+	ATA_ID_SECTORS		= 6,
+	ATA_ID_SERNO		= 10,
+	ATA_ID_BUF_SIZE		= 21,
+	ATA_ID_FW_REV		= 23,
+	ATA_ID_PROD		= 27,
+	ATA_ID_MAX_MULTSECT	= 47,
+	ATA_ID_DWORD_IO		= 48,	/* before ATA-8 */
+	ATA_ID_TRUSTED		= 48,	/* ATA-8 and later */
+	ATA_ID_CAPABILITY	= 49,
+	ATA_ID_OLD_PIO_MODES	= 51,
+	ATA_ID_OLD_DMA_MODES	= 52,
+	ATA_ID_FIELD_VALID	= 53,
+	ATA_ID_CUR_CYLS		= 54,
+	ATA_ID_CUR_HEADS	= 55,
+	ATA_ID_CUR_SECTORS	= 56,
+	ATA_ID_MULTSECT		= 59,
+	ATA_ID_LBA_CAPACITY	= 60,
+	ATA_ID_SWDMA_MODES	= 62,
+	ATA_ID_MWDMA_MODES	= 63,
+	ATA_ID_PIO_MODES	= 64,
+	ATA_ID_EIDE_DMA_MIN	= 65,
+	ATA_ID_EIDE_DMA_TIME	= 66,
+	ATA_ID_EIDE_PIO		= 67,
+	ATA_ID_EIDE_PIO_IORDY	= 68,
+	ATA_ID_ADDITIONAL_SUPP	= 69,
+	ATA_ID_QUEUE_DEPTH	= 75,
+	ATA_ID_SATA_CAPABILITY	= 76,
+	ATA_ID_SATA_CAPABILITY_2 = 77,
+	ATA_ID_FEATURE_SUPP	= 78,
+	ATA_ID_MAJOR_VER	= 80,
+	ATA_ID_COMMAND_SET_1	= 82,
+	ATA_ID_COMMAND_SET_2	= 83,
+	ATA_ID_CFSSE		= 84,
+	ATA_ID_CFS_ENABLE_1	= 85,
+	ATA_ID_CFS_ENABLE_2	= 86,
+	ATA_ID_CSF_DEFAULT	= 87,
+	ATA_ID_UDMA_MODES	= 88,
+	ATA_ID_HW_CONFIG	= 93,
+	ATA_ID_SPG		= 98,
+	ATA_ID_LBA_CAPACITY_2	= 100,
+	ATA_ID_SECTOR_SIZE	= 106,
+	ATA_ID_WWN		= 108,
+	ATA_ID_LOGICAL_SECTOR_SIZE	= 117,	/* and 118 */
+	ATA_ID_COMMAND_SET_3	= 119,
+	ATA_ID_COMMAND_SET_4	= 120,
+	ATA_ID_LAST_LUN		= 126,
+	ATA_ID_DLF		= 128,
+	ATA_ID_CSFO
= 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = (1 << 1), + + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + + ATA_PCI_CTL_OFS = 2, + + ATA_PIO0 = (1 << 0), + ATA_PIO1 = ATA_PIO0 | (1 << 1), + ATA_PIO2 = ATA_PIO1 | (1 << 2), + ATA_PIO3 = ATA_PIO2 | (1 << 3), + ATA_PIO4 = ATA_PIO3 | (1 << 4), + ATA_PIO5 = ATA_PIO4 | (1 << 5), + ATA_PIO6 = ATA_PIO5 | (1 << 6), + + ATA_PIO4_ONLY = (1 << 4), + + ATA_SWDMA0 = (1 << 0), + ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1), + ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2), + + ATA_SWDMA2_ONLY = (1 << 2), + + ATA_MWDMA0 = (1 << 0), + ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1), + ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2), + ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3), + ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4), + + ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2), + ATA_MWDMA2_ONLY = (1 << 2), + + ATA_UDMA0 = (1 << 0), + ATA_UDMA1 = ATA_UDMA0 | (1 << 1), + ATA_UDMA2 = ATA_UDMA1 | (1 << 2), + ATA_UDMA3 = ATA_UDMA2 | (1 << 3), + ATA_UDMA4 = ATA_UDMA3 | (1 << 4), + ATA_UDMA5 = ATA_UDMA4 | (1 << 5), + ATA_UDMA6 = ATA_UDMA5 | (1 << 6), + ATA_UDMA7 = ATA_UDMA6 | (1 << 7), + /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */ + + ATA_UDMA24_ONLY = (1 << 2) | (1 << 4), + + ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */ + + /* DMA-related */ + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ), + ATA_PRD_EOT = (1 << 31), /* end-of-table flag */ + + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = (1 << 3), + ATA_DMA_START = (1 << 0), + ATA_DMA_INTR = (1 << 2), + ATA_DMA_ERR = (1 << 1), + ATA_DMA_ACTIVE = (1 << 0), + + /* bits in ATA command block registers */ + ATA_HOB = (1 << 7), /* LBA48 selector */ + ATA_NIEN = (1 << 1), /* disable-irq flag */ + ATA_LBA = (1 << 6), /* LBA28 selector */ + ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */ + ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */ + ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */ + ATA_BUSY = (1 << 7), /* BSY status bit */ + ATA_DRDY = (1 << 6), /* device ready */ + ATA_DF = (1 << 5), /* device fault */ + ATA_DSC = (1 << 4), /* drive seek complete */ + ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_CORR = (1 << 2), /* corrected data error */ + ATA_SENSE = (1 << 1), /* sense code available */ + ATA_ERR = (1 << 0), /* have an error */ + ATA_SRST = (1 << 2), /* software reset */ + ATA_ICRC = (1 << 7), /* interface CRC error */ + ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ + ATA_UNC = (1 << 6), /* uncorrectable media error */ + ATA_MC = (1 << 5), /* media changed */ + ATA_IDNF = (1 << 4), /* ID not found */ + ATA_MCR = (1 << 3), /* media change requested */ + ATA_ABORTED = (1 << 2), /* command aborted */ + ATA_TRK0NF = (1 << 1), /* track 0 not found */ + ATA_AMNF = (1 << 0), /* address mark not found */ + ATAPI_LFS = 0xF0, /* last failed sense */ + ATAPI_EOM = ATA_TRK0NF, /* end of media */ + ATAPI_ILI = ATA_AMNF, /* illegal length indication */ + ATAPI_IO = (1 << 1), + ATAPI_COD = (1 << 0), + + /* ATA command block registers */ + ATA_REG_DATA = 0x00, + ATA_REG_ERR = 0x01, + ATA_REG_NSECT = 0x02, + ATA_REG_LBAL = 0x03, + ATA_REG_LBAM = 0x04, + ATA_REG_LBAH = 0x05, + ATA_REG_DEVICE = 0x06, + ATA_REG_STATUS = 0x07, + + ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */ + ATA_REG_CMD = ATA_REG_STATUS, + ATA_REG_BYTEL = ATA_REG_LBAM, + ATA_REG_BYTEH = ATA_REG_LBAH, + ATA_REG_DEVSEL = 
ATA_REG_DEVICE, + ATA_REG_IRQ = ATA_REG_NSECT, + + /* ATA device commands */ + ATA_CMD_DEV_RESET = 0x08, /* ATAPI device reset */ + ATA_CMD_CHK_POWER = 0xE5, /* check power mode */ + ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */ + ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ + ATA_CMD_EDD = 0x90, /* execute device diagnostic */ + ATA_CMD_DOWNLOAD_MICRO = 0x92, + ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93, + ATA_CMD_NOP = 0x00, + ATA_CMD_FLUSH = 0xE7, + ATA_CMD_FLUSH_EXT = 0xEA, + ATA_CMD_ID_ATA = 0xEC, + ATA_CMD_ID_ATAPI = 0xA1, + ATA_CMD_SERVICE = 0xA2, + ATA_CMD_READ = 0xC8, + ATA_CMD_READ_EXT = 0x25, + ATA_CMD_READ_QUEUED = 0x26, + ATA_CMD_READ_STREAM_EXT = 0x2B, + ATA_CMD_READ_STREAM_DMA_EXT = 0x2A, + ATA_CMD_WRITE = 0xCA, + ATA_CMD_WRITE_EXT = 0x35, + ATA_CMD_WRITE_QUEUED = 0x36, + ATA_CMD_WRITE_STREAM_EXT = 0x3B, + ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A, + ATA_CMD_WRITE_FUA_EXT = 0x3D, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, + ATA_CMD_FPDMA_READ = 0x60, + ATA_CMD_FPDMA_WRITE = 0x61, + ATA_CMD_NCQ_NON_DATA = 0x63, + ATA_CMD_FPDMA_SEND = 0x64, + ATA_CMD_FPDMA_RECV = 0x65, + ATA_CMD_PIO_READ = 0x20, + ATA_CMD_PIO_READ_EXT = 0x24, + ATA_CMD_PIO_WRITE = 0x30, + ATA_CMD_PIO_WRITE_EXT = 0x34, + ATA_CMD_READ_MULTI = 0xC4, + ATA_CMD_READ_MULTI_EXT = 0x29, + ATA_CMD_WRITE_MULTI = 0xC5, + ATA_CMD_WRITE_MULTI_EXT = 0x39, + ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE, + ATA_CMD_SET_FEATURES = 0xEF, + ATA_CMD_SET_MULTI = 0xC6, + ATA_CMD_PACKET = 0xA0, + ATA_CMD_VERIFY = 0x40, + ATA_CMD_VERIFY_EXT = 0x42, + ATA_CMD_WRITE_UNCORR_EXT = 0x45, + ATA_CMD_STANDBYNOW1 = 0xE0, + ATA_CMD_IDLEIMMEDIATE = 0xE1, + ATA_CMD_SLEEP = 0xE6, + ATA_CMD_INIT_DEV_PARAMS = 0x91, + ATA_CMD_READ_NATIVE_MAX = 0xF8, + ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, + ATA_CMD_SET_MAX = 0xF9, + ATA_CMD_SET_MAX_EXT = 0x37, + ATA_CMD_READ_LOG_EXT = 0x2F, + ATA_CMD_WRITE_LOG_EXT = 0x3F, + ATA_CMD_READ_LOG_DMA_EXT = 0x47, + ATA_CMD_WRITE_LOG_DMA_EXT = 0x57, + ATA_CMD_TRUSTED_NONDATA = 0x5B, + ATA_CMD_TRUSTED_RCV = 0x5C, + ATA_CMD_TRUSTED_RCV_DMA = 0x5D, + ATA_CMD_TRUSTED_SND = 0x5E, + ATA_CMD_TRUSTED_SND_DMA = 0x5F, + ATA_CMD_PMP_READ = 0xE4, + ATA_CMD_PMP_READ_DMA = 0xE9, + ATA_CMD_PMP_WRITE = 0xE8, + ATA_CMD_PMP_WRITE_DMA = 0xEB, + ATA_CMD_CONF_OVERLAY = 0xB1, + ATA_CMD_SEC_SET_PASS = 0xF1, + ATA_CMD_SEC_UNLOCK = 0xF2, + ATA_CMD_SEC_ERASE_PREP = 0xF3, + ATA_CMD_SEC_ERASE_UNIT = 0xF4, + ATA_CMD_SEC_FREEZE_LOCK = 0xF5, + ATA_CMD_SEC_DISABLE_PASS = 0xF6, + ATA_CMD_CONFIG_STREAM = 0x51, + ATA_CMD_SMART = 0xB0, + ATA_CMD_MEDIA_LOCK = 0xDE, + ATA_CMD_MEDIA_UNLOCK = 0xDF, + ATA_CMD_DSM = 0x06, + ATA_CMD_CHK_MED_CRD_TYP = 0xD1, + ATA_CMD_CFA_REQ_EXT_ERR = 0x03, + ATA_CMD_CFA_WRITE_NE = 0x38, + ATA_CMD_CFA_TRANS_SECT = 0x87, + ATA_CMD_CFA_ERASE = 0xC0, + ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, + ATA_CMD_REQ_SENSE_DATA = 0x0B, + ATA_CMD_SANITIZE_DEVICE = 0xB4, + ATA_CMD_ZAC_MGMT_IN = 0x4A, + ATA_CMD_ZAC_MGMT_OUT = 0x9F, + + /* marked obsolete in the ATA/ATAPI-7 spec */ + ATA_CMD_RESTORE = 0x10, + + /* Subcmds for ATA_CMD_FPDMA_RECV */ + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 0x02, + + /* Subcmds for ATA_CMD_FPDMA_SEND */ + ATA_SUBCMD_FPDMA_SEND_DSM = 0x00, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02, + + /* Subcmds for ATA_CMD_NCQ_NON_DATA */ + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0x00, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 0x06, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07, + + /* Subcmds for ATA_CMD_ZAC_MGMT_IN */ + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00, + + 
/* Subcmds for ATA_CMD_ZAC_MGMT_OUT */ + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04, + + /* READ_LOG_EXT pages */ + ATA_LOG_DIRECTORY = 0x0, + ATA_LOG_SATA_NCQ = 0x10, + ATA_LOG_NCQ_NON_DATA = 0x12, + ATA_LOG_NCQ_SEND_RECV = 0x13, + ATA_LOG_IDENTIFY_DEVICE = 0x30, + + /* Identify device log pages: */ + ATA_LOG_SECURITY = 0x06, + ATA_LOG_SATA_SETTINGS = 0x08, + ATA_LOG_ZONED_INFORMATION = 0x09, + + /* Identify device SATA settings log:*/ + ATA_LOG_DEVSLP_OFFSET = 0x30, + ATA_LOG_DEVSLP_SIZE = 0x08, + ATA_LOG_DEVSLP_MDAT = 0x00, + ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, + ATA_LOG_DEVSLP_DETO = 0x01, + ATA_LOG_DEVSLP_VALID = 0x07, + ATA_LOG_DEVSLP_VALID_MASK = 0x80, + ATA_LOG_NCQ_PRIO_OFFSET = 0x09, + + /* NCQ send and receive log */ + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 0x10, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0), + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1), + ATA_LOG_NCQ_SEND_RECV_SIZE = 0x14, + + /* NCQ Non-Data log */ + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0x00, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = (1 << 0), + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = (1 << 1), + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = (1 << 2), + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3), + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = (1 << 4), + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 0x1C, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = (1 << 0), + ATA_LOG_NCQ_NON_DATA_SIZE = 0x40, + + /* READ/WRITE LONG (obsolete) */ + ATA_CMD_READ_LONG = 0x22, + ATA_CMD_READ_LONG_ONCE = 0x23, + ATA_CMD_WRITE_LONG = 0x32, + ATA_CMD_WRITE_LONG_ONCE = 0x33, + + /* SETFEATURES stuff */ + SETFEATURES_XFER = 0x03, + XFER_UDMA_7 = 0x47, + XFER_UDMA_6 = 0x46, + XFER_UDMA_5 = 0x45, + XFER_UDMA_4 = 0x44, + XFER_UDMA_3 = 0x43, + XFER_UDMA_2 = 0x42, + XFER_UDMA_1 = 0x41, + XFER_UDMA_0 = 0x40, + XFER_MW_DMA_4 = 0x24, /* CFA only */ + XFER_MW_DMA_3 = 0x23, /* CFA only */ + XFER_MW_DMA_2 = 0x22, + XFER_MW_DMA_1 = 0x21, + XFER_MW_DMA_0 = 0x20, + XFER_SW_DMA_2 = 0x12, + XFER_SW_DMA_1 = 0x11, + XFER_SW_DMA_0 = 0x10, + XFER_PIO_6 = 0x0E, /* CFA only */ + XFER_PIO_5 = 0x0D, /* CFA only */ + XFER_PIO_4 = 0x0C, + XFER_PIO_3 = 0x0B, + XFER_PIO_2 = 0x0A, + XFER_PIO_1 = 0x09, + XFER_PIO_0 = 0x08, + XFER_PIO_SLOW = 0x00, + + SETFEATURES_WC_ON = 0x02, /* Enable write cache */ + SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + + SETFEATURES_RA_ON = 0xaa, /* Enable read look-ahead */ + SETFEATURES_RA_OFF = 0x55, /* Disable read look-ahead */ + + /* Enable/Disable Automatic Acoustic Management */ + SETFEATURES_AAM_ON = 0x42, + SETFEATURES_AAM_OFF = 0xC2, + + SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ + SETFEATURES_SPINUP_TIMEOUT = 30000, /* 30s timeout for drive spin-up from PUIS */ + + SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ + SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ + + /* SETFEATURE Sector counts for SATA features */ + SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */ + SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS 
Auto-Activate */ + SATA_DIPM = 0x03, /* Device Initiated Power Management */ + SATA_FPDMA_IN_ORDER = 0x04, /* FPDMA in-order data delivery */ + SATA_AN = 0x05, /* Asynchronous Notification */ + SATA_SSP = 0x06, /* Software Settings Preservation */ + SATA_DEVSLP = 0x09, /* Device Sleep */ + + SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ + + /* feature values for SET_MAX */ + ATA_SET_MAX_ADDR = 0x00, + ATA_SET_MAX_PASSWD = 0x01, + ATA_SET_MAX_LOCK = 0x02, + ATA_SET_MAX_UNLOCK = 0x03, + ATA_SET_MAX_FREEZE_LOCK = 0x04, + ATA_SET_MAX_PASSWD_DMA = 0x05, + ATA_SET_MAX_UNLOCK_DMA = 0x06, + + /* feature values for DEVICE CONFIGURATION OVERLAY */ + ATA_DCO_RESTORE = 0xC0, + ATA_DCO_FREEZE_LOCK = 0xC1, + ATA_DCO_IDENTIFY = 0xC2, + ATA_DCO_SET = 0xC3, + + /* feature values for SMART */ + ATA_SMART_ENABLE = 0xD8, + ATA_SMART_READ_VALUES = 0xD0, + ATA_SMART_READ_THRESHOLDS = 0xD1, + + /* feature values for Data Set Management */ + ATA_DSM_TRIM = 0x01, + + /* password used in LBA Mid / LBA High for executing SMART commands */ + ATA_SMART_LBAM_PASS = 0x4F, + ATA_SMART_LBAH_PASS = 0xC2, + + /* ATAPI stuff */ + ATAPI_PKT_DMA = (1 << 0), + ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: + 0=to device, 1=to host */ + ATAPI_CDB_LEN = 16, + + /* PMP stuff */ + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + + SATA_PMP_FEAT_BIST = (1 << 0), + SATA_PMP_FEAT_PMREQ = (1 << 1), + SATA_PMP_FEAT_DYNSSC = (1 << 2), + SATA_PMP_FEAT_NOTIFY = (1 << 3), + + /* cable types */ + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */ + ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? */ + ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */ + ATA_CBL_SATA = 6, + + /* SATA Status and Control Registers */ + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + + /* SError bits */ + SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */ + SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */ + SERR_DATA = (1 << 8), /* unrecovered data error */ + SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */ + SERR_PROTOCOL = (1 << 10), /* protocol violation */ + SERR_INTERNAL = (1 << 11), /* host internal error */ + SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */ + SERR_PHY_INT_ERR = (1 << 17), /* PHY internal error */ + SERR_COMM_WAKE = (1 << 18), /* Comm wake */ + SERR_10B_8B_ERR = (1 << 19), /* 10b to 8b decode error */ + SERR_DISPARITY = (1 << 20), /* Disparity */ + SERR_CRC = (1 << 21), /* CRC error */ + SERR_HANDSHAKE = (1 << 22), /* Handshake error */ + SERR_LINK_SEQ_ERR = (1 << 23), /* Link sequence error */ + SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. 
error */ + SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */ + SERR_DEV_XCHG = (1 << 26), /* device exchanged */ +}; + +enum ata_prot_flags { + /* protocol flags */ + ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ + ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ + ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ + ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ + + /* taskfile protocols */ + ATA_PROT_UNKNOWN = (u8)-1, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = ATA_PROT_FLAG_PIO, + ATA_PROT_DMA = ATA_PROT_FLAG_DMA, + ATA_PROT_NCQ_NODATA = ATA_PROT_FLAG_NCQ, + ATA_PROT_NCQ = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ, + ATAPI_PROT_NODATA = ATA_PROT_FLAG_ATAPI, + ATAPI_PROT_PIO = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO, + ATAPI_PROT_DMA = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA, +}; + +enum ata_ioctls { + ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ + ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ +}; + +/* core structures */ + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +/* + * id tests + */ +#define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) +#define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) +#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) +#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8)) +#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) +#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) +#define ata_id_has_atapi_AN(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 5))) +#define ata_id_has_fpdma_aa(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2))) +#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) +#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) +#define ata_id_u32(id,n) \ + (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) +#define ata_id_u64(id,n) \ + ( ((u64) (id)[(n) + 3] << 48) | \ + ((u64) (id)[(n) + 2] << 32) | \ + ((u64) (id)[(n) + 1] << 16) | \ + ((u64) (id)[(n) + 0]) ) + +#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) +#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) +#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) +#define ata_id_has_ncq_autosense(id) \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) + +static inline bool ata_id_has_hipm(const u16 *id) +{ + u16 val = id[ATA_ID_SATA_CAPABILITY]; + + if (val == 0 || val == 0xffff) + return false; + + return val & (1 << 9); +} + +static inline bool ata_id_has_dipm(const u16 *id) +{ + u16 val = id[ATA_ID_FEATURE_SUPP]; + + if (val == 0 || val == 0xffff) + return false; + + return val & (1 << 3); +} + + +static inline bool ata_id_has_fua(const u16 *id) +{ + if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFSSE] & (1 << 6); +} + +static inline bool ata_id_has_flush(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 12); +} + +static inline bool ata_id_flush_enabled(const u16 *id) +{ + if (ata_id_has_flush(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 12); +} + +static inline bool ata_id_has_flush_ext(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 13); +} + +static inline bool 
ata_id_flush_ext_enabled(const u16 *id) +{ + if (ata_id_has_flush_ext(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + /* + * some Maxtor disks have bit 13 defined incorrectly + * so check bit 10 too + */ + return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; +} + +static inline u32 ata_id_logical_sector_size(const u16 *id) +{ + /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. + * IDENTIFY DEVICE data, word 117-118. + * 0xd000 ignores bit 13 (logical:physical > 1) + */ + if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000) + return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16) + + id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16)) ; + return ATA_SECT_SIZE; +} + +static inline u8 ata_id_log2_per_physical_sector(const u16 *id) +{ + /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. + * IDENTIFY DEVICE data, word 106. + * 0xe000 ignores bit 12 (logical sector > 512 bytes) + */ + if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000) + return (id[ATA_ID_SECTOR_SIZE] & 0xf); + return 0; +} + +/* Offset of logical sectors relative to physical sectors. + * + * If device has more than one logical sector per physical sector + * (aka 512 byte emulation), vendors might offset the "sector 0" address + * so sector 63 is "naturally aligned" - e.g. FAT partition table. + * This avoids Read/Mod/Write penalties when using FAT partition table + * and updating "well aligned" (FS perspective) physical sectors on every + * transaction. + */ +static inline u16 ata_id_logical_sector_offset(const u16 *id, + u8 log2_per_phys) +{ + u16 word_209 = id[209]; + + if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) { + u16 first = word_209 & 0x3fff; + if (first > 0) + return (1 << log2_per_phys) - first; + } + return 0; +} + +static inline bool ata_id_has_lba48(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 10); +} + +static inline bool ata_id_lba48_enabled(const u16 *id) +{ + if (ata_id_has_lba48(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 10); +} + +static inline bool ata_id_hpa_enabled(const u16 *id) +{ + /* Yes children, word 83 valid bits cover word 82 data */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + /* And 87 covers 85-87 */ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + /* Check command sets enabled as well as supported */ + if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 10); +} + +static inline bool ata_id_has_wcache(const u16 *id) +{ + /* Yes children, word 83 valid bits cover word 82 data */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 5); +} + +static inline bool ata_id_has_pm(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 3); +} + +static inline bool ata_id_rahead_enabled(const u16 *id) +{ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); +} + +static inline bool ata_id_wcache_enabled(const u16 *id) +{ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); +} + +static inline bool ata_id_has_read_log_dma_ext(const u16 *id) +{ + /* Word 86 must have bit 15 set */ + if 
(!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + + /* READ LOG DMA EXT support can be signaled either from word 119 + * or from word 120. The format is the same for both words: Bit + * 15 must be cleared, bit 14 set and bit 3 set. + */ + if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 || + (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008) + return true; + + return false; +} + +static inline bool ata_id_has_sense_reporting(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_3] & (1 << 6); +} + +static inline bool ata_id_sense_reporting_enabled(const u16 *id) +{ + if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + return false; + return id[ATA_ID_COMMAND_SET_4] & (1 << 6); +} + +/** + * + * Word: 206 - SCT Command Transport + * 15:12 - Vendor Specific + * 11:6 - Reserved + * 5 - SCT Command Transport Data Tables supported + * 4 - SCT Command Transport Features Control supported + * 3 - SCT Command Transport Error Recovery Control supported + * 2 - SCT Command Transport Write Same supported + * 1 - SCT Command Transport Long Sector Access supported + * 0 - SCT Command Transport supported + */ +static inline bool ata_id_sct_data_tables(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false; +} + +static inline bool ata_id_sct_features_ctrl(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false; +} + +static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false; +} + +static inline bool ata_id_sct_long_sector_access(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false; +} + +static inline bool ata_id_sct_supported(const u16 *id) +{ + return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false; +} + +/** + * ata_id_major_version - get ATA level of drive + * @id: Identify data + * + * Caveats: + * ATA-1 considers identify optional + * ATA-2 introduces mandatory identify + * ATA-3 introduces word 80 and accurate reporting + * + * The practical impact of this is that ata_id_major_version cannot + * reliably report on drives below ATA3. + */ + +static inline unsigned int ata_id_major_version(const u16 *id) +{ + unsigned int mver; + + if (id[ATA_ID_MAJOR_VER] == 0xFFFF) + return 0; + + for (mver = 14; mver >= 1; mver--) + if (id[ATA_ID_MAJOR_VER] & (1 << mver)) + break; + return mver; +} + +static inline bool ata_id_is_sata(const u16 *id) +{ + /* + * See if word 93 is 0 AND drive is at least ATA-5 compatible + * verifying that word 80 by casting it to a signed type -- + * this trick allows us to filter out the reserved values of + * 0x0000 and 0xffff along with the earlier ATA revisions... 
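	 * e.g. (editorial note) word 80 == 0xffff casts to -1 and fails the
	 * >= 0x0020 test, while a genuine ATA-5 device (bit 5, 0x0020) passes.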
+ */ + if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020) + return true; + return false; +} + +static inline bool ata_id_has_tpm(const u16 *id) +{ + /* The TPM bits are only valid on ATA8 */ + if (ata_id_major_version(id) < 8) + return false; + if ((id[48] & 0xC000) != 0x4000) + return false; + return id[48] & (1 << 0); +} + +static inline bool ata_id_has_dword_io(const u16 *id) +{ + /* ATA 8 reuses this flag for "trusted" computing */ + if (ata_id_major_version(id) > 7) + return false; + return id[ATA_ID_DWORD_IO] & (1 << 0); +} + +static inline bool ata_id_has_trusted(const u16 *id) +{ + if (ata_id_major_version(id) <= 7) + return false; + return id[ATA_ID_TRUSTED] & (1 << 0); +} + +static inline bool ata_id_has_unload(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 && + id[ATA_ID_CFSSE] & (1 << 13)) + return true; + return false; +} + +static inline bool ata_id_has_wwn(const u16 *id) +{ + return (id[ATA_ID_CSF_DEFAULT] & 0xC100) == 0x4100; +} + +static inline int ata_id_form_factor(const u16 *id) +{ + u16 val = id[168]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + val &= 0xf; + + if (val > 5) + return 0; + + return val; +} + +static inline int ata_id_rotation_rate(const u16 *id) +{ + u16 val = id[217]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + if (val > 1 && val < 0x401) + return 0; + + return val; +} + +static inline bool ata_id_has_ncq_send_and_recv(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6); +} + +static inline bool ata_id_has_ncq_non_data(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); +} + +static inline bool ata_id_has_ncq_prio(const u16 *id) +{ + return id[ATA_ID_SATA_CAPABILITY] & BIT(12); +} + +static inline bool ata_id_has_trim(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_DATA_SET_MGMT] & 1)) + return true; + return false; +} + +static inline bool ata_id_has_zero_after_trim(const u16 *id) +{ + /* DSM supported, deterministic read, and read zero after trim set */ + if (ata_id_has_trim(id) && + (id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020) + return true; + + return false; +} + +static inline bool ata_id_current_chs_valid(const u16 *id) +{ + /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command + has not been issued to the device then the values of + id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. */ + return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ + id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ + id[ATA_ID_CUR_HEADS] && /* heads in current translation */ + id[ATA_ID_CUR_HEADS] <= 16 && + id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ +} + +static inline bool ata_id_is_cfa(const u16 *id) +{ + if ((id[ATA_ID_CONFIG] == 0x848A) || /* Traditional CF */ + (id[ATA_ID_CONFIG] == 0x844A)) /* Delkin Devices CF */ + return true; + /* + * CF specs don't require specific value in the word 0 anymore and yet + * they forbid to report the ATA version in the word 80 and require the + * CFA feature set support to be indicated in the word 83 in this case. + * Unfortunately, some cards only follow either of this requirements, + * and while those that don't indicate CFA feature support need some + * sort of quirk list, it seems impractical for the ones that do... 
+ */ + return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004; +} + +static inline bool ata_id_is_ssd(const u16 *id) +{ + return id[ATA_ID_ROT_SPEED] == 0x01; +} + +static inline u8 ata_id_zoned_cap(const u16 *id) +{ + return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3); +} + +static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio) +{ + /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ + if (pio > 4 && ata_id_is_cfa(id)) + return false; + /* For PIO3 and higher it is mandatory. */ + if (pio > 2) + return true; + /* Turn it on when possible. */ + return ata_id_has_iordy(id); +} + +static inline bool ata_drive_40wire(const u16 *dev_id) +{ + if (ata_id_is_sata(dev_id)) + return false; /* SATA */ + if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) + return false; /* 80 wire */ + return true; +} + +static inline bool ata_drive_40wire_relaxed(const u16 *dev_id) +{ + if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) + return false; /* 80 wire */ + return true; +} + +static inline int atapi_cdb_len(const u16 *dev_id) +{ + u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; + switch (tmp) { + case 0: return 12; + case 1: return 16; + default: return -1; + } +} + +static inline int atapi_command_packet_set(const u16 *dev_id) +{ + return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; +} + +static inline bool atapi_id_dmadir(const u16 *dev_id) +{ + return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000); +} + +/* + * ata_id_is_lba_capacity_ok() performs a sanity check on + * the claimed LBA capacity value for the device. + * + * Returns 1 if LBA capacity looks sensible, 0 otherwise. + * + * It is called only once for each device. + */ +static inline bool ata_id_is_lba_capacity_ok(u16 *id) +{ + unsigned long lba_sects, chs_sects, head, tail; + + /* No non-LBA info .. so valid! */ + if (id[ATA_ID_CYLS] == 0) + return true; + + lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + + /* + * The ATA spec tells large drives to return + * C/H/S = 16383/16/63 independent of their size. + * Some drives can be jumpered to use 15 heads instead of 16. + * Some drives can be jumpered to use 4092 cyls instead of 16383. 
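	 * (editorial note: 16383 * 16 * 63 caps the CHS-reported capacity at
	 * 16514064 sectors, roughly 8.4 GB, so lba_sects may legitimately be
	 * far larger on such drives)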
+ */ + if ((id[ATA_ID_CYLS] == 16383 || + (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) && + id[ATA_ID_SECTORS] == 63 && + (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) && + (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS])) + return true; + + chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS]; + + /* perform a rough sanity check on lba_sects: within 10% is OK */ + if (lba_sects - chs_sects < chs_sects/10) + return true; + + /* some drives have the word order reversed */ + head = (lba_sects >> 16) & 0xffff; + tail = lba_sects & 0xffff; + lba_sects = head | (tail << 16); + + if (lba_sects - chs_sects < chs_sects/10) { + *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects); + return true; /* LBA capacity is (now) good */ + } + + return false; /* LBA capacity value may be bad */ +} + +static inline void ata_id_to_hd_driveid(u16 *id) +{ +#ifdef __BIG_ENDIAN + /* accessed in struct hd_driveid as 8-bit values */ + id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]); + id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]); + id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]); + id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]); + id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]); + + /* as 32-bit values */ + *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG); + + /* as 64-bit value */ + *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] = + ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); +#endif +} + +static inline bool ata_ok(u8 status) +{ + return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) + == ATA_DRDY); +} + +static inline bool lba_28_ok(u64 block, u32 n_block) +{ + /* check the ending block number: must be LESS THAN 0x0fffffff */ + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS); +} + +static inline bool lba_48_ok(u64 block, u32 n_block) +{ + /* check the ending block number */ + return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48); +} + +#define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff) +#define sata_pmp_gscr_devid(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16) +#define sata_pmp_gscr_rev(gscr) (((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff) +#define sata_pmp_gscr_ports(gscr) ((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf) + +#endif /* __LINUX_ATA_H__ */ diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h new file mode 100644 index 0000000..9cafec9 --- /dev/null +++ b/include/linux/ata_platform.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ATA_PLATFORM_H +#define __LINUX_ATA_PLATFORM_H + +struct pata_platform_info { + /* + * I/O port shift, for platforms with ports that are + * constantly spaced and need larger than the 1-byte + * spacing used by ata_std_ports(). 
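	 * (editorial example: ioport_shift = 2 would be used when the
	 * registers are spaced four bytes apart)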
+ */
+	unsigned int ioport_shift;
+};
+
+struct scsi_host_template;
+
+extern int __pata_platform_probe(struct device *dev,
+				 struct resource *io_res,
+				 struct resource *ctl_res,
+				 struct resource *irq_res,
+				 unsigned int ioport_shift,
+				 int __pio_mask,
+				 struct scsi_host_template *sht,
+				 bool use16bit);
+
+/*
+ * Marvell SATA private data
+ */
+struct mv_sata_platform_data {
+	int	n_ports; /* number of sata ports */
+};
+
+#endif /* __LINUX_ATA_PLATFORM_H */
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
new file mode 100644
index 0000000..f6034ba
--- /dev/null
+++ b/include/linux/atalk.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ATALK_H__
+#define __LINUX_ATALK_H__
+
+
+#include <net/sock.h>
+#include <uapi/linux/atalk.h>
+
+struct atalk_route {
+	struct net_device *dev;
+	struct atalk_addr target;
+	struct atalk_addr gateway;
+	int flags;
+	struct atalk_route *next;
+};
+
+/**
+ *	struct atalk_iface - AppleTalk Interface
+ *	@dev - Network device associated with this interface
+ *	@address - Our address
+ *	@status - What are we doing?
+ *	@nets - Associated direct netrange
+ *	@next - next element in the list of interfaces
+ */
+struct atalk_iface {
+	struct net_device *dev;
+	struct atalk_addr address;
+	int status;
+#define ATIF_PROBE	1	/* Probing for an address */
+#define ATIF_PROBE_FAIL	2	/* Probe collided */
+	struct atalk_netrange nets;
+	struct atalk_iface *next;
+};
+
+struct atalk_sock {
+	/* struct sock has to be the first member of atalk_sock */
+	struct sock	sk;
+	__be16		dest_net;
+	__be16		src_net;
+	unsigned char	dest_node;
+	unsigned char	src_node;
+	unsigned char	dest_port;
+	unsigned char	src_port;
+};
+
+static inline struct atalk_sock *at_sk(struct sock *sk)
+{
+	return (struct atalk_sock *)sk;
+}
+
+struct ddpehdr {
+	__be16	deh_len_hops;	/* lower 10 bits are length, next 4 - hops */
+	__be16	deh_sum;
+	__be16	deh_dnet;
+	__be16	deh_snet;
+	__u8	deh_dnode;
+	__u8	deh_snode;
+	__u8	deh_dport;
+	__u8	deh_sport;
+	/* And netatalk apps expect to stick the type in themselves */
+};
+
+static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb)
+{
+	return (struct ddpehdr *)skb_transport_header(skb);
+}
+
+/* AppleTalk AARP headers */
+struct elapaarp {
+	__be16	hw_type;
+#define AARP_HW_TYPE_ETHERNET		1
+#define AARP_HW_TYPE_TOKENRING		2
+	__be16	pa_type;
+	__u8	hw_len;
+	__u8	pa_len;
+#define AARP_PA_ALEN			4
+	__be16	function;
+#define AARP_REQUEST			1
+#define AARP_REPLY			2
+#define AARP_PROBE			3
+	__u8	hw_src[ETH_ALEN];
+	__u8	pa_src_zero;
+	__be16	pa_src_net;
+	__u8	pa_src_node;
+	__u8	hw_dst[ETH_ALEN];
+	__u8	pa_dst_zero;
+	__be16	pa_dst_net;
+	__u8	pa_dst_node;
+} __attribute__ ((packed));
+
+static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
+{
+	return (struct elapaarp *)skb_transport_header(skb);
+}
+
+/* Not specified - how long till we drop a resolved entry */
+#define AARP_EXPIRY_TIME	(5 * 60 * HZ)
+/* Size of hash table */
+#define AARP_HASH_SIZE		16
+/* Fast retransmission timer when resolving */
+#define AARP_TICK_TIME		(HZ / 5)
+/* Send 10 requests then give up (2 seconds) */
+#define AARP_RETRANSMIT_LIMIT	10
+/*
+ * Some value bigger than total retransmit time + a bit for last reply to
+ * appear and to stop continual requests
+ */
+#define AARP_RESOLVE_TIME	(10 * HZ)
+
+extern struct datalink_proto *ddp_dl, *aarp_dl;
+extern int aarp_proto_init(void);
+
+/* Inter module exports */
+
+/* Give a device find its atif control structure */
+#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+static inline struct atalk_iface
*atalk_find_dev(struct net_device *dev) +{ + return dev->atalk_ptr; +} +#endif + +extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev); +extern struct net_device *atrtr_get_dev(struct atalk_addr *sa); +extern int aarp_send_ddp(struct net_device *dev, + struct sk_buff *skb, + struct atalk_addr *sa, void *hwaddr); +extern void aarp_device_down(struct net_device *dev); +extern void aarp_probe_network(struct atalk_iface *atif); +extern int aarp_proxy_probe_network(struct atalk_iface *atif, + struct atalk_addr *sa); +extern void aarp_proxy_remove(struct net_device *dev, + struct atalk_addr *sa); + +extern void aarp_cleanup_module(void); + +extern struct hlist_head atalk_sockets; +extern rwlock_t atalk_sockets_lock; + +extern struct atalk_route *atalk_routes; +extern rwlock_t atalk_routes_lock; + +extern struct atalk_iface *atalk_interfaces; +extern rwlock_t atalk_interfaces_lock; + +extern struct atalk_route atrtr_default; + +struct aarp_iter_state { + int bucket; + struct aarp_entry **table; +}; + +extern const struct seq_operations aarp_seq_ops; + +extern int sysctl_aarp_expiry_time; +extern int sysctl_aarp_tick_time; +extern int sysctl_aarp_retransmit_limit; +extern int sysctl_aarp_resolve_time; + +#ifdef CONFIG_SYSCTL +extern int atalk_register_sysctl(void); +extern void atalk_unregister_sysctl(void); +#else +static inline int atalk_register_sysctl(void) +{ + return 0; +} +static inline void atalk_unregister_sysctl(void) +{ +} +#endif + +#ifdef CONFIG_PROC_FS +extern int atalk_proc_init(void); +extern void atalk_proc_exit(void); +#else +static inline int atalk_proc_init(void) +{ + return 0; +} +static inline void atalk_proc_exit(void) +{ +} +#endif /* CONFIG_PROC_FS */ + +#endif /* __LINUX_ATALK_H__ */ diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h new file mode 100644 index 0000000..76860a4 --- /dev/null +++ b/include/linux/ath9k_platform.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2009 Gabor Juhos + * Copyright (c) 2009 Imre Kaloz + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#ifndef _LINUX_ATH9K_PLATFORM_H
+#define _LINUX_ATH9K_PLATFORM_H
+
+#define ATH9K_PLAT_EEP_MAX_WORDS	2048
+
+struct ath9k_platform_data {
+	const char *eeprom_name;
+
+	u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
+	u8 *macaddr;
+
+	int led_pin;
+	u32 gpio_mask;
+	u32 gpio_val;
+
+	u32 bt_active_pin;
+	u32 bt_priority_pin;
+	u32 wlan_active_pin;
+
+	bool endian_check;
+	bool is_clk_25mhz;
+	bool tx_gain_buffalo;
+	bool disable_2ghz;
+	bool disable_5ghz;
+	bool led_active_high;
+
+	int (*get_mac_revision)(void);
+	int (*external_reset)(void);
+
+	bool use_eeprom;
+};
+
+#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/include/linux/atm.h b/include/linux/atm.h
new file mode 100644
index 0000000..4b50fd0
--- /dev/null
+++ b/include/linux/atm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* atm.h - general ATM declarations */
+#ifndef _LINUX_ATM_H
+#define _LINUX_ATM_H
+
+#include <uapi/linux/atm.h>
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atmif_sioc {
+	int number;
+	int length;
+	compat_uptr_t arg;
+};
+#endif
+#endif
diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h
new file mode 100644
index 0000000..84f3aab
--- /dev/null
+++ b/include/linux/atm_suni.h
@@ -0,0 +1,12 @@
+/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by
+		driver-specific utilities) */
+
+/* Written 1998,2000 by Werner Almesberger, EPFL ICA */
+
+
+#ifndef LINUX_ATM_SUNI_H
+#define LINUX_ATM_SUNI_H
+
+/* everything obsoleted */
+
+#endif
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
new file mode 100644
index 0000000..c8ecf6f
--- /dev/null
+++ b/include/linux/atm_tcp.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by
+	       driver-specific utilities) */
+
+/* Written 1997-2000 by Werner Almesberger, EPFL LRC/ICA */
+
+#ifndef LINUX_ATM_TCP_H
+#define LINUX_ATM_TCP_H
+
+#include <uapi/linux/atm_tcp.h>
+
+
+struct atm_tcp_ops {
+	int (*attach)(struct atm_vcc *vcc,int itf);
+	int (*create_persistent)(int itf);
+	int (*remove_persistent)(int itf);
+	struct module *owner;
+};
+
+extern struct atm_tcp_ops atm_tcp_ops;
+
+#endif
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
new file mode 100644
index 0000000..8124815
--- /dev/null
+++ b/include/linux/atmdev.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* atmdev.h - ATM device driver declarations and various related items */
+#ifndef LINUX_ATMDEV_H
+#define LINUX_ATMDEV_H
+
+
+#include <linux/wait.h> /* wait_queue_head_t */
+#include <linux/time.h> /* struct timeval */
+#include <linux/net.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/uio.h>
+#include <net/sock.h>
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+#include <uapi/linux/atmdev.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+
+extern struct proc_dir_entry *atm_proc_root;
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atm_iobuf {
+	int length;
+	compat_uptr_t buffer;
+};
+#endif
+
+struct k_atm_aal_stats {
+#define __HANDLE_ITEM(i) atomic_t i
+	__AAL_STAT_ITEMS
+#undef __HANDLE_ITEM
+};
+
+
+struct k_atm_dev_stats {
+	struct k_atm_aal_stats aal0;
+	struct k_atm_aal_stats aal34;
+	struct k_atm_aal_stats aal5;
+};
+
+struct device;
+
+enum {
+	ATM_VF_ADDR,		/* Address is in use. Set by anybody, cleared
+				   by device driver. */
+	ATM_VF_READY,		/* VC is ready to transfer data. Set by device
+				   driver, cleared by anybody.
*/ + ATM_VF_PARTIAL, /* resources are bound to PVC (partial PVC + setup), controlled by socket layer */ + ATM_VF_REGIS, /* registered with demon, controlled by SVC + socket layer */ + ATM_VF_BOUND, /* local SAP is set, controlled by SVC socket + layer */ + ATM_VF_RELEASED, /* demon has indicated/requested release, + controlled by SVC socket layer */ + ATM_VF_HASQOS, /* QOS parameters have been set */ + ATM_VF_LISTEN, /* socket is used for listening */ + ATM_VF_META, /* SVC socket isn't used for normal data + traffic and doesn't depend on signaling + to be available */ + ATM_VF_SESSION, /* VCC is p2mp session control descriptor */ + ATM_VF_HASSAP, /* SAP has been set */ + ATM_VF_CLOSE, /* asynchronous close - treat like VF_RELEASED*/ + ATM_VF_WAITING, /* waiting for reply from sigd */ + ATM_VF_IS_CLIP, /* in use by CLIP protocol */ +}; + + +#define ATM_VF2VS(flags) \ + (test_bit(ATM_VF_READY,&(flags)) ? ATM_VS_CONNECTED : \ + test_bit(ATM_VF_RELEASED,&(flags)) ? ATM_VS_CLOSING : \ + test_bit(ATM_VF_LISTEN,&(flags)) ? ATM_VS_LISTEN : \ + test_bit(ATM_VF_REGIS,&(flags)) ? ATM_VS_INUSE : \ + test_bit(ATM_VF_BOUND,&(flags)) ? ATM_VS_BOUND : ATM_VS_IDLE) + + +enum { + ATM_DF_REMOVED, /* device was removed from atm_devs list */ +}; + + +#define ATM_PHY_SIG_LOST 0 /* no carrier/light */ +#define ATM_PHY_SIG_UNKNOWN 1 /* carrier/light status is unknown */ +#define ATM_PHY_SIG_FOUND 2 /* carrier/light okay */ + +#define ATM_ATMOPT_CLP 1 /* set CLP bit */ + +struct atm_vcc { + /* struct sock has to be the first member of atm_vcc */ + struct sock sk; + unsigned long flags; /* VCC flags (ATM_VF_*) */ + short vpi; /* VPI and VCI (types must be equal */ + /* with sockaddr) */ + int vci; + unsigned long aal_options; /* AAL layer options */ + unsigned long atm_options; /* ATM layer options */ + struct atm_dev *dev; /* device back pointer */ + struct atm_qos qos; /* QOS */ + struct atm_sap sap; /* SAP */ + void (*release_cb)(struct atm_vcc *vcc); /* release_sock callback */ + void (*push)(struct atm_vcc *vcc,struct sk_buff *skb); + void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */ + int (*push_oam)(struct atm_vcc *vcc,void *cell); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + void *dev_data; /* per-device data */ + void *proto_data; /* per-protocol data */ + struct k_atm_aal_stats *stats; /* pointer to AAL stats group */ + struct module *owner; /* owner of ->push function */ + /* SVC part --- may move later ------------------------------------- */ + short itf; /* interface number */ + struct sockaddr_atmsvc local; + struct sockaddr_atmsvc remote; + /* Multipoint part ------------------------------------------------- */ + struct atm_vcc *session; /* session VCC descriptor */ + /* Other stuff ----------------------------------------------------- */ + void *user_back; /* user backlink - not touched by */ + /* native ATM stack. Currently used */ + /* by CLIP and sch_atm. 
 */ +}; + +static inline struct atm_vcc *atm_sk(struct sock *sk) +{ + return (struct atm_vcc *)sk; +} + +static inline struct atm_vcc *ATM_SD(struct socket *sock) +{ + return atm_sk(sock->sk); +} + +static inline struct sock *sk_atm(struct atm_vcc *vcc) +{ + return (struct sock *)vcc; +} + +struct atm_dev_addr { + struct sockaddr_atmsvc addr; /* ATM address */ + struct list_head entry; /* next address */ +}; + +enum atm_addr_type_t { ATM_ADDR_LOCAL, ATM_ADDR_LECS }; + +struct atm_dev { + const struct atmdev_ops *ops; /* device operations; NULL if unused */ + const struct atmphy_ops *phy; /* PHY operations, may be undefined */ + /* (NULL) */ + const char *type; /* device type name */ + int number; /* device index */ + void *dev_data; /* per-device data */ + void *phy_data; /* private PHY data */ + unsigned long flags; /* device flags (ATM_DF_*) */ + struct list_head local; /* local ATM addresses */ + struct list_head lecs; /* LECS ATM addresses learned via ILMI */ + unsigned char esi[ESI_LEN]; /* ESI ("MAC" addr) */ + struct atm_cirange ci_range; /* VPI/VCI range */ + struct k_atm_dev_stats stats; /* statistics */ + char signal; /* signal status (ATM_PHY_SIG_*) */ + int link_rate; /* link rate (default: OC3) */ + refcount_t refcnt; /* reference count */ + spinlock_t lock; /* protect internal members */ +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; /* proc entry */ + char *proc_name; /* proc entry name */ +#endif + struct device class_dev; /* sysfs device */ + struct list_head dev_list; /* linkage */ +}; + + +/* OF: send_Oam Flags */ + +#define ATM_OF_IMMED 1 /* Attempt immediate delivery */ +#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ + + +/* + * ioctl, getsockopt, and setsockopt are optional and can be set to NULL. + */ + +struct atmdev_ops { /* only send is required */ + void (*dev_close)(struct atm_dev *dev); + int (*open)(struct atm_vcc *vcc); + void (*close)(struct atm_vcc *vcc); + int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); +#ifdef CONFIG_COMPAT + int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, + void __user *arg); +#endif + int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,int optlen); + int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,unsigned int optlen); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); + void (*phy_put)(struct atm_dev *dev,unsigned char value, + unsigned long addr); + unsigned char (*phy_get)(struct atm_dev *dev,unsigned long addr); + int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags); + int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page); + struct module *owner; +}; + +struct atmphy_ops { + int (*start)(struct atm_dev *dev); + int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); + void (*interrupt)(struct atm_dev *dev); + int (*stop)(struct atm_dev *dev); +}; + +struct atm_skb_data { + struct atm_vcc *vcc; /* ATM VCC */ + unsigned long atm_options; /* ATM layer options */ + unsigned int acct_truesize; /* truesize accounted to vcc */ +}; + +#define VCC_HTABLE_SIZE 32 + +extern struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; +extern rwlock_t vcc_sklist_lock; + +#define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb)) + +struct atm_dev *atm_dev_register(const char *type, struct device *parent, + const struct atmdev_ops *ops, + int number, /* -1 == pick first available */ + unsigned long *flags); +struct atm_dev *atm_dev_lookup(int
number); +void atm_dev_deregister(struct atm_dev *dev); + +/* atm_dev_signal_change + * + * Propagate lower layer signal change in atm_dev->signal to netdevice. + * The event will be sent via a notifier call chain. + */ +void atm_dev_signal_change(struct atm_dev *dev, char signal); + +void vcc_insert_socket(struct sock *sk); + +void atm_dev_release_vccs(struct atm_dev *dev); + +static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) +{ + /* + * Because ATM skbs may not belong to a sock (and we don't + * necessarily want to), skb->truesize may be adjusted, + * escaping the hack in pskb_expand_head() which avoids + * doing so for some cases. So stash the value of truesize + * at the time we accounted it, and atm_pop_raw() can use + * that value later, in case it changes. + */ + refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); + ATM_SKB(skb)->acct_truesize = skb->truesize; + ATM_SKB(skb)->atm_options = vcc->atm_options; +} + +static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) +{ + atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc); +} + + +static inline void atm_return(struct atm_vcc *vcc,int truesize) +{ + atomic_sub(truesize, &sk_atm(vcc)->sk_rmem_alloc); +} + + +static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size) +{ + return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) < + sk_atm(vcc)->sk_sndbuf; +} + + +static inline void atm_dev_hold(struct atm_dev *dev) +{ + refcount_inc(&dev->refcnt); +} + + +static inline void atm_dev_put(struct atm_dev *dev) +{ + if (refcount_dec_and_test(&dev->refcnt)) { + BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); + if (dev->ops->dev_close) + dev->ops->dev_close(dev); + put_device(&dev->class_dev); + } +} + + +int atm_charge(struct atm_vcc *vcc,int truesize); +struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, + gfp_t gfp_flags); +int atm_pcr_goal(const struct atm_trafprm *tp); + +void vcc_release_async(struct atm_vcc *vcc, int reply); + +struct atm_ioctl { + struct module *owner; + /* A module reference is kept if appropriate over this call. + * Return -ENOIOCTLCMD if you don't handle it. */ + int (*ioctl)(struct socket *, unsigned int cmd, unsigned long arg); + struct list_head list; +}; + +/** + * register_atm_ioctl - register handler for ioctl operations + * + * Special (non-device) handlers of ioctl's should + * register here. If you're a normal device, you should + * set .ioctl in your atmdev_ops instead. 
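+ *
+ * Illustrative sketch only (the handler and command names below are
+ * hypothetical, not part of this header): a non-device handler is
+ * declared and filled in as
+ *
+ *	static int my_atm_ioctl(struct socket *sock, unsigned int cmd,
+ *				unsigned long arg)
+ *	{
+ *		if (cmd != MY_ATM_CMD)		// hypothetical command
+ *			return -ENOIOCTLCMD;	// let other handlers try
+ *		return 0;
+ *	}
+ *
+ *	static struct atm_ioctl my_atm_ioctl_ops = {
+ *		.owner	= THIS_MODULE,
+ *		.ioctl	= my_atm_ioctl,
+ *	};
+ *
+ * and passed to register_atm_ioctl() at module init, with a matching
+ * deregister_atm_ioctl() call at module exit.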
+ */ +void register_atm_ioctl(struct atm_ioctl *); + +/** + * deregister_atm_ioctl - remove the ioctl handler + */ +void deregister_atm_ioctl(struct atm_ioctl *); + + +/* register_atmdevice_notifier - register atm_dev notify events + * + * Clients like br2684 will register notify events + * Currently we notify of signal found/lost + */ +int register_atmdevice_notifier(struct notifier_block *nb); +void unregister_atmdevice_notifier(struct notifier_block *nb); + +#endif diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h new file mode 100644 index 0000000..1491af3 --- /dev/null +++ b/include/linux/atmel-mci.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ATMEL_MCI_H +#define __LINUX_ATMEL_MCI_H + +#include <linux/types.h> +#include <linux/dmaengine.h> + +#define ATMCI_MAX_NR_SLOTS 2 + +/** + * struct mci_slot_pdata - board-specific per-slot configuration + * @bus_width: Number of data lines wired up the slot + * @detect_pin: GPIO pin wired to the card detect switch + * @wp_pin: GPIO pin wired to the write protect sensor + * @detect_is_active_high: The state of the detect pin when it is active + * @non_removable: The slot is not removable, only detect once + * + * If a given slot is not present on the board, @bus_width should be + * set to 0. The other fields are ignored in this case. + * + * Any pins that aren't available should be set to a negative value. + * + * Note that support for multiple slots is experimental -- some cards + * might get upset if we don't get the clock management exactly right. + * But in most cases, it should work just fine. + */ +struct mci_slot_pdata { + unsigned int bus_width; + int detect_pin; + int wp_pin; + bool detect_is_active_high; + bool non_removable; +}; + +/** + * struct mci_platform_data - board-specific MMC/SDcard configuration + * @dma_slave: DMA slave interface to use in data transfers. + * @slot: Per-slot configuration data.
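+ *
+ * A board file would typically provide this as static platform data;
+ * a minimal sketch, with hypothetical GPIO numbers and only slot 0
+ * wired up (per the note above, @bus_width = 0 marks an absent slot):
+ *
+ *	static struct mci_platform_data mci0_data = {
+ *		.slot[0] = {
+ *			.bus_width	= 4,
+ *			.detect_pin	= 28,	// hypothetical GPIO
+ *			.wp_pin		= 29,	// hypothetical GPIO
+ *		},
+ *		.slot[1] = {
+ *			.bus_width	= 0,	// slot not present
+ *		},
+ *	};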
+ */ +struct mci_platform_data { + void *dma_slave; + dma_filter_fn dma_filter; + struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS]; +}; + +#endif /* __LINUX_ATMEL_MCI_H */ diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h new file mode 100644 index 0000000..6091d2a --- /dev/null +++ b/include/linux/atmel-ssc.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __INCLUDE_ATMEL_SSC_H +#define __INCLUDE_ATMEL_SSC_H + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/io.h> + +struct atmel_ssc_platform_data { + int use_dma; + int has_fslen_ext; +}; + +struct ssc_device { + struct list_head list; + dma_addr_t phybase; + void __iomem *regs; + struct platform_device *pdev; + struct atmel_ssc_platform_data *pdata; + struct clk *clk; + int user; + int irq; + bool clk_from_rk_pin; + bool sound_dai; +}; + +struct ssc_device * __must_check ssc_request(unsigned int ssc_num); +void ssc_free(struct ssc_device *ssc); + +/* SSC register offsets */ + +/* SSC Control Register */ +#define SSC_CR 0x00000000 +#define SSC_CR_RXDIS_SIZE 1 +#define SSC_CR_RXDIS_OFFSET 1 +#define SSC_CR_RXEN_SIZE 1 +#define SSC_CR_RXEN_OFFSET 0 +#define SSC_CR_SWRST_SIZE 1 +#define SSC_CR_SWRST_OFFSET 15 +#define SSC_CR_TXDIS_SIZE 1 +#define SSC_CR_TXDIS_OFFSET 9 +#define SSC_CR_TXEN_SIZE 1 +#define SSC_CR_TXEN_OFFSET 8 + +/* SSC Clock Mode Register */ +#define SSC_CMR 0x00000004 +#define SSC_CMR_DIV_SIZE 12 +#define SSC_CMR_DIV_OFFSET 0 + +/* SSC Receive Clock Mode Register */ +#define SSC_RCMR 0x00000010 +#define SSC_RCMR_CKG_SIZE 2 +#define SSC_RCMR_CKG_OFFSET 6 +#define SSC_RCMR_CKI_SIZE 1 +#define SSC_RCMR_CKI_OFFSET 5 +#define SSC_RCMR_CKO_SIZE 3 +#define SSC_RCMR_CKO_OFFSET 2 +#define SSC_RCMR_CKS_SIZE 2 +#define SSC_RCMR_CKS_OFFSET 0 +#define SSC_RCMR_PERIOD_SIZE 8 +#define SSC_RCMR_PERIOD_OFFSET 24 +#define SSC_RCMR_START_SIZE 4 +#define SSC_RCMR_START_OFFSET 8 +#define SSC_RCMR_STOP_SIZE 1 +#define SSC_RCMR_STOP_OFFSET 12 +#define SSC_RCMR_STTDLY_SIZE 8 +#define SSC_RCMR_STTDLY_OFFSET 16 + +/* SSC Receive Frame Mode Register */ +#define SSC_RFMR 0x00000014 +#define SSC_RFMR_DATLEN_SIZE 5 +#define SSC_RFMR_DATLEN_OFFSET 0 +#define SSC_RFMR_DATNB_SIZE 4 +#define SSC_RFMR_DATNB_OFFSET 8 +#define SSC_RFMR_FSEDGE_SIZE 1 +#define SSC_RFMR_FSEDGE_OFFSET 24 +/* + * The FSLEN_EXT field exists on at91sam9rl, at91sam9g10, + * at91sam9g20, and at91sam9g45 and newer SoCs + */ +#define SSC_RFMR_FSLEN_EXT_SIZE 4 +#define SSC_RFMR_FSLEN_EXT_OFFSET 28 +#define SSC_RFMR_FSLEN_SIZE 4 +#define SSC_RFMR_FSLEN_OFFSET 16 +#define SSC_RFMR_FSOS_SIZE 4 +#define SSC_RFMR_FSOS_OFFSET 20 +#define SSC_RFMR_LOOP_SIZE 1 +#define SSC_RFMR_LOOP_OFFSET 5 +#define SSC_RFMR_MSBF_SIZE 1 +#define SSC_RFMR_MSBF_OFFSET 7 + +/* SSC Transmit Clock Mode Register */ +#define SSC_TCMR 0x00000018 +#define SSC_TCMR_CKG_SIZE 2 +#define SSC_TCMR_CKG_OFFSET 6 +#define SSC_TCMR_CKI_SIZE 1 +#define SSC_TCMR_CKI_OFFSET 5 +#define SSC_TCMR_CKO_SIZE 3 +#define SSC_TCMR_CKO_OFFSET 2 +#define SSC_TCMR_CKS_SIZE 2 +#define SSC_TCMR_CKS_OFFSET 0 +#define SSC_TCMR_PERIOD_SIZE 8 +#define SSC_TCMR_PERIOD_OFFSET 24 +#define SSC_TCMR_START_SIZE 4 +#define SSC_TCMR_START_OFFSET 8 +#define SSC_TCMR_STTDLY_SIZE 8 +#define SSC_TCMR_STTDLY_OFFSET 16 + +/* SSC Transmit Frame Mode Register */ +#define SSC_TFMR 0x0000001c +#define SSC_TFMR_DATDEF_SIZE 1 +#define SSC_TFMR_DATDEF_OFFSET 5 +#define SSC_TFMR_DATLEN_SIZE 5 +#define SSC_TFMR_DATLEN_OFFSET 0 +#define SSC_TFMR_DATNB_SIZE 4 +#define SSC_TFMR_DATNB_OFFSET 8 +#define SSC_TFMR_FSDEN_SIZE 1 +#define SSC_TFMR_FSDEN_OFFSET 23 +#define
SSC_TFMR_FSEDGE_SIZE 1 +#define SSC_TFMR_FSEDGE_OFFSET 24 +/* + * The FSLEN_EXT field exists on at91sam9rl, at91sam9g10, + * at91sam9g20, and at91sam9g45 and newer SoCs + */ +#define SSC_TFMR_FSLEN_EXT_SIZE 4 +#define SSC_TFMR_FSLEN_EXT_OFFSET 28 +#define SSC_TFMR_FSLEN_SIZE 4 +#define SSC_TFMR_FSLEN_OFFSET 16 +#define SSC_TFMR_FSOS_SIZE 3 +#define SSC_TFMR_FSOS_OFFSET 20 +#define SSC_TFMR_MSBF_SIZE 1 +#define SSC_TFMR_MSBF_OFFSET 7 + +/* SSC Receive Hold Register */ +#define SSC_RHR 0x00000020 +#define SSC_RHR_RDAT_SIZE 32 +#define SSC_RHR_RDAT_OFFSET 0 + +/* SSC Transmit Hold Register */ +#define SSC_THR 0x00000024 +#define SSC_THR_TDAT_SIZE 32 +#define SSC_THR_TDAT_OFFSET 0 + +/* SSC Receive Sync. Holding Register */ +#define SSC_RSHR 0x00000030 +#define SSC_RSHR_RSDAT_SIZE 16 +#define SSC_RSHR_RSDAT_OFFSET 0 + +/* SSC Transmit Sync. Holding Register */ +#define SSC_TSHR 0x00000034 +#define SSC_TSHR_TSDAT_SIZE 16 +#define SSC_TSHR_RSDAT_OFFSET 0 + +/* SSC Receive Compare 0 Register */ +#define SSC_RC0R 0x00000038 +#define SSC_RC0R_CP0_SIZE 16 +#define SSC_RC0R_CP0_OFFSET 0 + +/* SSC Receive Compare 1 Register */ +#define SSC_RC1R 0x0000003c +#define SSC_RC1R_CP1_SIZE 16 +#define SSC_RC1R_CP1_OFFSET 0 + +/* SSC Status Register */ +#define SSC_SR 0x00000040 +#define SSC_SR_CP0_SIZE 1 +#define SSC_SR_CP0_OFFSET 8 +#define SSC_SR_CP1_SIZE 1 +#define SSC_SR_CP1_OFFSET 9 +#define SSC_SR_ENDRX_SIZE 1 +#define SSC_SR_ENDRX_OFFSET 6 +#define SSC_SR_ENDTX_SIZE 1 +#define SSC_SR_ENDTX_OFFSET 2 +#define SSC_SR_OVRUN_SIZE 1 +#define SSC_SR_OVRUN_OFFSET 5 +#define SSC_SR_RXBUFF_SIZE 1 +#define SSC_SR_RXBUFF_OFFSET 7 +#define SSC_SR_RXEN_SIZE 1 +#define SSC_SR_RXEN_OFFSET 17 +#define SSC_SR_RXRDY_SIZE 1 +#define SSC_SR_RXRDY_OFFSET 4 +#define SSC_SR_RXSYN_SIZE 1 +#define SSC_SR_RXSYN_OFFSET 11 +#define SSC_SR_TXBUFE_SIZE 1 +#define SSC_SR_TXBUFE_OFFSET 3 +#define SSC_SR_TXEMPTY_SIZE 1 +#define SSC_SR_TXEMPTY_OFFSET 1 +#define SSC_SR_TXEN_SIZE 1 +#define SSC_SR_TXEN_OFFSET 16 +#define SSC_SR_TXRDY_SIZE 1 +#define SSC_SR_TXRDY_OFFSET 0 +#define SSC_SR_TXSYN_SIZE 1 +#define SSC_SR_TXSYN_OFFSET 10 + +/* SSC Interrupt Enable Register */ +#define SSC_IER 0x00000044 +#define SSC_IER_CP0_SIZE 1 +#define SSC_IER_CP0_OFFSET 8 +#define SSC_IER_CP1_SIZE 1 +#define SSC_IER_CP1_OFFSET 9 +#define SSC_IER_ENDRX_SIZE 1 +#define SSC_IER_ENDRX_OFFSET 6 +#define SSC_IER_ENDTX_SIZE 1 +#define SSC_IER_ENDTX_OFFSET 2 +#define SSC_IER_OVRUN_SIZE 1 +#define SSC_IER_OVRUN_OFFSET 5 +#define SSC_IER_RXBUFF_SIZE 1 +#define SSC_IER_RXBUFF_OFFSET 7 +#define SSC_IER_RXRDY_SIZE 1 +#define SSC_IER_RXRDY_OFFSET 4 +#define SSC_IER_RXSYN_SIZE 1 +#define SSC_IER_RXSYN_OFFSET 11 +#define SSC_IER_TXBUFE_SIZE 1 +#define SSC_IER_TXBUFE_OFFSET 3 +#define SSC_IER_TXEMPTY_SIZE 1 +#define SSC_IER_TXEMPTY_OFFSET 1 +#define SSC_IER_TXRDY_SIZE 1 +#define SSC_IER_TXRDY_OFFSET 0 +#define SSC_IER_TXSYN_SIZE 1 +#define SSC_IER_TXSYN_OFFSET 10 + +/* SSC Interrupt Disable Register */ +#define SSC_IDR 0x00000048 +#define SSC_IDR_CP0_SIZE 1 +#define SSC_IDR_CP0_OFFSET 8 +#define SSC_IDR_CP1_SIZE 1 +#define SSC_IDR_CP1_OFFSET 9 +#define SSC_IDR_ENDRX_SIZE 1 +#define SSC_IDR_ENDRX_OFFSET 6 +#define SSC_IDR_ENDTX_SIZE 1 +#define SSC_IDR_ENDTX_OFFSET 2 +#define SSC_IDR_OVRUN_SIZE 1 +#define SSC_IDR_OVRUN_OFFSET 5 +#define SSC_IDR_RXBUFF_SIZE 1 +#define SSC_IDR_RXBUFF_OFFSET 7 +#define SSC_IDR_RXRDY_SIZE 1 +#define SSC_IDR_RXRDY_OFFSET 4 +#define SSC_IDR_RXSYN_SIZE 1 +#define SSC_IDR_RXSYN_OFFSET 11 +#define SSC_IDR_TXBUFE_SIZE 1 +#define SSC_IDR_TXBUFE_OFFSET 3
+#define SSC_IDR_TXEMPTY_SIZE 1 +#define SSC_IDR_TXEMPTY_OFFSET 1 +#define SSC_IDR_TXRDY_SIZE 1 +#define SSC_IDR_TXRDY_OFFSET 0 +#define SSC_IDR_TXSYN_SIZE 1 +#define SSC_IDR_TXSYN_OFFSET 10 + +/* SSC Interrupt Mask Register */ +#define SSC_IMR 0x0000004c +#define SSC_IMR_CP0_SIZE 1 +#define SSC_IMR_CP0_OFFSET 8 +#define SSC_IMR_CP1_SIZE 1 +#define SSC_IMR_CP1_OFFSET 9 +#define SSC_IMR_ENDRX_SIZE 1 +#define SSC_IMR_ENDRX_OFFSET 6 +#define SSC_IMR_ENDTX_SIZE 1 +#define SSC_IMR_ENDTX_OFFSET 2 +#define SSC_IMR_OVRUN_SIZE 1 +#define SSC_IMR_OVRUN_OFFSET 5 +#define SSC_IMR_RXBUFF_SIZE 1 +#define SSC_IMR_RXBUFF_OFFSET 7 +#define SSC_IMR_RXRDY_SIZE 1 +#define SSC_IMR_RXRDY_OFFSET 4 +#define SSC_IMR_RXSYN_SIZE 1 +#define SSC_IMR_RXSYN_OFFSET 11 +#define SSC_IMR_TXBUFE_SIZE 1 +#define SSC_IMR_TXBUFE_OFFSET 3 +#define SSC_IMR_TXEMPTY_SIZE 1 +#define SSC_IMR_TXEMPTY_OFFSET 1 +#define SSC_IMR_TXRDY_SIZE 1 +#define SSC_IMR_TXRDY_OFFSET 0 +#define SSC_IMR_TXSYN_SIZE 1 +#define SSC_IMR_TXSYN_OFFSET 10 + +/* SSC PDC Receive Pointer Register */ +#define SSC_PDC_RPR 0x00000100 + +/* SSC PDC Receive Counter Register */ +#define SSC_PDC_RCR 0x00000104 + +/* SSC PDC Transmit Pointer Register */ +#define SSC_PDC_TPR 0x00000108 + +/* SSC PDC Receive Next Pointer Register */ +#define SSC_PDC_RNPR 0x00000110 + +/* SSC PDC Receive Next Counter Register */ +#define SSC_PDC_RNCR 0x00000114 + +/* SSC PDC Transmit Counter Register */ +#define SSC_PDC_TCR 0x0000010c + +/* SSC PDC Transmit Next Pointer Register */ +#define SSC_PDC_TNPR 0x00000118 + +/* SSC PDC Transmit Next Counter Register */ +#define SSC_PDC_TNCR 0x0000011c + +/* SSC PDC Transfer Control Register */ +#define SSC_PDC_PTCR 0x00000120 +#define SSC_PDC_PTCR_RXTDIS_SIZE 1 +#define SSC_PDC_PTCR_RXTDIS_OFFSET 1 +#define SSC_PDC_PTCR_RXTEN_SIZE 1 +#define SSC_PDC_PTCR_RXTEN_OFFSET 0 +#define SSC_PDC_PTCR_TXTDIS_SIZE 1 +#define SSC_PDC_PTCR_TXTDIS_OFFSET 9 +#define SSC_PDC_PTCR_TXTEN_SIZE 1 +#define SSC_PDC_PTCR_TXTEN_OFFSET 8 + +/* SSC PDC Transfer Status Register */ +#define SSC_PDC_PTSR 0x00000124 +#define SSC_PDC_PTSR_RXTEN_SIZE 1 +#define SSC_PDC_PTSR_RXTEN_OFFSET 0 +#define SSC_PDC_PTSR_TXTEN_SIZE 1 +#define SSC_PDC_PTSR_TXTEN_OFFSET 8 + +/* Bit manipulation macros */ +#define SSC_BIT(name) \ + (1 << SSC_##name##_OFFSET) +#define SSC_BF(name, value) \ + (((value) & ((1 << SSC_##name##_SIZE) - 1)) \ + << SSC_##name##_OFFSET) +#define SSC_BFEXT(name, value) \ + (((value) >> SSC_##name##_OFFSET) \ + & ((1 << SSC_##name##_SIZE) - 1)) +#define SSC_BFINS(name, value, old) \ + (((old) & ~(((1 << SSC_##name##_SIZE) - 1) \ + << SSC_##name##_OFFSET)) | SSC_BF(name, value)) + +/* Register access macros */ +#define ssc_readl(base, reg) __raw_readl(base + SSC_##reg) +#define ssc_writel(base, reg, value) __raw_writel((value), base + SSC_##reg) + +#endif /* __INCLUDE_ATMEL_SSC_H */ diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h new file mode 100644 index 0000000..00a766b --- /dev/null +++ b/include/linux/atmel_pdc.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * include/linux/atmel_pdc.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Peripheral Data Controller (PDC) registers. + * Based on AT91RM9200 datasheet revision E. 
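+ *
+ * Illustrative sketch of how a driver typically programs the PDC with
+ * the offsets below ("base", "buf_dma" and "buf_len" are placeholders):
+ * queue a receive buffer, then enable the receiver transfer:
+ *
+ *	__raw_writel(buf_dma, base + ATMEL_PDC_RPR);
+ *	__raw_writel(buf_len, base + ATMEL_PDC_RCR);
+ *	__raw_writel(ATMEL_PDC_RXTEN, base + ATMEL_PDC_PTCR);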
+ */ + +#ifndef ATMEL_PDC_H +#define ATMEL_PDC_H + +#define ATMEL_PDC_RPR 0x100 /* Receive Pointer Register */ +#define ATMEL_PDC_RCR 0x104 /* Receive Counter Register */ +#define ATMEL_PDC_TPR 0x108 /* Transmit Pointer Register */ +#define ATMEL_PDC_TCR 0x10c /* Transmit Counter Register */ +#define ATMEL_PDC_RNPR 0x110 /* Receive Next Pointer Register */ +#define ATMEL_PDC_RNCR 0x114 /* Receive Next Counter Register */ +#define ATMEL_PDC_TNPR 0x118 /* Transmit Next Pointer Register */ +#define ATMEL_PDC_TNCR 0x11c /* Transmit Next Counter Register */ + +#define ATMEL_PDC_PTCR 0x120 /* Transfer Control Register */ +#define ATMEL_PDC_RXTEN (1 << 0) /* Receiver Transfer Enable */ +#define ATMEL_PDC_RXTDIS (1 << 1) /* Receiver Transfer Disable */ +#define ATMEL_PDC_TXTEN (1 << 8) /* Transmitter Transfer Enable */ +#define ATMEL_PDC_TXTDIS (1 << 9) /* Transmitter Transfer Disable */ + +#define ATMEL_PDC_PTSR 0x124 /* Transfer Status Register */ + +#define ATMEL_PDC_SCND_BUF_OFF 0x10 /* Offset between first and second buffer registers */ + +#endif diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h new file mode 100644 index 0000000..a7d240e --- /dev/null +++ b/include/linux/atomic-fallback.h @@ -0,0 +1,2295 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-fallback.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_FALLBACK_H +#define _LINUX_ATOMIC_FALLBACK_H + +#ifndef xchg_relaxed +#define xchg_relaxed xchg +#define xchg_acquire xchg +#define xchg_release xchg +#else /* xchg_relaxed */ + +#ifndef xchg_acquire +#define xchg_acquire(...) \ + __atomic_op_acquire(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_release +#define xchg_release(...) \ + __atomic_op_release(xchg, __VA_ARGS__) +#endif + +#ifndef xchg +#define xchg(...) \ + __atomic_op_fence(xchg, __VA_ARGS__) +#endif + +#endif /* xchg_relaxed */ + +#ifndef cmpxchg_relaxed +#define cmpxchg_relaxed cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg +#else /* cmpxchg_relaxed */ + +#ifndef cmpxchg_acquire +#define cmpxchg_acquire(...) \ + __atomic_op_acquire(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg_release +#define cmpxchg_release(...) \ + __atomic_op_release(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg +#define cmpxchg(...) \ + __atomic_op_fence(cmpxchg, __VA_ARGS__) +#endif + +#endif /* cmpxchg_relaxed */ + +#ifndef cmpxchg64_relaxed +#define cmpxchg64_relaxed cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 +#else /* cmpxchg64_relaxed */ + +#ifndef cmpxchg64_acquire +#define cmpxchg64_acquire(...) \ + __atomic_op_acquire(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64_release +#define cmpxchg64_release(...) \ + __atomic_op_release(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64 +#define cmpxchg64(...) 
\ + __atomic_op_fence(cmpxchg64, __VA_ARGS__) +#endif + +#endif /* cmpxchg64_relaxed */ + +#ifndef atomic_read_acquire +static inline int +atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define atomic_read_acquire atomic_read_acquire +#endif + +#ifndef atomic_set_release +static inline void +atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define atomic_set_release atomic_set_release +#endif + +#ifndef atomic_add_return_relaxed +#define atomic_add_return_acquire atomic_add_return +#define atomic_add_return_release atomic_add_return +#define atomic_add_return_relaxed atomic_add_return +#else /* atomic_add_return_relaxed */ + +#ifndef atomic_add_return_acquire +static inline int +atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_add_return_acquire atomic_add_return_acquire +#endif + +#ifndef atomic_add_return_release +static inline int +atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_add_return_relaxed(i, v); +} +#define atomic_add_return_release atomic_add_return_release +#endif + +#ifndef atomic_add_return +static inline int +atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_add_return atomic_add_return +#endif + +#endif /* atomic_add_return_relaxed */ + +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add +#define atomic_fetch_add_relaxed atomic_fetch_add +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +static inline int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_add_acquire atomic_fetch_add_acquire +#endif + +#ifndef atomic_fetch_add_release +static inline int +atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_add_relaxed(i, v); +} +#define atomic_fetch_add_release atomic_fetch_add_release +#endif + +#ifndef atomic_fetch_add +static inline int +atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_add atomic_fetch_add +#endif + +#endif /* atomic_fetch_add_relaxed */ + +#ifndef atomic_sub_return_relaxed +#define atomic_sub_return_acquire atomic_sub_return +#define atomic_sub_return_release atomic_sub_return +#define atomic_sub_return_relaxed atomic_sub_return +#else /* atomic_sub_return_relaxed */ + +#ifndef atomic_sub_return_acquire +static inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_sub_return_acquire atomic_sub_return_acquire +#endif + +#ifndef atomic_sub_return_release +static inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_sub_return_relaxed(i, v); +} +#define atomic_sub_return_release atomic_sub_return_release +#endif + +#ifndef atomic_sub_return +static inline int +atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define 
atomic_sub_return atomic_sub_return +#endif + +#endif /* atomic_sub_return_relaxed */ + +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +static inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#endif + +#ifndef atomic_fetch_sub_release +static inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_sub_relaxed(i, v); +} +#define atomic_fetch_sub_release atomic_fetch_sub_release +#endif + +#ifndef atomic_fetch_sub +static inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_sub atomic_fetch_sub +#endif + +#endif /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_inc +static inline void +atomic_inc(atomic_t *v) +{ + atomic_add(1, v); +} +#define atomic_inc atomic_inc +#endif + +#ifndef atomic_inc_return_relaxed +#ifdef atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return +#define atomic_inc_return_relaxed atomic_inc_return +#endif /* atomic_inc_return */ + +#ifndef atomic_inc_return +static inline int +atomic_inc_return(atomic_t *v) +{ + return atomic_add_return(1, v); +} +#define atomic_inc_return atomic_inc_return +#endif + +#ifndef atomic_inc_return_acquire +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + return atomic_add_return_acquire(1, v); +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#ifndef atomic_inc_return_release +static inline int +atomic_inc_return_release(atomic_t *v) +{ + return atomic_add_return_release(1, v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#ifndef atomic_inc_return_relaxed +static inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + return atomic_add_return_relaxed(1, v); +} +#define atomic_inc_return_relaxed atomic_inc_return_relaxed +#endif + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + int ret = atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#ifndef atomic_inc_return_release +static inline int +atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_inc_return_relaxed(v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#ifndef atomic_inc_return +static inline int +atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_inc_return atomic_inc_return +#endif + +#endif /* atomic_inc_return_relaxed */ + +#ifndef atomic_fetch_inc_relaxed +#ifdef atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#define atomic_fetch_inc_relaxed atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#ifndef atomic_fetch_inc +static inline int +atomic_fetch_inc(atomic_t *v) +{ + return atomic_fetch_add(1, v); +} +#define atomic_fetch_inc 
atomic_fetch_inc +#endif + +#ifndef atomic_fetch_inc_acquire +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + return atomic_fetch_add_acquire(1, v); +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#ifndef atomic_fetch_inc_release +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + return atomic_fetch_add_release(1, v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#ifndef atomic_fetch_inc_relaxed +static inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + return atomic_fetch_add_relaxed(1, v); +} +#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed +#endif + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#ifndef atomic_fetch_inc_release +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_inc_relaxed(v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#ifndef atomic_fetch_inc +static inline int +atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_inc atomic_fetch_inc +#endif + +#endif /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_dec +static inline void +atomic_dec(atomic_t *v) +{ + atomic_sub(1, v); +} +#define atomic_dec atomic_dec +#endif + +#ifndef atomic_dec_return_relaxed +#ifdef atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return +#define atomic_dec_return_relaxed atomic_dec_return +#endif /* atomic_dec_return */ + +#ifndef atomic_dec_return +static inline int +atomic_dec_return(atomic_t *v) +{ + return atomic_sub_return(1, v); +} +#define atomic_dec_return atomic_dec_return +#endif + +#ifndef atomic_dec_return_acquire +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + return atomic_sub_return_acquire(1, v); +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#ifndef atomic_dec_return_release +static inline int +atomic_dec_return_release(atomic_t *v) +{ + return atomic_sub_return_release(1, v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#ifndef atomic_dec_return_relaxed +static inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + return atomic_sub_return_relaxed(1, v); +} +#define atomic_dec_return_relaxed atomic_dec_return_relaxed +#endif + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + int ret = atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#ifndef atomic_dec_return_release +static inline int +atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_dec_return_relaxed(v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#ifndef atomic_dec_return +static inline int +atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_dec_return atomic_dec_return +#endif + +#endif /* atomic_dec_return_relaxed */ + +#ifndef atomic_fetch_dec_relaxed +#ifdef 
atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#ifndef atomic_fetch_dec +static inline int +atomic_fetch_dec(atomic_t *v) +{ + return atomic_fetch_sub(1, v); +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#ifndef atomic_fetch_dec_acquire +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + return atomic_fetch_sub_acquire(1, v); +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#ifndef atomic_fetch_dec_release +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + return atomic_fetch_sub_release(1, v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#ifndef atomic_fetch_dec_relaxed +static inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + return atomic_fetch_sub_relaxed(1, v); +} +#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed +#endif + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#ifndef atomic_fetch_dec_release +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_dec_relaxed(v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#ifndef atomic_fetch_dec +static inline int +atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#endif /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and +#define atomic_fetch_and_relaxed atomic_fetch_and +#else /* atomic_fetch_and_relaxed */ + +#ifndef atomic_fetch_and_acquire +static inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_and_acquire atomic_fetch_and_acquire +#endif + +#ifndef atomic_fetch_and_release +static inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_and_relaxed(i, v); +} +#define atomic_fetch_and_release atomic_fetch_and_release +#endif + +#ifndef atomic_fetch_and +static inline int +atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_and atomic_fetch_and +#endif + +#endif /* atomic_fetch_and_relaxed */ + +#ifndef atomic_andnot +static inline void +atomic_andnot(int i, atomic_t *v) +{ + atomic_and(~i, v); +} +#define atomic_andnot atomic_andnot +#endif + +#ifndef atomic_fetch_andnot_relaxed +#ifdef atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#endif /* atomic_fetch_andnot */ + +#ifndef atomic_fetch_andnot +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + return atomic_fetch_and(~i, v); +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#ifndef atomic_fetch_andnot_acquire +static inline int +atomic_fetch_andnot_acquire(int 
i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#ifndef atomic_fetch_andnot_release +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#ifndef atomic_fetch_andnot_relaxed +static inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return atomic_fetch_and_relaxed(~i, v); +} +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed +#endif + +#else /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_andnot_acquire +static inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#ifndef atomic_fetch_andnot_release +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_andnot_relaxed(i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#ifndef atomic_fetch_andnot +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#endif /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or +#define atomic_fetch_or_relaxed atomic_fetch_or +#else /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_or_acquire +static inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_or_acquire atomic_fetch_or_acquire +#endif + +#ifndef atomic_fetch_or_release +static inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_or_relaxed(i, v); +} +#define atomic_fetch_or_release atomic_fetch_or_release +#endif + +#ifndef atomic_fetch_or +static inline int +atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_or atomic_fetch_or +#endif + +#endif /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#else /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_fetch_xor_acquire +static inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire +#endif + +#ifndef atomic_fetch_xor_release +static inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_xor_relaxed(i, v); +} +#define atomic_fetch_xor_release atomic_fetch_xor_release +#endif + +#ifndef atomic_fetch_xor +static inline int +atomic_fetch_xor(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_xor atomic_fetch_xor +#endif + +#endif /* atomic_fetch_xor_relaxed */ + +#ifndef 
atomic_xchg_relaxed +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg +#define atomic_xchg_relaxed atomic_xchg +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +static inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define atomic_xchg_acquire atomic_xchg_acquire +#endif + +#ifndef atomic_xchg_release +static inline int +atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return atomic_xchg_relaxed(v, i); +} +#define atomic_xchg_release atomic_xchg_release +#endif + +#ifndef atomic_xchg +static inline int +atomic_xchg(atomic_t *v, int i) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define atomic_xchg atomic_xchg +#endif + +#endif /* atomic_xchg_relaxed */ + +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#else /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_cmpxchg_acquire +static inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#endif + +#ifndef atomic_cmpxchg_release +static inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return atomic_cmpxchg_relaxed(v, old, new); +} +#define atomic_cmpxchg_release atomic_cmpxchg_release +#endif + +#ifndef atomic_cmpxchg +static inline int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic_cmpxchg atomic_cmpxchg +#endif + +#endif /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg_relaxed +#ifdef atomic_try_cmpxchg +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg +#define atomic_try_cmpxchg_release atomic_try_cmpxchg +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg +#endif /* atomic_try_cmpxchg */ + +#ifndef atomic_try_cmpxchg +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#ifndef atomic_try_cmpxchg_acquire +static inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#ifndef atomic_try_cmpxchg_release +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#ifndef atomic_try_cmpxchg_relaxed +static inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +#endif + +#else /* atomic_try_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg_acquire +static inline bool 
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#ifndef atomic_try_cmpxchg_release +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + __atomic_release_fence(); + return atomic_try_cmpxchg_relaxed(v, old, new); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#ifndef atomic_try_cmpxchg +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#endif /* atomic_try_cmpxchg_relaxed */ + +#ifndef atomic_sub_and_test +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + return atomic_sub_return(i, v) == 0; +} +#define atomic_sub_and_test atomic_sub_and_test +#endif + +#ifndef atomic_dec_and_test +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline bool +atomic_dec_and_test(atomic_t *v) +{ + return atomic_dec_return(v) == 0; +} +#define atomic_dec_and_test atomic_dec_and_test +#endif + +#ifndef atomic_inc_and_test +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic_inc_and_test(atomic_t *v) +{ + return atomic_inc_return(v) == 0; +} +#define atomic_inc_and_test atomic_inc_and_test +#endif + +#ifndef atomic_add_negative +/** + * atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline bool +atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} +#define atomic_add_negative atomic_add_negative +#endif + +#ifndef atomic_fetch_add_unless +/** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define atomic_fetch_add_unless atomic_fetch_add_unless +#endif + +#ifndef atomic_add_unless +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
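+ *
+ * A typical use is taking a reference only while an object is still
+ * live; as a sketch ("obj" and "refs" are placeholders):
+ *
+ *	if (!atomic_add_unless(&obj->refs, 1, 0))
+ *		return NULL;	// count already hit zero, do not revive
+ *
+ * which is exactly the atomic_inc_not_zero() helper defined below.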
+ */ +static inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + return atomic_fetch_add_unless(v, a, u) != u; +} +#define atomic_add_unless atomic_add_unless +#endif + +#ifndef atomic_inc_not_zero +/** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static inline bool +atomic_inc_not_zero(atomic_t *v) +{ + return atomic_add_unless(v, 1, 0); +} +#define atomic_inc_not_zero atomic_inc_not_zero +#endif + +#ifndef atomic_inc_unless_negative +static inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define atomic_inc_unless_negative atomic_inc_unless_negative +#endif + +#ifndef atomic_dec_unless_positive +static inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define atomic_dec_unless_positive atomic_dec_unless_positive +#endif + +#ifndef atomic_dec_if_positive +static inline int +atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define atomic_dec_if_positive atomic_dec_if_positive +#endif + +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif + +#ifndef atomic64_read_acquire +static inline s64 +atomic64_read_acquire(const atomic64_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define atomic64_read_acquire atomic64_read_acquire +#endif + +#ifndef atomic64_set_release +static inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + smp_store_release(&(v)->counter, i); +} +#define atomic64_set_release atomic64_set_release +#endif + +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return +#define atomic64_add_return_relaxed atomic64_add_return +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +static inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_add_return_acquire atomic64_add_return_acquire +#endif + +#ifndef atomic64_add_return_release +static inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_add_return_relaxed(i, v); +} +#define atomic64_add_return_release atomic64_add_return_release +#endif + +#ifndef atomic64_add_return +static inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_add_return atomic64_add_return +#endif + +#endif /* atomic64_add_return_relaxed */ + +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +static inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#endif + +#ifndef atomic64_fetch_add_release +static inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_add_relaxed(i, v); +} +#define atomic64_fetch_add_release atomic64_fetch_add_release +#endif + +#ifndef atomic64_fetch_add +static inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_add atomic64_fetch_add +#endif + +#endif /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return +#define atomic64_sub_return_relaxed atomic64_sub_return +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +static inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_sub_return_acquire atomic64_sub_return_acquire +#endif + +#ifndef atomic64_sub_return_release +static inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_sub_return_relaxed(i, v); +} +#define atomic64_sub_return_release atomic64_sub_return_release +#endif + +#ifndef atomic64_sub_return +static inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_sub_return atomic64_sub_return +#endif + +#endif /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +static inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#endif + +#ifndef atomic64_fetch_sub_release +static inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_sub_relaxed(i, v); +} +#define atomic64_fetch_sub_release atomic64_fetch_sub_release +#endif + +#ifndef atomic64_fetch_sub +static inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_sub atomic64_fetch_sub +#endif + +#endif /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_inc +static inline void +atomic64_inc(atomic64_t *v) +{ + atomic64_add(1, v); +} +#define atomic64_inc atomic64_inc +#endif + +#ifndef atomic64_inc_return_relaxed +#ifdef atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return +#define atomic64_inc_return_relaxed atomic64_inc_return +#endif /* atomic64_inc_return */ + +#ifndef atomic64_inc_return +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + return atomic64_add_return(1, v); +} +#define 
atomic64_inc_return atomic64_inc_return +#endif + +#ifndef atomic64_inc_return_acquire +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + return atomic64_add_return_acquire(1, v); +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#ifndef atomic64_inc_return_release +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + return atomic64_add_return_release(1, v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#ifndef atomic64_inc_return_relaxed +static inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + return atomic64_add_return_relaxed(1, v); +} +#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed +#endif + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#ifndef atomic64_inc_return_release +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_inc_return_relaxed(v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#ifndef atomic64_inc_return +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#endif /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_fetch_inc_relaxed +#ifdef atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#ifndef atomic64_fetch_inc +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + return atomic64_fetch_add(1, v); +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#ifndef atomic64_fetch_inc_acquire +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return atomic64_fetch_add_acquire(1, v); +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#ifndef atomic64_fetch_inc_release +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + return atomic64_fetch_add_release(1, v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#ifndef atomic64_fetch_inc_relaxed +static inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return atomic64_fetch_add_relaxed(1, v); +} +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed +#endif + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#ifndef atomic64_fetch_inc_release +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_inc_relaxed(v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#ifndef atomic64_fetch_inc +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#endif /* 
atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_dec +static inline void +atomic64_dec(atomic64_t *v) +{ + atomic64_sub(1, v); +} +#define atomic64_dec atomic64_dec +#endif + +#ifndef atomic64_dec_return_relaxed +#ifdef atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return +#define atomic64_dec_return_relaxed atomic64_dec_return +#endif /* atomic64_dec_return */ + +#ifndef atomic64_dec_return +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + return atomic64_sub_return(1, v); +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#ifndef atomic64_dec_return_acquire +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + return atomic64_sub_return_acquire(1, v); +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#ifndef atomic64_dec_return_release +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + return atomic64_sub_return_release(1, v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#ifndef atomic64_dec_return_relaxed +static inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + return atomic64_sub_return_relaxed(1, v); +} +#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed +#endif + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#ifndef atomic64_dec_return_release +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_dec_return_relaxed(v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#ifndef atomic64_dec_return +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#endif /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_fetch_dec_relaxed +#ifdef atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#ifndef atomic64_fetch_dec +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + return atomic64_fetch_sub(1, v); +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#ifndef atomic64_fetch_dec_acquire +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return atomic64_fetch_sub_acquire(1, v); +} +#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#endif + +#ifndef atomic64_fetch_dec_release +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + return atomic64_fetch_sub_release(1, v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#ifndef atomic64_fetch_dec_relaxed +static inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return atomic64_fetch_sub_relaxed(1, v); +} +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed +#endif + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_dec_acquire 
atomic64_fetch_dec_acquire +#endif + +#ifndef atomic64_fetch_dec_release +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_dec_relaxed(v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#ifndef atomic64_fetch_dec +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#endif /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +static inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#endif + +#ifndef atomic64_fetch_and_release +static inline s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_and_relaxed(i, v); +} +#define atomic64_fetch_and_release atomic64_fetch_and_release +#endif + +#ifndef atomic64_fetch_and +static inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_and atomic64_fetch_and +#endif + +#endif /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_andnot +static inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + atomic64_and(~i, v); +} +#define atomic64_andnot atomic64_andnot +#endif + +#ifndef atomic64_fetch_andnot_relaxed +#ifdef atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#endif /* atomic64_fetch_andnot */ + +#ifndef atomic64_fetch_andnot +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#ifndef atomic64_fetch_andnot_acquire +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#ifndef atomic64_fetch_andnot_release +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#ifndef atomic64_fetch_andnot_relaxed +static inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed +#endif + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#ifndef atomic64_fetch_andnot_release +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return 
atomic64_fetch_andnot_relaxed(i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#ifndef atomic64_fetch_andnot +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#endif /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +static inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#endif + +#ifndef atomic64_fetch_or_release +static inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_or_relaxed(i, v); +} +#define atomic64_fetch_or_release atomic64_fetch_or_release +#endif + +#ifndef atomic64_fetch_or +static inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_or atomic64_fetch_or +#endif + +#endif /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +static inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#endif + +#ifndef atomic64_fetch_xor_release +static inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_xor_relaxed(i, v); +} +#define atomic64_fetch_xor_release atomic64_fetch_xor_release +#endif + +#ifndef atomic64_fetch_xor +static inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_xor atomic64_fetch_xor +#endif + +#endif /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg +#define atomic64_xchg_relaxed atomic64_xchg +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +static inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_xchg_acquire atomic64_xchg_acquire +#endif + +#ifndef atomic64_xchg_release +static inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return atomic64_xchg_relaxed(v, i); +} +#define atomic64_xchg_release atomic64_xchg_release +#endif + +#ifndef atomic64_xchg +static inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_xchg atomic64_xchg +#endif + 
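+/*
+ * Editor's note: a minimal sketch of what the fallbacks above provide on
+ * an architecture that implements only atomic64_xchg_relaxed(). A caller
+ * such as (hypothetical variable names):
+ *
+ *	s64 old = atomic64_xchg_acquire(&v, new);
+ *
+ * resolves to atomic64_xchg_relaxed(&v, new) followed by
+ * __atomic_acquire_fence(), which defaults to smp_mb__after_atomic()
+ * unless the architecture overrides it (see include/linux/atomic.h).
+ */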
+#endif /* atomic64_xchg_relaxed */ + +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +static inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#endif + +#ifndef atomic64_cmpxchg_release +static inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return atomic64_cmpxchg_relaxed(v, old, new); +} +#define atomic64_cmpxchg_release atomic64_cmpxchg_release +#endif + +#ifndef atomic64_cmpxchg +static inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_cmpxchg atomic64_cmpxchg +#endif + +#endif /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg_relaxed +#ifdef atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg +#endif /* atomic64_try_cmpxchg */ + +#ifndef atomic64_try_cmpxchg +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#ifndef atomic64_try_cmpxchg_acquire +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#ifndef atomic64_try_cmpxchg_release +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#ifndef atomic64_try_cmpxchg_relaxed +static inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +#endif + +#else /* atomic64_try_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg_acquire +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#ifndef atomic64_try_cmpxchg_release +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#ifndef atomic64_try_cmpxchg +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + 
return ret; +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#endif /* atomic64_try_cmpxchg_relaxed */ + +#ifndef atomic64_sub_and_test +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} +#define atomic64_sub_and_test atomic64_sub_and_test +#endif + +#ifndef atomic64_dec_and_test +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + return atomic64_dec_return(v) == 0; +} +#define atomic64_dec_and_test atomic64_dec_and_test +#endif + +#ifndef atomic64_inc_and_test +/** + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + return atomic64_inc_return(v) == 0; +} +#define atomic64_inc_and_test atomic64_inc_and_test +#endif + +#ifndef atomic64_add_negative +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} +#define atomic64_add_negative atomic64_add_negative +#endif + +#ifndef atomic64_fetch_add_unless +/** + * atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +#endif + +#ifndef atomic64_add_unless +/** + * atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return atomic64_fetch_add_unless(v, a, u) != u; +} +#define atomic64_add_unless atomic64_add_unless +#endif + +#ifndef atomic64_inc_not_zero +/** + * atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. 
+ */
+static inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+	return atomic64_add_unless(v, 1, 0);
+}
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+	s64 c = atomic64_read(v);
+
+	do {
+		if (unlikely(c < 0))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+	return true;
+}
+#define atomic64_inc_unless_negative atomic64_inc_unless_negative
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+	s64 c = atomic64_read(v);
+
+	do {
+		if (unlikely(c > 0))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+	return true;
+}
+#define atomic64_dec_unless_positive atomic64_dec_unless_positive
+#endif
+
+#ifndef atomic64_dec_if_positive
+static inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+	s64 dec, c = atomic64_read(v);
+
+	do {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+	} while (!atomic64_try_cmpxchg(v, &c, dec));
+
+	return dec;
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+#endif
+
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// 25de4a2804d70f57e994fe3b419148658bb5378a
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 0000000..4c0d009
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Atomic operations usable in machine independent code */
+#ifndef _LINUX_ATOMIC_H
+#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ *
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
+ */
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
+#define __atomic_op_acquire(op, args...)				\
+({									\
+	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
+	__atomic_acquire_fence();					\
+	__ret;								\
+})
+
+#define __atomic_op_release(op, args...)				\
+({									\
+	__atomic_release_fence();					\
+	op##_relaxed(args);						\
+})
+
+#define __atomic_op_fence(op, args...)					\
+({									\
+	typeof(op##_relaxed(args)) __ret;				\
+	__atomic_pre_full_fence();					\
+	__ret = op##_relaxed(args);					\
+	__atomic_post_full_fence();					\
+	__ret;								\
+})
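+/*
+ * Editor's note: a short usage sketch for the three helpers above; the
+ * definitions below are hypothetical, not part of this header. An
+ * architecture (or wrapper header) that has only xchg_relaxed() could
+ * derive the ordered forms like so:
+ *
+ *	#define xchg_acquire(...)	__atomic_op_acquire(xchg, __VA_ARGS__)
+ *	#define xchg_release(...)	__atomic_op_release(xchg, __VA_ARGS__)
+ *	#define xchg(...)		__atomic_op_fence(xchg, __VA_ARGS__)
+ *
+ * Each wrapper expands to xchg_relaxed(...) with the matching fence(s)
+ * placed around the relaxed operation.
+ */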
+
+#include <linux/atomic-fallback.h>
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
new file mode 100644
index 0000000..d12bb21
--- /dev/null
+++ b/include/linux/attribute_container.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * attribute_container.h - a generic container for all classes
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ */
+
+#ifndef _ATTRIBUTE_CONTAINER_H_
+#define _ATTRIBUTE_CONTAINER_H_
+
+#include <linux/list.h>
+#include <linux/klist.h>
+
+struct device;
+
+struct attribute_container {
+	struct list_head	node;
+	struct klist		containers;
+	struct class		*class;
+	const struct attribute_group *grp;
+	struct device_attribute **attrs;
+	int (*match)(struct attribute_container *, struct device *);
+#define	ATTRIBUTE_CONTAINER_NO_CLASSDEVS	0x01
+	unsigned long		flags;
+};
+
+static inline int
+attribute_container_no_classdevs(struct attribute_container *atc)
+{
+	return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
+}
+
+static inline void
+attribute_container_set_no_classdevs(struct attribute_container *atc)
+{
+	atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
+}
+
+int attribute_container_register(struct attribute_container *cont);
+int __must_check attribute_container_unregister(struct attribute_container *cont);
+void attribute_container_create_device(struct device *dev,
+				       int (*fn)(struct attribute_container *,
+						 struct device *,
+						 struct device *));
+void attribute_container_add_device(struct device *dev,
+				    int (*fn)(struct attribute_container *,
+					      struct device *,
+					      struct device *));
+void attribute_container_remove_device(struct device *dev,
+				       void (*fn)(struct attribute_container *,
+						  struct device *,
+						  struct device *));
+void attribute_container_device_trigger(struct device *dev,
+					int (*fn)(struct attribute_container *,
+						  struct device *,
+						  struct device *));
+void attribute_container_trigger(struct device *dev,
+				 int (*fn)(struct attribute_container *,
+					   struct device *));
+int attribute_container_add_attrs(struct device *classdev);
+int attribute_container_add_class_device(struct device *classdev);
+int attribute_container_add_class_device_adapter(struct attribute_container *cont,
+						 struct device *dev,
+						 struct device *classdev);
+void attribute_container_remove_attrs(struct device *classdev);
+void attribute_container_class_device_del(struct device *classdev);
+struct attribute_container *attribute_container_classdev_to_container(struct device *);
+struct device *attribute_container_find_class_device(struct attribute_container *, struct device *);
+struct device_attribute **attribute_container_classdev_to_attrs(const struct device *classdev);
+
+#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
new file mode 100644
index 0000000..aee3dc9
--- /dev/null
+++ b/include/linux/audit.h
@@ -0,0 +1,662 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit.h -- Auditing support
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Written by Rickard E. (Rik) Faith <faith@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_H_
+#define _LINUX_AUDIT_H_
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/audit.h>
+
+#define AUDIT_INO_UNSET ((unsigned long)-1)
+#define AUDIT_DEV_UNSET ((dev_t)-1)
+
+struct audit_sig_info {
+	uid_t		uid;
+	pid_t		pid;
+	char		ctx[0];
+};
+
+struct audit_buffer;
+struct audit_context;
+struct inode;
+struct netlink_skb_parms;
+struct path;
+struct linux_binprm;
+struct mq_attr;
+struct mqstat;
+struct audit_watch;
+struct audit_tree;
+struct sk_buff;
+
+struct audit_krule {
+	u32			pflags;
+	u32			flags;
+	u32			listnr;
+	u32			action;
+	u32			mask[AUDIT_BITMASK_SIZE];
+	u32			buflen;	/* for data alloc on list rules */
+	u32			field_count;
+	char			*filterkey; /* ties events to rules */
+	struct audit_field	*fields;
+	struct audit_field	*arch_f; /* quick access to arch field */
+	struct audit_field	*inode_f; /* quick access to an inode field */
+	struct audit_watch	*watch;	/* associated watch */
+	struct audit_tree	*tree;	/* associated watched tree */
+	struct audit_fsnotify_mark	*exe;
+	struct list_head	rlist;	/* entry in audit_{watch,tree}.rules list */
+	struct list_head	list;	/* for AUDIT_LIST* purposes only */
+	u64			prio;
+};
+
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY	0x1
+
+struct audit_field {
+	u32				type;
+	union {
+		u32			val;
+		kuid_t			uid;
+		kgid_t			gid;
+		struct {
+			char		*lsm_str;
+			void		*lsm_rule;
+		};
+	};
+	u32				op;
+};
+
+enum audit_ntp_type {
+	AUDIT_NTP_OFFSET,
+	AUDIT_NTP_FREQ,
+	AUDIT_NTP_STATUS,
+	AUDIT_NTP_TAI,
+	AUDIT_NTP_TICK,
+	AUDIT_NTP_ADJUST,
+
+	AUDIT_NTP_NVALS /* count */
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+struct audit_ntp_val {
+	long long oldval, newval;
+};
+
+struct audit_ntp_data {
+	struct audit_ntp_val vals[AUDIT_NTP_NVALS];
+};
+#else
+struct audit_ntp_data {};
+#endif
+
+extern int is_audit_feature_set(int which);
+
+extern int __init audit_register_class(int class, unsigned *list);
+extern int audit_classify_syscall(int abi, unsigned syscall);
+extern int audit_classify_arch(int arch);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+
+/* audit_names->type values */
+#define	AUDIT_TYPE_UNKNOWN	0	/* we don't know yet */
+#define	AUDIT_TYPE_NORMAL	1	/* a "normal" audit record */
+#define	AUDIT_TYPE_PARENT	2	/* a parent audit record */
+#define	AUDIT_TYPE_CHILD_DELETE 3	/* a child being deleted */
+#define	AUDIT_TYPE_CHILD_CREATE 4	/* a child being created */
+
+/* maximum number of args that audit_socketcall can process */
+#define AUDITSC_ARGS		6
+
+/* bit values for ->signal->audit_tty */
+#define AUDIT_TTY_ENABLE	BIT(0)
+#define AUDIT_TTY_LOG_PASSWD	BIT(1)
+
+struct filename;
+
+#define AUDIT_OFF	0
+#define AUDIT_ON	1
+#define AUDIT_LOCKED	2
+#ifdef CONFIG_AUDIT
+/* These are defined in audit.c */
+				/* Public API */
+extern __printf(4, 5)
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
+	       const char *fmt, ...);
+
+extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
+extern __printf(2, 3)
+void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
+extern void audit_log_end(struct audit_buffer *ab);
+extern bool audit_string_contains_control(const char *string,
+					  size_t len);
+extern void audit_log_n_hex(struct audit_buffer *ab,
+			    const unsigned char *buf,
+			    size_t len);
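+/*
+ * Editor's note: the usual calling pattern for the logging API above
+ * (sketch only; the record type and format string are illustrative):
+ *
+ *	struct audit_buffer *ab;
+ *
+ *	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_KERNEL);
+ *	if (ab) {
+ *		audit_log_format(ab, "key=%s", key);
+ *		audit_log_end(ab);
+ *	}
+ *
+ * audit_log() bundles the same start/format/end sequence for one-shot
+ * messages.
+ */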
+extern void audit_log_n_string(struct audit_buffer *ab,
+			       const char *buf,
+			       size_t n);
+extern void audit_log_n_untrustedstring(struct audit_buffer *ab,
+					const char *string,
+					size_t n);
+extern void audit_log_untrustedstring(struct audit_buffer *ab,
+				      const char *string);
+extern void audit_log_d_path(struct audit_buffer *ab,
+			     const char *prefix,
+			     const struct path *path);
+extern void audit_log_key(struct audit_buffer *ab,
+			  char *key);
+extern void audit_log_link_denied(const char *operation);
+extern void audit_log_lost(const char *message);
+
+extern int audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab);
+
+extern int audit_update_lsm_rules(void);
+
+				/* Private API (for audit.c only) */
+extern int audit_rule_change(int type, int seq, void *data, size_t datasz);
+extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
+
+extern int audit_set_loginuid(kuid_t loginuid);
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+	return tsk->loginuid;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+	return tsk->sessionid;
+}
+
+extern u32 audit_enabled;
+
+extern int audit_signal_info(int sig, struct task_struct *t);
+
+#else /* CONFIG_AUDIT */
+static inline __printf(4, 5)
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
+	       const char *fmt, ...)
+{ }
+static inline struct audit_buffer *audit_log_start(struct audit_context *ctx,
+						   gfp_t gfp_mask, int type)
+{
+	return NULL;
+}
+static inline __printf(2, 3)
+void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
+{ }
+static inline void audit_log_end(struct audit_buffer *ab)
+{ }
+static inline void audit_log_n_hex(struct audit_buffer *ab,
+				   const unsigned char *buf, size_t len)
+{ }
+static inline void audit_log_n_string(struct audit_buffer *ab,
+				      const char *buf, size_t n)
+{ }
+static inline void audit_log_n_untrustedstring(struct audit_buffer *ab,
+					       const char *string, size_t n)
+{ }
+static inline void audit_log_untrustedstring(struct audit_buffer *ab,
+					     const char *string)
+{ }
+static inline void audit_log_d_path(struct audit_buffer *ab,
+				    const char *prefix,
+				    const struct path *path)
+{ }
+static inline void audit_log_key(struct audit_buffer *ab, char *key)
+{ }
+static inline void audit_log_link_denied(const char *string)
+{ }
+static inline int audit_log_task_context(struct audit_buffer *ab)
+{
+	return 0;
+}
+static inline void audit_log_task_info(struct audit_buffer *ab)
+{ }
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+	return INVALID_UID;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+	return AUDIT_SID_UNSET;
+}
+
+#define audit_enabled AUDIT_OFF
+
+static inline int audit_signal_info(int sig, struct task_struct *t)
+{
+	return 0;
+}
+
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+#define audit_is_compat(arch)  (!((arch) & __AUDIT_ARCH_64BIT))
+#else
+#define audit_is_compat(arch)  false
+#endif
+
+#define AUDIT_INODE_PARENT	1	/* dentry represents the parent */
+#define AUDIT_INODE_HIDDEN	2	/* audit record should be hidden */
+#define AUDIT_INODE_NOEVAL	4	/* audit record incomplete */
+
+#ifdef CONFIG_AUDITSYSCALL
+#include <asm/syscall.h> /* for syscall_get_arch() */
+
+/* These are defined in auditsc.c */
+				/* Public API */
+extern int audit_alloc(struct task_struct *task);
+extern void __audit_free(struct task_struct *task);
+extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3); +extern void __audit_syscall_exit(int ret_success, long ret_value); +extern struct filename *__audit_reusename(const __user char *uptr); +extern void __audit_getname(struct filename *name); + +extern void __audit_inode(struct filename *name, const struct dentry *dentry, + unsigned int flags); +extern void __audit_file(const struct file *); +extern void __audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type); +extern void audit_seccomp(unsigned long syscall, long signr, int code); +extern void audit_seccomp_actions_logged(const char *names, + const char *old_names, int res); +extern void __audit_ptrace(struct task_struct *t); + +static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) +{ + task->audit_context = ctx; +} + +static inline struct audit_context *audit_context(void) +{ + return current->audit_context; +} + +static inline bool audit_dummy_context(void) +{ + void *p = audit_context(); + return !p || *(int *)p; +} +static inline void audit_free(struct task_struct *task) +{ + if (unlikely(task->audit_context)) + __audit_free(task); +} +static inline void audit_syscall_entry(int major, unsigned long a0, + unsigned long a1, unsigned long a2, + unsigned long a3) +{ + if (unlikely(audit_context())) + __audit_syscall_entry(major, a0, a1, a2, a3); +} +static inline void audit_syscall_exit(void *pt_regs) +{ + if (unlikely(audit_context())) { + int success = is_syscall_success(pt_regs); + long return_code = regs_return_value(pt_regs); + + __audit_syscall_exit(success, return_code); + } +} +static inline struct filename *audit_reusename(const __user char *name) +{ + if (unlikely(!audit_dummy_context())) + return __audit_reusename(name); + return NULL; +} +static inline void audit_getname(struct filename *name) +{ + if (unlikely(!audit_dummy_context())) + __audit_getname(name); +} +static inline void audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int aflags) { + if (unlikely(!audit_dummy_context())) + __audit_inode(name, dentry, aflags); +} +static inline void audit_file(struct file *file) +{ + if (unlikely(!audit_dummy_context())) + __audit_file(file); +} +static inline void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ + if (unlikely(!audit_dummy_context())) + __audit_inode(name, dentry, + AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); +} +static inline void audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) { + if (unlikely(!audit_dummy_context())) + __audit_inode_child(parent, dentry, type); +} +void audit_core_dumps(long signr); + +static inline void audit_ptrace(struct task_struct *t) +{ + if (unlikely(!audit_dummy_context())) + __audit_ptrace(t); +} + + /* Private API (for audit.c only) */ +extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); +extern void __audit_bprm(struct linux_binprm *bprm); +extern int __audit_socketcall(int nargs, unsigned long *args); +extern int __audit_sockaddr(int len, void *addr); +extern void __audit_fd_pair(int fd1, int fd2); +extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout); +extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); +extern void 
__audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); +extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old); +extern void __audit_log_capset(const struct cred *new, const struct cred *old); +extern void __audit_mmap_fd(int fd, int flags); +extern void __audit_log_kern_module(char *name); +extern void __audit_fanotify(unsigned int response); +extern void __audit_tk_injoffset(struct timespec64 offset); +extern void __audit_ntp_log(const struct audit_ntp_data *ad); + +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) +{ + if (unlikely(!audit_dummy_context())) + __audit_ipc_obj(ipcp); +} +static inline void audit_fd_pair(int fd1, int fd2) +{ + if (unlikely(!audit_dummy_context())) + __audit_fd_pair(fd1, fd2); +} +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) +{ + if (unlikely(!audit_dummy_context())) + __audit_ipc_set_perm(qbytes, uid, gid, mode); +} +static inline void audit_bprm(struct linux_binprm *bprm) +{ + if (unlikely(!audit_dummy_context())) + __audit_bprm(bprm); +} +static inline int audit_socketcall(int nargs, unsigned long *args) +{ + if (unlikely(!audit_dummy_context())) + return __audit_socketcall(nargs, args); + return 0; +} + +static inline int audit_socketcall_compat(int nargs, u32 *args) +{ + unsigned long a[AUDITSC_ARGS]; + int i; + + if (audit_dummy_context()) + return 0; + + for (i = 0; i < nargs; i++) + a[i] = (unsigned long)args[i]; + return __audit_socketcall(nargs, a); +} + +static inline int audit_sockaddr(int len, void *addr) +{ + if (unlikely(!audit_dummy_context())) + return __audit_sockaddr(len, addr); + return 0; +} +static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_open(oflag, mode, attr); +} +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); +} +static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_notify(mqdes, notification); +} +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_getsetattr(mqdes, mqstat); +} + +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + return __audit_log_bprm_fcaps(bprm, new, old); + return 0; +} + +static inline void audit_log_capset(const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + __audit_log_capset(new, old); +} + +static inline void audit_mmap_fd(int fd, int flags) +{ + if (unlikely(!audit_dummy_context())) + __audit_mmap_fd(fd, flags); +} + +static inline void audit_log_kern_module(char *name) +{ + if (!audit_dummy_context()) + __audit_log_kern_module(name); +} + +static inline void audit_fanotify(unsigned int response) +{ + if (!audit_dummy_context()) + __audit_fanotify(response); +} + +static inline void audit_tk_injoffset(struct timespec64 offset) +{ + /* ignore no-op events */ + if (offset.tv_sec == 0 && offset.tv_nsec == 0) + return; + + if (!audit_dummy_context()) + __audit_tk_injoffset(offset); +} + +static inline void audit_ntp_init(struct audit_ntp_data *ad) +{ + memset(ad, 0, sizeof(*ad)); +} + +static inline 
void audit_ntp_set_old(struct audit_ntp_data *ad, + enum audit_ntp_type type, long long val) +{ + ad->vals[type].oldval = val; +} + +static inline void audit_ntp_set_new(struct audit_ntp_data *ad, + enum audit_ntp_type type, long long val) +{ + ad->vals[type].newval = val; +} + +static inline void audit_ntp_log(const struct audit_ntp_data *ad) +{ + if (!audit_dummy_context()) + __audit_ntp_log(ad); +} + +extern int audit_n_rules; +extern int audit_signals; +#else /* CONFIG_AUDITSYSCALL */ +static inline int audit_alloc(struct task_struct *task) +{ + return 0; +} +static inline void audit_free(struct task_struct *task) +{ } +static inline void audit_syscall_entry(int major, unsigned long a0, + unsigned long a1, unsigned long a2, + unsigned long a3) +{ } +static inline void audit_syscall_exit(void *pt_regs) +{ } +static inline bool audit_dummy_context(void) +{ + return true; +} +static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) +{ } +static inline struct audit_context *audit_context(void) +{ + return NULL; +} +static inline struct filename *audit_reusename(const __user char *name) +{ + return NULL; +} +static inline void audit_getname(struct filename *name) +{ } +static inline void __audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int flags) +{ } +static inline void __audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) +{ } +static inline void audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int aflags) +{ } +static inline void audit_file(struct file *file) +{ +} +static inline void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ } +static inline void audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) +{ } +static inline void audit_core_dumps(long signr) +{ } +static inline void audit_seccomp(unsigned long syscall, long signr, int code) +{ } +static inline void audit_seccomp_actions_logged(const char *names, + const char *old_names, int res) +{ } +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) +{ } +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, + gid_t gid, umode_t mode) +{ } +static inline void audit_bprm(struct linux_binprm *bprm) +{ } +static inline int audit_socketcall(int nargs, unsigned long *args) +{ + return 0; +} + +static inline int audit_socketcall_compat(int nargs, u32 *args) +{ + return 0; +} + +static inline void audit_fd_pair(int fd1, int fd2) +{ } +static inline int audit_sockaddr(int len, void *addr) +{ + return 0; +} +static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) +{ } +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, + unsigned int msg_prio, + const struct timespec64 *abs_timeout) +{ } +static inline void audit_mq_notify(mqd_t mqdes, + const struct sigevent *notification) +{ } +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +{ } +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) +{ + return 0; +} +static inline void audit_log_capset(const struct cred *new, + const struct cred *old) +{ } +static inline void audit_mmap_fd(int fd, int flags) +{ } + +static inline void audit_log_kern_module(char *name) +{ +} + +static inline void audit_fanotify(unsigned int response) +{ } + +static inline void audit_tk_injoffset(struct timespec64 offset) +{ } + +static inline void 
audit_ntp_init(struct audit_ntp_data *ad)
+{ }
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+				     enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+				     enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{ }
+
+static inline void audit_ptrace(struct task_struct *t)
+{ }
+#define audit_n_rules 0
+#define audit_signals 0
+#endif /* CONFIG_AUDITSYSCALL */
+
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+	return uid_valid(audit_get_loginuid(tsk));
+}
+
+static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
+{
+	audit_log_n_string(ab, buf, strlen(buf));
+}
+
+#endif
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
new file mode 100644
index 0000000..6e1ca6f
--- /dev/null
+++ b/include/linux/auto_dev-ioctl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ */
+
+#ifndef _LINUX_AUTO_DEV_IOCTL_H
+#define _LINUX_AUTO_DEV_IOCTL_H
+
+#include <uapi/linux/auto_dev-ioctl.h>
+#endif	/* _LINUX_AUTO_DEV_IOCTL_H */
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
new file mode 100644
index 0000000..893f952
--- /dev/null
+++ b/include/linux/auto_fs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ */
+
+#ifndef _LINUX_AUTO_FS_H
+#define _LINUX_AUTO_FS_H
+
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <uapi/linux/auto_fs.h>
+#endif /* _LINUX_AUTO_FS_H */
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
new file mode 100644
index 0000000..f68d0ec
--- /dev/null
+++ b/include/linux/auxvec.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_AUXVEC_H
+#define _LINUX_AUXVEC_H
+
+#include <uapi/linux/auxvec.h>
+
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+  /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
+#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/average.h b/include/linux/average.h
new file mode 100644
index 0000000..a1a8f09
--- /dev/null
+++ b/include/linux/average.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_AVERAGE_H
+#define _LINUX_AVERAGE_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+/*
+ * Exponentially weighted moving average (EWMA)
+ *
+ * This implements a fixed-precision EWMA algorithm, with both the
+ * precision and fall-off coefficient determined at compile-time
+ * and built into the generated helper functions.
+ *
+ * The first argument to the macro is the name that will be used
+ * for the struct and helper functions.
+ *
+ * The second argument, the precision, expresses how many bits are
+ * used for the fractional part of the fixed-precision values.
+ *
+ * The third argument, the weight reciprocal, determines how the
+ * new values will be weighed vs. the old state, new values will
+ * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
+ * that this parameter must be a power of two for efficiency.
+ */
+
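+/*
+ * Editor's note: a usage sketch; the name "rssi" and the sample values
+ * are hypothetical:
+ *
+ *	DECLARE_EWMA(rssi, 10, 8)
+ *
+ *	struct ewma_rssi avg;
+ *
+ *	ewma_rssi_init(&avg);			// internal state starts at 0
+ *	ewma_rssi_add(&avg, 42);		// fold in a new sample
+ *	level = ewma_rssi_read(&avg);		// integer part of the average
+ *
+ * With a weight reciprocal of 8, each new sample contributes 1/8 and the
+ * old state 7/8; 10 bits of fractional precision are kept internally.
+ */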
+#define DECLARE_EWMA(name, _precision, _weight_rcp) \
+	struct ewma_##name { \
+		unsigned long internal; \
+	}; \
+	static inline void ewma_##name##_init(struct ewma_##name *e) \
+	{ \
+		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
+		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
+		/* \
+		 * Even if you want to feed it just 0/1 you should have \
+		 * some bits for the non-fractional part... \
+		 */ \
+		BUILD_BUG_ON((_precision) > 30); \
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
+		e->internal = 0; \
+	} \
+	static inline unsigned long \
+	ewma_##name##_read(struct ewma_##name *e) \
+	{ \
+		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
+		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
+		BUILD_BUG_ON((_precision) > 30); \
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
+		return e->internal >> (_precision); \
+	} \
+	static inline void ewma_##name##_add(struct ewma_##name *e, \
+					     unsigned long val) \
+	{ \
+		unsigned long internal = READ_ONCE(e->internal); \
+		unsigned long weight_rcp = ilog2(_weight_rcp); \
+		unsigned long precision = _precision; \
+		\
+		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
+		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
+		BUILD_BUG_ON((_precision) > 30); \
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
+		\
+		WRITE_ONCE(e->internal, internal ? \
+			(((internal << weight_rcp) - internal) + \
+				(val << precision)) >> weight_rcp : \
+			(val << precision)); \
+	}
+
+#endif /* _LINUX_AVERAGE_H */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
new file mode 100644
index 0000000..ca956b6
--- /dev/null
+++ b/include/linux/avf/virtchnl.h
@@ -0,0 +1,838 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +/* Backward compatibility */ +#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM +#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED + +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
+ */
+	VIRTCHNL_OP_UNKNOWN = 0,
+	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	VIRTCHNL_OP_RESET_VF = 2,
+	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+	VIRTCHNL_OP_ENABLE_QUEUES = 8,
+	VIRTCHNL_OP_DISABLE_QUEUES = 9,
+	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+	VIRTCHNL_OP_ADD_VLAN = 12,
+	VIRTCHNL_OP_DEL_VLAN = 13,
+	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+	VIRTCHNL_OP_GET_STATS = 15,
+	VIRTCHNL_OP_RSVD = 16,
+	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+	VIRTCHNL_OP_SET_RSS_HENA = 26,
+	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+	VIRTCHNL_OP_REQUEST_QUEUES = 29,
+	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+};
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+	u8 pad[8];			/* AQ flags/opcode/len/retval fields */
+	enum virtchnl_ops v_opcode;	/* avoid confusion with desc->opcode */
+	enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+	u32 vfid;			/* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR		1
+#define VIRTCHNL_VERSION_MINOR		1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
+struct virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF capability flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. + */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 +#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 + +/* Define below the capability flags that are not offloads */ +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. 
+ */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u32 pad1; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_REQUEST_QUEUES + * VF sends this message to request the PF to allocate additional queues to + * this VF. Each VF gets a guaranteed number of queues on init but asking for + * additional queues must be negotiated. This is a best effort request as it + * is possible the PF does not have enough queues left to support the request. + * If the PF cannot support the number requested it will respond with the + * maximum number it is able to support. If the request is successful, PF will + * then reset the VF to institute required changes. + */ + +/* VF resource request */ +struct virtchnl_vf_res_request { + u16 num_queue_pairs; +}; + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + +struct virtchnl_irq_map_info { + u16 num_vectors; + struct virtchnl_vector_map vecmap[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. 
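+ *
+ * Both operations use a variable-length buffer, and the PF-side length
+ * check in virtchnl_vc_validate_vf_msg() expects exactly this size for
+ * n = num_elements filters (a sizing sketch):
+ *
+ *	len = sizeof(struct virtchnl_ether_addr_list) +
+ *	      n * sizeof(struct virtchnl_ether_addr);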
+ */ + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct eth_stats in an external buffer. + */ + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* VIRTCHNL_OP_ENABLE_CHANNELS + * VIRTCHNL_OP_DISABLE_CHANNELS + * VF sends these messages to enable or disable channels based on + * the user specified queue count and queue offset for each traffic class. + * This struct encompasses all the information that the PF needs from + * VF to create a channel. 
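+ *
+ * For example, a VF splitting eight queue pairs into two traffic
+ * classes, with the second one rate-limited, would send (a sketch only;
+ * the numbers are illustrative):
+ *
+ *	num_tc = 2
+ *	list[0] = { .count = 4, .offset = 0, .max_tx_rate = 0 }
+ *	list[1] = { .count = 4, .offset = 4, .max_tx_rate = 50 }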
+ */ +struct virtchnl_channel_info { + u16 count; /* number of queues in a channel */ + u16 offset; /* queues in a channel start from 'offset' */ + u32 pad; + u64 max_tx_rate; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); + +struct virtchnl_tc_info { + u32 num_tc; + u32 pad; + struct virtchnl_channel_info list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); + +/* VIRTCHNL_ADD_CLOUD_FILTER + * VIRTCHNL_DEL_CLOUD_FILTER + * VF sends these messages to add or delete a cloud filter based on the + * user specified match and action filters. These structures encompass + * all the information that the PF needs from the VF to add/delete a + * cloud filter. + */ + +struct virtchnl_l4_spec { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + __be16 vlan_id; + __be16 pad; /* reserved for future use */ + __be32 src_ip[4]; + __be32 dst_ip[4]; + __be16 src_port; + __be16 dst_port; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec); + +union virtchnl_flow_spec { + struct virtchnl_l4_spec tcp_spec; + u8 buffer[128]; /* reserved for future use */ +}; + +VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec); + +enum virtchnl_action { + /* action types */ + VIRTCHNL_ACTION_DROP = 0, + VIRTCHNL_ACTION_TC_REDIRECT, +}; + +enum virtchnl_flow_type { + /* flow types */ + VIRTCHNL_TCP_V4_FLOW = 0, + VIRTCHNL_TCP_V6_FLOW, +}; + +struct virtchnl_filter { + union virtchnl_flow_spec data; + union virtchnl_flow_spec mask; + enum virtchnl_flow_type flow_type; + enum virtchnl_action action; + u32 action_meta; + u8 field_flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct virtchnl_pf_event { + enum virtchnl_event_codes event; + union { + /* If the PF driver does not support the new speed reporting + * capabilities then use link_event else use link_event_adv to + * get the speed and link information. The ability to understand + * new speeds is indicated by setting the capability flag + * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter + * in virtchnl_vf_resource struct and can be used to determine + * which link event struct to use below. + */ + struct { + enum virtchnl_link_speed link_speed; + bool link_status; + } link_event; + struct { + /* link_speed provided in Mbps */ + u32 link_speed; + u8 link_status; + } link_event_adv; + } event_data; + + int severity; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. 
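+ *
+ * E.g. with the single AEQ on vector 0 and one CEQ per vector, the list
+ * might look like this (a sketch; INVALID_IDX stands for whatever
+ * "not attached" marker the drivers agree on):
+ *
+ *	qv_info[0] = { .v_idx = 0, .ceq_idx = 0, .aeq_idx = 0 }
+ *	qv_info[1] = { .v_idx = 1, .ceq_idx = 1, .aeq_idx = INVALID_IDX }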
+ */ + +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. */ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case 
VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. + */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; + case VIRTCHNL_OP_ENABLE_CHANNELS: + valid_len = sizeof(struct virtchnl_tc_info); + if (msglen >= valid_len) { + struct virtchnl_tc_info *vti = + (struct virtchnl_tc_info *)msg; + valid_len += (vti->num_tc - 1) * + sizeof(struct virtchnl_channel_info); + if (vti->num_tc == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_DISABLE_CHANNELS: + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: + valid_len = sizeof(struct virtchnl_filter); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_STATUS_ERR_PARAM; + } + /* few more checks */ + if (err_msg_format || valid_len != msglen) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h new file mode 100644 index 0000000..12a867c --- /dev/null +++ b/include/linux/b1pcmcia.h @@ -0,0 +1,21 @@ +/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $ + * + * Exported functions of module b1pcmcia to be called by + * avm_cs card services module. + * + * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ *
+ */
+
+#ifndef _B1PCMCIA_H_
+#define _B1PCMCIA_H_
+
+int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
+int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
+int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
+int b1pcmcia_delcard(unsigned int port, unsigned irq);
+
+#endif	/* _B1PCMCIA_H_ */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
new file mode 100644
index 0000000..2849bdb
--- /dev/null
+++ b/include/linux/backing-dev-defs.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BACKING_DEV_DEFS_H
+#define __LINUX_BACKING_DEV_DEFS_H
+
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
+#include <linux/flex_proportions.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
+#include <linux/refcount.h>
+
+struct page;
+struct device;
+struct dentry;
+
+/*
+ * Bits in bdi_writeback.state
+ */
+enum wb_state {
+	WB_registered,		/* bdi_register() was done */
+	WB_writeback_running,	/* Writeback is in progress */
+	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
+	WB_start_all,		/* nr_pages == 0 (all) work pending */
+};
+
+enum wb_congested_state {
+	WB_async_congested,	/* The async (write) queue is getting full */
+	WB_sync_congested,	/* The sync queue is getting full */
+};
+
+typedef int (congested_fn)(void *, int);
+
+enum wb_stat_item {
+	WB_RECLAIMABLE,
+	WB_WRITEBACK,
+	WB_DIRTIED,
+	WB_WRITTEN,
+	NR_WB_STAT_ITEMS
+};
+
+#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+/*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+	WB_REASON_BACKGROUND,
+	WB_REASON_VMSCAN,
+	WB_REASON_SYNC,
+	WB_REASON_PERIODIC,
+	WB_REASON_LAPTOP_TIMER,
+	WB_REASON_FREE_MORE_MEM,
+	WB_REASON_FS_FREE_SPACE,
+	/*
+	 * There is no bdi forker thread any more and works are done
+	 * by the emergency worker; however, this reason is visible to
+	 * userland through tracepoints, and we keep exposing exactly
+	 * the same information, so the mismatched name is retained.
+	 */
+	WB_REASON_FORKER_THREAD,
+	WB_REASON_FOREIGN_FLUSH,
+
+	WB_REASON_MAX,
+};
+
+struct wb_completion {
+	atomic_t		cnt;
+	wait_queue_head_t	*waitq;
+};
+
+#define __WB_COMPLETION_INIT(_waitq)	\
+	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }
+
+/*
+ * If one wants to wait for one or more wb_writeback_works, each work's
+ * ->done should be set to a wb_completion defined using the following
+ * macro. Once all work items are issued with wb_queue_work(), the caller
+ * can wait for the completion of all using wb_wait_for_completion(). Work
+ * items which are waited upon aren't freed automatically on completion.
+ */
+#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)
+
+#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
+	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
+
+/*
+ * For cgroup writeback, multiple wb's may map to the same blkcg. Those
+ * wb's can operate mostly independently but should share the congested
+ * state. To facilitate such sharing, the congested state is tracked using
+ * the following struct which is created on demand, indexed by blkcg ID on
+ * its bdi, and refcounted.
+ */
+struct bdi_writeback_congested {
+	unsigned long state;		/* WB_[a]sync_congested flags */
+	refcount_t refcnt;		/* nr of attached wb's and blkg */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
+					 * on bdi unregistration. For memcg-wb
+					 * internal use only!
*/ + int blkcg_id; /* ID of the associated blkcg */ + struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ +#endif +}; + +/* + * Each wb (bdi_writeback) can perform writeback operations, is measured + * and throttled, independently. Without cgroup writeback, each bdi + * (bdi_writeback) is served by its embedded bdi->wb. + * + * On the default hierarchy, blkcg implicitly enables memcg. This allows + * using memcg's page ownership for attributing writeback IOs, and every + * memcg - blkcg combination can be served by its own wb by assigning a + * dedicated wb to each memcg, which enables isolation across different + * cgroups and propagation of IO back pressure down from the IO layer upto + * the tasks which are generating the dirty pages to be written back. + * + * A cgroup wb is indexed on its bdi by the ID of the associated memcg, + * refcounted with the number of inodes attached to it, and pins the memcg + * and the corresponding blkcg. As the corresponding blkcg for a memcg may + * change as blkcg is disabled and enabled higher up in the hierarchy, a wb + * is tested for blkcg after lookup and removed from index on mismatch so + * that a new wb for the combination can be created. + */ +struct bdi_writeback { + struct backing_dev_info *bdi; /* our parent bdi */ + + unsigned long state; /* Always use atomic bitops on this */ + unsigned long last_old_flush; /* last old data flush */ + + struct list_head b_dirty; /* dirty inodes */ + struct list_head b_io; /* parked for writeback */ + struct list_head b_more_io; /* parked for more writeback */ + struct list_head b_dirty_time; /* time stamps are dirty */ + spinlock_t list_lock; /* protects the b_* lists */ + + struct percpu_counter stat[NR_WB_STAT_ITEMS]; + + struct bdi_writeback_congested *congested; + + unsigned long bw_time_stamp; /* last time write bw is updated */ + unsigned long dirtied_stamp; + unsigned long written_stamp; /* pages written at bw_time_stamp */ + unsigned long write_bandwidth; /* the estimated write bandwidth */ + unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */ + + /* + * The base dirty throttle rate, re-calculated on every 200ms. + * All the bdi tasks' dirty rate will be curbed under it. + * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit + * in small steps and is much more smooth/stable than the latter. 
+ */ + unsigned long dirty_ratelimit; + unsigned long balanced_dirty_ratelimit; + + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + + spinlock_t work_lock; /* protects work_list & dwork scheduling */ + struct list_head work_list; + struct delayed_work dwork; /* work item used for writeback */ + + unsigned long dirty_sleep; /* last wait */ + + struct list_head bdi_node; /* anchored at bdi->wb_list */ + +#ifdef CONFIG_CGROUP_WRITEBACK + struct percpu_ref refcnt; /* used only for !root wb's */ + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; /* the associated memcg */ + struct cgroup_subsys_state *blkcg_css; /* and blkcg */ + struct list_head memcg_node; /* anchored at memcg->cgwb_list */ + struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */ + + union { + struct work_struct release_work; + struct rcu_head rcu; + }; +#endif +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; /* keyed by ->id */ + struct list_head bdi_list; + unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ + unsigned long io_pages; /* max allowed IO size */ + congested_fn *congested_fn; /* Function pointer if device is md/dm */ + void *congested_data; /* Pointer to aux data for congested func */ + + const char *name; + + struct kref refcnt; /* Reference counter for the structure */ + unsigned int capabilities; /* Device capabilities */ + unsigned int min_ratio; + unsigned int max_ratio, max_prop_frac; + + /* + * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are + * any dirty wbs, which is depended upon by bdi_has_dirty(). + */ + atomic_long_t tot_write_bandwidth; + + struct bdi_writeback wb; /* the root writeback info for this bdi */ + struct list_head wb_list; /* list of all wbs */ +#ifdef CONFIG_CGROUP_WRITEBACK + struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ + struct rb_root cgwb_congested_tree; /* their congested states */ + struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ + struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ +#else + struct bdi_writeback_congested *wb_congested; +#endif + wait_queue_head_t wb_waitq; + + struct device *dev; + char dev_name[64]; + struct device *owner; + + struct timer_list laptop_mode_wb_timer; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; +#endif +}; + +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); +void set_wb_congested(struct bdi_writeback_congested *congested, int sync); + +static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + clear_wb_congested(bdi->wb.congested, sync); +} + +static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + set_wb_congested(bdi->wb.congested, sync); +} + +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + +#ifdef CONFIG_CGROUP_WRITEBACK + +/** + * wb_tryget - try to increment a wb's refcount + * @wb: bdi_writeback to get + */ +static inline bool wb_tryget(struct bdi_writeback *wb) +{ + if (wb != &wb->bdi->wb) + return percpu_ref_tryget(&wb->refcnt); + return true; +} + +/** + * wb_get - increment a wb's refcount + * @wb: bdi_writeback to get + */ +static inline void wb_get(struct bdi_writeback *wb) +{ + if (wb != &wb->bdi->wb) + percpu_ref_get(&wb->refcnt); +} + +/** + * wb_put - decrement a wb's refcount + * @wb: bdi_writeback to put + */ +static inline void wb_put(struct bdi_writeback 
*wb)
+{
+	if (WARN_ON_ONCE(!wb->bdi)) {
+		/*
+		 * A driver bug might cause a file to be removed before bdi was
+		 * initialized.
+		 */
+		return;
+	}
+
+	if (wb != &wb->bdi->wb)
+		percpu_ref_put(&wb->refcnt);
+}
+
+/**
+ * wb_dying - is a wb dying?
+ * @wb: bdi_writeback of interest
+ *
+ * Returns whether @wb is unlinked and being drained.
+ */
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+	return percpu_ref_is_dying(&wb->refcnt);
+}
+
+#else	/* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+	return true;
+}
+
+static inline void wb_get(struct bdi_writeback *wb)
+{
+}
+
+static inline void wb_put(struct bdi_writeback *wb)
+{
+}
+
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+	return false;
+}
+
+#endif	/* CONFIG_CGROUP_WRITEBACK */
+
+#endif	/* __LINUX_BACKING_DEV_DEFS_H */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
new file mode 100644
index 0000000..c9ad5c3
--- /dev/null
+++ b/include/linux/backing-dev.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/backing-dev.h
+ *
+ * low-level device information and state which is propagated up through
+ * to high-level code.
+ */
+
+#ifndef _LINUX_BACKING_DEV_H
+#define _LINUX_BACKING_DEV_H
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/writeback.h>
+#include <linux/blk-cgroup.h>
+#include <linux/backing-dev-defs.h>
+#include <linux/slab.h>
+
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+	kref_get(&bdi->refcnt);
+	return bdi;
+}
+
+struct backing_dev_info *bdi_get_by_id(u64 id);
+void bdi_put(struct backing_dev_info *bdi);
+
+__printf(2, 3)
+int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+__printf(2, 0)
+int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
+		    va_list args);
+int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
+void bdi_unregister(struct backing_dev_info *bdi);
+
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
+static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
+{
+	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
+}
+
+void wb_start_background_writeback(struct bdi_writeback *wb);
+void wb_workfn(struct work_struct *work);
+void wb_wakeup_delayed(struct bdi_writeback *wb);
+
+void wb_wait_for_completion(struct wb_completion *done);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+extern struct workqueue_struct *bdi_wq;
+extern struct workqueue_struct *bdi_async_bio_wq;
+
+static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
+{
+	return test_bit(WB_has_dirty_io, &wb->state);
+}
+
+static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+	/*
+	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
+	 * any dirty wbs. See wb_update_write_bandwidth().
+ */ + return atomic_long_read(&bdi->tot_write_bandwidth); +} + +static inline void __add_wb_stat(struct bdi_writeback *wb, + enum wb_stat_item item, s64 amount) +{ + percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); +} + +static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + __add_wb_stat(wb, item, 1); +} + +static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + __add_wb_stat(wb, item, -1); +} + +static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) +{ + return percpu_counter_read_positive(&wb->stat[item]); +} + +static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) +{ + return percpu_counter_sum_positive(&wb->stat[item]); +} + +extern void wb_writeout_inc(struct bdi_writeback *wb); + +/* + * maximal error of a stat counter. + */ +static inline unsigned long wb_stat_error(void) +{ +#ifdef CONFIG_SMP + return nr_cpu_ids * WB_STAT_BATCH; +#else + return 1; +#endif +} + +int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); +int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); + +/* + * Flags in backing_dev_info::capability + * + * The first three flags control whether dirty pages will contribute to the + * VM's accounting and whether writepages() should be called for dirty pages + * (something that would not, for example, be appropriate for ramfs) + * + * WARNING: these flags are closely related and should not normally be + * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these + * three flags into a single convenience macro. + * + * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting + * BDI_CAP_NO_WRITEBACK: Don't write pages back + * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages + * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. + * + * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback. + * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be + * inefficient. + */ +#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 +#define BDI_CAP_NO_WRITEBACK 0x00000002 +#define BDI_CAP_NO_ACCT_WB 0x00000004 +#define BDI_CAP_STABLE_WRITES 0x00000008 +#define BDI_CAP_STRICTLIMIT 0x00000010 +#define BDI_CAP_CGROUP_WRITEBACK 0x00000020 +#define BDI_CAP_SYNCHRONOUS_IO 0x00000040 + +#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ + (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) + +extern struct backing_dev_info noop_backing_dev_info; + +/** + * writeback_in_progress - determine whether there is writeback in progress + * @wb: bdi_writeback of interest + * + * Determine whether there is writeback waiting to be handled against a + * bdi_writeback. 
+ */ +static inline bool writeback_in_progress(struct bdi_writeback *wb) +{ + return test_bit(WB_writeback_running, &wb->state); +} + +static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) +{ + struct super_block *sb; + + if (!inode) + return &noop_backing_dev_info; + + sb = inode->i_sb; +#ifdef CONFIG_BLOCK + if (sb_is_blkdev_sb(sb)) + return I_BDEV(inode)->bd_bdi; +#endif + return sb->s_bdi; +} + +static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) +{ + struct backing_dev_info *bdi = wb->bdi; + + if (bdi->congested_fn) + return bdi->congested_fn(bdi->congested_data, cong_bits); + return wb->congested->state & cong_bits; +} + +long congestion_wait(int sync, long timeout); +long wait_iff_congested(int sync, long timeout); + +static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi) +{ + return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO; +} + +static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi) +{ + return bdi->capabilities & BDI_CAP_STABLE_WRITES; +} + +static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK); +} + +static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY); +} + +static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi) +{ + /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */ + return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB | + BDI_CAP_NO_WRITEBACK)); +} + +static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) +{ + return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); +} + +static inline bool mapping_cap_account_dirty(struct address_space *mapping) +{ + return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); +} + +static inline int bdi_sched_wait(void *word) +{ + schedule(); + return 0; +} + +#ifdef CONFIG_CGROUP_WRITEBACK + +struct bdi_writeback_congested * +wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); +void wb_congested_put(struct bdi_writeback_congested *congested); +struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, + struct cgroup_subsys_state *memcg_css); +struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, + struct cgroup_subsys_state *memcg_css, + gfp_t gfp); +void wb_memcg_offline(struct mem_cgroup *memcg); +void wb_blkcg_offline(struct blkcg *blkcg); +int inode_congested(struct inode *inode, int cong_bits); + +/** + * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode + * @inode: inode of interest + * + * cgroup writeback requires support from both the bdi and filesystem. + * Also, both memcg and iocg have to be on the default hierarchy. Test + * whether all conditions are met. + * + * Note that the test result may change dynamically on the same inode + * depending on how memcg and iocg are configured. + */ +static inline bool inode_cgwb_enabled(struct inode *inode) +{ + struct backing_dev_info *bdi = inode_to_bdi(inode); + + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys) && + bdi_cap_account_dirty(bdi) && + (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && + (inode->i_sb->s_iflags & SB_I_CGROUPWB); +} + +/** + * wb_find_current - find wb for %current on a bdi + * @bdi: bdi of interest + * + * Find the wb of @bdi which matches both the memcg and blkcg of %current. + * Must be called under rcu_read_lock() which protects the returend wb. 
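+ *
+ * A typical lookup looks like this sketch (mirroring what
+ * wb_get_create_current() below does on the fast path):
+ *
+ *	rcu_read_lock();
+ *	wb = wb_find_current(bdi);
+ *	if (wb && unlikely(!wb_tryget(wb)))
+ *		wb = NULL;
+ *	rcu_read_unlock();
+ *
+ * The lookup yields the matching wb, or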
+ * NULL if not found. + */ +static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) +{ + struct cgroup_subsys_state *memcg_css; + struct bdi_writeback *wb; + + memcg_css = task_css(current, memory_cgrp_id); + if (!memcg_css->parent) + return &bdi->wb; + + wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); + + /* + * %current's blkcg equals the effective blkcg of its memcg. No + * need to use the relatively expensive cgroup_get_e_css(). + */ + if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) + return wb; + return NULL; +} + +/** + * wb_get_create_current - get or create wb for %current on a bdi + * @bdi: bdi of interest + * @gfp: allocation mask + * + * Equivalent to wb_get_create() on %current's memcg. This function is + * called from a relatively hot path and optimizes the common cases using + * wb_find_current(). + */ +static inline struct bdi_writeback * +wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) +{ + struct bdi_writeback *wb; + + rcu_read_lock(); + wb = wb_find_current(bdi); + if (wb && unlikely(!wb_tryget(wb))) + wb = NULL; + rcu_read_unlock(); + + if (unlikely(!wb)) { + struct cgroup_subsys_state *memcg_css; + + memcg_css = task_get_css(current, memory_cgrp_id); + wb = wb_get_create(bdi, memcg_css, gfp); + css_put(memcg_css); + } + return wb; +} + +/** + * inode_to_wb_is_valid - test whether an inode has a wb associated + * @inode: inode of interest + * + * Returns %true if @inode has a wb associated. May be called without any + * locking. + */ +static inline bool inode_to_wb_is_valid(struct inode *inode) +{ + return inode->i_wb; +} + +/** + * inode_to_wb - determine the wb of an inode + * @inode: inode of interest + * + * Returns the wb @inode is currently associated with. The caller must be + * holding either @inode->i_lock, the i_pages lock, or the + * associated wb's list_lock. + */ +static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON_ONCE(debug_locks && + (!lockdep_is_held(&inode->i_lock) && + !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && + !lockdep_is_held(&inode->i_wb->list_lock))); +#endif + return inode->i_wb; +} + +/** + * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction + * @inode: target inode + * @cookie: output param, to be passed to the end function + * + * The caller wants to access the wb associated with @inode but isn't + * holding inode->i_lock, the i_pages lock or wb->list_lock. This + * function determines the wb associated with @inode and ensures that the + * association doesn't change until the transaction is finished with + * unlocked_inode_to_wb_end(). + * + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and + * can't sleep during the transaction. IRQs may or may not be disabled on + * return. + */ +static inline struct bdi_writeback * +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) +{ + rcu_read_lock(); + + /* + * Paired with store_release in inode_switch_wbs_work_fn() and + * ensures that we see the new wb if we see cleared I_WB_SWITCH. + */ + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + + if (unlikely(cookie->locked)) + xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); + + /* + * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages + * lock. inode_to_wb() will bark. Deref directly. 
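+	 *
+	 * A full transaction at a call site looks like this sketch:
+	 *
+	 *	struct wb_lock_cookie cookie = {};
+	 *
+	 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
+	 *	... touch wb state, without sleeping ...
+	 *	unlocked_inode_to_wb_end(inode, &cookie);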
+ */ + return inode->i_wb; +} + +/** + * unlocked_inode_to_wb_end - end inode wb access transaction + * @inode: target inode + * @cookie: @cookie from unlocked_inode_to_wb_begin() + */ +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) +{ + if (unlikely(cookie->locked)) + xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); + + rcu_read_unlock(); +} + +#else /* CONFIG_CGROUP_WRITEBACK */ + +static inline bool inode_cgwb_enabled(struct inode *inode) +{ + return false; +} + +static inline struct bdi_writeback_congested * +wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) +{ + refcount_inc(&bdi->wb_congested->refcnt); + return bdi->wb_congested; +} + +static inline void wb_congested_put(struct bdi_writeback_congested *congested) +{ + if (refcount_dec_and_test(&congested->refcnt)) + kfree(congested); +} + +static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) +{ + return &bdi->wb; +} + +static inline struct bdi_writeback * +wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) +{ + return &bdi->wb; +} + +static inline bool inode_to_wb_is_valid(struct inode *inode) +{ + return true; +} + +static inline struct bdi_writeback *inode_to_wb(struct inode *inode) +{ + return &inode_to_bdi(inode)->wb; +} + +static inline struct bdi_writeback * +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) +{ + return inode_to_wb(inode); +} + +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) +{ +} + +static inline void wb_memcg_offline(struct mem_cgroup *memcg) +{ +} + +static inline void wb_blkcg_offline(struct blkcg *blkcg) +{ +} + +static inline int inode_congested(struct inode *inode, int cong_bits) +{ + return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); +} + +#endif /* CONFIG_CGROUP_WRITEBACK */ + +static inline int inode_read_congested(struct inode *inode) +{ + return inode_congested(inode, 1 << WB_sync_congested); +} + +static inline int inode_write_congested(struct inode *inode) +{ + return inode_congested(inode, 1 << WB_async_congested); +} + +static inline int inode_rw_congested(struct inode *inode) +{ + return inode_congested(inode, (1 << WB_sync_congested) | + (1 << WB_async_congested)); +} + +static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) +{ + return wb_congested(&bdi->wb, cong_bits); +} + +static inline int bdi_read_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, 1 << WB_sync_congested); +} + +static inline int bdi_write_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, 1 << WB_async_congested); +} + +static inline int bdi_rw_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, (1 << WB_sync_congested) | + (1 << WB_async_congested)); +} + +const char *bdi_dev_name(struct backing_dev_info *bdi); + +#endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/backlight.h b/include/linux/backlight.h new file mode 100644 index 0000000..c7d6b2e --- /dev/null +++ b/include/linux/backlight.h @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Backlight Lowlevel Control Abstraction + * + * Copyright (C) 2003,2004 Hewlett-Packard Company + * + */ + +#ifndef _LINUX_BACKLIGHT_H +#define _LINUX_BACKLIGHT_H + +#include +#include +#include +#include + +/* Notes on locking: + * + * backlight_device->ops_lock is an internal backlight lock protecting the + * ops pointer and no code outside the core should 
need to touch it. + * + * Access to update_status() is serialised by the update_lock mutex since + * most drivers seem to need this and historically get it wrong. + * + * Most drivers don't need locking on their get_brightness() method. + * If yours does, you need to implement it in the driver. You can use the + * update_lock mutex if appropriate. + * + * Any other use of the locks below is probably wrong. + */ + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY, + BACKLIGHT_UPDATE_SYSFS, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM, + BACKLIGHT_FIRMWARE, + BACKLIGHT_TYPE_MAX, +}; + +enum backlight_notification { + BACKLIGHT_REGISTERED, + BACKLIGHT_UNREGISTERED, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR, + BACKLIGHT_SCALE_NON_LINEAR, +}; + +struct backlight_device; +struct fb_info; + +struct backlight_ops { + unsigned int options; + +#define BL_CORE_SUSPENDRESUME (1 << 0) + + /* Notify the backlight driver some property has changed */ + int (*update_status)(struct backlight_device *); + /* Return the current backlight brightness (accounting for power, + fb_blank etc.) */ + int (*get_brightness)(struct backlight_device *); + /* Check if given framebuffer device is the one bound to this backlight; + return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ + int (*check_fb)(struct backlight_device *, struct fb_info *); +}; + +/* This structure defines all the properties of a backlight */ +struct backlight_properties { + /* Current User requested brightness (0 - max_brightness) */ + int brightness; + /* Maximal value for brightness (read-only) */ + int max_brightness; + /* Current FB Power mode (0: full on, 1..3: power saving + modes; 4: full off), see FB_BLANK_XXX */ + int power; + /* FB Blanking active? (values as for power) */ + /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ + int fb_blank; + /* Backlight type */ + enum backlight_type type; + /* Flags used to signal drivers of state changes */ + unsigned int state; + /* Type of the brightness scale (linear, non-linear, ...) */ + enum backlight_scale scale; + +#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ +#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ + +}; + +struct backlight_device { + /* Backlight properties */ + struct backlight_properties props; + + /* Serialise access to update_status method */ + struct mutex update_lock; + + /* This protects the 'ops' field. If 'ops' is NULL, the driver that + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. 
*/ + struct mutex ops_lock; + const struct backlight_ops *ops; + + /* The framebuffer notifier block */ + struct notifier_block fb_notif; + + /* list entry of all registered backlight devices */ + struct list_head entry; + + struct device dev; + + /* Multiple framebuffers may share one backlight device */ + bool fb_bl_on[FB_MAX]; + + int use_count; +}; + +static inline int backlight_update_status(struct backlight_device *bd) +{ + int ret = -ENOENT; + + mutex_lock(&bd->update_lock); + if (bd->ops && bd->ops->update_status) + ret = bd->ops->update_status(bd); + mutex_unlock(&bd->update_lock); + + return ret; +} + +/** + * backlight_enable - Enable backlight + * @bd: the backlight device to enable + */ +static inline int backlight_enable(struct backlight_device *bd) +{ + if (!bd) + return 0; + + bd->props.power = FB_BLANK_UNBLANK; + bd->props.fb_blank = FB_BLANK_UNBLANK; + bd->props.state &= ~BL_CORE_FBBLANK; + + return backlight_update_status(bd); +} + +/** + * backlight_disable - Disable backlight + * @bd: the backlight device to disable + */ +static inline int backlight_disable(struct backlight_device *bd) +{ + if (!bd) + return 0; + + bd->props.power = FB_BLANK_POWERDOWN; + bd->props.fb_blank = FB_BLANK_POWERDOWN; + bd->props.state |= BL_CORE_FBBLANK; + + return backlight_update_status(bd); +} + +/** + * backlight_put - Drop backlight reference + * @bd: the backlight device to put + */ +static inline void backlight_put(struct backlight_device *bd) +{ + if (bd) + put_device(&bd->dev); +} + +extern struct backlight_device *backlight_device_register(const char *name, + struct device *dev, void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern struct backlight_device *devm_backlight_device_register( + struct device *dev, const char *name, struct device *parent, + void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern void backlight_device_unregister(struct backlight_device *bd); +extern void devm_backlight_device_unregister(struct device *dev, + struct backlight_device *bd); +extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +extern int backlight_register_notifier(struct notifier_block *nb); +extern int backlight_unregister_notifier(struct notifier_block *nb); +extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); + +#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) + +static inline void * bl_get_data(struct backlight_device *bl_dev) +{ + return dev_get_drvdata(&bl_dev->dev); +} + +struct generic_bl_info { + const char *name; + int max_intensity; + int default_intensity; + int limit_mask; + void (*set_bl_intensity)(int intensity); + void (*kick_battery)(void); +}; + +#ifdef CONFIG_OF +struct backlight_device *of_find_backlight_by_node(struct device_node *node); +#else +static inline struct backlight_device * +of_find_backlight_by_node(struct device_node *node) +{ + return NULL; +} +#endif + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +struct backlight_device *of_find_backlight(struct device *dev); +struct backlight_device *devm_of_find_backlight(struct device *dev); +#else +static inline struct backlight_device *of_find_backlight(struct device *dev) +{ + return NULL; +} + +static inline struct backlight_device * +devm_of_find_backlight(struct device *dev) +{ + return NULL; +} +#endif 
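+
+/*
+ * Example only (not part of the kernel API): a consumer that wants a
+ * percentage interface can derive it from the properties above and let
+ * backlight_update_status() push the result into the driver. The helper
+ * name and the percent parameter are made up for this sketch.
+ */
+static inline int backlight_set_percent(struct backlight_device *bd,
+					int percent)
+{
+	if (!bd)
+		return 0;
+
+	bd->props.brightness = bd->props.max_brightness * percent / 100;
+	return backlight_update_status(bd);
+}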
+ +#endif diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h new file mode 100644 index 0000000..2426276 --- /dev/null +++ b/include/linux/badblocks.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BADBLOCKS_H +#define _LINUX_BADBLOCKS_H + +#include +#include +#include +#include +#include + +#define BB_LEN_MASK (0x00000000000001FFULL) +#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL) +#define BB_ACK_MASK (0x8000000000000000ULL) +#define BB_MAX_LEN 512 +#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) +#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) +#define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) + +/* Bad block numbers are stored sorted in a single page. + * 64bits is used for each block or extent. + * 54 bits are sector number, 9 bits are extent size, + * 1 bit is an 'acknowledged' flag. + */ +#define MAX_BADBLOCKS (PAGE_SIZE/8) + +struct badblocks { + struct device *dev; /* set by devm_init_badblocks */ + int count; /* count of bad blocks */ + int unacked_exist; /* there probably are unacknowledged + * bad blocks. This is only cleared + * when a read discovers none + */ + int shift; /* shift from sectors to block size + * a -ve shift means badblocks are + * disabled.*/ + u64 *page; /* badblock list */ + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; /* in sectors */ +}; + +int badblocks_check(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors); +int badblocks_set(struct badblocks *bb, sector_t s, int sectors, + int acknowledged); +int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); +void ack_all_badblocks(struct badblocks *bb); +ssize_t badblocks_show(struct badblocks *bb, char *page, int unack); +ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, + int unack); +int badblocks_init(struct badblocks *bb, int enable); +void badblocks_exit(struct badblocks *bb); +struct device; +int devm_init_badblocks(struct device *dev, struct badblocks *bb); +static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb) +{ + if (bb->dev != dev) { + dev_WARN_ONCE(dev, 1, "%s: badblocks instance not associated\n", + __func__); + return; + } + badblocks_exit(bb); +} +#endif diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h new file mode 100644 index 0000000..338aa27 --- /dev/null +++ b/include/linux/balloon_compaction.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/balloon_compaction.h + * + * Common interface definitions for making balloon pages movable by compaction. + * + * Balloon page migration makes use of the general non-lru movable page + * feature. + * + * page->private is used to reference the responsible balloon device. + * page->mapping is used in context of non-lru page migration to reference + * the address space operations for page isolation/migration/compaction. + * + * As the page isolation scanning step a compaction thread does is a lockless + * procedure (from a page standpoint), it might bring some racy situations while + * performing balloon page compaction. In order to sort out these racy scenarios + * and safely perform balloon's page compaction and migration we must, always, + * ensure following these simple rules: + * + * i. 
when updating a balloon's page ->mapping element, strictly do it under + * the following lock order, independently of the far superior + * locking scheme (lru_lock, balloon_lock): + * +-page_lock(page); + * +--spin_lock_irq(&b_dev_info->pages_lock); + * ... page->mapping updates here ... + * + * ii. isolation or dequeueing procedure must remove the page from balloon + * device page list under b_dev_info->pages_lock. + * + * The functions provided by this interface are placed to help on coping with + * the aforementioned balloon page corner case, as well as to ensure the simple + * set of exposed rules are satisfied while we are dealing with balloon pages + * compaction / migration. + * + * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini + */ +#ifndef _LINUX_BALLOON_COMPACTION_H +#define _LINUX_BALLOON_COMPACTION_H +#include +#include +#include +#include +#include +#include +#include + +/* + * Balloon device information descriptor. + * This struct is used to allow the common balloon compaction interface + * procedures to find the proper balloon device holding memory pages they'll + * have to cope for page compaction / migration, as well as it serves the + * balloon driver as a page book-keeper for its registered balloon devices. + */ +struct balloon_dev_info { + unsigned long isolated_pages; /* # of isolated pages for migration */ + spinlock_t pages_lock; /* Protection to pages list */ + struct list_head pages; /* Pages enqueued & handled to Host */ + int (*migratepage)(struct balloon_dev_info *, struct page *newpage, + struct page *page, enum migrate_mode mode); + struct inode *inode; +}; + +extern struct page *balloon_page_alloc(void); +extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, + struct page *page); +extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); +extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, + struct list_head *pages); +extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, + struct list_head *pages, size_t n_req_pages); + +static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) +{ + balloon->isolated_pages = 0; + spin_lock_init(&balloon->pages_lock); + INIT_LIST_HEAD(&balloon->pages); + balloon->migratepage = NULL; + balloon->inode = NULL; +} + +#ifdef CONFIG_BALLOON_COMPACTION +extern const struct address_space_operations balloon_aops; +extern bool balloon_page_isolate(struct page *page, + isolate_mode_t mode); +extern void balloon_page_putback(struct page *page); +extern int balloon_page_migrate(struct address_space *mapping, + struct page *newpage, + struct page *page, enum migrate_mode mode); + +/* + * balloon_page_insert - insert a page into the balloon's page list and make + * the page->private assignment accordingly. + * @balloon : pointer to balloon device + * @page : page to be assigned as a 'balloon page' + * + * Caller must ensure the page is locked and the spin_lock protecting balloon + * pages list is held before inserting a page into the balloon device. + */ +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) +{ + __SetPageOffline(page); + __SetPageMovable(page, balloon->inode->i_mapping); + set_page_private(page, (unsigned long)balloon); + list_add(&page->lru, &balloon->pages); +} + +/* + * balloon_page_delete - delete a page from balloon's page list and clear + * the page->private assignement accordingly. 
+ * @page : page to be released from balloon's page list
+ *
+ * Caller must ensure the page is locked and the spin_lock protecting balloon
+ * pages list is held before deleting a page from the balloon device.
+ */
+static inline void balloon_page_delete(struct page *page)
+{
+	__ClearPageOffline(page);
+	__ClearPageMovable(page);
+	set_page_private(page, 0);
+	/*
+	 * Don't touch the page->lru field once @page has been isolated:
+	 * the VM is using that field.
+	 */
+	if (!PageIsolated(page))
+		list_del(&page->lru);
+}
+
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+	return (struct balloon_dev_info *)page_private(page);
+}
+
+static inline gfp_t balloon_mapping_gfp_mask(void)
+{
+	return GFP_HIGHUSER_MOVABLE;
+}
+
+#else /* !CONFIG_BALLOON_COMPACTION */
+
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+				       struct page *page)
+{
+	__SetPageOffline(page);
+	list_add(&page->lru, &balloon->pages);
+}
+
+static inline void balloon_page_delete(struct page *page)
+{
+	__ClearPageOffline(page);
+	list_del(&page->lru);
+}
+
+static inline bool balloon_page_isolate(struct page *page)
+{
+	return false;
+}
+
+static inline void balloon_page_putback(struct page *page)
+{
+	return;
+}
+
+static inline int balloon_page_migrate(struct page *newpage,
+				       struct page *page, enum migrate_mode mode)
+{
+	return 0;
+}
+
+static inline gfp_t balloon_mapping_gfp_mask(void)
+{
+	return GFP_HIGHUSER;
+}
+
+#endif /* CONFIG_BALLOON_COMPACTION */
+
+/*
+ * balloon_page_push - insert a page into a page list.
+ * @pages : pointer to list
+ * @page : page to be added
+ *
+ * Caller must ensure the page is private and protect the list.
+ */
+static inline void balloon_page_push(struct list_head *pages, struct page *page)
+{
+	list_add(&page->lru, pages);
+}
+
+/*
+ * balloon_page_pop - remove the first page from a page list.
+ * @pages : pointer to list
+ *
+ * Caller must protect the list. Returns the first page of the list, or
+ * NULL if the list is empty.
+ */
+static inline struct page *balloon_page_pop(struct list_head *pages)
+{
+	struct page *page = list_first_entry_or_null(pages, struct page, lru);
+
+	if (!page)
+		return NULL;
+
+	list_del(&page->lru);
+	return page;
+}
+#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
new file mode 100644
index 0000000..118bea3
--- /dev/null
+++ b/include/linux/bcd.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCD_H
+#define _BCD_H
+
+#include <linux/compiler.h>
+
+#define bcd2bin(x)					\
+	(__builtin_constant_p((u8)(x)) ?	\
+	const_bcd2bin(x) :			\
+	_bcd2bin(x))
+
+#define bin2bcd(x)					\
+	(__builtin_constant_p((u8)(x)) ?	\
+	const_bin2bcd(x) :			\
+	_bin2bcd(x))
+
+#define const_bcd2bin(x)	(((x) & 0x0f) + ((x) >> 4) * 10)
+#define const_bin2bcd(x)	((((x) / 10) << 4) + (x) % 10)
+
+unsigned _bcd2bin(unsigned char val) __attribute_const__;
+unsigned char _bin2bcd(unsigned val) __attribute_const__;
+
+#endif /* _BCD_H */
diff --git a/include/linux/bch.h b/include/linux/bch.h
new file mode 100644
index 0000000..aa765af
--- /dev/null
+++ b/include/linux/bch.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generic binary BCH encoding/decoding library
+ *
+ * Copyright © 2011 Parrot S.A.
+ *
+ * Author: Ivan Djelic
+ *
+ * Description:
+ *
+ * This library provides runtime configurable encoding/decoding of binary
+ * Bose-Chaudhuri-Hocquenghem (BCH) codes.
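+ *
+ * Typical use, in sketch form (the field order m = 13, correction
+ * strength t = 4 and primitive polynomial 0, meaning the default, are
+ * example values; error handling is omitted):
+ *
+ *	bch = init_bch(13, 4, 0);
+ *	memset(ecc, 0, sizeof(ecc));
+ *	encode_bch(bch, data, len, ecc);
+ *	...
+ *	nerr = decode_bch(bch, data, len, recv_ecc, NULL, NULL, errloc);
+ *	...
+ *	free_bch(bch);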
diff --git a/include/linux/bch.h b/include/linux/bch.h
new file mode 100644
index 0000000..aa765af
--- /dev/null
+++ b/include/linux/bch.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generic binary BCH encoding/decoding library
+ *
+ * Copyright © 2011 Parrot S.A.
+ *
+ * Author: Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * Description:
+ *
+ * This library provides runtime configurable encoding/decoding of binary
+ * Bose-Chaudhuri-Hocquenghem (BCH) codes.
+ */
+#ifndef _BCH_H
+#define _BCH_H
+
+#include <linux/types.h>
+
+/**
+ * struct bch_control - BCH control structure
+ * @m: Galois field order
+ * @n: maximum codeword size in bits (= 2^m - 1)
+ * @t: error correction capability in bits
+ * @ecc_bits: ecc exact size in bits, i.e. generator polynomial degree (<= m*t)
+ * @ecc_bytes: ecc max size (m*t bits) in bytes
+ * @a_pow_tab: Galois field GF(2^m) exponentiation lookup table
+ * @a_log_tab: Galois field GF(2^m) log lookup table
+ * @mod8_tab: remainder generator polynomial lookup tables
+ * @ecc_buf: ecc parity words buffer
+ * @ecc_buf2: ecc parity words buffer
+ * @xi_tab: GF(2^m) base for solving degree 2 polynomial roots
+ * @syn: syndrome buffer
+ * @cache: log-based polynomial representation buffer
+ * @elp: error locator polynomial
+ * @poly_2t: temporary polynomials of degree 2t
+ */
+struct bch_control {
+ unsigned int m;
+ unsigned int n;
+ unsigned int t;
+ unsigned int ecc_bits;
+ unsigned int ecc_bytes;
+/* private: */
+ uint16_t *a_pow_tab;
+ uint16_t *a_log_tab;
+ uint32_t *mod8_tab;
+ uint32_t *ecc_buf;
+ uint32_t *ecc_buf2;
+ unsigned int *xi_tab;
+ unsigned int *syn;
+ int *cache;
+ struct gf_poly *elp;
+ struct gf_poly *poly_2t[4];
+};
+
+struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+
+void free_bch(struct bch_control *bch);
+
+void encode_bch(struct bch_control *bch, const uint8_t *data,
+  unsigned int len, uint8_t *ecc);
+
+int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+        const uint8_t *recv_ecc, const uint8_t *calc_ecc,
+        const unsigned int *syn, unsigned int *errloc);
+
+#endif /* _BCH_H */
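A typical round trip, as NAND controller drivers use this library: build the codec once with init_bch(), append ecc_bytes of parity per data block on the write path, and on the read path hand the data plus stored parity back to decode_bch(), which reports which bits to flip. A sketch under assumed parameters (m = 13, t = 4 protects a 512-byte block with 52 parity bits, i.e. 7 ecc bytes; prim_poly = 0 selects the default primitive polynomial):

    /* Illustrative only; "data" is a 512-byte buffer owned by the caller. */
    struct bch_control *bch = init_bch(13, 4, 0);
    unsigned int errloc[4];              /* at most t error locations */
    uint8_t ecc[7] = { 0 };              /* parity buffer, zeroed before encoding */
    int nerr;

    encode_bch(bch, data, 512, ecc);     /* write path: compute parity */

    /* ... later, after reading data and ecc back from flash ... */
    nerr = decode_bch(bch, data, 512, ecc, NULL, NULL, errloc);
    /* nerr < 0 means uncorrectable; otherwise errloc[0..nerr-1] hold the
     * bit offsets (within data||ecc) that must be XOR-flipped. */

    free_bch(bch);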
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
new file mode 100644
index 0000000..53b31f6
--- /dev/null
+++ b/include/linux/bcm47xx_nvram.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ */
+
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
+int bcm47xx_nvram_gpio_pin(const char *name);
+char *bcm47xx_nvram_get_contents(size_t *val_len);
+static inline void bcm47xx_nvram_release_contents(char *nvram)
+{
+ vfree(nvram);
+}
+#else
+static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ return -ENOTSUPP;
+}
+static inline int bcm47xx_nvram_getenv(const char *name, char *val,
+           size_t val_len)
+{
+ return -ENOTSUPP;
+}
+static inline int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ return -ENOTSUPP;
+}
+
+static inline char *bcm47xx_nvram_get_contents(size_t *val_len)
+{
+ return NULL;
+}
+
+static inline void bcm47xx_nvram_release_contents(char *nvram)
+{
+}
+#endif
+
+#endif /* __BCM47XX_NVRAM_H */
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
new file mode 100644
index 0000000..b0f4424
--- /dev/null
+++ b/include/linux/bcm47xx_sprom.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ */
+
+#ifndef __BCM47XX_SPROM_H
+#define __BCM47XX_SPROM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_SPROM
+int bcm47xx_sprom_register_fallbacks(void);
+#else
+static inline int bcm47xx_sprom_register_fallbacks(void)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* __BCM47XX_SPROM_H */
diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h
new file mode 100644
index 0000000..fc9dcdb
--- /dev/null
+++ b/include/linux/bcm47xx_wdt.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_BCM47XX_WDT_H_
+#define LINUX_BCM47XX_WDT_H_
+
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+
+struct bcm47xx_wdt {
+ u32 (*timer_set)(struct bcm47xx_wdt *, u32);
+ u32 (*timer_set_ms)(struct bcm47xx_wdt *, u32);
+ u32 max_timer_ms;
+
+ void *driver_data;
+
+ struct watchdog_device wdd;
+
+ struct timer_list soft_timer;
+ atomic_t soft_ticks;
+};
+
+static inline void *bcm47xx_wdt_get_drvdata(struct bcm47xx_wdt *wdt)
+{
+ return wdt->driver_data;
+}
+#endif /* LINUX_BCM47XX_WDT_H_ */
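Both BCM47xx headers follow the same pattern: real declarations when the Kconfig symbol is enabled, -ENOTSUPP stubs otherwise, so callers need no #ifdef guards. A sketch of typical NVRAM use (the "boardtype" key and buffer size are illustrative; getenv returns a negative errno on failure):

    char buf[32];
    size_t len;
    char *nvram;

    /* Single-variable lookup. */
    if (bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
            pr_info("board: %s\n", buf);

    /* Whole-image access: the returned copy must be handed back to the
     * release helper, which vfree()s it. */
    nvram = bcm47xx_nvram_get_contents(&len);
    if (nvram) {
            /* ... parse len bytes of raw NVRAM ... */
            bcm47xx_nvram_release_contents(nvram);
    }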
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
new file mode 100644
index 0000000..c8c7f01
--- /dev/null
+++ b/include/linux/bcm963xx_nvram.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BCM963XX_NVRAM_H__
+#define __LINUX_BCM963XX_NVRAM_H__
+
+#include <linux/crc32.h>
+#include <linux/if_ether.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Broadcom BCM963xx SoC board nvram data structure.
+ *
+ * The nvram structure varies in size depending on the SoC board version. Use
+ * the appropriate minimum BCM963XX_NVRAM_*_SIZE define for the information
+ * you need instead of sizeof(struct bcm963xx_nvram) as this may change.
+ */
+
+#define BCM963XX_NVRAM_V4_SIZE 300
+#define BCM963XX_NVRAM_V5_SIZE (1 * SZ_1K)
+
+#define BCM963XX_DEFAULT_PSI_SIZE 64
+
+enum bcm963xx_nvram_nand_part {
+ BCM963XX_NVRAM_NAND_PART_BOOT = 0,
+ BCM963XX_NVRAM_NAND_PART_ROOTFS_1,
+ BCM963XX_NVRAM_NAND_PART_ROOTFS_2,
+ BCM963XX_NVRAM_NAND_PART_DATA,
+ BCM963XX_NVRAM_NAND_PART_BBT,
+
+ __BCM963XX_NVRAM_NAND_NR_PARTS
+};
+
+struct bcm963xx_nvram {
+ u32 version;
+ char bootline[256];
+ char name[16];
+ u32 main_tp_number;
+ u32 psi_size;
+ u32 mac_addr_count;
+ u8 mac_addr_base[ETH_ALEN];
+ u8 __reserved1[2];
+ u32 checksum_v4;
+
+ u8 __reserved2[292];
+ u32 nand_part_offset[__BCM963XX_NVRAM_NAND_NR_PARTS];
+ u32 nand_part_size[__BCM963XX_NVRAM_NAND_NR_PARTS];
+ u8 __reserved3[388];
+ u32 checksum_v5;
+};
+
+#define BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, part) \
+ bcm963xx_nvram_nand_part_offset(nvram, BCM963XX_NVRAM_NAND_PART_ ##part)
+
+static inline u64 __pure bcm963xx_nvram_nand_part_offset(
+ const struct bcm963xx_nvram *nvram,
+ enum bcm963xx_nvram_nand_part part)
+{
+ return nvram->nand_part_offset[part] * SZ_1K;
+}
+
+#define BCM963XX_NVRAM_NAND_PART_SIZE(nvram, part) \
+ bcm963xx_nvram_nand_part_size(nvram, BCM963XX_NVRAM_NAND_PART_ ##part)
+
+static inline u64 __pure bcm963xx_nvram_nand_part_size(
+ const struct bcm963xx_nvram *nvram,
+ enum bcm963xx_nvram_nand_part part)
+{
+ return nvram->nand_part_size[part] * SZ_1K;
+}
+
+/*
+ * bcm963xx_nvram_checksum - Verify nvram checksum
+ *
+ * @nvram: pointer to full size nvram data structure
+ * @expected_out: optional pointer to store expected checksum value
+ * @actual_out: optional pointer to store actual checksum value
+ *
+ * Return: 0 if the checksum is valid, otherwise -EINVAL
+ */
+static int __maybe_unused bcm963xx_nvram_checksum(
+ const struct bcm963xx_nvram *nvram,
+ u32 *expected_out, u32 *actual_out)
+{
+ u32 expected, actual;
+ size_t len;
+
+ if (nvram->version <= 4) {
+  expected = nvram->checksum_v4;
+  len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32);
+ } else {
+  expected = nvram->checksum_v5;
+  len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32);
+ }
+
+ /*
+  * Calculate the CRC32 value for the nvram with a checksum value
+  * of 0 without modifying or copying the nvram by combining:
+  * - The CRC32 of the nvram without the checksum value
+  * - The CRC32 of a zero checksum value (which is also 0)
+  */
+ actual = crc32_le_combine(
+  crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32));
+
+ if (expected_out)
+  *expected_out = expected;
+
+ if (actual_out)
+  *actual_out = actual;
+
+ return expected == actual ? 0 : -EINVAL;
+}
+
+#endif /* __LINUX_BCM963XX_NVRAM_H__ */
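The checksum helper recomputes the image CRC as if the stored checksum field were zero, without copying the nvram: it CRCs everything up to the checksum field, then folds in four zero bytes with crc32_le_combine(). A usage sketch (variable names and the warning are illustrative; nvram must point at a full-size image):

    u32 expected, actual;
    u64 off, size;

    if (bcm963xx_nvram_checksum(nvram, &expected, &actual))
            pr_warn("nvram checksum mismatch: expected %08x, got %08x\n",
                    expected, actual);

    /* Partition geometry is stored in 1 KiB units; the accessors scale it. */
    off = BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, ROOTFS_1);
    size = BCM963XX_NVRAM_NAND_PART_SIZE(nvram, ROOTFS_1);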
diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h
new file mode 100644
index 0000000..b87945c
--- /dev/null
+++ b/include/linux/bcm963xx_tag.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BCM963XX_TAG_H__
+#define __LINUX_BCM963XX_TAG_H__
+
+#include <linux/types.h>
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define IMAGE_SEQUENCE_LEN 4 /* Image sequence Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/* Extended flash address, needs to be subtracted
+ * from bcm_tag flash image offsets.
+ */
+#define BCM963XX_EXTENDED_SIZE 0xBFC00000
+
+/*
+ * The Broadcom firmware assumes the rootfs starts the image, and therefore
+ * uses the rootfs start (flash_image_address) to determine where to flash
+ * the image. Since we have the kernel first, we have to give it the kernel
+ * address, but the CRC uses the length associated with this address
+ * (root_length), which is added to the kernel length (kernel_length) to
+ * determine the length of image to flash; root_length thus needs to be
+ * rootfs + deadcode (the jffs2 EOF marker).
+ */
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+  * (kernel for OpenWrt, rootfs for stock firmware)
+  */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-141: Image sequence number
+  * (to be incremented when flashed with a new image)
+  */
+ char image_sequence[IMAGE_SEQUENCE_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ __u32 fskernel_crc;
+ /* 200-215: Unused except on Alice Gate, where it holds information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ __u32 image_crc;
+ /* 220-223: CRC32 of rootfs partition */
+ __u32 rootfs_crc;
+ /* 224-227: CRC32 of kernel partition */
+ __u32 kernel_crc;
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding last 20 bytes */
+ __u32 header_crc;
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __LINUX_BCM963XX_TAG_H__ */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
new file mode 100644
index 0000000..60b94b9
--- /dev/null
+++ b/include/linux/bcma/bcma.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_BCMA_H_
+#define LINUX_BCMA_H_
+
+#include <linux/pci.h>
+#include <linux/mod_devicetable.h>
+
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/bcma/bcma_driver_pci.h>
+#include <linux/bcma/bcma_driver_pcie2.h>
+#include <linux/bcma/bcma_driver_mips.h>
+#include <linux/bcma/bcma_driver_gmac_cmn.h>
+#include <linux/bcma/bcma_driver_arm_c9.h>
+#include <linux/ssb/ssb.h> /* SPROM sharing */
+
+#include <linux/bcma/bcma_regs.h>
+
+struct bcma_device;
+struct bcma_bus;
+
+enum bcma_hosttype {
+ BCMA_HOSTTYPE_PCI,
+ BCMA_HOSTTYPE_SDIO,
+ BCMA_HOSTTYPE_SOC,
+};
+
+struct bcma_chipinfo {
+ u16 id;
+ u8 rev;
+ u8 pkg;
+};
+
+struct bcma_boardinfo {
+ u16 vendor;
+ u16 type;
+};
+
+enum bcma_clkmode {
+ BCMA_CLKMODE_FAST,
+ BCMA_CLKMODE_DYNAMIC,
+};
+
+struct bcma_host_ops {
+ u8 (*read8)(struct bcma_device *core, u16 offset);
+ u16 (*read16)(struct bcma_device *core, u16 offset);
+ u32 (*read32)(struct bcma_device *core, u16 offset);
+ void (*write8)(struct bcma_device *core, u16 offset, u8 value);
+ void (*write16)(struct bcma_device *core, u16 offset, u16 value);
+ void (*write32)(struct bcma_device *core, u16 offset, u32 value);
+#ifdef CONFIG_BCMA_BLOCKIO
+ void (*block_read)(struct bcma_device *core, void *buffer,
+      size_t count, u16 offset, u8 reg_width);
+ void
(*block_write)(struct bcma_device *core, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif + /* Agent ops */ + u32 (*aread32)(struct bcma_device *core, u16 offset); + void (*awrite32)(struct bcma_device *core, u16 offset, u32 value); +}; + +/* Core manufacturers */ +#define BCMA_MANUF_ARM 0x43B +#define BCMA_MANUF_MIPS 0x4A7 +#define BCMA_MANUF_BCM 0x4BF + +/* Core class values. */ +#define BCMA_CL_SIM 0x0 +#define BCMA_CL_EROM 0x1 +#define BCMA_CL_CORESIGHT 0x9 +#define BCMA_CL_VERIF 0xB +#define BCMA_CL_OPTIMO 0xD +#define BCMA_CL_GEN 0xE +#define BCMA_CL_PRIMECELL 0xF + +/* Core-ID values. */ +#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */ +#define BCMA_CORE_4706_CHIPCOMMON 0x500 +#define BCMA_CORE_NS_PCIEG2 0x501 +#define BCMA_CORE_NS_DMA 0x502 +#define BCMA_CORE_NS_SDIO3 0x503 +#define BCMA_CORE_NS_USB20 0x504 +#define BCMA_CORE_NS_USB30 0x505 +#define BCMA_CORE_NS_A9JTAG 0x506 +#define BCMA_CORE_NS_DDR23 0x507 +#define BCMA_CORE_NS_ROM 0x508 +#define BCMA_CORE_NS_NAND 0x509 +#define BCMA_CORE_NS_QSPI 0x50A +#define BCMA_CORE_NS_CHIPCOMMON_B 0x50B +#define BCMA_CORE_4706_SOC_RAM 0x50E +#define BCMA_CORE_ARMCA9 0x510 +#define BCMA_CORE_4706_MAC_GBIT 0x52D +#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */ +#define BCMA_CORE_ALTA 0x534 /* I2S core */ +#define BCMA_CORE_4706_MAC_GBIT_COMMON 0x5DC +#define BCMA_CORE_DDR23_PHY 0x5DD +#define BCMA_CORE_INVALID 0x700 +#define BCMA_CORE_CHIPCOMMON 0x800 +#define BCMA_CORE_ILINE20 0x801 +#define BCMA_CORE_SRAM 0x802 +#define BCMA_CORE_SDRAM 0x803 +#define BCMA_CORE_PCI 0x804 +#define BCMA_CORE_MIPS 0x805 +#define BCMA_CORE_ETHERNET 0x806 +#define BCMA_CORE_V90 0x807 +#define BCMA_CORE_USB11_HOSTDEV 0x808 +#define BCMA_CORE_ADSL 0x809 +#define BCMA_CORE_ILINE100 0x80A +#define BCMA_CORE_IPSEC 0x80B +#define BCMA_CORE_UTOPIA 0x80C +#define BCMA_CORE_PCMCIA 0x80D +#define BCMA_CORE_INTERNAL_MEM 0x80E +#define BCMA_CORE_MEMC_SDRAM 0x80F +#define BCMA_CORE_OFDM 0x810 +#define BCMA_CORE_EXTIF 0x811 +#define BCMA_CORE_80211 0x812 +#define BCMA_CORE_PHY_A 0x813 +#define BCMA_CORE_PHY_B 0x814 +#define BCMA_CORE_PHY_G 0x815 +#define BCMA_CORE_MIPS_3302 0x816 +#define BCMA_CORE_USB11_HOST 0x817 +#define BCMA_CORE_USB11_DEV 0x818 +#define BCMA_CORE_USB20_HOST 0x819 +#define BCMA_CORE_USB20_DEV 0x81A +#define BCMA_CORE_SDIO_HOST 0x81B +#define BCMA_CORE_ROBOSWITCH 0x81C +#define BCMA_CORE_PARA_ATA 0x81D +#define BCMA_CORE_SATA_XORDMA 0x81E +#define BCMA_CORE_ETHERNET_GBIT 0x81F +#define BCMA_CORE_PCIE 0x820 +#define BCMA_CORE_PHY_N 0x821 +#define BCMA_CORE_SRAM_CTL 0x822 +#define BCMA_CORE_MINI_MACPHY 0x823 +#define BCMA_CORE_ARM_1176 0x824 +#define BCMA_CORE_ARM_7TDMI 0x825 +#define BCMA_CORE_PHY_LP 0x826 +#define BCMA_CORE_PMU 0x827 +#define BCMA_CORE_PHY_SSN 0x828 +#define BCMA_CORE_SDIO_DEV 0x829 +#define BCMA_CORE_ARM_CM3 0x82A +#define BCMA_CORE_PHY_HT 0x82B +#define BCMA_CORE_MIPS_74K 0x82C +#define BCMA_CORE_MAC_GBIT 0x82D +#define BCMA_CORE_DDR12_MEM_CTL 0x82E +#define BCMA_CORE_PCIE_RC 0x82F /* PCIe Root Complex */ +#define BCMA_CORE_OCP_OCP_BRIDGE 0x830 +#define BCMA_CORE_SHARED_COMMON 0x831 +#define BCMA_CORE_OCP_AHB_BRIDGE 0x832 +#define BCMA_CORE_SPI_HOST 0x833 +#define BCMA_CORE_I2S 0x834 +#define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */ +#define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */ +#define BCMA_CORE_PHY_AC 0x83B +#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ +#define BCMA_CORE_USB30_DEV 0x83D +#define BCMA_CORE_ARM_CR4 0x83E +#define 
BCMA_CORE_GCI 0x840 +#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */ +#define BCMA_CORE_ARM_CA7 0x847 +#define BCMA_CORE_SYS_MEM 0x849 +#define BCMA_CORE_DEFAULT 0xFFF + +#define BCMA_MAX_NR_CORES 16 +#define BCMA_CORE_SIZE 0x1000 + +/* Chip IDs of PCIe devices */ +#define BCMA_CHIP_ID_BCM4313 0x4313 +#define BCMA_CHIP_ID_BCM43142 43142 +#define BCMA_CHIP_ID_BCM43131 43131 +#define BCMA_CHIP_ID_BCM43217 43217 +#define BCMA_CHIP_ID_BCM43222 43222 +#define BCMA_CHIP_ID_BCM43224 43224 +#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8 +#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa +#define BCMA_CHIP_ID_BCM43225 43225 +#define BCMA_CHIP_ID_BCM43227 43227 +#define BCMA_CHIP_ID_BCM43228 43228 +#define BCMA_CHIP_ID_BCM43421 43421 +#define BCMA_CHIP_ID_BCM43428 43428 +#define BCMA_CHIP_ID_BCM43431 43431 +#define BCMA_CHIP_ID_BCM43460 43460 +#define BCMA_CHIP_ID_BCM4331 0x4331 +#define BCMA_CHIP_ID_BCM6362 0x6362 +#define BCMA_CHIP_ID_BCM4360 0x4360 +#define BCMA_CHIP_ID_BCM4352 0x4352 + +/* Chip IDs of SoCs */ +#define BCMA_CHIP_ID_BCM4706 0x5300 +#define BCMA_PKG_ID_BCM4706L 1 +#define BCMA_CHIP_ID_BCM4716 0x4716 +#define BCMA_PKG_ID_BCM4716 8 +#define BCMA_PKG_ID_BCM4717 9 +#define BCMA_PKG_ID_BCM4718 10 +#define BCMA_CHIP_ID_BCM47162 47162 +#define BCMA_CHIP_ID_BCM4748 0x4748 +#define BCMA_CHIP_ID_BCM4749 0x4749 +#define BCMA_CHIP_ID_BCM5356 0x5356 +#define BCMA_CHIP_ID_BCM5357 0x5357 +#define BCMA_PKG_ID_BCM5358 9 +#define BCMA_PKG_ID_BCM47186 10 +#define BCMA_PKG_ID_BCM5357 11 +#define BCMA_CHIP_ID_BCM53572 53572 +#define BCMA_PKG_ID_BCM47188 9 +#define BCMA_CHIP_ID_BCM4707 53010 +#define BCMA_PKG_ID_BCM4707 1 +#define BCMA_PKG_ID_BCM4708 2 +#define BCMA_PKG_ID_BCM4709 0 +#define BCMA_CHIP_ID_BCM47094 53030 +#define BCMA_CHIP_ID_BCM53018 53018 +#define BCMA_CHIP_ID_BCM53573 53573 +#define BCMA_PKG_ID_BCM53573 0 +#define BCMA_PKG_ID_BCM47189 1 + +/* Board types (on PCI usually equals to the subsystem dev id) */ +/* BCM4313 */ +#define BCMA_BOARD_TYPE_BCM94313BU 0X050F +#define BCMA_BOARD_TYPE_BCM94313HM 0X0510 +#define BCMA_BOARD_TYPE_BCM94313EPA 0X0511 +#define BCMA_BOARD_TYPE_BCM94313HMG 0X051C +/* BCM4716 */ +#define BCMA_BOARD_TYPE_BCM94716NR2 0X04CD +/* BCM43224 */ +#define BCMA_BOARD_TYPE_BCM943224X21 0X056E +#define BCMA_BOARD_TYPE_BCM943224X21_FCC 0X00D1 +#define BCMA_BOARD_TYPE_BCM943224X21B 0X00E9 +#define BCMA_BOARD_TYPE_BCM943224M93 0X008B +#define BCMA_BOARD_TYPE_BCM943224M93A 0X0090 +#define BCMA_BOARD_TYPE_BCM943224X16 0X0093 +#define BCMA_BOARD_TYPE_BCM94322X9 0X008D +#define BCMA_BOARD_TYPE_BCM94322M35E 0X008E +/* BCM43228 */ +#define BCMA_BOARD_TYPE_BCM943228BU8 0X0540 +#define BCMA_BOARD_TYPE_BCM943228BU9 0X0541 +#define BCMA_BOARD_TYPE_BCM943228BU 0X0542 +#define BCMA_BOARD_TYPE_BCM943227HM4L 0X0543 +#define BCMA_BOARD_TYPE_BCM943227HMB 0X0544 +#define BCMA_BOARD_TYPE_BCM943228HM4L 0X0545 +#define BCMA_BOARD_TYPE_BCM943228SD 0X0573 +/* BCM4331 */ +#define BCMA_BOARD_TYPE_BCM94331X19 0X00D6 +#define BCMA_BOARD_TYPE_BCM94331X28 0X00E4 +#define BCMA_BOARD_TYPE_BCM94331X28B 0X010E +#define BCMA_BOARD_TYPE_BCM94331PCIEBT3AX 0X00E4 +#define BCMA_BOARD_TYPE_BCM94331X12_2G 0X00EC +#define BCMA_BOARD_TYPE_BCM94331X12_5G 0X00ED +#define BCMA_BOARD_TYPE_BCM94331X29B 0X00EF +#define BCMA_BOARD_TYPE_BCM94331CSAX 0X00EF +#define BCMA_BOARD_TYPE_BCM94331X19C 0X00F5 +#define BCMA_BOARD_TYPE_BCM94331X33 0X00F4 +#define BCMA_BOARD_TYPE_BCM94331BU 0X0523 +#define BCMA_BOARD_TYPE_BCM94331S9BU 0X0524 +#define BCMA_BOARD_TYPE_BCM94331MC 0X0525 +#define BCMA_BOARD_TYPE_BCM94331MCI 0X0526 
+#define BCMA_BOARD_TYPE_BCM94331PCIEBT4 0X0527 +#define BCMA_BOARD_TYPE_BCM94331HM 0X0574 +#define BCMA_BOARD_TYPE_BCM94331PCIEDUAL 0X059B +#define BCMA_BOARD_TYPE_BCM94331MCH5 0X05A9 +#define BCMA_BOARD_TYPE_BCM94331CS 0X05C6 +#define BCMA_BOARD_TYPE_BCM94331CD 0X05DA +/* BCM53572 */ +#define BCMA_BOARD_TYPE_BCM953572BU 0X058D +#define BCMA_BOARD_TYPE_BCM953572NR2 0X058E +#define BCMA_BOARD_TYPE_BCM947188NR2 0X058F +#define BCMA_BOARD_TYPE_BCM953572SDRNR2 0X0590 +/* BCM43142 */ +#define BCMA_BOARD_TYPE_BCM943142HM 0X05E0 + +struct bcma_device { + struct bcma_bus *bus; + struct bcma_device_id id; + + struct device dev; + struct device *dma_dev; + + unsigned int irq; + bool dev_registered; + + u8 core_index; + u8 core_unit; + + u32 addr; + u32 addr_s[8]; + u32 wrap; + + void __iomem *io_addr; + void __iomem *io_wrap; + + void *drvdata; + struct list_head list; +}; + +static inline void *bcma_get_drvdata(struct bcma_device *core) +{ + return core->drvdata; +} +static inline void bcma_set_drvdata(struct bcma_device *core, void *drvdata) +{ + core->drvdata = drvdata; +} + +struct bcma_driver { + const char *name; + const struct bcma_device_id *id_table; + + int (*probe)(struct bcma_device *dev); + void (*remove)(struct bcma_device *dev); + int (*suspend)(struct bcma_device *dev); + int (*resume)(struct bcma_device *dev); + void (*shutdown)(struct bcma_device *dev); + + struct device_driver drv; +}; +extern +int __bcma_driver_register(struct bcma_driver *drv, struct module *owner); +#define bcma_driver_register(drv) \ + __bcma_driver_register(drv, THIS_MODULE) + +extern void bcma_driver_unregister(struct bcma_driver *drv); + +/* module_bcma_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_bcma_driver(__bcma_driver) \ + module_driver(__bcma_driver, bcma_driver_register, \ + bcma_driver_unregister) + +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int bcma_arch_register_fallback_sprom( + int (*sprom_callback)(struct bcma_bus *bus, + struct ssb_sprom *out)); + +struct bcma_bus { + struct device *dev; + + /* The MMIO area. */ + void __iomem *mmio; + + const struct bcma_host_ops *ops; + + enum bcma_hosttype hosttype; + bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ + struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */ + + struct bcma_chipinfo chipinfo; + + struct bcma_boardinfo boardinfo; + + struct bcma_device *mapped_core; + struct list_head cores; + u8 nr_cores; + u8 num; + + struct bcma_drv_cc drv_cc; + struct bcma_drv_cc_b drv_cc_b; + struct bcma_drv_pci drv_pci[2]; + struct bcma_drv_pcie2 drv_pcie2; + struct bcma_drv_mips drv_mips; + struct bcma_drv_gmac_cmn drv_gmac_cmn; + + /* We decided to share SPROM struct with SSB as long as we do not need + * any hacks for BCMA. This simplifies drivers code. 
*/
+ struct ssb_sprom sprom;
+};
+
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->read8(core, offset);
+}
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->read16(core, offset);
+}
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->read32(core, offset);
+}
+static inline
+void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->write8(core, offset, value);
+}
+static inline
+void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->write16(core, offset, value);
+}
+static inline
+void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->write32(core, offset, value);
+}
+#ifdef CONFIG_BCMA_BLOCKIO
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
+       size_t count, u16 offset, u8 reg_width)
+{
+ core->bus->ops->block_read(core, buffer, count, offset, reg_width);
+}
+static inline void bcma_block_write(struct bcma_device *core,
+        const void *buffer, size_t count,
+        u16 offset, u8 reg_width)
+{
+ core->bus->ops->block_write(core, buffer, count, offset, reg_width);
+}
+#endif
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->aread32(core, offset);
+}
+static inline
+void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->awrite32(core, offset, value);
+}
+
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+ bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+ bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+      u16 offset, u32 mask, u32 set)
+{
+ bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+ bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+ bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+      u16 offset, u16 mask, u16 set)
+{
+ bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
+
+extern struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+            u8 unit);
+static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
+       u16 coreid)
+{
+ return bcma_find_core_unit(bus, coreid, 0);
+}
+
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+     struct bcma_device *core, bool enable);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+     struct bcma_device *core, bool enable)
+{
+ if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+  return -ENOTSUPP;
+ return 0;
+}
+#endif
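The ops table gives every core a uniform MMIO accessor set regardless of host type (PCI, SDIO or SoC), and the mask/set helpers wrap the common read-modify-write idiom. A hedged sketch (the 0x0400 offset and bit values are invented for illustration, not a real register):

    static void example_tweak(struct bcma_device *core)
    {
            u32 v;

            /* Helper form: new = (old & mask) | set; here it clears the
             * low nibble and sets bit 8 in one call. */
            bcma_maskset32(core, 0x0400, ~0x0000000Fu, 0x00000100);

            /* Equivalent open-coded form using the plain accessors: */
            v = bcma_read32(core, 0x0400);
            v = (v & ~0x0000000Fu) | 0x00000100;
            bcma_write32(core, 0x0400, v);
    }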
+extern bool bcma_core_is_enabled(struct bcma_device *core);
+extern void bcma_core_disable(struct bcma_device *core, u32 flags);
+extern int bcma_core_enable(struct bcma_device *core, u32 flags);
+extern void bcma_core_set_clockmode(struct bcma_device *core,
+        enum bcma_clkmode clkmode);
+extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status,
+         bool on);
+extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
+#define BCMA_DMA_TRANSLATION_MASK 0xC0000000
+#define BCMA_DMA_TRANSLATION_NONE 0x00000000
+#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */
+#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */
+extern u32 bcma_core_dma_translation(struct bcma_device *core);
+
+extern unsigned int bcma_core_irq(struct bcma_device *core, int num);
+
+#endif /* LINUX_BCMA_H_ */
diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h
new file mode 100644
index 0000000..688cf59
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_arm_c9.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_
+#define LINUX_BCMA_DRIVER_ARM_C9_H_
+
+/* DMU (Device Management Unit) */
+#define BCMA_DMU_CRU_USB2_CONTROL 0x0164
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK 0x00000FFC
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT 2
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK 0x00007000
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT 12
+#define BCMA_DMU_CRU_CLKSET_KEY 0x0180
+#define BCMA_DMU_CRU_STRAPS_CTRL 0x02A0
+#define BCMA_DMU_CRU_STRAPS_CTRL_USB3 0x00000010
+#define BCMA_DMU_CRU_STRAPS_CTRL_4BYTE 0x00008000
+
+#endif /* LINUX_BCMA_DRIVER_ARM_C9_H_ */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
new file mode 100644
index 0000000..d35b920
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -0,0 +1,716 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_BCMA_DRIVER_CC_H_
+#define LINUX_BCMA_DRIVER_CC_H_
+
+#include <linux/platform_device.h>
+#include <linux/gpio/driver.h>
+
+/** ChipCommon core registers.
**/ +#define BCMA_CC_ID 0x0000 +#define BCMA_CC_ID_ID 0x0000FFFF +#define BCMA_CC_ID_ID_SHIFT 0 +#define BCMA_CC_ID_REV 0x000F0000 +#define BCMA_CC_ID_REV_SHIFT 16 +#define BCMA_CC_ID_PKG 0x00F00000 +#define BCMA_CC_ID_PKG_SHIFT 20 +#define BCMA_CC_ID_NRCORES 0x0F000000 +#define BCMA_CC_ID_NRCORES_SHIFT 24 +#define BCMA_CC_ID_TYPE 0xF0000000 +#define BCMA_CC_ID_TYPE_SHIFT 28 +#define BCMA_CC_CAP 0x0004 /* Capabilities */ +#define BCMA_CC_CAP_NRUART 0x00000003 /* # of UARTs */ +#define BCMA_CC_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define BCMA_CC_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define BCMA_CC_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define BCMA_CC_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define BCMA_CC_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define BCMA_CC_CAP_FLASHT 0x00000700 /* Flash Type */ +#define BCMA_CC_FLASHT_NONE 0x00000000 /* No flash */ +#define BCMA_CC_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define BCMA_CC_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define BCMA_CC_FLASHT_NAND 0x00000300 /* NAND flash */ +#define BCMA_CC_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define BCMA_CC_CAP_PLLT 0x00038000 /* PLL Type */ +#define BCMA_PLLTYPE_NONE 0x00000000 +#define BCMA_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define BCMA_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define BCMA_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define BCMA_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define BCMA_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define BCMA_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define BCMA_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define BCMA_CC_CAP_PCTL 0x00040000 /* Power Control */ +#define BCMA_CC_CAP_OTPS 0x00380000 /* OTP size */ +#define BCMA_CC_CAP_OTPS_SHIFT 19 +#define BCMA_CC_CAP_OTPS_BASE 5 +#define BCMA_CC_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define BCMA_CC_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define BCMA_CC_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define BCMA_CC_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ +#define BCMA_CC_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ +#define BCMA_CC_CAP_SPROM 0x40000000 /* SPROM present */ +#define BCMA_CC_CAP_NFLASH 0x80000000 /* NAND flash present (rev >= 35 or BCM4706?) 
*/
+#define BCMA_CC_CORECTL 0x0008
+#define BCMA_CC_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
+#define BCMA_CC_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
+#define BCMA_CC_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */
+#define BCMA_CC_BIST 0x000C
+#define BCMA_CC_OTPS 0x0010 /* OTP status */
+#define BCMA_CC_OTPS_PROGFAIL 0x80000000
+#define BCMA_CC_OTPS_PROTECT 0x00000007
+#define BCMA_CC_OTPS_HW_PROTECT 0x00000001
+#define BCMA_CC_OTPS_SW_PROTECT 0x00000002
+#define BCMA_CC_OTPS_CID_PROTECT 0x00000004
+#define BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */
+#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8
+#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */
+#define BCMA_CC_OTPC 0x0014 /* OTP control */
+#define BCMA_CC_OTPC_RECWAIT 0xFF000000
+#define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00
+#define BCMA_CC_OTPC_PRW_SHIFT 8
+#define BCMA_CC_OTPC_MAXFAIL 0x00000038
+#define BCMA_CC_OTPC_VSEL 0x00000006
+#define BCMA_CC_OTPC_SELVL 0x00000001
+#define BCMA_CC_OTPP 0x0018 /* OTP prog */
+#define BCMA_CC_OTPP_COL 0x000000FF
+#define BCMA_CC_OTPP_ROW 0x0000FF00
+#define BCMA_CC_OTPP_ROW_SHIFT 8
+#define BCMA_CC_OTPP_READERR 0x10000000
+#define BCMA_CC_OTPP_VALUE 0x20000000
+#define BCMA_CC_OTPP_READ 0x40000000
+#define BCMA_CC_OTPP_START 0x80000000
+#define BCMA_CC_OTPP_BUSY 0x80000000
+#define BCMA_CC_OTPL 0x001C /* OTP layout */
+#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */
+#define BCMA_CC_IRQSTAT 0x0020
+#define BCMA_CC_IRQMASK 0x0024
+#define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */
+#define BCMA_CC_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */
+#define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */
+#define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */
+#define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */
+#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1
+#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
+#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
+#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
+#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001
+#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002
+#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004
+#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008
+#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010
+#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020
+#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package, 1: low-cost package */
+#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
+#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */
+#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */
+#define BCMA_CC_CHIPST_5357_NAND_BOOT BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
+#define BCMA_CC_CHIPST_4360_XTAL_40MZ 0x00000001
+#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
+#define BCMA_CC_JCMD_START 0x80000000
+#define BCMA_CC_JCMD_BUSY 0x80000000
+#define BCMA_CC_JCMD_PAUSE 0x40000000
+#define BCMA_CC_JCMD0_ACC_MASK 0x0000F000
+#define BCMA_CC_JCMD0_ACC_IRDR 0x00000000
+#define BCMA_CC_JCMD0_ACC_DR 0x00001000
+#define BCMA_CC_JCMD0_ACC_IR 0x00002000
+#define BCMA_CC_JCMD0_ACC_RESET 0x00003000
+#define BCMA_CC_JCMD0_ACC_IRPDR 0x00004000
+#define BCMA_CC_JCMD0_ACC_PDR 0x00005000
+#define BCMA_CC_JCMD0_IRW_MASK 0x00000F00
+#define BCMA_CC_JCMD_ACC_MASK 0x000F0000 /* Changes
for corerev 11 */ +#define BCMA_CC_JCMD_ACC_IRDR 0x00000000 +#define BCMA_CC_JCMD_ACC_DR 0x00010000 +#define BCMA_CC_JCMD_ACC_IR 0x00020000 +#define BCMA_CC_JCMD_ACC_RESET 0x00030000 +#define BCMA_CC_JCMD_ACC_IRPDR 0x00040000 +#define BCMA_CC_JCMD_ACC_PDR 0x00050000 +#define BCMA_CC_JCMD_IRW_MASK 0x00001F00 +#define BCMA_CC_JCMD_IRW_SHIFT 8 +#define BCMA_CC_JCMD_DRW_MASK 0x0000003F +#define BCMA_CC_JIR 0x0034 /* Rev >= 10 only */ +#define BCMA_CC_JDR 0x0038 /* Rev >= 10 only */ +#define BCMA_CC_JCTL 0x003C /* Rev >= 10 only */ +#define BCMA_CC_JCTL_FORCE_CLK 4 /* Force clock */ +#define BCMA_CC_JCTL_EXT_EN 2 /* Enable external targets */ +#define BCMA_CC_JCTL_EN 1 /* Enable Jtag master */ +#define BCMA_CC_FLASHCTL 0x0040 +/* Start/busy bit in flashcontrol */ +#define BCMA_CC_FLASHCTL_OPCODE 0x000000ff +#define BCMA_CC_FLASHCTL_ACTION 0x00000700 +#define BCMA_CC_FLASHCTL_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */ +#define BCMA_CC_FLASHCTL_START 0x80000000 +#define BCMA_CC_FLASHCTL_BUSY BCMA_CC_FLASHCTL_START +/* Flashcontrol action + opcodes for ST flashes */ +#define BCMA_CC_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ +#define BCMA_CC_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ +#define BCMA_CC_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ +#define BCMA_CC_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ +#define BCMA_CC_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ +#define BCMA_CC_FLASHCTL_ST_PP 0x0302 /* Page Program */ +#define BCMA_CC_FLASHCTL_ST_SE 0x02d8 /* Sector Erase */ +#define BCMA_CC_FLASHCTL_ST_BE 0x00c7 /* Bulk Erase */ +#define BCMA_CC_FLASHCTL_ST_DP 0x00b9 /* Deep Power-down */ +#define BCMA_CC_FLASHCTL_ST_RES 0x03ab /* Read Electronic Signature */ +#define BCMA_CC_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */ +#define BCMA_CC_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */ +/* Flashcontrol action + opcodes for Atmel flashes */ +#define BCMA_CC_FLASHCTL_AT_READ 0x07e8 +#define BCMA_CC_FLASHCTL_AT_PAGE_READ 0x07d2 +#define BCMA_CC_FLASHCTL_AT_STATUS 0x01d7 +#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE 0x0384 +#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE 0x0387 +#define BCMA_CC_FLASHCTL_AT_BUF1_ERASE_PROGRAM 0x0283 +#define BCMA_CC_FLASHCTL_AT_BUF2_ERASE_PROGRAM 0x0286 +#define BCMA_CC_FLASHCTL_AT_BUF1_PROGRAM 0x0288 +#define BCMA_CC_FLASHCTL_AT_BUF2_PROGRAM 0x0289 +#define BCMA_CC_FLASHCTL_AT_PAGE_ERASE 0x0281 +#define BCMA_CC_FLASHCTL_AT_BLOCK_ERASE 0x0250 +#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define BCMA_CC_FLASHCTL_AT_BUF1_LOAD 0x0253 +#define BCMA_CC_FLASHCTL_AT_BUF2_LOAD 0x0255 +#define BCMA_CC_FLASHCTL_AT_BUF1_COMPARE 0x0260 +#define BCMA_CC_FLASHCTL_AT_BUF2_COMPARE 0x0261 +#define BCMA_CC_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 +#define BCMA_CC_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 +#define BCMA_CC_FLASHADDR 0x0044 +#define BCMA_CC_FLASHDATA 0x0048 +/* Status register bits for ST flashes */ +#define BCMA_CC_FLASHDATA_ST_WIP 0x01 /* Write In Progress */ +#define BCMA_CC_FLASHDATA_ST_WEL 0x02 /* Write Enable Latch */ +#define BCMA_CC_FLASHDATA_ST_BP_MASK 0x1c /* Block Protect */ +#define BCMA_CC_FLASHDATA_ST_BP_SHIFT 2 +#define BCMA_CC_FLASHDATA_ST_SRWD 0x80 /* Status Register Write Disable */ +/* Status register bits for Atmel flashes */ +#define BCMA_CC_FLASHDATA_AT_READY 0x80 +#define BCMA_CC_FLASHDATA_AT_MISMATCH 0x40 +#define BCMA_CC_FLASHDATA_AT_ID_MASK 0x38 +#define BCMA_CC_FLASHDATA_AT_ID_SHIFT 3 +#define BCMA_CC_BCAST_ADDR 0x0050 +#define BCMA_CC_BCAST_DATA 0x0054 
+#define BCMA_CC_GPIOPULLUP 0x0058 /* Rev >= 20 only */ +#define BCMA_CC_GPIOPULLDOWN 0x005C /* Rev >= 20 only */ +#define BCMA_CC_GPIOIN 0x0060 +#define BCMA_CC_GPIOOUT 0x0064 +#define BCMA_CC_GPIOOUTEN 0x0068 +#define BCMA_CC_GPIOCTL 0x006C +#define BCMA_CC_GPIOPOL 0x0070 +#define BCMA_CC_GPIOIRQ 0x0074 +#define BCMA_CC_WATCHDOG 0x0080 +#define BCMA_CC_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define BCMA_CC_GPIOTIMER_OFFTIME 0x0000FFFF +#define BCMA_CC_GPIOTIMER_OFFTIME_SHIFT 0 +#define BCMA_CC_GPIOTIMER_ONTIME 0xFFFF0000 +#define BCMA_CC_GPIOTIMER_ONTIME_SHIFT 16 +#define BCMA_CC_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define BCMA_CC_CLOCK_N 0x0090 +#define BCMA_CC_CLOCK_SB 0x0094 +#define BCMA_CC_CLOCK_PCI 0x0098 +#define BCMA_CC_CLOCK_M2 0x009C +#define BCMA_CC_CLOCK_MIPS 0x00A0 +#define BCMA_CC_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define BCMA_CC_CLKDIV_SFLASH 0x0F000000 +#define BCMA_CC_CLKDIV_SFLASH_SHIFT 24 +#define BCMA_CC_CLKDIV_OTP 0x000F0000 +#define BCMA_CC_CLKDIV_OTP_SHIFT 16 +#define BCMA_CC_CLKDIV_JTAG 0x00000F00 +#define BCMA_CC_CLKDIV_JTAG_SHIFT 8 +#define BCMA_CC_CLKDIV_UART 0x000000FF +#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ +#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001 +#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002 +#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004 +#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ +#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040 +#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define BCMA_CC_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define BCMA_CC_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define BCMA_CC_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define BCMA_CC_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define BCMA_CC_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define BCMA_CC_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ +#define BCMA_CC_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define BCMA_CC_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define BCMA_CC_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define BCMA_CC_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define BCMA_CC_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define BCMA_CC_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define BCMA_CC_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define BCMA_CC_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define BCMA_CC_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define BCMA_CC_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define BCMA_CC_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define BCMA_CC_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define BCMA_CC_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define BCMA_CC_SYSCLKCTL_CLKDIV_SHIFT 16 +#define BCMA_CC_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define BCMA_CC_EROM 0x00FC +#define BCMA_CC_PCMCIA_CFG 0x0100 +#define BCMA_CC_PCMCIA_MEMWAIT 0x0104 +#define BCMA_CC_PCMCIA_ATTRWAIT 0x0108 +#define BCMA_CC_PCMCIA_IOWAIT 0x010C 
+#define BCMA_CC_IDE_CFG 0x0110 +#define BCMA_CC_IDE_MEMWAIT 0x0114 +#define BCMA_CC_IDE_ATTRWAIT 0x0118 +#define BCMA_CC_IDE_IOWAIT 0x011C +#define BCMA_CC_PROG_CFG 0x0120 +#define BCMA_CC_PROG_WAITCNT 0x0124 +#define BCMA_CC_FLASH_CFG 0x0128 +#define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define BCMA_CC_FLASH_WAITCNT 0x012C +#define BCMA_CC_SROM_CONTROL 0x0190 +#define BCMA_CC_SROM_CONTROL_START 0x80000000 +#define BCMA_CC_SROM_CONTROL_BUSY 0x80000000 +#define BCMA_CC_SROM_CONTROL_OPCODE 0x60000000 +#define BCMA_CC_SROM_CONTROL_OP_READ 0x00000000 +#define BCMA_CC_SROM_CONTROL_OP_WRITE 0x20000000 +#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000 +#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000 +#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010 +#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008 +#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006 +#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000 +#define BCMA_CC_SROM_CONTROL_SIZE_4K 0x00000002 +#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004 +#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1 +#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001 +/* Block 0x140 - 0x190 registers are chipset specific */ +#define BCMA_CC_4706_FLASHSCFG 0x18C /* Flash struct configuration */ +#define BCMA_CC_4706_FLASHSCFG_MASK 0x000000ff +#define BCMA_CC_4706_FLASHSCFG_SF1 0x00000001 /* 2nd serial flash present */ +#define BCMA_CC_4706_FLASHSCFG_PF1 0x00000002 /* 2nd parallel flash present */ +#define BCMA_CC_4706_FLASHSCFG_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */ +#define BCMA_CC_4706_FLASHSCFG_NF1 0x00000008 /* 2nd NAND flash present */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK 0x000000f0 +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */ +#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */ +/* NAND flash registers for BCM4706 (corerev = 31) */ +#define BCMA_CC_NFLASH_CTL 0x01A0 +#define BCMA_CC_NFLASH_CTL_ERR 0x08000000 +#define BCMA_CC_NFLASH_CONF 0x01A4 +#define BCMA_CC_NFLASH_COL_ADDR 0x01A8 +#define BCMA_CC_NFLASH_ROW_ADDR 0x01AC +#define BCMA_CC_NFLASH_DATA 0x01B0 +#define BCMA_CC_NFLASH_WAITCNT0 0x01B4 +/* 0x1E0 is defined as shared BCMA_CLKCTLST */ +#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ +#define BCMA_CC_UART0_DATA 0x0300 +#define BCMA_CC_UART0_IMR 0x0304 +#define BCMA_CC_UART0_FCR 0x0308 +#define BCMA_CC_UART0_LCR 0x030C +#define BCMA_CC_UART0_MCR 0x0310 +#define BCMA_CC_UART0_LSR 0x0314 +#define BCMA_CC_UART0_MSR 0x0318 +#define BCMA_CC_UART0_SCRATCH 0x031C +#define BCMA_CC_UART1_DATA 0x0400 +#define BCMA_CC_UART1_IMR 0x0404 +#define BCMA_CC_UART1_FCR 0x0408 +#define BCMA_CC_UART1_LCR 0x040C +#define BCMA_CC_UART1_MCR 0x0410 +#define BCMA_CC_UART1_LSR 0x0414 +#define BCMA_CC_UART1_MSR 0x0418 +#define BCMA_CC_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define BCMA_CC_PMU_CTL 0x0600 /* PMU control */ +#define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16 +#define BCMA_CC_PMU_CTL_RES 0x00006000 /* reset control mask */ +#define BCMA_CC_PMU_CTL_RES_SHIFT 13 +#define BCMA_CC_PMU_CTL_RES_RELOAD 0x2 /* reload POR values */ 
+#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400 +#define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define BCMA_CC_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define BCMA_CC_PMU_CTL_XTALFREQ_SHIFT 2 +#define BCMA_CC_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define BCMA_CC_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */ +#define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define BCMA_CC_PMU_STAT 0x0608 /* PMU status */ +#define BCMA_CC_PMU_STAT_EXT_LPO_AVAIL 0x00000100 +#define BCMA_CC_PMU_STAT_WDRESET 0x00000080 +#define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */ +#define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define BCMA_CC_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define BCMA_CC_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define BCMA_CC_PMU_RES_STAT 0x060C /* PMU res status */ +#define BCMA_CC_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define BCMA_CC_PMU_TIMER 0x0614 /* PMU timer */ +#define BCMA_CC_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define BCMA_CC_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define BCMA_CC_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define BCMA_CC_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define BCMA_CC_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define BCMA_CC_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define BCMA_CC_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define BCMA_CC_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650 +#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654 +#define BCMA_CC_PMU_REGCTL_ADDR 0x0658 +#define BCMA_CC_PMU_REGCTL_DATA 0x065C +#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660 +#define BCMA_CC_PMU_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ +#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ +#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_MASK 0x80000000 +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT 31 +#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ +/* NAND flash MLC controller registers (corerev >= 38) */ +#define BCMA_CC_NAND_REVISION 0x0C00 +#define BCMA_CC_NAND_CMD_START 0x0C04 +#define BCMA_CC_NAND_CMD_ADDR_X 0x0C08 +#define BCMA_CC_NAND_CMD_ADDR 0x0C0C +#define BCMA_CC_NAND_CMD_END_ADDR 0x0C10 +#define BCMA_CC_NAND_CS_NAND_SELECT 0x0C14 +#define BCMA_CC_NAND_CS_NAND_XOR 0x0C18 +#define BCMA_CC_NAND_SPARE_RD0 0x0C20 +#define BCMA_CC_NAND_SPARE_RD4 0x0C24 +#define BCMA_CC_NAND_SPARE_RD8 0x0C28 +#define BCMA_CC_NAND_SPARE_RD12 0x0C2C +#define BCMA_CC_NAND_SPARE_WR0 0x0C30 +#define BCMA_CC_NAND_SPARE_WR4 0x0C34 +#define BCMA_CC_NAND_SPARE_WR8 0x0C38 +#define BCMA_CC_NAND_SPARE_WR12 0x0C3C +#define BCMA_CC_NAND_ACC_CONTROL 0x0C40 +#define BCMA_CC_NAND_CONFIG 0x0C48 +#define BCMA_CC_NAND_TIMING_1 0x0C50 +#define BCMA_CC_NAND_TIMING_2 0x0C54 +#define BCMA_CC_NAND_SEMAPHORE 0x0C58 +#define BCMA_CC_NAND_DEVID 0x0C60 +#define BCMA_CC_NAND_DEVID_X 0x0C64 +#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68 +#define BCMA_CC_NAND_INTFC_STATUS 0x0C6C +#define BCMA_CC_NAND_ECC_CORR_ADDR_X 0x0C70 +#define 
BCMA_CC_NAND_ECC_CORR_ADDR 0x0C74 +#define BCMA_CC_NAND_ECC_UNC_ADDR_X 0x0C78 +#define BCMA_CC_NAND_ECC_UNC_ADDR 0x0C7C +#define BCMA_CC_NAND_READ_ERROR_COUNT 0x0C80 +#define BCMA_CC_NAND_CORR_STAT_THRESHOLD 0x0C84 +#define BCMA_CC_NAND_READ_ADDR_X 0x0C90 +#define BCMA_CC_NAND_READ_ADDR 0x0C94 +#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X 0x0C98 +#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C +#define BCMA_CC_NAND_COPY_BACK_ADDR_X 0x0CA0 +#define BCMA_CC_NAND_COPY_BACK_ADDR 0x0CA4 +#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X 0x0CA8 +#define BCMA_CC_NAND_BLOCK_ERASE_ADDR 0x0CAC +#define BCMA_CC_NAND_INV_READ_ADDR_X 0x0CB0 +#define BCMA_CC_NAND_INV_READ_ADDR 0x0CB4 +#define BCMA_CC_NAND_BLK_WR_PROTECT 0x0CC0 +#define BCMA_CC_NAND_ACC_CONTROL_CS1 0x0CD0 +#define BCMA_CC_NAND_CONFIG_CS1 0x0CD4 +#define BCMA_CC_NAND_TIMING_1_CS1 0x0CD8 +#define BCMA_CC_NAND_TIMING_2_CS1 0x0CDC +#define BCMA_CC_NAND_SPARE_RD16 0x0D30 +#define BCMA_CC_NAND_SPARE_RD20 0x0D34 +#define BCMA_CC_NAND_SPARE_RD24 0x0D38 +#define BCMA_CC_NAND_SPARE_RD28 0x0D3C +#define BCMA_CC_NAND_CACHE_ADDR 0x0D40 +#define BCMA_CC_NAND_CACHE_DATA 0x0D44 +#define BCMA_CC_NAND_CTRL_CONFIG 0x0D48 +#define BCMA_CC_NAND_CTRL_STATUS 0x0D4C + +/* Divider allocation in 4716/47162/5356 */ +#define BCMA_CC_PMU5_MAINPLL_CPU 1 +#define BCMA_CC_PMU5_MAINPLL_MEM 2 +#define BCMA_CC_PMU5_MAINPLL_SSB 3 + +/* PLL usage in 4716/47162 */ +#define BCMA_CC_PMU4716_MAINPLL_PLL0 12 + +/* PLL usage in 5356/5357 */ +#define BCMA_CC_PMU5356_MAINPLL_PLL0 0 +#define BCMA_CC_PMU5357_MAINPLL_PLL0 0 + +/* 4706 PMU */ +#define BCMA_CC_PMU4706_MAINPLL_PLL0 0 +#define BCMA_CC_PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */ +#define BCMA_CC_PMU6_4706_PROC_P2DIV_MASK 0x000f0000 +#define BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT 16 +#define BCMA_CC_PMU6_4706_PROC_P1DIV_MASK 0x0000f000 +#define BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT 12 +#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8 +#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT 3 +#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 +#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0 + +/* PMU rev 15 */ +#define BCMA_CC_PMU15_PLL_PLLCTL0 0 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + +/* ALP clock on pre-PMU chips */ +#define BCMA_CC_PMU_ALP_CLOCK 20000000 +/* HT clock for systems with PMU-enabled chipcommon */ +#define BCMA_CC_PMU_HT_CLOCK 80000000 + +/* PMU rev 5 (& 6) */ +#define BCMA_CC_PPL_P1P2_OFF 0 +#define BCMA_CC_PPL_P1_MASK 0x0f000000 +#define BCMA_CC_PPL_P1_SHIFT 24 +#define BCMA_CC_PPL_P2_MASK 0x00f00000 +#define BCMA_CC_PPL_P2_SHIFT 20 +#define BCMA_CC_PPL_M14_OFF 1 +#define BCMA_CC_PPL_MDIV_MASK 0x000000ff +#define BCMA_CC_PPL_MDIV_WIDTH 8 +#define BCMA_CC_PPL_NM5_OFF 2 +#define BCMA_CC_PPL_NDIV_MASK 0xfff00000 +#define BCMA_CC_PPL_NDIV_SHIFT 20 +#define BCMA_CC_PPL_FMAB_OFF 3 +#define BCMA_CC_PPL_MRAT_MASK 0xf0000000 +#define BCMA_CC_PPL_MRAT_SHIFT 28 
+#define BCMA_CC_PPL_ABRAT_MASK 0x08000000
+#define BCMA_CC_PPL_ABRAT_SHIFT 27
+#define BCMA_CC_PPL_FDIV_MASK 0x07ffffff
+#define BCMA_CC_PPL_PLLCTL_OFF 4
+#define BCMA_CC_PPL_PCHI_OFF 5
+#define BCMA_CC_PPL_PCHI_MASK 0x0000003f
+
+#define BCMA_CC_PMU_PLL_CTL0 0
+#define BCMA_CC_PMU_PLL_CTL1 1
+#define BCMA_CC_PMU_PLL_CTL2 2
+#define BCMA_CC_PMU_PLL_CTL3 3
+#define BCMA_CC_PMU_PLL_CTL4 4
+#define BCMA_CC_PMU_PLL_CTL5 5
+
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT 20
+
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
+#define BCMA_CCB_MII_MNG_CTL 0x0000
+#define BCMA_CCB_MII_MNG_CMD_DATA 0x0004
+
+/* BCM4331 ChipControl numbers. */
+#define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */
+#define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JTAG functional) */
+#define BCMA_CHIPCTL_4331_EXT_LNA BIT(2) /* 0 disable */
+#define BCMA_CHIPCTL_4331_SPROM_GPIO13_15 BIT(3) /* sprom/gpio13-15 mux */
+#define BCMA_CHIPCTL_4331_EXTPA_EN BIT(4) /* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_GPIOCLK_ON_SPROMCS BIT(5) /* set drive out GPIO_CLK on sprom_cs pin */
+#define BCMA_CHIPCTL_4331_PCIE_MDIO_ON_SPROMCS BIT(6) /* use sprom_cs pin as PCIE mdio interface */
+#define BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5 BIT(7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXCLKEN BIT(8) /* override core control on pipe_AuxClkEnable */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN BIT(9) /* override core control on pipe_AuxPowerDown */
+#define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN BIT(10) /* pcie_auxclkenable */
+#define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN BIT(11) /* pcie_pipe_pllpowerdown */
+#define BCMA_CHIPCTL_4331_EXTPA_EN2 BIT(12) /* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4 BIT(16) /* enable bt_shd0 at gpio4 */
+#define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5 BIT(17) /* enable bt_shd1 at gpio5 */
+
+/* 43224 chip-specific ChipControl register bits */
+#define BCMA_CCTRL_43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define BCMA_CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define BCMA_CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
+
+/* 4313 chip-specific ChipControl register bits */
+#define BCMA_CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
+
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3 BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE BIT(19)
+
+#define BCMA_RES_4314_LPLDO_PU BIT(0)
+#define BCMA_RES_4314_PMU_SLEEP_DIS BIT(1)
+#define BCMA_RES_4314_PMU_BG_PU BIT(2)
+#define BCMA_RES_4314_CBUCK_LPOM_PU BIT(3)
+#define BCMA_RES_4314_CBUCK_PFM_PU BIT(4)
+#define BCMA_RES_4314_CLDO_PU BIT(5)
+#define BCMA_RES_4314_LPLDO2_LVM BIT(6)
+#define BCMA_RES_4314_WL_PMU_PU BIT(7)
+#define BCMA_RES_4314_LNLDO_PU BIT(8)
+#define BCMA_RES_4314_LDO3P3_PU BIT(9)
+#define BCMA_RES_4314_OTP_PU BIT(10)
+#define BCMA_RES_4314_XTAL_PU BIT(11)
+#define BCMA_RES_4314_WL_PWRSW_PU BIT(12)
+#define BCMA_RES_4314_LQ_AVAIL BIT(13)
+#define BCMA_RES_4314_LOGIC_RET BIT(14)
+#define BCMA_RES_4314_MEM_SLEEP BIT(15)
+#define BCMA_RES_4314_MACPHY_RET BIT(16)
+#define BCMA_RES_4314_WL_CORE_READY BIT(17)
+#define BCMA_RES_4314_ILP_REQ BIT(18)
+#define
BCMA_RES_4314_ALP_AVAIL BIT(19) +#define BCMA_RES_4314_MISC_PWRSW_PU BIT(20) +#define BCMA_RES_4314_SYNTH_PWRSW_PU BIT(21) +#define BCMA_RES_4314_RX_PWRSW_PU BIT(22) +#define BCMA_RES_4314_RADIO_PU BIT(23) +#define BCMA_RES_4314_VCO_LDO_PU BIT(24) +#define BCMA_RES_4314_AFE_LDO_PU BIT(25) +#define BCMA_RES_4314_RX_LDO_PU BIT(26) +#define BCMA_RES_4314_TX_LDO_PU BIT(27) +#define BCMA_RES_4314_HT_AVAIL BIT(28) +#define BCMA_RES_4314_MACPHY_CLK_AVAIL BIT(29) + +/* Data for the PMU, if available. + * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) + */ +struct bcma_chipcommon_pmu { + struct bcma_device *core; /* Can be separated core or just ChipCommon one */ + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + +#ifdef CONFIG_BCMA_PFLASH +struct bcma_pflash { + bool present; +}; +#endif + +#ifdef CONFIG_BCMA_SFLASH +struct mtd_info; + +struct bcma_sflash { + bool present; + u32 blocksize; + u16 numblocks; + u32 size; +}; +#endif + +#ifdef CONFIG_BCMA_NFLASH +struct bcma_nflash { + bool present; + bool boot; /* This is the flash the SoC boots from */ +}; +#endif + +#ifdef CONFIG_BCMA_DRIVER_MIPS +struct bcma_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; +#endif /* CONFIG_BCMA_DRIVER_MIPS */ + +struct bcma_drv_cc { + struct bcma_device *core; + u32 status; + u32 capabilities; + u32 capabilities_ext; + u8 setup_done:1; + u8 early_setup_done:1; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + struct bcma_chipcommon_pmu pmu; +#ifdef CONFIG_BCMA_PFLASH + struct bcma_pflash pflash; +#endif +#ifdef CONFIG_BCMA_SFLASH + struct bcma_sflash sflash; +#endif +#ifdef CONFIG_BCMA_NFLASH + struct bcma_nflash nflash; +#endif + +#ifdef CONFIG_BCMA_DRIVER_MIPS + int nr_serial_ports; + struct bcma_serial_port serial_ports[4]; +#endif /* CONFIG_BCMA_DRIVER_MIPS */ + u32 ticks_per_ms; + struct platform_device *watchdog; + + /* Lock for GPIO register access. 
*/ + spinlock_t gpio_lock; +#ifdef CONFIG_BCMA_DRIVER_GPIO + struct gpio_chip gpio; +#endif +}; + +struct bcma_drv_cc_b { + struct bcma_device *core; + u8 setup_done:1; + void __iomem *mii; +}; + +/* Register access */ +#define bcma_cc_read32(cc, offset) \ + bcma_read32((cc)->core, offset) +#define bcma_cc_write32(cc, offset, val) \ + bcma_write32((cc)->core, offset, val) + +#define bcma_cc_mask32(cc, offset, mask) \ + bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) & (mask)) +#define bcma_cc_set32(cc, offset, set) \ + bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) | (set)) +#define bcma_cc_maskset32(cc, offset, mask, set) \ + bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) + +/* PMU registers access */ +#define bcma_pmu_read32(cc, offset) \ + bcma_read32((cc)->pmu.core, offset) +#define bcma_pmu_write32(cc, offset, val) \ + bcma_write32((cc)->pmu.core, offset, val) + +#define bcma_pmu_mask32(cc, offset, mask) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask)) +#define bcma_pmu_set32(cc, offset, set) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set)) +#define bcma_pmu_maskset32(cc, offset, mask, set) \ + bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set)) + +extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); + +extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); + +void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value); + +u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask); + +/* Chipcommon GPIO pin access. */ +u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask); +u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value); + +/* PMU support */ +extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, + u32 value); +extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, + u32 mask, u32 set); +extern void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set); +extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set); +extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid); + +extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc); + +void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value); + +#endif /* LINUX_BCMA_DRIVER_CC_H_ */ diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h new file mode 100644 index 0000000..420e222 --- /dev/null +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_ +#define LINUX_BCMA_DRIVER_GMAC_CMN_H_ + +#include + +#define BCMA_GMAC_CMN_STAG0 0x000 +#define BCMA_GMAC_CMN_STAG1 0x004 +#define BCMA_GMAC_CMN_STAG2 0x008 +#define BCMA_GMAC_CMN_STAG3 0x00C +#define BCMA_GMAC_CMN_PARSER_CTL 0x020 +#define BCMA_GMAC_CMN_MIB_MAX_LEN 0x024 +#define BCMA_GMAC_CMN_PHY_ACCESS 0x100 +#define BCMA_GMAC_CMN_PA_DATA_MASK 0x0000ffff +#define BCMA_GMAC_CMN_PA_ADDR_MASK 
0x001f0000 +#define BCMA_GMAC_CMN_PA_ADDR_SHIFT 16 +#define BCMA_GMAC_CMN_PA_REG_MASK 0x1f000000 +#define BCMA_GMAC_CMN_PA_REG_SHIFT 24 +#define BCMA_GMAC_CMN_PA_WRITE 0x20000000 +#define BCMA_GMAC_CMN_PA_START 0x40000000 +#define BCMA_GMAC_CMN_PHY_CTL 0x104 +#define BCMA_GMAC_CMN_PC_EPA_MASK 0x0000001f +#define BCMA_GMAC_CMN_PC_MCT_MASK 0x007f0000 +#define BCMA_GMAC_CMN_PC_MCT_SHIFT 16 +#define BCMA_GMAC_CMN_PC_MTE 0x00800000 +#define BCMA_GMAC_CMN_GMAC0_RGMII_CTL 0x110 +#define BCMA_GMAC_CMN_CFP_ACCESS 0x200 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA0 0x210 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA1 0x214 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA2 0x218 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA3 0x21C +#define BCMA_GMAC_CMN_CFP_TCAM_DATA4 0x220 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA5 0x224 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA6 0x228 +#define BCMA_GMAC_CMN_CFP_TCAM_DATA7 0x22C +#define BCMA_GMAC_CMN_CFP_TCAM_MASK0 0x230 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK1 0x234 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK2 0x238 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK3 0x23C +#define BCMA_GMAC_CMN_CFP_TCAM_MASK4 0x240 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK5 0x244 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK6 0x248 +#define BCMA_GMAC_CMN_CFP_TCAM_MASK7 0x24C +#define BCMA_GMAC_CMN_CFP_ACTION_DATA 0x250 +#define BCMA_GMAC_CMN_TCAM_BIST_CTL 0x2A0 +#define BCMA_GMAC_CMN_TCAM_BIST_STATUS 0x2A4 +#define BCMA_GMAC_CMN_TCAM_CMP_STATUS 0x2A8 +#define BCMA_GMAC_CMN_TCAM_DISABLE 0x2AC +#define BCMA_GMAC_CMN_TCAM_TEST_CTL 0x2F0 +#define BCMA_GMAC_CMN_UDF_0_A3_A0 0x300 +#define BCMA_GMAC_CMN_UDF_0_A7_A4 0x304 +#define BCMA_GMAC_CMN_UDF_0_A8 0x308 +#define BCMA_GMAC_CMN_UDF_1_A3_A0 0x310 +#define BCMA_GMAC_CMN_UDF_1_A7_A4 0x314 +#define BCMA_GMAC_CMN_UDF_1_A8 0x318 +#define BCMA_GMAC_CMN_UDF_2_A3_A0 0x320 +#define BCMA_GMAC_CMN_UDF_2_A7_A4 0x324 +#define BCMA_GMAC_CMN_UDF_2_A8 0x328 +#define BCMA_GMAC_CMN_UDF_0_B3_B0 0x330 +#define BCMA_GMAC_CMN_UDF_0_B7_B4 0x334 +#define BCMA_GMAC_CMN_UDF_0_B8 0x338 +#define BCMA_GMAC_CMN_UDF_1_B3_B0 0x340 +#define BCMA_GMAC_CMN_UDF_1_B7_B4 0x344 +#define BCMA_GMAC_CMN_UDF_1_B8 0x348 +#define BCMA_GMAC_CMN_UDF_2_B3_B0 0x350 +#define BCMA_GMAC_CMN_UDF_2_B7_B4 0x354 +#define BCMA_GMAC_CMN_UDF_2_B8 0x358 +#define BCMA_GMAC_CMN_UDF_0_C3_C0 0x360 +#define BCMA_GMAC_CMN_UDF_0_C7_C4 0x364 +#define BCMA_GMAC_CMN_UDF_0_C8 0x368 +#define BCMA_GMAC_CMN_UDF_1_C3_C0 0x370 +#define BCMA_GMAC_CMN_UDF_1_C7_C4 0x374 +#define BCMA_GMAC_CMN_UDF_1_C8 0x378 +#define BCMA_GMAC_CMN_UDF_2_C3_C0 0x380 +#define BCMA_GMAC_CMN_UDF_2_C7_C4 0x384 +#define BCMA_GMAC_CMN_UDF_2_C8 0x388 +#define BCMA_GMAC_CMN_UDF_0_D3_D0 0x390 +#define BCMA_GMAC_CMN_UDF_0_D7_D4 0x394 +#define BCMA_GMAC_CMN_UDF_0_D11_D8 0x394 + +struct bcma_drv_gmac_cmn { + struct bcma_device *core; + + /* Drivers accessing BCMA_GMAC_CMN_PHY_ACCESS and + * BCMA_GMAC_CMN_PHY_CTL need to take that mutex first. 
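+ *
+ * A minimal sketch of a PHY register write through these registers
+ * (illustrative only, not the canonical accessor; "phy", "reg" and "val"
+ * are caller-supplied values):
+ *
+ *	mutex_lock(&gc->phy_mutex);
+ *	gmac_cmn_write32(gc, BCMA_GMAC_CMN_PHY_ACCESS,
+ *			 BCMA_GMAC_CMN_PA_START | BCMA_GMAC_CMN_PA_WRITE |
+ *			 (phy << BCMA_GMAC_CMN_PA_ADDR_SHIFT) |
+ *			 (reg << BCMA_GMAC_CMN_PA_REG_SHIFT) |
+ *			 (val & BCMA_GMAC_CMN_PA_DATA_MASK));
+ *	while (gmac_cmn_read32(gc, BCMA_GMAC_CMN_PHY_ACCESS) &
+ *	       BCMA_GMAC_CMN_PA_START)
+ *		cpu_relax();
+ *	mutex_unlock(&gc->phy_mutex);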
*/ + struct mutex phy_mutex; +}; + +/* Register access */ +#define gmac_cmn_read16(gc, offset) bcma_read16((gc)->core, offset) +#define gmac_cmn_read32(gc, offset) bcma_read32((gc)->core, offset) +#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val) +#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) + +#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h new file mode 100644 index 0000000..798013f --- /dev/null +++ b/include/linux/bcma/bcma_driver_mips.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_MIPS_H_ +#define LINUX_BCMA_DRIVER_MIPS_H_ + +#define BCMA_MIPS_IPSFLAG 0x0F08 +/* which sbflags get routed to mips interrupt 1 */ +#define BCMA_MIPS_IPSFLAG_IRQ1 0x0000003F +#define BCMA_MIPS_IPSFLAG_IRQ1_SHIFT 0 +/* which sbflags get routed to mips interrupt 2 */ +#define BCMA_MIPS_IPSFLAG_IRQ2 0x00003F00 +#define BCMA_MIPS_IPSFLAG_IRQ2_SHIFT 8 +/* which sbflags get routed to mips interrupt 3 */ +#define BCMA_MIPS_IPSFLAG_IRQ3 0x003F0000 +#define BCMA_MIPS_IPSFLAG_IRQ3_SHIFT 16 +/* which sbflags get routed to mips interrupt 4 */ +#define BCMA_MIPS_IPSFLAG_IRQ4 0x3F000000 +#define BCMA_MIPS_IPSFLAG_IRQ4_SHIFT 24 + +/* MIPS 74K core registers */ +#define BCMA_MIPS_MIPS74K_CORECTL 0x0000 +#define BCMA_MIPS_MIPS74K_EXCEPTBASE 0x0004 +#define BCMA_MIPS_MIPS74K_BIST 0x000C +#define BCMA_MIPS_MIPS74K_INTMASK_INT0 0x0014 +#define BCMA_MIPS_MIPS74K_INTMASK(int) \ + ((int) * 4 + BCMA_MIPS_MIPS74K_INTMASK_INT0) +#define BCMA_MIPS_MIPS74K_NMIMASK 0x002C +#define BCMA_MIPS_MIPS74K_GPIOSEL 0x0040 +#define BCMA_MIPS_MIPS74K_GPIOOUT 0x0044 +#define BCMA_MIPS_MIPS74K_GPIOEN 0x0048 +#define BCMA_MIPS_MIPS74K_CLKCTLST 0x01E0 + +#define BCMA_MIPS_OOBSELINA74 0x004 +#define BCMA_MIPS_OOBSELOUTA30 0x100 + +struct bcma_device; + +struct bcma_drv_mips { + struct bcma_device *core; + u8 setup_done:1; + u8 early_setup_done:1; +}; + +extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore); + +#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h new file mode 100644 index 0000000..68da8db --- /dev/null +++ b/include/linux/bcma/bcma_driver_pci.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_PCI_H_ +#define LINUX_BCMA_DRIVER_PCI_H_ + +#include + +struct pci_dev; + +/** PCI core registers. 
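+ * Accessed via the pcicore_read16/32() and pcicore_write16/32() helpers
+ * defined at the end of this header.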
**/ +#define BCMA_CORE_PCI_CTL 0x0000 /* PCI Control */ +#define BCMA_CORE_PCI_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define BCMA_CORE_PCI_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define BCMA_CORE_PCI_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define BCMA_CORE_PCI_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define BCMA_CORE_PCI_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define BCMA_CORE_PCI_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define BCMA_CORE_PCI_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define BCMA_CORE_PCI_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define BCMA_CORE_PCI_ISTAT 0x0020 /* Interrupt status */ +#define BCMA_CORE_PCI_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define BCMA_CORE_PCI_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define BCMA_CORE_PCI_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define BCMA_CORE_PCI_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define BCMA_CORE_PCI_ISTAT_PME 0x00000010 /* PCI PME# */ +#define BCMA_CORE_PCI_IMASK 0x0024 /* Interrupt mask */ +#define BCMA_CORE_PCI_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define BCMA_CORE_PCI_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define BCMA_CORE_PCI_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define BCMA_CORE_PCI_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define BCMA_CORE_PCI_IMASK_PME 0x00000010 /* PCI PME# */ +#define BCMA_CORE_PCI_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define BCMA_CORE_PCI_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define BCMA_CORE_PCI_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define BCMA_CORE_PCI_BCAST_ADDR_MASK 0x000000FF +#define BCMA_CORE_PCI_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define BCMA_CORE_PCI_GPIO_IN 0x0060 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_CTL 0x006C /* rev >= 2 only */ +#define BCMA_CORE_PCI_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define BCMA_CORE_PCI_SBTOPCI0_MASK 0xFC000000 +#define BCMA_CORE_PCI_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define BCMA_CORE_PCI_SBTOPCI1_MASK 0xFC000000 +#define BCMA_CORE_PCI_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define BCMA_CORE_PCI_SBTOPCI2_MASK 0xC0000000 +#define BCMA_CORE_PCI_CONFIG_ADDR 0x0120 /* pcie config space access */ +#define BCMA_CORE_PCI_CONFIG_DATA 0x0124 /* pcie config space access */ +#define BCMA_CORE_PCI_MDIO_CONTROL 0x0128 /* controls the mdio access */ +#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define 
BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL 0x2
+#define BCMA_CORE_PCI_MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
+#define BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
+#define BCMA_CORE_PCI_MDIO_DATA 0x012c /* Data to the mdio access */
+#define BCMA_CORE_PCI_MDIODATA_MASK 0x0000ffff /* data 2 bytes */
+#define BCMA_CORE_PCI_MDIODATA_TA 0x00020000 /* Turnaround */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
+#define BCMA_CORE_PCI_MDIODATA_WRITE 0x10000000 /* write Transaction */
+#define BCMA_CORE_PCI_MDIODATA_READ 0x20000000 /* Read Transaction */
+#define BCMA_CORE_PCI_MDIODATA_START 0x40000000 /* start of Transaction */
+#define BCMA_CORE_PCI_MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
+#define BCMA_CORE_PCI_MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
+#define BCMA_CORE_PCI_MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
+#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
+#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
+#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
+#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
+#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
+#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
+#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
+#define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */
+#define BCMA_CORE_PCI_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */
+#define BCMA_CORE_PCI_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */
+#define BCMA_CORE_PCI_SPROM_PI_OFFSET 0 /* first word */
+#define BCMA_CORE_PCI_SPROM_PI_MASK 0xf000 /* bit 15:12 */
+#define BCMA_CORE_PCI_SPROM_PI_SHIFT 12 /* bit 15:12 */
+#define BCMA_CORE_PCI_SPROM_MISC_CONFIG 5 /* word 5 */
+#define BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
+#define BCMA_CORE_PCI_SPROM_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define BCMA_CORE_PCI_SPROM_CLKREQ_ENB 0x0800 /* bit 11 */
+
+/* SBtoPCIx */
+#define BCMA_CORE_PCI_SBTOPCI_MEM 0x00000000
+#define BCMA_CORE_PCI_SBTOPCI_IO 0x00000001
+#define BCMA_CORE_PCI_SBTOPCI_CFG0 0x00000002
+#define BCMA_CORE_PCI_SBTOPCI_CFG1 0x00000003
+#define BCMA_CORE_PCI_SBTOPCI_PREF 0x00000004 /* Prefetch enable */
+#define BCMA_CORE_PCI_SBTOPCI_BURST 0x00000008 /* Burst enable */
+#define BCMA_CORE_PCI_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */
+#define BCMA_CORE_PCI_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */
+#define BCMA_CORE_PCI_SBTOPCI_RC_READ 0x00000000 /* Memory read */
+#define BCMA_CORE_PCI_SBTOPCI_RC_READL 0x00000010 /* Memory read line */
+#define BCMA_CORE_PCI_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */
+
+/* PCIE protocol PHY diagnostic registers */
+#define BCMA_CORE_PCI_PLP_MODEREG 0x200 /* Mode */
+#define
BCMA_CORE_PCI_PLP_STATUSREG 0x204 /* Status */ +#define BCMA_CORE_PCI_PLP_POLARITYINV_STAT 0x10 /* Status reg PCIE_PLP_STATUSREG */ +#define BCMA_CORE_PCI_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */ +#define BCMA_CORE_PCI_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */ +#define BCMA_CORE_PCI_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */ +#define BCMA_CORE_PCI_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */ +#define BCMA_CORE_PCI_PLP_ATTNREG 0x218 /* Attention */ +#define BCMA_CORE_PCI_PLP_ATTNMASKREG 0x21C /* Attention Mask */ +#define BCMA_CORE_PCI_PLP_RXERRCTR 0x220 /* Rx Error */ +#define BCMA_CORE_PCI_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */ +#define BCMA_CORE_PCI_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */ +#define BCMA_CORE_PCI_PLP_TESTCTRLREG 0x22C /* Test Control reg */ +#define BCMA_CORE_PCI_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */ +#define BCMA_CORE_PCI_PLP_TIMINGOVRDREG 0x234 /* Timing param override */ +#define BCMA_CORE_PCI_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */ +#define BCMA_CORE_PCI_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */ + +/* PCIE protocol DLLP diagnostic registers */ +#define BCMA_CORE_PCI_DLLP_LCREG 0x100 /* Link Control */ +#define BCMA_CORE_PCI_DLLP_LSREG 0x104 /* Link Status */ +#define BCMA_CORE_PCI_DLLP_LAREG 0x108 /* Link Attention */ +#define BCMA_CORE_PCI_DLLP_LSREG_LINKUP (1 << 16) +#define BCMA_CORE_PCI_DLLP_LAMASKREG 0x10C /* Link Attention Mask */ +#define BCMA_CORE_PCI_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */ +#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */ +#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */ +#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */ +#define BCMA_CORE_PCI_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */ +#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */ +#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */ +#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */ +#define BCMA_CORE_PCI_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */ +#define BCMA_CORE_PCI_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */ +#define BCMA_CORE_PCI_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */ +#define BCMA_CORE_PCI_DLLP_ERRCTRREG 0x144 /* Error Counter */ +#define BCMA_CORE_PCI_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */ +#define BCMA_CORE_PCI_DLLP_TESTREG 0x14C /* Test */ +#define BCMA_CORE_PCI_DLLP_PKTBIST 0x150 /* Packet BIST */ +#define BCMA_CORE_PCI_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */ + +/* SERDES RX registers */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL 1 /* Rx cntrl */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */ +#define BCMA_CORE_PCI_SERDES_RX_TIMER1 2 /* Rx Timer1 */ +#define BCMA_CORE_PCI_SERDES_RX_CDR 6 /* CDR */ +#define BCMA_CORE_PCI_SERDES_RX_CDRBW 7 /* CDR BW */ + +/* SERDES PLL registers */ +#define BCMA_CORE_PCI_SERDES_PLL_CTRL 1 /* PLL control reg */ +#define BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ + +/* PCIcore specific boardflags */ +#define BCMA_CORE_PCI_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + +/* PCIE Config space accessing MACROS */ +#define 
BCMA_CORE_PCI_CFG_BUS_SHIFT 24 /* Bus shift */ +#define BCMA_CORE_PCI_CFG_SLOT_SHIFT 19 /* Slot/Device shift */ +#define BCMA_CORE_PCI_CFG_FUN_SHIFT 16 /* Function shift */ +#define BCMA_CORE_PCI_CFG_OFF_SHIFT 0 /* Register shift */ + +#define BCMA_CORE_PCI_CFG_BUS_MASK 0xff /* Bus mask */ +#define BCMA_CORE_PCI_CFG_SLOT_MASK 0x1f /* Slot/Device mask */ +#define BCMA_CORE_PCI_CFG_FUN_MASK 7 /* Function mask */ +#define BCMA_CORE_PCI_CFG_OFF_MASK 0xfff /* Register mask */ + +#define BCMA_CORE_PCI_CFG_DEVCTRL 0xd8 + +#define BCMA_CORE_PCI_ + +/* MDIO devices (SERDES modules) */ +#define BCMA_CORE_PCI_MDIO_IEEE0 0x000 +#define BCMA_CORE_PCI_MDIO_IEEE1 0x001 +#define BCMA_CORE_PCI_MDIO_BLK0 0x800 +#define BCMA_CORE_PCI_MDIO_BLK1 0x801 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT0 0x16 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT1 0x17 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT2 0x18 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT3 0x19 +#define BCMA_CORE_PCI_MDIO_BLK1_MGMT4 0x1A +#define BCMA_CORE_PCI_MDIO_BLK2 0x802 +#define BCMA_CORE_PCI_MDIO_BLK3 0x803 +#define BCMA_CORE_PCI_MDIO_BLK4 0x804 +#define BCMA_CORE_PCI_MDIO_TXPLL 0x808 /* TXPLL register block idx */ +#define BCMA_CORE_PCI_MDIO_TXCTRL0 0x820 +#define BCMA_CORE_PCI_MDIO_SERDESID 0x831 +#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840 + +/* PCIE Root Capability Register bits (Host mode only) */ +#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001 + +struct bcma_drv_pci; +struct bcma_bus; + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE +struct bcma_drv_pci_host { + struct bcma_drv_pci *pdev; + + u32 host_cfg_addr; + spinlock_t cfgspace_lock; + + struct pci_controller pci_controller; + struct pci_ops pci_ops; + struct resource mem_resource; + struct resource io_resource; +}; +#endif + +struct bcma_drv_pci { + struct bcma_device *core; + u8 early_setup_done:1; + u8 setup_done:1; + u8 hostmode:1; + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE + struct bcma_drv_pci_host *host_controller; +#endif +}; + +/* Register access */ +#define pcicore_read16(pc, offset) bcma_read16((pc)->core, offset) +#define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset) +#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) +#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) + +#ifdef CONFIG_BCMA_DRIVER_PCI +extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); +#else +static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) +{ +} +#endif + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE +extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); +extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); +#else +static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev) +{ + return -ENOTSUPP; +} +static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev) +{ + return -ENOTSUPP; +} +#endif + +#endif /* LINUX_BCMA_DRIVER_PCI_H_ */ diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h new file mode 100644 index 0000000..91ce515 --- /dev/null +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_DRIVER_PCIE2_H_ +#define LINUX_BCMA_DRIVER_PCIE2_H_ + +#define BCMA_CORE_PCIE2_CLK_CONTROL 0x0000 +#define PCIE2_CLKC_RST_OE 0x0001 /* When set, drives PCI_RESET out to pin */ +#define PCIE2_CLKC_RST 0x0002 /* Value driven out to pin */ +#define PCIE2_CLKC_SPERST 0x0004 /* SurvivePeRst */ +#define PCIE2_CLKC_DISABLE_L1CLK_GATING 0x0010 +#define PCIE2_CLKC_DLYPERST 0x0100 /* Delay PeRst to CoE Core 
*/ +#define PCIE2_CLKC_DISSPROMLD 0x0200 /* DisableSpromLoadOnPerst */ +#define PCIE2_CLKC_WAKE_MODE_L2 0x1000 /* Wake on L2 */ +#define BCMA_CORE_PCIE2_RC_PM_CONTROL 0x0004 +#define BCMA_CORE_PCIE2_RC_PM_STATUS 0x0008 +#define BCMA_CORE_PCIE2_EP_PM_CONTROL 0x000C +#define BCMA_CORE_PCIE2_EP_PM_STATUS 0x0010 +#define BCMA_CORE_PCIE2_EP_LTR_CONTROL 0x0014 +#define BCMA_CORE_PCIE2_EP_LTR_STATUS 0x0018 +#define BCMA_CORE_PCIE2_EP_OBFF_STATUS 0x001C +#define BCMA_CORE_PCIE2_PCIE_ERR_STATUS 0x0020 +#define BCMA_CORE_PCIE2_RC_AXI_CONFIG 0x0100 +#define BCMA_CORE_PCIE2_EP_AXI_CONFIG 0x0104 +#define BCMA_CORE_PCIE2_RXDEBUG_STATUS0 0x0108 +#define BCMA_CORE_PCIE2_RXDEBUG_CONTROL0 0x010C +#define BCMA_CORE_PCIE2_CONFIGINDADDR 0x0120 +#define BCMA_CORE_PCIE2_CONFIGINDDATA 0x0124 +#define BCMA_CORE_PCIE2_MDIOCONTROL 0x0128 +#define BCMA_CORE_PCIE2_MDIOWRDATA 0x012C +#define BCMA_CORE_PCIE2_MDIORDDATA 0x0130 +#define BCMA_CORE_PCIE2_DATAINTF 0x0180 +#define BCMA_CORE_PCIE2_D2H_INTRLAZY_0 0x0188 +#define BCMA_CORE_PCIE2_H2D_INTRLAZY_0 0x018c +#define BCMA_CORE_PCIE2_H2D_INTSTAT_0 0x0190 +#define BCMA_CORE_PCIE2_H2D_INTMASK_0 0x0194 +#define BCMA_CORE_PCIE2_D2H_INTSTAT_0 0x0198 +#define BCMA_CORE_PCIE2_D2H_INTMASK_0 0x019c +#define BCMA_CORE_PCIE2_LTR_STATE 0x01A0 /* Latency Tolerance Reporting */ +#define PCIE2_LTR_ACTIVE 2 +#define PCIE2_LTR_ACTIVE_IDLE 1 +#define PCIE2_LTR_SLEEP 0 +#define PCIE2_LTR_FINAL_MASK 0x300 +#define PCIE2_LTR_FINAL_SHIFT 8 +#define BCMA_CORE_PCIE2_PWR_INT_STATUS 0x01A4 +#define BCMA_CORE_PCIE2_PWR_INT_MASK 0x01A8 +#define BCMA_CORE_PCIE2_CFG_ADDR 0x01F8 +#define BCMA_CORE_PCIE2_CFG_DATA 0x01FC +#define BCMA_CORE_PCIE2_SYS_EQ_PAGE 0x0200 +#define BCMA_CORE_PCIE2_SYS_MSI_PAGE 0x0204 +#define BCMA_CORE_PCIE2_SYS_MSI_INTREN 0x0208 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL0 0x0210 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL1 0x0214 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL2 0x0218 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL3 0x021C +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL4 0x0220 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL5 0x0224 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD0 0x0250 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL0 0x0254 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD1 0x0258 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL1 0x025C +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD2 0x0260 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL2 0x0264 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD3 0x0268 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL3 0x026C +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD4 0x0270 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL4 0x0274 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD5 0x0278 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL5 0x027C +#define BCMA_CORE_PCIE2_SYS_RC_INTX_EN 0x0330 +#define BCMA_CORE_PCIE2_SYS_RC_INTX_CSR 0x0334 +#define BCMA_CORE_PCIE2_SYS_MSI_REQ 0x0340 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR_EN 0x0344 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR_CSR 0x0348 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR0 0x0350 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR1 0x0354 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR2 0x0358 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR3 0x035C +#define BCMA_CORE_PCIE2_SYS_EP_INT_EN0 0x0360 +#define BCMA_CORE_PCIE2_SYS_EP_INT_EN1 0x0364 +#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR0 0x0370 +#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR1 0x0374 +#define BCMA_CORE_PCIE2_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_0 0x0C00 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_1 0x0C04 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_2 0x0C08 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_3 0x0C0C +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_4 0x0C10 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_5 0x0C14 
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_6 0x0C18 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_7 0x0C1C +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_0 0x0C20 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_1 0x0C24 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_2 0x0C28 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_3 0x0C2C +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_4 0x0C30 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_5 0x0C34 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_6 0x0C38 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_7 0x0C3C +#define BCMA_CORE_PCIE2_FUNC0_IMAP1 0x0C80 +#define BCMA_CORE_PCIE2_FUNC1_IMAP1 0x0C88 +#define BCMA_CORE_PCIE2_FUNC0_IMAP2 0x0CC0 +#define BCMA_CORE_PCIE2_FUNC1_IMAP2 0x0CC8 +#define BCMA_CORE_PCIE2_IARR0_LOWER 0x0D00 +#define BCMA_CORE_PCIE2_IARR0_UPPER 0x0D04 +#define BCMA_CORE_PCIE2_IARR1_LOWER 0x0D08 +#define BCMA_CORE_PCIE2_IARR1_UPPER 0x0D0C +#define BCMA_CORE_PCIE2_IARR2_LOWER 0x0D10 +#define BCMA_CORE_PCIE2_IARR2_UPPER 0x0D14 +#define BCMA_CORE_PCIE2_OARR0 0x0D20 +#define BCMA_CORE_PCIE2_OARR1 0x0D28 +#define BCMA_CORE_PCIE2_OARR2 0x0D30 +#define BCMA_CORE_PCIE2_OMAP0_LOWER 0x0D40 +#define BCMA_CORE_PCIE2_OMAP0_UPPER 0x0D44 +#define BCMA_CORE_PCIE2_OMAP1_LOWER 0x0D48 +#define BCMA_CORE_PCIE2_OMAP1_UPPER 0x0D4C +#define BCMA_CORE_PCIE2_OMAP2_LOWER 0x0D50 +#define BCMA_CORE_PCIE2_OMAP2_UPPER 0x0D54 +#define BCMA_CORE_PCIE2_FUNC1_IARR1_SIZE 0x0D58 +#define BCMA_CORE_PCIE2_FUNC1_IARR2_SIZE 0x0D5C +#define BCMA_CORE_PCIE2_MEM_CONTROL 0x0F00 +#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG0 0x0F04 +#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG1 0x0F08 +#define BCMA_CORE_PCIE2_LINK_STATUS 0x0F0C +#define BCMA_CORE_PCIE2_STRAP_STATUS 0x0F10 +#define BCMA_CORE_PCIE2_RESET_STATUS 0x0F14 +#define BCMA_CORE_PCIE2_RESETEN_IN_LINKDOWN 0x0F18 +#define BCMA_CORE_PCIE2_MISC_INTR_EN 0x0F1C +#define BCMA_CORE_PCIE2_TX_DEBUG_CFG 0x0F20 +#define BCMA_CORE_PCIE2_MISC_CONFIG 0x0F24 +#define BCMA_CORE_PCIE2_MISC_STATUS 0x0F28 +#define BCMA_CORE_PCIE2_INTR_EN 0x0F30 +#define BCMA_CORE_PCIE2_INTR_CLEAR 0x0F34 +#define BCMA_CORE_PCIE2_INTR_STATUS 0x0F38 + +/* PCIE gen2 config regs */ +#define PCIE2_INTSTATUS 0x090 +#define PCIE2_INTMASK 0x094 +#define PCIE2_SBMBX 0x098 + +#define PCIE2_PMCR_REFUP 0x1814 /* Trefup time */ + +#define PCIE2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIE2_CAP_DEVSTSCTRL2_LTRENAB 0x400 +#define PCIE2_PVT_REG_PM_CLK_PERIOD 0x184c + +struct bcma_drv_pcie2 { + struct bcma_device *core; + + u16 reqsize; +}; + +#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset) +#define pcie2_read32(pcie2, offset) bcma_read32((pcie2)->core, offset) +#define pcie2_write16(pcie2, offset, val) bcma_write16((pcie2)->core, offset, val) +#define pcie2_write32(pcie2, offset, val) bcma_write32((pcie2)->core, offset, val) + +#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set) +#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask) + +#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h new file mode 100644 index 0000000..944105c --- /dev/null +++ b/include/linux/bcma/bcma_regs.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_REGS_H_ +#define LINUX_BCMA_REGS_H_ + +/* Some single registers are shared between many cores */ +/* BCMA_CLKCTLST: ChipCommon (rev >= 20), PCIe, 80211 */ +#define BCMA_CLKCTLST 0x01E0 /* Clock control and status */ +#define BCMA_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define BCMA_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define BCMA_CLKCTLST_FORCEILP 0x00000004 /* 
Force ILP request */ +#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define BCMA_CLKCTLST_HQCLKREQ 0x00000040 /* HQ Clock */ +#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */ +#define BCMA_CLKCTLST_EXTRESREQ_SHIFT 8 +#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ +#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */ +#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */ +#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */ +#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */ +#define BCMA_CLKCTLST_EXTRESST_SHIFT 24 +/* Is there any BCM4328 on BCMA bus? */ +#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ +#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ + +/* Agent registers (common for every core) */ +#define BCMA_OOB_SEL_OUT_A30 0x0100 +#define BCMA_IOCTL 0x0408 /* IO control */ +#define BCMA_IOCTL_CLK 0x0001 +#define BCMA_IOCTL_FGC 0x0002 +#define BCMA_IOCTL_CORE_BITS 0x3FFC +#define BCMA_IOCTL_PME_EN 0x4000 +#define BCMA_IOCTL_BIST_EN 0x8000 +#define BCMA_IOST 0x0500 /* IO status */ +#define BCMA_IOST_CORE_BITS 0x0FFF +#define BCMA_IOST_DMA64 0x1000 +#define BCMA_IOST_GATED_CLK 0x2000 +#define BCMA_IOST_BIST_ERROR 0x4000 +#define BCMA_IOST_BIST_DONE 0x8000 +#define BCMA_RESET_CTL 0x0800 +#define BCMA_RESET_CTL_RESET 0x0001 +#define BCMA_RESET_ST 0x0804 + +#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001 +#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002 + +/* BCMA PCI config space registers. */ +#define BCMA_PCI_PMCSR 0x44 +#define BCMA_PCI_PE 0x100 +#define BCMA_PCI_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define BCMA_PCI_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define BCMA_PCI_SPROMCTL 0x88 /* SPROM control */ +#define BCMA_PCI_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define BCMA_PCI_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define BCMA_PCI_IRQS 0x90 /* PCI interrupts */ +#define BCMA_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define BCMA_PCI_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define BCMA_PCI_BAR0_WIN2 0xAC +#define BCMA_PCI_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define BCMA_PCI_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + +#define BCMA_PCIE2_BAR0_WIN2 0x70 + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. 
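+ * For example (purely illustrative, restating the comments below): a
+ * MIPS-based SoC fetches its boot code from the BCMA_SOC_FLASH1 window
+ * at 0x1fc00000, with the same flash shadowed in the larger
+ * BCMA_SOC_FLASH2 region.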
+ */ +#define BCMA_SOC_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define BCMA_SOC_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define BCMA_SOC_PCI_MEM_SZ (64 * 1024 * 1024) +#define BCMA_SOC_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define BCMA_SOC_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define BCMA_SOC_SDRAM_R2 0x80000000U /* Region 2 for sdram (512 MB) */ + + +#define BCMA_SOC_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define BCMA_SOC_PCI_DMA2 0x80000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define BCMA_SOC_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define BCMA_SOC_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define BCMA_SOC_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define BCMA_SOC_PCI1_MEM 0x40000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define BCMA_SOC_PCI1_CFG 0x44000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define BCMA_SOC_PCIE1_DMA_H32 0xc0000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define BCMA_SOC_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define BCMA_SOC_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define BCMA_SOC_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define BCMA_SOC_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ + +#endif /* LINUX_BCMA_REGS_H_ */ diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h new file mode 100644 index 0000000..f3c4351 --- /dev/null +++ b/include/linux/bcma/bcma_soc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_BCMA_SOC_H_ +#define LINUX_BCMA_SOC_H_ + +#include + +struct bcma_soc { + struct bcma_bus bus; + struct device *dev; +}; + +int __init bcma_host_soc_register(struct bcma_soc *soc); +int __init bcma_host_soc_init(struct bcma_soc *soc); + +int bcma_bus_register(struct bcma_bus *bus); + +#endif /* LINUX_BCMA_SOC_H_ */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h new file mode 100644 index 0000000..b40fc63 --- /dev/null +++ b/include/linux/binfmts.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BINFMTS_H +#define _LINUX_BINFMTS_H + +#include +#include +#include +#include + +struct filename; + +#define CORENAME_MAX_SIZE 128 + +/* + * This structure is used to hold the arguments that are used when loading binaries. + */ +struct linux_binprm { +#ifdef CONFIG_MMU + struct vm_area_struct *vma; + unsigned long vma_pages; +#else +# define MAX_ARG_PAGES 32 + struct page *page[MAX_ARG_PAGES]; +#endif + struct mm_struct *mm; + unsigned long p; /* current top of mem */ + unsigned long argmin; /* rlimit marker for copy_strings() */ + unsigned int + /* + * True after the bprm_set_creds hook has been called once + * (multiple calls can be made via prepare_binprm() for + * binfmt_script/misc). + */ + called_set_creds:1, + /* + * True if most recent call to the commoncaps bprm_set_creds + * hook (due to multiple prepare_binprm() calls from the + * binfmt_script/misc handlers) resulted in elevated + * privileges. + */ + cap_elevated:1, + /* + * Set by bprm_set_creds hook to indicate a privilege-gaining + * exec has happened. Used to sanitize execution environment + * and to set AT_SECURE auxv for glibc. 
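+ * (For instance, execve() of a set-uid binary by an unprivileged
+ * process sets this; with AT_SECURE set, glibc's loader ignores
+ * environment variables such as LD_PRELOAD.)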
+ */ + secureexec:1; +#ifdef __alpha__ + unsigned int taso:1; +#endif + unsigned int recursion_depth; /* only for search_binary_handler() */ + struct file * file; + struct cred *cred; /* new credentials */ + int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ + unsigned int per_clear; /* bits to clear in current->personality */ + int argc, envc; + const char * filename; /* Name of binary as seen by procps */ + const char * interp; /* Name of the binary really executed. Most + of the time same as filename, but could be + different for binfmt_{misc,script} */ + unsigned interp_flags; + unsigned interp_data; + unsigned long loader, exec; + + struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */ + + char buf[BINPRM_BUF_SIZE]; +} __randomize_layout; + +#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 +#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) + +/* fd of the binary should be passed to the interpreter */ +#define BINPRM_FLAGS_EXECFD_BIT 1 +#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) + +/* filename of the binary will be inaccessible after exec */ +#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 +#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) + +/* Function parameter for binfmt->coredump */ +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct pt_regs *regs; + struct file *file; + unsigned long limit; + unsigned long mm_flags; + loff_t written; + loff_t pos; +}; + +/* + * This structure defines the functions that are used to load the binary formats that + * linux accepts. + */ +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *cprm); + unsigned long min_coredump; /* minimal dump size */ +} __randomize_layout; + +extern void __register_binfmt(struct linux_binfmt *fmt, int insert); + +/* Registration of default binfmt handlers */ +static inline void register_binfmt(struct linux_binfmt *fmt) +{ + __register_binfmt(fmt, 0); +} +/* Same as above, but adds a new binfmt at the top of the list */ +static inline void insert_binfmt(struct linux_binfmt *fmt) +{ + __register_binfmt(fmt, 1); +} + +extern void unregister_binfmt(struct linux_binfmt *); + +extern int prepare_binprm(struct linux_binprm *); +extern int __must_check remove_arg_zero(struct linux_binprm *); +extern int search_binary_handler(struct linux_binprm *); +extern int flush_old_exec(struct linux_binprm * bprm); +extern void setup_new_exec(struct linux_binprm * bprm); +extern void finalize_exec(struct linux_binprm *bprm); +extern void would_dump(struct linux_binprm *, struct file *); + +extern int suid_dumpable; + +/* Stack area protections */ +#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */ +#define EXSTACK_DISABLE_X 1 /* Disable executable stacks */ +#define EXSTACK_ENABLE_X 2 /* Enable executable stacks */ + +extern int setup_arg_pages(struct linux_binprm * bprm, + unsigned long stack_top, + int executable_stack); +extern int transfer_args_to_stack(struct linux_binprm *bprm, + unsigned long *sp_location); +extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); +extern int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm); +extern void install_exec_creds(struct linux_binprm *bprm); +extern void set_binfmt(struct linux_binfmt *new); +extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); + +extern 
int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); +int do_execve_file(struct file *file, void *__argv, void *__envp); + +#endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h new file mode 100644 index 0000000..853d92c --- /dev/null +++ b/include/linux/bio.h @@ -0,0 +1,838 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2001 Jens Axboe + */ +#ifndef __LINUX_BIO_H +#define __LINUX_BIO_H + +#include +#include +#include + +#ifdef CONFIG_BLOCK +/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ +#include + +#define BIO_DEBUG + +#ifdef BIO_DEBUG +#define BIO_BUG_ON BUG_ON +#else +#define BIO_BUG_ON +#endif + +#define BIO_MAX_PAGES 256 + +#define bio_prio(bio) (bio)->bi_ioprio +#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) + +#define bio_iter_iovec(bio, iter) \ + bvec_iter_bvec((bio)->bi_io_vec, (iter)) + +#define bio_iter_page(bio, iter) \ + bvec_iter_page((bio)->bi_io_vec, (iter)) +#define bio_iter_len(bio, iter) \ + bvec_iter_len((bio)->bi_io_vec, (iter)) +#define bio_iter_offset(bio, iter) \ + bvec_iter_offset((bio)->bi_io_vec, (iter)) + +#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) +#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) +#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) + +#define bio_multiple_segments(bio) \ + ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) + +#define bvec_iter_sectors(iter) ((iter).bi_size >> 9) +#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) + +#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter) +#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) + +/* + * Return the data direction, READ or WRITE. + */ +#define bio_data_dir(bio) \ + (op_is_write(bio_op(bio)) ? WRITE : READ) + +/* + * Check whether this bio carries any data or not. A NULL bio is allowed. 
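+ * For example, a REQ_OP_DISCARD bio may have a non-zero bi_size that
+ * only describes the range to be discarded, so it reports false here.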
+ */ +static inline bool bio_has_data(struct bio *bio) +{ + if (bio && + bio->bi_iter.bi_size && + bio_op(bio) != REQ_OP_DISCARD && + bio_op(bio) != REQ_OP_SECURE_ERASE && + bio_op(bio) != REQ_OP_WRITE_ZEROES) + return true; + + return false; +} + +static inline bool bio_no_advance_iter(struct bio *bio) +{ + return bio_op(bio) == REQ_OP_DISCARD || + bio_op(bio) == REQ_OP_SECURE_ERASE || + bio_op(bio) == REQ_OP_WRITE_SAME || + bio_op(bio) == REQ_OP_WRITE_ZEROES; +} + +static inline bool bio_mergeable(struct bio *bio) +{ + if (bio->bi_opf & REQ_NOMERGE_FLAGS) + return false; + + return true; +} + +static inline unsigned int bio_cur_bytes(struct bio *bio) +{ + if (bio_has_data(bio)) + return bio_iovec(bio).bv_len; + else /* dataless requests such as discard */ + return bio->bi_iter.bi_size; +} + +static inline void *bio_data(struct bio *bio) +{ + if (bio_has_data(bio)) + return page_address(bio_page(bio)) + bio_offset(bio); + + return NULL; +} + +/** + * bio_full - check if the bio is full + * @bio: bio to check + * @len: length of one segment to be added + * + * Return true if @bio is full and one segment with @len bytes can't be + * added to the bio, otherwise return false + */ +static inline bool bio_full(struct bio *bio, unsigned len) +{ + if (bio->bi_vcnt >= bio->bi_max_vecs) + return true; + + if (bio->bi_iter.bi_size > UINT_MAX - len) + return true; + + return false; +} + +static inline bool bio_next_segment(const struct bio *bio, + struct bvec_iter_all *iter) +{ + if (iter->idx >= bio->bi_vcnt) + return false; + + bvec_advance(&bio->bi_io_vec[iter->idx], iter); + return true; +} + +/* + * drivers should _never_ use the all version - the bio may have been split + * before it got to the driver and the driver won't own all of it + */ +#define bio_for_each_segment_all(bvl, bio, iter) \ + for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) + +static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, + unsigned bytes) +{ + iter->bi_sector += bytes >> 9; + + if (bio_no_advance_iter(bio)) + iter->bi_size -= bytes; + else + bvec_iter_advance(bio->bi_io_vec, iter, bytes); + /* TODO: It is reasonable to complete bio with error here. */ +} + +#define __bio_for_each_segment(bvl, bio, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bio_iter_iovec((bio), (iter))), 1); \ + bio_advance_iter((bio), &(iter), (bvl).bv_len)) + +#define bio_for_each_segment(bvl, bio, iter) \ + __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) + +#define __bio_for_each_bvec(bvl, bio, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \ + bio_advance_iter((bio), &(iter), (bvl).bv_len)) + +/* iterate over multi-page bvec */ +#define bio_for_each_bvec(bvl, bio, iter) \ + __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) + +#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) + +static inline unsigned bio_segments(struct bio *bio) +{ + unsigned segs = 0; + struct bio_vec bv; + struct bvec_iter iter; + + /* + * We special case discard/write same/write zeroes, because they + * interpret bi_size differently: + */ + + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + return 0; + case REQ_OP_WRITE_SAME: + return 1; + default: + break; + } + + bio_for_each_segment(bv, bio, iter) + segs++; + + return segs; +} + +/* + * get a reference to a bio, so it won't disappear. 
the intended use is + * something like: + * + * bio_get(bio); + * submit_bio(rw, bio); + * if (bio->bi_flags ...) + * do_something + * bio_put(bio); + * + * without the bio_get(), it could potentially complete I/O before submit_bio + * returns. and then bio would be freed memory when if (bio->bi_flags ...) + * runs + */ +static inline void bio_get(struct bio *bio) +{ + bio->bi_flags |= (1 << BIO_REFFED); + smp_mb__before_atomic(); + atomic_inc(&bio->__bi_cnt); +} + +static inline void bio_cnt_set(struct bio *bio, unsigned int count) +{ + if (count != 1) { + bio->bi_flags |= (1 << BIO_REFFED); + smp_mb(); + } + atomic_set(&bio->__bi_cnt, count); +} + +static inline bool bio_flagged(struct bio *bio, unsigned int bit) +{ + return (bio->bi_flags & (1U << bit)) != 0; +} + +static inline void bio_set_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_flags |= (1U << bit); +} + +static inline void bio_clear_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_flags &= ~(1U << bit); +} + +static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) +{ + *bv = bio_iovec(bio); +} + +static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) +{ + struct bvec_iter iter = bio->bi_iter; + int idx; + + if (unlikely(!bio_multiple_segments(bio))) { + *bv = bio_iovec(bio); + return; + } + + bio_advance_iter(bio, &iter, iter.bi_size); + + if (!iter.bi_bvec_done) + idx = iter.bi_idx - 1; + else /* in the middle of bvec */ + idx = iter.bi_idx; + + *bv = bio->bi_io_vec[idx]; + + /* + * iter.bi_bvec_done records actual length of the last bvec + * if this bio ends in the middle of one io vector + */ + if (iter.bi_bvec_done) + bv->bv_len = iter.bi_bvec_done; +} + +static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return bio->bi_io_vec; +} + +static inline struct page *bio_first_page_all(struct bio *bio) +{ + return bio_first_bvec_all(bio)->bv_page; +} + +static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return &bio->bi_io_vec[bio->bi_vcnt - 1]; +} + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ + BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ + BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ + BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ + BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ +}; + +/* + * bio integrity payload + */ +struct bio_integrity_payload { + struct bio *bip_bio; /* parent bio */ + + struct bvec_iter bip_iter; + + unsigned short bip_slab; /* slab the bip came from */ + unsigned short bip_vcnt; /* # of integrity bio_vecs */ + unsigned short bip_max_vcnt; /* integrity bio_vec slots */ + unsigned short bip_flags; /* control flags */ + + struct bvec_iter bio_iter; /* for rewinding parent bio */ + + struct work_struct bip_work; /* I/O completion */ + + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ +}; + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) +{ + if (bio->bi_opf & REQ_INTEGRITY) + return bio->bi_integrity; + + return NULL; +} + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + + if (bip) + return bip->bip_flags & flag; + + return false; +} + +static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) +{ + return 
bip->bip_iter.bi_sector; +} + +static inline void bip_set_seed(struct bio_integrity_payload *bip, + sector_t seed) +{ + bip->bip_iter.bi_sector = seed; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +extern void bio_trim(struct bio *bio, int offset, int size); +extern struct bio *bio_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs); + +/** + * bio_next_split - get next @sectors from a bio, splitting if necessary + * @bio: bio to split + * @sectors: number of sectors to split from the front of @bio + * @gfp: gfp mask + * @bs: bio set to allocate from + * + * Returns a bio representing the next @sectors of @bio - if the bio is smaller + * than @sectors, returns the original bio unchanged. + */ +static inline struct bio *bio_next_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs) +{ + if (sectors >= bio_sectors(bio)) + return bio; + + return bio_split(bio, sectors, gfp, bs); +} + +enum { + BIOSET_NEED_BVECS = BIT(0), + BIOSET_NEED_RESCUER = BIT(1), +}; +extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); +extern void bioset_exit(struct bio_set *); +extern int biovec_init_pool(mempool_t *pool, int pool_entries); +extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); + +extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *); +extern void bio_put(struct bio *); + +extern void __bio_clone_fast(struct bio *, struct bio *); +extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); + +extern struct bio_set fs_bio_set; + +static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); +} + +static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); +} + +extern blk_qc_t submit_bio(struct bio *); + +extern void bio_endio(struct bio *); + +static inline void bio_io_error(struct bio *bio) +{ + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); +} + +static inline void bio_wouldblock_error(struct bio *bio) +{ + bio->bi_status = BLK_STS_AGAIN; + bio_endio(bio); +} + +struct request_queue; + +extern int submit_bio_wait(struct bio *bio); +extern void bio_advance(struct bio *, unsigned); + +extern void bio_init(struct bio *bio, struct bio_vec *table, + unsigned short max_vecs); +extern void bio_uninit(struct bio *); +extern void bio_reset(struct bio *); +void bio_chain(struct bio *, struct bio *); + +extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); +extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, + unsigned int, unsigned int); +bool __bio_try_merge_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off, bool *same_page); +void __bio_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off); +int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); +void bio_release_pages(struct bio *bio, bool mark_dirty); +struct rq_map_data; +extern struct bio *bio_map_user_iov(struct request_queue *, + struct iov_iter *, gfp_t); +extern void bio_unmap_user(struct bio *); +extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, + gfp_t); +extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, + gfp_t, int); +extern void bio_set_pages_dirty(struct bio *bio); +extern void bio_check_pages_dirty(struct bio *bio); + +void generic_start_io_acct(struct request_queue *q, int op, + unsigned long 
sectors, struct hd_struct *part); +void generic_end_io_acct(struct request_queue *q, int op, + struct hd_struct *part, + unsigned long start_time); + +extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, + struct bio *src, struct bvec_iter *src_iter); +extern void bio_copy_data(struct bio *dst, struct bio *src); +extern void bio_list_copy_data(struct bio *dst, struct bio *src); +extern void bio_free_pages(struct bio *bio); + +extern struct bio *bio_copy_user_iov(struct request_queue *, + struct rq_map_data *, + struct iov_iter *, + gfp_t); +extern int bio_uncopy_user(struct bio *); +void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); +void bio_truncate(struct bio *bio, unsigned new_size); + +static inline void zero_fill_bio(struct bio *bio) +{ + zero_fill_bio_iter(bio, bio->bi_iter); +} + +extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); +extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); +extern unsigned int bvec_nr_vecs(unsigned short idx); +extern const char *bio_devname(struct bio *bio, char *buffer); + +#define bio_set_dev(bio, bdev) \ +do { \ + if ((bio)->bi_disk != (bdev)->bd_disk) \ + bio_clear_flag(bio, BIO_THROTTLED);\ + (bio)->bi_disk = (bdev)->bd_disk; \ + (bio)->bi_partno = (bdev)->bd_partno; \ + bio_associate_blkg(bio); \ +} while (0) + +#define bio_copy_dev(dst, src) \ +do { \ + (dst)->bi_disk = (src)->bi_disk; \ + (dst)->bi_partno = (src)->bi_partno; \ + bio_clone_blkg_association(dst, src); \ +} while (0) + +#define bio_dev(bio) \ + disk_devt((bio)->bi_disk) + +#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) +void bio_associate_blkg_from_page(struct bio *bio, struct page *page); +#else +static inline void bio_associate_blkg_from_page(struct bio *bio, + struct page *page) { } +#endif + +#ifdef CONFIG_BLK_CGROUP +void bio_disassociate_blkg(struct bio *bio); +void bio_associate_blkg(struct bio *bio); +void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css); +void bio_clone_blkg_association(struct bio *dst, struct bio *src); +#else /* CONFIG_BLK_CGROUP */ +static inline void bio_disassociate_blkg(struct bio *bio) { } +static inline void bio_associate_blkg(struct bio *bio) { } +static inline void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css) +{ } +static inline void bio_clone_blkg_association(struct bio *dst, + struct bio *src) { } +#endif /* CONFIG_BLK_CGROUP */ + +#ifdef CONFIG_HIGHMEM +/* + * remember never ever reenable interrupts between a bvec_kmap_irq and + * bvec_kunmap_irq! + */ +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + unsigned long addr; + + /* + * might not be a highmem page, but the preempt/irq count + * balancing is a lot nicer this way + */ + local_irq_save(*flags); + addr = (unsigned long) kmap_atomic(bvec->bv_page); + + BUG_ON(addr & ~PAGE_MASK); + + return (char *) addr + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; + + kunmap_atomic((void *) ptr); + local_irq_restore(*flags); +} + +#else +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + *flags = 0; +} +#endif + +/* + * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 
+ * + * A bio_list anchors a singly-linked list of bios chained through the bi_next + * member of the bio. The bio_list also caches the last list member to allow + * fast access to the tail. + */ +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +static inline int bio_list_empty(const struct bio_list *bl) +{ + return bl->head == NULL; +} + +static inline void bio_list_init(struct bio_list *bl) +{ + bl->head = bl->tail = NULL; +} + +#define BIO_EMPTY_LIST { NULL, NULL } + +#define bio_list_for_each(bio, bl) \ + for (bio = (bl)->head; bio; bio = bio->bi_next) + +static inline unsigned bio_list_size(const struct bio_list *bl) +{ + unsigned sz = 0; + struct bio *bio; + + bio_list_for_each(bio, bl) + sz++; + + return sz; +} + +static inline void bio_list_add(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = NULL; + + if (bl->tail) + bl->tail->bi_next = bio; + else + bl->head = bio; + + bl->tail = bio; +} + +static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = bl->head; + + bl->head = bio; + + if (!bl->tail) + bl->tail = bio; +} + +static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->tail) + bl->tail->bi_next = bl2->head; + else + bl->head = bl2->head; + + bl->tail = bl2->tail; +} + +static inline void bio_list_merge_head(struct bio_list *bl, + struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->head) + bl2->tail->bi_next = bl->head; + else + bl->tail = bl2->tail; + + bl->head = bl2->head; +} + +static inline struct bio *bio_list_peek(struct bio_list *bl) +{ + return bl->head; +} + +static inline struct bio *bio_list_pop(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + if (bio) { + bl->head = bl->head->bi_next; + if (!bl->head) + bl->tail = NULL; + + bio->bi_next = NULL; + } + + return bio; +} + +static inline struct bio *bio_list_get(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + bl->head = bl->tail = NULL; + + return bio; +} + +/* + * Increment chain count for the bio. Make sure the CHAIN flag update + * is visible before the raised count. + */ +static inline void bio_inc_remaining(struct bio *bio) +{ + bio_set_flag(bio, BIO_CHAIN); + smp_mb__before_atomic(); + atomic_inc(&bio->__bi_remaining); +} + +/* + * bio_set is used to allow other portions of the IO system to + * allocate their own private memory pools for bio and iovec structures. + * These memory pools in turn all allocate from the bio_slab + * and the bvec_slabs[]. + */ +#define BIO_POOL_SIZE 2 + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + + mempool_t bio_pool; + mempool_t bvec_pool; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + mempool_t bio_integrity_pool; + mempool_t bvec_integrity_pool; +#endif + + /* + * Deadlock avoidance for stacking block drivers: see comments in + * bio_alloc_bioset() for details + */ + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +static inline bool bioset_initialized(struct bio_set *bs) +{ + return bs->bio_slab != NULL; +} + +/* + * a small number of entries is fine, not going to be performance critical. 
+ * basically we just need to survive + */ +#define BIO_SPLIT_ENTRIES 2 + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define bip_for_each_vec(bvl, bip, iter) \ + for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) + +#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ + for_each_bio(_bio) \ + bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) + +extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); +extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); +extern bool bio_integrity_prep(struct bio *); +extern void bio_integrity_advance(struct bio *, unsigned int); +extern void bio_integrity_trim(struct bio *); +extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); +extern int bioset_integrity_create(struct bio_set *, int); +extern void bioset_integrity_free(struct bio_set *); +extern void bio_integrity_init(void); + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline void *bio_integrity(struct bio *bio) +{ + return NULL; +} + +static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + return 0; +} + +static inline void bioset_integrity_free (struct bio_set *bs) +{ + return; +} + +static inline bool bio_integrity_prep(struct bio *bio) +{ + return true; +} + +static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask) +{ + return 0; +} + +static inline void bio_integrity_advance(struct bio *bio, + unsigned int bytes_done) +{ + return; +} + +static inline void bio_integrity_trim(struct bio *bio) +{ + return; +} + +static inline void bio_integrity_init(void) +{ + return; +} + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + return false; +} + +static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp, + unsigned int nr) +{ + return ERR_PTR(-EINVAL); +} + +static inline int bio_integrity_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +/* + * Mark a bio as polled. Note that for async polled IO, the caller must + * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). + * We cannot block waiting for requests on polled IO, as those completions + * must be found by the caller. This is different than IRQ driven IO, where + * it's safe to wait for IO to complete. + */ +static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) +{ + bio->bi_opf |= REQ_HIPRI; + if (!is_sync_kiocb(kiocb)) + bio->bi_opf |= REQ_NOWAIT; +} + +#endif /* CONFIG_BLOCK */ +#endif /* __LINUX_BIO_H */ diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 0000000..bbc4730 --- /dev/null +++ b/include/linux/bit_spinlock.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BIT_SPINLOCK_H +#define __LINUX_BIT_SPINLOCK_H + +#include +#include +#include +#include + +/* + * bit-based spin_lock() + * + * Don't use this unless you really need to: spin_lock() and spin_unlock() + * are significantly faster. + */ +static inline void bit_spin_lock(int bitnum, unsigned long *addr) +{ + /* + * Assuming the lock is uncontended, this never enters + * the body of the outer loop. If it is contended, then + * within the inner loop a non-atomic test is used to + * busywait with less bus contention for a good time to + * attempt to acquire the lock bit. 
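+	 *
+	 * Note that preemption stays disabled for as long as the lock
+	 * is held; bit_spin_unlock() re-enables it.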
+ */ + preempt_disable(); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + while (unlikely(test_and_set_bit_lock(bitnum, addr))) { + preempt_enable(); + do { + cpu_relax(); + } while (test_bit(bitnum, addr)); + preempt_disable(); + } +#endif + __acquire(bitlock); +} + +/* + * Return true if it was acquired + */ +static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +{ + preempt_disable(); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + if (unlikely(test_and_set_bit_lock(bitnum, addr))) { + preempt_enable(); + return 0; + } +#endif + __acquire(bitlock); + return 1; +} + +/* + * bit-based spin_unlock() + */ +static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +#endif +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + clear_bit_unlock(bitnum, addr); +#endif + preempt_enable(); + __release(bitlock); +} + +/* + * bit-based spin_unlock() + * non-atomic version, which can be used eg. if the bit lock itself is + * protecting the rest of the flags in the word. + */ +static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +#endif +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __clear_bit_unlock(bitnum, addr); +#endif + preempt_enable(); + __release(bitlock); +} + +/* + * Return true if the lock is held. + */ +static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + return test_bit(bitnum, addr); +#elif defined CONFIG_PREEMPT_COUNT + return preempt_count(); +#else + return 1; +#endif +} + +#endif /* __LINUX_BIT_SPINLOCK_H */ + diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h new file mode 100644 index 0000000..4bbb5f1 --- /dev/null +++ b/include/linux/bitfield.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2004 - 2009 Ivo van Doorn + */ + +#ifndef _LINUX_BITFIELD_H +#define _LINUX_BITFIELD_H + +#include +#include + +/* + * Bitfield access macros + * + * FIELD_{GET,PREP} macros take as first parameter shifted mask + * from which they extract the base mask and shift amount. + * Mask must be a compilation time constant. + * + * Example: + * + * #define REG_FIELD_A GENMASK(6, 0) + * #define REG_FIELD_B BIT(7) + * #define REG_FIELD_C GENMASK(15, 8) + * #define REG_FIELD_D GENMASK(31, 16) + * + * Get: + * a = FIELD_GET(REG_FIELD_A, reg); + * b = FIELD_GET(REG_FIELD_B, reg); + * + * Set: + * reg = FIELD_PREP(REG_FIELD_A, 1) | + * FIELD_PREP(REG_FIELD_B, 0) | + * FIELD_PREP(REG_FIELD_C, c) | + * FIELD_PREP(REG_FIELD_D, 0x40); + * + * Modify: + * reg &= ~REG_FIELD_C; + * reg |= FIELD_PREP(REG_FIELD_C, c); + */ + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ + ({ \ + BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ + _pfx "mask is not constant"); \ + BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? 
\ + ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ + _pfx "value too large for the field"); \ + BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + _pfx "type of reg too small for mask"); \ + __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ + (1ULL << __bf_shf(_mask))); \ + }) + +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. + */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) + +/** + * FIELD_PREP() - prepare a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_val: value to put in the field + * + * FIELD_PREP() masks and shifts up the value. The result should + * be combined with other fields of the bitfield using logical OR. + */ +#define FIELD_PREP(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: value of entire bitfield + * + * FIELD_GET() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET(_mask, _reg) \ + ({ \ + __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + }) + +extern void __compiletime_error("value doesn't fit into mask") +__field_overflow(void); +extern void __compiletime_error("bad bitfield mask") +__bad_mask(void); +static __always_inline u64 field_multiplier(u64 field) +{ + if ((field | (field - 1)) & ((field | (field - 1)) + 1)) + __bad_mask(); + return field & -field; +} +static __always_inline u64 field_mask(u64 field) +{ + return field / field_multiplier(field); +} +#define ____MAKE_OP(type,base,to,from) \ +static __always_inline __##type type##_encode_bits(base v, base field) \ +{ \ + if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ + __field_overflow(); \ + return to((v & field_mask(field)) * field_multiplier(field)); \ +} \ +static __always_inline __##type type##_replace_bits(__##type old, \ + base val, base field) \ +{ \ + return (old & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline void type##p_replace_bits(__##type *p, \ + base val, base field) \ +{ \ + *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline base type##_get_bits(__##type v, base field) \ +{ \ + return (from(v) & field)/field_multiplier(field); \ +} +#define __MAKE_OP(size) \ + ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ + ____MAKE_OP(u##size,u##size,,) +____MAKE_OP(u8,u8,,) +__MAKE_OP(16) +__MAKE_OP(32) +__MAKE_OP(64) +#undef __MAKE_OP +#undef ____MAKE_OP + +#endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h new file mode 100644 index 0000000..29fc933 --- /dev/null +++ b/include/linux/bitmap.h @@ -0,0 +1,494 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITMAP_H +#define __LINUX_BITMAP_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * bitmaps provide bit arrays that consume one or more unsigned + * longs. 
The bitmap interface and available operations are listed
+ * here, in bitmap.h
+ *
+ * Function implementations generic to all architectures are in
+ * lib/bitmap.c. Function implementations that are architecture
+ * specific are in various include/asm-<arch>/bitops.h headers
+ * and other arch/<arch> specific files.
+ *
+ * See lib/bitmap.c for more details.
+ */
+
+/**
+ * DOC: bitmap overview
+ *
+ * The available bitmap operations and their rough meaning in the
+ * case that the bitmap is a single unsigned long are thus:
+ *
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
+ *
+ * ::
+ *
+ *  bitmap_zero(dst, nbits)                     *dst = 0UL
+ *  bitmap_fill(dst, nbits)                     *dst = ~0UL
+ *  bitmap_copy(dst, src, nbits)                *dst = *src
+ *  bitmap_and(dst, src1, src2, nbits)          *dst = *src1 & *src2
+ *  bitmap_or(dst, src1, src2, nbits)           *dst = *src1 | *src2
+ *  bitmap_xor(dst, src1, src2, nbits)          *dst = *src1 ^ *src2
+ *  bitmap_andnot(dst, src1, src2, nbits)       *dst = *src1 & ~(*src2)
+ *  bitmap_complement(dst, src, nbits)          *dst = ~(*src)
+ *  bitmap_equal(src1, src2, nbits)             Are *src1 and *src2 equal?
+ *  bitmap_intersects(src1, src2, nbits)        Do *src1 and *src2 overlap?
+ *  bitmap_subset(src1, src2, nbits)            Is *src1 a subset of *src2?
+ *  bitmap_empty(src, nbits)                    Are all bits zero in *src?
+ *  bitmap_full(src, nbits)                     Are all bits set in *src?
+ *  bitmap_weight(src, nbits)                   Hamming Weight: number of set bits
+ *  bitmap_set(dst, pos, nbits)                 Set specified bit area
+ *  bitmap_clear(dst, pos, nbits)               Clear specified bit area
+ *  bitmap_find_next_zero_area(buf, len, pos, n, mask)  Find bit free area
+ *  bitmap_find_next_zero_area_off(buf, len, pos, n, mask)  as above
+ *  bitmap_shift_right(dst, src, n, nbits)      *dst = *src >> n
+ *  bitmap_shift_left(dst, src, n, nbits)       *dst = *src << n
+ *  bitmap_remap(dst, src, old, new, nbits)     *dst = map(old, new)(src)
+ *  bitmap_bitremap(oldbit, old, new, nbits)    newbit = map(old, new)(oldbit)
+ *  bitmap_onto(dst, orig, relmap, nbits)       *dst = orig relative to relmap
+ *  bitmap_fold(dst, orig, sz, nbits)           dst bits = orig bits mod sz
+ *  bitmap_parse(buf, buflen, dst, nbits)       Parse bitmap dst from kernel buf
+ *  bitmap_parse_user(ubuf, ulen, dst, nbits)   Parse bitmap dst from user buf
+ *  bitmap_parselist(buf, dst, nbits)           Parse bitmap dst from kernel buf
+ *  bitmap_parselist_user(buf, dst, nbits)      Parse bitmap dst from user buf
+ *  bitmap_find_free_region(bitmap, bits, order)  Find and allocate bit region
+ *  bitmap_release_region(bitmap, pos, order)   Free specified bit region
+ *  bitmap_allocate_region(bitmap, pos, order)  Allocate specified bit region
+ *  bitmap_from_arr32(dst, buf, nbits)          Copy nbits from u32[] buf to dst
+ *  bitmap_to_arr32(buf, src, nbits)            Copy nbits from src to u32[] buf
+ *
+ * Note, bitmap_zero() and bitmap_fill() operate over the whole region of
+ * unsigned longs, that is, bits past nbits and up to the next unsigned
+ * long boundary will be zeroed or filled as well. Consider using
+ * bitmap_clear() or bitmap_set() to make the zeroing or filling explicit.
+ */
+
+/**
+ * DOC: bitmap bitops
+ *
+ * Also the following operations in asm/bitops.h apply to bitmaps.::
+ *
+ *  set_bit(bit, addr)                  *addr |= bit
+ *  clear_bit(bit, addr)                *addr &= ~bit
+ *  change_bit(bit, addr)               *addr ^= bit
+ *  test_bit(bit, addr)                 Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) + * Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + * find_next_and_bit(addr1, addr2, nbits, bit) + * Same as find_next_bit, but in + * (*addr1 & *addr2) + * + */ + +/** + * DOC: declare bitmap + * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used + * to declare an array named 'name' of just enough unsigned longs to + * contain all bit positions from 0 to 'bits' - 1. + */ + +/* + * Allocation and deallocation of bitmap. + * Provided in lib/bitmap.c to avoid circular dependency. + */ +extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); +extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +extern void bitmap_free(const unsigned long *bitmap); + +/* + * lib/bitmap.c provides these functions: + */ + +extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits); +extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits); +extern int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits); +extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +extern void __bitmap_set(unsigned long *map, unsigned int start, int len); +extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); + +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); + +/** + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @align_mask: Alignment mask for zero area + * + * The @align_mask should be one less than a power of 2; the effect is that + * the bit offset of all zero areas this function 
finds is multiples of that + * power of 2. A @align_mask of 0 means no alignment is required. + */ +static inline unsigned long +bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, 0); +} + +extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, + unsigned long *dst, int nbits); +extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); +extern int bitmap_parselist(const char *buf, unsigned long *maskp, + int nmaskbits); +extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); +extern void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, unsigned int nbits); +extern int bitmap_bitremap(int oldbit, + const unsigned long *old, const unsigned long *new, int bits); +extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, unsigned int bits); +extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, + unsigned int sz, unsigned int nbits); +extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); + +#ifdef __BIG_ENDIAN +extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +#else +#define bitmap_copy_le bitmap_copy +#endif +extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +extern int bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); + +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) +#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) + +/* + * The static inlines below do not handle constant nbits==0 correctly, + * so make such users (should any ever turn up) call the out-of-line + * versions. + */ +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) + +static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) +{ + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); +} + +static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) +{ + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0xff, len); +} + +static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, + unsigned int nbits) +{ + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); +} + +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void bitmap_copy_clear_tail(unsigned long *dst, + const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
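+ *
+ * A minimal sketch of going from a caller-provided u32 array (the
+ * name cfg is hypothetical) to a bitmap:
+ *
+ *	DECLARE_BITMAP(mask, 64);
+ *	bitmap_from_arr32(mask, cfg, 64);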
+ */ +#if BITS_PER_LONG == 64 +extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits); +extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, + unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (buf), \ + (const unsigned long *) (bitmap), (nbits)) +#endif + +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; + return __bitmap_and(dst, src1, src2, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 | *src2; + else + __bitmap_or(dst, src1, src2, nbits); +} + +static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 ^ *src2; + else + __bitmap_xor(dst, src1, src2, nbits); +} + +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + return __bitmap_andnot(dst, src1, src2, nbits); +} + +static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = ~(*src); + else + __bitmap_complement(dst, src, nbits); +} + +#ifdef __LITTLE_ENDIAN +#define BITMAP_MEM_ALIGNMENT 8 +#else +#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long)) +#endif +#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1) + +static inline int bitmap_equal(const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); + if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) + return !memcmp(src1, src2, nbits / 8); + return __bitmap_equal(src1, src2, nbits); +} + +/** + * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third + * @src1: Pointer to bitmap 1 + * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1 + * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2 + * @nbits: number of bits in each of these bitmaps + * + * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise + */ +static inline bool bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits) +{ + if (!small_const_nbits(nbits)) + return __bitmap_or_equal(src1, src2, src3, nbits); + + return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits)); +} + +static inline int bitmap_intersects(const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + else + return __bitmap_intersects(src1, src2, nbits); +} + +static inline int bitmap_subset(const unsigned long *src1, + const unsigned long *src2, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ! 
((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_subset(src1, src2, nbits); +} + +static inline int bitmap_empty(const unsigned long *src, unsigned nbits) +{ + if (small_const_nbits(nbits)) + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + + return find_first_bit(src, nbits) == nbits; +} + +static inline int bitmap_full(const unsigned long *src, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + + return find_first_zero_bit(src, nbits) == nbits; +} + +static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); + return __bitmap_weight(src, nbits); +} + +static __always_inline void bitmap_set(unsigned long *map, unsigned int start, + unsigned int nbits) +{ + if (__builtin_constant_p(nbits) && nbits == 1) + __set_bit(start, map); + else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && + IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && + __builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) + memset((char *)map + start / 8, 0xff, nbits / 8); + else + __bitmap_set(map, start, nbits); +} + +static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, + unsigned int nbits) +{ + if (__builtin_constant_p(nbits) && nbits == 1) + __clear_bit(start, map); + else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && + IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && + __builtin_constant_p(nbits & BITMAP_MEM_MASK) && + IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT)) + memset((char *)map + start / 8, 0, nbits / 8); + else + __bitmap_clear(map, start, nbits); +} + +static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; + else + __bitmap_shift_right(dst, src, shift, nbits); +} + +static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits); + else + __bitmap_shift_left(dst, src, shift, nbits); +} + +static inline int bitmap_parse(const char *buf, unsigned int buflen, + unsigned long *maskp, int nmaskbits) +{ + return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); +} + +/** + * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. + * @n: u64 value + * + * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit + * integers in 32-bit environment, and 64-bit integers in 64-bit one. + * + * There are four combinations of endianness and length of the word in linux + * ABIs: LE64, BE64, LE32 and BE32. + * + * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in + * bitmaps and therefore don't require any special handling. + * + * On 32-bit kernels 32-bit LE ABI orders lo word of 64-bit number in memory + * prior to hi, and 32-bit BE orders hi word prior to lo. The bitmap on the + * other hand is represented as an array of 32-bit words and the position of + * bit N may therefore be calculated as: word #(N/32) and bit #(N%32) in that + * word. For example, bit #42 is located at 10th position of 2nd word. + * It matches 32-bit LE ABI, and we can simply let the compiler store 64-bit + * values in memory as it usually does. But for BE we need to swap hi and lo + * words manually. 
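+ *
+ * As a worked check of the layout rule above: bit #42 sits in word
+ * #(42 / 32) == 1 at bit #(42 % 32) == 10, matching the "2nd word,
+ * 10th position" example.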
+ * + * With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi and + * lo parts of u64. For LE32 it does nothing, and for BE environment it swaps + * hi and lo words, as is expected by bitmap. + */ +#if __BITS_PER_LONG == 64 +#define BITMAP_FROM_U64(n) (n) +#else +#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \ + ((unsigned long) ((u64)(n) >> 32)) +#endif + +/** + * bitmap_from_u64 - Check and swap words within u64. + * @mask: source bitmap + * @dst: destination bitmap + * + * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]`` + * to read u64 mask, we will get the wrong word. + * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits, + * but we expect the lower 32-bits of u64. + */ +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h new file mode 100644 index 0000000..4f0e62c --- /dev/null +++ b/include/linux/bitops.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BITOPS_H +#define _LINUX_BITOPS_H +#include +#include + +/* Set bits in the first 'n' bytes when loaded from memory */ +#ifdef __LITTLE_ENDIAN +# define aligned_byte_mask(n) ((1UL << 8*(n))-1) +#else +# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) +#endif + +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) + +extern unsigned int __sw_hweight8(unsigned int w); +extern unsigned int __sw_hweight16(unsigned int w); +extern unsigned int __sw_hweight32(unsigned int w); +extern unsigned long __sw_hweight64(__u64 w); + +/* + * Include this here because some architectures need generic_ffs/fls in + * scope + */ +#include + +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +/* same as for_each_set_bit() but use bit as value to start with */ +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +#define for_each_clear_bit(bit, addr, size) \ + for ((bit) = find_first_zero_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +/* same as for_each_clear_bit() but use bit as value to start with */ +#define for_each_clear_bit_from(bit, addr, size) \ + for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +static inline int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +static __always_inline unsigned long hweight_long(unsigned long w) +{ + return sizeof(w) == 4 ? 
hweight32(w) : hweight64((__u64)w);
+}
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+	return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+	return (word >> (shift & 63)) | (word << ((-shift) & 63));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << (shift & 31)) | (word >> ((-shift) & 31));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 ror32(__u32 word, unsigned int shift)
+{
+	return (word >> (shift & 31)) | (word << ((-shift) & 31));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 rol16(__u16 word, unsigned int shift)
+{
+	return (word << (shift & 15)) | (word >> ((-shift) & 15));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 ror16(__u16 word, unsigned int shift)
+{
+	return (word >> (shift & 15)) | (word << ((-shift) & 15));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 rol8(__u8 word, unsigned int shift)
+{
+	return (word << (shift & 7)) | (word >> ((-shift) & 7));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 ror8(__u8 word, unsigned int shift)
+{
+	return (word >> (shift & 7)) | (word << ((-shift) & 7));
+}
+
+/**
+ * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<32) to sign bit
+ *
+ * This is safe to use for 16- and 8-bit types as well.
+ */
+static inline __s32 sign_extend32(__u32 value, int index)
+{
+	__u8 shift = 31 - index;
+	return (__s32)(value << shift) >> shift;
+}
+
+/**
+ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<64) to sign bit
+ */
+static inline __s64 sign_extend64(__u64 value, int index)
+{
+	__u8 shift = 63 - index;
+	return (__s64)(value << shift) >> shift;
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+	if (sizeof(l) == 4)
+		return fls(l);
+	return fls64(l);
+}
+
+static inline int get_count_order(unsigned int count)
+{
+	int order;
+
+	order = fls(count) - 1;
+	if (count & (count - 1))
+		order++;
+	return order;
+}
+
+/**
+ * get_count_order_long - get order after rounding @l up to power of 2
+ * @l: parameter
+ *
+ * it is the same as get_count_order() but with a long type parameter
+ */
+static inline int get_count_order_long(unsigned long l)
+{
+	if (l == 0UL)
+		return -1;
+	else if (l & (l - 1UL))
+		return (int)fls_long(l);
+	else
+		return (int)fls_long(l) - 1;
+}
+
+/**
+ * __ffs64 - find first set bit in a 64 bit word
+ * @word: The 64 bit word
+ *
+ * On 64 bit arches this is a synonym for __ffs.
+ * The result is not defined if no bits are set, so check that @word
+ * is non-zero before calling this.
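+ *
+ * Example: __ffs64(0x0000000100000000ULL) returns 32.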
+ */ +static inline unsigned long __ffs64(u64 word) +{ +#if BITS_PER_LONG == 32 + if (((u32)word) == 0UL) + return __ffs((u32)(word >> 32)) + 32; +#elif BITS_PER_LONG != 64 +#error BITS_PER_LONG not 32 or 64 +#endif + return __ffs((unsigned long)word); +} + +/** + * assign_bit - Assign value to a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * @value: the value to assign + */ +static __always_inline void assign_bit(long nr, volatile unsigned long *addr, + bool value) +{ + if (value) + set_bit(nr, addr); + else + clear_bit(nr, addr); +} + +static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, + bool value) +{ + if (value) + __set_bit(nr, addr); + else + __clear_bit(nr, addr); +} + +#ifdef __KERNEL__ + +#ifndef set_mask_bits +#define set_mask_bits(ptr, mask, bits) \ +({ \ + const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ + typeof(*(ptr)) old__, new__; \ + \ + do { \ + old__ = READ_ONCE(*(ptr)); \ + new__ = (old__ & ~mask__) | bits__; \ + } while (cmpxchg(ptr, old__, new__) != old__); \ + \ + old__; \ +}) +#endif + +#ifndef bit_clear_unless +#define bit_clear_unless(ptr, clear, test) \ +({ \ + const typeof(*(ptr)) clear__ = (clear), test__ = (test);\ + typeof(*(ptr)) old__, new__; \ + \ + do { \ + old__ = READ_ONCE(*(ptr)); \ + new__ = old__ & ~clear__; \ + } while (!(old__ & test__) && \ + cmpxchg(ptr, old__, new__) != old__); \ + \ + !(old__ & test__); \ +}) +#endif + +#ifndef find_last_bit +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The number of bits to search + * + * Returns the bit number of the last set bit, or size. + */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h new file mode 100644 index 0000000..d35b8ec --- /dev/null +++ b/include/linux/bitrev.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BITREV_H +#define _LINUX_BITREV_H + +#include + +#ifdef CONFIG_HAVE_ARCH_BITREVERSE +#include + +#define __bitrev32 __arch_bitrev32 +#define __bitrev16 __arch_bitrev16 +#define __bitrev8 __arch_bitrev8 + +#else +extern u8 const byte_rev_table[256]; +static inline u8 __bitrev8(u8 byte) +{ + return byte_rev_table[byte]; +} + +static inline u16 __bitrev16(u16 x) +{ + return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8); +} + +static inline u32 __bitrev32(u32 x) +{ + return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16); +} + +#endif /* CONFIG_HAVE_ARCH_BITREVERSE */ + +#define __bitrev8x4(x) (__bitrev32(swab32(x))) + +#define __constant_bitrev32(x) \ +({ \ + u32 ___x = x; \ + ___x = (___x >> 16) | (___x << 16); \ + ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ + ___x; \ +}) + +#define __constant_bitrev16(x) \ +({ \ + u16 ___x = x; \ + ___x = (___x >> 8) | (___x << 8); \ + ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ + ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ + ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ + ___x; \ +}) + +#define __constant_bitrev8x4(x) \ +({ \ + u32 ___x = x; \ + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 
4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ + ___x; \ +}) + +#define __constant_bitrev8(x) \ +({ \ + u8 ___x = x; \ + ___x = (___x >> 4) | (___x << 4); \ + ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ + ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ + ___x; \ +}) + +#define bitrev32(x) \ +({ \ + u32 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev32(__x) : \ + __bitrev32(__x); \ +}) + +#define bitrev16(x) \ +({ \ + u16 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev16(__x) : \ + __bitrev16(__x); \ + }) + +#define bitrev8x4(x) \ +({ \ + u32 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev8x4(__x) : \ + __bitrev8x4(__x); \ + }) + +#define bitrev8(x) \ +({ \ + u8 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev8(__x) : \ + __bitrev8(__x) ; \ + }) +#endif /* _LINUX_BITREV_H */ diff --git a/include/linux/bits.h b/include/linux/bits.h new file mode 100644 index 0000000..669d694 --- /dev/null +++ b/include/linux/bits.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITS_H +#define __LINUX_BITS_H + +#include +#include + +#define BIT(nr) (UL(1) << (nr)) +#define BIT_ULL(nr) (ULL(1) << (nr)) +#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +#define BITS_PER_BYTE 8 + +/* + * Create a contiguous bitmask starting at bit position @l and ending at + * position @h. For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
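+ * Likewise, GENMASK(7, 4) gives us the vector 0xf0.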
+ */ +#define GENMASK(h, l) \ + (((~UL(0)) - (UL(1) << (l)) + 1) & \ + (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ + (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#endif /* __LINUX_BITS_H */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h new file mode 100644 index 0000000..bed9e43 --- /dev/null +++ b/include/linux/blk-cgroup.h @@ -0,0 +1,877 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BLK_CGROUP_H +#define _BLK_CGROUP_H +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2009 Vivek Goyal + * Nauman Rafique + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ +#define BLKG_STAT_CPU_BATCH (INT_MAX / 2) + +/* Max limits for throttle policy */ +#define THROTL_IOPS_MAX UINT_MAX + +#ifdef CONFIG_BLK_CGROUP + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ, + BLKG_RWSTAT_WRITE, + BLKG_RWSTAT_SYNC, + BLKG_RWSTAT_ASYNC, + BLKG_RWSTAT_DISCARD, + + BLKG_RWSTAT_NR, + BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, +}; + +struct blkcg_gq; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + + struct radix_tree_root blkg_tree; + struct blkcg_gq __rcu *blkg_hint; + struct hlist_head blkg_list; + + struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; + + struct list_head all_blkcgs_node; +#ifdef CONFIG_CGROUP_WRITEBACK + struct list_head cgwb_list; + refcount_t cgwb_refcnt; +#endif +}; + +/* + * blkg_[rw]stat->aux_cnt is excluded for local stats but included for + * recursive. Used to carry stats of dead children. + */ +struct blkg_rwstat { + struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR]; + atomic64_t aux_cnt[BLKG_RWSTAT_NR]; +}; + +struct blkg_rwstat_sample { + u64 cnt[BLKG_RWSTAT_NR]; +}; + +/* + * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a + * request_queue (q). This is used by blkcg policies which need to track + * information per blkcg - q pair. + * + * There can be multiple active blkcg policies and each blkg:policy pair is + * represented by a blkg_policy_data which is allocated and freed by each + * policy's pd_alloc/free_fn() methods. A policy can allocate private data + * area by allocating larger data structure which embeds blkg_policy_data + * at the beginning. + */ +struct blkg_policy_data { + /* the blkg and policy id this per-policy data belongs to */ + struct blkcg_gq *blkg; + int plid; +}; + +/* + * Policies that need to keep per-blkcg data which is independent from any + * request_queue associated to it should implement cpd_alloc/free_fn() + * methods. A policy can allocate private data area by allocating larger + * data structure which embeds blkcg_policy_data at the beginning. + * cpd_init() is invoked to let each policy handle per-blkcg data. + */ +struct blkcg_policy_data { + /* the blkcg and policy id this per-policy data belongs to */ + struct blkcg *blkcg; + int plid; +}; + +/* association between a blk cgroup and a request queue */ +struct blkcg_gq { + /* Pointer to the associated request_queue */ + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + + /* + * Each blkg gets congested separately and the congestion state is + * propagated to the matching bdi_writeback_congested. 
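+	 * That link is what allows writeback congestion to be tracked
+	 * per (blkcg, bdi) pair rather than only per bdi.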
+ */ + struct bdi_writeback_congested *wb_congested; + + /* all non-root blkcg_gq's are guaranteed to have access to parent */ + struct blkcg_gq *parent; + + /* reference count */ + struct percpu_ref refcnt; + + /* is this blkg online? protected by both blkcg and q locks */ + bool online; + + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; + + struct blkg_policy_data *pd[BLKCG_MAX_POLS]; + + spinlock_t async_bio_lock; + struct bio_list async_bios; + struct work_struct async_bio_work; + + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + + struct rcu_head rcu_head; +}; + +typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); +typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); +typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, + struct request_queue *q, struct blkcg *blkcg); +typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); +typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf, + size_t size); + +struct blkcg_policy { + int plid; + /* cgroup files for the policy */ + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + + /* operations */ + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_init_cpd_fn *cpd_init_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_bind_cpd_fn *cpd_bind_fn; + + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +extern struct blkcg blkcg_root; +extern struct cgroup_subsys_state * const blkcg_root_css; +extern bool blkcg_debug_stats; + +struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, + struct request_queue *q, bool update_hint); +struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); +int blkcg_init_queue(struct request_queue *q); +void blkcg_drain_queue(struct request_queue *q); +void blkcg_exit_queue(struct request_queue *q); + +/* Blkio controller policy registration */ +int blkcg_policy_register(struct blkcg_policy *pol); +void blkcg_policy_unregister(struct blkcg_policy *pol); +int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol); +void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol); + +static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat, + unsigned int idx) +{ + return atomic64_read(&rwstat->aux_cnt[idx]) + + percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]); +} + +const char *blkg_dev_name(struct blkcg_gq *blkg); +void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, + u64 (*prfill)(struct seq_file *, + struct blkg_policy_data *, int), + const struct blkcg_policy *pol, int data, + bool show_total); +u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); +u64 __blkg_prfill_rwstat(struct seq_file 
*sf, struct blkg_policy_data *pd, + const struct blkg_rwstat_sample *rwstat); +u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off); +int blkg_print_stat_bytes(struct seq_file *sf, void *v); +int blkg_print_stat_ios(struct seq_file *sf, void *v); +int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v); +int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v); + +void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, + int off, struct blkg_rwstat_sample *sum); + +struct blkg_conf_ctx { + struct gendisk *disk; + struct blkcg_gq *blkg; + char *body; +}; + +struct gendisk *blkcg_conf_get_disk(char **inputp); +int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + char *input, struct blkg_conf_ctx *ctx); +void blkg_conf_finish(struct blkg_conf_ctx *ctx); + +/** + * blkcg_css - find the current css + * + * Find the css associated with either the kthread or the current task. + * This may return a dying css, so it is up to the caller to use tryget logic + * to confirm it is alive and well. + */ +static inline struct cgroup_subsys_state *blkcg_css(void) +{ + struct cgroup_subsys_state *css; + + css = kthread_blkcg(); + if (css) + return css; + return task_css(current, io_cgrp_id); +} + +static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct blkcg, css) : NULL; +} + +/** + * __bio_blkcg - internal, inconsistent version to get blkcg + * + * DO NOT USE. + * This function is inconsistent and consequently is dangerous to use. The + * first part of the function returns a blkcg where a reference is owned by the + * bio. This means it does not need to be rcu protected as it cannot go away + * with the bio owning a reference to it. However, the latter potentially gets + * it from task_css(). This can race against task migration and the cgroup + * dying. It is also semantically different as it must be called rcu protected + * and is susceptible to failure when trying to get a reference to it. + * Therefore, it is not ok to assume that *_get() will always succeed on the + * blkcg returned here. + */ +static inline struct blkcg *__bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return css_to_blkcg(blkcg_css()); +} + +/** + * bio_blkcg - grab the blkcg associated with a bio + * @bio: target bio + * + * This returns the blkcg associated with a bio, %NULL if not associated. + * Callers are expected to either handle %NULL or know association has been + * done prior to calling this. + */ +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return NULL; +} + +static inline bool blk_cgroup_congested(void) +{ + struct cgroup_subsys_state *css; + bool ret = false; + + rcu_read_lock(); + css = kthread_blkcg(); + if (!css) + css = task_css(current, io_cgrp_id); + while (css) { + if (atomic_read(&css->cgroup->congestion_count)) { + ret = true; + break; + } + css = css->parent; + } + rcu_read_unlock(); + return ret; +} + +/** + * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg + * @return: true if this bio needs to be submitted with the root blkg context. + * + * In order to avoid priority inversions we sometimes need to issue a bio as if + * it were attached to the root blkg, and then backcharge to the actual owning + * blkg. 
The idea is we do bio_blkcg() to look up the actual context for the + * bio and attach the appropriate blkg to the bio. Then we call this helper and + * if it is true run with the root blkg for that queue and then do any + * backcharging to the originating cgroup once the io is complete. + */ +static inline bool bio_issue_as_root_blkg(struct bio *bio) +{ + return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0; +} + +/** + * blkcg_parent - get the parent of a blkcg + * @blkcg: blkcg of interest + * + * Return the parent blkcg of @blkcg. Can be called anytime. + */ +static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) +{ + return css_to_blkcg(blkcg->css.parent); +} + +/** + * __blkg_lookup - internal version of blkg_lookup() + * @blkcg: blkcg of interest + * @q: request_queue of interest + * @update_hint: whether to update lookup hint with the result or not + * + * This is internal version and shouldn't be used by policy + * implementations. Looks up blkgs for the @blkcg - @q pair regardless of + * @q's bypass state. If @update_hint is %true, the caller should be + * holding @q->queue_lock and lookup hint is updated on success. + */ +static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, + struct request_queue *q, + bool update_hint) +{ + struct blkcg_gq *blkg; + + if (blkcg == &blkcg_root) + return q->root_blkg; + + blkg = rcu_dereference(blkcg->blkg_hint); + if (blkg && blkg->q == q) + return blkg; + + return blkg_lookup_slowpath(blkcg, q, update_hint); +} + +/** + * blkg_lookup - lookup blkg for the specified blkcg - q pair + * @blkcg: blkcg of interest + * @q: request_queue of interest + * + * Lookup blkg for the @blkcg - @q pair. This function should be called + * under RCU read lock. + */ +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, + struct request_queue *q) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return __blkg_lookup(blkcg, q, false); +} + +/** + * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair + * @q: request_queue of interest + * + * Lookup blkg for @q at the root level. See also blkg_lookup(). + */ +static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ + return q->root_blkg; +} + +/** + * blkg_to_pdata - get policy private data + * @blkg: blkg of interest + * @pol: policy of interest + * + * Return pointer to private data associated with the @blkg-@pol pair. + */ +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) +{ + return blkg ? blkg->pd[pol->plid] : NULL; +} + +static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, + struct blkcg_policy *pol) +{ + return blkcg ? blkcg->cpd[pol->plid] : NULL; +} + +/** + * pdata_to_blkg - get blkg associated with policy private data + * @pd: policy private data of interest + * + * @pd is policy private data. Determine the blkg it's associated with. + */ +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) +{ + return pd ? pd->blkg : NULL; +} + +static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) +{ + return cpd ? cpd->blkcg : NULL; +} + +extern void blkcg_destroy_blkgs(struct blkcg *blkcg); + +#ifdef CONFIG_CGROUP_WRITEBACK + +/** + * blkcg_cgwb_get - get a reference for blkcg->cgwb_list + * @blkcg: blkcg of interest + * + * This is used to track the number of active wb's related to a blkcg. 
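+ * Each blkcg_cgwb_get() must be balanced by a matching blkcg_cgwb_put().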
+ */
+static inline void blkcg_cgwb_get(struct blkcg *blkcg)
+{
+	refcount_inc(&blkcg->cgwb_refcnt);
+}
+
+/**
+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ * When this count goes to zero, all active wbs have finished so the
+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().
+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue.
+ */
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
+		blkcg_destroy_blkgs(blkcg);
+}
+
+#else
+
+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
+
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+	/* wb isn't being accounted, so trigger destruction right away */
+	blkcg_destroy_blkgs(blkcg);
+}
+
+#endif
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+}
+
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+	percpu_ref_get(&blkg->refcnt);
+}
+
+/**
+ * blkg_tryget - try and get a blkg reference
+ * @blkg: blkg to get
+ *
+ * This is for use when doing an RCU lookup of the blkg. We may be in the midst
+ * of freeing this blkg, so we can only use it if the refcnt is not zero.
+ */
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
+{
+	return blkg && percpu_ref_tryget(&blkg->refcnt);
+}
+
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closest blkg
+ * @blkg: blkg to get
+ *
+ * This needs to be called RCU protected. As the failure mode here is to walk
+ * up the blkg tree, this ensures that the blkg->parent pointers are always
+ * valid. This returns the blkg that it ended up taking a reference on or %NULL
+ * if no reference was taken.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+	struct blkcg_gq *ret_blkg = NULL;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	while (blkg) {
+		if (blkg_tryget(blkg)) {
+			ret_blkg = blkg;
+			break;
+		}
+		blkg = blkg->parent;
+	}
+
+	return ret_blkg;
+}
+
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+	percpu_ref_put(&blkg->refcnt);
+}
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
+ * read locked. If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs. The caller may
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
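+ *
+ * Illustrative use (process_blkg() is a hypothetical helper):
+ *
+ *	struct blkcg_gq *d_blkg;
+ *	struct cgroup_subsys_state *pos_css;
+ *
+ *	rcu_read_lock();
+ *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
+ *		process_blkg(d_blkg);
+ *	rcu_read_unlock();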
+ */ +#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +/** + * blkg_for_each_descendant_post - post-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Similar to blkg_for_each_descendant_pre() but performs post-order + * traversal instead. Synchronization rules are the same. @p_blkg is + * included in the iteration and the last node to be visited. + */ +#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) +{ + int i, ret; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) { + ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp); + if (ret) { + while (--i >= 0) + percpu_counter_destroy(&rwstat->cpu_cnt[i]); + return ret; + } + atomic64_set(&rwstat->aux_cnt[i], 0); + } + return 0; +} + +static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + percpu_counter_destroy(&rwstat->cpu_cnt[i]); +} + +/** + * blkg_rwstat_add - add a value to a blkg_rwstat + * @rwstat: target blkg_rwstat + * @op: REQ_OP and flags + * @val: value to add + * + * Add @val to @rwstat. The counters are chosen according to @rw. The + * caller is responsible for synchronizing calls to this function. + */ +static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, + unsigned int op, uint64_t val) +{ + struct percpu_counter *cnt; + + if (op_is_discard(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD]; + else if (op_is_write(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; + else + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; + + percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH); + + if (op_is_sync(op)) + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; + else + cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; + + percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH); +} + +/** + * blkg_rwstat_read - read the current values of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Read the current snapshot of @rwstat and return it in the aux counts. + */ +static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat, + struct blkg_rwstat_sample *result) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + result->cnt[i] = + percpu_counter_sum_positive(&rwstat->cpu_cnt[i]); +} + +/** + * blkg_rwstat_total - read the total count of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Return the total count of @rwstat regardless of the IO direction. This + * function can be called without synchronization and takes care of u64 + * atomicity. 
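+ *
+ * For example, blkg_rwstat_total(&blkg->stat_bytes) yields the combined
+ * byte count for reads and writes.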
+ */ +static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) +{ + struct blkg_rwstat_sample tmp = { }; + + blkg_rwstat_read(rwstat, &tmp); + return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; +} + +/** + * blkg_rwstat_reset - reset a blkg_rwstat + * @rwstat: blkg_rwstat to reset + */ +static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) +{ + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) { + percpu_counter_set(&rwstat->cpu_cnt[i], 0); + atomic64_set(&rwstat->aux_cnt[i], 0); + } +} + +/** + * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count + * @to: the destination blkg_rwstat + * @from: the source + * + * Add @from's count including the aux one to @to's aux count. + */ +static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, + struct blkg_rwstat *from) +{ + u64 sum[BLKG_RWSTAT_NR]; + int i; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]); + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]), + &to->aux_cnt[i]); +} + +#ifdef CONFIG_BLK_DEV_THROTTLING +extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio); +#else +static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio) { return false; } +#endif + +bool __blkcg_punt_bio_submit(struct bio *bio); + +static inline bool blkcg_punt_bio_submit(struct bio *bio) +{ + if (bio->bi_opf & REQ_CGROUP_PUNT) + return __blkcg_punt_bio_submit(bio); + else + return false; +} + +static inline void blkcg_bio_issue_init(struct bio *bio) +{ + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); +} + +static inline bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) +{ + struct blkcg_gq *blkg; + bool throtl = false; + + rcu_read_lock(); + + if (!bio->bi_blkg) { + char b[BDEVNAME_SIZE]; + + WARN_ONCE(1, + "no blkg associated for bio on block-device: %s\n", + bio_devname(bio, b)); + bio_associate_blkg(bio); + } + + blkg = bio->bi_blkg; + + throtl = blk_throtl_bio(q, blkg, bio); + + if (!throtl) { + /* + * If the bio is flagged with BIO_QUEUE_ENTERED it means this + * is a split bio and we would have already accounted for the + * size of the bio. + */ + if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, + bio->bi_iter.bi_size); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); + } + + blkcg_bio_issue_init(bio); + + rcu_read_unlock(); + return !throtl; +} + +static inline void blkcg_use_delay(struct blkcg_gq *blkg) +{ + if (atomic_add_return(1, &blkg->use_delay) == 1) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); +} + +static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + + if (old == 0) + return 0; + + /* + * We do this song and dance because we can race with somebody else + * adding or removing delay. If we just did an atomic_dec we'd end up + * negative and we'd already be in trouble. We need to subtract 1 and + * then check to see if we were the last delay so we can drop the + * congestion count on the cgroup. 
+ */ + while (old) { + int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1); + if (cur == old) + break; + old = cur; + } + + if (old == 0) + return 0; + if (old == 1) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + return 1; +} + +static inline void blkcg_clear_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + if (!old) + return; + /* We only want 1 person clearing the congestion count for this blkg. */ + while (old) { + int cur = atomic_cmpxchg(&blkg->use_delay, old, 0); + if (cur == old) { + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + break; + } + old = cur; + } +} + +void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); +void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); +void blkcg_maybe_throttle_current(void); +#else /* CONFIG_BLK_CGROUP */ + +struct blkcg { +}; + +struct blkg_policy_data { +}; + +struct blkcg_policy_data { +}; + +struct blkcg_gq { +}; + +struct blkcg_policy { +}; + +#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) + +static inline void blkcg_maybe_throttle_current(void) { } +static inline bool blk_cgroup_congested(void) { return false; } + +#ifdef CONFIG_BLOCK + +static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { } + +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } +static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ return NULL; } +static inline int blkcg_init_queue(struct request_queue *q) { return 0; } +static inline void blkcg_drain_queue(struct request_queue *q) { } +static inline void blkcg_exit_queue(struct request_queue *q) { } +static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } +static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } +static inline int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { return 0; } +static inline void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { } + +static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) { return NULL; } +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } +static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } +static inline void blkg_get(struct blkcg_gq *blkg) { } +static inline void blkg_put(struct blkcg_gq *blkg) { } + +static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } +static inline void blkcg_bio_issue_init(struct bio *bio) { } +static inline bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) { return true; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + +#endif /* CONFIG_BLOCK */ +#endif /* CONFIG_BLK_CGROUP */ +#endif /* _BLK_CGROUP_H */ diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h new file mode 100644 index 0000000..0b1f45c --- /dev/null +++ b/include/linux/blk-mq-pci.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLK_MQ_PCI_H +#define _LINUX_BLK_MQ_PCI_H + +struct blk_mq_queue_map; +struct pci_dev; + +int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, + int offset); + +#endif /* _LINUX_BLK_MQ_PCI_H */ diff --git a/include/linux/blk-mq-rdma.h 
b/include/linux/blk-mq-rdma.h
new file mode 100644
index 0000000..5cc5f0f
--- /dev/null
+++ b/include/linux/blk-mq-rdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_MQ_RDMA_H
+#define _LINUX_BLK_MQ_RDMA_H
+
+struct blk_mq_tag_set;
+struct ib_device;
+
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+		struct ib_device *dev, int first_vec);
+
+#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
new file mode 100644
index 0000000..687ae28
--- /dev/null
+++ b/include/linux/blk-mq-virtio.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_MQ_VIRTIO_H
+#define _LINUX_BLK_MQ_VIRTIO_H
+
+struct blk_mq_queue_map;
+struct virtio_device;
+
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+		struct virtio_device *vdev, int first_vec);
+
+#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
new file mode 100644
index 0000000..0bf056d
--- /dev/null
+++ b/include/linux/blk-mq.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BLK_MQ_H
+#define BLK_MQ_H
+
+#include <linux/blkdev.h>
+#include <linux/sbitmap.h>
+#include <linux/srcu.h>
+
+struct blk_mq_tags;
+struct blk_flush_queue;
+
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ */
+struct blk_mq_hw_ctx {
+	struct {
+		spinlock_t	lock;
+		struct list_head	dispatch;
+		unsigned long	state;	/* BLK_MQ_S_* flags */
+	} ____cacheline_aligned_in_smp;
+
+	struct delayed_work	run_work;
+	cpumask_var_t	cpumask;
+	int	next_cpu;
+	int	next_cpu_batch;
+
+	unsigned long	flags;	/* BLK_MQ_F_* flags */
+
+	void	*sched_data;
+	struct request_queue	*queue;
+	struct blk_flush_queue	*fq;
+
+	void	*driver_data;
+
+	struct sbitmap	ctx_map;
+
+	struct blk_mq_ctx	*dispatch_from;
+	unsigned int	dispatch_busy;
+
+	unsigned short	type;
+	unsigned short	nr_ctx;
+	struct blk_mq_ctx	**ctxs;
+
+	spinlock_t	dispatch_wait_lock;
+	wait_queue_entry_t	dispatch_wait;
+	atomic_t	wait_index;
+
+	struct blk_mq_tags	*tags;
+	struct blk_mq_tags	*sched_tags;
+
+	unsigned long	queued;
+	unsigned long	run;
+#define BLK_MQ_MAX_DISPATCH_ORDER	7
+	unsigned long	dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+
+	unsigned int	numa_node;
+	unsigned int	queue_num;
+
+	atomic_t	nr_active;
+
+	struct hlist_node	cpuhp_dead;
+	struct kobject	kobj;
+
+	unsigned long	poll_considered;
+	unsigned long	poll_invoked;
+	unsigned long	poll_success;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+	struct dentry	*debugfs_dir;
+	struct dentry	*sched_debugfs_dir;
+#endif
+
+	struct list_head	hctx_list;
+
+	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
+	struct srcu_struct	srcu[0];
+};
+
+struct blk_mq_queue_map {
+	unsigned int *mq_map;
+	unsigned int nr_queues;
+	unsigned int queue_offset;
+};
+
+enum hctx_type {
+	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
+	HCTX_TYPE_READ,		/* just for READ I/O */
+	HCTX_TYPE_POLL,		/* polled I/O of any kind */
+
+	HCTX_MAX_TYPES,
+};
+
+struct blk_mq_tag_set {
+	/*
+	 * map[] holds ctx -> hctx mappings, one map exists for each type
+	 * that the driver wishes to support. There are no restrictions
+	 * on maps being of the same size, and it's perfectly legal to
+	 * share maps between types.
+ */ + struct blk_mq_queue_map map[HCTX_MAX_TYPES]; + unsigned int nr_maps; /* nr entries in map[] */ + const struct blk_mq_ops *ops; + unsigned int nr_hw_queues; /* nr hw queues across maps */ + unsigned int queue_depth; /* max hw supported */ + unsigned int reserved_tags; + unsigned int cmd_size; /* per-request extra data */ + int numa_node; + unsigned int timeout; + unsigned int flags; /* BLK_MQ_F_* */ + void *driver_data; + + struct blk_mq_tags **tags; + + struct mutex tag_list_lock; + struct list_head tag_list; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, + const struct blk_mq_queue_data *); +typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *); +typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *); +typedef void (put_budget_fn)(struct blk_mq_hw_ctx *); +typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); +typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); +typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, + unsigned int, unsigned int); +typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, + unsigned int); + +typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, + bool); +typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); +typedef int (poll_fn)(struct blk_mq_hw_ctx *); +typedef int (map_queues_fn)(struct blk_mq_tag_set *set); +typedef bool (busy_fn)(struct request_queue *); +typedef void (complete_fn)(struct request *); +typedef void (cleanup_rq_fn)(struct request *); + + +struct blk_mq_ops { + /* + * Queue request + */ + queue_rq_fn *queue_rq; + + /* + * If a driver uses bd->last to judge when to submit requests to + * hardware, it must define this function. In case of errors that + * make us stop issuing further requests, this hook serves the + * purpose of kicking the hardware (which the last request otherwise + * would have done). + */ + commit_rqs_fn *commit_rqs; + + /* + * Reserve budget before queue request, once .queue_rq is + * run, it is driver's responsibility to release the + * reserved budget. Also we have to handle failure case + * of .get_budget for avoiding I/O deadlock. + */ + get_budget_fn *get_budget; + put_budget_fn *put_budget; + + /* + * Called on request timeout + */ + timeout_fn *timeout; + + /* + * Called to poll for completion of a specific tag. + */ + poll_fn *poll; + + complete_fn *complete; + + /* + * Called when the block layer side of a hardware queue has been + * set up, allowing the driver to allocate/init matching structures. + * Ditto for exit/teardown. + */ + init_hctx_fn *init_hctx; + exit_hctx_fn *exit_hctx; + + /* + * Called for every command allocated by the block layer to allow + * the driver to set up driver specific data. + * + * Tag greater than or equal to queue_depth is for setting up + * flush request. + * + * Ditto for exit/teardown. 
+ */ + init_request_fn *init_request; + exit_request_fn *exit_request; + /* Called from inside blk_get_request() */ + void (*initialize_rq_fn)(struct request *rq); + + /* + * Called before freeing one request which isn't completed yet, + * and usually for freeing the driver private data + */ + cleanup_rq_fn *cleanup_rq; + + /* + * If set, returns whether or not this queue currently is busy + */ + busy_fn *busy; + + map_queues_fn *map_queues; + +#ifdef CONFIG_BLK_DEBUG_FS + /* + * Used by the debugfs implementation to show driver-specific + * information about a request. + */ + void (*show_rq)(struct seq_file *m, struct request *rq); +#endif +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1 << 0, + BLK_MQ_F_TAG_SHARED = 1 << 1, + BLK_MQ_F_BLOCKING = 1 << 5, + BLK_MQ_F_NO_SCHED = 1 << 6, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, + + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + + BLK_MQ_MAX_DEPTH = 10240, + + BLK_MQ_CPU_WORK_BATCH = 8, +}; +#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ + ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ + ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) +#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \ + ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ + << BLK_MQ_F_ALLOC_POLICY_START_BIT) + +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q, + bool elevator_init); +struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, + unsigned int queue_depth, + unsigned int set_flags); +void blk_mq_unregister_dev(struct device *, struct request_queue *); + +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +void blk_mq_free_tag_set(struct blk_mq_tag_set *set); + +void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); + +void blk_mq_free_request(struct request *rq); +bool blk_mq_can_queue(struct blk_mq_hw_ctx *); + +bool blk_mq_queue_inflight(struct request_queue *q); + +enum { + /* return when out of requests */ + BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), + /* allocate from reserved pool */ + BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), + /* allocate internal/sched tag */ + BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2), + /* set RQF_PREEMPT */ + BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), +}; + +struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, + blk_mq_req_flags_t flags); +struct request *blk_mq_alloc_request_hctx(struct request_queue *q, + unsigned int op, blk_mq_req_flags_t flags, + unsigned int hctx_idx); +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, +}; + +u32 blk_mq_unique_tag(struct request *rq); + +static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) +{ + return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; +} + +static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) +{ + return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; +} + + +int blk_mq_request_started(struct request *rq); +int blk_mq_request_completed(struct request *rq); +void blk_mq_start_request(struct request *rq); +void blk_mq_end_request(struct request *rq, blk_status_t error); +void __blk_mq_end_request(struct request *rq, blk_status_t error); + +void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); +void 
blk_mq_kick_requeue_list(struct request_queue *q); +void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); +bool blk_mq_complete_request(struct request *rq); +bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, + struct bio *bio, unsigned int nr_segs); +bool blk_mq_queue_stopped(struct request_queue *q); +void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); +void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); +void blk_mq_stop_hw_queues(struct request_queue *q); +void blk_mq_start_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_quiesce_queue(struct request_queue *q); +void blk_mq_unquiesce_queue(struct request_queue *q); +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); +bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); +void blk_mq_run_hw_queues(struct request_queue *q, bool async); +void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, + busy_tag_iter_fn *fn, void *priv); +void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); +void blk_mq_freeze_queue(struct request_queue *q); +void blk_mq_unfreeze_queue(struct request_queue *q); +void blk_freeze_queue_start(struct request_queue *q); +void blk_mq_freeze_queue_wait(struct request_queue *q); +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout); + +int blk_mq_map_queues(struct blk_mq_queue_map *qmap); +void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); + +void blk_mq_quiesce_queue_nowait(struct request_queue *q); + +unsigned int blk_mq_rq_cpu(struct request *rq); + +/* + * Driver command data is immediately after the request. So subtract request + * size to get back to the original request, add request size to get the PDU. 
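/*
 * Illustrative sketch, not part of this diff: two common call patterns for
 * the APIs above. Freezing drains in-flight requests and blocks new ones,
 * which is the usual way to reconfigure a live queue; blk_mq_map_queues()
 * is the generic fallback for a driver's ->map_queues() callback. The
 * example_* names are made up.
 */
static void example_reconfigure_queue(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* wait out all in-flight requests */
	/* ... reconfigure queue or device state here ... */
	blk_mq_unfreeze_queue(q);	/* resume normal submission */
}

static int example_map_queues(struct blk_mq_tag_set *set)
{
	/* spread the default map's hardware queues over the online CPUs */
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}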
+ */
+static inline struct request *blk_mq_rq_from_pdu(void *pdu)
+{
+	return pdu - sizeof(struct request);
+}
+static inline void *blk_mq_rq_to_pdu(struct request *rq)
+{
+	return rq + 1;
+}
+
+#define queue_for_each_hw_ctx(q, hctx, i)				\
+	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
+	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+
+#define hctx_for_each_ctx(hctx, ctx, i)					\
+	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
+	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+
+static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+		struct request *rq)
+{
+	if (rq->tag != -1)
+		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+
+	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
+			BLK_QC_T_INTERNAL;
+}
+
+static inline void blk_mq_cleanup_rq(struct request *rq)
+{
+	if (rq->q->mq_ops->cleanup_rq)
+		rq->q->mq_ops->cleanup_rq(rq);
+}
+
+#endif
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 0000000..b80c65a
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+				       struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
new file mode 100644
index 0000000..d688b96
--- /dev/null
+++ b/include/linux/blk_types.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Block data types and constants.  Directly include this file only to
+ * break include dependency loop.
+ */
+#ifndef __LINUX_BLK_TYPES_H
+#define __LINUX_BLK_TYPES_H
+
+#include <linux/types.h>
+#include <linux/bvec.h>
+#include <linux/ktime.h>
+
+struct bio_set;
+struct bio;
+struct bio_integrity_payload;
+struct page;
+struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
+typedef void (bio_end_io_t) (struct bio *);
+
+/*
+ * Block error status values.  See block/blk-core:blk_errors for the details.
+ * Alpha cannot write a byte atomically, so we need to use 32-bit value.
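/*
 * Illustrative sketch, not part of this diff: a minimal blk-mq driver
 * skeleton tying together the pieces above - a per-command PDU sized via
 * the tag set's cmd_size, recovered with blk_mq_rq_to_pdu(), and the
 * mandatory ->queue_rq() implementation. Everything named example_* is
 * made up and error handling is trimmed.
 */
struct example_cmd {
	u32 opcode;		/* driver-private per-request data */
};

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->opcode = 0;
	blk_mq_start_request(rq);	/* mark the request in flight */
	/* ... hand @rq to hardware; call blk_mq_end_request() on completion ... */
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq = example_queue_rq,
};
/*
 * A tag set would then set .ops = &example_mq_ops and
 * .cmd_size = sizeof(struct example_cmd) before calling
 * blk_mq_alloc_tag_set() and blk_mq_init_queue().
 */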
+ */ +#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) +typedef u32 __bitwise blk_status_t; +#else +typedef u8 __bitwise blk_status_t; +#endif +#define BLK_STS_OK 0 +#define BLK_STS_NOTSUPP ((__force blk_status_t)1) +#define BLK_STS_TIMEOUT ((__force blk_status_t)2) +#define BLK_STS_NOSPC ((__force blk_status_t)3) +#define BLK_STS_TRANSPORT ((__force blk_status_t)4) +#define BLK_STS_TARGET ((__force blk_status_t)5) +#define BLK_STS_NEXUS ((__force blk_status_t)6) +#define BLK_STS_MEDIUM ((__force blk_status_t)7) +#define BLK_STS_PROTECTION ((__force blk_status_t)8) +#define BLK_STS_RESOURCE ((__force blk_status_t)9) +#define BLK_STS_IOERR ((__force blk_status_t)10) + +/* hack for device mapper, don't use elsewhere: */ +#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) + +#define BLK_STS_AGAIN ((__force blk_status_t)12) + +/* + * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if + * device related resources are unavailable, but the driver can guarantee + * that the queue will be rerun in the future once resources become + * available again. This is typically the case for device specific + * resources that are consumed for IO. If the driver fails allocating these + * resources, we know that inflight (or pending) IO will free these + * resource upon completion. + * + * This is different from BLK_STS_RESOURCE in that it explicitly references + * a device specific resource. For resources of wider scope, allocation + * failure can happen without having pending IO. This means that we can't + * rely on request completions freeing these resources, as IO may not be in + * flight. Examples of that are kernel memory allocations, DMA mappings, or + * any other system wide resources. + */ +#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) + +/** + * blk_path_error - returns true if error may be path related + * @error: status the request was completed with + * + * Description: + * This classifies block error status into non-retryable errors and ones + * that may be successful if retried on a failover path. 
+ * + * Return: + * %false - retrying failover path will not help + * %true - may succeed if retried + */ +static inline bool blk_path_error(blk_status_t error) +{ + switch (error) { + case BLK_STS_NOTSUPP: + case BLK_STS_NOSPC: + case BLK_STS_TARGET: + case BLK_STS_NEXUS: + case BLK_STS_MEDIUM: + case BLK_STS_PROTECTION: + return false; + } + + /* Anything else could be a path failure, so should be retried */ + return true; +} + +/* + * From most significant bit: + * 1 bit: reserved for other usage, see below + * 12 bits: original size of bio + * 51 bits: issue time of bio + */ +#define BIO_ISSUE_RES_BITS 1 +#define BIO_ISSUE_SIZE_BITS 12 +#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS) +#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS) +#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1) +#define BIO_ISSUE_SIZE_MASK \ + (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT) +#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1)) + +/* Reserved bit for blk-throtl */ +#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63) + +struct bio_issue { + u64 value; +}; + +static inline u64 __bio_issue_time(u64 time) +{ + return time & BIO_ISSUE_TIME_MASK; +} + +static inline u64 bio_issue_time(struct bio_issue *issue) +{ + return __bio_issue_time(issue->value); +} + +static inline sector_t bio_issue_size(struct bio_issue *issue) +{ + return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT); +} + +static inline void bio_issue_init(struct bio_issue *issue, + sector_t size) +{ + size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1; + issue->value = ((issue->value & BIO_ISSUE_RES_MASK) | + (ktime_get_ns() & BIO_ISSUE_TIME_MASK) | + ((u64)size << BIO_ISSUE_SIZE_SHIFT)); +} + +/* + * main unit of I/O for the block layer and lower layers (ie drivers and + * stacking drivers) + */ +struct bio { + struct bio *bi_next; /* request queue link */ + struct gendisk *bi_disk; + unsigned int bi_opf; /* bottom bits req flags, + * top bits REQ_OP. Use + * accessors. + */ + unsigned short bi_flags; /* status, etc and bvec pool number */ + unsigned short bi_ioprio; + unsigned short bi_write_hint; + blk_status_t bi_status; + u8 bi_partno; + + struct bvec_iter bi_iter; + + atomic_t __bi_remaining; + bio_end_io_t *bi_end_io; + + void *bi_private; +#ifdef CONFIG_BLK_CGROUP + /* + * Represents the association of the css and request_queue for the bio. + * If a bio goes direct to device, it will not have a blkg as it will + * not have a request_queue associated with it. The reference is put + * on release of the bio. + */ + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; +#ifdef CONFIG_BLK_CGROUP_IOCOST + u64 bi_iocost_cost; +#endif +#endif + union { +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload *bi_integrity; /* data integrity */ +#endif + }; + + unsigned short bi_vcnt; /* how many bio_vec's */ + + /* + * Everything starting with bi_max_vecs will be preserved by bio_reset() + */ + + unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ + + atomic_t __bi_cnt; /* pin count */ + + struct bio_vec *bi_io_vec; /* the actual vec list */ + + struct bio_set *bi_pool; + + /* + * We can inline a number of vecs at the end of the bio, to avoid + * double allocations for a small number of bio_vecs. This member + * MUST obviously be kept at the very end of the bio. 
+ */
+	struct bio_vec	bi_inline_vecs[0];
+};
+
+#define BIO_RESET_BYTES	offsetof(struct bio, bi_max_vecs)
+
+/*
+ * bio flags
+ */
+enum {
+	BIO_NO_PAGE_REF,	/* don't put pages on bio release */
+	BIO_CLONED,		/* doesn't own data */
+	BIO_BOUNCED,		/* bio is a bounce bio */
+	BIO_USER_MAPPED,	/* contains user pages */
+	BIO_NULL_MAPPED,	/* contains invalid user pages */
+	BIO_WORKINGSET,		/* contains userspace workingset pages */
+	BIO_QUIET,		/* Make BIO Quiet */
+	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
+	BIO_REFFED,		/* bio has elevated ->bi_cnt */
+	BIO_THROTTLED,		/* This bio has already been subjected to
+				 * throttling rules. Don't do it again. */
+	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
+				 * of this bio. */
+	BIO_QUEUE_ENTERED,	/* can use blk_queue_enter_live() */
+	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
+	BIO_FLAG_LAST
+};
+
+/* See BVEC_POOL_OFFSET below before adding new flags */
+
+/*
+ * We support 6 different bvec pools, the last one is magic in that it
+ * is backed by a mempool.
+ */
+#define BVEC_POOL_NR	6
+#define BVEC_POOL_MAX	(BVEC_POOL_NR - 1)
+
+/*
+ * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
+ * 1 to the actual index so that 0 indicates that there are no bvecs to be
+ * freed.
+ */
+#define BVEC_POOL_BITS	(3)
+#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
+#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
+#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
+# error "BVEC_POOL_BITS is too small"
+#endif
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * only BVEC_POOL_IDX()
+ */
+#define BIO_RESET_BITS	BVEC_POOL_OFFSET
+
+typedef __u32 __bitwise blk_mq_req_flags_t;
+
+/*
+ * Operations and flags common to the bio and request structures.
+ * We use 8 bits for encoding the operation, and the remaining 24 for flags.
+ *
+ * The least significant bit of the operation number indicates the data
+ * transfer direction:
+ *
+ *   - if the least significant bit is set transfers are TO the device
+ *   - if the least significant bit is not set transfers are FROM the device
+ *
+ * If an operation does not transfer data the least significant bit has no
+ * meaning.
+ */
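/*
 * Illustrative sketch, not part of this diff: spot-checking the direction
 * rule described above against the REQ_OP_* values defined next. This is
 * what op_is_write() further down relies on. A made-up example assuming
 * BUILD_BUG_ON() from <linux/build_bug.h> is available.
 */
static inline void example_op_direction(void)
{
	BUILD_BUG_ON(REQ_OP_READ & 1);		/* even: data FROM the device */
	BUILD_BUG_ON(!(REQ_OP_WRITE & 1));	/* odd: data TO the device */
	BUILD_BUG_ON(!(REQ_OP_WRITE_ZEROES & 1));
}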
+#define REQ_OP_BITS	8
+#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS	24
+
+enum req_opf {
+	/* read sectors from the device */
+	REQ_OP_READ	= 0,
+	/* write sectors to the device */
+	REQ_OP_WRITE	= 1,
+	/* flush the volatile write cache */
+	REQ_OP_FLUSH	= 2,
+	/* discard sectors */
+	REQ_OP_DISCARD	= 3,
+	/* securely erase sectors */
+	REQ_OP_SECURE_ERASE = 5,
+	/* reset a zone write pointer */
+	REQ_OP_ZONE_RESET = 6,
+	/* write the same sector many times */
+	REQ_OP_WRITE_SAME = 7,
+	/* reset all the zones present on the device */
+	REQ_OP_ZONE_RESET_ALL = 8,
+	/* write the zero filled sector many times */
+	REQ_OP_WRITE_ZEROES = 9,
+
+	/* SCSI passthrough using struct scsi_request */
+	REQ_OP_SCSI_IN	= 32,
+	REQ_OP_SCSI_OUT	= 33,
+	/* Driver private requests */
+	REQ_OP_DRV_IN	= 34,
+	REQ_OP_DRV_OUT	= 35,
+
+	REQ_OP_LAST,
+};
+
+enum req_flag_bits {
+	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
+		REQ_OP_BITS,
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+	__REQ_SYNC,		/* request is sync (sync write or read) */
+	__REQ_META,		/* metadata io request */
+	__REQ_PRIO,		/* boost priority in cfq */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_IDLE,		/* anticipate more IO after this one */
+	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_PREFLUSH,		/* request for cache flush */
+	__REQ_RAHEAD,		/* read ahead, can fail anytime */
+	__REQ_BACKGROUND,	/* background IO */
+	__REQ_NOWAIT,		/* Don't wait if request will block */
+	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
+	/*
+	 * When a shared kthread needs to issue a bio for a cgroup, doing
+	 * so synchronously can lead to priority inversions as the kthread
+	 * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
+	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
+	 * work item to avoid such priority inversions.
+	 */
+	__REQ_CGROUP_PUNT,
+
+	/* command specific flags for REQ_OP_WRITE_ZEROES: */
+	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
+
+	__REQ_HIPRI,
+
+	/* for driver use */
+	__REQ_DRV,
+	__REQ_SWAP,		/* swapping request.
*/ + __REQ_NR_BITS, /* stops here */ +}; + +#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) +#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) +#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) +#define REQ_SYNC (1ULL << __REQ_SYNC) +#define REQ_META (1ULL << __REQ_META) +#define REQ_PRIO (1ULL << __REQ_PRIO) +#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) +#define REQ_IDLE (1ULL << __REQ_IDLE) +#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) +#define REQ_FUA (1ULL << __REQ_FUA) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) +#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) +#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) +#define REQ_NOWAIT (1ULL << __REQ_NOWAIT) +#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) +#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) + +#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) +#define REQ_HIPRI (1ULL << __REQ_HIPRI) + +#define REQ_DRV (1ULL << __REQ_DRV) +#define REQ_SWAP (1ULL << __REQ_SWAP) + +#define REQ_FAILFAST_MASK \ + (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) + +#define REQ_NOMERGE_FLAGS \ + (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) + +enum stat_group { + STAT_READ, + STAT_WRITE, + STAT_DISCARD, + + NR_STAT_GROUPS +}; + +#define bio_op(bio) \ + ((bio)->bi_opf & REQ_OP_MASK) +#define req_op(req) \ + ((req)->cmd_flags & REQ_OP_MASK) + +/* obsolete, don't use in new code */ +static inline void bio_set_op_attrs(struct bio *bio, unsigned op, + unsigned op_flags) +{ + bio->bi_opf = op | op_flags; +} + +static inline bool op_is_write(unsigned int op) +{ + return (op & 1); +} + +/* + * Check if the bio or request is one that needs special treatment in the + * flush state machine. + */ +static inline bool op_is_flush(unsigned int op) +{ + return op & (REQ_FUA | REQ_PREFLUSH); +} + +/* + * Reads are always treated as synchronous, as are requests with the FUA or + * PREFLUSH flag. Other operations may be marked as synchronous using the + * REQ_SYNC flag. 
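/*
 * Illustrative sketch, not part of this diff: composing an operation and
 * flags into bi_opf and taking it apart with the accessors above, plus the
 * typical use of blk_path_error() from earlier in this header. The
 * example_* names are made up.
 */
static inline void example_opf(struct bio *bio)
{
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

	WARN_ON(bio_op(bio) != REQ_OP_WRITE);	/* low REQ_OP_BITS bits */
	WARN_ON(!op_is_write(bio->bi_opf));	/* bit 0 of the opcode */
	WARN_ON(!op_is_flush(bio->bi_opf));	/* REQ_FUA or REQ_PREFLUSH */
}

static inline bool example_retry_on_other_path(blk_status_t status)
{
	/* target/medium/protection errors will fail on every path */
	return status != BLK_STS_OK && blk_path_error(status);
}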
+ */ +static inline bool op_is_sync(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_READ || + (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); +} + +static inline bool op_is_discard(unsigned int op) +{ + return (op & REQ_OP_MASK) == REQ_OP_DISCARD; +} + +static inline int op_stat_group(unsigned int op) +{ + if (op_is_discard(op)) + return STAT_DISCARD; + return op_is_write(op); +} + +typedef unsigned int blk_qc_t; +#define BLK_QC_T_NONE -1U +#define BLK_QC_T_EAGAIN -2U +#define BLK_QC_T_SHIFT 16 +#define BLK_QC_T_INTERNAL (1U << 31) + +static inline bool blk_qc_t_valid(blk_qc_t cookie) +{ + return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; +} + +static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) +{ + return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; +} + +static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) +{ + return cookie & ((1u << BLK_QC_T_SHIFT) - 1); +} + +static inline bool blk_qc_t_is_internal(blk_qc_t cookie) +{ + return (cookie & BLK_QC_T_INTERNAL) != 0; +} + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; + +#endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h new file mode 100644 index 0000000..51922b5 --- /dev/null +++ b/include/linux/blkdev.h @@ -0,0 +1,1841 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLKDEV_H +#define _LINUX_BLKDEV_H + +#include +#include + +#ifdef CONFIG_BLOCK + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct module; +struct scsi_ioctl_command; + +struct request_queue; +struct elevator_queue; +struct blk_trace; +struct request; +struct sg_io_hdr; +struct bsg_job; +struct blkcg_gq; +struct blk_flush_queue; +struct pr_ops; +struct rq_qos; +struct blk_queue_stats; +struct blk_stat_callback; + +#define BLKDEV_MIN_RQ 4 +#ifdef CONFIG_MCST +#define BLKDEV_MAX_RQ 1024 /* Default maximum */ +#else +#define BLKDEV_MAX_RQ 128 /* Default maximum */ +#endif + +/* Must be consistent with blk_mq_poll_stats_bkt() */ +#define BLK_MQ_POLL_STATS_BKTS 16 + +/* Doing classic polling */ +#define BLK_MQ_POLL_CLASSIC -1 + +/* + * Maximum number of blkcg policies allowed to be registered concurrently. + * Defined here to simplify include dependency. + */ +#define BLKCG_MAX_POLS 5 + +typedef void (rq_end_io_fn)(struct request *, blk_status_t); + +/* + * request flags */ +typedef __u32 __bitwise req_flags_t; + +/* elevator knows about this request */ +#define RQF_SORTED ((__force req_flags_t)(1 << 0)) +/* drive already may have started this one */ +#define RQF_STARTED ((__force req_flags_t)(1 << 1)) +/* may not be passed by ioscheduler */ +#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) +/* request for flush sequence */ +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) +/* merge of different types, fail separately */ +#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) +/* track inflight for MQ */ +#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) +/* don't call prep for this one */ +#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) +/* set for "ide_preempt" requests and also for requests for which the SCSI + "quiesce" state must be ignored. */ +#define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) +/* contains copies of user pages */ +#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) +/* vaguely specified driver internal error. 
Ignored by the block layer */ +#define RQF_FAILED ((__force req_flags_t)(1 << 10)) +/* don't warn about errors */ +#define RQF_QUIET ((__force req_flags_t)(1 << 11)) +/* elevator private data attached */ +#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) +/* account into disk and partition IO statistics */ +#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) +/* request came from our alloc pool */ +#define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) +/* runtime pm request */ +#define RQF_PM ((__force req_flags_t)(1 << 15)) +/* on IO scheduler merge hash */ +#define RQF_HASHED ((__force req_flags_t)(1 << 16)) +/* track IO completion time */ +#define RQF_STATS ((__force req_flags_t)(1 << 17)) +/* Look at ->special_vec for the actual data payload instead of the + bio chain. */ +#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) +/* The per-zone write lock is held for this request */ +#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* already slept for hybrid poll */ +#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) +/* ->timeout has been called, don't expire again */ +#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) + +/* flags that prevent us from merging requests: */ +#define RQF_NOMERGE_FLAGS \ + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) + +/* + * Request state for blk-mq. + */ +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +/* + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. + */ +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + + unsigned int cmd_flags; /* op and common flags */ + req_flags_t rq_flags; + + int tag; + int internal_tag; + + /* the following two fields are internal, NEVER access directly */ + unsigned int __data_len; /* total data len */ + sector_t __sector; /* sector cursor */ + + struct bio *bio; + struct bio *biotail; + + struct list_head queuelist; + + /* + * The hash is used inside the scheduler, and killed once the + * request reaches the dispatch list. The ipi_list is only used + * to queue the request for softirq completion, which is long + * after the request has been unhashed (and even removed from + * the dispatch list). + */ + union { + struct hlist_node hash; /* merge hash */ + struct list_head ipi_list; + }; + + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; + void *completion_data; + int error_count; /* for legacy drivers, don't use */ + }; + + /* + * Three pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. Flush requests are + * never put on the IO scheduler. So let the flush fields share + * space with the elevator data. + */ + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + + struct gendisk *rq_disk; + struct hd_struct *part; +#ifdef CONFIG_BLK_RQ_ALLOC_TIME + /* Time that the first bio started allocating this request. */ + u64 alloc_time_ns; +#endif + /* Time that this request was allocated for this IO. 
*/
+	u64 start_time_ns;
+	/* Time that I/O was submitted to the device. */
+	u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+	unsigned short wbt_flags;
+#endif
+	/*
+	 * rq sectors used for blk stats. It has the same value
+	 * as blk_rq_sectors(rq), except that it is never zeroed
+	 * by completion.
+	 */
+	unsigned short stats_sectors;
+
+	/*
+	 * Number of scatter-gather DMA addr+len pairs after
+	 * physical address coalescing is performed.
+	 */
+	unsigned short nr_phys_segments;
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif
+
+	unsigned short write_hint;
+	unsigned short ioprio;
+
+	unsigned int extra_len;	/* length of alignment and padding */
+
+	enum mq_rq_state state;
+	refcount_t ref;
+
+	unsigned int timeout;
+	unsigned long deadline;
+
+	union {
+		struct __call_single_data csd;
+		u64 fifo_time;
+	};
+
+	/*
+	 * completion callback.
+	 */
+	rq_end_io_fn *end_io;
+	void *end_io_data;
+};
+
+static inline bool blk_op_is_scsi(unsigned int op)
+{
+	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_op_is_private(unsigned int op)
+{
+	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
+
+static inline bool blk_rq_is_scsi(struct request *rq)
+{
+	return blk_op_is_scsi(req_op(rq));
+}
+
+static inline bool blk_rq_is_private(struct request *rq)
+{
+	return blk_op_is_private(req_op(rq));
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
+}
+
+static inline bool bio_is_passthrough(struct bio *bio)
+{
+	unsigned op = bio_op(bio);
+
+	return blk_op_is_scsi(op) || blk_op_is_private(op);
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+	return req->ioprio;
+}
+
+#include <linux/elevator.h>
+
+struct blk_queue_ctx;
+
+typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
+
+struct bio_vec;
+typedef int (dma_drain_needed_fn)(struct request *);
+
+enum blk_eh_timer_return {
+	BLK_EH_DONE,		/* driver has completed the command */
+	BLK_EH_RESET_TIMER,	/* reset timer and try again */
+};
+
+enum blk_queue_state {
+	Queue_down,
+	Queue_up,
+};
+
+#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */
+
+#define BLK_SCSI_MAX_CMDS	(256)
+#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+
+/*
+ * Zoned block device models (zoned limit).
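/*
 * Illustrative sketch, not part of this diff: querying the first few zones
 * of a zoned block device with blkdev_report_zones(), declared just below.
 * On return, *nr_zones holds how many entries were actually filled. The
 * example_* name, the fixed batch of 16 and the use of kcalloc() are
 * assumptions for the example.
 */
static int example_report_zones(struct block_device *bdev)
{
	struct blk_zone *zones;
	unsigned int nr_zones = 16;
	int ret;

	zones = kcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
	kfree(zones);
	return ret;
}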
+ */ +enum blk_zoned_model { + BLK_ZONED_NONE, /* Regular block device */ + BLK_ZONED_HA, /* Host-aware zoned block device */ + BLK_ZONED_HM, /* Host-managed zoned block device */ +}; + +struct queue_limits { + unsigned long bounce_pfn; + unsigned long seg_boundary_mask; + unsigned long virt_boundary_mask; + + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + + unsigned short max_segments; + unsigned short max_integrity_segments; + unsigned short max_discard_segments; + + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; +}; + +#ifdef CONFIG_BLK_DEV_ZONED + +/* + * Maximum number of zones to report with a single report zones command. + */ +#define BLK_ZONED_REPORT_MAX_ZONES 8192U + +extern unsigned int blkdev_nr_zones(struct block_device *bdev); +extern int blkdev_report_zones(struct block_device *bdev, + sector_t sector, struct blk_zone *zones, + unsigned int *nr_zones); +extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, + sector_t nr_sectors, gfp_t gfp_mask); +extern int blk_revalidate_disk_zones(struct gendisk *disk); + +extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); + +#else /* CONFIG_BLK_DEV_ZONED */ + +static inline unsigned int blkdev_nr_zones(struct block_device *bdev) +{ + return 0; +} + +static inline int blk_revalidate_disk_zones(struct gendisk *disk) +{ + return 0; +} + +static inline int blkdev_report_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, + fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} + +#endif /* CONFIG_BLK_DEV_ZONED */ + +struct request_queue { + struct request *last_merge; + struct elevator_queue *elevator; + + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + + make_request_fn *make_request_fn; + dma_drain_needed_fn *dma_drain_needed; + + const struct blk_mq_ops *mq_ops; + + /* sw queues */ + struct blk_mq_ctx __percpu *queue_ctx; + unsigned int nr_queues; + + unsigned int queue_depth; + + /* hw dispatch queues */ + struct blk_mq_hw_ctx **queue_hw_ctx; + unsigned int nr_hw_queues; + + struct backing_dev_info *backing_dev_info; + + /* + * The queue owner gets to use this for whatever they like. + * ll_rw_blk doesn't touch it. + */ + void *queuedata; + + /* + * various queue flags, see QUEUE_* below + */ + unsigned long queue_flags; + /* + * Number of contexts that have called blk_set_pm_only(). If this + * counter is above zero then only RQF_PM and RQF_PREEMPT requests are + * processed. + */ + atomic_t pm_only; + + /* + * ida allocated id for this queue. Used to index queues from + * ioctx. 
+ */ + int id; + + /* + * queue needs bounce pages for pages above this limit + */ + gfp_t bounce_gfp; + + spinlock_t queue_lock; + + /* + * queue kobject + */ + struct kobject kobj; + + /* + * mq queue kobject + */ + struct kobject *mq_kobj; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity integrity; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#ifdef CONFIG_PM + struct device *dev; + int rpm_status; + unsigned int nr_pending; +#endif + + /* + * queue settings + */ + unsigned long nr_requests; /* Max # of requests */ + + unsigned int dma_drain_size; + void *dma_drain_buffer; + unsigned int dma_pad_mask; + unsigned int dma_alignment; + + unsigned int rq_timeout; + int poll_nsec; + + struct blk_stat_callback *poll_cb; + struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS]; + + struct timer_list timeout; + struct work_struct timeout_work; + + struct list_head icq_list; +#ifdef CONFIG_BLK_CGROUP + DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); + struct blkcg_gq *root_blkg; + struct list_head blkg_list; +#endif + + struct queue_limits limits; + + unsigned int required_elevator_features; + +#ifdef CONFIG_BLK_DEV_ZONED + /* + * Zoned block device information for request dispatch control. + * nr_zones is the total number of zones of the device. This is always + * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones + * bits which indicates if a zone is conventional (bit clear) or + * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones + * bits which indicates if a zone is write locked, that is, if a write + * request targeting the zone was dispatched. All three fields are + * initialized by the low level device driver (e.g. scsi/sd.c). + * Stacking drivers (device mappers) may or may not initialize + * these fields. + * + * Reads of this information must be protected with blk_queue_enter() / + * blk_queue_exit(). Modifying this information is only allowed while + * no requests are being processed. See also blk_mq_freeze_queue() and + * blk_mq_unfreeze_queue(). + */ + unsigned int nr_zones; + unsigned long *seq_zones_bitmap; + unsigned long *seq_zones_wlock; +#endif /* CONFIG_BLK_DEV_ZONED */ + + /* + * sg stuff + */ + unsigned int sg_timeout; + unsigned int sg_reserved_size; + int node; +#ifdef CONFIG_BLK_DEV_IO_TRACE + struct blk_trace __rcu *blk_trace; + struct mutex blk_trace_mutex; +#endif + /* + * for flush operations + */ + struct blk_flush_queue *fq; + + struct list_head requeue_list; + spinlock_t requeue_lock; + struct delayed_work requeue_work; + + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + + /* + * for reusing dead hctx instance in case of updating + * nr_hw_queues + */ + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + + int mq_freeze_depth; + +#if defined(CONFIG_BLK_DEV_BSG) + struct bsg_class_device bsg_dev; +#endif + +#ifdef CONFIG_BLK_DEV_THROTTLING + /* Throttle data */ + struct throtl_data *td; +#endif + struct rcu_head rcu_head; + wait_queue_head_t mq_freeze_wq; + /* + * Protect concurrent access to q_usage_counter by + * percpu_ref_kill() and percpu_ref_reinit(). 
+ */ + struct mutex mq_freeze_lock; + struct percpu_ref q_usage_counter; + + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct bio_set bio_split; + +#ifdef CONFIG_BLK_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; +#endif + + bool mq_sysfs_init_done; + + size_t cmd_size; + + struct work_struct release_work; + +#define BLK_MAX_WRITE_HINTS 5 + u64 write_hints[BLK_MAX_WRITE_HINTS]; +}; + +/* Keep blk_queue_flag_name[] in sync with the definitions below */ +#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ +#define QUEUE_FLAG_DYING 1 /* queue being torn down */ +#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ +#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ +#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ +#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */ +#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ +#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ +#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */ +#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ +#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ +#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ +#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ +#define QUEUE_FLAG_WC 17 /* Write back caching */ +#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */ +#define QUEUE_FLAG_DAX 19 /* device supports DAX */ +#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ +#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */ +#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ +#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */ +#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ +#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ +#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */ +#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ + +#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_SAME_COMP)) + +void blk_queue_flag_set(unsigned int flag, struct request_queue *q); +void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); + +#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) +#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) +#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) +#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) +#define blk_queue_noxmerges(q) \ + test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) +#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) +#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) +#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) +#define blk_queue_zone_resetall(q) \ + test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) +#define blk_queue_secure_erase(q) \ + (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) +#define 
blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) +#define blk_queue_scsi_passthrough(q) \ + test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) +#define blk_queue_pci_p2pdma(q) \ + test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) +#ifdef CONFIG_BLK_RQ_ALLOC_TIME +#define blk_queue_rq_alloc_time(q) \ + test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags) +#else +#define blk_queue_rq_alloc_time(q) false +#endif + +#define blk_noretry_request(rq) \ + ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ + REQ_FAILFAST_DRIVER)) +#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) +#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) +#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) +#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) + +extern void blk_set_pm_only(struct request_queue *q); +extern void blk_clear_pm_only(struct request_queue *q); + +static inline bool blk_account_rq(struct request *rq) +{ + return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); +} + +#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) + +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) + +#define rq_dma_dir(rq) \ + (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) + +#define dma_map_bvec(dev, bv, dir, attrs) \ + dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ + (dir), (attrs)) + +static inline bool queue_is_mq(struct request_queue *q) +{ + return q->mq_ops; +} + +static inline enum blk_zoned_model +blk_queue_zoned_model(struct request_queue *q) +{ + return q->limits.zoned; +} + +static inline bool blk_queue_is_zoned(struct request_queue *q) +{ + switch (blk_queue_zoned_model(q)) { + case BLK_ZONED_HA: + case BLK_ZONED_HM: + return true; + default: + return false; + } +} + +static inline sector_t blk_queue_zone_sectors(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; +} + +#ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? 
q->nr_zones : 0; +} + +static inline unsigned int blk_queue_zone_no(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q)) + return 0; + return sector >> ilog2(q->limits.chunk_sectors); +} + +static inline bool blk_queue_zone_is_seq(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap) + return false; + return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); +} +#else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return 0; +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +static inline bool rq_is_sync(struct request *rq) +{ + return op_is_sync(rq->cmd_flags); +} + +static inline bool rq_mergeable(struct request *rq) +{ + if (blk_rq_is_passthrough(rq)) + return false; + + if (req_op(rq) == REQ_OP_FLUSH) + return false; + + if (req_op(rq) == REQ_OP_WRITE_ZEROES) + return false; + + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + return false; + if (rq->rq_flags & RQF_NOMERGE_FLAGS) + return false; + + return true; +} + +static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) +{ + if (bio_page(a) == bio_page(b) && + bio_offset(a) == bio_offset(b)) + return true; + + return false; +} + +static inline unsigned int blk_queue_depth(struct request_queue *q) +{ + if (q->queue_depth) + return q->queue_depth; + + return q->nr_requests; +} + +extern unsigned long blk_max_low_pfn, blk_max_pfn; + +/* + * standard bounce addresses: + * + * BLK_BOUNCE_HIGH : bounce all highmem pages + * BLK_BOUNCE_ANY : don't bounce anything + * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary + */ + +#if BITS_PER_LONG == 32 +#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) +#else +#define BLK_BOUNCE_HIGH -1ULL +#endif +#define BLK_BOUNCE_ANY (-1ULL) +#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) + +/* + * default timeout for SG_IO if none specified + */ +#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) +#define BLK_MIN_SG_TIMEOUT (7 * HZ) + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; + int from_user; +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +/* This should not be used directly - use rq_for_each_segment */ +#define for_each_bio(_bio) \ + for (; _bio; _bio = _bio->bi_next) +#define __rq_for_each_bio(_bio, rq) \ + if ((rq->bio)) \ + for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) + +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.iter) + +#define rq_for_each_bvec(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_bvec(bvl, _iter.bio, _iter.iter) + +#define rq_iter_last(bvec, _iter) \ + (_iter.bio->bi_next == NULL && \ + bio_iter_last(bvec, _iter.iter)) + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void rq_flush_dcache_pages(struct request *rq); +#else +static inline void rq_flush_dcache_pages(struct request *rq) +{ +} +#endif + +extern int blk_register_queue(struct gendisk *disk); +extern void blk_unregister_queue(struct gendisk *disk); +extern blk_qc_t generic_make_request(struct bio *bio); +extern blk_qc_t direct_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); +extern void blk_put_request(struct request *); +extern struct request *blk_get_request(struct request_queue *, 
unsigned int op, + blk_mq_req_flags_t flags); +extern int blk_lld_busy(struct request_queue *q); +extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data); +extern void blk_rq_unprep_clone(struct request *rq); +extern blk_status_t blk_insert_cloned_request(struct request_queue *q, + struct request *rq); +extern int blk_rq_append_bio(struct request *rq, struct bio **bio); +extern void blk_queue_split(struct request_queue *, struct bio **); +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, + unsigned int, void __user *); +extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, + unsigned int, void __user *); +extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, + struct scsi_ioctl_command __user *); + +extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); +extern void blk_queue_exit(struct request_queue *q); +extern void blk_sync_queue(struct request_queue *q); +extern int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, + gfp_t); +extern int blk_rq_unmap_user(struct bio *); +extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); +extern int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, const struct iov_iter *, + gfp_t); +extern void blk_execute_rq(struct request_queue *, struct gendisk *, + struct request *, int); +extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, + struct request *, int, rq_end_io_fn *); + +/* Helper to convert REQ_OP_XXX to its string format XXX */ +extern const char *blk_op_str(unsigned int op); + +int blk_status_to_errno(blk_status_t status); +blk_status_t errno_to_blk_status(int errno); + +int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); + +static inline struct request_queue *bdev_get_queue(struct block_device *bdev) +{ + return bdev->bd_disk->queue; /* this is never NULL */ +} + +/* + * The basic unit of block I/O is a sector. It is used in a number of contexts + * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 + * bytes. Variables of type sector_t represent an offset or size that is a + * multiple of 512 bytes. Hence these two constants. + */ +#ifndef SECTOR_SHIFT +#define SECTOR_SHIFT 9 +#endif +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1 << SECTOR_SHIFT) +#endif + +/* + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request + * blk_rq_cur_bytes() : bytes left in the current segment + * blk_rq_err_bytes() : bytes left till the next error boundary + * blk_rq_sectors() : sectors left in the entire request + * blk_rq_cur_sectors() : sectors left in the current segment + * blk_rq_stats_sectors() : sectors of the entire request used for stats + */ +static inline sector_t blk_rq_pos(const struct request *rq) +{ + return rq->__sector; +} + +static inline unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->__data_len; +} + +static inline int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? 
bio_cur_bytes(rq->bio) : 0; +} + +extern unsigned int blk_rq_err_bytes(const struct request *rq); + +static inline unsigned int blk_rq_sectors(const struct request *rq) +{ + return blk_rq_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_stats_sectors(const struct request *rq) +{ + return rq->stats_sectors; +} + +#ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_rq_zone_no(struct request *rq) +{ + return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); +} + +static inline unsigned int blk_rq_zone_is_seq(struct request *rq) +{ + return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +/* + * Some commands like WRITE SAME have a payload or data transfer size which + * is different from the size of the request. Any driver that supports such + * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to + * calculate the data transfer size. + */ +static inline unsigned int blk_rq_payload_bytes(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec.bv_len; + return blk_rq_bytes(rq); +} + +/* + * Return the first full biovec in the request. The caller needs to ensure + * that there is at least one bvec before calling this helper. + */ +static inline struct bio_vec req_bvec(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec; + return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); +} + +static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, + int op) +{ + if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) + return min(q->limits.max_discard_sectors, + UINT_MAX >> SECTOR_SHIFT); + + if (unlikely(op == REQ_OP_WRITE_SAME)) + return q->limits.max_write_same_sectors; + + if (unlikely(op == REQ_OP_WRITE_ZEROES)) + return q->limits.max_write_zeroes_sectors; + + return q->limits.max_sectors; +} + +/* + * Return the maximum size of a request at a given offset. Only valid for + * file system requests. + */ +static inline unsigned int blk_max_size_offset(struct request_queue *q, + sector_t offset) +{ + if (!q->limits.chunk_sectors) + return q->limits.max_sectors; + + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)))); +} + +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) +{ + struct request_queue *q = rq->q; + + if (blk_rq_is_passthrough(rq)) + return q->limits.max_hw_sectors; + + if (!q->limits.chunk_sectors || + req_op(rq) == REQ_OP_DISCARD || + req_op(rq) == REQ_OP_SECURE_ERASE) + return blk_queue_get_max_sectors(q, req_op(rq)); + + return min(blk_max_size_offset(q, offset), + blk_queue_get_max_sectors(q, req_op(rq))); +} + +static inline unsigned int blk_rq_count_bios(struct request *rq) +{ + unsigned int nr_bios = 0; + struct bio *bio; + + __rq_for_each_bio(bio, rq) + nr_bios++; + + return nr_bios; +} + +void blk_steal_bios(struct bio_list *list, struct request *rq); + +/* + * Request completion related functions. + * + * blk_update_request() completes the given number of bytes and updates + * the request without completing it.
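 + * + * Illustrative sketch (not part of this header): a driver completing a + * request piecewise would typically do something like + * + * if (!blk_update_request(rq, BLK_STS_OK, done_bytes)) + * __blk_mq_end_request(rq, BLK_STS_OK); + * + * where "done_bytes" is hypothetical; __blk_mq_end_request() finishes the + * request once no bytes remain.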
+ */ +extern bool blk_update_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); + +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); + +/* + * Access functions for manipulating queue properties + */ +extern void blk_cleanup_queue(struct request_queue *); +extern void blk_queue_make_request(struct request_queue *, make_request_fn *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); +extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_discard_segments(struct request_queue *, + unsigned short); +extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); +extern void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors); +extern void blk_queue_max_write_same_sectors(struct request_queue *q, + unsigned int max_write_same_sectors); +extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, + unsigned int max_write_zeroes_sectors); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_alignment_offset(struct request_queue *q, + unsigned int alignment); +extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); +extern void blk_queue_io_min(struct request_queue *q, unsigned int min); +extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); +extern void blk_set_default_limits(struct queue_limits *lim); +extern void blk_set_stacking_limits(struct queue_limits *lim); +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset); +extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t offset); +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset); +extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); +extern int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size); +extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); +extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); +extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); +extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); +extern void blk_queue_required_elevator_features(struct request_queue *q, + unsigned int features); +extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, + struct device *dev); + +/* + * Number of physical segments as sent to the device. + * + * Normally this is the number of discontiguous data segments sent by the + * submitter. But for data-less commands like discard we might have no + * actual data segments submitted, but the driver might have to add its + * own special payload.
In that case we still return 1 here so that this + * special payload will be mapped. + */ +static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return 1; + return rq->nr_phys_segments; +} + +/* + * Number of discard segments (or ranges) the driver needs to fill in. + * Each discard bio merged into a request is counted as one segment. + */ +static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) +{ + return max_t(unsigned short, rq->nr_phys_segments, 1); +} + +extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +extern void blk_dump_rq_flags(struct request *, char *); +extern long nr_blockdev_pages(void); + +bool __must_check blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(gfp_t); +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id); +extern void blk_put_queue(struct request_queue *); +extern void blk_set_queue_dying(struct request_queue *); + +/* + * blk_plug permits building a queue of related requests by holding the I/O + * fragments for a short period. This allows merging of sequential requests + * into a single larger request. As the requests are moved from a per-task list to + * the device's request_queue in a batch, this results in improved scalability + * as the lock contention for request_queue lock is reduced. + * + * It is ok not to disable preemption when adding the request to the plug list + * or when attempting a merge, because blk_schedule_flush_plug() will only flush + * the plug list when the task sleeps by itself. For details, please see + * schedule() where blk_schedule_flush_plug() is called. + */ +struct blk_plug { + struct list_head mq_list; /* blk-mq requests */ + struct list_head cb_list; /* md requires an unplug callback */ + unsigned short rq_count; + bool multiple_queues; +}; +#define BLK_MAX_REQUEST_COUNT 16 +#define BLK_PLUG_FLUSH_SIZE (128 * 1024) + +struct blk_plug_cb; +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; +extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, + void *data, int size); +extern void blk_start_plug(struct blk_plug *); +extern void blk_finish_plug(struct blk_plug *); +extern void blk_flush_plug_list(struct blk_plug *, bool); + +static inline void blk_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, false); +} + +static inline void blk_schedule_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, true); +} + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + return plug && + (!list_empty(&plug->mq_list) || + !list_empty(&plug->cb_list)); +} + +extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); +extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct page *page); + +#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ + +extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); +extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, int flags, + struct bio **biop); + +#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
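 + +/* + * Usage sketch (illustrative, not from this header): combined with + * BLKDEV_ZERO_NOFALLBACK below, a caller of the blkdev_issue_zeroout() + * helper declared below can zero a range while keeping blocks provisioned, + * failing instead of falling back to explicit zero writes: + * + * err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, + * BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_NOFALLBACK); + */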
+#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ + +extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, + unsigned flags); +extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned flags); + +static inline int sb_issue_discard(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) +{ + return blkdev_issue_discard(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), + gfp_mask, flags); +} +static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask) +{ + return blkdev_issue_zeroout(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), + gfp_mask, 0); +} + +extern int blk_verify_command(unsigned char *cmd, fmode_t mode); + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_DEF_MAX_SECTORS = 2560, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, +}; + +static inline unsigned long queue_segment_boundary(const struct request_queue *q) +{ + return q->limits.seg_boundary_mask; +} + +static inline unsigned long queue_virt_boundary(const struct request_queue *q) +{ + return q->limits.virt_boundary_mask; +} + +static inline unsigned int queue_max_sectors(const struct request_queue *q) +{ + return q->limits.max_sectors; +} + +static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) +{ + return q->limits.max_hw_sectors; +} + +static inline unsigned short queue_max_segments(const struct request_queue *q) +{ + return q->limits.max_segments; +} + +static inline unsigned short queue_max_discard_segments(const struct request_queue *q) +{ + return q->limits.max_discard_segments; +} + +static inline unsigned int queue_max_segment_size(const struct request_queue *q) +{ + return q->limits.max_segment_size; +} + +static inline unsigned queue_logical_block_size(const struct request_queue *q) +{ + int retval = 512; + + if (q && q->limits.logical_block_size) + retval = q->limits.logical_block_size; + + return retval; +} + +static inline unsigned int bdev_logical_block_size(struct block_device *bdev) +{ + return queue_logical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_physical_block_size(const struct request_queue *q) +{ + return q->limits.physical_block_size; +} + +static inline unsigned int bdev_physical_block_size(struct block_device *bdev) +{ + return queue_physical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_io_min(const struct request_queue *q) +{ + return q->limits.io_min; +} + +static inline int bdev_io_min(struct block_device *bdev) +{ + return queue_io_min(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_io_opt(const struct request_queue *q) +{ + return q->limits.io_opt; +} + +static inline int bdev_io_opt(struct block_device *bdev) +{ + return queue_io_opt(bdev_get_queue(bdev)); +} + +static inline int queue_alignment_offset(const struct request_queue *q) +{ + if (q->limits.misaligned) + return -1; + + return q->limits.alignment_offset; +} + +static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) +{ + unsigned int granularity = max(lim->physical_block_size, lim->io_min); + unsigned int alignment = sector_div(sector, 
granularity >> SECTOR_SHIFT) + << SECTOR_SHIFT; + + return (granularity + lim->alignment_offset - alignment) % granularity; +} + +static inline int bdev_alignment_offset(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q->limits.misaligned) + return -1; + + if (bdev != bdev->bd_contains) + return bdev->bd_part->alignment_offset; + + return q->limits.alignment_offset; +} + +static inline int queue_discard_alignment(const struct request_queue *q) +{ + if (q->limits.discard_misaligned) + return -1; + + return q->limits.discard_alignment; +} + +static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) +{ + unsigned int alignment, granularity, offset; + + if (!lim->max_discard_sectors) + return 0; + + /* Why are these in bytes, not sectors? */ + alignment = lim->discard_alignment >> SECTOR_SHIFT; + granularity = lim->discard_granularity >> SECTOR_SHIFT; + if (!granularity) + return 0; + + /* Offset of the partition start in 'granularity' sectors */ + offset = sector_div(sector, granularity); + + /* And why do we do this modulus *again* in blkdev_issue_discard()? */ + offset = (granularity + alignment - offset) % granularity; + + /* Turn it back into bytes, gaah */ + return offset << SECTOR_SHIFT; +} + +static inline int bdev_discard_alignment(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (bdev != bdev->bd_contains) + return bdev->bd_part->discard_alignment; + + return q->limits.discard_alignment; +} + +static inline unsigned int bdev_write_same(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_same_sectors; + + return 0; +} + +static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_zeroes_sectors; + + return 0; +} + +static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zoned_model(q); + + return BLK_ZONED_NONE; +} + +static inline bool bdev_is_zoned(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_is_zoned(q); + + return false; +} + +static inline sector_t bdev_zone_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zone_sectors(q); + return 0; +} + +static inline int queue_dma_alignment(const struct request_queue *q) +{ + return q ? 
q->dma_alignment : 511; +} + +static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, + unsigned int len) +{ + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !(addr & alignment) && !(len & alignment); +} + +/* assumes size > 256 */ +static inline unsigned int blksize_bits(unsigned int size) +{ + unsigned int bits = 8; + do { + bits++; + size >>= 1; + } while (size > 256); + return bits; +} + +static inline unsigned int block_size(struct block_device *bdev) +{ + return bdev->bd_block_size; +} + +typedef struct {struct page *v;} Sector; + +unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); + +static inline void put_dev_sector(Sector p) +{ + put_page(p.v); +} + +int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_work_on(int cpu, struct work_struct *work); +int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); + +#define MODULE_ALIAS_BLOCKDEV(major,minor) \ + MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ + MODULE_ALIAS("block-major-" __stringify(major) "-*") + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1 << 0, + BLK_INTEGRITY_GENERATE = 1 << 1, + BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, + BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + const char *disk_name; +}; + +typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); +typedef void (integrity_prepare_fn) (struct request *); +typedef void (integrity_complete_fn) (struct request *, unsigned int); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; + const char *name; +}; + +extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_unregister(struct gendisk *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); +extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, + struct scatterlist *); +extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); +extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, + struct request *); +extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, + struct bio *); + +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return NULL; + + return bi; +} + +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +{ + return blk_get_integrity(bdev->bd_disk); +} + +static inline bool blk_integrity_rq(struct request *rq) +{ + return rq->cmd_flags & REQ_INTEGRITY; +} + +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ + q->limits.max_integrity_segments = segs; +} + +static inline unsigned short +queue_max_integrity_segments(const struct request_queue *q) +{ + return q->limits.max_integrity_segments; +} + +/** + * bio_integrity_intervals - Return number of integrity intervals for a bio + * @bi: blk_integrity profile for device + * @sectors: Size of the bio in 512-byte sectors + * + * Description: The block layer calculates everything in 512 byte + * sectors but 
integrity metadata is done in terms of the data integrity + * interval size of the storage device. Convert the block layer sectors + * to the appropriate number of integrity intervals. + */ +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return sectors >> (bi->interval_exp - 9); +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_intervals(bi, sectors) * bi->tuple_size; +} + +/* + * Return the first bvec that contains integrity data. Only drivers that are + * limited to a single integrity segment should use this helper. + */ +static inline struct bio_vec *rq_integrity_vec(struct request *rq) +{ + if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) + return NULL; + return rq->bio->bi_integrity->bip_vec; +} + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +struct bio; +struct block_device; +struct gendisk; +struct blk_integrity; + +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return NULL; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline void blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) +{ + return 0; +} +static inline bool blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) +{ + return true; +} +static inline bool blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) +{ + return true; +} + +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} + +static inline struct bio_vec *rq_integrity_vec(struct request *rq) +{ + return NULL; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +struct block_device_operations { + int (*open) (struct block_device *, fmode_t); + void (*release) (struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); + int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + unsigned int (*check_events) (struct gendisk *disk, + unsigned int clearing); + /* ->media_changed() is DEPRECATED, use ->check_events() instead */ + int (*media_changed) (struct gendisk *); + void (*unlock_native_capacity) (struct gendisk *); + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + /* this callback is with swap_lock and sometimes page table lock held */ + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + int (*report_zones)(struct gendisk *, sector_t sector, + struct blk_zone *zones, unsigned int 
*nr_zones); + struct module *owner; + const struct pr_ops *pr_ops; +}; + +extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +extern int bdev_read_page(struct block_device *, sector_t, struct page *); +extern int bdev_write_page(struct block_device *, sector_t, struct page *, + struct writeback_control *); + +#ifdef CONFIG_BLK_DEV_ZONED +bool blk_req_needs_zone_write_lock(struct request *rq); +void __blk_req_zone_write_lock(struct request *rq); +void __blk_req_zone_write_unlock(struct request *rq); + +static inline void blk_req_zone_write_lock(struct request *rq) +{ + if (blk_req_needs_zone_write_lock(rq)) + __blk_req_zone_write_lock(rq); +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ + if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) + __blk_req_zone_write_unlock(rq); +} + +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return rq->q->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + if (!blk_req_needs_zone_write_lock(rq)) + return true; + return !blk_req_zone_is_write_locked(rq); +} +#else +static inline bool blk_req_needs_zone_write_lock(struct request *rq) +{ + return false; +} + +static inline void blk_req_zone_write_lock(struct request *rq) +{ +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ +} +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return false; +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + return true; +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +#else /* CONFIG_BLOCK */ + +struct block_device; + +/* + * stubs for when the block layer is configured out + */ +#define buffer_heads_over_limit 0 + +static inline long nr_blockdev_pages(void) +{ + return 0; +} + +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, + sector_t *error_sector) +{ + return 0; +} + +#endif /* CONFIG_BLOCK */ + +static inline void blk_wake_io_task(struct task_struct *waiter) +{ + /* + * If we're polling, the task itself is doing the completions. For + * that case, we don't need to signal a wakeup, it's enough to just + * mark us as RUNNING. 
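 + * + * Sketch of a typical caller (names hypothetical, not from this header): + * a synchronous direct-I/O completion hands its sleeping submitter to this + * helper: + * + * struct task_struct *waiter = dio->waiter; + * + * WRITE_ONCE(dio->waiter, NULL); + * blk_wake_io_task(waiter);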
+ */ + if (waiter == current) + __set_current_state(TASK_RUNNING); + else + wake_up_process(waiter); +} + +#endif diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h new file mode 100644 index 0000000..1c91753 --- /dev/null +++ b/include/linux/blkpg.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLKPG_H +#define _LINUX_BLKPG_H + +/* + * Partition table and disk geometry handling + */ + +#include <linux/compat.h> +#include <uapi/linux/blkpg.h> + +#ifdef CONFIG_COMPAT +/* For 32-bit/64-bit compatibility of struct blkpg_ioctl_arg */ +struct blkpg_compat_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_uptr_t data; +}; +#endif + +#endif /* _LINUX_BLKPG_H */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h new file mode 100644 index 0000000..3b6ff59 --- /dev/null +++ b/include/linux/blktrace_api.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BLKTRACE_H +#define BLKTRACE_H + +#include <linux/blk_types.h> +#include <linux/blkdev.h> +#include <linux/compat.h> +#include <uapi/linux/blktrace_api.h> +#include <linux/list.h> + +#if defined(CONFIG_BLK_DEV_IO_TRACE) + +#include <linux/relay.h> + +struct blk_trace { + int trace_state; + struct rchan *rchan; + unsigned long __percpu *sequence; + unsigned char __percpu *msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; + struct list_head running_list; + atomic_t dropped; +}; + +struct blkcg; + +extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +extern void blk_trace_shutdown(struct request_queue *); +extern __printf(3, 4) +void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...); + +/** + * blk_add_trace_msg - Add a (simple) message to the blktrace stream + * @q: queue the io is for + * @fmt: format to print message in + * args... Variable argument list for format + * + * Description: + * Records a (simple) message onto the blktrace stream. + * + * NOTE: BLK_TN_MAX_MSG characters are output at most. + * NOTE: Cannot use 'static inline' due to presence of var args... + * + **/ +#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ + do { \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ + if (unlikely(bt)) \ + __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ + rcu_read_unlock(); \ + } while (0) +#define blk_add_trace_msg(q, fmt, ...)
\ + blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) +#define BLK_TN_MAX_MSG 128 + +static inline bool blk_trace_note_message_enabled(struct request_queue *q) +{ + struct blk_trace *bt; + bool ret; + + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + ret = bt && (bt->act_mask & BLK_TC_NOTIFY); + rcu_read_unlock(); + return ret; +} + +extern void blk_add_driver_data(struct request_queue *q, struct request *rq, + void *data, size_t len); +extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, + char __user *arg); +extern int blk_trace_startstop(struct request_queue *q, int start); +extern int blk_trace_remove(struct request_queue *q); +extern void blk_trace_remove_sysfs(struct device *dev); +extern int blk_trace_init_sysfs(struct device *dev); + +extern struct attribute_group blk_trace_attr_group; + +#else /* !CONFIG_BLK_DEV_IO_TRACE */ +# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) +# define blk_trace_shutdown(q) do { } while (0) +# define blk_add_driver_data(q, rq, data, len) do {} while (0) +# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) +# define blk_trace_startstop(q, start) (-ENOTTY) +# define blk_trace_remove(q) (-ENOTTY) +# define blk_add_trace_msg(q, fmt, ...) do { } while (0) +# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0) +# define blk_trace_remove_sysfs(dev) do { } while (0) +# define blk_trace_note_message_enabled(q) (false) +static inline int blk_trace_init_sysfs(struct device *dev) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_IO_TRACE */ + +#ifdef CONFIG_COMPAT + +struct compat_blk_user_trace_setup { + char name[BLKTRACE_BDEV_SIZE]; + u16 act_mask; + u32 buf_size; + u32 buf_nr; + compat_u64 start_lba; + compat_u64 end_lba; + u32 pid; +}; +#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) + +#endif + +extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); + +static inline sector_t blk_rq_trace_sector(struct request *rq) +{ + /* + * Tracing should ignore the starting sector for passthrough requests + * and requests where the starting sector didn't get set. + */ + if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) + return 0; + return blk_rq_pos(rq); +} + +static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) +{ + return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq); +} + +#endif diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h new file mode 100644 index 0000000..511ab12 --- /dev/null +++ b/include/linux/blockgroup_lock.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLOCKGROUP_LOCK_H +#define _LINUX_BLOCKGROUP_LOCK_H +/* + * Per-blockgroup locking for ext2 and ext3. + * + * Simple hashed spinlocking. + */ + +#include <linux/spinlock.h> +#include <linux/cache.h> + +#ifdef CONFIG_SMP +#define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ?
NR_CPUS : 32)) +#else +#define NR_BG_LOCKS 1 +#endif + +struct bgl_lock { + spinlock_t lock; +} ____cacheline_aligned_in_smp; + +struct blockgroup_lock { + struct bgl_lock locks[NR_BG_LOCKS]; +}; + +static inline void bgl_lock_init(struct blockgroup_lock *bgl) +{ + int i; + + for (i = 0; i < NR_BG_LOCKS; i++) + spin_lock_init(&bgl->locks[i].lock); +} + +static inline spinlock_t * +bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) +{ + return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock; +} + +#endif diff --git a/include/linux/bma150.h b/include/linux/bma150.h new file mode 100644 index 0000000..31c9e32 --- /dev/null +++ b/include/linux/bma150.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2011 Bosch Sensortec GmbH + * Copyright (c) 2011 Unixphere + */ + +#ifndef _BMA150_H_ +#define _BMA150_H_ + +#define BMA150_DRIVER "bma150" + +#define BMA150_RANGE_2G 0 +#define BMA150_RANGE_4G 1 +#define BMA150_RANGE_8G 2 + +#define BMA150_BW_25HZ 0 +#define BMA150_BW_50HZ 1 +#define BMA150_BW_100HZ 2 +#define BMA150_BW_190HZ 3 +#define BMA150_BW_375HZ 4 +#define BMA150_BW_750HZ 5 +#define BMA150_BW_1500HZ 6 + +struct bma150_cfg { + bool any_motion_int; /* Set to enable any-motion interrupt */ + bool hg_int; /* Set to enable high-G interrupt */ + bool lg_int; /* Set to enable low-G interrupt */ + unsigned char any_motion_dur; /* Any-motion duration */ + unsigned char any_motion_thres; /* Any-motion threshold */ + unsigned char hg_hyst; /* High-G hysteresis */ + unsigned char hg_dur; /* High-G duration */ + unsigned char hg_thres; /* High-G threshold */ + unsigned char lg_hyst; /* Low-G hysteresis */ + unsigned char lg_dur; /* Low-G duration */ + unsigned char lg_thres; /* Low-G threshold */ + unsigned char range; /* one of BMA150_RANGE_xxx */ + unsigned char bandwidth; /* one of BMA150_BW_xxx */ +}; + +struct bma150_platform_data { + struct bma150_cfg cfg; + int (*irq_gpio_cfg)(void); +}; + +#endif /* _BMA150_H_ */ diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h new file mode 100644 index 0000000..ef2366a --- /dev/null +++ b/include/linux/bottom_half.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BH_H +#define _LINUX_BH_H + +#include <linux/preempt.h> + +#ifdef CONFIG_PREEMPT_RT +extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); +#else + +#ifdef CONFIG_TRACE_IRQFLAGS +extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); +#else +static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) +{ + preempt_count_add(cnt); + barrier(); +} +#endif +#endif + +static inline void local_bh_disable(void) +{ + __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); +} + +extern void _local_bh_enable(void); +extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); + +static inline void local_bh_enable_ip(unsigned long ip) +{ + __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); +} + +static inline void local_bh_enable(void) +{ + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); +} + +#endif /* _LINUX_BH_H */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h new file mode 100644 index 0000000..169fd25 --- /dev/null +++ b/include/linux/bpf-cgroup.h @@ -0,0 +1,410 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BPF_CGROUP_H +#define _BPF_CGROUP_H + +#include <linux/bpf.h> +#include <linux/errno.h> +#include <linux/jump_label.h> +#include <linux/percpu.h> +#include <linux/percpu-refcount.h> +#include <linux/rbtree.h> +#include <uapi/linux/bpf.h> + +struct sock; +struct sockaddr; +struct cgroup; +struct sk_buff; +struct bpf_map; +struct bpf_prog;
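 + +/* + * Usage sketch (illustrative, not part of this header): a receive path + * wraps packet delivery in one of the BPF_CGROUP_RUN_PROG_* macros defined + * below and treats a non-zero return as a verdict to drop: + * + * if (BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)) { + * kfree_skb(skb); + * return NET_RX_DROP; + * } + */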
+struct bpf_sock_ops_kern; +struct bpf_cgroup_storage; +struct ctl_table; +struct ctl_table_header; + +#ifdef CONFIG_CGROUP_BPF + +extern struct static_key_false cgroup_bpf_enabled_key; +#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) + +DECLARE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); + +#define for_each_cgroup_storage_type(stype) \ + for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) + +struct bpf_cgroup_storage_map; + +struct bpf_storage_buffer { + struct rcu_head rcu; + char data[0]; +}; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void __percpu *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list; + struct rb_node node; + struct rcu_head rcu; +}; + +struct bpf_prog_list { + struct list_head node; + struct bpf_prog *prog; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + /* array of effective progs in this cgroup */ + struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; + + /* attached progs to this cgroup and attach flags + * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will + * have either zero or one element + * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS + */ + struct list_head progs[MAX_BPF_ATTACH_TYPE]; + u32 flags[MAX_BPF_ATTACH_TYPE]; + + /* temp storage for effective prog array used by prog_attach/detach */ + struct bpf_prog_array *inactive; + + /* reference counter used to detach bpf programs after cgroup removal */ + struct percpu_ref refcnt; + + /* cgroup_bpf is released using a work queue */ + struct work_struct release_work; +}; + +int cgroup_bpf_inherit(struct cgroup *cgrp); +void cgroup_bpf_offline(struct cgroup *cgrp); + +int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type); +int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + union bpf_attr __user *uattr); + +/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ +int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, u32 flags); +int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + union bpf_attr __user *uattr); + +int __cgroup_bpf_run_filter_skb(struct sock *sk, + struct sk_buff *skb, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sk(struct sock *sk, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, + struct sockaddr *uaddr, + enum bpf_attach_type type, + void *t_ctx); + +int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, + struct bpf_sock_ops_kern *sock_ops, + enum bpf_attach_type type); + +int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, + short access, enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, + struct ctl_table *table, int write, + void __user *buf, size_t *pcount, + loff_t *ppos, void **new_buf, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, + int *optname, char __user *optval, + int *optlen, char **kernel_optval); +int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + int 
optname, char __user *optval, + int __user *optlen, int max_optlen, + int retval); + +static inline enum bpf_cgroup_storage_type cgroup_storage_type( + struct bpf_map *map) +{ + if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) + return BPF_CGROUP_STORAGE_PERCPU; + + return BPF_CGROUP_STORAGE_SHARED; +} + +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage + *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) +{ + enum bpf_cgroup_storage_type stype; + + for_each_cgroup_storage_type(stype) + this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); +} + +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype); +void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); +void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, + struct cgroup *cgroup, + enum bpf_attach_type type); +void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); +int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); +void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); + +int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, + void *value, u64 flags); + +/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ + BPF_CGROUP_INET_INGRESS); \ + \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ + typeof(sk) __sk = sk_to_full_sk(sk); \ + if (sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ + BPF_CGROUP_INET_EGRESS); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, type); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) + +#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) + +#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) + +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + NULL); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) { \ + lock_sock(sk); \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + t_ctx); \ + release_sock(sk); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND) + +#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND) + +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ + sk->sk_prot->pre_connect) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) + +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL) + +#define 
BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) + +#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) + +#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) + +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) + +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) + +#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled && (sock_ops)->sk) { \ + typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ + if (__sk && sk_fullsock(__sk)) \ + __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ + sock_ops, \ + BPF_CGROUP_SOCK_OPS); \ + } \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ + access, \ + BPF_CGROUP_DEVICE); \ + \ + __ret; \ +}) + + +#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ + buf, count, pos, nbuf, \ + BPF_CGROUP_SYSCTL); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ + optname, optval, \ + optlen, \ + kernel_optval); \ + __ret; \ +}) + +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + get_user(__ret, optlen); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ + max_optlen, retval) \ +({ \ + int __ret = retval; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ + optname, optval, \ + optlen, max_optlen, \ + retval); \ + __ret; \ +}) + +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog); +int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype); +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); +#else + +struct bpf_prog; +struct cgroup_bpf {}; +static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} + +static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + +static inline void bpf_cgroup_storage_set( + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} +static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, + struct bpf_map *map) { return 0; } +static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, + struct bpf_map *map) {} +static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( + struct bpf_prog *prog, enum 
bpf_cgroup_storage_type stype) { return NULL; } +static inline void bpf_cgroup_storage_free( + struct bpf_cgroup_storage *storage) {} +static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, + void *value) { + return 0; +} +static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, + void *key, void *value, u64 flags) { + return 0; +} + +#define cgroup_bpf_enabled (0) +#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) +#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ + optlen, max_optlen, retval) ({ retval; }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) ({ 0; }) + +#define for_each_cgroup_storage_type(stype) for (; false; ) + +#endif /* CONFIG_CGROUP_BPF */ + +#endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h new file mode 100644 index 0000000..576f008 --- /dev/null +++ b/include/linux/bpf.h @@ -0,0 +1,1187 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + */ +#ifndef _LINUX_BPF_H +#define _LINUX_BPF_H 1 + +#include <uapi/linux/bpf.h> + +#include <linux/workqueue.h> +#include <linux/file.h> +#include <linux/percpu.h> +#include <linux/err.h> +#include <linux/rbtree_latch.h> +#include <linux/numa.h> +#include <linux/wait.h> +#include <linux/u64_stats_sync.h> + +struct bpf_verifier_env; +struct perf_event; +struct bpf_prog; +struct bpf_map; +struct sock; +struct seq_file; +struct btf; +struct btf_type; + +extern struct idr btf_idr; +extern spinlock_t btf_idr_lock; + +/* map is generic key/value storage optionally accessible by eBPF programs */ +struct bpf_map_ops { + /* funcs callable from userspace (via syscall) */ + int (*map_alloc_check)(union bpf_attr *attr); + struct bpf_map *(*map_alloc)(union bpf_attr *attr); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); + void (*map_release_uref)(struct bpf_map *map); + void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); + + /* funcs callable from userspace and from eBPF programs */ + void *(*map_lookup_elem)(struct bpf_map *map, void *key); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_map *map, void *key); + int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); + int
(*map_pop_elem)(struct bpf_map *map, void *value); + int (*map_peek_elem)(struct bpf_map *map, void *value); + + /* funcs called by prog_array and perf_event_array map */ + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void (*map_fd_put_ptr)(void *ptr); + u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); + u32 (*map_fd_sys_lookup_elem)(void *ptr); + void (*map_seq_show_elem)(struct bpf_map *map, void *key, + struct seq_file *m); + int (*map_check_btf)(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type); + + /* Direct value access helpers. */ + int (*map_direct_value_addr)(const struct bpf_map *map, + u64 *imm, u32 off); + int (*map_direct_value_meta)(const struct bpf_map *map, + u64 imm, u32 *off); +}; + +struct bpf_map_memory { + u32 pages; + struct user_struct *user; +}; + +struct bpf_map { + /* The first two cachelines with read-mostly members of which some + * are also accessed in fast-path (e.g. ops, max_entries). + */ + const struct bpf_map_ops *ops ____cacheline_aligned; + struct bpf_map *inner_map_meta; +#ifdef CONFIG_SECURITY + void *security; +#endif + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u32 map_flags; + int spin_lock_off; /* >=0 valid offset, <0 error */ + u32 id; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + struct btf *btf; + struct bpf_map_memory memory; + bool unpriv_array; + bool frozen; /* write-once */ + /* 48 bytes hole */ + + /* The 3rd and 4th cacheline with misc members to avoid false sharing + * particularly with refcounting. + */ + atomic_t refcnt ____cacheline_aligned; + atomic_t usercnt; + struct work_struct work; + char name[BPF_OBJ_NAME_LEN]; +}; + +static inline bool map_value_has_spin_lock(const struct bpf_map *map) +{ + return map->spin_lock_off >= 0; +} + +static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) +{ + if (likely(!map_value_has_spin_lock(map))) + return; + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = + (struct bpf_spin_lock){}; +} + +/* copy everything but bpf_spin_lock */ +static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) +{ + if (unlikely(map_value_has_spin_lock(map))) { + u32 off = map->spin_lock_off; + + memcpy(dst, src, off); + memcpy(dst + off + sizeof(struct bpf_spin_lock), + src + off + sizeof(struct bpf_spin_lock), + map->value_size - off - sizeof(struct bpf_spin_lock)); + } else { + memcpy(dst, src, map->value_size); + } +} +void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, + bool lock_src); + +struct bpf_offload_dev; +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *map, + void *key, void *next_key); + int (*map_lookup_elem)(struct bpf_offloaded_map *map, + void *key, void *value); + int (*map_update_elem)(struct bpf_offloaded_map *map, + void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) +{ + return container_of(map, struct bpf_offloaded_map, map); +} + +static inline bool bpf_map_offload_neutral(const struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; +} + +static inline bool 
bpf_map_support_seq_show(const struct bpf_map *map) +{ + return map->btf && map->ops->map_seq_show_elem; +} + +int map_check_no_btf(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type); + +extern const struct bpf_map_ops bpf_map_offload_ops; + +/* function argument constraints */ +enum bpf_arg_type { + ARG_DONTCARE = 0, /* unused argument in helper function */ + + /* the following constraints used to prototype + * bpf_map_lookup/update/delete_elem() functions + */ + ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ + ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ + + /* the following constraints used to prototype bpf_memcmp() and other + * functions that access data on eBPF program stack + */ + ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ + ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ + ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, + * helper function must fill all bytes or clear + * them in error case. + */ + + ARG_CONST_SIZE, /* number of bytes accessed from memory */ + ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ + + ARG_PTR_TO_CTX, /* pointer to context */ + ARG_ANYTHING, /* any (initialized) argument is ok */ + ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ + ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ + ARG_PTR_TO_INT, /* pointer to int */ + ARG_PTR_TO_LONG, /* pointer to long */ + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ +}; + +/* type of values returned from helper functions */ +enum bpf_return_type { + RET_INTEGER, /* function returns integer */ + RET_VOID, /* function doesn't return anything */ + RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ + RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ + RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ + RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ + RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ +}; + +/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs + * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL + * instructions after verifying + */ +struct bpf_func_proto { + u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool gpl_only; + bool pkt_access; + enum bpf_return_type ret_type; + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; +}; + +/* bpf_context is intentionally undefined structure. Pointer to bpf_context is + * the first argument to eBPF programs. 
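 + * Each program type reinterprets it as that type's actual context + * structure, e.g.: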
+ * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' + */ +struct bpf_context; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2 +}; + +/* types of values stored in eBPF registers */ +/* Pointer types represent: + * pointer + * pointer + imm + * pointer + (u16) var + * pointer + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) means that 'imm' was added + */ +enum bpf_reg_type { + NOT_INIT = 0, /* nothing was written into register */ + SCALAR_VALUE, /* reg doesn't contain a valid pointer */ + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ + PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ + PTR_TO_STACK, /* reg == frame_pointer + offset */ + PTR_TO_PACKET_META, /* skb->data - meta_len */ + PTR_TO_PACKET, /* reg points to skb->data */ + PTR_TO_PACKET_END, /* skb->data + headlen */ + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ + PTR_TO_SOCKET, /* reg points to struct bpf_sock */ + PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ + PTR_TO_SOCK_COMMON, /* reg points to sock_common */ + PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ + PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ + PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ + PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ +}; + +/* The information passed from prog-specific *_is_valid_access + * back to the verifier. + */ +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + int ctx_field_size; +}; + +static inline void +bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) +{ + aux->ctx_field_size = size; +} + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +}; + +struct bpf_verifier_ops { + /* return eBPF function prototype for verification */ + const struct bpf_func_proto * + (*get_func_proto)(enum bpf_func_id func_id, + const struct bpf_prog *prog); + + /* return true if 'size' wide access at offset 'off' within bpf_context + * with 'type' (read or write) is allowed + */ + bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info); + int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, + const struct bpf_prog *prog); + int (*gen_ld_abs)(const struct bpf_insn *orig, + struct bpf_insn *insn_buf); + u32 (*convert_ctx_access)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, u32 *target_size); +}; + +struct bpf_prog_offload_ops { + /* verifier basic callbacks */ + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); + int (*finalize)(struct bpf_verifier_env *env); + /* verifier optimization callbacks (called after .finalize) */ + int (*replace_insn)(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); + int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); + /* program management callbacks */ + int (*prepare)(struct bpf_prog *prog); + int (*translate)(struct bpf_prog *prog); + void (*destroy)(struct bpf_prog *prog); +}; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + 
struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED, + BPF_CGROUP_STORAGE_PERCPU, + __BPF_CGROUP_STORAGE_MAX +}; + +#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX + +struct bpf_prog_stats { + u64 cnt; + u64 nsecs; + struct u64_stats_sync syncp; +}; + +struct bpf_prog_aux { + atomic_t refcnt; + u32 used_map_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; /* used by non-func prog as the number of func progs */ + u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ + bool verifier_zext; /* Zero extensions has been inserted by verifier. */ + bool offload_requested; + struct bpf_prog **func; + void *jit_data; /* JIT specific data. arch dependent */ + struct latch_tree_node ksym_tnode; + struct list_head ksym_lnode; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; /* ns since boottime */ + struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; + char name[BPF_OBJ_NAME_LEN]; +#ifdef CONFIG_SECURITY + void *security; +#endif + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + /* bpf_line_info loaded from userspace. linfo->insn_off + * has the xlated insn offset. + * Both the main and sub prog share the same linfo. + * The subprog can access its first linfo by + * using the linfo_idx. + */ + struct bpf_line_info *linfo; + /* jited_linfo is the jited addr of the linfo. It has a + * one to one mapping to linfo: + * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. + * Both the main and sub prog share the same jited_linfo. + * The subprog can access its first jited_linfo by + * using the linfo_idx. + */ + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + /* subprog can use linfo_idx to access its first linfo and + * jited_linfo. + * main prog always has linfo_idx == 0 + */ + u32 linfo_idx; + struct bpf_prog_stats __percpu *stats; + union { + struct work_struct work; + struct rcu_head rcu; + }; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + /* 'ownership' of prog_array is claimed by the first program that + * is going to use this map or by the first program which FD is stored + * in the map to make sure that all callers and callees have the same + * prog_type and JITed flag + */ + enum bpf_prog_type owner_prog_type; + bool owner_jited; + union { + char value[0] __aligned(8); + void *ptrs[0] __aligned(8); + void __percpu *pptrs[0] __aligned(8); + }; +}; + +#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ +#define MAX_TAIL_CALL_CNT 32 + +#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ + BPF_F_RDONLY_PROG | \ + BPF_F_WRONLY | \ + BPF_F_WRONLY_PROG) + +#define BPF_MAP_CAN_READ BIT(0) +#define BPF_MAP_CAN_WRITE BIT(1) + +static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) +{ + u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); + + /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is + * not possible. 
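+	 *
+	 * For illustration (editorial gloss, restating the code just
+	 * below, not from the original comment):
+	 *   BPF_F_RDONLY_PROG -> BPF_MAP_CAN_READ
+	 *   BPF_F_WRONLY_PROG -> BPF_MAP_CAN_WRITE
+	 *   neither flag set  -> BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE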
+ */ + if (access_flags & BPF_F_RDONLY_PROG) + return BPF_MAP_CAN_READ; + else if (access_flags & BPF_F_WRONLY_PROG) + return BPF_MAP_CAN_WRITE; + else + return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; +} + +static inline bool bpf_map_flags_access_ok(u32 access_flags) +{ + return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != + (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); +} + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct rcu_head rcu; +}; + +bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +int bpf_prog_calc_tag(struct bpf_prog *fp); + +const struct bpf_func_proto *bpf_get_trace_printk_proto(void); + +typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, + unsigned long off, unsigned long len); +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, + u32 *target_size); + +u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, + void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); + +/* an array of programs to be executed under rcu_lock. + * + * Typical usage: + * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); + * + * the structure returned by bpf_prog_array_alloc() should be populated + * with program pointers and the last pointer must be NULL. + * The user has to keep refcnt on the program and make sure the program + * is removed from the array before bpf_prog_put(). + * The 'struct bpf_prog_array *' should only be replaced with xchg() + * since other cpus are walking the array of pointers in parallel. + */ +struct bpf_prog_array_item { + struct bpf_prog *prog; + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; +}; + +struct bpf_prog_array { + struct rcu_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); +void bpf_prog_array_free(struct bpf_prog_array *progs); +int bpf_prog_array_length(struct bpf_prog_array *progs); +bool bpf_prog_array_is_empty(struct bpf_prog_array *array); +int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, + __u32 __user *prog_ids, u32 cnt); + +void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, + struct bpf_prog *old_prog); +int bpf_prog_array_copy_info(struct bpf_prog_array *array, + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt); +int bpf_prog_array_copy(struct bpf_prog_array *old_array, + struct bpf_prog *exclude_prog, + struct bpf_prog *include_prog, + struct bpf_prog_array **new_array); + +#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 _ret = 1; \ + migrate_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + if (unlikely(check_non_null && !_array))\ + goto _out; \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + _ret &= func(_prog, ctx); \ + _item++; \ + } \ +_out: \ + rcu_read_unlock(); \ + migrate_enable(); \ + _ret; \ + }) + +/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs + * so BPF programs can request cwr for TCP packets. + * + * Current cgroup skb programs can only return 0 or 1 (0 to drop the + * packet. 
This macro changes the behavior so the low order bit + * indicates whether the packet should be dropped (0) or not (1) + * and the next bit is a congestion notification bit. This could be + * used by TCP to call tcp_enter_cwr() + * + * Hence, new allowed return values of CGROUP EGRESS BPF programs are: + * 0: drop packet + * 1: keep packet + * 2: drop packet and cn + * 3: keep packet and cn + * + * This macro then converts it to one of the NET_XMIT or an error + * code that is then interpreted as drop packet (and no cn): + * 0: NET_XMIT_SUCCESS skb should be transmitted + * 1: NET_XMIT_DROP skb should be dropped and cn + * 2: NET_XMIT_CN skb should be transmitted and cn + * 3: -EPERM skb should be dropped + */ +#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 ret; \ + u32 _ret = 1; \ + u32 _cn = 0; \ + migrate_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + ret = func(_prog, ctx); \ + _ret &= (ret & 1); \ + _cn |= (ret & 2); \ + _item++; \ + } \ + rcu_read_unlock(); \ + migrate_enable(); \ + if (_ret) \ + _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ + else \ + _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ + _ret; \ + }) + +#define BPF_PROG_RUN_ARRAY(array, ctx, func) \ + __BPF_PROG_RUN_ARRAY(array, ctx, func, false) + +#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ + __BPF_PROG_RUN_ARRAY(array, ctx, func, true) + +#ifdef CONFIG_BPF_SYSCALL +DECLARE_PER_CPU(int, bpf_prog_active); + +/* + * Block execution of BPF programs attached to instrumentation (perf, + * kprobes, tracepoints) to prevent deadlocks on map operations as any of + * these events can happen inside a region which holds a map bucket lock + * and can deadlock on it. + * + * Use the preemption safe inc/dec variants on RT because migrate disable + * is preemptible on RT and preemption in the middle of the RMW operation + * might lead to inconsistent state. Use the raw variants for non RT + * kernels as migrate_disable() maps to preempt_disable() so the slightly + * more expensive save operation can be avoided. 
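+ *
+ * Illustrative usage sketch (not from this header; the exact call site
+ * is an assumption): a map operation reachable from instrumentation is
+ * typically bracketed like
+ *
+ *	bpf_disable_instrumentation();
+ *	err = map->ops->map_update_elem(map, key, value, flags);
+ *	bpf_enable_instrumentation();
+ *
+ * so that a kprobe/tracepoint program firing inside the update cannot
+ * re-enter the same map and deadlock on its bucket lock.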
+ */
+static inline void bpf_disable_instrumentation(void)
+{
+	migrate_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		this_cpu_inc(bpf_prog_active);
+	else
+		__this_cpu_inc(bpf_prog_active);
+}
+
+static inline void bpf_enable_instrumentation(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		this_cpu_dec(bpf_prog_active);
+	else
+		__this_cpu_dec(bpf_prog_active);
+	migrate_enable();
+}
+
+extern const struct file_operations bpf_map_fops;
+extern const struct file_operations bpf_prog_fops;
+
+#define BPF_PROG_TYPE(_id, _name) \
+	extern const struct bpf_prog_ops _name ## _prog_ops; \
+	extern const struct bpf_verifier_ops _name ## _verifier_ops;
+#define BPF_MAP_TYPE(_id, _ops) \
+	extern const struct bpf_map_ops _ops;
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
+
+extern const struct bpf_prog_ops bpf_offload_prog_ops;
+extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
+extern const struct bpf_verifier_ops xdp_analyzer_ops;
+
+struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
+				       bool attach_drv);
+struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+void bpf_prog_sub(struct bpf_prog *prog, int i);
+struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
+void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+
+struct bpf_map *bpf_map_get_with_uref(u32 ufd);
+struct bpf_map *__bpf_map_get(struct fd f);
+struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
+						   bool uref);
+void bpf_map_put_with_uref(struct bpf_map *map);
+void bpf_map_put(struct bpf_map *map);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
+void bpf_map_charge_finish(struct bpf_map_memory *mem);
+void bpf_map_charge_move(struct bpf_map_memory *dst,
+			 struct bpf_map_memory *src);
+void *bpf_map_area_alloc(u64 size, int numa_node);
+void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+
+extern int sysctl_unprivileged_bpf_disabled;
+
+int bpf_map_new_fd(struct bpf_map *map, int flags);
+int bpf_prog_new_fd(struct bpf_prog *prog);
+
+int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
+int bpf_obj_get_user(const char __user *pathname, int flags);
+
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+			   u64 flags);
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+			    u64 flags);
+
+int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+				 void *key, void *value, u64 map_flags);
+int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
+int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+				void *key, void *value, u64 map_flags);
+int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void
*key, u32 *value); + +int bpf_get_file_flag(int flags); +int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, + size_t actual_size); + +/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and + * forced to use 'long' read/writes to try to atomically copy long counters. + * Best-effort only. No barriers here, since it _will_ race with concurrent + * updates from BPF programs. Called from bpf syscall and mostly used with + * size 8 or 16 bytes, so ask compiler to inline it. + */ +static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) +{ + const long *lsrc = src; + long *ldst = dst; + + size /= sizeof(long); + while (size--) + *ldst++ = *lsrc++; +} + +/* verify correctness of eBPF program */ +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, + union bpf_attr __user *uattr); +void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); + +/* Map specifics */ +struct xdp_buff; +struct sk_buff; + +struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); +void __dev_map_flush(struct bpf_map *map); +int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, + struct net_device *dev_rx); +int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, + struct bpf_prog *xdp_prog); + +struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); +void __cpu_map_flush(struct bpf_map *map); +int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, + struct net_device *dev_rx); + +/* Return map's numa specified by userspace */ +static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) +{ + return (attr->map_flags & BPF_F_NUMA_NODE) ? 
+ attr->numa_node : NUMA_NO_NODE; +} + +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); +int array_map_alloc_check(union bpf_attr *attr); + +int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); +#else /* !CONFIG_BPF_SYSCALL */ +static inline struct bpf_prog *bpf_prog_get(u32 ufd) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, + enum bpf_prog_type type, + bool attach_drv) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, + int i) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_prog_sub(struct bpf_prog *prog, int i) +{ +} + +static inline void bpf_prog_put(struct bpf_prog *prog) +{ +} + +static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct bpf_prog *__must_check +bpf_prog_inc_not_zero(struct bpf_prog *prog) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) +{ + return 0; +} + +static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) +{ +} + +static inline int bpf_obj_get_user(const char __user *pathname, int flags) +{ + return -EOPNOTSUPP; +} + +static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + +static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + +static inline void __dev_map_flush(struct bpf_map *map) +{ +} + +struct xdp_buff; +struct bpf_dtab_netdev; + +static inline +int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, + struct net_device *dev_rx) +{ + return 0; +} + +struct sk_buff; + +static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, + struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + return 0; +} + +static inline +struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) +{ + return NULL; +} + +static inline void __cpu_map_flush(struct bpf_map *map) +{ +} + +static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, + struct xdp_buff *xdp, + struct net_device *dev_rx) +{ + return 0; +} + +static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, + enum bpf_prog_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} + +static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} + +static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} +#endif /* CONFIG_BPF_SYSCALL */ + +static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) +{ + return bpf_prog_get_type_dev(ufd, type, false); +} + +bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); + +int bpf_prog_offload_compile(struct bpf_prog *prog); +void bpf_prog_offload_destroy(struct bpf_prog *prog); +int 
bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); + +int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); + +int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_map_offload_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags); +int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); +int bpf_map_offload_get_next_key(struct bpf_map *map, + void *key, void *next_key); + +bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); + +struct bpf_offload_dev * +bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); +void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); +void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); +int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, + struct net_device *netdev); +void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, + struct net_device *netdev); +bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); + +#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) +int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); + +static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) +{ + return aux->offload_requested; +} + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return unlikely(map->ops == &bpf_map_offload_ops); +} + +struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); +void bpf_map_offload_map_free(struct bpf_map *map); +#else +static inline int bpf_prog_offload_init(struct bpf_prog *prog, + union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} + +static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) +{ + return false; +} + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return false; +} + +static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_map_offload_map_free(struct bpf_map *map) +{ +} +#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ + +#if defined(CONFIG_BPF_STREAM_PARSER) +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which); +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); +#else +static inline int sock_map_prog_update(struct bpf_map *map, + struct bpf_prog *prog, + struct bpf_prog *old, u32 which) +{ + return -EOPNOTSUPP; +} + +static inline int sock_map_get_from_fd(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int sock_map_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EOPNOTSUPP; +} +#endif + +#if defined(CONFIG_XDP_SOCKETS) +struct xdp_sock; +struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key); +int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs); +void __xsk_map_flush(struct bpf_map *map); +#else +struct xdp_sock; +static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + +static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs) +{ + return -EOPNOTSUPP; +} + +static inline void __xsk_map_flush(struct bpf_map *map) +{ +} +#endif + +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) +void bpf_sk_reuseport_detach(struct sock *sk); +int 
bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +#else +static inline void bpf_sk_reuseport_detach(struct sock *sk) +{ +} + +#ifdef CONFIG_BPF_SYSCALL +static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, + void *key, void *value) +{ + return -EOPNOTSUPP; +} + +static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, + void *key, void *value, + u64 map_flags) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_BPF_SYSCALL */ +#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ + +/* verifier prototypes for helper functions called from eBPF programs */ +extern const struct bpf_func_proto bpf_map_lookup_elem_proto; +extern const struct bpf_func_proto bpf_map_update_elem_proto; +extern const struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_push_elem_proto; +extern const struct bpf_func_proto bpf_map_pop_elem_proto; +extern const struct bpf_func_proto bpf_map_peek_elem_proto; + +extern const struct bpf_func_proto bpf_get_prandom_u32_proto; +extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; +extern const struct bpf_func_proto bpf_get_numa_node_id_proto; +extern const struct bpf_func_proto bpf_tail_call_proto; +extern const struct bpf_func_proto bpf_ktime_get_ns_proto; +extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; +extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; +extern const struct bpf_func_proto bpf_get_current_comm_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto; +extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_sock_map_update_proto; +extern const struct bpf_func_proto bpf_sock_hash_update_proto; +extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; +extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; +extern const struct bpf_func_proto bpf_msg_redirect_map_proto; +extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; +extern const struct bpf_func_proto bpf_sk_redirect_map_proto; +extern const struct bpf_func_proto bpf_spin_lock_proto; +extern const struct bpf_func_proto bpf_spin_unlock_proto; +extern const struct bpf_func_proto bpf_get_local_storage_proto; +extern const struct bpf_func_proto bpf_strtol_proto; +extern const struct bpf_func_proto bpf_strtoul_proto; +extern const struct bpf_func_proto bpf_tcp_sock_proto; + +/* Shared helpers among cBPF and eBPF. 
 */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
+#if defined(CONFIG_NET)
+bool bpf_sock_common_is_valid_access(int off, int size,
+				     enum bpf_access_type type,
+				     struct bpf_insn_access_aux *info);
+bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+			      struct bpf_insn_access_aux *info);
+u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+				const struct bpf_insn *si,
+				struct bpf_insn *insn_buf,
+				struct bpf_prog *prog,
+				u32 *target_size);
+#else
+static inline bool bpf_sock_common_is_valid_access(int off, int size,
+						   enum bpf_access_type type,
+						   struct bpf_insn_access_aux *info)
+{
+	return false;
+}
+static inline bool bpf_sock_is_valid_access(int off, int size,
+					    enum bpf_access_type type,
+					    struct bpf_insn_access_aux *info)
+{
+	return false;
+}
+static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+					      const struct bpf_insn *si,
+					      struct bpf_insn *insn_buf,
+					      struct bpf_prog *prog,
+					      u32 *target_size)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_INET
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+				  struct bpf_insn_access_aux *info);
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+				    const struct bpf_insn *si,
+				    struct bpf_insn *insn_buf,
+				    struct bpf_prog *prog,
+				    u32 *target_size);
+
+bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+				  struct bpf_insn_access_aux *info);
+
+u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+				    const struct bpf_insn *si,
+				    struct bpf_insn *insn_buf,
+				    struct bpf_prog *prog,
+				    u32 *target_size);
+#else
+static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
+						enum bpf_access_type type,
+						struct bpf_insn_access_aux *info)
+{
+	return false;
+}
+
+static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+						  const struct bpf_insn *si,
+						  struct bpf_insn *insn_buf,
+						  struct bpf_prog *prog,
+						  u32 *target_size)
+{
+	return 0;
+}
+static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
+						enum bpf_access_type type,
+						struct bpf_insn_access_aux *info)
+{
+	return false;
+}
+
+static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+						  const struct bpf_insn *si,
+						  struct bpf_insn *insn_buf,
+						  struct bpf_prog *prog,
+						  u32 *target_size)
+{
+	return 0;
+}
+#endif /* CONFIG_INET */
+
+#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
new file mode 100644
index 0000000..9d9ff75
--- /dev/null
+++ b/include/linux/bpf_lirc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_LIRC_H
+#define _BPF_LIRC_H
+
+#include <uapi/linux/bpf.h>
+
+#ifdef CONFIG_BPF_LIRC_MODE2
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int lirc_prog_detach(const union bpf_attr *attr);
+int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+#else
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+				   struct bpf_prog *prog)
+{
+	return -EINVAL;
+}
+
+static inline int lirc_prog_detach(const union bpf_attr *attr)
+{
+	return -EINVAL;
+}
+
+static inline int lirc_prog_query(const union bpf_attr *attr,
+				  union bpf_attr __user *uattr)
+{
+	return -EINVAL;
+}
+#endif
+
+#endif /* _BPF_LIRC_H */
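/*
 * Illustrative sketch (not part of the commit): the inline stubs above
 * follow the usual CONFIG-gating pattern, so a caller needs no #ifdef.
 * Assuming a hypothetical helper in the bpf syscall code:
 *
 *	static int attach_lirc(const union bpf_attr *attr,
 *			       struct bpf_prog *prog)
 *	{
 *		return lirc_prog_attach(attr, prog);
 *	}
 *
 * with CONFIG_BPF_LIRC_MODE2 disabled this compiles against the stub
 * and simply returns -EINVAL.
 */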
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644
index 0000000..ddf896a
--- /dev/null
+++ b/include/linux/bpf_trace.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BPF_TRACE_H__
+#define __LINUX_BPF_TRACE_H__
+
+#include <trace/events/xdp.h>
+
+#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
new file mode 100644
index 0000000..36a9c23
--- /dev/null
+++ b/include/linux/bpf_types.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* internal file - do not include directly */
+
+#ifdef CONFIG_NET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
+#ifdef CONFIG_CGROUP_BPF
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
+#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
+#endif
+#ifdef CONFIG_BPF_EVENTS
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
+#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt)
+#endif
+#ifdef CONFIG_BPF_LIRC_MODE2
+BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
+#endif
+#ifdef CONFIG_INET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
+#endif
+
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
+#ifdef CONFIG_CGROUPS
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
+#ifdef CONFIG_PERF_EVENTS
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
+#if defined(CONFIG_BPF_STREAM_PARSER)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
+#if defined(CONFIG_XDP_SOCKETS)
+BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
+#endif
+#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
+#endif
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
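/*
 * Illustrative note (not part of the commit): bpf_types.h above is an
 * X-macro table; each includer defines BPF_PROG_TYPE()/BPF_MAP_TYPE()
 * before including it. With the definitions used in bpf.h earlier in
 * this commit,
 *
 *	#define BPF_PROG_TYPE(_id, _name) \
 *		extern const struct bpf_prog_ops _name ## _prog_ops; \
 *		extern const struct bpf_verifier_ops _name ## _verifier_ops;
 *
 * the entry BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) expands to
 *
 *	extern const struct bpf_prog_ops xdp_prog_ops;
 *	extern const struct bpf_verifier_ops xdp_verifier_ops;
 *
 * Other includers redefine the two macros to build dispatch tables from
 * the same list.
 */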
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 0000000..26a6d58
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+#include <linux/tnum.h>
+
+/* Maximum variable offset umax_value permitted when resolving memory accesses.
+ * In practice this is far bigger than any realistic pointer offset; this limit
+ * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
+ */
+#define BPF_MAX_VAR_OFF	(1 << 29)
+/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
+ * that converting umax_value to int cannot overflow.
+ */
+#define BPF_MAX_VAR_SIZ	(1 << 29)
+
+/* Liveness marks, used for registers and spilled-regs (in stack slots).
+ * Read marks propagate upwards until they find a write mark; they record that
+ * "one of this state's descendants read this reg" (and therefore the reg is
+ * relevant for states_equal() checks).
+ * Write marks collect downwards and do not propagate; they record that "the
+ * straight-line code that reached this state (from its parent) wrote this reg"
+ * (and therefore that reads propagated from this state or its descendants
+ * should not propagate to its parent).
+ * A state with a write mark can receive read marks; it just won't propagate
+ * them to its parent, since the write mark is a property, not of the state,
+ * but of the link between it and its parent. See mark_reg_read() and
+ * mark_stack_slot_read() in kernel/bpf/verifier.c.
+ */
+enum bpf_reg_liveness {
+	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
+	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
+	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
+	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
+	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
+	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
+};
+
+struct bpf_reg_state {
+	/* Ordering of fields matters. See states_equal() */
+	enum bpf_reg_type type;
+	union {
+		/* valid when type == PTR_TO_PACKET */
+		u16 range;
+
+		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+		 * PTR_TO_MAP_VALUE_OR_NULL
+		 */
+		struct bpf_map *map_ptr;
+
+		/* Max size from any of the above. */
+		unsigned long raw;
+	};
+	/* Fixed part of pointer offset, pointer types only */
+	s32 off;
+	/* For PTR_TO_PACKET, used to find other pointers with the same variable
+	 * offset, so they can share range knowledge.
+	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
+	 * came from, when one is tested for != NULL.
+	 * For PTR_TO_SOCKET this is used to share which pointers retain the
+	 * same reference to the socket, to determine proper reference freeing.
+	 */
+	u32 id;
+	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+	 * from a pointer-cast helper, bpf_sk_fullsock() and
+	 * bpf_tcp_sock().
+ * + * Consider the following where "sk" is a reference counted + * pointer returned from "sk = bpf_sk_lookup_tcp();": + * + * 1: sk = bpf_sk_lookup_tcp(); + * 2: if (!sk) { return 0; } + * 3: fullsock = bpf_sk_fullsock(sk); + * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } + * 5: tp = bpf_tcp_sock(fullsock); + * 6: if (!tp) { bpf_sk_release(sk); return 0; } + * 7: bpf_sk_release(sk); + * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain + * + * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and + * "tp" ptr should be invalidated also. In order to do that, + * the reg holding "fullsock" and "sk" need to remember + * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id + * such that the verifier can reset all regs which have + * ref_obj_id matching the sk_reg->id. + * + * sk_reg->ref_obj_id is set to sk_reg->id at line 1. + * sk_reg->id will stay as NULL-marking purpose only. + * After NULL-marking is done, sk_reg->id can be reset to 0. + * + * After "fullsock = bpf_sk_fullsock(sk);" at line 3, + * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. + * + * After "tp = bpf_tcp_sock(fullsock);" at line 5, + * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id + * which is the same as sk_reg->ref_obj_id. + * + * From the verifier perspective, if sk, fullsock and tp + * are not NULL, they are the same ptr with different + * reg->type. In particular, bpf_sk_release(tp) is also + * allowed and has the same effect as bpf_sk_release(sk). + */ + u32 ref_obj_id; + /* For scalar types (SCALAR_VALUE), this represents our knowledge of + * the actual value. + * For pointer types, this represents the variable part of the offset + * from the pointed-to object, and is shared with all bpf_reg_states + * with the same id as us. + */ + struct tnum var_off; + /* Used to determine if any memory access using this register will + * result in a bad access. + * These refer to the same value as var_off, not necessarily the actual + * contents of the register. + */ + s64 smin_value; /* minimum possible (s64)value */ + s64 smax_value; /* maximum possible (s64)value */ + u64 umin_value; /* minimum possible (u64)value */ + u64 umax_value; /* maximum possible (u64)value */ + /* parentage chain for liveness checking */ + struct bpf_reg_state *parent; + /* Inside the callee two registers can be both PTR_TO_STACK like + * R1=fp-8 and R2=fp-8, but one of them points to this function stack + * while another to the caller's stack. To differentiate them 'frameno' + * is used which is an index in bpf_verifier_state->frame[] array + * pointing to bpf_func_state. + */ + u32 frameno; + /* Tracks subreg definition. The stored value is the insn_idx of the + * writing insn. This is safe because subreg_def is used before any insn + * patching which only happens after main verification finished. 
+ */
+	s32 subreg_def;
+	enum bpf_reg_liveness live;
+	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+	bool precise;
+};
+
+enum bpf_stack_slot_type {
+	STACK_INVALID,    /* nothing was stored in this stack slot */
+	STACK_SPILL,      /* register spilled into stack */
+	STACK_MISC,	  /* BPF program wrote some data into this slot */
+	STACK_ZERO,	  /* BPF program wrote constant zero */
+};
+
+#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
+
+struct bpf_stack_state {
+	struct bpf_reg_state spilled_ptr;
+	u8 slot_type[BPF_REG_SIZE];
+};
+
+struct bpf_reference_state {
+	/* Track each reference created with a unique id, even if the same
+	 * instruction creates the reference multiple times (eg, via CALL).
+	 */
+	int id;
+	/* Instruction where the allocation of this reference occurred. This
+	 * is used purely to inform the user of a reference leak.
+	 */
+	int insn_idx;
+};
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_func_state {
+	struct bpf_reg_state regs[MAX_BPF_REG];
+	/* index of call instruction that called into this func */
+	int callsite;
+	/* stack frame number of this function state from pov of
+	 * enclosing bpf_verifier_state.
+	 * 0 = main function, 1 = first callee.
+	 */
+	u32 frameno;
+	/* subprog number == index within subprog_stack_depth
+	 * zero == main subprog
+	 */
+	u32 subprogno;
+
+	/* The following fields should be last. See copy_func_state() */
+	int acquired_refs;
+	struct bpf_reference_state *refs;
+	int allocated_stack;
+	struct bpf_stack_state *stack;
+};
+
+struct bpf_idx_pair {
+	u32 prev_idx;
+	u32 idx;
+};
+
+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+	/* call stack tracking */
+	struct bpf_func_state *frame[MAX_CALL_FRAMES];
+	struct bpf_verifier_state *parent;
+	/*
+	 * 'branches' field is the number of branches left to explore:
+	 * 0 - all possible paths from this state reached bpf_exit or
+	 * were safely pruned
+	 * 1 - at least one path is being explored.
+	 * This state hasn't reached bpf_exit
+	 * 2 - at least two paths are being explored.
+	 * This state is an immediate parent of two children.
+	 * One is fallthrough branch with branches==1 and another
+	 * state is pushed into stack (to be explored later) also with
+	 * branches==1. The parent of this state has branches==1.
+	 * The verifier state tree connected via 'parent' pointer looks like:
+	 * 1
+	 * 1
+	 * 2 -> 1 (first 'if' pushed into stack)
+	 * 1
+	 * 2 -> 1 (second 'if' pushed into stack)
+	 * 1
+	 * 1
+	 * 1 bpf_exit.
+	 *
+	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+	 * and the verifier state tree will look:
+	 * 1
+	 * 1
+	 * 2 -> 1 (first 'if' pushed into stack)
+	 * 1
+	 * 1 -> 1 (second 'if' pushed into stack)
+	 * 0
+	 * 0
+	 * 0 bpf_exit.
+	 * After pop_stack() the do_check() will resume at second 'if'.
+	 *
+	 * If is_state_visited() sees a state with branches > 0 it means
+	 * there is a loop. If such state is exactly equal to the current state
+	 * it's an infinite loop. Note states_equal() checks for states
+	 * equivalency, so two states being 'states_equal' does not mean
+	 * infinite loop. The exact comparison is provided by
+	 * states_maybe_looping() function. It's a stronger pre-check and
+	 * much faster than states_equal().
+	 *
+	 * This algorithm may not find all possible infinite loops or
+	 * loop iteration count may be too high.
+	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+ */ + u32 branches; + u32 insn_idx; + u32 curframe; + u32 active_spin_lock; + bool speculative; + + /* first and last insn idx of this verifier state */ + u32 first_insn_idx; + u32 last_insn_idx; + /* jmp history recorded from first to last. + * backtracking is using it to go from last to first. + * For most states jmp_history_cnt is [0-3]. + * For loops can go up to ~40. + */ + struct bpf_idx_pair *jmp_history; + u32 jmp_history_cnt; +}; + +#define bpf_get_spilled_reg(slot, frame) \ + (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ + (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ? &frame->stack[slot].spilled_ptr : NULL) + +/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ +#define bpf_for_each_spilled_reg(iter, frame, reg) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ + iter < frame->allocated_stack / BPF_REG_SIZE; \ + iter++, reg = bpf_get_spilled_reg(iter, frame)) + +/* linked list of verifier states used to prune search */ +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt, hit_cnt; +}; + +/* Possible states for alu_state member. */ +#define BPF_ALU_SANITIZE_SRC 1U +#define BPF_ALU_SANITIZE_DST 2U +#define BPF_ALU_NEG_VALUE (1U << 2) +#define BPF_ALU_NON_POINTER (1U << 3) +#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ + BPF_ALU_SANITIZE_DST) + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ + unsigned long map_state; /* pointer/poison value for maps */ + s32 call_imm; /* saved imm field of call insn */ + u32 alu_limit; /* limit for add/sub register with pointer */ + struct { + u32 map_index; /* index into used_maps[] */ + u32 map_off; /* offset from value base address */ + }; + }; + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ + int sanitize_stack_off; /* stack slot to be cleared */ + bool seen; /* this insn was processed by the verifier */ + bool zext_dst; /* this insn zero extends dst reg */ + u8 alu_state; /* used in combination with alu_limit */ + bool prune_point; + unsigned int orig_idx; /* original instruction index */ +}; + +#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ + +#define BPF_VERIFIER_TMP_LOG_SIZE 1024 + +struct bpf_verifier_log { + u32 level; + char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; + char __user *ubuf; + u32 len_used; + u32 len_total; +}; + +static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) +{ + return log->len_used >= log->len_total - 1; +} + +#define BPF_LOG_LEVEL1 1 +#define BPF_LOG_LEVEL2 2 +#define BPF_LOG_STATS 4 +#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) +#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) + +static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) +{ + return log->level && log->ubuf && !bpf_verifier_log_full(log); +} + +#define BPF_MAX_SUBPROGS 256 + +struct bpf_subprog_info { + u32 start; /* insn idx of function entry point */ + u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ + u16 stack_depth; /* max. 
stack depth used by this function */ +}; + +/* single container for all structs + * one verifier_env per bpf_check() call + */ +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; /* eBPF program being verified */ + const struct bpf_verifier_ops *ops; + struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ + int stack_size; /* number of states to be processed */ + bool strict_alignment; /* perform strict pointer alignment checks */ + bool test_state_freq; /* test verifier with different pruning frequency */ + struct bpf_verifier_state *cur_state; /* current verifier state */ + struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ + u32 used_map_cnt; /* number of used maps */ + u32 id_gen; /* used to generate unique reg IDs */ + bool allow_ptr_leaks; + bool seen_direct_write; + struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + u32 subprog_cnt; + /* number of instructions analyzed by the verifier */ + u32 prev_insn_processed, insn_processed; + /* number of jmps, calls, exits analyzed so far */ + u32 prev_jmps_processed, jmps_processed; + /* total verification time */ + u64 verification_time; + /* maximum number of verifier states kept in 'branching' instructions */ + u32 max_states_per_insn; + /* total number of allocated verifier states */ + u32 total_states; + /* some states are freed during program analysis. + * this is peak number of states. 
this number dominates kernel
+	 * memory consumption during verification
+	 */
+	u32 peak_states;
+	/* longest register parentage chain walked for liveness marking */
+	u32 longest_mark_read_walk;
+};
+
+__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
+				      const char *fmt, va_list args);
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+					   const char *fmt, ...);
+
+static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
+{
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[cur->curframe];
+}
+
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+{
+	return cur_func(env)->regs;
+}
+
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx);
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+			      struct bpf_insn *insn);
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
new file mode 100644
index 0000000..d815622
--- /dev/null
+++ b/include/linux/bpfilter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BPFILTER_H
+#define _LINUX_BPFILTER_H
+
+#include <uapi/linux/bpfilter.h>
+#include <linux/umh.h>
+
+struct sock;
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
+			    unsigned int optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+			    int __user *optlen);
+struct bpfilter_umh_ops {
+	struct umh_info info;
+	/* since ip_getsockopt() can run in parallel, serialize access to umh */
+	struct mutex lock;
+	int (*sockopt)(struct sock *sk, int optname,
+		       char __user *optval,
+		       unsigned int optlen, bool is_set);
+	int (*start)(void);
+	bool stop;
+};
+extern struct bpfilter_umh_ops bpfilter_ops;
+#endif
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
new file mode 100644
index 0000000..6db2d9a
--- /dev/null
+++ b/include/linux/brcmphy.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BRCMPHY_H
+#define _LINUX_BRCMPHY_H
+
+#include <linux/phy.h>
+
+/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
+ * to configure the switch internal registers via MDIO accesses.
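+ *
+ * Illustrative sketch (not from this header; 'priv->bus' and 'reg' are
+ * hypothetical driver-specific names): a raw read of such a switch
+ * register goes through the pseudo-PHY address, e.g.
+ *
+ *	val = mdiobus_read(priv->bus, BRCM_PSEUDO_PHY_ADDR, reg);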
+ */ +#define BRCM_PSEUDO_PHY_ADDR 30 + +#define PHY_ID_BCM50610 0x0143bd60 +#define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5241 0x0143bc30 +#define PHY_ID_BCMAC131 0x0143bc70 +#define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5395 0x0143bcf0 +#define PHY_ID_BCM54810 0x03625d00 +#define PHY_ID_BCM5482 0x0143bcb0 +#define PHY_ID_BCM5411 0x00206070 +#define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM54210E 0x600d84a0 +#define PHY_ID_BCM5464 0x002060b0 +#define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM54612E 0x03625e60 +#define PHY_ID_BCM54616S 0x03625d10 +#define PHY_ID_BCM57780 0x03625d90 +#define PHY_ID_BCM89610 0x03625cd0 + +#define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7255 0xae025120 +#define PHY_ID_BCM7260 0xae025190 +#define PHY_ID_BCM7268 0xae025090 +#define PHY_ID_BCM7271 0xae0253b0 +#define PHY_ID_BCM7278 0xae0251a0 +#define PHY_ID_BCM7364 0xae025260 +#define PHY_ID_BCM7366 0x600d8490 +#define PHY_ID_BCM7346 0x600d8650 +#define PHY_ID_BCM7362 0x600d84b0 +#define PHY_ID_BCM7425 0x600d86b0 +#define PHY_ID_BCM7429 0x600d8730 +#define PHY_ID_BCM7435 0x600d8750 +#define PHY_ID_BCM74371 0xae0252e0 +#define PHY_ID_BCM7439 0x600d8480 +#define PHY_ID_BCM7439_2 0xae025080 +#define PHY_ID_BCM7445 0x600d8510 + +#define PHY_ID_BCM_CYGNUS 0xae025200 +#define PHY_ID_BCM_OMEGA 0xae025100 + +#define PHY_BCM_OUI_MASK 0xfffffc00 +#define PHY_BCM_OUI_1 0x00206000 +#define PHY_BCM_OUI_2 0x0143bc00 +#define PHY_BCM_OUI_3 0x03625c00 +#define PHY_BCM_OUI_4 0x600d8400 +#define PHY_BCM_OUI_5 0x03625e00 +#define PHY_BCM_OUI_6 0xae025000 + +#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 +#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 +#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010 +#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020 +#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100 +#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200 +#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 +#define PHY_BRCM_STD_IBND_DISABLE 0x00000800 +#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 +#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 +#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 +#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 +#define PHY_BRCM_EN_MASTER_MODE 0x00010000 + +/* Broadcom BCM7xxx specific workarounds */ +#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) +#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) +#define PHY_BCM_FLAGS_VALID 0x80000000 + +/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ +#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ +#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ +#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ + +#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ +#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ + +#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */ +#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ +#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ +#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ +#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */ + +#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */ +#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ +#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ +#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ +#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */ +#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */ +#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */ 
+#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */ +#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */ +#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */ +#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */ +#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */ +#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */ +#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */ +#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */ +#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */ +#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ +#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ + +#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */ +#define MII_BCM54XX_SHD_WRITE 0x8000 +#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) +#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) + +/* + * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) + */ +#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 +#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 +#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 + +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 +#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 + +#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 +#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 + +/* + * Broadcom LED source encodings. These are used in BCM5461, BCM5481, + * BCM5482, and possibly some others. + */ +#define BCM_LED_SRC_LINKSPD1 0x0 +#define BCM_LED_SRC_LINKSPD2 0x1 +#define BCM_LED_SRC_XMITLED 0x2 +#define BCM_LED_SRC_ACTIVITYLED 0x3 +#define BCM_LED_SRC_FDXLED 0x4 +#define BCM_LED_SRC_SLAVE 0x5 +#define BCM_LED_SRC_INTR 0x6 +#define BCM_LED_SRC_QUALITY 0x7 +#define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_WIRESPEED 0x9 +#define BCM_LED_SRC_MULTICOLOR1 0xa +#define BCM_LED_SRC_OPENSHORT 0xb +#define BCM_LED_SRC_OFF 0xe /* Tied high */ +#define BCM_LED_SRC_ON 0xf /* Tied low */ + +/* + * Broadcom Multicolor LED configurations (expansion register 4) + */ +#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04) +#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8) +#define BCM_LED_MULTICOLOR_LINK_ACT 0x0 +#define BCM_LED_MULTICOLOR_SPEED 0x1 +#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2 +#define BCM_LED_MULTICOLOR_FDX 0x3 +#define BCM_LED_MULTICOLOR_OFF 0x4 +#define BCM_LED_MULTICOLOR_ON 0x5 +#define BCM_LED_MULTICOLOR_ALT 0x6 +#define BCM_LED_MULTICOLOR_FLASH 0x7 +#define BCM_LED_MULTICOLOR_LINK 0x8 +#define BCM_LED_MULTICOLOR_ACT 0x9 +#define BCM_LED_MULTICOLOR_PROGRAM 0xa + +/* + * BCM5482: Shadow registers + * Shadow values go into bits [14:10] of register 0x1c to select a shadow + * register to access. 
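+ *
+ * Illustrative access sketch (not from this header; 'shadow' and 'val'
+ * are placeholders), composed from the MII_BCM54XX_SHD helpers defined
+ * above:
+ *
+ *	phy_write(phydev, MII_BCM54XX_SHD,
+ *		  MII_BCM54XX_SHD_WRITE |
+ *		  MII_BCM54XX_SHD_VAL(shadow) |
+ *		  MII_BCM54XX_SHD_DATA(val));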
+ */ + +/* 00100: Reserved control register 2 */ +#define BCM54XX_SHD_SCR2 0x04 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 +#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 + +/* 00101: Spare Control Register 3 */ +#define BCM54XX_SHD_SCR3 0x05 +#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 +#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 +#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 + +/* 01010: Auto Power-Down */ +#define BCM54XX_SHD_APD 0x0a +#define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */ +#define BCM54XX_SHD_APD_EN 0x0020 +#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */ +#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */ + +#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ + /* LED3 / ~LINKSPD[2] selector */ +#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) + /* LED1 / ~LINKSPD[1] selector */ +#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) +#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ +#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ +#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ +#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ +#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ +#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ + + +/* + * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) + */ +#define MII_BCM54XX_EXP_AADJ1CH0 0x001f +#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 +#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 +#define MII_BCM54XX_EXP_AADJ1CH3 0x601f +#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_BCM54XX_EXP_EXP08 0x0F08 +#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 +#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 +#define MII_BCM54XX_EXP_EXP75 0x0f75 +#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c +#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 +#define MII_BCM54XX_EXP_EXP96 0x0f96 +#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 +#define MII_BCM54XX_EXP_EXP97 0x0f97 +#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c + +/* + * BCM5482: Secondary SerDes registers + */ +#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ +#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ +#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ +#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ +#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ + +/* BCM54810 Registers */ +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) +#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) +#define BCM54810_SHD_CLK_CTL 0x3 +#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) + +/* BCM54612E Registers */ +#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) +#define BCM54612E_LED4_CLK125OUT_EN (1 << 1) + +/*****************************************************************************/ +/* Fast Ethernet Transceiver definitions. 
*/
+/*****************************************************************************/
+
+#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
+#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
+#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
+#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
+#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
+#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
+
+#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
+#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
+
+
+/*** Shadow register definitions ***/
+
+#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
+#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
+
+#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
+#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
+
+#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
+#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
+
+#define BRCM_CL45VEN_EEE_CONTROL 0x803d
+#define LPI_FEATURE_EN 0x8000
+#define LPI_FEATURE_EN_DIG1000X 0x4000
+
+/* Core register definitions */
+#define MII_BRCM_CORE_BASE12 0x12
+#define MII_BRCM_CORE_BASE13 0x13
+#define MII_BRCM_CORE_BASE14 0x14
+#define MII_BRCM_CORE_BASE1E 0x1E
+#define MII_BRCM_CORE_EXPB0 0xB0
+#define MII_BRCM_CORE_EXPB1 0xB1
+
+#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
new file mode 100644
index 0000000..62b1eb3
--- /dev/null
+++ b/include/linux/bsearch.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BSEARCH_H
+#define _LINUX_BSEARCH_H
+
+#include <linux/types.h>
+
+void *bsearch(const void *key, const void *base, size_t num, size_t size,
+ int (*cmp)(const void *key, const void *elt));
+
+#endif /* _LINUX_BSEARCH_H */
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
new file mode 100644
index 0000000..960988d
--- /dev/null
+++ b/include/linux/bsg-lib.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * BSG helper library
+ *
+ * Copyright (C) 2008 James Smart, Emulex Corporation
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011 Mike Christie
+ */
+#ifndef _BLK_BSG_
+#define _BLK_BSG_
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_request.h>
+
+struct request;
+struct device;
+struct scatterlist;
+struct request_queue;
+
+typedef int (bsg_job_fn) (struct bsg_job *);
+typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);
+
+struct bsg_buffer {
+ unsigned int payload_len;
+ int sg_cnt;
+ struct scatterlist *sg_list;
+};
+
+struct bsg_job {
+ struct device *dev;
+
+ struct kref kref;
+
+ unsigned int timeout;
+
+ /* Transport/driver specific request/reply structs */
+ void *request;
+ void *reply;
+
+ unsigned int request_len;
+ unsigned int reply_len;
+ /*
+ * On entry : reply_len indicates the buffer size allocated for
+ * the reply.
+ *
+ * Upon completion : the message handler must set reply_len
+ * to indicate the size of the reply to be returned to the
+ * caller.
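 *
 * A minimal handler sketch honouring these semantics (illustrative only;
 * my_bsg_job_fn, struct my_reply and fill_reply() are hypothetical names):
 *
 *   static int my_bsg_job_fn(struct bsg_job *job)
 *   {
 *           struct my_reply *rep = job->reply;
 *
 *           fill_reply(rep);
 *           job->reply_len = sizeof(*rep);  (size actually used)
 *           bsg_job_done(job, 0, 0);
 *           return 0;
 *   }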
+ */ + + /* DMA payloads for the request/response */ + struct bsg_buffer request_payload; + struct bsg_buffer reply_payload; + + int result; + unsigned int reply_payload_rcv_len; + + /* BIDI support */ + struct request *bidi_rq; + struct bio *bidi_bio; + + void *dd_data; /* Used for driver-specific storage */ +}; + +void bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len); +struct request_queue *bsg_setup_queue(struct device *dev, const char *name, + bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size); +void bsg_remove_queue(struct request_queue *q); +void bsg_job_put(struct bsg_job *job); +int __must_check bsg_job_get(struct bsg_job *job); + +#endif diff --git a/include/linux/bsg.h b/include/linux/bsg.h new file mode 100644 index 0000000..dac37b6 --- /dev/null +++ b/include/linux/bsg.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BSG_H +#define _LINUX_BSG_H + +#include + +struct request; + +#ifdef CONFIG_BLK_DEV_BSG +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *hdr); + int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode); + int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); + void (*free_rq)(struct request *rq); +}; + +struct bsg_class_device { + struct device *class_dev; + int minor; + struct request_queue *queue; + const struct bsg_ops *ops; +}; + +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, const struct bsg_ops *ops); +int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); +void bsg_unregister_queue(struct request_queue *q); +#else +static inline int bsg_scsi_register_queue(struct request_queue *q, + struct device *parent) +{ + return 0; +} +static inline void bsg_unregister_queue(struct request_queue *q) +{ +} +#endif /* CONFIG_BLK_DEV_BSG */ +#endif /* _LINUX_BSG_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h new file mode 100644 index 0000000..64cdf2a --- /dev/null +++ b/include/linux/btf.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Facebook */ + +#ifndef _LINUX_BTF_H +#define _LINUX_BTF_H 1 + +#include + +struct btf; +struct btf_member; +struct btf_type; +union bpf_attr; + +extern const struct file_operations btf_fops; + +void btf_put(struct btf *btf); +int btf_new_fd(const union bpf_attr *attr); +struct btf *btf_get_by_fd(int fd); +int btf_get_info_by_fd(const struct btf *btf, + const union bpf_attr *attr, + union bpf_attr __user *uattr); +/* Figure out the size of a type_id. If type_id is a modifier + * (e.g. const), it will be resolved to find out the type with size. + * + * For example: + * In describing "const void *", type_id is "const" and "const" + * refers to "void *". The return type will be "void *". + * + * If type_id is a simple "int", then return type will be "int". + * + * @btf: struct btf object + * @type_id: Find out the size of type_id. The type_id of the return + * type is set to *type_id. + * @ret_size: It can be NULL. If not NULL, the size of the return + * type is set to *ret_size. + * Return: The btf_type (resolved to another type with size info if needed). + * NULL is returned if type_id itself does not have size info + * (e.g. void) or it cannot be resolved to another type that + * has size info. + * *type_id and *ret_size will not be changed in the + * NULL return case. 
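 *
 * Illustrative use (a sketch, assuming a valid btf object and type id):
 *
 *   u32 id = type_id, size;
 *   const struct btf_type *t = btf_type_id_size(btf, &id, &size);
 *   if (!t)
 *           return -EINVAL;
 *   (id now names the size-carrying type, size holds its byte size)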
+ */ +const struct btf_type *btf_type_id_size(const struct btf *btf, + u32 *type_id, + u32 *ret_size); +void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, + struct seq_file *m); +int btf_get_fd_by_id(u32 id); +u32 btf_id(const struct btf *btf); +bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, + const struct btf_member *m, + u32 expected_offset, u32 expected_size); +int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); +bool btf_type_is_void(const struct btf_type *t); + +#ifdef CONFIG_BPF_SYSCALL +const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); +const char *btf_name_by_offset(const struct btf *btf, u32 offset); +#else +static inline const struct btf_type *btf_type_by_id(const struct btf *btf, + u32 type_id) +{ + return NULL; +} +static inline const char *btf_name_by_offset(const struct btf *btf, + u32 offset) +{ + return NULL; +} +#endif + +#endif diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h new file mode 100644 index 0000000..22c09f5 --- /dev/null +++ b/include/linux/btree-128.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +extern struct btree_geo btree_geo128; + +struct btree_head128 { struct btree_head h; }; + +static inline void btree_init_mempool128(struct btree_head128 *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int btree_init128(struct btree_head128 *head) +{ + return btree_init(&head->h); +} + +static inline void btree_destroy128(struct btree_head128 *head) +{ + btree_destroy(&head->h); +} + +static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_get_prev128(struct btree_head128 *head, + u64 *k1, u64 *k2) +{ + u64 key[2] = {*k1, *k2}; + void *val; + + val = btree_get_prev(&head->h, &btree_geo128, + (unsigned long *)&key); + *k1 = key[0]; + *k2 = key[1]; + return val; +} + +static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, + void *val, gfp_t gfp) +{ + u64 key[2] = {k1, k2}; + return btree_insert(&head->h, &btree_geo128, + (unsigned long *)&key, val, gfp); +} + +static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2, + void *val) +{ + u64 key[2] = {k1, k2}; + return btree_update(&head->h, &btree_geo128, + (unsigned long *)&key, val); +} + +static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) +{ + u64 key[2]; + void *val; + + val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); + if (val) { + *k1 = key[0]; + *k2 = key[1]; + } + + return val; +} + +static inline int btree_merge128(struct btree_head128 *target, + struct btree_head128 *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, &btree_geo128, gfp); +} + +void visitor128(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func); + +typedef void (*visitor128_t)(void *elem, unsigned long opaque, + u64 key1, u64 key2, size_t index); + +static inline size_t btree_visitor128(struct btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +static inline size_t btree_grim_visitor128(struct 
btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +#define btree_for_each_safe128(head, k1, k2, val) \ + for (val = btree_last128(head, &k1, &k2); \ + val; \ + val = btree_get_prev128(head, &k1, &k2)) + diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h new file mode 100644 index 0000000..fb34a52 --- /dev/null +++ b/include/linux/btree-type.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx +#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx) +#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,) +#define BTREE_FN(name) BTREE_TP(btree_ ## name) +#define BTREE_TYPE_HEAD BTREE_TP(struct btree_head) +#define VISITOR_FN BTREE_TP(visitor) +#define VISITOR_FN_T _BTREE_TP(visitor, BTREE_TYPE_SUFFIX, _t) + +BTREE_TYPE_HEAD { + struct btree_head h; +}; + +static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head) +{ + return btree_init(&head->h); +} + +static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head) +{ + btree_destroy(&head->h); +} + +static inline int BTREE_FN(merge)(BTREE_TYPE_HEAD *target, + BTREE_TYPE_HEAD *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp); +} + +#if (BITS_PER_LONG > BTREE_TYPE_BITS) +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + unsigned long _key = key; + return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + unsigned long _key = key; + return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_remove(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key; + void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key = *key; + void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} +#else +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, + val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} + +static inline void 
*BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} +#endif + +void VISITOR_FN(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *__func); + +typedef void (*VISITOR_FN_T)(void *elem, unsigned long opaque, + BTREE_KEYTYPE key, size_t index); + +static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +#undef VISITOR_FN +#undef VISITOR_FN_T +#undef __BTREE_TP +#undef _BTREE_TP +#undef BTREE_TP +#undef BTREE_FN +#undef BTREE_TYPE_HEAD +#undef BTREE_TYPE_SUFFIX +#undef BTREE_TYPE_GEO +#undef BTREE_KEYTYPE +#undef BTREE_TYPE_BITS diff --git a/include/linux/btree.h b/include/linux/btree.h new file mode 100644 index 0000000..68f858c --- /dev/null +++ b/include/linux/btree.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef BTREE_H +#define BTREE_H + +#include +#include + +/** + * DOC: B+Tree basics + * + * A B+Tree is a data structure for looking up arbitrary (currently allowing + * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure + * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * use binary search to find the key on lookups. + * + * Each B+Tree consists of a head, that contains bookkeeping information and + * a variable number (starting with zero) nodes. Each node contains the keys + * and pointers to sub-nodes, or, for leaf nodes, the keys and values for the + * tree entries. + * + * Each node in this implementation has the following layout: + * [key1, key2, ..., keyN] [val1, val2, ..., valN] + * + * Each key here is an array of unsigned longs, geo->no_longs in total. The + * number of keys and values (N) is geo->no_pairs. + */ + +/** + * struct btree_head - btree head + * + * @node: the first node in the tree + * @mempool: mempool used for node allocations + * @height: current of the tree + */ +struct btree_head { + unsigned long *node; + mempool_t *mempool; + int height; +}; + +/* btree geometry */ +struct btree_geo; + +/** + * btree_alloc - allocate function for the mempool + * @gfp_mask: gfp mask for the allocation + * @pool_data: unused + */ +void *btree_alloc(gfp_t gfp_mask, void *pool_data); + +/** + * btree_free - free function for the mempool + * @element: the element to free + * @pool_data: unused + */ +void btree_free(void *element, void *pool_data); + +/** + * btree_init_mempool - initialise a btree with given mempool + * + * @head: the btree head to initialise + * @mempool: the mempool to use + * + * When this function is used, there is no need to destroy + * the mempool. + */ +void btree_init_mempool(struct btree_head *head, mempool_t *mempool); + +/** + * btree_init - initialise a btree + * + * @head: the btree head to initialise + * + * This function allocates the memory pool that the + * btree needs. Returns zero or a negative error code + * (-%ENOMEM) when memory allocation fails. + * + */ +int __must_check btree_init(struct btree_head *head); + +/** + * btree_destroy - destroy mempool + * + * @head: the btree head to destroy + * + * This function destroys the internal memory pool, use only + * when using btree_init(), not with btree_init_mempool(). 
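 *
 * A typical lifecycle, sketched (error handling elided):
 *
 *   struct btree_head head;
 *   btree_init(&head);
 *   ... btree_insert() / btree_lookup() / btree_remove() ...
 *   btree_destroy(&head);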
+ */ +void btree_destroy(struct btree_head *head); + +/** + * btree_lookup - look up a key in the btree + * + * @head: the btree to look in + * @geo: the btree geometry + * @key: the key to look up + * + * This function returns the value for the given key, or %NULL. + */ +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_insert - insert an entry into the btree + * + * @head: the btree to add to + * @geo: the btree geometry + * @key: the key to add (must not already be present) + * @val: the value to add (must not be %NULL) + * @gfp: allocation flags for node allocations + * + * This function returns 0 if the item could be added, or an + * error code if it failed (may fail due to memory pressure). + */ +int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, gfp_t gfp); +/** + * btree_update - update an entry in the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to update + * @val: the value to change it to (must not be %NULL) + * + * This function returns 0 if the update was successful, or + * -%ENOENT if the key could not be found. + */ +int btree_update(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val); +/** + * btree_remove - remove an entry from the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to remove + * + * This function returns the removed entry, or %NULL if the key + * could not be found. + */ +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_merge - merge two btrees + * + * @target: the tree that gets all the entries + * @victim: the tree that gets merged into @target + * @geo: the btree geometry + * @gfp: allocation flags + * + * The two trees @target and @victim may not contain the same keys, + * that is a bug and triggers a BUG(). This function returns zero + * if the trees were merged successfully, and may return a failure + * when memory allocation fails, in which case both trees might have + * been partially merged, i.e. some entries have been moved from + * @victim to @target. + */ +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, gfp_t gfp); + +/** + * btree_last - get last entry in btree + * + * @head: btree head + * @geo: btree geometry + * @key: last key + * + * Returns the last entry in the btree, and sets @key to the key + * of that entry; returns NULL if the tree is empty, in that case + * key is not changed. + */ +void *btree_last(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_get_prev - get previous entry + * + * @head: btree head + * @geo: btree geometry + * @key: pointer to key + * + * The function returns the next item right before the value pointed to by + * @key, and updates @key with its key, or returns %NULL when there is no + * entry with a key smaller than the given key. 
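 *
 * For example, a backwards walk over all entries could be sketched as
 * follows (head and geo assumed valid; process() is a placeholder):
 *
 *   unsigned long key;
 *   void *val;
 *
 *   for (val = btree_last(head, geo, &key); val;
 *        val = btree_get_prev(head, geo, &key))
 *           process(key, val);
 *
 * This is exactly the shape of the btree_for_each_safe* helpers defined
 * further down for the typed variants.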
+ */ +void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + + +/* internal use, use btree_visitor{l,32,64,128} */ +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, size_t index, + void *func2), + void *func2); + +/* internal use, use btree_grim_visitor{l,32,64,128} */ +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2); + + +#include + +extern struct btree_geo btree_geo32; +#define BTREE_TYPE_SUFFIX l +#define BTREE_TYPE_BITS BITS_PER_LONG +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE unsigned long +#include + +#define btree_for_each_safel(head, key, val) \ + for (val = btree_lastl(head, &key); \ + val; \ + val = btree_get_prevl(head, &key)) + +#define BTREE_TYPE_SUFFIX 32 +#define BTREE_TYPE_BITS 32 +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE u32 +#include + +#define btree_for_each_safe32(head, key, val) \ + for (val = btree_last32(head, &key); \ + val; \ + val = btree_get_prev32(head, &key)) + +extern struct btree_geo btree_geo64; +#define BTREE_TYPE_SUFFIX 64 +#define BTREE_TYPE_BITS 64 +#define BTREE_TYPE_GEO &btree_geo64 +#define BTREE_KEYTYPE u64 +#include + +#define btree_for_each_safe64(head, key, val) \ + for (val = btree_last64(head, &key); \ + val; \ + val = btree_get_prev64(head, &key)) + +#endif diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h new file mode 100644 index 0000000..9a37a45 --- /dev/null +++ b/include/linux/btrfs.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BTRFS_H +#define _LINUX_BTRFS_H + +#include + +#endif /* _LINUX_BTRFS_H */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h new file mode 100644 index 0000000..15b765a --- /dev/null +++ b/include/linux/buffer_head.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/buffer_head.h + * + * Everything to do with buffer_heads. 
+ */
+
+#ifndef _LINUX_BUFFER_HEAD_H
+#define _LINUX_BUFFER_HEAD_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/linkage.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+#ifdef CONFIG_BLOCK
+
+enum bh_state_bits {
+ BH_Uptodate, /* Contains valid data */
+ BH_Dirty, /* Is dirty */
+ BH_Lock, /* Is locked */
+ BH_Req, /* Has been submitted for I/O */
+
+ BH_Mapped, /* Has a disk mapping */
+ BH_New, /* Disk mapping was newly created by get_block */
+ BH_Async_Read, /* Is under end_buffer_async_read I/O */
+ BH_Async_Write, /* Is under end_buffer_async_write I/O */
+ BH_Delay, /* Buffer is not yet allocated on disk */
+ BH_Boundary, /* Block is followed by a discontiguity */
+ BH_Write_EIO, /* I/O error on write */
+ BH_Unwritten, /* Buffer is allocated on disk but not written */
+ BH_Quiet, /* Buffer error printks to be quiet */
+ BH_Meta, /* Buffer contains metadata */
+ BH_Prio, /* Buffer should be submitted with REQ_PRIO */
+ BH_Defer_Completion, /* Defer AIO completion to workqueue */
+
+ BH_PrivateStart,/* not a state bit, but the first bit available
+ * for private allocation by other entities
+ */
+};
+
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
+
+struct page;
+struct buffer_head;
+struct address_space;
+typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
+
+/*
+ * Historically, a buffer_head was used to map a single block
+ * within a page, and of course as the unit of I/O through the
+ * filesystem and block layers. Nowadays the basic I/O unit
+ * is the bio, and buffer_heads are used for extracting block
+ * mappings (via a get_block_t call), for tracking state within
+ * a page (via a page_mapping) and for wrapping bio submission
+ * for backward compatibility reasons (e.g. submit_bh).
+ */
+struct buffer_head {
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head *b_this_page;/* circular list of page's buffers */
+ struct page *b_page; /* the page this bh is mapped to */
+
+ sector_t b_blocknr; /* start block number */
+ size_t b_size; /* size of mapping */
+ char *b_data; /* pointer to data within the page */
+
+ struct block_device *b_bdev;
+ bh_end_io_t *b_end_io; /* I/O completion */
+ void *b_private; /* reserved for b_end_io */
+ struct list_head b_assoc_buffers; /* associated with another mapping */
+ struct address_space *b_assoc_map; /* mapping this buffer is
+ associated with */
+ atomic_t b_count; /* users using this buffer_head */
+ spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
+ * serialise IO completion of other
+ * buffers in the page */
+};
+
+/*
+ * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
+ * and buffer_foo() functions.
+ * To avoid setting buffer flags that are already set, because that causes
+ * a costly cache line transition, check the flag first.
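 *
 * For instance, BUFFER_FNS(Uptodate, uptodate) expands (roughly) to:
 *
 *   static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
 *   { if (!test_bit(BH_Uptodate, &bh->b_state))
 *             set_bit(BH_Uptodate, &bh->b_state); }
 *   static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
 *   { clear_bit(BH_Uptodate, &bh->b_state); }
 *   static __always_inline int buffer_uptodate(const struct buffer_head *bh)
 *   { return test_bit(BH_Uptodate, &bh->b_state); }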
+ */ +#define BUFFER_FNS(bit, name) \ +static __always_inline void set_buffer_##name(struct buffer_head *bh) \ +{ \ + if (!test_bit(BH_##bit, &(bh)->b_state)) \ + set_bit(BH_##bit, &(bh)->b_state); \ +} \ +static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ +{ \ + clear_bit(BH_##bit, &(bh)->b_state); \ +} \ +static __always_inline int buffer_##name(const struct buffer_head *bh) \ +{ \ + return test_bit(BH_##bit, &(bh)->b_state); \ +} + +/* + * test_set_buffer_foo() and test_clear_buffer_foo() + */ +#define TAS_BUFFER_FNS(bit, name) \ +static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \ +{ \ + return test_and_set_bit(BH_##bit, &(bh)->b_state); \ +} \ +static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ +{ \ + return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ +} \ + +/* + * Emit the buffer bitops functions. Note that there are also functions + * of the form "mark_buffer_foo()". These are higher-level functions which + * do something in addition to setting a b_state bit. + */ +BUFFER_FNS(Uptodate, uptodate) +BUFFER_FNS(Dirty, dirty) +TAS_BUFFER_FNS(Dirty, dirty) +BUFFER_FNS(Lock, locked) +BUFFER_FNS(Req, req) +TAS_BUFFER_FNS(Req, req) +BUFFER_FNS(Mapped, mapped) +BUFFER_FNS(New, new) +BUFFER_FNS(Async_Read, async_read) +BUFFER_FNS(Async_Write, async_write) +BUFFER_FNS(Delay, delay) +BUFFER_FNS(Boundary, boundary) +BUFFER_FNS(Write_EIO, write_io_error) +BUFFER_FNS(Unwritten, unwritten) +BUFFER_FNS(Meta, meta) +BUFFER_FNS(Prio, prio) +BUFFER_FNS(Defer_Completion, defer_completion) + +#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) + +/* If we *know* page->private refers to buffer_heads */ +#define page_buffers(page) \ + ({ \ + BUG_ON(!PagePrivate(page)); \ + ((struct buffer_head *)page_private(page)); \ + }) +#define page_has_buffers(page) PagePrivate(page) + +void buffer_check_dirty_writeback(struct page *page, + bool *dirty, bool *writeback); + +/* + * Declarations + */ + +void mark_buffer_dirty(struct buffer_head *bh); +void mark_buffer_write_io_error(struct buffer_head *bh); +void touch_buffer(struct buffer_head *bh); +void set_bh_page(struct buffer_head *bh, + struct page *page, unsigned long offset); +int try_to_free_buffers(struct page *); +struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, + bool retry); +void create_empty_buffers(struct page *, unsigned long, + unsigned long b_state); +void end_buffer_read_sync(struct buffer_head *bh, int uptodate); +void end_buffer_write_sync(struct buffer_head *bh, int uptodate); +void end_buffer_async_write(struct buffer_head *bh, int uptodate); + +/* Things to do with buffers at mapping->private_list */ +void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); +int inode_has_buffers(struct inode *); +void invalidate_inode_buffers(struct inode *); +int remove_inode_buffers(struct inode *inode); +int sync_mapping_buffers(struct address_space *mapping); +void clean_bdev_aliases(struct block_device *bdev, sector_t block, + sector_t len); +static inline void clean_bdev_bh_alias(struct buffer_head *bh) +{ + clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); +} + +void mark_buffer_async_write(struct buffer_head *bh); +void __wait_on_buffer(struct buffer_head *); +wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); +struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, + unsigned size); +struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, + unsigned size, gfp_t 
gfp); +void __brelse(struct buffer_head *); +void __bforget(struct buffer_head *); +void __breadahead(struct block_device *, sector_t block, unsigned int size); +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, + gfp_t gfp); +struct buffer_head *__bread_gfp(struct block_device *, + sector_t block, unsigned size, gfp_t gfp); +void invalidate_bh_lrus(void); +struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); +void free_buffer_head(struct buffer_head * bh); +void unlock_buffer(struct buffer_head *bh); +void __lock_buffer(struct buffer_head *bh); +void ll_rw_block(int, int, int, struct buffer_head * bh[]); +int sync_dirty_buffer(struct buffer_head *bh); +int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); +void write_dirty_buffer(struct buffer_head *bh, int op_flags); +int submit_bh(int, int, struct buffer_head *); +void write_boundary_block(struct block_device *bdev, + sector_t bblock, unsigned blocksize); +int bh_uptodate_or_lock(struct buffer_head *bh); +int bh_submit_read(struct buffer_head *bh); + +extern int buffer_heads_over_limit; + +/* + * Generic address_space_operations implementations for buffer_head-backed + * address_spaces. + */ +void block_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); +int block_write_full_page(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); +int __block_write_full_page(struct inode *inode, struct page *page, + get_block_t *get_block, struct writeback_control *wbc, + bh_end_io_t *handler); +int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, unsigned long from, + unsigned long count); +int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, + unsigned flags, struct page **pagep, get_block_t *get_block); +int __block_write_begin(struct page *page, loff_t pos, unsigned len, + get_block_t *get_block); +int block_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +int generic_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); +void clean_page_buffers(struct page *page); +int cont_write_begin(struct file *, struct address_space *, loff_t, + unsigned, unsigned, struct page **, void **, + get_block_t *, loff_t *); +int generic_cont_expand_simple(struct inode *inode, loff_t size); +int block_commit_write(struct page *page, unsigned from, unsigned to); +int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + get_block_t get_block); +/* Convert errno to return value from ->page_mkwrite() call */ +static inline vm_fault_t block_page_mkwrite_return(int err) +{ + if (err == 0) + return VM_FAULT_LOCKED; + if (err == -EFAULT || err == -EAGAIN) + return VM_FAULT_NOPAGE; + if (err == -ENOMEM) + return VM_FAULT_OOM; + /* -ENOSPC, -EDQUOT, -EIO ... 
*/ + return VM_FAULT_SIGBUS; +} +sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); +int block_truncate_page(struct address_space *, loff_t, get_block_t *); +int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, + struct page **, void **, get_block_t*); +int nobh_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); +int nobh_writepage(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); + +void buffer_init(void); + +/* + * inline definitions + */ + +static inline void attach_page_buffers(struct page *page, + struct buffer_head *head) +{ + get_page(page); + SetPagePrivate(page); + set_page_private(page, (unsigned long)head); +} + +static inline void get_bh(struct buffer_head *bh) +{ + atomic_inc(&bh->b_count); +} + +static inline void put_bh(struct buffer_head *bh) +{ + smp_mb__before_atomic(); + atomic_dec(&bh->b_count); +} + +static inline void brelse(struct buffer_head *bh) +{ + if (bh) + __brelse(bh); +} + +static inline void bforget(struct buffer_head *bh) +{ + if (bh) + __bforget(bh); +} + +static inline struct buffer_head * +sb_bread(struct super_block *sb, sector_t block) +{ + return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); +} + +static inline struct buffer_head * +sb_bread_unmovable(struct super_block *sb, sector_t block) +{ + return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + +static inline void +sb_breadahead(struct super_block *sb, sector_t block) +{ + __breadahead(sb->s_bdev, block, sb->s_blocksize); +} + +static inline void +sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + +static inline struct buffer_head * +sb_getblk(struct super_block *sb, sector_t block) +{ + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); +} + + +static inline struct buffer_head * +sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) +{ + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); +} + +static inline struct buffer_head * +sb_find_get_block(struct super_block *sb, sector_t block) +{ + return __find_get_block(sb->s_bdev, block, sb->s_blocksize); +} + +static inline void +map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) +{ + set_buffer_mapped(bh); + bh->b_bdev = sb->s_bdev; + bh->b_blocknr = block; + bh->b_size = sb->s_blocksize; +} + +static inline void wait_on_buffer(struct buffer_head *bh) +{ + might_sleep(); + if (buffer_locked(bh)) + __wait_on_buffer(bh); +} + +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state)); +} + +static inline void lock_buffer(struct buffer_head *bh) +{ + might_sleep(); + if (!trylock_buffer(bh)) + __lock_buffer(bh); +} + +static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, + sector_t block, + unsigned size) +{ + return __getblk_gfp(bdev, block, size, 0); +} + +static inline struct buffer_head *__getblk(struct block_device *bdev, + sector_t block, + unsigned size) +{ + return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); +} + +/** + * __bread() - reads a specified block and returns the bh + * @bdev: the block_device to read from + * @block: number of block + * @size: size (in bytes) to read + * + * Reads a specified block, and returns buffer head that contains it. 
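 * (Typical use, sketched: read a block, consume b_data, then drop the
 * reference with brelse():
 *
 *   struct buffer_head *bh = __bread(bdev, block, blocksize);
 *   if (bh) {
 *           ... use bh->b_data ...
 *           brelse(bh);
 *   })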
+ * The page cache is allocated from movable area so that it can be migrated. + * It returns NULL if the block was unreadable. + */ +static inline struct buffer_head * +__bread(struct block_device *bdev, sector_t block, unsigned size) +{ + return __bread_gfp(bdev, block, size, __GFP_MOVABLE); +} + +extern int __set_page_dirty_buffers(struct page *page); + +#else /* CONFIG_BLOCK */ + +static inline void buffer_init(void) {} +static inline int try_to_free_buffers(struct page *page) { return 1; } +static inline int inode_has_buffers(struct inode *inode) { return 0; } +static inline void invalidate_inode_buffers(struct inode *inode) {} +static inline int remove_inode_buffers(struct inode *inode) { return 1; } +static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } + +#endif /* CONFIG_BLOCK */ +#endif /* _LINUX_BUFFER_HEAD_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h new file mode 100644 index 0000000..f639bd0 --- /dev/null +++ b/include/linux/bug.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BUG_H +#define _LINUX_BUG_H + +#include +#include +#include + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +struct pt_regs; + +#ifdef __CHECKER__ +#define MAYBE_BUILD_BUG_ON(cond) (0) +#else /* __CHECKER__ */ + +#define MAYBE_BUILD_BUG_ON(cond) \ + do { \ + if (__builtin_constant_p((cond))) \ + BUILD_BUG_ON(cond); \ + else \ + BUG_ON(cond); \ + } while (0) + +#endif /* __CHECKER__ */ + +#ifdef CONFIG_GENERIC_BUG +#include + +static inline int is_warning_bug(const struct bug_entry *bug) +{ + return bug->flags & BUGFLAG_WARNING; +} + +struct bug_entry *find_bug(unsigned long bugaddr); + +enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); + +/* These are defined by the architecture */ +int is_valid_bugaddr(unsigned long addr); + +void generic_bug_clear_once(void); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline void *find_bug(unsigned long bugaddr) +{ + return NULL; +} + +static inline enum bug_trap_type report_bug(unsigned long bug_addr, + struct pt_regs *regs) +{ + return BUG_TRAP_TYPE_BUG; +} + + +static inline void generic_bug_clear_once(void) {} + +#endif /* CONFIG_GENERIC_BUG */ + +/* + * Since detected data corruption should stop operation on the affected + * structures. Return value must be checked and sanely acted on by caller. + */ +static inline __must_check bool check_data_corruption(bool v) { return v; } +#define CHECK_DATA_CORRUPTION(condition, fmt, ...) 
\ + check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ + pr_err(fmt, ##__VA_ARGS__); \ + BUG(); \ + } else \ + WARN(1, fmt, ##__VA_ARGS__); \ + } \ + corruption; \ + })) + +#endif /* _LINUX_BUG_H */ diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h new file mode 100644 index 0000000..bb007bd --- /dev/null +++ b/include/linux/build-salt.h @@ -0,0 +1,20 @@ +#ifndef __BUILD_SALT_H +#define __BUILD_SALT_H + +#include + +#define LINUX_ELFNOTE_BUILD_SALT 0x100 + +#ifdef __ASSEMBLER__ + +#define BUILD_SALT \ + ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT) + +#else + +#define BUILD_SALT \ + ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT) + +#endif + +#endif /* __BUILD_SALT_H */ diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h new file mode 100644 index 0000000..0fe5426 --- /dev/null +++ b/include/linux/build_bug.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BUILD_BUG_H +#define _LINUX_BUILD_BUG_H + +#include + +#ifdef __CHECKER__ +#define BUILD_BUG_ON_ZERO(e) (0) +#else /* __CHECKER__ */ +/* + * Force a compilation error if condition is true, but also produce a + * result (of value 0 and type size_t), so the expression can be used + * e.g. in a structure initializer (or where-ever else comma expressions + * aren't permitted). + */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) +#endif /* __CHECKER__ */ + +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) + +/* + * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the + * expression but avoids the generation of any code, even if that expression + * has side-effects. + */ +#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + */ +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) + +/** + * BUILD_BUG - break compile if used. + * + * If you have some code that you expect the compiler to eliminate at + * build time, you should use BUILD_BUG to detect if it is + * unexpectedly used. + */ +#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") + +/** + * static_assert - check integer constant expression at build time + * + * static_assert() is a wrapper for the C11 _Static_assert, with a + * little macro magic to make the message optional (defaulting to the + * stringification of the tested expression). + * + * Contrary to BUILD_BUG_ON(), static_assert() can be used at global + * scope, but requires the expression to be an integer constant + * expression (i.e., it is not enough that __builtin_constant_p() is + * true for expr). 
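 *
 * For example (illustrative):
 *
 *   static_assert(sizeof(long) >= 4);
 *   static_assert(sizeof(long) >= 4, "long is too small");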
+ * + * Also note that BUILD_BUG_ON() fails the build if the condition is + * true, while static_assert() fails the build if the expression is + * false. + */ +#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) +#define __static_assert(expr, msg, ...) _Static_assert(expr, msg) + +#endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/bvec.h b/include/linux/bvec.h new file mode 100644 index 0000000..a032f01 --- /dev/null +++ b/include/linux/bvec.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * bvec iterator + * + * Copyright (C) 2001 Ming Lei + */ +#ifndef __LINUX_BVEC_ITER_H +#define __LINUX_BVEC_ITER_H + +#include +#include +#include +#include + +/* + * was unsigned short, but we might as well be ready for > 64kB I/O pages + */ +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bvec_iter { + sector_t bi_sector; /* device address in 512 byte + sectors */ + unsigned int bi_size; /* residual I/O count */ + + unsigned int bi_idx; /* current index into bvl_vec */ + + unsigned int bi_bvec_done; /* number of bytes completed in + current bvec */ +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned done; +}; + +/* + * various member access, note that bio_data should of course not be used + * on highmem page vectors + */ +#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) + +/* multi-page (mp_bvec) helpers */ +#define mp_bvec_iter_page(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_page) + +#define mp_bvec_iter_len(bvec, iter) \ + min((iter).bi_size, \ + __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) + +#define mp_bvec_iter_offset(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) + +#define mp_bvec_iter_page_idx(bvec, iter) \ + (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE) + +#define mp_bvec_iter_bvec(bvec, iter) \ +((struct bio_vec) { \ + .bv_page = mp_bvec_iter_page((bvec), (iter)), \ + .bv_len = mp_bvec_iter_len((bvec), (iter)), \ + .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \ +}) + +/* For building single-page bvec in flight */ + #define bvec_iter_offset(bvec, iter) \ + (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE) + +#define bvec_iter_len(bvec, iter) \ + min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \ + PAGE_SIZE - bvec_iter_offset((bvec), (iter))) + +#define bvec_iter_page(bvec, iter) \ + (mp_bvec_iter_page((bvec), (iter)) + \ + mp_bvec_iter_page_idx((bvec), (iter))) + +#define bvec_iter_bvec(bvec, iter) \ +((struct bio_vec) { \ + .bv_page = bvec_iter_page((bvec), (iter)), \ + .bv_len = bvec_iter_len((bvec), (iter)), \ + .bv_offset = bvec_iter_offset((bvec), (iter)), \ +}) + +static inline bool bvec_iter_advance(const struct bio_vec *bv, + struct bvec_iter *iter, unsigned bytes) +{ + if (WARN_ONCE(bytes > iter->bi_size, + "Attempted to advance past end of bvec iter\n")) { + iter->bi_size = 0; + return false; + } + + while (bytes) { + const struct bio_vec *cur = bv + iter->bi_idx; + unsigned len = min3(bytes, iter->bi_size, + cur->bv_len - iter->bi_bvec_done); + + bytes -= len; + iter->bi_size -= len; + iter->bi_bvec_done += len; + + if (iter->bi_bvec_done == cur->bv_len) { + iter->bi_bvec_done = 0; + iter->bi_idx++; + } + } + return true; +} + +#define for_each_bvec(bvl, bio_vec, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ + bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + +/* for iterating one bio from start 
to end */ +#define BVEC_ITER_ALL_INIT (struct bvec_iter) \ +{ \ + .bi_sector = 0, \ + .bi_size = UINT_MAX, \ + .bi_idx = 0, \ + .bi_bvec_done = 0, \ +} + +static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) +{ + iter_all->done = 0; + iter_all->idx = 0; + + return &iter_all->bv; +} + +static inline void bvec_advance(const struct bio_vec *bvec, + struct bvec_iter_all *iter_all) +{ + struct bio_vec *bv = &iter_all->bv; + + if (iter_all->done) { + bv->bv_page++; + bv->bv_offset = 0; + } else { + bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); + bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; + } + bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, + bvec->bv_len - iter_all->done); + iter_all->done += bv->bv_len; + + if (iter_all->done == bvec->bv_len) { + iter_all->idx++; + iter_all->done = 0; + } +} + +/* + * Get the last single-page segment from the multi-page bvec and store it + * in @seg + */ +static inline void mp_bvec_last_segment(const struct bio_vec *bvec, + struct bio_vec *seg) +{ + unsigned total = bvec->bv_offset + bvec->bv_len; + unsigned last_page = (total - 1) / PAGE_SIZE; + + seg->bv_page = bvec->bv_page + last_page; + + /* the whole segment is inside the last page */ + if (bvec->bv_offset >= last_page * PAGE_SIZE) { + seg->bv_offset = bvec->bv_offset % PAGE_SIZE; + seg->bv_len = bvec->bv_len; + } else { + seg->bv_offset = 0; + seg->bv_len = total - last_page * PAGE_SIZE; + } +} + +#endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h new file mode 100644 index 0000000..d64a524 --- /dev/null +++ b/include/linux/byteorder/big_endian.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H +#define _LINUX_BYTEORDER_BIG_ENDIAN_H + +#include + +#ifndef CONFIG_CPU_BIG_ENDIAN +#warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN +#endif + +#include +#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h new file mode 100644 index 0000000..4b13e0a --- /dev/null +++ b/include/linux/byteorder/generic.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_GENERIC_H +#define _LINUX_BYTEORDER_GENERIC_H + +/* + * linux/byteorder/generic.h + * Generic Byte-reordering support + * + * The "... p" macros, like le64_to_cpup, can be used with pointers + * to unaligned data, but there will be a performance penalty on + * some architectures. Use get_unaligned for unaligned data. + * + * Francois-Rene Rideau 19970707 + * gathered all the good ideas from all asm-foo/byteorder.h into one file, + * cleaned them up. + * I hope it is compliant with non-GCC compilers. + * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, + * because I wasn't sure it would be ok to put it in types.h + * Upgraded it to 2.1.43 + * Francois-Rene Rideau 19971012 + * Upgraded it to 2.1.57 + * to please Linus T., replaced huge #ifdef's between little/big endian + * by nestedly #include'd files. + * Francois-Rene Rideau 19971205 + * Made it to 2.1.71; now a facelift: + * Put files under include/linux/byteorder/ + * Split swab from generic support. + * + * TODO: + * = Regular kernel maintainers could also replace all these manual + * byteswap macros that remain, disseminated among drivers, + * after some grep or the sources... + * = Linus might want to rename all these macros and files to fit his taste, + * to fit his personal naming scheme. 
+ * = it seems that a few drivers would also appreciate + * nybble swapping support... + * = every architecture could add their byteswap macro in asm/byteorder.h + * see how some architectures already do (i386, alpha, ppc, etc) + * = cpu_to_beXX and beXX_to_cpu might some day need to be well + * distinguished throughout the kernel. This is not the case currently, + * since little endian, big endian, and pdp endian machines needn't it. + * But this might be the case for, say, a port of Linux to 20/21 bit + * architectures (and F21 Linux addict around?). + */ + +/* + * The following macros are to be defined by : + * + * Conversion of long and short int between network and host format + * ntohl(__u32 x) + * ntohs(__u16 x) + * htonl(__u32 x) + * htons(__u16 x) + * It seems that some programs (which? where? or perhaps a standard? POSIX?) + * might like the above to be functions, not macros (why?). + * if that's true, then detect them, and take measures. + * Anyway, the measure is: define only ___ntohl as a macro instead, + * and in a separate file, have + * unsigned long inline ntohl(x){return ___ntohl(x);} + * + * The same for constant arguments + * __constant_ntohl(__u32 x) + * __constant_ntohs(__u16 x) + * __constant_htonl(__u32 x) + * __constant_htons(__u16 x) + * + * Conversion of XX-bit integers (16- 32- or 64-) + * between native CPU format and little/big endian format + * 64-bit stuff only defined for proper architectures + * cpu_to_[bl]eXX(__uXX x) + * [bl]eXX_to_cpu(__uXX x) + * + * The same, but takes a pointer to the value to convert + * cpu_to_[bl]eXXp(__uXX x) + * [bl]eXX_to_cpup(__uXX x) + * + * The same, but change in situ + * cpu_to_[bl]eXXs(__uXX x) + * [bl]eXX_to_cpus(__uXX x) + * + * See asm-foo/byteorder.h for examples of how to provide + * architecture-optimized versions + * + */ + +#define cpu_to_le64 __cpu_to_le64 +#define le64_to_cpu __le64_to_cpu +#define cpu_to_le32 __cpu_to_le32 +#define le32_to_cpu __le32_to_cpu +#define cpu_to_le16 __cpu_to_le16 +#define le16_to_cpu __le16_to_cpu +#define cpu_to_be64 __cpu_to_be64 +#define be64_to_cpu __be64_to_cpu +#define cpu_to_be32 __cpu_to_be32 +#define be32_to_cpu __be32_to_cpu +#define cpu_to_be16 __cpu_to_be16 +#define be16_to_cpu __be16_to_cpu +#define cpu_to_le64p __cpu_to_le64p +#define le64_to_cpup __le64_to_cpup +#define cpu_to_le32p __cpu_to_le32p +#define le32_to_cpup __le32_to_cpup +#define cpu_to_le16p __cpu_to_le16p +#define le16_to_cpup __le16_to_cpup +#define cpu_to_be64p __cpu_to_be64p +#define be64_to_cpup __be64_to_cpup +#define cpu_to_be32p __cpu_to_be32p +#define be32_to_cpup __be32_to_cpup +#define cpu_to_be16p __cpu_to_be16p +#define be16_to_cpup __be16_to_cpup +#define cpu_to_le64s __cpu_to_le64s +#define le64_to_cpus __le64_to_cpus +#define cpu_to_le32s __cpu_to_le32s +#define le32_to_cpus __le32_to_cpus +#define cpu_to_le16s __cpu_to_le16s +#define le16_to_cpus __le16_to_cpus +#define cpu_to_be64s __cpu_to_be64s +#define be64_to_cpus __be64_to_cpus +#define cpu_to_be32s __cpu_to_be32s +#define be32_to_cpus __be32_to_cpus +#define cpu_to_be16s __cpu_to_be16s +#define be16_to_cpus __be16_to_cpus + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
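 *
 * A common pattern that depends on this constant folding is a constant
 * switch label, e.g. (sketch):
 *
 *   switch (skb->protocol) {
 *   case htons(ETH_P_IP):
 *           ...
 *   }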
+ */ + +#undef ntohl +#undef ntohs +#undef htonl +#undef htons + +#define ___htonl(x) __cpu_to_be32(x) +#define ___htons(x) __cpu_to_be16(x) +#define ___ntohl(x) __be32_to_cpu(x) +#define ___ntohs(x) __be16_to_cpu(x) + +#define htonl(x) ___htonl(x) +#define ntohl(x) ___ntohl(x) +#define htons(x) ___htons(x) +#define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpu(*var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpu(*var) + val); +} + +/* XXX: this stuff can be optimized */ +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpu(*var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpu(*var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpu(*var) + val); +} + +static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = cpu_to_be32(src[i]); +} + +static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = be32_to_cpu(src[i]); +} + +#endif /* _LINUX_BYTEORDER_GENERIC_H */ diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h new file mode 100644 index 0000000..1ec650f --- /dev/null +++ b/include/linux/byteorder/little_endian.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H +#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H + +#include + +#ifdef CONFIG_CPU_BIG_ENDIAN +#warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set +#endif + +#include +#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/include/linux/c2port.h b/include/linux/c2port.h new file mode 100644 index 0000000..4e93bc6 --- /dev/null +++ b/include/linux/c2port.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Silicon Labs C2 port Linux support + * + * Copyright (c) 2007 Rodolfo Giometti + * Copyright (c) 2007 Eurotech S.p.A. 
+ */ + +#define C2PORT_NAME_LEN 32 + +struct device; + +/* + * C2 port basic structs + */ + +/* Main struct */ +struct c2port_ops; +struct c2port_device { + unsigned int access:1; + unsigned int flash_access:1; + + int id; + char name[C2PORT_NAME_LEN]; + struct c2port_ops *ops; + struct mutex mutex; /* prevent races during read/write */ + + struct device *dev; + + void *private_data; +}; + +/* Basic operations */ +struct c2port_ops { + /* Flash layout */ + unsigned short block_size; /* flash block size in bytes */ + unsigned short blocks_num; /* flash blocks number */ + + /* Enable or disable the access to C2 port */ + void (*access)(struct c2port_device *dev, int status); + + /* Set C2D data line as input/output */ + void (*c2d_dir)(struct c2port_device *dev, int dir); + + /* Read/write C2D data line */ + int (*c2d_get)(struct c2port_device *dev); + void (*c2d_set)(struct c2port_device *dev, int status); + + /* Write C2CK clock line */ + void (*c2ck_set)(struct c2port_device *dev, int status); +}; + +/* + * Exported functions + */ + +extern struct c2port_device *c2port_device_register(char *name, + struct c2port_ops *ops, void *devdata); +extern void c2port_device_unregister(struct c2port_device *dev); diff --git a/include/linux/cache.h b/include/linux/cache.h new file mode 100644 index 0000000..750621e --- /dev/null +++ b/include/linux/cache.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_CACHE_H +#define __LINUX_CACHE_H + +#include +#include + +#ifndef L1_CACHE_ALIGN +#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) +#endif + +#ifndef SMP_CACHE_BYTES +#define SMP_CACHE_BYTES L1_CACHE_BYTES +#endif + +/* + * __read_mostly is used to keep rarely changing variables out of frequently + * updated cachelines. If an architecture doesn't support it, ignore the + * hint. + */ +#ifndef __read_mostly +#define __read_mostly +#endif + +/* + * __ro_after_init is used to mark things that are read-only after init (i.e. + * after mark_rodata_ro() has been called). These are effectively read-only, + * but may get written to during init, so can't live in .rodata (via "const"). + */ +#ifndef __ro_after_init +#define __ro_after_init __attribute__((__section__(".data..ro_after_init"))) +#endif + +#ifndef ____cacheline_aligned +#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#ifndef __cacheline_aligned +#define __cacheline_aligned \ + __attribute__((__aligned__(SMP_CACHE_BYTES), \ + __section__(".data..cacheline_aligned"))) +#endif /* __cacheline_aligned */ + +#ifndef __cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define __cacheline_aligned_in_smp __cacheline_aligned +#else +#define __cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +/* + * The maximum alignment needed for some critical structures + * These could be inter-node cacheline sizes/L3 cacheline + * size etc. 
Define this in asm/cache.h for your arch + */ +#ifndef INTERNODE_CACHE_SHIFT +#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT +#endif + +#if !defined(____cacheline_internodealigned_in_smp) +#if defined(CONFIG_SMP) +#define ____cacheline_internodealigned_in_smp \ + __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) +#else +#define ____cacheline_internodealigned_in_smp +#endif +#endif + +#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE +#define cache_line_size() L1_CACHE_BYTES +#endif + +#endif /* __LINUX_CACHE_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h new file mode 100644 index 0000000..46b92cd --- /dev/null +++ b/include/linux/cacheinfo.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CACHEINFO_H +#define _LINUX_CACHEINFO_H + +#include +#include +#include + +struct device_node; +struct attribute; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = BIT(0), + CACHE_TYPE_DATA = BIT(1), + CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA, + CACHE_TYPE_UNIFIED = BIT(2), +}; + +extern unsigned int coherency_max_size; + +/** + * struct cacheinfo - represent a cache leaf node + * @id: This cache's id. It is unique among caches with the same (type, level). + * @type: type of the cache - data, inst or unified + * @level: represents the hierarchy in the multi-level cache + * @coherency_line_size: size of each cache line usually representing + * the minimum amount of data that gets transferred from memory + * @number_of_sets: total number of sets, a set is a collection of cache + * lines sharing the same index + * @ways_of_associativity: number of ways in which a particular memory + * block can be placed in the cache + * @physical_line_partition: number of physical cache lines sharing the + * same cachetag + * @size: Total size of the cache + * @shared_cpu_map: logical cpumask representing all the cpus sharing + * this cache node + * @attributes: bitfield representing various cache attributes + * @fw_token: Unique value used to determine if different cacheinfo + * structures represent a single hardware cache instance. 
+ * @disable_sysfs: indicates whether this node is visible to the user via + * sysfs or not + * @priv: pointer to any private data structure specific to particular + * cache design + * + * While @of_node, @disable_sysfs and @priv are used for internal book + * keeping, the remaining members form the core properties of the cache + */ +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; +#define CACHE_WRITE_THROUGH BIT(0) +#define CACHE_WRITE_BACK BIT(1) +#define CACHE_WRITE_POLICY_MASK \ + (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK) +#define CACHE_READ_ALLOCATE BIT(2) +#define CACHE_WRITE_ALLOCATE BIT(3) +#define CACHE_ALLOCATE_POLICY_MASK \ + (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) +#define CACHE_ID BIT(4) + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; +}; + +/* + * Helpers to make sure "func" is executed on the cpu whose cache + * attributes are being detected + */ +#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ +static inline void _##func(void *ret) \ +{ \ + int cpu = smp_processor_id(); \ + *(int *)ret = __##func(cpu); \ +} \ + \ +int func(unsigned int cpu) \ +{ \ + int ret; \ + smp_call_function_single(cpu, _##func, &ret, true); \ + return ret; \ +} + +struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); +int init_cache_level(unsigned int cpu); +int populate_cache_leaves(unsigned int cpu); +int cache_setup_acpi(unsigned int cpu); +#ifndef CONFIG_ACPI_PPTT +/* + * acpi_find_last_cache_level is only called on ACPI enabled + * platforms using the PPTT for topology. This means that if + * the platform supports other firmware configuration methods + * we need to stub out the call when ACPI is disabled. + * ACPI enabled platforms not using PPTT won't be making calls + * to this function so we need not worry about them. + */ +static inline int acpi_find_last_cache_level(unsigned int cpu) +{ + return 0; +} +#else +int acpi_find_last_cache_level(unsigned int cpu); +#endif + +const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); + +#endif /* _LINUX_CACHEINFO_H */ diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h new file mode 100644 index 0000000..2f5d731 --- /dev/null +++ b/include/linux/can/can-ml.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * Copyright (c) 2017 Pengutronix, Marc Kleine-Budde + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Volkswagen nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * The provided data structures and external interfaces from this code + * are not restricted to be used by modules with a GPL compatible license. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#ifndef CAN_ML_H +#define CAN_ML_H + +#include +#include + +#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) +#define CAN_EFF_RCV_HASH_BITS 10 +#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS) + +enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX }; + +struct can_dev_rcv_lists { + struct hlist_head rx[RX_MAX]; + struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ]; + struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ]; + int entries; +}; + +struct can_ml_priv { + struct can_dev_rcv_lists dev_rcv_lists; +#ifdef CAN_J1939 + struct j1939_priv *j1939_priv; +#endif +}; + +#endif /* CAN_ML_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h new file mode 100644 index 0000000..e20a0cd --- /dev/null +++ b/include/linux/can/core.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* + * linux/can/core.h + * + * Prototypes and definitions for CAN protocol modules using the PF_CAN core + * + * Authors: Oliver Hartkopp + * Urs Thuermann + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef _CAN_CORE_H +#define _CAN_CORE_H + +#include +#include +#include + +#define CAN_VERSION "20170425" + +/* increment this number each time you change some user-space interface */ +#define CAN_ABI_VERSION "9" + +#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION + +#define DNAME(dev) ((dev) ? (dev)->name : "any") + +/** + * struct can_proto - CAN protocol structure + * @type: type argument in socket() syscall, e.g. SOCK_DGRAM. + * @protocol: protocol number in socket() syscall. + * @ops: pointer to struct proto_ops for sock->ops. + * @prot: pointer to struct proto structure. 
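+ * + * Editor's sketch, modelled on the in-tree can_raw module; the raw_ops and + * raw_proto objects are assumed to exist in the protocol module + * (illustration only, not part of this header): + * + * static const struct can_proto raw_can_proto = { + * .type = SOCK_RAW, + * .protocol = CAN_RAW, + * .ops = &raw_ops, + * .prot = &raw_proto, + * }; + * err = can_proto_register(&raw_can_proto);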
+ */ +struct can_proto { + int type; + int protocol; + const struct proto_ops *ops; + struct proto *prot; +}; + +/* required_size + * macro to find the minimum size of a struct + * that includes a requested member + */ +#define CAN_REQUIRED_SIZE(struct_type, member) \ + (offsetof(typeof(struct_type), member) + \ + sizeof(((typeof(struct_type) *)(NULL))->member)) + +/* function prototypes for the CAN networklayer core (af_can.c) */ + +extern int can_proto_register(const struct can_proto *cp); +extern void can_proto_unregister(const struct can_proto *cp); + +int can_rx_register(struct net *net, struct net_device *dev, + canid_t can_id, canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data, char *ident, struct sock *sk); + +extern void can_rx_unregister(struct net *net, struct net_device *dev, + canid_t can_id, canid_t mask, + void (*func)(struct sk_buff *, void *), + void *data); + +extern int can_send(struct sk_buff *skb, int loop); +void can_sock_destruct(struct sock *sk); + +#endif /* !_CAN_CORE_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h new file mode 100644 index 0000000..5e3d455 --- /dev/null +++ b/include/linux/can/dev.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/can/dev.h + * + * Definitions for the CAN network device driver interface + * + * Copyright (C) 2006 Andrey Volkov + * Varma Electronics Oy + * + * Copyright (C) 2008 Wolfgang Grandegger + * + */ + +#ifndef _CAN_DEV_H +#define _CAN_DEV_H + +#include +#include +#include +#include +#include +#include + +/* + * CAN mode + */ +enum can_mode { + CAN_MODE_STOP = 0, + CAN_MODE_START, + CAN_MODE_SLEEP +}; + +/* + * CAN common private data + */ +struct can_priv { + struct net_device *dev; + struct can_device_stats can_stats; + + struct can_bittiming bittiming, data_bittiming; + const struct can_bittiming_const *bittiming_const, + *data_bittiming_const; + const u16 *termination_const; + unsigned int termination_const_cnt; + u16 termination; + const u32 *bitrate_const; + unsigned int bitrate_const_cnt; + const u32 *data_bitrate_const; + unsigned int data_bitrate_const_cnt; + u32 bitrate_max; + struct can_clock clock; + + enum can_state state; + + /* CAN controller features - see include/uapi/linux/can/netlink.h */ + u32 ctrlmode; /* current options setting */ + u32 ctrlmode_supported; /* options that can be modified by netlink */ + u32 ctrlmode_static; /* static enabled options for driver/hardware */ + + int restart_ms; + struct delayed_work restart_work; + + int (*do_set_bittiming)(struct net_device *dev); + int (*do_set_data_bittiming)(struct net_device *dev); + int (*do_set_mode)(struct net_device *dev, enum can_mode mode); + int (*do_set_termination)(struct net_device *dev, u16 term); + int (*do_get_state)(const struct net_device *dev, + enum can_state *state); + int (*do_get_berr_counter)(const struct net_device *dev, + struct can_berr_counter *bec); + + unsigned int echo_skb_max; + struct sk_buff **echo_skb; + +#ifdef CONFIG_CAN_LEDS + struct led_trigger *tx_led_trig; + char tx_led_trig_name[CAN_LED_NAME_SZ]; + struct led_trigger *rx_led_trig; + char rx_led_trig_name[CAN_LED_NAME_SZ]; + struct led_trigger *rxtx_led_trig; + char rxtx_led_trig_name[CAN_LED_NAME_SZ]; +#endif +}; + +/* + * get_can_dlc(value) - helper macro to cast a given data length code (dlc) + * to __u8 and ensure the dlc value to be max. 8 bytes. 
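+ * + * Editor's note - typical use in a driver RX handler; priv->regs and + * REG_DLC are hypothetical, illustration only: + * + * cf->can_dlc = get_can_dlc(readl(priv->regs + REG_DLC) & 0xf);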
+ * + * To be used in the CAN netdriver receive path to ensure conformance with + * ISO 11898-1 Chapter 8.4.2.3 (DLC field) + */ +#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) +#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) + +/* Check for outgoing skbs that have not been created by the CAN subsystem */ +static inline bool can_skb_headroom_valid(struct net_device *dev, + struct sk_buff *skb) +{ + /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ + if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) + return false; + + /* af_packet does not apply CAN skb specific settings */ + if (skb->ip_summed == CHECKSUM_NONE) { + /* init headroom */ + can_skb_prv(skb)->ifindex = dev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* perform proper loopback on capable devices */ + if (dev->flags & IFF_ECHO) + skb->pkt_type = PACKET_LOOPBACK; + else + skb->pkt_type = PACKET_HOST; + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + } + + return true; +} + +/* Drop a given socket buffer if it does not contain a valid CAN frame. */ +static inline bool can_dropped_invalid_skb(struct net_device *dev, + struct sk_buff *skb) +{ + const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + + if (skb->protocol == htons(ETH_P_CAN)) { + if (unlikely(skb->len != CAN_MTU || + cfd->len > CAN_MAX_DLEN)) + goto inval_skb; + } else if (skb->protocol == htons(ETH_P_CANFD)) { + if (unlikely(skb->len != CANFD_MTU || + cfd->len > CANFD_MAX_DLEN)) + goto inval_skb; + } else + goto inval_skb; + + if (!can_skb_headroom_valid(dev, skb)) + goto inval_skb; + + return false; + +inval_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; + return true; +} + +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + +/* helper to define static CAN controller features at device creation time */ +static inline void can_set_static_ctrlmode(struct net_device *dev, + u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + priv->ctrlmode = static_mode; + priv->ctrlmode_static = static_mode; + + /* override MTU which was set by default in can_setup()? 
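+ * + * Editor's sketch - a driver normally calls this helper right after + * alloc_candev(); ECHO_SKB_MAX is a hypothetical constant, illustration + * only: + * + * dev = alloc_candev(sizeof(*priv), ECHO_SKB_MAX); + * can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD); - also raises dev->mtu to CANFD_MTU + *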
*/ + if (static_mode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; +} + +/* get data length from can_dlc with sanitized can_dlc */ +u8 can_dlc2len(u8 can_dlc); + +/* map the sanitized data length to an appropriate data length code */ +u8 can_len2dlc(u8 len); + +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs); +#define alloc_candev(sizeof_priv, echo_skb_max) \ + alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1) +#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \ + alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count) +void free_candev(struct net_device *dev); + +/* a candev safe wrapper around netdev_priv */ +struct can_priv *safe_candev_priv(struct net_device *dev); + +int open_candev(struct net_device *dev); +void close_candev(struct net_device *dev); +int can_change_mtu(struct net_device *dev, int new_mtu); + +int register_candev(struct net_device *dev); +void unregister_candev(struct net_device *dev); + +int can_restart_now(struct net_device *dev); +void can_bus_off(struct net_device *dev); + +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state); + +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, + u8 *len_ptr); +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); +void can_free_echo_skb(struct net_device *dev, unsigned int idx); + +#ifdef CONFIG_OF +void of_can_transceiver(struct net_device *dev); +#else +static inline void of_can_transceiver(struct net_device *dev) { } +#endif + +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); +struct sk_buff *alloc_canfd_skb(struct net_device *dev, + struct canfd_frame **cfd); +struct sk_buff *alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf); + +#endif /* !_CAN_DEV_H */ diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h new file mode 100644 index 0000000..511a373 --- /dev/null +++ b/include/linux/can/dev/peak_canfd.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CAN driver for PEAK System micro-CAN based adapters + * + * Copyright (C) 2003-2011 PEAK System-Technik GmbH + * Copyright (C) 2011-2013 Stephane Grosjean + */ +#ifndef PUCAN_H +#define PUCAN_H + +/* uCAN commands opcodes list (low-order 10 bits) */ +#define PUCAN_CMD_NOP 0x000 +#define PUCAN_CMD_RESET_MODE 0x001 +#define PUCAN_CMD_NORMAL_MODE 0x002 +#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003 +#define PUCAN_CMD_TIMING_SLOW 0x004 +#define PUCAN_CMD_TIMING_FAST 0x005 +#define PUCAN_CMD_SET_STD_FILTER 0x006 +#define PUCAN_CMD_RESERVED2 0x007 +#define PUCAN_CMD_FILTER_STD 0x008 +#define PUCAN_CMD_TX_ABORT 0x009 +#define PUCAN_CMD_WR_ERR_CNT 0x00a +#define PUCAN_CMD_SET_EN_OPTION 0x00b +#define PUCAN_CMD_CLR_DIS_OPTION 0x00c +#define PUCAN_CMD_RX_BARRIER 0x010 +#define PUCAN_CMD_END_OF_COLLECTION 0x3ff + +/* uCAN received messages list */ +#define PUCAN_MSG_CAN_RX 0x0001 +#define PUCAN_MSG_ERROR 0x0002 +#define PUCAN_MSG_STATUS 0x0003 +#define PUCAN_MSG_BUSLOAD 0x0004 + +#define PUCAN_MSG_CACHE_CRITICAL 0x0102 + +/* uCAN transmitted messages */ +#define PUCAN_MSG_CAN_TX 0x1000 + +/* uCAN command common header */ +struct __packed pucan_command { + __le16 opcode_channel; + u16 args[3]; +}; + +/* return the opcode from the opcode_channel field of a command */ +static inline u16 pucan_cmd_get_opcode(struct 
pucan_command *c) +{ + return le16_to_cpu(c->opcode_channel) & 0x3ff; +} + +#define PUCAN_TSLOW_BRP_BITS 10 +#define PUCAN_TSLOW_TSGEG1_BITS 8 +#define PUCAN_TSLOW_TSGEG2_BITS 7 +#define PUCAN_TSLOW_SJW_BITS 7 + +#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1) +#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1) +#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1) +#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1) + +/* uCAN TIMING_SLOW command fields */ +#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \ + ((!!(t)) << 7)) +#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK) +#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK) +#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK) + +struct __packed pucan_timing_slow { + __le16 opcode_channel; + + u8 ewl; /* Error Warning limit */ + u8 sjw_t; /* Sync Jump Width + Triple sampling */ + u8 tseg2; /* Timing SEGment 2 */ + u8 tseg1; /* Timing SEGment 1 */ + + __le16 brp; /* BaudRate Prescaler */ +}; + +#define PUCAN_TFAST_BRP_BITS 10 +#define PUCAN_TFAST_TSGEG1_BITS 5 +#define PUCAN_TFAST_TSGEG2_BITS 4 +#define PUCAN_TFAST_SJW_BITS 4 + +#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1) +#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1) +#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1) +#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1) + +/* uCAN TIMING_FAST command fields */ +#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK) +#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK) +#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK) +#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK) + +struct __packed pucan_timing_fast { + __le16 opcode_channel; + + u8 unused; + u8 sjw; /* Sync Jump Width */ + u8 tseg2; /* Timing SEGment 2 */ + u8 tseg1; /* Timing SEGment 1 */ + + __le16 brp; /* BaudRate Prescaler */ +}; + +/* uCAN FILTER_STD command fields */ +#define PUCAN_FLTSTD_ROW_IDX_BITS 6 + +struct __packed pucan_filter_std { + __le16 opcode_channel; + + __le16 idx; + __le32 mask; /* CAN-ID bitmask in idx range */ +}; + +#define PUCAN_FLTSTD_ROW_IDX_MAX ((1 << PUCAN_FLTSTD_ROW_IDX_BITS) - 1) + +/* uCAN SET_STD_FILTER command fields */ +struct __packed pucan_std_filter { + __le16 opcode_channel; + + u8 unused; + u8 idx; + __le32 mask; /* CAN-ID bitmask in idx range */ +}; + +/* uCAN TX_ABORT commands fields */ +#define PUCAN_TX_ABORT_FLUSH 0x0001 + +struct __packed pucan_tx_abort { + __le16 opcode_channel; + + __le16 flags; + u32 unused; +}; + +/* uCAN WR_ERR_CNT command fields */ +#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */ +#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */ + +struct __packed pucan_wr_err_cnt { + __le16 opcode_channel; + + __le16 sel_mask; + u8 tx_counter; /* Tx error counter new value */ + u8 rx_counter; /* Rx error counter new value */ + + u16 unused; +}; + +/* uCAN SET_EN/CLR_DIS _OPTION command fields */ +#define PUCAN_OPTION_ERROR 0x0001 +#define PUCAN_OPTION_BUSLOAD 0x0002 +#define PUCAN_OPTION_CANDFDISO 0x0004 + +struct __packed pucan_options { + __le16 opcode_channel; + + __le16 options; + u32 unused; +}; + +/* uCAN received messages global format */ +struct __packed pucan_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; +}; + +/* uCAN flags for CAN/CANFD messages */ +#define PUCAN_MSG_SELF_RECEIVE 0x80 +#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */ +#define 
PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */ +#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */ +#define PUCAN_MSG_SINGLE_SHOT 0x08 +#define PUCAN_MSG_LOOPED_BACK 0x04 +#define PUCAN_MSG_EXT_ID 0x02 +#define PUCAN_MSG_RTR 0x01 + +struct __packed pucan_rx_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; + __le32 tag_low; + __le32 tag_high; + u8 channel_dlc; + u8 client; + __le16 flags; + __le32 can_id; + u8 d[0]; +}; + +/* uCAN error types */ +#define PUCAN_ERMSG_BIT_ERROR 0 +#define PUCAN_ERMSG_FORM_ERROR 1 +#define PUCAN_ERMSG_STUFF_ERROR 2 +#define PUCAN_ERMSG_OTHER_ERROR 3 +#define PUCAN_ERMSG_ERR_CNT_DEC 4 + +struct __packed pucan_error_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; + u8 channel_type_d; + u8 code_g; + u8 tx_err_cnt; + u8 rx_err_cnt; +}; + +static inline int pucan_error_get_channel(const struct pucan_error_msg *msg) +{ + return msg->channel_type_d & 0x0f; +} + +#define PUCAN_RX_BARRIER 0x10 +#define PUCAN_BUS_PASSIVE 0x20 +#define PUCAN_BUS_WARNING 0x40 +#define PUCAN_BUS_BUSOFF 0x80 + +struct __packed pucan_status_msg { + __le16 size; + __le16 type; + __le32 ts_low; + __le32 ts_high; + u8 channel_p_w_b; + u8 unused[3]; +}; + +static inline int pucan_status_get_channel(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & 0x0f; +} + +static inline int pucan_status_is_rx_barrier(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_RX_BARRIER; +} + +static inline int pucan_status_is_passive(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_PASSIVE; +} + +static inline int pucan_status_is_warning(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_WARNING; +} + +static inline int pucan_status_is_busoff(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_BUSOFF; +} + +/* uCAN transmitted message format */ +#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4)) + +struct __packed pucan_tx_msg { + __le16 size; + __le16 type; + __le32 tag_low; + __le32 tag_high; + u8 channel_dlc; + u8 client; + __le16 flags; + __le32 can_id; + u8 d[0]; +}; + +/* build the cmd opcode_channel field with respect to the correct endianness */ +static inline __le16 pucan_cmd_opcode_channel(int index, int opcode) +{ + return cpu_to_le16(((index) << 12) | ((opcode) & 0x3ff)); +} + +/* return the channel number part from any received message channel_dlc field */ +static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg) +{ + return msg->channel_dlc & 0xf; +} + +/* return the dlc value from any received message channel_dlc field */ +static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg) +{ + return msg->channel_dlc >> 4; +} + +static inline int pucan_ermsg_get_channel(const struct pucan_error_msg *msg) +{ + return msg->channel_type_d & 0x0f; +} + +static inline int pucan_stmsg_get_channel(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & 0x0f; +} + +#endif diff --git a/include/linux/can/led.h b/include/linux/can/led.h new file mode 100644 index 0000000..7c3cfd7 --- /dev/null +++ b/include/linux/can/led.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2012, Fabio Baltieri + */ + +#ifndef _CAN_LED_H +#define _CAN_LED_H + +#include +#include +#include + +enum can_led_event { + CAN_LED_EVENT_OPEN, + CAN_LED_EVENT_STOP, + CAN_LED_EVENT_TX, + CAN_LED_EVENT_RX, +}; + +#ifdef CONFIG_CAN_LEDS + +/* keep space for interface name + 
"-tx"/"-rx"/"-rxtx" + * suffix and null terminator + */ +#define CAN_LED_NAME_SZ (IFNAMSIZ + 6) + +void can_led_event(struct net_device *netdev, enum can_led_event event); +void devm_can_led_init(struct net_device *netdev); +int __init can_led_notifier_init(void); +void __exit can_led_notifier_exit(void); + +#else + +static inline void can_led_event(struct net_device *netdev, + enum can_led_event event) +{ +} +static inline void devm_can_led_init(struct net_device *netdev) +{ +} +static inline int can_led_notifier_init(void) +{ + return 0; +} +static inline void can_led_notifier_exit(void) +{ +} + +#endif + +#endif /* !_CAN_LED_H */ diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h new file mode 100644 index 0000000..9587d68 --- /dev/null +++ b/include/linux/can/platform/cc770.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_CC770_H +#define _CAN_PLATFORM_CC770_H + +/* CPU Interface Register (0x02) */ +#define CPUIF_CEN 0x01 /* Clock Out Enable */ +#define CPUIF_MUX 0x04 /* Multiplex */ +#define CPUIF_SLP 0x08 /* Sleep */ +#define CPUIF_PWD 0x10 /* Power Down Mode */ +#define CPUIF_DMC 0x20 /* Divide Memory Clock */ +#define CPUIF_DSC 0x40 /* Divide System Clock */ +#define CPUIF_RST 0x80 /* Hardware Reset Status */ + +/* Clock Out Register (0x1f) */ +#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */ +#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */ +#define CLKOUT_SL_SHIFT 4 + +/* Bus Configuration Register (0x2f) */ +#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */ +#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */ +#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */ +#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */ +#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */ + +struct cc770_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 cir; /* CPU Interface Register */ + u8 cor; /* Clock Out Register */ + u8 bcr; /* Bus Configuration Register */ +}; + +#endif /* !_CAN_PLATFORM_CC770_H */ diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h new file mode 100644 index 0000000..9e5ac27 --- /dev/null +++ b/include/linux/can/platform/mcp251x.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_MCP251X_H +#define _CAN_PLATFORM_MCP251X_H + +/* + * + * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * + */ + +#include + +/* + * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data + * @oscillator_frequency: - oscillator frequency in Hz + */ + +struct mcp251x_platform_data { + unsigned long oscillator_frequency; +}; + +#endif /* !_CAN_PLATFORM_MCP251X_H */ diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h new file mode 100644 index 0000000..5755ae5 --- /dev/null +++ b/include/linux/can/platform/sja1000.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CAN_PLATFORM_SJA1000_H +#define _CAN_PLATFORM_SJA1000_H + +/* clock divider register */ +#define CDR_CLKOUT_MASK 0x07 +#define CDR_CLK_OFF 0x08 /* Clock off (CLKOUT pin) */ +#define CDR_RXINPEN 0x20 /* TX1 output is RX irq output */ +#define CDR_CBP 0x40 /* CAN input comparator bypass */ +#define CDR_PELICAN 0x80 /* PeliCAN mode */ + +/* output control register */ +#define OCR_MODE_BIPHASE 0x00 +#define OCR_MODE_TEST 0x01 +#define OCR_MODE_NORMAL 0x02 +#define OCR_MODE_CLOCK 0x03 +#define OCR_MODE_MASK 0x07 +#define OCR_TX0_INVERT 0x04 +#define 
OCR_TX0_PULLDOWN 0x08 +#define OCR_TX0_PULLUP 0x10 +#define OCR_TX0_PUSHPULL 0x18 +#define OCR_TX1_INVERT 0x20 +#define OCR_TX1_PULLDOWN 0x40 +#define OCR_TX1_PULLUP 0x80 +#define OCR_TX1_PUSHPULL 0xc0 +#define OCR_TX_MASK 0xfc +#define OCR_TX_SHIFT 2 + +struct sja1000_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 ocr; /* output control register */ + u8 cdr; /* clock divider register */ +}; + +#endif /* !_CAN_PLATFORM_SJA1000_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h new file mode 100644 index 0000000..01219f2 --- /dev/null +++ b/include/linux/can/rx-offload.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/can/rx-offload.h + * + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde + */ + +#ifndef _CAN_RX_OFFLOAD_H +#define _CAN_RX_OFFLOAD_H + +#include <linux/netdevice.h> +#include <linux/can.h> + +struct can_rx_offload { + struct net_device *dev; + + unsigned int (*mailbox_read)(struct can_rx_offload *offload, + struct can_frame *cf, + u32 *timestamp, unsigned int mb); + + struct sk_buff_head skb_queue; + u32 skb_queue_len_max; + + unsigned int mb_first; + unsigned int mb_last; + + struct napi_struct napi; + + bool inc; +}; + +int can_rx_offload_add_timestamp(struct net_device *dev, + struct can_rx_offload *offload); +int can_rx_offload_add_fifo(struct net_device *dev, + struct can_rx_offload *offload, + unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, + u64 reg); +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp); +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp); +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb); +void can_rx_offload_reset(struct can_rx_offload *offload); +void can_rx_offload_del(struct can_rx_offload *offload); +void can_rx_offload_enable(struct can_rx_offload *offload); + +static inline void can_rx_offload_schedule(struct can_rx_offload *offload) +{ + napi_schedule(&offload->napi); +} + +static inline void can_rx_offload_disable(struct can_rx_offload *offload) +{ + napi_disable(&offload->napi); +} + +#endif /* !_CAN_RX_OFFLOAD_H */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h new file mode 100644 index 0000000..a954def --- /dev/null +++ b/include/linux/can/skb.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* + * linux/can/skb.h + * + * Definitions for the CAN network socket buffer + * + * Copyright (C) 2012 Oliver Hartkopp + * + */ + +#ifndef _CAN_SKB_H +#define _CAN_SKB_H + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/can.h> +#include <net/sock.h> + +/* + * The struct can_skb_priv is used to transport additional information along + * with the stored struct can(fd)_frame that cannot be contained in existing + * struct sk_buff elements. + * N.B. that this information must not be modified in cloned CAN sk_buffs. + * To modify the CAN frame content or the struct can_skb_priv content + * skb_copy() needs to be used instead of skb_clone(). 
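+ * + * Editor's sketch (illustration only) - take a private copy before + * modifying a possibly shared CAN skb: + * + * if (skb_shared(skb)) { + * struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); + * + * kfree_skb(skb); - drop our reference to the shared skb + * skb = nskb; - may be NULL on allocation failure + * } + * if (skb) + * can_skb_prv(skb)->ifindex = dev->ifindex;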
+ */ + +/** + * struct can_skb_priv - private additional data inside CAN sk_buffs + * @ifindex: ifindex of the first interface the CAN frame appeared on + * @skbcnt: atomic counter to have a unique id together with skb pointer + * @cf: align to the following CAN frame at skb->data + */ +struct can_skb_priv { + int ifindex; + int skbcnt; + struct can_frame cf[0]; +}; + +static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) +{ + return (struct can_skb_priv *)(skb->head); +} + +static inline void can_skb_reserve(struct sk_buff *skb) +{ + skb_reserve(skb, sizeof(struct can_skb_priv)); +} + +static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) +{ + if (sk) { + sock_hold(sk); + skb->destructor = sock_efree; + skb->sk = sk; + } +} + +/* + * returns an unshared skb owned by the original sock to be echoed back + */ +static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) +{ + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + + if (likely(nskb)) { + can_skb_set_owner(nskb, skb->sk); + consume_skb(skb); + return nskb; + } else { + kfree_skb(skb); + return NULL; + } + } + + /* we can assume to have an unshared skb with proper owner */ + return skb; +} + +#endif /* !_CAN_SKB_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h new file mode 100644 index 0000000..ecce0f4 --- /dev/null +++ b/include/linux/capability.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is <linux/capability.h> + * + * Andrew G. Morgan + * Alexander Kjeldaas + * with help from Aleph1, Roland Buresund and Andrew Main. + * + * See here for the libcap library ("POSIX draft" compliance): + * + * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ + */ +#ifndef _LINUX_CAPABILITY_H +#define _LINUX_CAPABILITY_H + +#include <uapi/linux/capability.h> +#include <linux/uidgid.h> + +#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 +#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 + +extern int file_caps_enabled; + +typedef struct kernel_cap_struct { + __u32 cap[_KERNEL_CAPABILITY_U32S]; +} kernel_cap_t; + +/* same as vfs_ns_cap_data but in cpu endian and always filled completely */ +struct cpu_vfs_cap_data { + __u32 magic_etc; + kernel_cap_t permitted; + kernel_cap_t inheritable; + kuid_t rootid; +}; + +#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) +#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) + + +struct file; +struct inode; +struct dentry; +struct task_struct; +struct user_namespace; + +extern const kernel_cap_t __cap_empty_set; +extern const kernel_cap_t __cap_init_eff_set; + +/* + * Internal kernel functions only + */ + +#define CAP_FOR_EACH_U32(__capi) \ + for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi) + +/* + * CAP_FS_MASK and CAP_NFSD_MASKS: + * + * The fs mask is all the privileges that fsuid==0 historically meant. + * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE. + * + * It has never meant setting security.* and trusted.* xattrs. + * + * We could also define fsmask as follows: + * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions + * 2. 
The security.* and trusted.* xattrs are fs-related MAC permissions + */ + +# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \ + | CAP_TO_MASK(CAP_MKNOD) \ + | CAP_TO_MASK(CAP_DAC_OVERRIDE) \ + | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \ + | CAP_TO_MASK(CAP_FOWNER) \ + | CAP_TO_MASK(CAP_FSETID)) + +# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE)) + +#if _KERNEL_CAPABILITY_U32S != 2 +# error Fix up hand-coded capability macro initializers +#else /* HAND-CODED capability initializers */ + +#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1) +#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1) + +# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }}) +# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ + CAP_FS_MASK_B1 } }) +# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_SYS_RESOURCE), \ + CAP_FS_MASK_B1 } }) + +#endif /* _KERNEL_CAPABILITY_U32S != 2 */ + +# define cap_clear(c) do { (c) = __cap_empty_set; } while (0) + +#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag)) +#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag)) +#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag)) + +#define CAP_BOP_ALL(c, a, b, OP) \ +do { \ + unsigned __capi; \ + CAP_FOR_EACH_U32(__capi) { \ + c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \ + } \ +} while (0) + +#define CAP_UOP_ALL(c, a, OP) \ +do { \ + unsigned __capi; \ + CAP_FOR_EACH_U32(__capi) { \ + c.cap[__capi] = OP a.cap[__capi]; \ + } \ +} while (0) + +static inline kernel_cap_t cap_combine(const kernel_cap_t a, + const kernel_cap_t b) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, b, |); + return dest; +} + +static inline kernel_cap_t cap_intersect(const kernel_cap_t a, + const kernel_cap_t b) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, b, &); + return dest; +} + +static inline kernel_cap_t cap_drop(const kernel_cap_t a, + const kernel_cap_t drop) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, drop, &~); + return dest; +} + +static inline kernel_cap_t cap_invert(const kernel_cap_t c) +{ + kernel_cap_t dest; + CAP_UOP_ALL(dest, c, ~); + return dest; +} + +static inline bool cap_isclear(const kernel_cap_t a) +{ + unsigned __capi; + CAP_FOR_EACH_U32(__capi) { + if (a.cap[__capi] != 0) + return false; + } + return true; +} + +/* + * Check if "a" is a subset of "set". + * return true if ALL of the capabilities in "a" are also in "set" + * cap_issubset(0101, 1111) will return true + * return false if ANY of the capabilities in "a" are not in "set" + * cap_issubset(1111, 0101) will return false + */ +static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set) +{ + kernel_cap_t dest; + dest = cap_drop(a, set); + return cap_isclear(dest); +} + +/* Used to decide between falling back on the old suser() or fsuser(). 
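+ * + * Editor's note - a short sketch of the set algebra above (illustration + * only; current_cred() comes from linux/cred.h): + * + * kernel_cap_t needed = CAP_EMPTY_SET; + * cap_raise(needed, CAP_NET_ADMIN); + * if (cap_issubset(needed, current_cred()->cap_effective)) + * ... the task has every capability raised in 'needed' ...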
*/ + +static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return cap_drop(a, __cap_fs_set); +} + +static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a, + const kernel_cap_t permitted) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return cap_combine(a, + cap_intersect(permitted, __cap_fs_set)); +} + +static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a) +{ + const kernel_cap_t __cap_fs_set = CAP_NFSD_SET; + return cap_drop(a, __cap_fs_set); +} + +static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a, + const kernel_cap_t permitted) +{ + const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET; + return cap_combine(a, + cap_intersect(permitted, __cap_nfsd_set)); +} + +#ifdef CONFIG_MULTIUSER +extern bool has_capability(struct task_struct *t, int cap); +extern bool has_ns_capability(struct task_struct *t, + struct user_namespace *ns, int cap); +extern bool has_capability_noaudit(struct task_struct *t, int cap); +extern bool has_ns_capability_noaudit(struct task_struct *t, + struct user_namespace *ns, int cap); +extern bool capable(int cap); +extern bool ns_capable(struct user_namespace *ns, int cap); +extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); +extern bool ns_capable_setid(struct user_namespace *ns, int cap); +#else +static inline bool has_capability(struct task_struct *t, int cap) +{ + return true; +} +static inline bool has_ns_capability(struct task_struct *t, + struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool has_capability_noaudit(struct task_struct *t, int cap) +{ + return true; +} +static inline bool has_ns_capability_noaudit(struct task_struct *t, + struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool capable(int cap) +{ + return true; +} +static inline bool ns_capable(struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) +{ + return true; +} +static inline bool ns_capable_setid(struct user_namespace *ns, int cap) +{ + return true; +} +#endif /* CONFIG_MULTIUSER */ +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); +extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); +extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); +extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); + +/* audit system wants to get cap info from files as well */ +extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); + +extern int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size); + +#endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/cb710.h b/include/linux/cb710.h new file mode 100644 index 0000000..60de3fe --- /dev/null +++ b/include/linux/cb710.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cb710/cb710.h + * + * Copyright by Michał Mirosław, 2008-2009 + */ +#ifndef LINUX_CB710_DRIVER_H +#define LINUX_CB710_DRIVER_H + +#include +#include +#include +#include +#include +#include + +struct cb710_slot; + +typedef int (*cb710_irq_handler_t)(struct cb710_slot *); + +/* per-virtual-slot structure */ +struct cb710_slot { + struct platform_device pdev; + void __iomem *iobase; + cb710_irq_handler_t irq_handler; +}; + +/* per-device structure */ +struct cb710_chip { + struct pci_dev *pdev; + void __iomem *iobase; + unsigned platform_id; 
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS + atomic_t slot_refs_count; +#endif + unsigned slot_mask; + unsigned slots; + spinlock_t irq_lock; + struct cb710_slot slot[0]; +}; + +/* NOTE: cb710_chip.slots is modified only during device init/exit and + * they are all serialized wrt themselves */ + +/* cb710_chip.slot_mask values */ +#define CB710_SLOT_MMC 1 +#define CB710_SLOT_MS 2 +#define CB710_SLOT_SM 4 + +/* slot port accessors - so the logic is more clear in the code */ +#define CB710_PORT_ACCESSORS(t) \ +static inline void cb710_write_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t value) \ +{ \ + iowrite##t(value, slot->iobase + port); \ +} \ + \ +static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \ + unsigned port) \ +{ \ + return ioread##t(slot->iobase + port); \ +} \ + \ +static inline void cb710_modify_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t set, u##t clear) \ +{ \ + iowrite##t( \ + (ioread##t(slot->iobase + port) & ~clear)|set, \ + slot->iobase + port); \ +} + +CB710_PORT_ACCESSORS(8) +CB710_PORT_ACCESSORS(16) +CB710_PORT_ACCESSORS(32) + +void cb710_pci_update_config_reg(struct pci_dev *pdev, + int reg, uint32_t and, uint32_t xor); +void cb710_set_irq_handler(struct cb710_slot *slot, + cb710_irq_handler_t handler); + +/* some device struct walking */ + +static inline struct cb710_slot *cb710_pdev_to_slot( + struct platform_device *pdev) +{ + return container_of(pdev, struct cb710_slot, pdev); +} + +static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot) +{ + return dev_get_drvdata(slot->pdev.dev.parent); +} + +static inline struct device *cb710_slot_dev(struct cb710_slot *slot) +{ + return &slot->pdev.dev; +} + +static inline struct device *cb710_chip_dev(struct cb710_chip *chip) +{ + return &chip->pdev->dev; +} + +/* debugging aids */ + +#ifdef CONFIG_CB710_DEBUG +void cb710_dump_regs(struct cb710_chip *chip, unsigned dump); +#else +#define cb710_dump_regs(c, d) do {} while (0) +#endif + +#define CB710_DUMP_REGS_MMC 0x0F +#define CB710_DUMP_REGS_MS 0x30 +#define CB710_DUMP_REGS_SM 0xC0 +#define CB710_DUMP_REGS_ALL 0xFF +#define CB710_DUMP_REGS_MASK 0xFF + +#define CB710_DUMP_ACCESS_8 0x100 +#define CB710_DUMP_ACCESS_16 0x200 +#define CB710_DUMP_ACCESS_32 0x400 +#define CB710_DUMP_ACCESS_ALL 0x700 +#define CB710_DUMP_ACCESS_MASK 0x700 + +#endif /* LINUX_CB710_DRIVER_H */ +/* + * cb710/sgbuf2.h + * + * Copyright by Michał Mirosław, 2008-2009 + */ +#ifndef LINUX_CB710_SG_H +#define LINUX_CB710_SG_H + +#include +#include + +/* + * 32-bit PIO mapping sg iterator + * + * Hides scatterlist access issues - fragment boundaries, alignment, page + * mapping - for drivers using 32-bit-word-at-a-time-PIO (ie. PCI devices + * without DMA support). + * + * Best-case reading (transfer from device): + * sg_miter_start(, SG_MITER_TO_SG); + * cb710_sg_dwiter_write_from_io(); + * sg_miter_stop(); + * + * Best-case writing (transfer to device): + * sg_miter_start(, SG_MITER_FROM_SG); + * cb710_sg_dwiter_read_to_io(); + * sg_miter_stop(); + */ + +uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter); +void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data); + +/** + * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Reads @count 32-bit words from register @port and stores it in + * buffer iterated by @miter. 
Data that would overflow the buffer + * is silently ignored. Iterator is advanced by 4*@count bytes + * or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + cb710_sg_dwiter_write_next_block(miter, ioread32(port)); +} + +/** + * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Writes @count 32-bit words to register @port from buffer iterated + * through @miter. If buffer ends before @count words are written + * missing data is replaced by zeroes. @miter is advanced by 4*@count + * bytes or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + iowrite32(cb710_sg_dwiter_read_next_block(miter), port); +} + +#endif /* LINUX_CB710_SG_H */ diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h new file mode 100644 index 0000000..1d52292 --- /dev/null +++ b/include/linux/cciss_ioctl.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CCISS_IOCTLH +#define CCISS_IOCTLH + +#include + +#ifdef CONFIG_COMPAT + +/* 32 bit compatible ioctl structs */ +typedef struct _IOCTL32_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + WORD buf_size; /* size in bytes of the buf */ + __u32 buf; /* 32 bit pointer to data buffer */ +} IOCTL32_Command_struct; + +typedef struct _BIG_IOCTL32_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + DWORD malloc_size; /* < MAX_KMALLOC_SIZE in cciss.c */ + DWORD buf_size; /* size in bytes of the buf */ + /* < malloc_size * MAXSGENTRIES */ + __u32 buf; /* 32 bit pointer to data buffer */ +} BIG_IOCTL32_Command_struct; + +#define CCISS_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 11, IOCTL32_Command_struct) +#define CCISS_BIG_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 18, BIG_IOCTL32_Command_struct) + +#endif /* CONFIG_COMPAT */ +#endif diff --git a/include/linux/ccp.h b/include/linux/ccp.h new file mode 100644 index 0000000..a5dfbaf --- /dev/null +++ b/include/linux/ccp.h @@ -0,0 +1,666 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + * Author: Gary R Hook + */ + +#ifndef __CCP_H__ +#define __CCP_H__ + +#include +#include +#include +#include +#include + +struct ccp_device; +struct ccp_cmd; + +#if defined(CONFIG_CRYPTO_DEV_SP_CCP) + +/** + * ccp_present - check if a CCP device is present + * + * Returns zero if a CCP device is present, -ENODEV otherwise. 
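+ * + * Editor's sketch (illustration only): + * + * if (ccp_present() != 0) + * return -ENODEV; - no CCP, fall back to a pure software path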
+ */ +int ccp_present(void); + +#define CCP_VSIZE 16 +#define CCP_VMASK ((unsigned int)((1 << CCP_VSIZE) - 1)) +#define CCP_VERSION(v, r) ((unsigned int)((v << CCP_VSIZE) \ + | (r & CCP_VMASK))) + +/** + * ccp_version - get the version of the CCP + * + * Returns a positive version number, or zero if no CCP + */ +unsigned int ccp_version(void); + +/** + * ccp_enqueue_cmd - queue an operation for processing by the CCP + * + * @cmd: ccp_cmd struct to be processed + * + * Refer to the ccp_cmd struct below for required fields. + * + * Queue a cmd to be processed by the CCP. If queueing the cmd + * would exceed the defined length of the cmd queue, the cmd will + * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, and + * queueing it will result in a return code of -EBUSY. + * + * The callback routine specified in the ccp_cmd struct will be + * called to notify the caller of completion (if the cmd was not + * backlogged) or advancement out of the backlog. If the cmd has + * advanced out of the backlog the "err" value of the callback + * will be -EINPROGRESS. Any other "err" value during callback is + * the result of the operation. + * + * The cmd has been successfully queued if: + * the return code is -EINPROGRESS or + * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set + */ +int ccp_enqueue_cmd(struct ccp_cmd *cmd); + +#else /* CONFIG_CRYPTO_DEV_SP_CCP is not enabled */ + +static inline int ccp_present(void) +{ + return -ENODEV; +} + +static inline unsigned int ccp_version(void) +{ + return 0; +} + +static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) +{ + return -ENODEV; +} + +#endif /* CONFIG_CRYPTO_DEV_SP_CCP */ + + +/***** AES engine *****/ +/** + * ccp_aes_type - AES key size + * + * @CCP_AES_TYPE_128: 128-bit key + * @CCP_AES_TYPE_192: 192-bit key + * @CCP_AES_TYPE_256: 256-bit key + */ +enum ccp_aes_type { + CCP_AES_TYPE_128 = 0, + CCP_AES_TYPE_192, + CCP_AES_TYPE_256, + CCP_AES_TYPE__LAST, +}; + +/** + * ccp_aes_mode - AES operation mode + * + * @CCP_AES_MODE_ECB: ECB mode + * @CCP_AES_MODE_CBC: CBC mode + * @CCP_AES_MODE_OFB: OFB mode + * @CCP_AES_MODE_CFB: CFB mode + * @CCP_AES_MODE_CTR: CTR mode + * @CCP_AES_MODE_CMAC: CMAC mode + */ +enum ccp_aes_mode { + CCP_AES_MODE_ECB = 0, + CCP_AES_MODE_CBC, + CCP_AES_MODE_OFB, + CCP_AES_MODE_CFB, + CCP_AES_MODE_CTR, + CCP_AES_MODE_CMAC, + CCP_AES_MODE_GHASH, + CCP_AES_MODE_GCTR, + CCP_AES_MODE_GCM, + CCP_AES_MODE_GMAC, + CCP_AES_MODE__LAST, +}; + +/** + * ccp_aes_action - AES operation (decrypt/encrypt) + * + * @CCP_AES_ACTION_DECRYPT: AES decrypt operation + * @CCP_AES_ACTION_ENCRYPT: AES encrypt operation + */ +enum ccp_aes_action { + CCP_AES_ACTION_DECRYPT = 0, + CCP_AES_ACTION_ENCRYPT, + CCP_AES_ACTION__LAST, +}; +/* Overloaded field */ +#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT +#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT + +/** + * struct ccp_aes_engine - CCP AES operation + * @type: AES operation key size + * @mode: AES operation mode + * @action: AES operation (decrypt/encrypt) + * @key: key to be used for this AES operation + * @key_len: length in bytes of key + * @iv: IV to be used for this AES operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @cmac_final: indicates final operation when running in CMAC mode + * @cmac_key: K1/K2 key used in final CMAC operation + * @cmac_key_len: length in bytes of cmac_key + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - 
type, mode, action, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * - cmac_final for CMAC mode + * - cmac_key, cmac_key_len for CMAC mode if cmac_final is non-zero + * + * The iv variable is used as both input and output. On completion of the + * AES operation the new IV overwrites the old IV. + */ +struct ccp_aes_engine { + enum ccp_aes_type type; + enum ccp_aes_mode mode; + enum ccp_aes_action action; + + u32 authsize; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 cmac_final; /* Indicates final cmac cmd */ + struct scatterlist *cmac_key; /* K1/K2 cmac key required for + * final cmac cmd */ + u32 cmac_key_len; /* In bytes */ + + u32 aad_len; /* In bytes */ +}; + +/***** XTS-AES engine *****/ +/** + * ccp_xts_aes_unit_size - XTS unit size + * + * @CCP_XTS_AES_UNIT_SIZE_16: Unit size of 16 bytes + * @CCP_XTS_AES_UNIT_SIZE_512: Unit size of 512 bytes + * @CCP_XTS_AES_UNIT_SIZE_1024: Unit size of 1024 bytes + * @CCP_XTS_AES_UNIT_SIZE_2048: Unit size of 2048 bytes + * @CCP_XTS_AES_UNIT_SIZE_4096: Unit size of 4096 bytes + */ +enum ccp_xts_aes_unit_size { + CCP_XTS_AES_UNIT_SIZE_16 = 0, + CCP_XTS_AES_UNIT_SIZE_512, + CCP_XTS_AES_UNIT_SIZE_1024, + CCP_XTS_AES_UNIT_SIZE_2048, + CCP_XTS_AES_UNIT_SIZE_4096, + CCP_XTS_AES_UNIT_SIZE__LAST, +}; + +/** + * struct ccp_xts_aes_engine - CCP XTS AES operation + * @action: AES operation (decrypt/encrypt) + * @unit_size: unit size of the XTS operation + * @key: key to be used for this XTS AES operation + * @key_len: length in bytes of key + * @iv: IV to be used for this XTS AES operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicates final XTS operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, unit_size, key, key_len, iv, iv_len, src, dst, src_len, final + * + * The iv variable is used as both input and output. On completion of the + * AES operation the new IV overwrites the old IV. 
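+ * + * Editor's sketch of submitting a request through ccp_enqueue_cmd(); struct + * ccp_cmd, its 'u' union and the CCP_ENGINE_* values are defined later in + * this file, and the key/iv/src/dst scatterlists are placeholders + * (illustration only): + * + * struct ccp_cmd cmd = {}; + * + * cmd.engine = CCP_ENGINE_XTS_AES_128; + * cmd.u.xts.action = CCP_AES_ACTION_ENCRYPT; + * cmd.u.xts.unit_size = CCP_XTS_AES_UNIT_SIZE_512; + * cmd.u.xts.key = key_sg; cmd.u.xts.key_len = 32; + * cmd.u.xts.iv = iv_sg; cmd.u.xts.iv_len = 16; + * cmd.u.xts.src = src_sg; cmd.u.xts.dst = dst_sg; cmd.u.xts.src_len = 512; + * cmd.u.xts.final = 1; + * ret = ccp_enqueue_cmd(&cmd); - -EINPROGRESS means successfully queued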
+ */ +struct ccp_xts_aes_engine { + enum ccp_aes_type type; + enum ccp_aes_action action; + enum ccp_xts_aes_unit_size unit_size; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 final; +}; + +/***** SHA engine *****/ +/** + * ccp_sha_type - type of SHA operation + * + * @CCP_SHA_TYPE_1: SHA-1 operation + * @CCP_SHA_TYPE_224: SHA-224 operation + * @CCP_SHA_TYPE_256: SHA-256 operation + */ +enum ccp_sha_type { + CCP_SHA_TYPE_1 = 1, + CCP_SHA_TYPE_224, + CCP_SHA_TYPE_256, + CCP_SHA_TYPE_384, + CCP_SHA_TYPE_512, + CCP_SHA_TYPE__LAST, +}; + +/** + * struct ccp_sha_engine - CCP SHA operation + * @type: Type of SHA operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SHA operation + * @final: indicates final SHA operation + * @msg_bits: total length of the message in bits used in final SHA operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - type, ctx, ctx_len, src, src_len, final + * - msg_bits if final is non-zero + * + * The ctx variable is used as both input and output. On completion of the + * SHA operation the new hash value overwrites the old hash value. + */ +struct ccp_sha_engine { + enum ccp_sha_type type; + + struct scatterlist *ctx; + u32 ctx_len; /* In bytes */ + + struct scatterlist *src; + u64 src_len; /* In bytes */ + + struct scatterlist *opad; + u32 opad_len; /* In bytes */ + + u32 first; /* Indicates first sha cmd */ + u32 final; /* Indicates final sha cmd */ + u64 msg_bits; /* Message length in bits required for + * final sha cmd */ +}; + +/***** 3DES engine *****/ +enum ccp_des3_mode { + CCP_DES3_MODE_ECB = 0, + CCP_DES3_MODE_CBC, + CCP_DES3_MODE_CFB, + CCP_DES3_MODE__LAST, +}; + +enum ccp_des3_type { + CCP_DES3_TYPE_168 = 1, + CCP_DES3_TYPE__LAST, + }; + +enum ccp_des3_action { + CCP_DES3_ACTION_DECRYPT = 0, + CCP_DES3_ACTION_ENCRYPT, + CCP_DES3_ACTION__LAST, +}; + +/** + * struct ccp_des3_engine - CCP SHA operation + * @type: Type of 3DES operation + * @mode: cipher mode + * @action: 3DES operation (decrypt/encrypt) + * @key: key to be used for this 3DES operation + * @key_len: length of key (in bytes) + * @iv: IV to be used for this AES operation + * @iv_len: length in bytes of iv + * @src: input data to be used for this operation + * @src_len: length of input data used for this operation (in bytes) + * @dst: output data produced by this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - type, mode, action, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * + * The iv variable is used as both input and output. On completion of the + * 3DES operation the new IV overwrites the old IV. 
+ */ +struct ccp_des3_engine { + enum ccp_des3_type type; + enum ccp_des3_mode mode; + enum ccp_des3_action action; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** RSA engine *****/ +/** + * struct ccp_rsa_engine - CCP RSA operation + * @key_size: length in bits of RSA key + * @exp: RSA exponent + * @exp_len: length in bytes of exponent + * @mod: RSA modulus + * @mod_len: length in bytes of modulus + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - key_size, exp, exp_len, mod, mod_len, src, dst, src_len + */ +struct ccp_rsa_engine { + u32 key_size; /* In bits */ + + struct scatterlist *exp; + u32 exp_len; /* In bytes */ + + struct scatterlist *mod; + u32 mod_len; /* In bytes */ + + struct scatterlist *src, *dst; + u32 src_len; /* In bytes */ +}; + +/***** Passthru engine *****/ +/** + * ccp_passthru_bitwise - type of bitwise passthru operation + * + * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed + * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask + * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask + * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask + * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask + */ +enum ccp_passthru_bitwise { + CCP_PASSTHRU_BITWISE_NOOP = 0, + CCP_PASSTHRU_BITWISE_AND, + CCP_PASSTHRU_BITWISE_OR, + CCP_PASSTHRU_BITWISE_XOR, + CCP_PASSTHRU_BITWISE_MASK, + CCP_PASSTHRU_BITWISE__LAST, +}; + +/** + * ccp_passthru_byteswap - type of byteswap passthru operation + * + * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed + * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words + * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words + */ +enum ccp_passthru_byteswap { + CCP_PASSTHRU_BYTESWAP_NOOP = 0, + CCP_PASSTHRU_BYTESWAP_32BIT, + CCP_PASSTHRU_BYTESWAP_256BIT, + CCP_PASSTHRU_BYTESWAP__LAST, +}; + +/** + * struct ccp_passthru_engine - CCP pass-through operation + * @bit_mod: bitwise operation to perform + * @byte_swap: byteswap operation to perform + * @mask: mask to be applied to data + * @mask_len: length in bytes of mask + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicate final pass-through operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - bit_mod, byte_swap, src, dst, src_len + * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP + */ +struct ccp_passthru_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + + struct scatterlist *mask; + u32 mask_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ + + u32 final; +}; + +/** + * struct ccp_passthru_nomap_engine - CCP pass-through operation + * without performing DMA mapping + * @bit_mod: bitwise operation to perform + * @byte_swap: byteswap operation to perform + * @mask: mask to be applied to data + * @mask_len: length in bytes of mask + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @final: indicate final pass-through operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + 
* - bit_mod, byte_swap, src, dst, src_len + * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP + */ +struct ccp_passthru_nomap_engine { + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + + dma_addr_t mask; + u32 mask_len; /* In bytes */ + + dma_addr_t src_dma, dst_dma; + u64 src_len; /* In bytes */ + + u32 final; +}; + +/***** ECC engine *****/ +#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ +#define CCP_ECC_MAX_OPERANDS 6 +#define CCP_ECC_MAX_OUTPUTS 3 + +/** + * ccp_ecc_function - type of ECC function + * + * @CCP_ECC_FUNCTION_MMUL_384BIT: 384-bit modular multiplication + * @CCP_ECC_FUNCTION_MADD_384BIT: 384-bit modular addition + * @CCP_ECC_FUNCTION_MINV_384BIT: 384-bit multiplicative inverse + * @CCP_ECC_FUNCTION_PADD_384BIT: 384-bit point addition + * @CCP_ECC_FUNCTION_PMUL_384BIT: 384-bit point multiplication + * @CCP_ECC_FUNCTION_PDBL_384BIT: 384-bit point doubling + */ +enum ccp_ecc_function { + CCP_ECC_FUNCTION_MMUL_384BIT = 0, + CCP_ECC_FUNCTION_MADD_384BIT, + CCP_ECC_FUNCTION_MINV_384BIT, + CCP_ECC_FUNCTION_PADD_384BIT, + CCP_ECC_FUNCTION_PMUL_384BIT, + CCP_ECC_FUNCTION_PDBL_384BIT, +}; + +/** + * struct ccp_ecc_modular_math - CCP ECC modular math parameters + * @operand_1: first operand for the modular math operation + * @operand_1_len: length of the first operand + * @operand_2: second operand for the modular math operation + * (not used for CCP_ECC_FUNCTION_MINV_384BIT) + * @operand_2_len: length of the second operand + * (not used for CCP_ECC_FUNCTION_MINV_384BIT) + * @result: result of the modular math operation + * @result_len: length of the supplied result buffer + */ +struct ccp_ecc_modular_math { + struct scatterlist *operand_1; + unsigned int operand_1_len; /* In bytes */ + + struct scatterlist *operand_2; + unsigned int operand_2_len; /* In bytes */ + + struct scatterlist *result; + unsigned int result_len; /* In bytes */ +}; + +/** + * struct ccp_ecc_point - CCP ECC point definition + * @x: the x coordinate of the ECC point + * @x_len: the length of the x coordinate + * @y: the y coordinate of the ECC point + * @y_len: the length of the y coordinate + */ +struct ccp_ecc_point { + struct scatterlist *x; + unsigned int x_len; /* In bytes */ + + struct scatterlist *y; + unsigned int y_len; /* In bytes */ +}; + +/** + * struct ccp_ecc_point_math - CCP ECC point math parameters + * @point_1: the first point of the ECC point math operation + * @point_2: the second point of the ECC point math operation + * (only used for CCP_ECC_FUNCTION_PADD_384BIT) + * @domain_a: the a parameter of the ECC curve + * @domain_a_len: the length of the a parameter + * @scalar: the scalar parameter for the point math operation + * (only used for CCP_ECC_FUNCTION_PMUL_384BIT) + * @scalar_len: the length of the scalar parameter + * (only used for CCP_ECC_FUNCTION_PMUL_384BIT) + * @result: the point resulting from the point math operation + */ +struct ccp_ecc_point_math { + struct ccp_ecc_point point_1; + struct ccp_ecc_point point_2; + + struct scatterlist *domain_a; + unsigned int domain_a_len; /* In bytes */ + + struct scatterlist *scalar; + unsigned int scalar_len; /* In bytes */ + + struct ccp_ecc_point result; +}; + +/** + * struct ccp_ecc_engine - CCP ECC operation + * @function: ECC function to perform + * @mod: ECC modulus + * @mod_len: length in bytes of modulus + * @mm: modular math parameters + * @pm: point math parameters + * @ecc_result: result of the ECC operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - 
function, mod, mod_len + * - u.mm or u.pm, depending on the selected function + * - ecc_result + */ +struct ccp_ecc_engine { + enum ccp_ecc_function function; + + struct scatterlist *mod; + u32 mod_len; /* In bytes */ + + union { + struct ccp_ecc_modular_math mm; + struct ccp_ecc_point_math pm; + } u; + + u16 ecc_result; +}; + + +/** + * ccp_engine - CCP operation identifiers + * + * @CCP_ENGINE_AES: AES operation + * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation + * @CCP_ENGINE_DES3: 3DES operation + * @CCP_ENGINE_SHA: SHA operation + * @CCP_ENGINE_RSA: RSA operation + * @CCP_ENGINE_PASSTHRU: pass-through operation + * @CCP_ENGINE_ZLIB_DECOMPRESS: unused + * @CCP_ENGINE_ECC: ECC operation + */ +enum ccp_engine { + CCP_ENGINE_AES = 0, + CCP_ENGINE_XTS_AES_128, + CCP_ENGINE_DES3, + CCP_ENGINE_SHA, + CCP_ENGINE_RSA, + CCP_ENGINE_PASSTHRU, + CCP_ENGINE_ZLIB_DECOMPRESS, + CCP_ENGINE_ECC, + CCP_ENGINE__LAST, +}; + +/* Flag values for flags member of ccp_cmd */ +#define CCP_CMD_MAY_BACKLOG 0x00000001 +#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 + +/** + * struct ccp_cmd - CCP operation request + * @entry: list element (ccp driver use only) + * @work: work element used for callbacks (ccp driver use only) + * @ccp: CCP device to be run on + * @ret: operation return code (ccp driver use only) + * @flags: cmd processing flags + * @engine: CCP operation to perform + * @engine_error: CCP engine return code + * @u: engine specific structures, refer to the specific engine structs above + * @callback: operation completion callback function + * @data: parameter value to be supplied to the callback function + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - engine, callback + * - See the operation structures above for what is required for each + * operation. + */ +struct ccp_cmd { + /* The list_head, work_struct, ccp and ret variables are for use + * by the CCP driver only.
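+ * + * Illustrative submission sketch only (my_callback and my_ctx are placeholders supplied by the caller): + * + * cmd.engine = CCP_ENGINE_AES; + * cmd.flags = CCP_CMD_MAY_BACKLOG; + * cmd.callback = my_callback; + * cmd.data = my_ctx; + * ret = ccp_enqueue_cmd(&cmd);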
+ */ + struct list_head entry; + struct work_struct work; + struct ccp_device *ccp; + int ret; + + u32 flags; + + enum ccp_engine engine; + u32 engine_error; + + union { + struct ccp_aes_engine aes; + struct ccp_xts_aes_engine xts; + struct ccp_des3_engine des3; + struct ccp_sha_engine sha; + struct ccp_rsa_engine rsa; + struct ccp_passthru_engine passthru; + struct ccp_passthru_nomap_engine passthru_nomap; + struct ccp_ecc_engine ecc; + } u; + + /* Completion callback support */ + void (*callback)(void *data, int err); + void *data; +}; + +#endif diff --git a/include/linux/cdev.h b/include/linux/cdev.h new file mode 100644 index 0000000..0e8cd62 --- /dev/null +++ b/include/linux/cdev.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CDEV_H +#define _LINUX_CDEV_H + +#include +#include +#include +#include + +struct file_operations; +struct inode; +struct module; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +} __randomize_layout; + +void cdev_init(struct cdev *, const struct file_operations *); + +struct cdev *cdev_alloc(void); + +void cdev_put(struct cdev *p); + +int cdev_add(struct cdev *, dev_t, unsigned); + +void cdev_set_parent(struct cdev *p, struct kobject *kobj); +int cdev_device_add(struct cdev *cdev, struct device *dev); +void cdev_device_del(struct cdev *cdev, struct device *dev); + +void cdev_del(struct cdev *); + +void cd_forget(struct inode *); + +#endif diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h new file mode 100644 index 0000000..528271c --- /dev/null +++ b/include/linux/cdrom.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * -- + * General header file for linux CD-ROM drivers + * Copyright (C) 1992 David Giller, rafetmad@oxy.edu + * 1994, 1995 Eberhard Mönkeberg, emoenke@gwdg.de + * 1996 David van Leeuwen, david@tm.tno.nl + * 1997, 1998 Erik Andersen, andersee@debian.org + * 1998-2002 Jens Axboe, axboe@suse.de + */ +#ifndef _LINUX_CDROM_H +#define _LINUX_CDROM_H + +#include /* not really needed, later.. 
*/ +#include +#include +#include + +struct packet_command +{ + unsigned char cmd[CDROM_PACKET_SIZE]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +/* + * _OLD will use PIO transfer on atapi devices, _BPC_* will use DMA + */ +#define CDDA_OLD 0 /* old style */ +#define CDDA_BPC_SINGLE 1 /* single frame block pc */ +#define CDDA_BPC_FULL 2 /* multi frame block pc */ + +/* Uniform cdrom data structures for cdrom.c */ +struct cdrom_device_info { + const struct cdrom_device_ops *ops; /* link to device_ops */ + struct list_head list; /* linked list of all device_info */ + struct gendisk *disk; /* matching block layer disk */ + void *handle; /* driver-dependent data */ +/* specifications */ + int mask; /* mask of capability: disables them */ + int speed; /* maximum speed for reading data */ + int capacity; /* number of discs in jukebox */ +/* device-related storage */ + unsigned int options : 30; /* options flags */ + unsigned mc_flags : 2; /* media change buffer flags */ + unsigned int vfs_events; /* cached events for vfs path */ + unsigned int ioctl_events; /* cached events for ioctl path */ + int use_count; /* number of times device opened */ + char name[20]; /* name of the device type */ +/* per-device flags */ + __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */ + __u8 keeplocked : 1; /* CDROM_LOCKDOOR status */ + __u8 reserved : 5; /* not used yet */ + int cdda_method; /* see flags */ + __u8 last_sense; + __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ + unsigned short mmc3_profile; /* current MMC3 profile */ + int for_data; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; +}; + +struct cdrom_device_ops { +/* routines */ + int (*open) (struct cdrom_device_info *, int); + void (*release) (struct cdrom_device_info *); + int (*drive_status) (struct cdrom_device_info *, int); + unsigned int (*check_events) (struct cdrom_device_info *cdi, + unsigned int clearing, int slot); + int (*media_changed) (struct cdrom_device_info *, int); + int (*tray_move) (struct cdrom_device_info *, int); + int (*lock_door) (struct cdrom_device_info *, int); + int (*select_speed) (struct cdrom_device_info *, int); + int (*select_disc) (struct cdrom_device_info *, int); + int (*get_last_session) (struct cdrom_device_info *, + struct cdrom_multisession *); + int (*get_mcn) (struct cdrom_device_info *, + struct cdrom_mcn *); + /* hard reset device */ + int (*reset) (struct cdrom_device_info *); + /* play stuff */ + int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); + +/* driver specifications */ + const int capability; /* capability flags */ + /* handle uniform packets for scsi type devices (scsi,atapi) */ + int (*generic_packet) (struct cdrom_device_info *, + struct packet_command *); +}; + +/* the general block_device operations structure: */ +extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode); +extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode); +extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode, unsigned int cmd, unsigned long arg); +extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, + unsigned int clearing); +extern int cdrom_media_changed(struct cdrom_device_info *); + +extern int register_cdrom(struct cdrom_device_info *cdi); +extern void unregister_cdrom(struct cdrom_device_info *cdi); + +typedef struct { + 
int data; + int audio; + int cdi; + int xa; + long error; +} tracktype; + +extern int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written); +extern int cdrom_number_of_slots(struct cdrom_device_info *cdi); +extern int cdrom_mode_select(struct cdrom_device_info *cdi, + struct packet_command *cgc); +extern int cdrom_mode_sense(struct cdrom_device_info *cdi, + struct packet_command *cgc, + int page_code, int page_control); +extern void init_cdrom_command(struct packet_command *cgc, + void *buffer, int len, int type); +extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, + struct packet_command *cgc); + +/* The SCSI spec says there could be 256 slots. */ +#define CDROM_MAX_SLOTS 256 + +struct cdrom_mechstat_header { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 fault : 1; + __u8 changer_state : 2; + __u8 curslot : 5; + __u8 mech_state : 3; + __u8 door_open : 1; + __u8 reserved1 : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 curslot : 5; + __u8 changer_state : 2; + __u8 fault : 1; + __u8 reserved1 : 4; + __u8 door_open : 1; + __u8 mech_state : 3; +#endif + __u8 curlba[3]; + __u8 nslots; + __u16 slot_tablelen; +}; + +struct cdrom_slot { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 disc_present : 1; + __u8 reserved1 : 6; + __u8 change : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 change : 1; + __u8 reserved1 : 6; + __u8 disc_present : 1; +#endif + __u8 reserved2[3]; +}; + +struct cdrom_changer_info { + struct cdrom_mechstat_header hdr; + struct cdrom_slot slots[CDROM_MAX_SLOTS]; +}; + +typedef enum { + mechtype_caddy = 0, + mechtype_tray = 1, + mechtype_popup = 2, + mechtype_individual_changer = 4, + mechtype_cartridge_changer = 5 +} mechtype_t; + +typedef struct { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 ps : 1; + __u8 reserved1 : 1; + __u8 page_code : 6; + __u8 page_length; + __u8 reserved2 : 1; + __u8 bufe : 1; + __u8 ls_v : 1; + __u8 test_write : 1; + __u8 write_type : 4; + __u8 multi_session : 2; /* or border, DVD */ + __u8 fp : 1; + __u8 copy : 1; + __u8 track_mode : 4; + __u8 reserved3 : 4; + __u8 data_block_type : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 page_code : 6; + __u8 reserved1 : 1; + __u8 ps : 1; + __u8 page_length; + __u8 write_type : 4; + __u8 test_write : 1; + __u8 ls_v : 1; + __u8 bufe : 1; + __u8 reserved2 : 1; + __u8 track_mode : 4; + __u8 copy : 1; + __u8 fp : 1; + __u8 multi_session : 2; /* or border, DVD */ + __u8 data_block_type : 4; + __u8 reserved3 : 4; +#endif + __u8 link_size; + __u8 reserved4; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved5 : 2; + __u8 app_code : 6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 app_code : 6; + __u8 reserved5 : 2; +#endif + __u8 session_format; + __u8 reserved6; + __be32 packet_size; + __u16 audio_pause; + __u8 mcn[16]; + __u8 isrc[16]; + __u8 subhdr0; + __u8 subhdr1; + __u8 subhdr2; + __u8 subhdr3; +} __attribute__((packed)) write_param_page; + +struct modesel_head +{ + __u8 reserved1; + __u8 medium; + __u8 reserved2; + __u8 block_desc_length; + __u8 density; + __u8 number_of_blocks_hi; + __u8 number_of_blocks_med; + __u8 number_of_blocks_lo; + __u8 reserved3; + __u8 block_length_hi; + __u8 block_length_med; + __u8 block_length_lo; +}; + +typedef struct { + __u16 report_key_length; + __u8 reserved1; + __u8 reserved2; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 type_code : 2; + __u8 vra : 3; + __u8 ucca : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ucca : 3; + __u8 vra : 3; + __u8 type_code : 2; +#endif + __u8 region_mask; + __u8 rpc_scheme; + __u8 reserved3; +} 
rpc_state_t; + +struct event_header { + __be16 data_len; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 nea : 1; + __u8 reserved1 : 4; + __u8 notification_class : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 notification_class : 3; + __u8 reserved1 : 4; + __u8 nea : 1; +#endif + __u8 supp_event_class; +}; + +struct media_event_desc { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 4; + __u8 media_event_code : 4; + __u8 reserved2 : 6; + __u8 media_present : 1; + __u8 door_open : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 media_event_code : 4; + __u8 reserved1 : 4; + __u8 door_open : 1; + __u8 media_present : 1; + __u8 reserved2 : 6; +#endif + __u8 start_slot; + __u8 end_slot; +}; + +extern int cdrom_get_media_event(struct cdrom_device_info *cdi, struct media_event_desc *med); + +static inline void lba_to_msf(int lba, u8 *m, u8 *s, u8 *f) +{ + lba += CD_MSF_OFFSET; + lba &= 0xffffff; /* negative lbas use only 24 bits */ + *m = lba / (CD_SECS * CD_FRAMES); + lba %= (CD_SECS * CD_FRAMES); + *s = lba / CD_FRAMES; + *f = lba % CD_FRAMES; +} + +static inline int msf_to_lba(u8 m, u8 s, u8 f) +{ + return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; +} +#endif /* _LINUX_CDROM_H */ diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h new file mode 100644 index 0000000..6728c2e --- /dev/null +++ b/include/linux/ceph/auth.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_AUTH_H +#define _FS_CEPH_AUTH_H + +#include +#include + +/* + * Abstract interface for communicating with the authenticate module. + * There is some handshake that takes place between us and the monitor + * to acquire the necessary keys. These are used to generate an + * 'authorizer' that we use when connecting to a service (mds, osd). + */ + +struct ceph_auth_client; +struct ceph_msg; + +struct ceph_authorizer { + void (*destroy)(struct ceph_authorizer *); +}; + +struct ceph_auth_handshake { + struct ceph_authorizer *authorizer; + void *authorizer_buf; + size_t authorizer_buf_len; + void *authorizer_reply_buf; + size_t authorizer_reply_buf_len; + int (*sign_message)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); + int (*check_message_signature)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); +}; + +struct ceph_auth_client_ops { + const char *name; + + /* + * true if we are authenticated and can connect to + * services. + */ + int (*is_authenticated)(struct ceph_auth_client *ac); + + /* + * true if we should (re)authenticate, e.g., when our tickets + * are getting old and crusty. + */ + int (*should_authenticate)(struct ceph_auth_client *ac); + + /* + * build requests and process replies during monitor + * handshake. if handle_reply returns -EAGAIN, we build + * another request. + */ + int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); + int (*handle_reply)(struct ceph_auth_client *ac, int result, + void *buf, void *end); + + /* + * Create authorizer for connecting to a service, and verify + * the response to authenticate the service. 
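+ * + * (Sketch of the expected flow, for orientation: the messenger layer typically calls create_authorizer() when opening a connection to a service, verify_authorizer_reply() on the peer's response, and invalidate_authorizer() when a ticket is rejected so that fresh keys are fetched from the monitor.)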
+ */ + int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_auth_handshake *auth); + /* ensure that an existing authorizer is up to date */ + int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_auth_handshake *auth); + int (*add_authorizer_challenge)(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + void *challenge_buf, + int challenge_buf_len); + int (*verify_authorizer_reply)(struct ceph_auth_client *ac, + struct ceph_authorizer *a); + void (*invalidate_authorizer)(struct ceph_auth_client *ac, + int peer_type); + + /* reset when we (re)connect to a monitor */ + void (*reset)(struct ceph_auth_client *ac); + + void (*destroy)(struct ceph_auth_client *ac); + + int (*sign_message)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); + int (*check_message_signature)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); +}; + +struct ceph_auth_client { + u32 protocol; /* CEPH_AUTH_* */ + void *private; /* for use by protocol implementation */ + const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */ + + bool negotiating; /* true if negotiating protocol */ + const char *name; /* entity name */ + u64 global_id; /* our unique id in system */ + const struct ceph_crypto_key *key; /* our secret key */ + unsigned want_keys; /* which services we want */ + + struct mutex mutex; +}; + +extern struct ceph_auth_client *ceph_auth_init(const char *name, + const struct ceph_crypto_key *key); +extern void ceph_auth_destroy(struct ceph_auth_client *ac); + +extern void ceph_auth_reset(struct ceph_auth_client *ac); + +extern int ceph_auth_build_hello(struct ceph_auth_client *ac, + void *buf, size_t len); +extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, + void *buf, size_t len, + void *reply_buf, size_t reply_len); +int ceph_auth_entity_name_encode(const char *name, void **p, void *end); + +extern int ceph_build_auth(struct ceph_auth_client *ac, + void *msg_buf, size_t msg_len); + +extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); +extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *auth); +void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); +extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *a); +int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + void *challenge_buf, + int challenge_buf_len); +extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a); +extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, + int peer_type); + +static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth, + struct ceph_msg *msg) +{ + if (auth->sign_message) + return auth->sign_message(auth, msg); + return 0; +} + +static inline +int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth, + struct ceph_msg *msg) +{ + if (auth->check_message_signature) + return auth->check_message_signature(auth, msg); + return 0; +} +#endif diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h new file mode 100644 index 0000000..11cdc7c --- /dev/null +++ b/include/linux/ceph/buffer.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FS_CEPH_BUFFER_H +#define __FS_CEPH_BUFFER_H + +#include +#include +#include +#include +#include + +/* + * a simple reference counted buffer. 
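+ * + * Illustrative use only (GFP_NOFS chosen arbitrarily for the sketch): + * + * struct ceph_buffer *b = ceph_buffer_new(len, GFP_NOFS); + * if (!b) + * return -ENOMEM; + * memcpy(b->vec.iov_base, data, len); + * ceph_buffer_put(b);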
+ * + * use kmalloc for smaller sizes, vmalloc for larger sizes. + */ +struct ceph_buffer { + struct kref kref; + struct kvec vec; + size_t alloc_len; +}; + +extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp); +extern void ceph_buffer_release(struct kref *kref); + +static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) +{ + kref_get(&b->kref); + return b; +} + +static inline void ceph_buffer_put(struct ceph_buffer *b) +{ + if (b) + kref_put(&b->kref, ceph_buffer_release); +} + +extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); + +#endif diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h new file mode 100644 index 0000000..d5a5da8 --- /dev/null +++ b/include/linux/ceph/ceph_debug.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_DEBUG_H +#define _FS_CEPH_DEBUG_H + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG + +/* + * wrap pr_debug to include a filename:lineno prefix on each line. + * this incurs some overhead (kernel size and execution time) due to + * the extra function call at each call site. + */ + +# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +# define dout(fmt, ...) \ + pr_debug("%.*s %12.12s:%-4d : " fmt, \ + 8 - (int)sizeof(KBUILD_MODNAME), " ", \ + kbasename(__FILE__), __LINE__, ##__VA_ARGS__) +# else +/* faux printk call just to see any compiler warnings. */ +# define dout(fmt, ...) do { \ + if (0) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) +# endif + +#else + +/* + * or, just wrap pr_debug + */ +# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) + +#endif + +#endif diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h new file mode 100644 index 0000000..39e6f4c --- /dev/null +++ b/include/linux/ceph/ceph_features.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CEPH_FEATURES +#define __CEPH_FEATURES + +/* + * Each time we reclaim bits for reuse we need to specify another bit + * that, if present, indicates we have the new incarnation of that + * feature. Base case is 1 (first use). + */ +#define CEPH_FEATURE_INCARNATION_1 (0ull) +#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL + +#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ + static const uint64_t CEPH_FEATURE_##name = (1ULL<> 24; +} +static inline __u32 ceph_frag_value(__u32 f) +{ + return f & 0xffffffu; +} +static inline __u32 ceph_frag_mask(__u32 f) +{ + return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu; +} +static inline __u32 ceph_frag_mask_shift(__u32 f) +{ + return 24 - ceph_frag_bits(f); +} + +static inline bool ceph_frag_contains_value(__u32 f, __u32 v) +{ + return (v & ceph_frag_mask(f)) == ceph_frag_value(f); +} + +static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) +{ + int newbits = ceph_frag_bits(f) + by; + return ceph_frag_make(newbits, + ceph_frag_value(f) | (i << (24 - newbits))); +} +static inline bool ceph_frag_is_leftmost(__u32 f) +{ + return ceph_frag_value(f) == 0; +} +static inline bool ceph_frag_is_rightmost(__u32 f) +{ + return ceph_frag_value(f) == ceph_frag_mask(f); +} +static inline __u32 ceph_frag_next(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f), + ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f))); +} + +/* + * comparator to sort frags logically, as when traversing the + * number space in ascending order... 
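+ * + * Worked example: ceph_frag_make(1, 0x800000) covers the upper half of the 24-bit hash space; ceph_frag_contains_value(f, v) keeps only the top ceph_frag_bits(f) bits of v via ceph_frag_mask(f) and compares them with ceph_frag_value(f).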
+ */ +int ceph_frag_compare(__u32 a, __u32 b); + +#endif diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h new file mode 100644 index 0000000..cb21c5c --- /dev/null +++ b/include/linux/ceph/ceph_fs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ceph_fs.h - Ceph constants and data types to share between kernel and + * user space. + * + * Most types in this file are defined as little-endian, and are + * primarily intended to describe data structures that pass over the + * wire or that are stored on disk. + * + * LGPL2 + */ + +#ifndef CEPH_FS_H +#define CEPH_FS_H + +#include +#include + +/* + * subprotocol versions. when specific messages types or high-level + * protocols change, bump the affected components. we keep rev + * internal cluster protocols separately from the public, + * client-facing protocol. + */ +#define CEPH_OSDC_PROTOCOL 24 /* server/client */ +#define CEPH_MDSC_PROTOCOL 32 /* server/client */ +#define CEPH_MONC_PROTOCOL 15 /* server/client */ + + +#define CEPH_INO_ROOT 1 +#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ +#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */ + +/* arbitrary limit on max # of monitors (cluster of 3 is typical) */ +#define CEPH_MAX_MON 31 + +/* + * legacy ceph_file_layoute + */ +struct ceph_file_layout_legacy { + /* file -> object mapping */ + __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple + of page size. */ + __le32 fl_stripe_count; /* over this many objects */ + __le32 fl_object_size; /* until objects are this big, then move to + new objects */ + __le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */ + + /* pg -> disk layout */ + __le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */ + + /* object -> pg layout */ + __le32 fl_unused; /* unused; used to be preferred primary for pg (-1 for none) */ + __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ +} __attribute__ ((packed)); + +struct ceph_string; +/* + * ceph_file_layout - describe data layout for a file/inode + */ +struct ceph_file_layout { + /* file -> object mapping */ + u32 stripe_unit; /* stripe unit, in bytes */ + u32 stripe_count; /* over this many objects */ + u32 object_size; /* until objects are this big */ + s64 pool_id; /* rados pool id */ + struct ceph_string __rcu *pool_ns; /* rados pool namespace */ +}; + +extern int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); +extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl, + struct ceph_file_layout_legacy *legacy); +extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl, + struct ceph_file_layout_legacy *legacy); + +#define CEPH_MIN_STRIPE_UNIT 65536 + +struct ceph_dir_layout { + __u8 dl_dir_hash; /* see ceph_hash.h for ids */ + __u8 dl_unused1; + __u16 dl_unused2; + __u32 dl_unused3; +} __attribute__ ((packed)); + +/* crypto algorithms */ +#define CEPH_CRYPTO_NONE 0x0 +#define CEPH_CRYPTO_AES 0x1 + +#define CEPH_AES_IV "cephsageyudagreg" + +/* security/authentication protocols */ +#define CEPH_AUTH_UNKNOWN 0x0 +#define CEPH_AUTH_NONE 0x1 +#define CEPH_AUTH_CEPHX 0x2 + +#define CEPH_AUTH_UID_DEFAULT ((__u64) -1) + + +/********************************************* + * message layer + */ + +/* + * message types + */ + +/* misc */ +#define CEPH_MSG_SHUTDOWN 1 +#define CEPH_MSG_PING 2 + +/* client <-> monitor */ +#define CEPH_MSG_MON_MAP 4 +#define CEPH_MSG_MON_GET_MAP 5 +#define CEPH_MSG_STATFS 13 +#define CEPH_MSG_STATFS_REPLY 14 +#define CEPH_MSG_MON_SUBSCRIBE 15 +#define 
CEPH_MSG_MON_SUBSCRIBE_ACK 16 +#define CEPH_MSG_AUTH 17 +#define CEPH_MSG_AUTH_REPLY 18 +#define CEPH_MSG_MON_GET_VERSION 19 +#define CEPH_MSG_MON_GET_VERSION_REPLY 20 + +/* client <-> mds */ +#define CEPH_MSG_MDS_MAP 21 +#define CEPH_MSG_FS_MAP_USER 103 + +#define CEPH_MSG_CLIENT_SESSION 22 +#define CEPH_MSG_CLIENT_RECONNECT 23 + +#define CEPH_MSG_CLIENT_REQUEST 24 +#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 +#define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_CAPS 0x310 +#define CEPH_MSG_CLIENT_LEASE 0x311 +#define CEPH_MSG_CLIENT_SNAP 0x312 +#define CEPH_MSG_CLIENT_CAPRELEASE 0x313 +#define CEPH_MSG_CLIENT_QUOTA 0x314 + +/* pool ops */ +#define CEPH_MSG_POOLOP_REPLY 48 +#define CEPH_MSG_POOLOP 49 + +/* mon commands */ +#define CEPH_MSG_MON_COMMAND 50 +#define CEPH_MSG_MON_COMMAND_ACK 51 + +/* osd */ +#define CEPH_MSG_OSD_MAP 41 +#define CEPH_MSG_OSD_OP 42 +#define CEPH_MSG_OSD_OPREPLY 43 +#define CEPH_MSG_WATCH_NOTIFY 44 +#define CEPH_MSG_OSD_BACKOFF 61 + + +/* watch-notify operations */ +enum { + CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */ + CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */ + CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */ +}; + + +struct ceph_mon_request_header { + __le64 have_version; + __le16 session_mon; + __le64 session_mon_tid; +} __attribute__ ((packed)); + +struct ceph_mon_statfs { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __u8 contains_data_pool; + __le64 data_pool; +} __attribute__ ((packed)); + +struct ceph_statfs { + __le64 kb, kb_used, kb_avail; + __le64 num_objects; +} __attribute__ ((packed)); + +struct ceph_mon_statfs_reply { + struct ceph_fsid fsid; + __le64 version; + struct ceph_statfs st; +} __attribute__ ((packed)); + +struct ceph_mon_command { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 num_strs; /* always 1 */ + __le32 str_len; + char str[]; +} __attribute__ ((packed)); + +struct ceph_osd_getmap { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 start; +} __attribute__ ((packed)); + +struct ceph_mds_getmap { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +struct ceph_client_mount { + struct ceph_mon_request_header monhdr; +} __attribute__ ((packed)); + +#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */ + +struct ceph_mon_subscribe_item { + __le64 start; + __u8 flags; +} __attribute__ ((packed)); + +struct ceph_mon_subscribe_ack { + __le32 duration; /* seconds */ + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +#define CEPH_FS_CLUSTER_ID_NONE -1 + +/* + * mdsmap flags + */ +#define CEPH_MDSMAP_DOWN (1<<0) /* cluster deliberately down */ + +/* + * mds states + * > 0 -> in + * <= 0 -> out + */ +#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */ +#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees. + empty log. */ +#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */ +#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */ +#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */ +#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */ +#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */ +#define CEPH_MDS_STATE_REPLAYONCE -9 /* up, replaying an active node's journal */ + +#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. 
*/ +#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed + operations (import, rename, etc.) */ +#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */ +#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */ +#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */ +#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */ +#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */ + +extern const char *ceph_mds_state_name(int s); + + +/* + * metadata lock types. + * - these are bitmasks.. we can compose them + * - they also define the lock ordering by the MDS + * - a few of these are internal to the mds + */ +#define CEPH_LOCK_DVERSION 1 +#define CEPH_LOCK_DN 2 +#define CEPH_LOCK_ISNAP 16 +#define CEPH_LOCK_IVERSION 32 /* mds internal */ +#define CEPH_LOCK_IFILE 64 +#define CEPH_LOCK_IAUTH 128 +#define CEPH_LOCK_ILINK 256 +#define CEPH_LOCK_IDFT 512 /* dir frag tree */ +#define CEPH_LOCK_INEST 1024 /* mds internal */ +#define CEPH_LOCK_IXATTR 2048 +#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */ +#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */ +#define CEPH_LOCK_IPOLICY 16384 /* policy lock on dirs. MDS internal */ + +/* client_session ops */ +enum { + CEPH_SESSION_REQUEST_OPEN, + CEPH_SESSION_OPEN, + CEPH_SESSION_REQUEST_CLOSE, + CEPH_SESSION_CLOSE, + CEPH_SESSION_REQUEST_RENEWCAPS, + CEPH_SESSION_RENEWCAPS, + CEPH_SESSION_STALE, + CEPH_SESSION_RECALL_STATE, + CEPH_SESSION_FLUSHMSG, + CEPH_SESSION_FLUSHMSG_ACK, + CEPH_SESSION_FORCE_RO, + CEPH_SESSION_REJECT, +}; + +extern const char *ceph_session_op_name(int op); + +struct ceph_mds_session_head { + __le32 op; + __le64 seq; + struct ceph_timespec stamp; + __le32 max_caps, max_leases; +} __attribute__ ((packed)); + +/* client_request */ +/* + * metadata ops. + * & 0x001000 -> write op + * & 0x010000 -> follow symlink (e.g. stat(), not lstat()). + & & 0x100000 -> use weird ino/path trace + */ +#define CEPH_MDS_OP_WRITE 0x001000 +enum { + CEPH_MDS_OP_LOOKUP = 0x00100, + CEPH_MDS_OP_GETATTR = 0x00101, + CEPH_MDS_OP_LOOKUPHASH = 0x00102, + CEPH_MDS_OP_LOOKUPPARENT = 0x00103, + CEPH_MDS_OP_LOOKUPINO = 0x00104, + CEPH_MDS_OP_LOOKUPNAME = 0x00105, + + CEPH_MDS_OP_SETXATTR = 0x01105, + CEPH_MDS_OP_RMXATTR = 0x01106, + CEPH_MDS_OP_SETLAYOUT = 0x01107, + CEPH_MDS_OP_SETATTR = 0x01108, + CEPH_MDS_OP_SETFILELOCK= 0x01109, + CEPH_MDS_OP_GETFILELOCK= 0x00110, + CEPH_MDS_OP_SETDIRLAYOUT=0x0110a, + + CEPH_MDS_OP_MKNOD = 0x01201, + CEPH_MDS_OP_LINK = 0x01202, + CEPH_MDS_OP_UNLINK = 0x01203, + CEPH_MDS_OP_RENAME = 0x01204, + CEPH_MDS_OP_MKDIR = 0x01220, + CEPH_MDS_OP_RMDIR = 0x01221, + CEPH_MDS_OP_SYMLINK = 0x01222, + + CEPH_MDS_OP_CREATE = 0x01301, + CEPH_MDS_OP_OPEN = 0x00302, + CEPH_MDS_OP_READDIR = 0x00305, + + CEPH_MDS_OP_LOOKUPSNAP = 0x00400, + CEPH_MDS_OP_MKSNAP = 0x01400, + CEPH_MDS_OP_RMSNAP = 0x01401, + CEPH_MDS_OP_LSSNAP = 0x00402, + CEPH_MDS_OP_RENAMESNAP = 0x01403, +}; + +extern const char *ceph_mds_op_name(int op); + + +#define CEPH_SETATTR_MODE 1 +#define CEPH_SETATTR_UID 2 +#define CEPH_SETATTR_GID 4 +#define CEPH_SETATTR_MTIME 8 +#define CEPH_SETATTR_ATIME 16 +#define CEPH_SETATTR_SIZE 32 +#define CEPH_SETATTR_CTIME 64 + +/* + * Ceph setxattr request flags. + */ +#define CEPH_XATTR_CREATE (1 << 0) +#define CEPH_XATTR_REPLACE (1 << 1) +#define CEPH_XATTR_REMOVE (1 << 31) + +/* + * readdir request flags; + */ +#define CEPH_READDIR_REPLY_BITFLAGS (1<<0) + +/* + * readdir reply flags. 
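+ * (Gloss, inferred from usage: CEPH_READDIR_FRAG_END marks the end of the fragment's listing; CEPH_READDIR_HASH_ORDER means entries come back ordered by hash rather than by name.)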
+ */ +#define CEPH_READDIR_FRAG_END (1<<0) +#define CEPH_READDIR_FRAG_COMPLETE (1<<8) +#define CEPH_READDIR_HASH_ORDER (1<<9) +#define CEPH_READDIR_OFFSET_HASH (1<<10) + +/* + * open request flags + */ +#define CEPH_O_RDONLY 00000000 +#define CEPH_O_WRONLY 00000001 +#define CEPH_O_RDWR 00000002 +#define CEPH_O_CREAT 00000100 +#define CEPH_O_EXCL 00000200 +#define CEPH_O_TRUNC 00001000 +#define CEPH_O_DIRECTORY 00200000 +#define CEPH_O_NOFOLLOW 00400000 + +union ceph_mds_request_args { + struct { + __le32 mask; /* CEPH_CAP_* */ + } __attribute__ ((packed)) getattr; + struct { + __le32 mode; + __le32 uid; + __le32 gid; + struct ceph_timespec mtime; + struct ceph_timespec atime; + __le64 size, old_size; /* old_size needed by truncate */ + __le32 mask; /* CEPH_SETATTR_* */ + } __attribute__ ((packed)) setattr; + struct { + __le32 frag; /* which dir fragment */ + __le32 max_entries; /* how many dentries to grab */ + __le32 max_bytes; + __le16 flags; + __le32 offset_hash; + } __attribute__ ((packed)) readdir; + struct { + __le32 mode; + __le32 rdev; + } __attribute__ ((packed)) mknod; + struct { + __le32 mode; + } __attribute__ ((packed)) mkdir; + struct { + __le32 flags; + __le32 mode; + __le32 stripe_unit; /* layout for newly created file */ + __le32 stripe_count; /* ... */ + __le32 object_size; + __le32 file_replication; + __le32 mask; /* CEPH_CAP_* */ + __le32 old_size; + } __attribute__ ((packed)) open; + struct { + __le32 flags; + } __attribute__ ((packed)) setxattr; + struct { + struct ceph_file_layout_legacy layout; + } __attribute__ ((packed)) setlayout; + struct { + __u8 rule; /* currently fcntl or flock */ + __u8 type; /* shared, exclusive, remove*/ + __le64 owner; /* owner of the lock */ + __le64 pid; /* process id requesting the lock */ + __le64 start; /* initial location to lock */ + __le64 length; /* num bytes to lock from start */ + __u8 wait; /* will caller wait for lock to become available? */ + } __attribute__ ((packed)) filelock_change; + struct { + __le32 mask; /* CEPH_CAP_* */ + __le64 snapid; + __le64 parent; + __le32 hash; + } __attribute__ ((packed)) lookupino; +} __attribute__ ((packed)); + +#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ +#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ + +struct ceph_mds_request_head { + __le64 oldest_client_tid; + __le32 mdsmap_epoch; /* on client */ + __le32 flags; /* CEPH_MDS_FLAG_* */ + __u8 num_retry, num_fwd; /* count retry, fwd attempts */ + __le16 num_releases; /* # include cap/lease release records */ + __le32 op; /* mds op code */ + __le32 caller_uid, caller_gid; + __le64 ino; /* use this ino for openc, mkdir, mknod, + etc. (if replaying) */ + union ceph_mds_request_args args; +} __attribute__ ((packed)); + +/* cap/lease release record */ +struct ceph_mds_request_release { + __le64 ino, cap_id; /* ino and unique cap id */ + __le32 caps, wanted; /* new issued, wanted */ + __le32 seq, issue_seq, mseq; + __le32 dname_seq; /* if releasing a dentry lease, a */ + __le32 dname_len; /* string follows. */ +} __attribute__ ((packed)); + +/* client reply */ +struct ceph_mds_reply_head { + __le32 op; + __le32 result; + __le32 mdsmap_epoch; + __u8 safe; /* true if committed to disk */ + __u8 is_dentry, is_target; /* true if dentry, target inode records + are included with reply */ +} __attribute__ ((packed)); + +/* one for each node split */ +struct ceph_frag_tree_split { + __le32 frag; /* this frag splits... 
*/ + __le32 by; /* ...by this many bits */ +} __attribute__ ((packed)); + +struct ceph_frag_tree_head { + __le32 nsplits; /* num ceph_frag_tree_split records */ + struct ceph_frag_tree_split splits[]; +} __attribute__ ((packed)); + +/* capability issue, for bundling with mds reply */ +struct ceph_mds_reply_cap { + __le32 caps, wanted; /* caps issued, wanted */ + __le64 cap_id; + __le32 seq, mseq; + __le64 realm; /* snap realm */ + __u8 flags; /* CEPH_CAP_FLAG_* */ +} __attribute__ ((packed)); + +#define CEPH_CAP_FLAG_AUTH (1 << 0) /* cap is issued by auth mds */ +#define CEPH_CAP_FLAG_RELEASE (1 << 1) /* release the cap */ + +/* inode record, for bundling with mds reply */ +struct ceph_mds_reply_inode { + __le64 ino; + __le64 snapid; + __le32 rdev; + __le64 version; /* inode version */ + __le64 xattr_version; /* version for xattr blob */ + struct ceph_mds_reply_cap cap; /* caps issued for this inode */ + struct ceph_file_layout_legacy layout; + struct ceph_timespec ctime, mtime, atime; + __le32 time_warp_seq; + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + __le32 mode, uid, gid; + __le32 nlink; + __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */ + struct ceph_timespec rctime; + struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ +} __attribute__ ((packed)); +/* followed by frag array, symlink string, dir layout, xattr blob */ + +/* reply_lease follows dname, and reply_inode */ +struct ceph_mds_reply_lease { + __le16 mask; /* lease type(s) */ + __le32 duration_ms; /* lease duration */ + __le32 seq; +} __attribute__ ((packed)); + +struct ceph_mds_reply_dirfrag { + __le32 frag; /* fragment */ + __le32 auth; /* auth mds, if this is a delegation point */ + __le32 ndist; /* number of mds' this is replicated on */ + __le32 dist[]; +} __attribute__ ((packed)); + +#define CEPH_LOCK_FCNTL 1 +#define CEPH_LOCK_FLOCK 2 +#define CEPH_LOCK_FCNTL_INTR 3 +#define CEPH_LOCK_FLOCK_INTR 4 + + +#define CEPH_LOCK_SHARED 1 +#define CEPH_LOCK_EXCL 2 +#define CEPH_LOCK_UNLOCK 4 + +struct ceph_filelock { + __le64 start; /* file offset to start lock at */ + __le64 length; /* num bytes to lock; 0 for all following start */ + __le64 client; /* which client holds the lock */ + __le64 owner; /* owner of the lock */ + __le64 pid; /* process id holding the lock on the client */ + __u8 type; /* shared lock, exclusive lock, or unlock */ +} __attribute__ ((packed)); + + +/* file access modes */ +#define CEPH_FILE_MODE_PIN 0 +#define CEPH_FILE_MODE_RD 1 +#define CEPH_FILE_MODE_WR 2 +#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ +#define CEPH_FILE_MODE_LAZY 4 /* lazy io */ +#define CEPH_FILE_MODE_BITS 4 + +int ceph_flags_to_mode(int flags); + +#define CEPH_INLINE_NONE ((__u64)-1) + +/* capability bits */ +#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ + +/* generic cap bits */ +#define CEPH_CAP_GSHARED 1 /* client can read */ +#define CEPH_CAP_GEXCL 2 /* client can read and update */ +#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */ +#define CEPH_CAP_GRD 8 /* (file) client can read */ +#define CEPH_CAP_GWR 16 /* (file) client can write */ +#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */ +#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */ +#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */ + +#define CEPH_CAP_SIMPLE_BITS 2 +#define CEPH_CAP_FILE_BITS 8 + +/* per-lock shift */ +#define CEPH_CAP_SAUTH 2 +#define CEPH_CAP_SLINK 4 +#define CEPH_CAP_SXATTR 6 +#define CEPH_CAP_SFILE 8 +#define 
CEPH_CAP_SFLOCK 20 + +#define CEPH_CAP_BITS 22 + +/* composed values */ +#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) +#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH) +#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK) +#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK) +#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR) +#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR) +#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE) +#define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK) +#define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK) + + +/* cap masks (for getattr) */ +#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN +#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */ +#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN +#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED +#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */ +#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED +#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \ + CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_XATTR_SHARED) +#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_FILE_RD) +#define CEPH_STAT_RSTAT CEPH_CAP_FILE_WREXTEND + +#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_XATTR_SHARED | \ + CEPH_CAP_FILE_SHARED) +#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \ + CEPH_CAP_FILE_CACHE) + +#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \ + CEPH_CAP_LINK_EXCL | \ + CEPH_CAP_XATTR_EXCL | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | \ + CEPH_CAP_FILE_SHARED) +#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) +#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ + CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \ + CEPH_CAP_PIN) + +#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ + CEPH_LOCK_IXATTR) + +int ceph_caps_for_mode(int mode); + +enum { + CEPH_CAP_OP_GRANT, /* mds->client grant */ + CEPH_CAP_OP_REVOKE, /* mds->client revoke */ + CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */ + CEPH_CAP_OP_EXPORT, /* mds has exported the cap */ + CEPH_CAP_OP_IMPORT, /* mds has imported the cap */ + CEPH_CAP_OP_UPDATE, /* client->mds update */ + CEPH_CAP_OP_DROP, /* client->mds drop cap bits */ + CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */ + CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */ + CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */ + CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client 
flushed snapped metadata */ + CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */ + CEPH_CAP_OP_RENEW, /* client->mds renewal request */ +}; + +extern const char *ceph_cap_op_name(int op); + +/* flags field in client cap messages (version >= 10) */ +#define CEPH_CLIENT_CAPS_SYNC (1<<0) +#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1) +#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2) + +/* + * caps message, used for capability callbacks, acks, requests, etc. + */ +struct ceph_mds_caps { + __le32 op; /* CEPH_CAP_OP_* */ + __le64 ino, realm; + __le64 cap_id; + __le32 seq, issue_seq; + __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */ + __le32 migrate_seq; + __le64 snap_follows; + __le32 snap_trace_len; + + /* authlock */ + __le32 uid, gid, mode; + + /* linklock */ + __le32 nlink; + + /* xattrlock */ + __le32 xattr_len; + __le64 xattr_version; + + /* filelock */ + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + struct ceph_timespec mtime, atime, ctime; + struct ceph_file_layout_legacy layout; + __le32 time_warp_seq; +} __attribute__ ((packed)); + +struct ceph_mds_cap_peer { + __le64 cap_id; + __le32 seq; + __le32 mseq; + __le32 mds; + __u8 flags; +} __attribute__ ((packed)); + +/* cap release msg head */ +struct ceph_mds_cap_release { + __le32 num; /* number of cap_items that follow */ +} __attribute__ ((packed)); + +struct ceph_mds_cap_item { + __le64 ino; + __le64 cap_id; + __le32 migrate_seq, seq; +} __attribute__ ((packed)); + +#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ +#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */ +#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */ +#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */ + +extern const char *ceph_lease_op_name(int o); + +/* lease msg header */ +struct ceph_mds_lease { + __u8 action; /* CEPH_MDS_LEASE_* */ + __le16 mask; /* which lease */ + __le64 ino; + __le64 first, last; /* snap range */ + __le32 seq; + __le32 duration_ms; /* duration of renewal */ +} __attribute__ ((packed)); +/* followed by a __le32+string for dname */ + +/* client reconnect */ +struct ceph_mds_cap_reconnect { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ + __le32 flock_len; /* size of flock state blob, if any */ +} __attribute__ ((packed)); +/* followed by flock blob */ + +struct ceph_mds_cap_reconnect_v1 { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 size; + struct ceph_timespec mtime, atime; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ +} __attribute__ ((packed)); + +struct ceph_mds_snaprealm_reconnect { + __le64 ino; /* snap realm base */ + __le64 seq; /* snap seq for this snap realm */ + __le64 parent; /* parent realm */ +} __attribute__ ((packed)); + +/* + * snaps + */ +enum { + CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */ + CEPH_SNAP_OP_CREATE, + CEPH_SNAP_OP_DESTROY, + CEPH_SNAP_OP_SPLIT, +}; + +extern const char *ceph_snap_op_name(int o); + +/* snap msg header */ +struct ceph_mds_snap_head { + __le32 op; /* CEPH_SNAP_OP_* */ + __le64 split; /* ino to split off, if any */ + __le32 num_split_inos; /* # inos belonging to new child realm */ + __le32 num_split_realms; /* # child realms under new child realm */ + __le32 trace_len; /* size of snap trace blob */ +} __attribute__ ((packed)); +/* followed by split ino list, then split realms, then the trace blob */ + +/* + * encode info about a snaprealm, as viewed by a client + */ +struct ceph_mds_snap_realm { + __le64 ino; /* ino */ + __le64 
created; /* snap: when created */ + __le64 parent; /* ino: parent realm */ + __le64 parent_since; /* snap: same parent since */ + __le64 seq; /* snap: version */ + __le32 num_snaps; + __le32 num_prior_parent_snaps; +} __attribute__ ((packed)); +/* followed by my snap list, then prior parent snap list */ + +/* + * quotas + */ +struct ceph_mds_quota { + __le64 ino; /* ino */ + struct ceph_timespec rctime; + __le64 rbytes; /* dir stats */ + __le64 rfiles; + __le64 rsubdirs; + __u8 struct_v; /* compat */ + __u8 struct_compat; + __le32 struct_len; + __le64 max_bytes; /* quota max. bytes */ + __le64 max_files; /* quota max. files */ +} __attribute__ ((packed)); + +#endif diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h new file mode 100644 index 0000000..fda474c --- /dev/null +++ b/include/linux/ceph/ceph_hash.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef FS_CEPH_HASH_H +#define FS_CEPH_HASH_H + +#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */ +#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */ + +extern unsigned ceph_str_hash_linux(const char *s, unsigned len); +extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len); + +extern unsigned ceph_str_hash(int type, const char *s, unsigned len); +extern const char *ceph_str_hash_name(int type); + +#endif diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h new file mode 100644 index 0000000..17bc758 --- /dev/null +++ b/include/linux/ceph/cls_lock_client.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H +#define _LINUX_CEPH_CLS_LOCK_CLIENT_H + +#include + +enum ceph_cls_lock_type { + CEPH_CLS_LOCK_NONE = 0, + CEPH_CLS_LOCK_EXCLUSIVE = 1, + CEPH_CLS_LOCK_SHARED = 2, +}; + +struct ceph_locker_id { + struct ceph_entity_name name; /* locker's client name */ + char *cookie; /* locker's cookie */ +}; + +struct ceph_locker_info { + struct ceph_entity_addr addr; /* locker's address */ +}; + +struct ceph_locker { + struct ceph_locker_id id; + struct ceph_locker_info info; +}; + +int ceph_cls_lock(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 type, char *cookie, + char *tag, char *desc, u8 flags); +int ceph_cls_unlock(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, char *cookie); +int ceph_cls_break_lock(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, char *cookie, + struct ceph_entity_name *locker); +int ceph_cls_set_cookie(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 type, char *old_cookie, + char *tag, char *new_cookie); + +void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers); + +int ceph_cls_lock_info(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 *type, char **tag, + struct ceph_locker **lockers, u32 *num_lockers); + +int ceph_cls_assert_locked(struct ceph_osd_request *req, int which, + char *lock_name, u8 type, char *cookie, char *tag); + +#endif diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h new file mode 100644 index 0000000..cf5e840 --- /dev/null +++ b/include/linux/ceph/debugfs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_DEBUGFS_H +#define _FS_CEPH_DEBUGFS_H + +#include 
+#include + +#define CEPH_DEFINE_SHOW_FUNC(name) \ +static int name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, name, inode->i_private); \ +} \ + \ +static const struct file_operations name##_fops = { \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + +/* debugfs.c */ +extern void ceph_debugfs_init(void); +extern void ceph_debugfs_cleanup(void); +extern void ceph_debugfs_client_init(struct ceph_client *client); +extern void ceph_debugfs_client_cleanup(struct ceph_client *client); + +#endif + diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h new file mode 100644 index 0000000..450384f --- /dev/null +++ b/include/linux/ceph/decode.h @@ -0,0 +1,390 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CEPH_DECODE_H +#define __CEPH_DECODE_H + +#include +#include +#include +#include +#include + +#include + +/* + * in all cases, + * void **p pointer to position pointer + * void *end pointer to end of buffer (last byte + 1) + */ + +static inline u64 ceph_decode_64(void **p) +{ + u64 v = get_unaligned_le64(*p); + *p += sizeof(u64); + return v; +} +static inline u32 ceph_decode_32(void **p) +{ + u32 v = get_unaligned_le32(*p); + *p += sizeof(u32); + return v; +} +static inline u16 ceph_decode_16(void **p) +{ + u16 v = get_unaligned_le16(*p); + *p += sizeof(u16); + return v; +} +static inline u8 ceph_decode_8(void **p) +{ + u8 v = *(u8 *)*p; + (*p)++; + return v; +} +static inline void ceph_decode_copy(void **p, void *pv, size_t n) +{ + memcpy(pv, *p, n); + *p += n; +} + +/* + * bounds check input. + */ +static inline bool ceph_has_room(void **p, void *end, size_t n) +{ + return end >= *p && n <= end - *p; +} + +#define ceph_decode_need(p, end, n, bad) \ + do { \ + if (!likely(ceph_has_room(p, end, n))) \ + goto bad; \ + } while (0) + +#define ceph_decode_64_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u64), bad); \ + v = ceph_decode_64(p); \ + } while (0) +#define ceph_decode_32_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u32), bad); \ + v = ceph_decode_32(p); \ + } while (0) +#define ceph_decode_16_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u16), bad); \ + v = ceph_decode_16(p); \ + } while (0) +#define ceph_decode_8_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u8), bad); \ + v = ceph_decode_8(p); \ + } while (0) + +#define ceph_decode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_decode_need(p, end, n, bad); \ + ceph_decode_copy(p, pv, n); \ + } while (0) + +/* + * Allocate a buffer big enough to hold the wire-encoded string, and + * decode the string into it. The resulting string will always be + * terminated with '\0'. If successful, *p will be advanced + * past the decoded data. Also, if lenp is not a null pointer, the + * length (not including the terminating '\0') will be recorded in + * *lenp. Note that a zero-length string is a valid return value. + * + * Returns a pointer to the newly-allocated string buffer, or a + * pointer-coded errno if an error occurs. Neither *p nor *lenp + * will have been updated if an error is returned. 
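+ * + * Illustrative use only: + * + * char *name = ceph_extract_encoded_string(&p, end, NULL, GFP_NOFS); + * if (IS_ERR(name)) + * return PTR_ERR(name);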
+ * + * There are two possible failures: + * - converting the string would require accessing memory at or + * beyond the "end" pointer provided (-ERANGE) + * - memory could not be allocated for the result (-ENOMEM) + */ +static inline char *ceph_extract_encoded_string(void **p, void *end, + size_t *lenp, gfp_t gfp) +{ + u32 len; + void *sp = *p; + char *buf; + + ceph_decode_32_safe(&sp, end, len, bad); + if (!ceph_has_room(&sp, end, len)) + goto bad; + + buf = kmalloc(len + 1, gfp); + if (!buf) + return ERR_PTR(-ENOMEM); + + if (len) + memcpy(buf, sp, len); + buf[len] = '\0'; + + *p = (char *) *p + sizeof (u32) + len; + if (lenp) + *lenp = (size_t) len; + + return buf; + +bad: + return ERR_PTR(-ERANGE); +} + +/* + * skip helpers + */ +#define ceph_decode_skip_n(p, end, n, bad) \ + do { \ + ceph_decode_need(p, end, n, bad); \ + *p += n; \ + } while (0) + +#define ceph_decode_skip_64(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u64), bad) + +#define ceph_decode_skip_32(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u32), bad) + +#define ceph_decode_skip_16(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u16), bad) + +#define ceph_decode_skip_8(p, end, bad) \ +ceph_decode_skip_n(p, end, sizeof(u8), bad) + +#define ceph_decode_skip_string(p, end, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + ceph_decode_skip_n(p, end, len, bad); \ + } while (0) + +#define ceph_decode_skip_set(p, end, type, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + while (len--) \ + ceph_decode_skip_##type(p, end, bad); \ + } while (0) + +#define ceph_decode_skip_map(p, end, ktype, vtype, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + while (len--) { \ + ceph_decode_skip_##ktype(p, end, bad); \ + ceph_decode_skip_##vtype(p, end, bad); \ + } \ + } while (0) + +#define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \ + do { \ + u32 len; \ + \ + ceph_decode_32_safe(p, end, len, bad); \ + while (len--) { \ + ceph_decode_skip_##ktype1(p, end, bad); \ + ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \ + } \ + } while (0) + +/* + * struct ceph_timespec <-> struct timespec64 + */ +static inline void ceph_decode_timespec64(struct timespec64 *ts, + const struct ceph_timespec *tv) +{ + /* + * This will still overflow in year 2106. We could extend + * the protocol to steal two more bits from tv_nsec to + * add three more 136 year epochs after that the way ext4 + * does if necessary. 
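+	 *
+	 * For reference: tv_sec here is an unsigned 32-bit count of
+	 * seconds since the 1970 epoch, so it wraps 2^32 - 1 seconds
+	 * after that, i.e. in February 2106; that is where the year
+	 * above comes from.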
+ */ + ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec); + ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec); +} +static inline void ceph_encode_timespec64(struct ceph_timespec *tv, + const struct timespec64 *ts) +{ + tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); + tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); +} + +/* + * sockaddr_storage <-> ceph_sockaddr + */ +#define CEPH_ENTITY_ADDR_TYPE_NONE 0 +#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1) + +static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a) +{ + __be16 ss_family = htons(a->in_addr.ss_family); + a->in_addr.ss_family = *(__u16 *)&ss_family; + + /* Banner addresses require TYPE_NONE */ + a->type = CEPH_ENTITY_ADDR_TYPE_NONE; +} +static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a) +{ + __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; + a->in_addr.ss_family = ntohs(ss_family); + WARN_ON(a->in_addr.ss_family == 512); + a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY; +} + +extern int ceph_decode_entity_addr(void **p, void *end, + struct ceph_entity_addr *addr); +/* + * encoders + */ +static inline void ceph_encode_64(void **p, u64 v) +{ + put_unaligned_le64(v, (__le64 *)*p); + *p += sizeof(u64); +} +static inline void ceph_encode_32(void **p, u32 v) +{ + put_unaligned_le32(v, (__le32 *)*p); + *p += sizeof(u32); +} +static inline void ceph_encode_16(void **p, u16 v) +{ + put_unaligned_le16(v, (__le16 *)*p); + *p += sizeof(u16); +} +static inline void ceph_encode_8(void **p, u8 v) +{ + *(u8 *)*p = v; + (*p)++; +} +static inline void ceph_encode_copy(void **p, const void *s, int len) +{ + memcpy(*p, s, len); + *p += len; +} + +/* + * filepath, string encoders + */ +static inline void ceph_encode_filepath(void **p, void *end, + u64 ino, const char *path) +{ + u32 len = path ? strlen(path) : 0; + BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end); + ceph_encode_8(p, 1); + ceph_encode_64(p, ino); + ceph_encode_32(p, len); + if (len) + memcpy(*p, path, len); + *p += len; +} + +static inline void ceph_encode_string(void **p, void *end, + const char *s, u32 len) +{ + BUG_ON(*p + sizeof(len) + len > end); + ceph_encode_32(p, len); + if (len) + memcpy(*p, s, len); + *p += len; +} + +/* + * version and length starting block encoders/decoders + */ + +/* current code version (u8) + compat code version (u8) + len of struct (u32) */ +#define CEPH_ENCODING_START_BLK_LEN 6 + +/** + * ceph_start_encoding - start encoding block + * @struct_v: current (code) version of the encoding + * @struct_compat: oldest code version that can decode it + * @struct_len: length of struct encoding + */ +static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat, + u32 struct_len) +{ + ceph_encode_8(p, struct_v); + ceph_encode_8(p, struct_compat); + ceph_encode_32(p, struct_len); +} + +/** + * ceph_start_decoding - start decoding block + * @v: current version of the encoding that the code supports + * @name: name of the struct (free-form) + * @struct_v: out param for the encoding version + * @struct_len: out param for the length of struct encoding + * + * Validates the length of struct encoding, so unsafe ceph_decode_* + * variants can be used for decoding. 
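+ *
+ * A typical caller pattern, as a sketch (the supported version 2 and
+ * the name "foo" are invented for the example):
+ *
+ *	u8 struct_v;
+ *	u32 struct_len;
+ *	int ret;
+ *
+ *	ret = ceph_start_decoding(&p, end, 2, "foo", &struct_v, &struct_len);
+ *	if (ret)
+ *		return ret;
+ *	x = ceph_decode_64(&p);   (unsafe variant, now known to be in bounds)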
+ */ +static inline int ceph_start_decoding(void **p, void *end, u8 v, + const char *name, u8 *struct_v, + u32 *struct_len) +{ + u8 struct_compat; + + ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad); + *struct_v = ceph_decode_8(p); + struct_compat = ceph_decode_8(p); + if (v < struct_compat) { + pr_warn("got struct_v %d struct_compat %d > %d of %s\n", + *struct_v, struct_compat, v, name); + return -EINVAL; + } + + *struct_len = ceph_decode_32(p); + ceph_decode_need(p, end, *struct_len, bad); + return 0; + +bad: + return -ERANGE; +} + +#define ceph_encode_need(p, end, n, bad) \ + do { \ + if (!likely(ceph_has_room(p, end, n))) \ + goto bad; \ + } while (0) + +#define ceph_encode_64_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u64), bad); \ + ceph_encode_64(p, v); \ + } while (0) +#define ceph_encode_32_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u32), bad); \ + ceph_encode_32(p, v); \ + } while (0) +#define ceph_encode_16_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u16), bad); \ + ceph_encode_16(p, v); \ + } while (0) +#define ceph_encode_8_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u8), bad); \ + ceph_encode_8(p, v); \ + } while (0) + +#define ceph_encode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_copy(p, pv, n); \ + } while (0) +#define ceph_encode_string_safe(p, end, s, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_string(p, end, s, n); \ + } while (0) + + +#endif diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h new file mode 100644 index 0000000..b9dbda1 --- /dev/null +++ b/include/linux/ceph/libceph.h @@ -0,0 +1,320 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_LIBCEPH_H +#define _FS_CEPH_LIBCEPH_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * mount options + */ +#define CEPH_OPT_FSID (1<<0) +#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ +#define CEPH_OPT_MYIP (1<<2) /* specified my ip */ +#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ +#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ +#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ +#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ +#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ + +#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) + +#define ceph_set_opt(client, opt) \ + (client)->options->flags |= CEPH_OPT_##opt; +#define ceph_test_opt(client, opt) \ + (!!((client)->options->flags & CEPH_OPT_##opt)) + +struct ceph_options { + int flags; + struct ceph_fsid fsid; + struct ceph_entity_addr my_addr; + unsigned long mount_timeout; /* jiffies */ + unsigned long osd_idle_ttl; /* jiffies */ + unsigned long osd_keepalive_timeout; /* jiffies */ + unsigned long osd_request_timeout; /* jiffies */ + + /* + * any type that can't be simply compared or doesn't need + * to be compared should go beyond this point, + * ceph_compare_options() should be updated accordingly + */ + + struct ceph_entity_addr *mon_addr; /* should be the first + pointer type of args */ + int num_mon; + char *name; + struct ceph_crypto_key *key; +}; + +/* + * defaults + */ +#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) +#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 
1000) +#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) +#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ + +#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) +#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) +#define CEPH_MONC_PING_TIMEOUT msecs_to_jiffies(30 * 1000) +#define CEPH_MONC_HUNT_BACKOFF 2 +#define CEPH_MONC_HUNT_MAX_MULT 10 + +#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) +#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) + +/* + * The largest possible rbd data object is 32M. + * The largest possible rbd object map object is 64M. + * + * There is no limit on the size of cephfs objects, but it has to obey + * rsize and wsize mount options anyway. + */ +#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024) + +#define CEPH_AUTH_NAME_DEFAULT "guest" + +/* mount state */ +enum { + CEPH_MOUNT_MOUNTING, + CEPH_MOUNT_MOUNTED, + CEPH_MOUNT_UNMOUNTING, + CEPH_MOUNT_UNMOUNTED, + CEPH_MOUNT_SHUTDOWN, +}; + +static inline unsigned long ceph_timeout_jiffies(unsigned long timeout) +{ + return timeout ?: MAX_SCHEDULE_TIMEOUT; +} + +struct ceph_mds_client; + +/* + * per client state + * + * possibly shared by multiple mount points, if they are + * mounting the same ceph filesystem/cluster. + */ +struct ceph_client { + struct ceph_fsid fsid; + bool have_fsid; + + void *private; + + struct ceph_options *options; + + struct mutex mount_mutex; /* serialize mount attempts */ + wait_queue_head_t auth_wq; + int auth_err; + + int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); + + u64 supported_features; + u64 required_features; + + struct ceph_messenger msgr; /* messenger instance */ + struct ceph_mon_client monc; + struct ceph_osd_client osdc; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *debugfs_monmap; + struct dentry *debugfs_osdmap; + struct dentry *debugfs_options; +#endif +}; + +#define from_msgr(ms) container_of(ms, struct ceph_client, msgr) + + +/* + * snapshots + */ + +/* + * A "snap context" is the set of existing snapshots when we + * write data. It is used by the OSD to guide its COW behavior. + * + * The ceph_snap_context is refcounted, and attached to each dirty + * page, indicating which context the dirty data belonged when it was + * dirtied. + */ +struct ceph_snap_context { + refcount_t nref; + u64 seq; + u32 num_snaps; + u64 snaps[]; +}; + +extern struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, + gfp_t gfp_flags); +extern struct ceph_snap_context *ceph_get_snap_context( + struct ceph_snap_context *sc); +extern void ceph_put_snap_context(struct ceph_snap_context *sc); + +/* + * calculate the number of pages a given length and offset map onto, + * if we align the data. + */ +static inline int calc_pages_for(u64 off, u64 len) +{ + return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) - + (off >> PAGE_SHIFT); +} + +#define RB_BYVAL(a) (a) +#define RB_BYPTR(a) (&(a)) +#define RB_CMP3WAY(a, b) ((a) < (b) ? 
-1 : (a) > (b)) + +#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ +static void insert_##name(struct rb_root *root, type *t) \ +{ \ + struct rb_node **n = &root->rb_node; \ + struct rb_node *parent = NULL; \ + \ + BUG_ON(!RB_EMPTY_NODE(&t->nodefld)); \ + \ + while (*n) { \ + type *cur = rb_entry(*n, type, nodefld); \ + int cmp; \ + \ + parent = *n; \ + cmp = cmpexp(keyexp(t->keyfld), keyexp(cur->keyfld)); \ + if (cmp < 0) \ + n = &(*n)->rb_left; \ + else if (cmp > 0) \ + n = &(*n)->rb_right; \ + else \ + BUG(); \ + } \ + \ + rb_link_node(&t->nodefld, parent, n); \ + rb_insert_color(&t->nodefld, root); \ +} \ +static void erase_##name(struct rb_root *root, type *t) \ +{ \ + BUG_ON(RB_EMPTY_NODE(&t->nodefld)); \ + rb_erase(&t->nodefld, root); \ + RB_CLEAR_NODE(&t->nodefld); \ +} + +/* + * @lookup_param_type is a parameter and not constructed from (@type, + * @keyfld) with typeof() because adding const is too unwieldy. + */ +#define DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, cmpexp, keyexp, \ + lookup_param_type, nodefld) \ +static type *lookup_##name(struct rb_root *root, lookup_param_type key) \ +{ \ + struct rb_node *n = root->rb_node; \ + \ + while (n) { \ + type *cur = rb_entry(n, type, nodefld); \ + int cmp; \ + \ + cmp = cmpexp(key, keyexp(cur->keyfld)); \ + if (cmp < 0) \ + n = n->rb_left; \ + else if (cmp > 0) \ + n = n->rb_right; \ + else \ + return cur; \ + } \ + \ + return NULL; \ +} + +#define DEFINE_RB_FUNCS2(name, type, keyfld, cmpexp, keyexp, \ + lookup_param_type, nodefld) \ +DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ +DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, cmpexp, keyexp, \ + lookup_param_type, nodefld) + +/* + * Shorthands for integer keys. + */ +#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, RB_CMP3WAY, RB_BYVAL, nodefld) + +#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \ +extern type __lookup_##name##_key; \ +DEFINE_RB_LOOKUP_FUNC2(name, type, keyfld, RB_CMP3WAY, RB_BYVAL, \ + typeof(__lookup_##name##_key.keyfld), nodefld) + +#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \ +DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) + +extern struct kmem_cache *ceph_inode_cachep; +extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_cap_flush_cachep; +extern struct kmem_cache *ceph_dentry_cachep; +extern struct kmem_cache *ceph_file_cachep; +extern struct kmem_cache *ceph_dir_file_cachep; + +/* ceph_common.c */ +extern bool libceph_compatible(void *data); + +extern const char *ceph_msg_type_name(int type); +extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); +extern void *ceph_kvmalloc(size_t size, gfp_t flags); + +extern struct ceph_options *ceph_parse_options(char *options, + const char *dev_name, const char *dev_name_end, + int (*parse_extra_token)(char *c, void *private), + void *private); +int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, + bool show_all); +extern void ceph_destroy_options(struct ceph_options *opt); +extern int ceph_compare_options(struct ceph_options *new_opt, + struct ceph_client *client); +struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private); +struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client); +u64 ceph_client_gid(struct ceph_client *client); +extern void ceph_destroy_client(struct ceph_client *client); +extern void 
ceph_reset_client_addr(struct ceph_client *client);
+extern int __ceph_open_session(struct ceph_client *client,
+			       unsigned long started);
+extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+				unsigned long timeout);
+
+/* pagevec.c */
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+extern void ceph_put_page_vector(struct page **pages, int num_pages,
+				 bool dirty);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+extern int ceph_copy_user_to_page_vector(struct page **pages,
+					 const void __user *data,
+					 loff_t off, size_t len);
+extern void ceph_copy_to_page_vector(struct page **pages,
+				     const void *data,
+				     loff_t off, size_t len);
+extern void ceph_copy_from_page_vector(struct page **pages,
+				       void *data,
+				       loff_t off, size_t len);
+extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
+
+
+#endif /* _FS_CEPH_LIBCEPH_H */
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
new file mode 100644
index 0000000..0067d76
--- /dev/null
+++ b/include/linux/ceph/mdsmap.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_MDSMAP_H
+#define _FS_CEPH_MDSMAP_H
+
+#include <linux/bug.h>
+#include <linux/ceph/types.h>
+
+/*
+ * mds map - describe servers in the mds cluster.
+ *
+ * we limit fields to those the client actually cares about
+ */
+struct ceph_mds_info {
+	u64 global_id;
+	struct ceph_entity_addr addr;
+	s32 state;
+	int num_export_targets;
+	bool laggy;
+	u32 *export_targets;
+};
+
+struct ceph_mdsmap {
+	u32 m_epoch, m_client_epoch, m_last_failure;
+	u32 m_root;
+	u32 m_session_timeout;		/* seconds */
+	u32 m_session_autoclose;	/* seconds */
+	u64 m_max_file_size;
+	u32 m_max_mds;			/* size of m_addr, m_state arrays */
+	int m_num_mds;
+	struct ceph_mds_info *m_info;
+
+	/* which object pools file data can be stored in */
+	int m_num_data_pg_pools;
+	u64 *m_data_pg_pools;
+	u64 m_cas_pg_pool;
+
+	bool m_enabled;
+	bool m_damaged;
+	int m_num_laggy;
+};
+
+static inline struct ceph_entity_addr *
+ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
+{
+	if (w >= m->m_num_mds)
+		return NULL;
+	return &m->m_info[w].addr;
+}
+
+static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
+{
+	BUG_ON(w < 0);
+	if (w >= m->m_num_mds)
+		return CEPH_MDS_STATE_DNE;
+	return m->m_info[w].state;
+}
+
+static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+{
+	if (w >= 0 && w < m->m_num_mds)
+		return m->m_info[w].laggy;
+	return false;
+}
+
+extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
+extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
+
+#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
new file mode 100644
index 0000000..76371aa
--- /dev/null
+++ b/include/linux/ceph/messenger.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FS_CEPH_MESSENGER_H
+#define __FS_CEPH_MESSENGER_H
+
+#include <linux/bvec.h>
+#include <linux/kref.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/radix-tree.h>
+#include <linux/uio.h>
+#include <linux/workqueue.h>
+
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
+
+struct ceph_msg;
+struct ceph_connection;
+
+/*
+ * Ceph defines these callbacks for handling connection events.
+ */
+struct ceph_connection_operations {
+	struct ceph_connection *(*get)(struct ceph_connection *);
+	void (*put)(struct ceph_connection *);
+
+	/* handle an incoming message.
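+	 * Judging by the in-tree dispatch implementations (an
+	 * observation, not a guarantee stated in this header), the
+	 * handler owns a reference on the message and drops it with
+	 * ceph_msg_put() when done.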
*/ + void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); + + /* authorize an outgoing connection */ + struct ceph_auth_handshake *(*get_authorizer) ( + struct ceph_connection *con, + int *proto, int force_new); + int (*add_authorizer_challenge)(struct ceph_connection *con, + void *challenge_buf, + int challenge_buf_len); + int (*verify_authorizer_reply) (struct ceph_connection *con); + int (*invalidate_authorizer)(struct ceph_connection *con); + + /* there was some error on the socket (disconnect, whatever) */ + void (*fault) (struct ceph_connection *con); + + /* a remote host as terminated a message exchange session, and messages + * we sent (or they tried to send us) may be lost. */ + void (*peer_reset) (struct ceph_connection *con); + + struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, + struct ceph_msg_header *hdr, + int *skip); + + void (*reencode_message) (struct ceph_msg *msg); + + int (*sign_message) (struct ceph_msg *msg); + int (*check_message_signature) (struct ceph_msg *msg); +}; + +/* use format string %s%d */ +#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) + +struct ceph_messenger { + struct ceph_entity_inst inst; /* my name+address */ + struct ceph_entity_addr my_enc_addr; + + atomic_t stopping; + possible_net_t net; + + /* + * the global_seq counts connections i (attempt to) initiate + * in order to disambiguate certain connect race conditions. + */ + u32 global_seq; + spinlock_t global_seq_lock; +}; + +enum ceph_msg_data_type { + CEPH_MSG_DATA_NONE, /* message contains no data payload */ + CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */ + CEPH_MSG_DATA_PAGELIST, /* data source/destination is a pagelist */ +#ifdef CONFIG_BLOCK + CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ +#endif /* CONFIG_BLOCK */ + CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ +}; + +#ifdef CONFIG_BLOCK + +struct ceph_bio_iter { + struct bio *bio; + struct bvec_iter iter; +}; + +#define __ceph_bio_iter_advance_step(it, n, STEP) do { \ + unsigned int __n = (n), __cur_n; \ + \ + while (__n) { \ + BUG_ON(!(it)->iter.bi_size); \ + __cur_n = min((it)->iter.bi_size, __n); \ + (void)(STEP); \ + bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \ + if (!(it)->iter.bi_size && (it)->bio->bi_next) { \ + dout("__ceph_bio_iter_advance_step next bio\n"); \ + (it)->bio = (it)->bio->bi_next; \ + (it)->iter = (it)->bio->bi_iter; \ + } \ + __n -= __cur_n; \ + } \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bio_iter_advance(it, n) \ + __ceph_bio_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. + */ +#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bio_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = __cur_n; \ + __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#endif /* CONFIG_BLOCK */ + +struct ceph_bvec_iter { + struct bio_vec *bvecs; + struct bvec_iter iter; +}; + +#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (void)(STEP); \ + bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bvec_iter_advance(it, n) \ + __ceph_bvec_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. 
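+ *
+ * Inside BVEC_STEP each covered segment is visible as the local
+ * variable bv (a struct bio_vec).  An illustrative sketch that totals
+ * the advanced bytes (the surrounding variables are invented):
+ *
+ *	size_t total = 0;
+ *
+ *	ceph_bvec_iter_advance_step(&it, n, ({
+ *		total += bv.bv_len;
+ *	}));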
+ */ +#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bvec_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = (n); \ + for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#define ceph_bvec_iter_shorten(it, n) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (it)->iter.bi_size = (n); \ +} while (0) + +struct ceph_msg_data { + enum ceph_msg_data_type type; + union { +#ifdef CONFIG_BLOCK + struct { + struct ceph_bio_iter bio_pos; + u32 bio_length; + }; +#endif /* CONFIG_BLOCK */ + struct ceph_bvec_iter bvec_pos; + struct { + struct page **pages; + size_t length; /* total # bytes */ + unsigned int alignment; /* first page */ + bool own_pages; + }; + struct ceph_pagelist *pagelist; + }; +}; + +struct ceph_msg_data_cursor { + size_t total_resid; /* across all data items */ + + struct ceph_msg_data *data; /* current data item */ + size_t resid; /* bytes not yet consumed */ + bool last_piece; /* current is last piece */ + bool need_crc; /* crc update needed */ + union { +#ifdef CONFIG_BLOCK + struct ceph_bio_iter bio_iter; +#endif /* CONFIG_BLOCK */ + struct bvec_iter bvec_iter; + struct { /* pages */ + unsigned int page_offset; /* offset in page */ + unsigned short page_index; /* index in array */ + unsigned short page_count; /* pages in array */ + }; + struct { /* pagelist */ + struct page *page; /* page from list */ + size_t offset; /* bytes from list */ + }; + }; +}; + +/* + * a single message. it contains a header (src, dest, message type, etc.), + * footer (crc values, mainly), a "front" message body, and possibly a + * data payload (stored in some number of pages). + */ +struct ceph_msg { + struct ceph_msg_header hdr; /* header */ + union { + struct ceph_msg_footer footer; /* footer */ + struct ceph_msg_footer_old old_footer; /* old format footer */ + }; + struct kvec front; /* unaligned blobs of message */ + struct ceph_buffer *middle; + + size_t data_length; + struct ceph_msg_data *data; + int num_data_items; + int max_data_items; + struct ceph_msg_data_cursor cursor; + + struct ceph_connection *con; + struct list_head list_head; /* links for connection lists */ + + struct kref kref; + bool more_to_follow; + bool needs_out_seq; + int front_alloc_len; + unsigned long ack_stamp; /* tx: when we were acked */ + + struct ceph_msgpool *pool; +}; + +/* ceph connection fault delay defaults, for exponential backoff */ +#define BASE_DELAY_INTERVAL (HZ/2) +#define MAX_DELAY_INTERVAL (5 * 60 * HZ) + +/* + * A single connection with another host. + * + * We maintain a queue of outgoing messages, and some session state to + * ensure that we can preserve the lossless, ordered delivery of + * messages in the case of a TCP disconnect. 
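+ *
+ * A rough lifecycle sketch based on the API declared below (the
+ * function names are real, the surrounding variables are invented):
+ *
+ *	ceph_con_init(&con, private, &my_con_ops, &client->msgr);
+ *	ceph_con_open(&con, CEPH_ENTITY_TYPE_OSD, osd_id, &peer_addr);
+ *	ceph_con_send(&con, msg);
+ *	...
+ *	ceph_con_close(&con);
+ *
+ * On socket faults the messenger retries with the exponential backoff
+ * bounded by BASE_DELAY_INTERVAL and MAX_DELAY_INTERVAL above.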
+ */ +struct ceph_connection { + void *private; + + const struct ceph_connection_operations *ops; + + struct ceph_messenger *msgr; + + atomic_t sock_state; + struct socket *sock; + struct ceph_entity_addr peer_addr; /* peer address */ + struct ceph_entity_addr peer_addr_for_me; + + unsigned long flags; + unsigned long state; + const char *error_msg; /* error message, if any */ + + struct ceph_entity_name peer_name; /* peer name */ + + u64 peer_features; + u32 connect_seq; /* identify the most recent connection + attempt for this connection, client */ + u32 peer_global_seq; /* peer's global seq for this connection */ + + struct ceph_auth_handshake *auth; + int auth_retry; /* true if we need a newer authorizer */ + + struct mutex mutex; + + /* out queue */ + struct list_head out_queue; + struct list_head out_sent; /* sending or sent but unacked */ + u64 out_seq; /* last message queued for send */ + + u64 in_seq, in_seq_acked; /* last message received, acked */ + + /* connection negotiation temps */ + char in_banner[CEPH_BANNER_MAX_LEN]; + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + struct ceph_entity_addr actual_peer_addr; + + /* message out temps */ + struct ceph_msg_header out_hdr; + struct ceph_msg *out_msg; /* sending message (== tail of + out_sent) */ + bool out_msg_done; + + struct kvec out_kvec[8], /* sending header/footer data */ + *out_kvec_cur; + int out_kvec_left; /* kvec's left in out_kvec */ + int out_skip; /* skip this many bytes */ + int out_kvec_bytes; /* total bytes left */ + int out_more; /* there is more data after the kvecs */ + __le64 out_temp_ack; /* for writing an ack */ + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 + stamp */ + + /* message in temps */ + struct ceph_msg_header in_hdr; + struct ceph_msg *in_msg; + u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ + + char in_tag; /* protocol control byte */ + int in_base_pos; /* bytes read */ + __le64 in_temp_ack; /* for reading an ack */ + + struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */ + + struct delayed_work work; /* send|recv work */ + unsigned long delay; /* current delay interval */ +}; + + +extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr); + +extern int ceph_parse_ips(const char *c, const char *end, + struct ceph_entity_addr *addr, + int max_count, int *count); + + +extern int ceph_msgr_init(void); +extern void ceph_msgr_exit(void); +extern void ceph_msgr_flush(void); + +extern void ceph_messenger_init(struct ceph_messenger *msgr, + struct ceph_entity_addr *myaddr); +extern void ceph_messenger_fini(struct ceph_messenger *msgr); +extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr); + +extern void ceph_con_init(struct ceph_connection *con, void *private, + const struct ceph_connection_operations *ops, + struct ceph_messenger *msgr); +extern void ceph_con_open(struct ceph_connection *con, + __u8 entity_type, __u64 entity_num, + struct ceph_entity_addr *addr); +extern bool ceph_con_opened(struct ceph_connection *con); +extern void ceph_con_close(struct ceph_connection *con); +extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); + +extern void ceph_msg_revoke(struct ceph_msg *msg); +extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); + +extern void ceph_con_keepalive(struct ceph_connection *con); +extern bool ceph_con_keepalive_expired(struct ceph_connection *con, + unsigned long interval); + +void ceph_msg_data_add_pages(struct ceph_msg *msg, struct 
page **pages, + size_t length, size_t alignment, bool own_pages); +extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, + struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK +void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, + u32 length); +#endif /* CONFIG_BLOCK */ +void ceph_msg_data_add_bvecs(struct ceph_msg *msg, + struct ceph_bvec_iter *bvec_pos); + +struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, + gfp_t flags, bool can_fail); +extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, + bool can_fail); + +extern struct ceph_msg *ceph_msg_get(struct ceph_msg *msg); +extern void ceph_msg_put(struct ceph_msg *msg); + +extern void ceph_msg_dump(struct ceph_msg *msg); + +#endif diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h new file mode 100644 index 0000000..dbb8a69 --- /dev/null +++ b/include/linux/ceph/mon_client.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_MON_CLIENT_H +#define _FS_CEPH_MON_CLIENT_H + +#include +#include +#include + +#include + +struct ceph_client; +struct ceph_mount_args; +struct ceph_auth_client; + +/* + * The monitor map enumerates the set of all monitors. + */ +struct ceph_monmap { + struct ceph_fsid fsid; + u32 epoch; + u32 num_mon; + struct ceph_entity_inst mon_inst[0]; +}; + +struct ceph_mon_client; +struct ceph_mon_generic_request; + + +/* + * Generic mechanism for resending monitor requests. + */ +typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc, + int newmon); + +/* a pending monitor request */ +struct ceph_mon_request { + struct ceph_mon_client *monc; + struct delayed_work delayed_work; + unsigned long delay; + ceph_monc_request_func_t do_request; +}; + +typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *); + +/* + * ceph_mon_generic_request is being used for the statfs and + * mon_get_version requests which are being done a bit differently + * because we need to get data back to the caller + */ +struct ceph_mon_generic_request { + struct ceph_mon_client *monc; + struct kref kref; + u64 tid; + struct rb_node node; + int result; + + struct completion completion; + ceph_monc_callback_t complete_cb; + u64 private_data; /* r_tid/linger_id */ + + struct ceph_msg *request; /* original request */ + struct ceph_msg *reply; /* and reply */ + + union { + struct ceph_statfs *st; + u64 newest; + } u; +}; + +struct ceph_mon_client { + struct ceph_client *client; + struct ceph_monmap *monmap; + + struct mutex mutex; + struct delayed_work delayed_work; + + struct ceph_auth_client *auth; + struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack; + int pending_auth; + + bool hunting; + int cur_mon; /* last monitor i contacted */ + unsigned long sub_renew_after; + unsigned long sub_renew_sent; + struct ceph_connection con; + + bool had_a_connection; + int hunt_mult; /* [1..CEPH_MONC_HUNT_MAX_MULT] */ + + /* pending generic requests */ + struct rb_root generic_request_tree; + u64 last_tid; + + /* subs, indexed with CEPH_SUB_* */ + struct { + struct ceph_mon_subscribe_item item; + bool want; + u32 have; /* epoch */ + } subs[4]; + int fs_cluster_id; /* "mdsmap." 
sub */ + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif +}; + +extern int ceph_monmap_contains(struct ceph_monmap *m, + struct ceph_entity_addr *addr); + +extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); +extern void ceph_monc_stop(struct ceph_mon_client *monc); +extern void ceph_monc_reopen_session(struct ceph_mon_client *monc); + +enum { + CEPH_SUB_MONMAP = 0, + CEPH_SUB_OSDMAP, + CEPH_SUB_FSMAP, + CEPH_SUB_MDSMAP, +}; + +extern const char *ceph_sub_str[]; + +/* + * The model here is to indicate that we need a new map of at least + * epoch @epoch, and also call in when we receive a map. We will + * periodically rerequest the map from the monitor cluster until we + * get what we want. + */ +bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, + bool continuous); +void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch); +void ceph_monc_renew_subs(struct ceph_mon_client *monc); + +extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, + unsigned long timeout); + +int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool, + struct ceph_statfs *buf); + +int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, + u64 *newest); +int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, + ceph_monc_callback_t cb, u64 private_data); + +int ceph_monc_blacklist_add(struct ceph_mon_client *monc, + struct ceph_entity_addr *client_addr); + +extern int ceph_monc_open_session(struct ceph_mon_client *monc); + +extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); + +#endif diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h new file mode 100644 index 0000000..729cdf7 --- /dev/null +++ b/include/linux/ceph/msgpool.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_MSGPOOL +#define _FS_CEPH_MSGPOOL + +#include + +/* + * we use memory pools for preallocating messages we may receive, to + * avoid unexpected OOM conditions. + */ +struct ceph_msgpool { + const char *name; + mempool_t *pool; + int type; /* preallocated message type */ + int front_len; /* preallocated payload size */ + int max_data_items; +}; + +int ceph_msgpool_init(struct ceph_msgpool *pool, int type, + int front_len, int max_data_items, int size, + const char *name); +extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); +struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len, + int max_data_items); +extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); + +#endif diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h new file mode 100644 index 0000000..9e50aed --- /dev/null +++ b/include/linux/ceph/msgr.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_MSGR_H +#define CEPH_MSGR_H + +/* + * Data types for message passing layer used by Ceph. + */ + +#define CEPH_MON_PORT 6789 /* default monitor port */ + +/* + * client-side processes will try to bind to ports in this + * range, simply for the benefit of tools like nmap or wireshark + * that would like to identify the protocol. + */ +#define CEPH_PORT_FIRST 6789 +#define CEPH_PORT_START 6800 /* non-monitors start here */ +#define CEPH_PORT_LAST 6900 + +/* + * tcp connection banner. include a protocol version. and adjust + * whenever the wire protocol changes. try to keep this string length + * constant. 
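+ * ("ceph v027" below is 9 bytes, comfortably inside the 30-byte
+ * CEPH_BANNER_MAX_LEN used to size in_banner in struct ceph_connection.)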
+ */ +#define CEPH_BANNER "ceph v027" +#define CEPH_BANNER_MAX_LEN 30 + + +/* + * Rollover-safe type and comparator for 32-bit sequence numbers. + * Comparator returns -1, 0, or 1. + */ +typedef __u32 ceph_seq_t; + +static inline __s32 ceph_seq_cmp(__u32 a, __u32 b) +{ + return (__s32)a - (__s32)b; +} + + +/* + * entity_name -- logical name for a process participating in the + * network, e.g. 'mds0' or 'osd3'. + */ +struct ceph_entity_name { + __u8 type; /* CEPH_ENTITY_TYPE_* */ + __le64 num; +} __attribute__ ((packed)); + +#define CEPH_ENTITY_TYPE_MON 0x01 +#define CEPH_ENTITY_TYPE_MDS 0x02 +#define CEPH_ENTITY_TYPE_OSD 0x04 +#define CEPH_ENTITY_TYPE_CLIENT 0x08 +#define CEPH_ENTITY_TYPE_AUTH 0x20 + +#define CEPH_ENTITY_TYPE_ANY 0xFF + +extern const char *ceph_entity_type_name(int type); + +/* + * entity_addr -- network address + */ +struct ceph_entity_addr { + __le32 type; + __le32 nonce; /* unique id for process (e.g. pid) */ + struct sockaddr_storage in_addr; +} __attribute__ ((packed)); + +struct ceph_entity_inst { + struct ceph_entity_name name; + struct ceph_entity_addr addr; +} __attribute__ ((packed)); + + +/* used by message exchange protocol */ +#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */ +#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */ +#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing + incoming connection */ +#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again + with higher cseq */ +#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again + with higher gseq */ +#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */ +#define CEPH_MSGR_TAG_MSG 7 /* message */ +#define CEPH_MSGR_TAG_ACK 8 /* message ack */ +#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ +#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ +#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ +#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ +#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ +#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */ +#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */ +#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */ + +/* + * connection negotiation + */ +struct ceph_msg_connect { + __le64 features; /* supported feature bits */ + __le32 host_type; /* CEPH_ENTITY_TYPE_* */ + __le32 global_seq; /* count connections initiated by this host */ + __le32 connect_seq; /* count connections initiated in this session */ + __le32 protocol_version; + __le32 authorizer_protocol; + __le32 authorizer_len; + __u8 flags; /* CEPH_MSG_CONNECT_* */ +} __attribute__ ((packed)); + +struct ceph_msg_connect_reply { + __u8 tag; + __le64 features; /* feature bits for this session */ + __le32 global_seq; + __le32 connect_seq; + __le32 protocol_version; + __le32 authorizer_len; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */ + + +/* + * message header + */ +struct ceph_msg_header_old { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. 
higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_inst src, orig_src; + __le32 reserved; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +struct ceph_msg_header { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_name src; + __le16 compat_version; + __le16 reserved; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +#define CEPH_MSG_PRIO_LOW 64 +#define CEPH_MSG_PRIO_DEFAULT 127 +#define CEPH_MSG_PRIO_HIGH 196 +#define CEPH_MSG_PRIO_HIGHEST 255 + +/* + * follows data payload + */ +struct ceph_msg_footer_old { + __le32 front_crc, middle_crc, data_crc; + __u8 flags; +} __attribute__ ((packed)); + +struct ceph_msg_footer { + __le32 front_crc, middle_crc, data_crc; + // sig holds the 64 bits of the digital signature for the message PLR + __le64 sig; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ +#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ +#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */ + + +#endif diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h new file mode 100644 index 0000000..eaffbdd --- /dev/null +++ b/include/linux/ceph/osd_client.h @@ -0,0 +1,573 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_OSD_CLIENT_H +#define _FS_CEPH_OSD_CLIENT_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +struct ceph_msg; +struct ceph_snap_context; +struct ceph_osd_request; +struct ceph_osd_client; + +/* + * completion callback for async writepages + */ +typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); + +#define CEPH_HOMELESS_OSD -1 + +/* a given osd we're communicating with */ +struct ceph_osd { + refcount_t o_ref; + struct ceph_osd_client *o_osdc; + int o_osd; + int o_incarnation; + struct rb_node o_node; + struct ceph_connection o_con; + struct rb_root o_requests; + struct rb_root o_linger_requests; + struct rb_root o_backoff_mappings; + struct rb_root o_backoffs_by_id; + struct list_head o_osd_lru; + struct ceph_auth_handshake o_auth; + unsigned long lru_ttl; + struct list_head o_keepalive_item; + struct mutex lock; +}; + +#define CEPH_OSD_SLAB_OPS 2 +#define CEPH_OSD_MAX_OPS 16 + +enum ceph_osd_data_type { + CEPH_OSD_DATA_TYPE_NONE = 0, + CEPH_OSD_DATA_TYPE_PAGES, + CEPH_OSD_DATA_TYPE_PAGELIST, +#ifdef CONFIG_BLOCK + CEPH_OSD_DATA_TYPE_BIO, +#endif /* CONFIG_BLOCK */ + CEPH_OSD_DATA_TYPE_BVECS, +}; + +struct ceph_osd_data { + enum ceph_osd_data_type type; + union { + struct { + struct page **pages; + u64 length; + u32 alignment; + bool pages_from_pool; + bool own_pages; + }; + struct ceph_pagelist *pagelist; +#ifdef CONFIG_BLOCK + struct { + struct ceph_bio_iter bio_pos; + u32 bio_length; + }; +#endif /* CONFIG_BLOCK 
*/ + struct { + struct ceph_bvec_iter bvec_pos; + u32 num_bvecs; + }; + }; +}; + +struct ceph_osd_req_op { + u16 op; /* CEPH_OSD_OP_* */ + u32 flags; /* CEPH_OSD_OP_FLAG_* */ + u32 indata_len; /* request */ + u32 outdata_len; /* reply */ + s32 rval; + + union { + struct ceph_osd_data raw_data_in; + struct { + u64 offset, length; + u64 truncate_size; + u32 truncate_seq; + struct ceph_osd_data osd_data; + } extent; + struct { + u32 name_len; + u32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + struct ceph_osd_data osd_data; + } xattr; + struct { + const char *class_name; + const char *method_name; + struct ceph_osd_data request_info; + struct ceph_osd_data request_data; + struct ceph_osd_data response_data; + __u8 class_len; + __u8 method_len; + u32 indata_len; + } cls; + struct { + u64 cookie; + __u8 op; /* CEPH_OSD_WATCH_OP_ */ + u32 gen; + } watch; + struct { + struct ceph_osd_data request_data; + } notify_ack; + struct { + u64 cookie; + struct ceph_osd_data request_data; + struct ceph_osd_data response_data; + } notify; + struct { + struct ceph_osd_data response_data; + } list_watchers; + struct { + u64 expected_object_size; + u64 expected_write_size; + } alloc_hint; + struct { + u64 snapid; + u64 src_version; + u8 flags; + u32 src_fadvise_flags; + struct ceph_osd_data osd_data; + } copy_from; + }; +}; + +struct ceph_osd_request_target { + struct ceph_object_id base_oid; + struct ceph_object_locator base_oloc; + struct ceph_object_id target_oid; + struct ceph_object_locator target_oloc; + + struct ceph_pg pgid; /* last raw pg we mapped to */ + struct ceph_spg spgid; /* last actual spg we mapped to */ + u32 pg_num; + u32 pg_num_mask; + struct ceph_osds acting; + struct ceph_osds up; + int size; + int min_size; + bool sort_bitwise; + bool recovery_deletes; + + unsigned int flags; /* CEPH_OSD_FLAG_* */ + bool paused; + + u32 epoch; + u32 last_force_resend; + + int osd; +}; + +/* an in-flight request */ +struct ceph_osd_request { + u64 r_tid; /* unique for this client */ + struct rb_node r_node; + struct rb_node r_mc_node; /* map check */ + struct work_struct r_complete_work; + struct ceph_osd *r_osd; + + struct ceph_osd_request_target r_t; +#define r_base_oid r_t.base_oid +#define r_base_oloc r_t.base_oloc +#define r_flags r_t.flags + + struct ceph_msg *r_request, *r_reply; + u32 r_sent; /* >0 if r_request is sending/sent */ + + /* request osd ops array */ + unsigned int r_num_ops; + + int r_result; + + struct ceph_osd_client *r_osdc; + struct kref r_kref; + bool r_mempool; + struct completion r_completion; /* private to osd_client.c */ + ceph_osdc_callback_t r_callback; + + struct inode *r_inode; /* for use by callbacks */ + struct list_head r_private_item; /* ditto */ + void *r_priv; /* ditto */ + + /* set by submitter */ + u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */ + struct ceph_snap_context *r_snapc; /* for writes */ + struct timespec64 r_mtime; /* ditto */ + u64 r_data_offset; /* ditto */ + bool r_linger; /* don't resend on failure */ + + /* internal */ + unsigned long r_stamp; /* jiffies, send or check time */ + unsigned long r_start_stamp; /* jiffies */ + int r_attempts; + u32 r_map_dne_bound; + + struct ceph_osd_req_op r_ops[]; +}; + +struct ceph_request_redirect { + struct ceph_object_locator oloc; +}; + +/* + * osd request identifier + * + * caller name + incarnation# + tid to unique identify this request + */ +struct ceph_osd_reqid { + struct ceph_entity_name name; + __le64 tid; + __le32 inc; +} __packed; + +struct 
ceph_blkin_trace_info { + __le64 trace_id; + __le64 span_id; + __le64 parent_span_id; +} __packed; + +typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie, + u64 notifier_id, void *data, size_t data_len); +typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err); + +struct ceph_osd_linger_request { + struct ceph_osd_client *osdc; + u64 linger_id; + bool committed; + bool is_watch; /* watch or notify */ + + struct ceph_osd *osd; + struct ceph_osd_request *reg_req; + struct ceph_osd_request *ping_req; + unsigned long ping_sent; + unsigned long watch_valid_thru; + struct list_head pending_lworks; + + struct ceph_osd_request_target t; + u32 map_dne_bound; + + struct timespec64 mtime; + + struct kref kref; + struct mutex lock; + struct rb_node node; /* osd */ + struct rb_node osdc_node; /* osdc */ + struct rb_node mc_node; /* map check */ + struct list_head scan_item; + + struct completion reg_commit_wait; + struct completion notify_finish_wait; + int reg_commit_error; + int notify_finish_error; + int last_error; + + u32 register_gen; + u64 notify_id; + + rados_watchcb2_t wcb; + rados_watcherrcb_t errcb; + void *data; + + struct page ***preply_pages; + size_t *preply_len; +}; + +struct ceph_watch_item { + struct ceph_entity_name name; + u64 cookie; + struct ceph_entity_addr addr; +}; + +struct ceph_spg_mapping { + struct rb_node node; + struct ceph_spg spgid; + + struct rb_root backoffs; +}; + +struct ceph_hobject_id { + void *key; + size_t key_len; + void *oid; + size_t oid_len; + u64 snapid; + u32 hash; + u8 is_max; + void *nspace; + size_t nspace_len; + s64 pool; + + /* cache */ + u32 hash_reverse_bits; +}; + +static inline void ceph_hoid_build_hash_cache(struct ceph_hobject_id *hoid) +{ + hoid->hash_reverse_bits = bitrev32(hoid->hash); +} + +/* + * PG-wide backoff: [begin, end) + * per-object backoff: begin == end + */ +struct ceph_osd_backoff { + struct rb_node spg_node; + struct rb_node id_node; + + struct ceph_spg spgid; + u64 id; + struct ceph_hobject_id *begin; + struct ceph_hobject_id *end; +}; + +#define CEPH_LINGER_ID_START 0xffff000000000000ULL + +struct ceph_osd_client { + struct ceph_client *client; + + struct ceph_osdmap *osdmap; /* current map */ + struct rw_semaphore lock; + + struct rb_root osds; /* osds */ + struct list_head osd_lru; /* idle osds */ + spinlock_t osd_lru_lock; + u32 epoch_barrier; + struct ceph_osd homeless_osd; + atomic64_t last_tid; /* tid of last request */ + u64 last_linger_id; + struct rb_root linger_requests; /* lingering requests */ + struct rb_root map_checks; + struct rb_root linger_map_checks; + atomic_t num_requests; + atomic_t num_homeless; + int abort_err; + struct delayed_work timeout_work; + struct delayed_work osds_timeout_work; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif + + mempool_t *req_mempool; + + struct ceph_msgpool msgpool_op; + struct ceph_msgpool msgpool_op_reply; + + struct workqueue_struct *notify_wq; + struct workqueue_struct *completion_wq; +}; + +static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) +{ + return osdc->osdmap->flags & flag; +} + +extern int ceph_osdc_setup(void); +extern void ceph_osdc_cleanup(void); + +extern int ceph_osdc_init(struct ceph_osd_client *osdc, + struct ceph_client *client); +extern void ceph_osdc_stop(struct ceph_osd_client *osdc); +extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc); + +extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, + struct ceph_msg *msg); +extern void ceph_osdc_handle_map(struct 
ceph_osd_client *osdc, + struct ceph_msg *msg); +void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); +void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err); +void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); + +#define osd_req_op_data(oreq, whch, typ, fld) \ +({ \ + struct ceph_osd_request *__oreq = (oreq); \ + unsigned int __whch = (whch); \ + BUG_ON(__whch >= __oreq->r_num_ops); \ + &__oreq->r_ops[__whch].typ.fld; \ +}) + +extern void osd_req_op_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, u32 flags); + +extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); + +extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, + u64 offset, u64 length, + u64 truncate_size, u32 truncate_seq); +extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, + unsigned int which, u64 length); +extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, + unsigned int which, u64 offset_inc); + +extern struct ceph_osd_data *osd_req_op_extent_osd_data( + struct ceph_osd_request *osd_req, + unsigned int which); + +extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, + unsigned int which, + struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK +void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bio_iter *bio_pos, + u32 bio_length); +#endif /* CONFIG_BLOCK */ +void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes); +void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bvec_iter *bvec_pos); + +extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, + unsigned int which, + struct ceph_pagelist *pagelist); +extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 num_bvecs, + u32 bytes); +extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, + const char *class, const char *method); +extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, + u16 opcode, const char *name, const void *value, + size_t size, u8 cmp_op, u8 cmp_mode); +extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, + unsigned int which, + u64 expected_object_size, + u64 expected_write_size); + +extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, + struct ceph_snap_context *snapc, + unsigned int num_ops, + bool use_mempool, + gfp_t gfp_flags); +int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp); + +extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, + struct 
ceph_file_layout *layout, + struct ceph_vino vino, + u64 offset, u64 *len, + unsigned int which, int num_ops, + int opcode, int flags, + struct ceph_snap_context *snapc, + u32 truncate_seq, u64 truncate_size, + bool use_mempool); + +extern void ceph_osdc_get_request(struct ceph_osd_request *req); +extern void ceph_osdc_put_request(struct ceph_osd_request *req); + +extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail); +extern void ceph_osdc_cancel_request(struct ceph_osd_request *req); +extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); +extern void ceph_osdc_sync(struct ceph_osd_client *osdc); + +extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc); +void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc); + +int ceph_osdc_call(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + const char *class, const char *method, + unsigned int flags, + struct page *req_page, size_t req_len, + struct page **resp_pages, size_t *resp_len); + +extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int nr_pages, + int page_align); + +extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *sc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec64 *mtime, + struct page **pages, int nr_pages); + +int ceph_osdc_copy_from(struct ceph_osd_client *osdc, + u64 src_snapid, u64 src_version, + struct ceph_object_id *src_oid, + struct ceph_object_locator *src_oloc, + u32 src_fadvise_flags, + struct ceph_object_id *dst_oid, + struct ceph_object_locator *dst_oloc, + u32 dst_fadvise_flags, + u8 copy_from_flags); + +/* watch/notify */ +struct ceph_osd_linger_request * +ceph_osdc_watch(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + rados_watchcb2_t wcb, + rados_watcherrcb_t errcb, + void *data); +int ceph_osdc_unwatch(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); + +int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + u64 notify_id, + u64 cookie, + void *payload, + u32 payload_len); +int ceph_osdc_notify(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + void *payload, + u32 payload_len, + u32 timeout, + struct page ***preply_pages, + size_t *preply_len); +int ceph_osdc_watch_check(struct ceph_osd_client *osdc, + struct ceph_osd_linger_request *lreq); +int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + struct ceph_watch_item **watchers, + u32 *num_watchers); +#endif + diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h new file mode 100644 index 0000000..28bc3e9 --- /dev/null +++ b/include/linux/ceph/osdmap.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_OSDMAP_H +#define _FS_CEPH_OSDMAP_H + +#include +#include +#include +#include + +/* + * The osd map describes the current membership of the osd cluster and + * specifies the mapping of objects to placement groups and placement + * groups to (sets of) osds. 
That is, it completely specifies the
+ * (desired) distribution of all data objects in the system at some
+ * point in time.
+ *
+ * Each map version is identified by an epoch, which increases monotonically.
+ *
+ * The map can be updated either via an incremental map (diff) describing
+ * the change between two successive epochs, or as a fully encoded map.
+ */
+struct ceph_pg {
+	uint64_t pool;
+	uint32_t seed;
+};
+
+#define CEPH_SPG_NOSHARD	-1
+
+struct ceph_spg {
+	struct ceph_pg pgid;
+	s8 shard;
+};
+
+int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
+int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
+
+#define CEPH_POOL_FLAG_HASHPSPOOL	(1ULL << 0) /* hash pg seed and pool id
+						       together */
+#define CEPH_POOL_FLAG_FULL		(1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA	(1ULL << 10) /* pool ran out of quota,
+							will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL		(1ULL << 11) /* pool is nearfull */
+
+struct ceph_pg_pool_info {
+	struct rb_node node;
+	s64 id;
+	u8 type; /* CEPH_POOL_TYPE_* */
+	u8 size;
+	u8 min_size;
+	u8 crush_ruleset;
+	u8 object_hash;
+	u32 last_force_request_resend;
+	u32 pg_num, pgp_num;
+	int pg_num_mask, pgp_num_mask;
+	s64 read_tier;
+	s64 write_tier; /* wins for read+write ops */
+	u64 flags; /* CEPH_POOL_FLAG_* */
+	char *name;
+
+	bool was_full;	/* for handle_one_map() */
+};
+
+static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
+{
+	switch (pool->type) {
+	case CEPH_POOL_TYPE_REP:
+		return true;
+	case CEPH_POOL_TYPE_EC:
+		return false;
+	default:
+		BUG();
+	}
+#ifdef CONFIG_MCST
+	/* lcc warns */
+	return false;
+#endif
+}
+
+struct ceph_object_locator {
+	s64 pool;
+	struct ceph_string *pool_ns;
+};
+
+static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
+{
+	oloc->pool = -1;
+	oloc->pool_ns = NULL;
+}
+
+static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
+{
+	return oloc->pool == -1;
+}
+
+void ceph_oloc_copy(struct ceph_object_locator *dest,
+		    const struct ceph_object_locator *src);
+void ceph_oloc_destroy(struct ceph_object_locator *oloc);
+
+/*
+ * 51-char inline_name is long enough for all cephfs and all but one
+ * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
+ * arbitrarily long (~PAGE_SIZE).  It's done once during rbd map; all
+ * other rbd requests fit into inline_name.
+ *
+ * Makes ceph_object_id 64 bytes on 64-bit.
+ */
+#define CEPH_OID_INLINE_LEN 52
+
+/*
+ * Both inline and external buffers have space for a NUL-terminator,
+ * which is carried around.  It's not required though - RADOS object
+ * names don't have to be NUL-terminated and may contain NULs.
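+ *
+ * Illustrative usage sketch (the format string and image_name are
+ * invented; the helpers themselves are declared below):
+ *
+ *	CEPH_DEFINE_OID_ONSTACK(oid);
+ *
+ *	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "rbd_id.%s", image_name);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	ceph_oid_destroy(&oid);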
+ */ +struct ceph_object_id { + char *name; + char inline_name[CEPH_OID_INLINE_LEN]; + int name_len; +}; + +#define __CEPH_OID_INITIALIZER(oid) { .name = (oid).inline_name } + +#define CEPH_DEFINE_OID_ONSTACK(oid) \ + struct ceph_object_id oid = __CEPH_OID_INITIALIZER(oid) + +static inline void ceph_oid_init(struct ceph_object_id *oid) +{ + *oid = (struct ceph_object_id) __CEPH_OID_INITIALIZER(*oid); +} + +static inline bool ceph_oid_empty(const struct ceph_object_id *oid) +{ + return oid->name == oid->inline_name && !oid->name_len; +} + +void ceph_oid_copy(struct ceph_object_id *dest, + const struct ceph_object_id *src); +__printf(2, 3) +void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...); +__printf(3, 4) +int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, + const char *fmt, ...); +void ceph_oid_destroy(struct ceph_object_id *oid); + +struct ceph_pg_mapping { + struct rb_node node; + struct ceph_pg pgid; + + union { + struct { + int len; + int osds[]; + } pg_temp, pg_upmap; + struct { + int osd; + } primary_temp; + struct { + int len; + int from_to[][2]; + } pg_upmap_items; + }; +}; + +struct ceph_osdmap { + struct ceph_fsid fsid; + u32 epoch; + struct ceph_timespec created, modified; + + u32 flags; /* CEPH_OSDMAP_* */ + + u32 max_osd; /* size of osd_state, _offload, _addr arrays */ + u32 *osd_state; /* CEPH_OSD_* */ + u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ + struct ceph_entity_addr *osd_addr; + + struct rb_root pg_temp; + struct rb_root primary_temp; + + /* remap (post-CRUSH, pre-up) */ + struct rb_root pg_upmap; /* PG := raw set */ + struct rb_root pg_upmap_items; /* from -> to within raw set */ + + u32 *osd_primary_affinity; + + struct rb_root pg_pools; + u32 pool_max; + + /* the CRUSH map specifies the mapping of placement groups to + * the list of osds that store+replicate them. 
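+	 *
+	 * (Editorial note: crush_workspace below is scratch memory for
+	 * CRUSH placement calculations; crush_workspace_mutex serializes
+	 * its use, so only one mapping computation runs against this map
+	 * at a time.)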
*/ + struct crush_map *crush; + + struct mutex crush_workspace_mutex; + void *crush_workspace; +}; + +static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) +{ + return osd >= 0 && osd < map->max_osd && + (map->osd_state[osd] & CEPH_OSD_EXISTS); +} + +static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd) +{ + return ceph_osd_exists(map, osd) && + (map->osd_state[osd] & CEPH_OSD_UP); +} + +static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) +{ + return !ceph_osd_is_up(map, osd); +} + +char *ceph_osdmap_state_str(char *str, int len, u32 state); +extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); + +static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, + int osd) +{ + if (osd >= map->max_osd) + return NULL; + return &map->osd_addr[osd]; +} + +#define CEPH_PGID_ENCODING_LEN (1 + 8 + 4 + 4) + +static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) +{ + __u8 version; + + if (!ceph_has_room(p, end, CEPH_PGID_ENCODING_LEN)) { + pr_warn("incomplete pg encoding\n"); + return -EINVAL; + } + version = ceph_decode_8(p); + if (version > 1) { + pr_warn("do not understand pg encoding %d > 1\n", + (int)version); + return -EINVAL; + } + + pgid->pool = ceph_decode_64(p); + pgid->seed = ceph_decode_32(p); + *p += 4; /* skip deprecated preferred value */ + + return 0; +} + +struct ceph_osdmap *ceph_osdmap_alloc(void); +extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end); +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map); +extern void ceph_osdmap_destroy(struct ceph_osdmap *map); + +struct ceph_osds { + int osds[CEPH_PG_MAX_SIZE]; + int size; + int primary; /* id, NOT index */ +}; + +static inline void ceph_osds_init(struct ceph_osds *set) +{ + set->size = 0; + set->primary = -1; +} + +void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src); + +bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, + u32 new_pg_num); +bool ceph_is_new_interval(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + const struct ceph_osds *old_up, + const struct ceph_osds *new_up, + int old_size, + int new_size, + int old_min_size, + int new_min_size, + u32 old_pg_num, + u32 new_pg_num, + bool old_sort_bitwise, + bool new_sort_bitwise, + bool old_recovery_deletes, + bool new_recovery_deletes, + const struct ceph_pg *pgid); +bool ceph_osds_changed(const struct ceph_osds *old_acting, + const struct ceph_osds *new_acting, + bool any_change); + +void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, + const struct ceph_object_id *oid, + const struct ceph_object_locator *oloc, + struct ceph_pg *raw_pgid); +int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, + const struct ceph_object_id *oid, + const struct ceph_object_locator *oloc, + struct ceph_pg *raw_pgid); + +void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, + struct ceph_pg_pool_info *pi, + const struct ceph_pg *raw_pgid, + struct ceph_osds *up, + struct ceph_osds *acting); +bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, + struct ceph_pg_pool_info *pi, + const struct ceph_pg *raw_pgid, + struct ceph_spg *spgid); +int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, + const struct ceph_pg *raw_pgid); + +extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, + u64 id); + +extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); +extern int ceph_pg_poolid_by_name(struct ceph_osdmap 
*map, const char *name); +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); + +#endif diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h new file mode 100644 index 0000000..5dead84 --- /dev/null +++ b/include/linux/ceph/pagelist.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FS_CEPH_PAGELIST_H +#define __FS_CEPH_PAGELIST_H + +#include +#include +#include +#include + +struct ceph_pagelist { + struct list_head head; + void *mapped_tail; + size_t length; + size_t room; + struct list_head free_list; + size_t num_pages_free; + refcount_t refcnt; +}; + +struct ceph_pagelist_cursor { + struct ceph_pagelist *pl; /* pagelist, for error checking */ + struct list_head *page_lru; /* page in list */ + size_t room; /* room remaining to reset to */ +}; + +struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags); + +extern void ceph_pagelist_release(struct ceph_pagelist *pl); + +extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); + +extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); + +extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); + +extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, + struct ceph_pagelist_cursor *c); + +extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, + struct ceph_pagelist_cursor *c); + +static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) +{ + __le64 ev = cpu_to_le64(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) +{ + __le32 ev = cpu_to_le32(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) +{ + __le16 ev = cpu_to_le16(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) +{ + return ceph_pagelist_append(pl, &v, 1); +} +static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, + char *s, u32 len) +{ + int ret = ceph_pagelist_encode_32(pl, len); + if (ret) + return ret; + if (len) + return ceph_pagelist_append(pl, s, len); + return 0; +} + +#endif diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h new file mode 100644 index 0000000..c004bce --- /dev/null +++ b/include/linux/ceph/rados.h @@ -0,0 +1,535 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CEPH_RADOS_H +#define CEPH_RADOS_H + +/* + * Data types for the Ceph distributed object storage layer RADOS + * (Reliable Autonomic Distributed Object Store). + */ + +#include + +/* + * fs id + */ +struct ceph_fsid { + unsigned char fsid[16]; +}; + +static inline int ceph_fsid_compare(const struct ceph_fsid *a, + const struct ceph_fsid *b) +{ + return memcmp(a, b, sizeof(*a)); +} + +/* + * ino, object, etc. 
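+ *
+ * (Editorial example: a snapid of CEPH_NOSNAP names the live "head"
+ * version of an object or inode; CEPH_SNAPDIR is reserved for the
+ * virtual .snap directory.)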
+ */ +typedef __le64 ceph_snapid_t; +#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */ +#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */ +#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */ + +struct ceph_timespec { + __le32 tv_sec; + __le32 tv_nsec; +} __attribute__ ((packed)); + + +/* + * object layout - how objects are mapped into PGs + */ +#define CEPH_OBJECT_LAYOUT_HASH 1 +#define CEPH_OBJECT_LAYOUT_LINEAR 2 +#define CEPH_OBJECT_LAYOUT_HASHINO 3 + +/* + * pg layout -- how PGs are mapped onto (sets of) OSDs + */ +#define CEPH_PG_LAYOUT_CRUSH 0 +#define CEPH_PG_LAYOUT_HASH 1 +#define CEPH_PG_LAYOUT_LINEAR 2 +#define CEPH_PG_LAYOUT_HYBRID 3 + +#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */ + +/* + * placement group. + * we encode this into one __le64. + */ +struct ceph_pg_v1 { + __le16 preferred; /* preferred primary osd */ + __le16 ps; /* placement seed */ + __le32 pool; /* object pool */ +} __attribute__ ((packed)); + +/* + * pg_pool is a set of pgs storing a pool of objects + * + * pg_num -- base number of pseudorandomly placed pgs + * + * pgp_num -- effective number when calculating pg placement. this + * is used for pg_num increases. new pgs result in data being "split" + * into new pgs. for this to proceed smoothly, new pgs are intiially + * colocated with their parents; that is, pgp_num doesn't increase + * until the new pgs have successfully split. only _then_ are the new + * pgs placed independently. + * + * lpg_num -- localized pg count (per device). replicas are randomly + * selected. + * + * lpgp_num -- as above. + */ +#define CEPH_NOPOOL ((__u64) (-1)) /* pool id not defined */ + +#define CEPH_POOL_TYPE_REP 1 +#define CEPH_POOL_TYPE_RAID4 2 /* never implemented */ +#define CEPH_POOL_TYPE_EC 3 + +/* + * stable_mod func is used to control number of placement groups. + * similar to straight-up modulo, but produces a stable mapping as b + * increases over time. b is the number of bins, and bmask is the + * containing power of 2 minus 1. + * + * b <= bmask and bmask=(2**n)-1 + * e.g., b=12 -> bmask=15, b=123 -> bmask=127 + */ +static inline int ceph_stable_mod(int x, int b, int bmask) +{ + if ((x & bmask) < b) + return x & bmask; + else + return x & (bmask >> 1); +} + +/* + * object layout - how a given object should be stored. + */ +struct ceph_object_layout { + struct ceph_pg_v1 ol_pgid; /* raw pg, with _full_ ps precision. */ + __le32 ol_stripe_unit; /* for per-object parity, if any */ +} __attribute__ ((packed)); + +/* + * compound epoch+version, used by storage layer to serialize mutations + */ +struct ceph_eversion { + __le64 version; + __le32 epoch; +} __attribute__ ((packed)); + +/* + * osd map bits + */ + +/* status bits */ +#define CEPH_OSD_EXISTS (1<<0) +#define CEPH_OSD_UP (1<<1) +#define CEPH_OSD_AUTOOUT (1<<2) /* osd was automatically marked out */ +#define CEPH_OSD_NEW (1<<3) /* osd is new, never marked in */ + +extern const char *ceph_osd_state_name(int s); + +/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */ +#define CEPH_OSD_IN 0x10000 +#define CEPH_OSD_OUT 0 + +/* osd primary-affinity. 
fixed point value: 0x10000 == baseline */ +#define CEPH_OSD_MAX_PRIMARY_AFFINITY 0x10000 +#define CEPH_OSD_DEFAULT_PRIMARY_AFFINITY 0x10000 + + +/* + * osd map flag bits + */ +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC), + not set since ~luminous */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC), + not set since ~luminous */ +#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ +#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ +#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ +#define CEPH_OSDMAP_NOUP (1<<5) /* block osd boot */ +#define CEPH_OSDMAP_NODOWN (1<<6) /* block osd mark-down/failure */ +#define CEPH_OSDMAP_NOOUT (1<<7) /* block osd auto mark-out */ +#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */ +#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */ +#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */ +#define CEPH_OSDMAP_NOSCRUB (1<<11) /* block periodic scrub */ +#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block periodic deep-scrub */ +#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */ +#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */ +#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */ +#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */ +#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */ +#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */ +#define CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */ + +/* + * The error code to return when an OSD can't handle a write + * because it is too large. + */ +#define OSD_WRITETOOBIG EMSGSIZE + +/* + * osd ops + * + * WARNING: do not use these op codes directly. Use the helpers + * defined below instead. In certain cases, op code behavior was + * redefined, resulting in special-cases in the helpers. 
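+ *
+ * (Editorial example: test ops with ceph_osd_op_mode_read(op) and
+ * friends rather than masking CEPH_OSD_OP_MODE_RD yourself --
+ * CEPH_OSD_OP_CALL carries the RD bit but is special-cased as not
+ * being a read.)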
+ */ +#define CEPH_OSD_OP_MODE 0xf000 +#define CEPH_OSD_OP_MODE_RD 0x1000 +#define CEPH_OSD_OP_MODE_WR 0x2000 +#define CEPH_OSD_OP_MODE_RMW 0x3000 +#define CEPH_OSD_OP_MODE_SUB 0x4000 +#define CEPH_OSD_OP_MODE_CACHE 0x8000 + +#define CEPH_OSD_OP_TYPE 0x0f00 +#define CEPH_OSD_OP_TYPE_LOCK 0x0100 +#define CEPH_OSD_OP_TYPE_DATA 0x0200 +#define CEPH_OSD_OP_TYPE_ATTR 0x0300 +#define CEPH_OSD_OP_TYPE_EXEC 0x0400 +#define CEPH_OSD_OP_TYPE_PG 0x0500 +#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */ + +#define __CEPH_OSD_OP1(mode, nr) \ + (CEPH_OSD_OP_MODE_##mode | (nr)) + +#define __CEPH_OSD_OP(mode, type, nr) \ + (CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr)) + +#define __CEPH_FORALL_OSD_OPS(f) \ + /** data **/ \ + /* read */ \ + f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \ + f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \ + f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \ + \ + /* fancy read */ \ + f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \ + f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \ + \ + f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \ + f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \ + \ + /* versioning */ \ + f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \ + \ + f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \ + \ + f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \ + \ + /* sync */ \ + f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \ + \ + /* write */ \ + f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \ + f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \ + f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \ + f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \ + f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \ + \ + /* fancy write */ \ + f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \ + f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \ + f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \ + \ + f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \ + f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \ + f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \ + \ + f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \ + f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \ + \ + f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \ + \ + /* omap */ \ + f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \ + f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \ + f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \ + f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \ + f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \ + f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \ + f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \ + f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \ + f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \ + \ + /* tiering */ \ + f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \ + f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \ + f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \ + f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \ + f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \ + f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \ + f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \ + f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \ + \ + /* convert tmap to omap */ \ + f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \ + \ + /* hints */ \ + f(SETALLOCHINT, 
__CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \ + \ + /** multi **/ \ + f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \ + f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \ + f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \ + \ + /** attrs **/ \ + /* read */ \ + f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \ + f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \ + f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \ + \ + /* write */ \ + f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \ + f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \ + f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \ + f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \ + \ + /** subop **/ \ + f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \ + f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \ + f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \ + f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \ + f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \ + f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \ + f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \ + f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \ + f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \ + \ + /** lock **/ \ + f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \ + f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \ + f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \ + f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \ + f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \ + f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \ + \ + /** exec **/ \ + /* note: the RD bit here is wrong; see special-case below in helper */ \ + f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \ + \ + /** pg **/ \ + f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \ + f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \ + f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \ + f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get") + +enum { +#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode), +__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY) +#undef GENERATE_ENUM_ENTRY +}; + +static inline int ceph_osd_op_type_lock(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK; +} +static inline int ceph_osd_op_type_data(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA; +} +static inline int ceph_osd_op_type_attr(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR; +} +static inline int ceph_osd_op_type_exec(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC; +} +static inline int ceph_osd_op_type_pg(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG; +} +static inline int ceph_osd_op_type_multi(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_MULTI; +} + +static inline int ceph_osd_op_mode_subop(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB; +} +static inline int ceph_osd_op_mode_read(int op) +{ + return (op & CEPH_OSD_OP_MODE_RD) && + op != CEPH_OSD_OP_CALL; +} +static inline int ceph_osd_op_mode_modify(int op) +{ + return op & CEPH_OSD_OP_MODE_WR; +} + +/* + * note that the following tmap stuff is also defined in the ceph librados.h + * any modification here needs to be updated there + */ +#define CEPH_OSD_TMAP_HDR 'h' +#define CEPH_OSD_TMAP_SET 's' +#define CEPH_OSD_TMAP_CREATE 'c' /* create key */ +#define CEPH_OSD_TMAP_RM 'r' +#define CEPH_OSD_TMAP_RMSLOPPY 'R' + +extern const char *ceph_osd_op_name(int op); + +/* + * osd op flags + * + 
* An op may be READ, WRITE, or READ|WRITE. + */ +enum { + CEPH_OSD_FLAG_ACK = 0x0001, /* want (or is) "ack" ack */ + CEPH_OSD_FLAG_ONNVRAM = 0x0002, /* want (or is) "onnvram" ack */ + CEPH_OSD_FLAG_ONDISK = 0x0004, /* want (or is) "ondisk" ack */ + CEPH_OSD_FLAG_RETRY = 0x0008, /* resend attempt */ + CEPH_OSD_FLAG_READ = 0x0010, /* op may read */ + CEPH_OSD_FLAG_WRITE = 0x0020, /* op may write */ + CEPH_OSD_FLAG_ORDERSNAP = 0x0040, /* EOLDSNAP if snapc is out of order */ + CEPH_OSD_FLAG_PEERSTAT_OLD = 0x0080, /* DEPRECATED msg includes osd_peer_stat */ + CEPH_OSD_FLAG_BALANCE_READS = 0x0100, + CEPH_OSD_FLAG_PARALLELEXEC = 0x0200, /* execute op in parallel */ + CEPH_OSD_FLAG_PGOP = 0x0400, /* pg op, no object */ + CEPH_OSD_FLAG_EXEC = 0x0800, /* op may exec */ + CEPH_OSD_FLAG_EXEC_PUBLIC = 0x1000, /* DEPRECATED op may exec (public) */ + CEPH_OSD_FLAG_LOCALIZE_READS = 0x2000, /* read from nearby replica, if any */ + CEPH_OSD_FLAG_RWORDERED = 0x4000, /* order wrt concurrent reads */ + CEPH_OSD_FLAG_IGNORE_CACHE = 0x8000, /* ignore cache logic */ + CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */ + CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */ + CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */ + CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */ + CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if + pool uses pool snaps */ + CEPH_OSD_FLAG_REDIRECTED = 0x200000, /* op has been redirected */ + CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000, /* redirect bit is authoritative */ + CEPH_OSD_FLAG_FULL_TRY = 0x800000, /* try op despite full flag */ + CEPH_OSD_FLAG_FULL_FORCE = 0x1000000, /* force op despite full flag */ +}; + +enum { + CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ + CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */ + CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */ + CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */ + CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in + the near future */ + CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed + in the near future */ + CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only + once by this client */ +}; + +#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ +#define EBLACKLISTED ESHUTDOWN /* blacklisted */ + +/* xattr comparison */ +enum { + CEPH_OSD_CMPXATTR_OP_NOP = 0, + CEPH_OSD_CMPXATTR_OP_EQ = 1, + CEPH_OSD_CMPXATTR_OP_NE = 2, + CEPH_OSD_CMPXATTR_OP_GT = 3, + CEPH_OSD_CMPXATTR_OP_GTE = 4, + CEPH_OSD_CMPXATTR_OP_LT = 5, + CEPH_OSD_CMPXATTR_OP_LTE = 6 +}; + +enum { + CEPH_OSD_CMPXATTR_MODE_STRING = 1, + CEPH_OSD_CMPXATTR_MODE_U64 = 2 +}; + +enum { + CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */ + CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */ + CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */ + CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to + * cloneid */ + CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */ +}; + +enum { + CEPH_OSD_WATCH_OP_UNWATCH = 0, + CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, + /* note: use only ODD ids to prevent pre-giant code from + interpreting the op as UNWATCH */ + CEPH_OSD_WATCH_OP_WATCH = 3, + CEPH_OSD_WATCH_OP_RECONNECT = 5, + CEPH_OSD_WATCH_OP_PING = 7, +}; + +const char *ceph_osd_watch_op_name(int o); + +enum { + CEPH_OSD_BACKOFF_OP_BLOCK = 1, + CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2, + CEPH_OSD_BACKOFF_OP_UNBLOCK = 3, +}; + +/* + * 
an individual object operation. each may be accompanied by some data + * payload + */ +struct ceph_osd_op { + __le16 op; /* CEPH_OSD_OP_* */ + __le32 flags; /* CEPH_OSD_OP_FLAG_* */ + union { + struct { + __le64 offset, length; + __le64 truncate_size; + __le32 truncate_seq; + } __attribute__ ((packed)) extent; + struct { + __le32 name_len; + __le32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + } __attribute__ ((packed)) xattr; + struct { + __u8 class_len; + __u8 method_len; + __u8 argc; + __le32 indata_len; + } __attribute__ ((packed)) cls; + struct { + __le64 cookie, count; + } __attribute__ ((packed)) pgls; + struct { + __le64 snapid; + } __attribute__ ((packed)) snap; + struct { + __le64 cookie; + __le64 ver; /* no longer used */ + __u8 op; /* CEPH_OSD_WATCH_OP_* */ + __le32 gen; /* registration generation */ + } __attribute__ ((packed)) watch; + struct { + __le64 cookie; + } __attribute__ ((packed)) notify; + struct { + __le64 offset, length; + __le64 src_offset; + } __attribute__ ((packed)) clonerange; + struct { + __le64 expected_object_size; + __le64 expected_write_size; + } __attribute__ ((packed)) alloc_hint; + struct { + __le64 snapid; + __le64 src_version; + __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */ + /* + * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags + * for src object, flags for dest object are in + * ceph_osd_op::flags. + */ + __le32 src_fadvise_flags; + } __attribute__ ((packed)) copy_from; + }; + __le32 payload_len; +} __attribute__ ((packed)); + + +#endif diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h new file mode 100644 index 0000000..a4a9962 --- /dev/null +++ b/include/linux/ceph/string_table.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_STRING_TABLE_H +#define _FS_CEPH_STRING_TABLE_H + +#include +#include +#include +#include + +struct ceph_string { + struct kref kref; + union { + struct rb_node node; + struct rcu_head rcu; + }; + size_t len; + char str[]; +}; + +extern void ceph_release_string(struct kref *ref); +extern struct ceph_string *ceph_find_or_create_string(const char *str, + size_t len); +extern bool ceph_strings_empty(void); + +static inline struct ceph_string *ceph_get_string(struct ceph_string *str) +{ + kref_get(&str->kref); + return str; +} + +static inline void ceph_put_string(struct ceph_string *str) +{ + if (!str) + return; + kref_put(&str->kref, ceph_release_string); +} + +static inline int ceph_compare_string(struct ceph_string *cs, + const char* str, size_t len) +{ + size_t cs_len = cs ? 
cs->len : 0; + if (cs_len != len) + return cs_len - len; + if (len == 0) + return 0; + return strncmp(cs->str, str, len); +} + +#define ceph_try_get_string(x) \ +({ \ + struct ceph_string *___str; \ + rcu_read_lock(); \ + for (;;) { \ + ___str = rcu_dereference(x); \ + if (!___str || \ + kref_get_unless_zero(&___str->kref)) \ + break; \ + } \ + rcu_read_unlock(); \ + (___str); \ +}) + +#endif diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h new file mode 100644 index 0000000..3486636 --- /dev/null +++ b/include/linux/ceph/striper.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CEPH_STRIPER_H +#define _LINUX_CEPH_STRIPER_H + +#include +#include + +struct ceph_file_layout; + +void ceph_calc_file_object_mapping(struct ceph_file_layout *l, + u64 off, u64 len, + u64 *objno, u64 *objoff, u32 *xlen); + +struct ceph_object_extent { + struct list_head oe_item; + u64 oe_objno; + u64 oe_off; + u64 oe_len; +}; + +static inline void ceph_object_extent_init(struct ceph_object_extent *ex) +{ + INIT_LIST_HEAD(&ex->oe_item); +} + +/* + * Called for each mapped stripe unit. + * + * @bytes: number of bytes mapped, i.e. the minimum of the full length + * requested (file extent length) or the remainder of the stripe + * unit within an object + */ +typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex, + u32 bytes, void *arg); + +int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + struct ceph_object_extent *alloc_fn(void *arg), + void *alloc_arg, + ceph_object_extent_fn_t action_fn, + void *action_arg); +int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + ceph_object_extent_fn_t action_fn, + void *action_arg); + +struct ceph_file_extent { + u64 fe_off; + u64 fe_len; +}; + +static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents, + u32 num_file_extents) +{ + u64 bytes = 0; + u32 i; + + for (i = 0; i < num_file_extents; i++) + bytes += file_extents[i].fe_len; + + return bytes; +} + +int ceph_extent_to_file(struct ceph_file_layout *l, + u64 objno, u64 objoff, u64 objlen, + struct ceph_file_extent **file_extents, + u32 *num_file_extents); + +u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size); + +#endif diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h new file mode 100644 index 0000000..bd3d532 --- /dev/null +++ b/include/linux/ceph/types.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_CEPH_TYPES_H +#define _FS_CEPH_TYPES_H + +/* needed before including ceph_fs.h */ +#include +#include +#include +#include + +#include +#include +#include + +/* + * Identify inodes by both their ino AND snapshot id (a u64). 
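+ *
+ * (Editorial note: two vinos name the same inode version only if both
+ * fields match; the live inode is the one with snap == CEPH_NOSNAP.)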
+ */ +struct ceph_vino { + u64 ino; + u64 snap; +}; + + +/* context for the caps reservation mechanism */ +struct ceph_cap_reservation { + int count; + int used; +}; + + +#endif diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h new file mode 100644 index 0000000..4060004 --- /dev/null +++ b/include/linux/cfag12864b.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Filename: cfag12864b.h + * Version: 0.1.0 + * Description: cfag12864b LCD driver header + * + * Author: Copyright (C) Miguel Ojeda Sandonis + * Date: 2006-10-12 + */ + +#ifndef _CFAG12864B_H_ +#define _CFAG12864B_H_ + +#define CFAG12864B_WIDTH (128) +#define CFAG12864B_HEIGHT (64) +#define CFAG12864B_CONTROLLERS (2) +#define CFAG12864B_PAGES (8) +#define CFAG12864B_ADDRESSES (64) +#define CFAG12864B_SIZE ((CFAG12864B_CONTROLLERS) * \ + (CFAG12864B_PAGES) * \ + (CFAG12864B_ADDRESSES)) + +/* + * The driver will blit this buffer to the LCD + * + * Its size is CFAG12864B_SIZE. + */ +extern unsigned char * cfag12864b_buffer; + +/* + * Get the refresh rate of the LCD + * + * Returns the refresh rate (hertz). + */ +extern unsigned int cfag12864b_getrate(void); + +/* + * Enable refreshing + * + * Returns 0 if successful (anyone was using it), + * or != 0 if failed (someone is using it). + */ +extern unsigned char cfag12864b_enable(void); + +/* + * Disable refreshing + * + * You should call this only when you finish using the LCD. + */ +extern void cfag12864b_disable(void); + +/* + * Is enabled refreshing? (is anyone using the module?) + * + * Returns 0 if refreshing is not enabled (anyone is using it), + * or != 0 if refreshing is enabled (someone is using it). + * + * Useful for buffer read-only modules. + */ +extern unsigned char cfag12864b_isenabled(void); + +/* + * Is the module inited? + */ +extern unsigned char cfag12864b_isinited(void); + +#endif /* _CFAG12864B_H_ */ + diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h new file mode 100644 index 0000000..1162ab7 --- /dev/null +++ b/include/linux/cgroup-defs.h @@ -0,0 +1,878 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/cgroup-defs.h - basic definitions for cgroup + * + * This file provides basic type and interface. Include this file directly + * only if necessary to avoid cyclic dependencies. 
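+ *
+ * (Editorial note: most users should include linux/cgroup.h instead,
+ * which pulls in these definitions together with the inline helpers.)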
+ */ +#ifndef _LINUX_CGROUP_DEFS_H +#define _LINUX_CGROUP_DEFS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CGROUPS + +struct cgroup; +struct cgroup_root; +struct cgroup_subsys; +struct cgroup_taskset; +struct kernfs_node; +struct kernfs_ops; +struct kernfs_open_file; +struct seq_file; +struct poll_table_struct; + +#define MAX_CGROUP_TYPE_NAMELEN 32 +#define MAX_CGROUP_ROOT_NAMELEN 64 +#define MAX_CFTYPE_NAME 64 + +/* define the enumeration of all cgroup subsystems */ +#define SUBSYS(_x) _x ## _cgrp_id, +enum cgroup_subsys_id { +#include + CGROUP_SUBSYS_COUNT, +}; +#undef SUBSYS + +/* bits in struct cgroup_subsys_state flags field */ +enum { + CSS_NO_REF = (1 << 0), /* no reference counting for this css */ + CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ + CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ + CSS_VISIBLE = (1 << 3), /* css is visible to userland */ + CSS_DYING = (1 << 4), /* css is dying */ +}; + +/* bits in struct cgroup flags field */ +enum { + /* Control Group requires release notifications to userspace */ + CGRP_NOTIFY_ON_RELEASE, + /* + * Clone the parent's configuration when creating a new child + * cpuset cgroup. For historical reasons, this option can be + * specified at mount time and thus is implemented here. + */ + CGRP_CPUSET_CLONE_CHILDREN, + + /* Control group has to be frozen. */ + CGRP_FREEZE, + + /* Cgroup is frozen. */ + CGRP_FROZEN, +}; + +/* cgroup_root->flags */ +enum { + CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ + CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ + + /* + * Consider namespaces as delegation boundaries. If this flag is + * set, controller specific interface files in a namespace root + * aren't writeable from inside the namespace. + */ + CGRP_ROOT_NS_DELEGATE = (1 << 3), + + /* + * Enable cpuset controller in v1 cgroup to use v2 behavior. + */ + CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), + + /* + * Enable legacy local memory.events. + */ + CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), +}; + +/* cftype->flags */ +enum { + CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ + CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ + CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */ + + CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ + CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ + CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ + + /* internal flags, do not use outside cgroup core proper */ + __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ + __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ +}; + +/* + * cgroup_file is the handle for a file instance created in a cgroup which + * is used, for example, to generate file changed notifications. This can + * be obtained by setting cftype->file_offset. + */ +struct cgroup_file { + /* do not access any fields from outside cgroup core */ + struct kernfs_node *kn; + unsigned long notified_at; + struct timer_list notify_timer; +}; + +/* + * Per-subsystem/per-cgroup state maintained by the system. This is the + * fundamental structural building block that controllers deal with. + * + * Fields marked with "PI:" are public and immutable and may be accessed + * directly without synchronization. 
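+ *
+ * (Editorial example: ->cgroup, ->ss and ->parent below are "PI:" and
+ * may be dereferenced locklessly while a reference on the css is held;
+ * ->flags is not so marked and needs the usual synchronization.)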
+ */ +struct cgroup_subsys_state { + /* PI: the cgroup that this css is attached to */ + struct cgroup *cgroup; + + /* PI: the cgroup subsystem that this css is attached to */ + struct cgroup_subsys *ss; + + /* reference count - access via css_[try]get() and css_put() */ + struct percpu_ref refcnt; + + /* siblings list anchored at the parent's ->children */ + struct list_head sibling; + struct list_head children; + + /* + * PI: Subsys-unique ID. 0 is unused and root is always 1. The + * matching css can be looked up using css_from_id(). + */ + int id; + + unsigned int flags; + + /* + * Monotonically increasing unique serial number which defines a + * uniform order among all csses. It's guaranteed that all + * ->children lists are in the ascending order of ->serial_nr and + * used to allow interrupting and resuming iterations. + */ + u64 serial_nr; + + /* + * Incremented by online self and children. Used to guarantee that + * parents are not offlined before their children. + */ + atomic_t online_cnt; + + /* percpu_ref killing and RCU release */ + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + + /* + * PI: the parent css. Placed here for cache proximity to following + * fields of the containing structure. + */ + struct cgroup_subsys_state *parent; +}; + +/* + * A css_set is a structure holding pointers to a set of + * cgroup_subsys_state objects. This saves space in the task struct + * object and speeds up fork()/exit(), since a single inc/dec and a + * list_add()/del() can bump the reference count on the entire cgroup + * set for a task. + */ +struct css_set { + /* + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time). + */ + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* reference count */ + refcount_t refcount; + + /* + * For a domain cgroup, the following points to self. If threaded, + * to the matching cset of the nearest domain ancestor. The + * dom_cset provides access to the domain cgroup and its csses to + * which domain level resource consumptions should be charged. + */ + struct css_set *dom_cset; + + /* the default cgroup associated with this css_set */ + struct cgroup *dfl_cgrp; + + /* internal task count, protected by css_set_lock */ + int nr_tasks; + + /* + * Lists running through all tasks using this cgroup group. + * mg_tasks lists tasks which belong to this cset but are in the + * process of being migrated out or in. Protected by + * css_set_rwsem, but, during migration, once tasks are moved to + * mg_tasks, it can be read safely while holding cgroup_mutex. + */ + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + + /* + * On the default hierarhcy, ->subsys[ssid] may point to a css + * attached to an ancestor instead of the cgroup this css_set is + * associated with. The following node is anchored at + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to + * iterate through all css's attached to a given cgroup. + */ + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; + + /* all threaded csets whose ->dom_cset points to this cset */ + struct list_head threaded_csets; + struct list_head threaded_csets_node; + + /* + * List running through all cgroup groups in the same hash + * slot. 
Protected by css_set_lock + */ + struct hlist_node hlist; + + /* + * List of cgrp_cset_links pointing at cgroups referenced from this + * css_set. Protected by css_set_lock. + */ + struct list_head cgrp_links; + + /* + * List of csets participating in the on-going migration either as + * source or destination. Protected by cgroup_mutex. + */ + struct list_head mg_preload_node; + struct list_head mg_node; + + /* + * If this cset is acting as the source of migration the following + * two fields are set. mg_src_cgrp and mg_dst_cgrp are + * respectively the source and destination cgroups of the on-going + * migration. mg_dst_cset is the destination cset the target tasks + * on this cset should be migrated to. Protected by cgroup_mutex. + */ + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + + /* dead and being drained, ignore for migration */ + bool dead; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; +}; + +/* + * rstat - cgroup scalable recursive statistics. Accounting is done + * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the + * hierarchy on reads. + * + * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are + * linked into the updated tree. On the following read, propagation only + * considers and consumes the updated tree. This makes reading O(the + * number of descendants which have been active since last read) instead of + * O(the total number of descendants). + * + * This is important because there can be a lot of (draining) cgroups which + * aren't active and stat may be read frequently. The combination can + * become very expensive. By propagating selectively, increasing reading + * frequency decreases the cost of each read. + * + * This struct hosts both the fields which implement the above - + * updated_children and updated_next - and the fields which track basic + * resource statistics on top of it - bsync, bstat and last_bstat. + */ +struct cgroup_rstat_cpu { + /* + * ->bsync protects ->bstat. These are the only fields which get + * updated in the hot path. + */ + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + + /* + * Snapshots at the last reading. These are used to calculate the + * deltas to propagate to the global counters. + */ + struct cgroup_base_stat last_bstat; + + /* + * Child cgroups with stat updates on this cpu since the last read + * are linked on the parent's ->updated_children through + * ->updated_next. + * + * In addition to being more compact, singly-linked list pointing + * to the cgroup makes it unnecessary for each per-cpu struct to + * point back to the associated cgroup. + * + * Protected by per-cpu cgroup_rstat_cpu_lock. + */ + struct cgroup *updated_children; /* terminated by self cgroup */ + struct cgroup *updated_next; /* NULL iff not on the list */ +}; + +struct cgroup_freezer_state { + /* Should the cgroup and its descendants be frozen. */ + bool freeze; + + /* Should the cgroup actually be frozen? */ + int e_freeze; + + /* Fields below are protected by css_set_lock */ + + /* Number of frozen descendant cgroups */ + int nr_frozen_descendants; + + /* + * Number of tasks, which are counted as frozen: + * frozen, SIGSTOPped, and PTRACEd. 
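+	 *
+	 * (Editorial note: stopped and traced tasks are counted as frozen
+	 * because, like frozen tasks, they cannot run userspace code.)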
+ */ + int nr_frozen_tasks; +}; + +struct cgroup { + /* self css with NULL ->ss, points back to this cgroup */ + struct cgroup_subsys_state self; + + unsigned long flags; /* "unsigned long" so bitops work */ + + /* + * idr allocated in-hierarchy ID. + * + * ID 0 is not used, the ID of the root cgroup is always 1, and a + * new cgroup will be assigned with a smallest available ID. + * + * Allocating/Removing ID must be protected by cgroup_mutex. + */ + int id; + + /* + * The depth this cgroup is at. The root is at depth zero and each + * step down the hierarchy increments the level. This along with + * ancestor_ids[] can determine whether a given cgroup is a + * descendant of another without traversing the hierarchy. + */ + int level; + + /* Maximum allowed descent tree depth */ + int max_depth; + + /* + * Keep track of total numbers of visible and dying descent cgroups. + * Dying cgroups are cgroups which were deleted by a user, + * but are still existing because someone else is holding a reference. + * max_descendants is a maximum allowed number of descent cgroups. + * + * nr_descendants and nr_dying_descendants are protected + * by cgroup_mutex and css_set_lock. It's fine to read them holding + * any of cgroup_mutex and css_set_lock; for writing both locks + * should be held. + */ + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + + /* + * Each non-empty css_set associated with this cgroup contributes + * one to nr_populated_csets. The counter is zero iff this cgroup + * doesn't have any tasks. + * + * All children which have non-zero nr_populated_csets and/or + * nr_populated_children of their own contribute one to either + * nr_populated_domain_children or nr_populated_threaded_children + * depending on their type. Each counter is zero iff all cgroups + * of the type in the subtree proper don't have any tasks. + */ + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + + int nr_threaded_children; /* # of live threaded child cgroups */ + + struct kernfs_node *kn; /* cgroup kernfs entry */ + struct cgroup_file procs_file; /* handle for "cgroup.procs" */ + struct cgroup_file events_file; /* handle for "cgroup.events" */ + + /* + * The bitmask of subsystems enabled on the child cgroups. + * ->subtree_control is the one configured through + * "cgroup.subtree_control" while ->child_ss_mask is the effective + * one which may have more subsystems enabled. Controller knobs + * are made available iff it's enabled in ->subtree_control. + */ + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + + /* Private pointers for each registered subsystem */ + struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; + + struct cgroup_root *root; + + /* + * List of cgrp_cset_links pointing at css_sets with tasks in this + * cgroup. Protected by css_set_lock. + */ + struct list_head cset_links; + + /* + * On the default hierarchy, a css_set for a cgroup with some + * susbsys disabled will point to css's which are associated with + * the closest ancestor which has the subsys enabled. The + * following lists all css_sets which point to this cgroup's css + * for the given subsystem. + */ + struct list_head e_csets[CGROUP_SUBSYS_COUNT]; + + /* + * If !threaded, self. If threaded, it points to the nearest + * domain ancestor. Inside a threaded subtree, cgroups are exempt + * from process granularity and no-internal-task constraint. 
+ * Domain level resource consumptions which aren't tied to a + * specific task are charged to the dom_cgrp. + */ + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; /* used while enabling threaded */ + + /* per-cpu recursive resource statistics */ + struct cgroup_rstat_cpu __percpu *rstat_cpu; + + /* cgroup basic resource statistics */ + struct cgroup_base_stat pending_bstat; /* pending from children */ + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; /* for printing out cputime */ + + /* + * list of pidlists, up to two for each namespace (one for procs, one + * for tasks); created on demand. + */ + struct list_head pidlists; + struct mutex pidlist_mutex; + + /* used to wait for offlining of csses */ + wait_queue_head_t offline_waitq; + + /* used to schedule release agent */ + struct work_struct release_agent_work; + + /* used to track pressure stalls */ + struct psi_group psi; + + /* used to store eBPF programs */ + struct cgroup_bpf bpf; + + /* If there is block congestion on this cgroup. */ + atomic_t congestion_count; + + /* Used to store internal freezer state */ + struct cgroup_freezer_state freezer; + + /* ids of the ancestors at each level including self */ + int ancestor_ids[]; +}; + +/* + * A cgroup_root represents the root of a cgroup hierarchy, and may be + * associated with a kernfs_root to form an active hierarchy. This is + * internal to cgroup core. Don't access directly from controllers. + */ +struct cgroup_root { + struct kernfs_root *kf_root; + + /* The bitmask of subsystems attached to this hierarchy */ + unsigned int subsys_mask; + + /* Unique id for this hierarchy. */ + int hierarchy_id; + + /* The root cgroup. Root is destroyed on its release. */ + struct cgroup cgrp; + + /* for cgrp->ancestor_ids[0] */ + int cgrp_ancestor_id_storage; + + /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ + atomic_t nr_cgrps; + + /* A list running through the active hierarchies */ + struct list_head root_list; + + /* Hierarchy-specific flags */ + unsigned int flags; + + /* IDs for cgroups in this hierarchy */ + struct idr cgroup_idr; + + /* The path to use for release notifications. */ + char release_agent_path[PATH_MAX]; + + /* The name for this hierarchy - may be empty */ + char name[MAX_CGROUP_ROOT_NAMELEN]; +}; + +/* + * struct cftype: handler definitions for cgroup control files + * + * When reading/writing to a file: + * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata + * - the 'cftype' of the file is file->f_path.dentry->d_fsdata + */ +struct cftype { + /* + * By convention, the name should begin with the name of the + * subsystem, followed by a period. Zero length string indicates + * end of cftype array. + */ + char name[MAX_CFTYPE_NAME]; + unsigned long private; + + /* + * The maximum length of string, excluding trailing nul, that can + * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. + */ + size_t max_write_len; + + /* CFTYPE_* flags */ + unsigned int flags; + + /* + * If non-zero, should contain the offset from the start of css to + * a struct cgroup_file field. cgroup will record the handle of + * the created file into it. The recorded handle can be used as + * long as the containing css remains accessible. + */ + unsigned int file_offset; + + /* + * Fields used for internal bookkeeping. Initialized automatically + * during registration. 
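+	 *
+	 * (Editorial note: a controller typically fills in only name,
+	 * flags and the handler callbacks; these fields are set up when
+	 * the cftype array is registered, e.g. via
+	 * cgroup_add_dfl_cftypes() or cgroup_add_legacy_cftypes().)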
+ */ + struct cgroup_subsys *ss; /* NULL for cgroup core files */ + struct list_head node; /* anchored at ss->cfts */ + struct kernfs_ops *kf_ops; + + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* + * read_u64() is a shortcut for the common case of returning a + * single integer. Use it in place of read() + */ + u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft); + /* + * read_s64() is a signed version of read_u64() + */ + s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); + + /* generic seq_file read interface */ + int (*seq_show)(struct seq_file *sf, void *v); + + /* optional ops, implement all or none */ + void *(*seq_start)(struct seq_file *sf, loff_t *ppos); + void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); + void (*seq_stop)(struct seq_file *sf, void *v); + + /* + * write_u64() is a shortcut for the common case of accepting + * a single integer (as parsed by simple_strtoull) from + * userspace. Use in place of write(); return 0 or error. + */ + int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft, + u64 val); + /* + * write_s64() is a signed version of write_u64() + */ + int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft, + s64 val); + + /* + * write() is the generic write callback which maps directly to + * kernfs write operation and overrides all other operations. + * Maximum write size is determined by ->max_write_len. Use + * of_css/cft() to access the associated css and cft. + */ + ssize_t (*write)(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); + + __poll_t (*poll)(struct kernfs_open_file *of, + struct poll_table_struct *pt); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lockdep_key; +#endif +}; + +/* + * Control Group subsystem type. + * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details + */ +struct cgroup_subsys { + struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); + int (*css_online)(struct cgroup_subsys_state *css); + void (*css_offline)(struct cgroup_subsys_state *css); + void (*css_released)(struct cgroup_subsys_state *css); + void (*css_free)(struct cgroup_subsys_state *css); + void (*css_reset)(struct cgroup_subsys_state *css); + int (*css_extra_stat_show)(struct seq_file *seq, + struct cgroup_subsys_state *css); + + int (*can_attach)(struct cgroup_taskset *tset); + void (*cancel_attach)(struct cgroup_taskset *tset); + void (*attach)(struct cgroup_taskset *tset); + void (*post_attach)(void); + int (*can_fork)(struct task_struct *task); + void (*cancel_fork)(struct task_struct *task); + void (*fork)(struct task_struct *task); + void (*exit)(struct task_struct *task); + void (*release)(struct task_struct *task); + void (*bind)(struct cgroup_subsys_state *root_css); + + bool early_init:1; + + /* + * If %true, the controller, on the default hierarchy, doesn't show + * up in "cgroup.controllers" or "cgroup.subtree_control", is + * implicitly enabled on all cgroups on the default hierarchy, and + * bypasses the "no internal process" constraint. This is for + * utility type controllers which is transparent to userland. + * + * An implicit controller can be stolen from the default hierarchy + * anytime and thus must be okay with offline csses from previous + * hierarchies coexisting with csses for the current one. + */ + bool implicit_on_dfl:1; + + /* + * If %true, the controller, supports threaded mode on the default + * hierarchy. 
In a threaded subtree, both process granularity and + * no-internal-process constraint are ignored and a threaded + * controllers should be able to handle that. + * + * Note that as an implicit controller is automatically enabled on + * all cgroups on the default hierarchy, it should also be + * threaded. implicit && !threaded is not supported. + */ + bool threaded:1; + + /* + * If %false, this subsystem is properly hierarchical - + * configuration, resource accounting and restriction on a parent + * cgroup cover those of its children. If %true, hierarchy support + * is broken in some ways - some subsystems ignore hierarchy + * completely while others are only implemented half-way. + * + * It's now disallowed to create nested cgroups if the subsystem is + * broken and cgroup core will emit a warning message on such + * cases. Eventually, all subsystems will be made properly + * hierarchical and this will go away. + */ + bool broken_hierarchy:1; + bool warned_broken_hierarchy:1; + + /* the following two fields are initialized automtically during boot */ + int id; + const char *name; + + /* optional, initialized automatically during boot if not set */ + const char *legacy_name; + + /* link to parent, protected by cgroup_lock() */ + struct cgroup_root *root; + + /* idr for css->id */ + struct idr css_idr; + + /* + * List of cftypes. Each entry is the first entry of an array + * terminated by zero length name. + */ + struct list_head cfts; + + /* + * Base cftypes which are automatically registered. The two can + * point to the same array. + */ + struct cftype *dfl_cftypes; /* for the default hierarchy */ + struct cftype *legacy_cftypes; /* for the legacy hierarchies */ + + /* + * A subsystem may depend on other subsystems. When such subsystem + * is enabled on a cgroup, the depended-upon subsystems are enabled + * together if available. Subsystems enabled due to dependency are + * not visible to userland until explicitly enabled. The following + * specifies the mask of subsystems that this one depends on. + */ + unsigned int depends_on; +}; + +extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; + +/** + * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups + * @tsk: target task + * + * Allows cgroup operations to synchronize against threadgroup changes + * using a percpu_rw_semaphore. + */ +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + percpu_down_read(&cgroup_threadgroup_rwsem); +} + +/** + * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups + * @tsk: target task + * + * Counterpart of cgroup_threadcgroup_change_begin(). + */ +static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) +{ + percpu_up_read(&cgroup_threadgroup_rwsem); +} + +#else /* CONFIG_CGROUPS */ + +#define CGROUP_SUBSYS_COUNT 0 + +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); +} + +static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} + +#endif /* CONFIG_CGROUPS */ + +#ifdef CONFIG_SOCK_CGROUP_DATA + +/* + * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains + * per-socket cgroup information except for memcg association. + * + * On legacy hierarchies, net_prio and net_cls controllers directly set + * attributes on each sock which can then be tested by the network layer. + * On the default hierarchy, each sock is associated with the cgroup it was + * created in and the networking layer can match the cgroup directly. 
+ * + * To avoid carrying all three cgroup related fields separately in sock, + * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. + * On boot, sock_cgroup_data records the cgroup that the sock was created + * in so that cgroup2 matches can be made; however, once either net_prio or + * net_cls starts being used, the area is overriden to carry prioidx and/or + * classid. The two modes are distinguished by whether the lowest bit is + * set. Clear bit indicates cgroup pointer while set bit prioidx and + * classid. + * + * While userland may start using net_prio or net_cls at any time, once + * either is used, cgroup2 matching no longer works. There is no reason to + * mix the two and this is in line with how legacy and v2 compatibility is + * handled. On mode switch, cgroup references which are already being + * pointed to by socks may be leaked. While this can be remedied by adding + * synchronization around sock_cgroup_data, given that the number of leaked + * cgroups is bound and highly unlikely to be high, this seems to be the + * better trade-off. + */ +struct sock_cgroup_data { + union { +#ifdef __LITTLE_ENDIAN + struct { + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; + u8 padding; + u16 prioidx; + u32 classid; + } __packed; +#else + struct { + u32 classid; + u16 prioidx; + u8 padding; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; + } __packed; +#endif + u64 val; + }; +}; + +/* + * There's a theoretical window where the following accessors race with + * updaters and return part of the previous pointer as the prioidx or + * classid. Such races are short-lived and the result isn't critical. + */ +static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) +{ + /* fallback to 1 which is always the ID of the root cgroup */ + return (skcd->is_data & 1) ? skcd->prioidx : 1; +} + +static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) +{ + /* fallback to 0 which is the unconfigured default classid */ + return (skcd->is_data & 1) ? skcd->classid : 0; +} + +/* + * If invoked concurrently, the updaters may clobber each other. The + * caller is responsible for synchronization. + */ +static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, + u16 prioidx) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_prioidx(&skcd_buf) == prioidx) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.prioidx = prioidx; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, + u32 classid) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_classid(&skcd_buf) == classid) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.classid = classid; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +#else /* CONFIG_SOCK_CGROUP_DATA */ + +struct sock_cgroup_data { +}; + +#endif /* CONFIG_SOCK_CGROUP_DATA */ + +#endif /* _LINUX_CGROUP_DEFS_H */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h new file mode 100644 index 0000000..1843a1a --- /dev/null +++ b/include/linux/cgroup.h @@ -0,0 +1,959 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CGROUP_H +#define _LINUX_CGROUP_H +/* + * cgroup interface + * + * Copyright (C) 2003 BULL SA + * Copyright (C) 2004-2006 Silicon Graphics, Inc. 
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef CONFIG_CGROUPS
+
+/*
+ * All weight knobs on the default hierarchy should use the following min,
+ * default and max values. The default value is the logarithmic center of
+ * MIN and MAX and allows 100x to be expressed in both directions.
+ */
+#define CGROUP_WEIGHT_MIN	1
+#define CGROUP_WEIGHT_DFL	100
+#define CGROUP_WEIGHT_MAX	10000
+
+/* walk only threadgroup leaders */
+#define CSS_TASK_ITER_PROCS	(1U << 0)
+/* walk all threaded css_sets in the domain */
+#define CSS_TASK_ITER_THREADED	(1U << 1)
+
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED	(1U << 16)
+
+/* a css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+	struct cgroup_subsys	*ss;
+	unsigned int		flags;
+
+	struct list_head	*cset_pos;
+	struct list_head	*cset_head;
+
+	struct list_head	*tcset_pos;
+	struct list_head	*tcset_head;
+
+	struct list_head	*task_pos;
+	struct list_head	*tasks_head;
+	struct list_head	*mg_tasks_head;
+	struct list_head	*dying_tasks_head;
+
+	struct list_head	*cur_tasks_head;
+	struct css_set		*cur_cset;
+	struct css_set		*cur_dcset;
+	struct task_struct	*cur_task;
+	struct list_head	iters_node;	/* css_set->task_iters */
+};
+
+extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
+
+#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
+#include
+#undef SUBSYS
+
+#define SUBSYS(_x) \
+	extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
+	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
+#include
+#undef SUBSYS
+
+/**
+ * cgroup_subsys_enabled - fast test on whether a subsys is enabled
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_enabled(ss) \
+	static_branch_likely(&ss ## _enabled_key)
+
+/**
+ * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_on_dfl(ss) \
+	static_branch_likely(&ss ## _on_dfl_key)
+
+bool css_has_online_children(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+					 struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+					     struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+						       struct cgroup_subsys *ss);
+
+struct cgroup *cgroup_get_from_path(const char *path);
+struct cgroup *cgroup_get_from_fd(int fd);
+
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cftype *cfts);
+void cgroup_file_notify(struct cgroup_file *cfile);
+
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
+int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+		     struct pid *pid, struct task_struct *tsk);
+
+void cgroup_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p);
+extern void cgroup_cancel_fork(struct task_struct *p);
+extern void cgroup_post_fork(struct task_struct *p);
+void cgroup_exit(struct task_struct *p);
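+
+/*
+ * A sketch of how the fork path is expected to use the hooks above
+ * (see kernel/fork.c for the authoritative sequence):
+ *
+ *	cgroup_fork(child);
+ *	if (cgroup_can_fork(child) fails)
+ *		call cgroup_cancel_fork(child) and abort;
+ *	...
+ *	cgroup_post_fork(child);
+ *
+ * On teardown, cgroup_exit(), cgroup_release() and cgroup_free() below
+ * run in that order as the task exits and is reaped.
+ */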
+void cgroup_release(struct task_struct *p);
+void cgroup_free(struct task_struct *p);
+
+int cgroup_init_early(void);
+int cgroup_init(void);
+
+int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
+
+/*
+ * Iteration helpers and macros.
+ */
+
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+					   struct cgroup_subsys_state *parent);
+struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
+						    struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
+struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
+						     struct cgroup_subsys_state *css);
+
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
+					 struct cgroup_subsys_state **dst_cssp);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
+					struct cgroup_subsys_state **dst_cssp);
+
+void cgroup_enable_task_cg_lists(void);
+void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
+			 struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
+
+/**
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
+ *
+ * Walk @parent's children. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
+ */
+#define css_for_each_child(pos, parent)					\
+	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
+	     (pos) = css_next_child((pos), (parent)))
+
+/**
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
+ *
+ * Walk @css's descendants. @css is included in the iteration and is the
+ * first node to be visited. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * For example, the following guarantees that a descendant can't escape
+ * state updates of its ancestors.
+ *
+ * my_online(@css)
+ * {
+ *	Lock @css's parent and @css;
+ *	Inherit state from the parent;
+ *	Unlock both.
+ * }
+ *
+ * my_update_state(@css)
+ * {
+ *	css_for_each_descendant_pre(@pos, @css) {
+ *		Lock @pos;
+ *		if (@pos == @css)
+ *			Update @css's state;
+ *		else
+ *			Verify @pos is alive and inherit state from its parent;
+ *		Unlock @pos;
+ *	}
+ * }
+ *
+ * As long as the inheriting step, including checking the parent state, is
+ * enclosed inside @pos locking, double-locking the parent isn't necessary
+ * while inheriting. The state update to the parent is guaranteed to be
+ * visible by walking order and, as long as inheriting operations to the
+ * same @pos are atomic to each other, multiple updates racing each other
+ * still result in the correct state. It's guaranteed that at least one
+ * inheritance happens for any css after the latest update to its parent.
+ *
+ * If checking parent's state requires locking the parent, each inheriting
+ * iteration should lock and unlock both @pos->parent and @pos.
+ *
+ * Alternatively, a subsystem may choose to use a single global lock to
+ * synchronize ->css_online() and ->css_offline() against tree-walking
+ * operations.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
+ */
+#define css_for_each_descendant_pre(pos, css)				\
+	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
+	     (pos) = css_next_descendant_pre((pos), (css)))
+
+/**
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
+ *
+ * Similar to css_for_each_descendant_pre() but performs post-order
+ * traversal instead. @css is included in the iteration and is the last
+ * node to be visited.
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * Note that the walk visibility guarantee example described in pre-order
+ * walk doesn't apply the same way to post-order walks.
+ */
+#define css_for_each_descendant_post(pos, css)				\
+	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
+	     (pos) = css_next_descendant_post((pos), (css)))
+
+/**
+ * cgroup_taskset_for_each - iterate cgroup_taskset
+ * @task: the loop cursor
+ * @dst_css: the destination css
+ * @tset: taskset to iterate
+ *
+ * @tset may contain multiple tasks and they may belong to multiple
+ * processes.
+ *
+ * On the v2 hierarchy, there may be tasks from multiple processes and they
+ * may not share the source or destination csses.
+ *
+ * On traditional hierarchies, when there are multiple tasks in @tset, if a
+ * task of a process is in @tset, all tasks of the process are in @tset.
+ * Also, all are guaranteed to share the same source and destination csses.
+ *
+ * Iteration is not in any specific order.
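+ *
+ * For example, a controller's ->attach() method might walk the set like
+ * this (illustrative only):
+ *
+ *	static void my_attach(struct cgroup_taskset *tset)
+ *	{
+ *		struct task_struct *task;
+ *		struct cgroup_subsys_state *css;
+ *
+ *		cgroup_taskset_for_each(task, css, tset)
+ *			Update the task's state against @css;
+ *	}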
+ */ +#define cgroup_taskset_for_each(task, dst_css, tset) \ + for ((task) = cgroup_taskset_first((tset), &(dst_css)); \ + (task); \ + (task) = cgroup_taskset_next((tset), &(dst_css))) + +/** + * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset + * @leader: the loop cursor + * @dst_css: the destination css + * @tset: taskset to iterate + * + * Iterate threadgroup leaders of @tset. For single-task migrations, @tset + * may not contain any. + */ +#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \ + for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \ + (leader); \ + (leader) = cgroup_taskset_next((tset), &(dst_css))) \ + if ((leader) != (leader)->group_leader) \ + ; \ + else + +/* + * Inline functions. + */ + +/** + * css_get - obtain a reference on the specified css + * @css: target css + * + * The caller must already have a reference. + */ +static inline void css_get(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get(&css->refcnt); +} + +/** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. + */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + +/** + * css_tryget - try to obtain a reference on the specified css + * @css: target css + * + * Obtain a reference on @css unless it already has reached zero and is + * being released. This function doesn't care whether @css is on or + * offline. The caller naturally needs to ensure that @css is accessible + * but doesn't have to be holding a reference on it - IOW, RCU protected + * access is good enough for this function. Returns %true if a reference + * count was successfully obtained; %false otherwise. + */ +static inline bool css_tryget(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + return percpu_ref_tryget(&css->refcnt); + return true; +} + +/** + * css_tryget_online - try to obtain a reference on the specified css if online + * @css: target css + * + * Obtain a reference on @css if it's online. The caller naturally needs + * to ensure that @css is accessible but doesn't have to be holding a + * reference on it - IOW, RCU protected access is good enough for this + * function. Returns %true if a reference count was successfully obtained; + * %false otherwise. + */ +static inline bool css_tryget_online(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + return percpu_ref_tryget_live(&css->refcnt); + return true; +} + +/** + * css_is_dying - test whether the specified css is dying + * @css: target css + * + * Test whether @css is in the process of offlining or already offline. In + * most cases, ->css_online() and ->css_offline() callbacks should be + * enough; however, the actual offline operations are RCU delayed and this + * test returns %true also when @css is scheduled to be offlined. + * + * This is useful, for example, when the use case requires synchronous + * behavior with respect to cgroup removal. cgroup removal schedules css + * offlining but the css can seem alive while the operation is being + * delayed. If the delay affects user visible semantics, this test can be + * used to resolve the situation. 
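+ *
+ * For example (subsystem id and action are illustrative):
+ *
+ *	rcu_read_lock();
+ *	css = task_css(task, my_subsys_id);
+ *	if (!css_is_dying(css))
+ *		act on @css;
+ *	rcu_read_unlock();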
+ */ +static inline bool css_is_dying(struct cgroup_subsys_state *css) +{ + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); +} + +/** + * css_put - put a css reference + * @css: target css + * + * Put a reference obtained via css_get() and css_tryget_online(). + */ +static inline void css_put(struct cgroup_subsys_state *css) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put(&css->refcnt); +} + +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). + */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + +static inline void cgroup_get(struct cgroup *cgrp) +{ + css_get(&cgrp->self); +} + +static inline bool cgroup_tryget(struct cgroup *cgrp) +{ + return css_tryget(&cgrp->self); +} + +static inline void cgroup_put(struct cgroup *cgrp) +{ + css_put(&cgrp->self); +} + +/** + * task_css_set_check - obtain a task's css_set with extra access conditions + * @task: the task to obtain css_set for + * @__c: extra condition expression to be passed to rcu_dereference_check() + * + * A task's css_set is RCU protected, initialized and exited while holding + * task_lock(), and can only be modified while holding both cgroup_mutex + * and task_lock() while the task is alive. This macro verifies that the + * caller is inside proper critical section and returns @task's css_set. + * + * The caller can also specify additional allowed conditions via @__c, such + * as locks used during the cgroup_subsys::attach() methods. + */ +#ifdef CONFIG_PROVE_RCU +extern struct mutex cgroup_mutex; +extern spinlock_t css_set_lock; +#define task_css_set_check(task, __c) \ + rcu_dereference_check((task)->cgroups, \ + lockdep_is_held(&cgroup_mutex) || \ + lockdep_is_held(&css_set_lock) || \ + ((task)->flags & PF_EXITING) || (__c)) +#else +#define task_css_set_check(task, __c) \ + rcu_dereference((task)->cgroups) +#endif + +/** + * task_css_check - obtain css for (task, subsys) w/ extra access conds + * @task: the target task + * @subsys_id: the target subsystem ID + * @__c: extra condition expression to be passed to rcu_dereference_check() + * + * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The + * synchronization rules are the same as task_css_set_check(). + */ +#define task_css_check(task, subsys_id, __c) \ + task_css_set_check((task), (__c))->subsys[(subsys_id)] + +/** + * task_css_set - obtain a task's css_set + * @task: the task to obtain css_set for + * + * See task_css_set_check(). + */ +static inline struct css_set *task_css_set(struct task_struct *task) +{ + return task_css_set_check(task, false); +} + +/** + * task_css - obtain css for (task, subsys) + * @task: the target task + * @subsys_id: the target subsystem ID + * + * See task_css_check(). + */ +static inline struct cgroup_subsys_state *task_css(struct task_struct *task, + int subsys_id) +{ + return task_css_check(task, subsys_id, false); +} + +/** + * task_get_css - find and get the css for (task, subsys) + * @task: the target task + * @subsys_id: the target subsystem ID + * + * Find the css for the (@task, @subsys_id) combination, increment a + * reference on and return it. This function is guaranteed to return a + * valid css. The returned css may already have been offlined. 
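+ *
+ * A typical pairing with css_put() looks like this (the subsystem id used
+ * here is just an example):
+ *
+ *	css = task_get_css(task, cpu_cgrp_id);
+ *	... use css ...
+ *	css_put(css);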
+ */ +static inline struct cgroup_subsys_state * +task_get_css(struct task_struct *task, int subsys_id) +{ + struct cgroup_subsys_state *css; + + rcu_read_lock(); + while (true) { + css = task_css(task, subsys_id); + /* + * Can't use css_tryget_online() here. A task which has + * PF_EXITING set may stay associated with an offline css. + * If such task calls this function, css_tryget_online() + * will keep failing. + */ + if (likely(css_tryget(css))) + break; + cpu_relax(); + } + rcu_read_unlock(); + return css; +} + +/** + * task_css_is_root - test whether a task belongs to the root css + * @task: the target task + * @subsys_id: the target subsystem ID + * + * Test whether @task belongs to the root css on the specified subsystem. + * May be invoked in any context. + */ +static inline bool task_css_is_root(struct task_struct *task, int subsys_id) +{ + return task_css_check(task, subsys_id, true) == + init_css_set.subsys[subsys_id]; +} + +static inline struct cgroup *task_cgroup(struct task_struct *task, + int subsys_id) +{ + return task_css(task, subsys_id)->cgroup; +} + +static inline struct cgroup *task_dfl_cgroup(struct task_struct *task) +{ + return task_css_set(task)->dfl_cgrp; +} + +static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) +{ + struct cgroup_subsys_state *parent_css = cgrp->self.parent; + + if (parent_css) + return container_of(parent_css, struct cgroup, self); + return NULL; +} + +/** + * cgroup_is_descendant - test ancestry + * @cgrp: the cgroup to be tested + * @ancestor: possible ancestor of @cgrp + * + * Test whether @cgrp is a descendant of @ancestor. It also returns %true + * if @cgrp == @ancestor. This function is safe to call as long as @cgrp + * and @ancestor are accessible. + */ +static inline bool cgroup_is_descendant(struct cgroup *cgrp, + struct cgroup *ancestor) +{ + if (cgrp->root != ancestor->root || cgrp->level < ancestor->level) + return false; + return cgrp->ancestor_ids[ancestor->level] == ancestor->id; +} + +/** + * cgroup_ancestor - find ancestor of cgroup + * @cgrp: cgroup to find ancestor of + * @ancestor_level: level of ancestor to find starting from root + * + * Find ancestor of cgroup at specified level starting from root if it exists + * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at + * @ancestor_level. + * + * This function is safe to call as long as @cgrp is accessible. + */ +static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, + int ancestor_level) +{ + if (cgrp->level < ancestor_level) + return NULL; + while (cgrp && cgrp->level > ancestor_level) + cgrp = cgroup_parent(cgrp); + return cgrp; +} + +/** + * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry + * @task: the task to be tested + * @ancestor: possible ancestor of @task's cgroup + * + * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. + * It follows all the same rules as cgroup_is_descendant, and only applies + * to the default hierarchy. 
+ */ +static inline bool task_under_cgroup_hierarchy(struct task_struct *task, + struct cgroup *ancestor) +{ + struct css_set *cset = task_css_set(task); + + return cgroup_is_descendant(cset->dfl_cgrp, ancestor); +} + +/* no synchronization, the result can only be used as a hint */ +static inline bool cgroup_is_populated(struct cgroup *cgrp) +{ + return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children + + cgrp->nr_populated_threaded_children; +} + +/* returns ino associated with a cgroup */ +static inline ino_t cgroup_ino(struct cgroup *cgrp) +{ + return cgrp->kn->id.ino; +} + +/* cft/css accessors for cftype->write() operation */ +static inline struct cftype *of_cft(struct kernfs_open_file *of) +{ + return of->kn->priv; +} + +struct cgroup_subsys_state *of_css(struct kernfs_open_file *of); + +/* cft/css accessors for cftype->seq_*() operations */ +static inline struct cftype *seq_cft(struct seq_file *seq) +{ + return of_cft(seq->private); +} + +static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq) +{ + return of_css(seq->private); +} + +/* + * Name / path handling functions. All are thin wrappers around the kernfs + * counterparts and can be called under any context. + */ + +static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen) +{ + return kernfs_name(cgrp->kn, buf, buflen); +} + +static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen) +{ + return kernfs_path(cgrp->kn, buf, buflen); +} + +static inline void pr_cont_cgroup_name(struct cgroup *cgrp) +{ + pr_cont_kernfs_name(cgrp->kn); +} + +static inline void pr_cont_cgroup_path(struct cgroup *cgrp) +{ + pr_cont_kernfs_path(cgrp->kn); +} + +static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) +{ + return &cgrp->psi; +} + +static inline void cgroup_init_kthreadd(void) +{ + /* + * kthreadd is inherited by all kthreads, keep it in the root so + * that the new kthreads are guaranteed to stay in the root until + * initialization is finished. + */ + current->no_cgroup_migration = 1; +} + +static inline void cgroup_kthread_ready(void) +{ + /* + * This kthread finished initialization. The creator should have + * set PF_NO_SETAFFINITY if this kthread should stay in the root. 
+ */ + current->no_cgroup_migration = 0; +} + +static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) +{ + return &cgrp->kn->id; +} + +void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, + char *buf, size_t buflen); +#else /* !CONFIG_CGROUPS */ + +struct cgroup_subsys_state; +struct cgroup; + +static inline void css_get(struct cgroup_subsys_state *css) {} +static inline void css_put(struct cgroup_subsys_state *css) {} +static inline int cgroup_attach_task_all(struct task_struct *from, + struct task_struct *t) { return 0; } +static inline int cgroupstats_build(struct cgroupstats *stats, + struct dentry *dentry) { return -EINVAL; } + +static inline void cgroup_fork(struct task_struct *p) {} +static inline int cgroup_can_fork(struct task_struct *p) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p) {} +static inline void cgroup_post_fork(struct task_struct *p) {} +static inline void cgroup_exit(struct task_struct *p) {} +static inline void cgroup_release(struct task_struct *p) {} +static inline void cgroup_free(struct task_struct *p) {} + +static inline int cgroup_init_early(void) { return 0; } +static inline int cgroup_init(void) { return 0; } +static inline void cgroup_init_kthreadd(void) {} +static inline void cgroup_kthread_ready(void) {} +static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) +{ + return NULL; +} + +static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) +{ + return NULL; +} + +static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) +{ + return NULL; +} + +static inline bool task_under_cgroup_hierarchy(struct task_struct *task, + struct cgroup *ancestor) +{ + return true; +} + +static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, + char *buf, size_t buflen) {} +#endif /* !CONFIG_CGROUPS */ + +#ifdef CONFIG_CGROUPS +/* + * cgroup scalable recursive statistics. + */ +void cgroup_rstat_updated(struct cgroup *cgrp, int cpu); +void cgroup_rstat_flush(struct cgroup *cgrp); + +/* + * Basic resource stats. 
+ */ +#ifdef CONFIG_CGROUP_CPUACCT +void cpuacct_charge(struct task_struct *tsk, u64 cputime); +void cpuacct_account_field(struct task_struct *tsk, int index, u64 val); +#else +static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +static inline void cpuacct_account_field(struct task_struct *tsk, int index, + u64 val) {} +#endif + +void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec); +void __cgroup_account_cputime_field(struct cgroup *cgrp, + enum cpu_usage_stat index, u64 delta_exec); + +static inline void cgroup_account_cputime(struct task_struct *task, + u64 delta_exec) +{ + struct cgroup *cgrp; + + cpuacct_charge(task, delta_exec); + + rcu_read_lock(); + cgrp = task_dfl_cgroup(task); + if (cgroup_parent(cgrp)) + __cgroup_account_cputime(cgrp, delta_exec); + rcu_read_unlock(); +} + +static inline void cgroup_account_cputime_field(struct task_struct *task, + enum cpu_usage_stat index, + u64 delta_exec) +{ + struct cgroup *cgrp; + + cpuacct_account_field(task, index, delta_exec); + + rcu_read_lock(); + cgrp = task_dfl_cgroup(task); + if (cgroup_parent(cgrp)) + __cgroup_account_cputime_field(cgrp, index, delta_exec); + rcu_read_unlock(); +} + +#else /* CONFIG_CGROUPS */ + +static inline void cgroup_account_cputime(struct task_struct *task, + u64 delta_exec) {} +static inline void cgroup_account_cputime_field(struct task_struct *task, + enum cpu_usage_stat index, + u64 delta_exec) {} + +#endif /* CONFIG_CGROUPS */ + +/* + * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data + * definition in cgroup-defs.h. + */ +#ifdef CONFIG_SOCK_CGROUP_DATA + +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) +extern spinlock_t cgroup_sk_update_lock; +#endif + +void cgroup_sk_alloc_disable(void); +void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); +void cgroup_sk_free(struct sock_cgroup_data *skcd); + +static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) +{ +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) + unsigned long v; + + /* + * @skcd->val is 64bit but the following is safe on 32bit too as we + * just need the lower ulong to be written and read atomically. 
+ */
+	v = READ_ONCE(skcd->val);
+
+	if (v & 3)
+		return &cgrp_dfl_root.cgrp;
+
+	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
+#else
+	return (struct cgroup *)(unsigned long)skcd->val;
+#endif
+}
+
+#else /* CONFIG_SOCK_CGROUP_DATA */
+
+static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
+
+#endif /* CONFIG_SOCK_CGROUP_DATA */
+
+struct cgroup_namespace {
+	refcount_t		count;
+	struct ns_common	ns;
+	struct user_namespace	*user_ns;
+	struct ucounts		*ucounts;
+	struct css_set		*root_cset;
+};
+
+extern struct cgroup_namespace init_cgroup_ns;
+
+#ifdef CONFIG_CGROUPS
+
+void free_cgroup_ns(struct cgroup_namespace *ns);
+
+struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
+					struct user_namespace *user_ns,
+					struct cgroup_namespace *old_ns);
+
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+		   struct cgroup_namespace *ns);
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline struct cgroup_namespace *
+copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
+	       struct cgroup_namespace *old_ns)
+{
+	return old_ns;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+	if (ns)
+		refcount_inc(&ns->count);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+	if (ns && refcount_dec_and_test(&ns->count))
+		free_cgroup_ns(ns);
+}
+
+#ifdef CONFIG_CGROUPS
+
+void cgroup_enter_frozen(void);
+void cgroup_leave_frozen(bool always_leave);
+void cgroup_update_frozen(struct cgroup *cgrp);
+void cgroup_freeze(struct cgroup *cgrp, bool freeze);
+void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
+				 struct cgroup *dst);
+
+static inline bool cgroup_task_freeze(struct task_struct *task)
+{
+	bool ret;
+
+	if (task->flags & PF_KTHREAD)
+		return false;
+
+	rcu_read_lock();
+	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+	return task->frozen;
+}
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void cgroup_enter_frozen(void) { }
+static inline void cgroup_leave_frozen(bool always_leave) { }
+static inline bool cgroup_task_freeze(struct task_struct *task)
+{
+	return false;
+}
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+	return false;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
+#ifdef CONFIG_CGROUP_BPF
+static inline void cgroup_bpf_get(struct cgroup *cgrp)
+{
+	percpu_ref_get(&cgrp->bpf.refcnt);
+}
+
+static inline void cgroup_bpf_put(struct cgroup *cgrp)
+{
+	percpu_ref_put(&cgrp->bpf.refcnt);
+}
+
+#else /* CONFIG_CGROUP_BPF */
+
+static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h
new file mode 100644
index 0000000..80edae0
--- /dev/null
+++ b/include/linux/cgroup_rdma.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Parav Pandit
+ */
+
+#ifndef _CGROUP_RDMA_H
+#define _CGROUP_RDMA_H
+
+#include
+
+enum rdmacg_resource_type {
+	RDMACG_RESOURCE_HCA_HANDLE,
+	RDMACG_RESOURCE_HCA_OBJECT,
+	RDMACG_RESOURCE_MAX,
+};
+
+#ifdef CONFIG_CGROUP_RDMA
+
+struct rdma_cgroup {
+	struct cgroup_subsys_state	css;
+
+	/*
+	 * head to keep track of all resource pools
+	 * that belong to this cgroup.
+	 */
+	struct list_head		rpools;
+};
+
+struct rdmacg_device {
+	struct list_head	dev_node;
+	struct list_head	rpools;
+	char			*name;
+};
+
+/*
+ * APIs for RDMA/IB stack to publish when a device wants to
+ * participate in resource accounting
+ */
+void rdmacg_register_device(struct rdmacg_device *device);
+void rdmacg_unregister_device(struct rdmacg_device *device);
+
+/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */
+int rdmacg_try_charge(struct rdma_cgroup **rdmacg,
+		      struct rdmacg_device *device,
+		      enum rdmacg_resource_type index);
+void rdmacg_uncharge(struct rdma_cgroup *cg,
+		     struct rdmacg_device *device,
+		     enum rdmacg_resource_type index);
+#endif /* CONFIG_CGROUP_RDMA */
+#endif /* _CGROUP_RDMA_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
new file mode 100644
index 0000000..acb77dc
--- /dev/null
+++ b/include/linux/cgroup_subsys.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * List of cgroup subsystems.
+ *
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
+ */
+
+/*
+ * This file *must* be included with SUBSYS() defined.
+ */
+
+#if IS_ENABLED(CONFIG_CPUSETS)
+SUBSYS(cpuset)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_SCHED)
+SUBSYS(cpu)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_CPUACCT)
+SUBSYS(cpuacct)
+#endif
+
+#if IS_ENABLED(CONFIG_BLK_CGROUP)
+SUBSYS(io)
+#endif
+
+#if IS_ENABLED(CONFIG_MEMCG)
+SUBSYS(memory)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_DEVICE)
+SUBSYS(devices)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_FREEZER)
+SUBSYS(freezer)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
+SUBSYS(net_cls)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_PERF)
+SUBSYS(perf_event)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+SUBSYS(net_prio)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_HUGETLB)
+SUBSYS(hugetlb)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_PIDS)
+SUBSYS(pids)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_RDMA)
+SUBSYS(rdma)
+#endif
+
+/*
+ * The following subsystems are not supported on the default hierarchy.
+ */
+#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
+SUBSYS(debug)
+#endif
+
+/*
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
+ */
diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h
new file mode 100644
index 0000000..b3233e8
--- /dev/null
+++ b/include/linux/circ_buf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See Documentation/core-api/circular-buffers.rst for more information.
+ */
+
+#ifndef _LINUX_CIRC_BUF_H
+#define _LINUX_CIRC_BUF_H 1
+
+struct circ_buf {
+	char *buf;
+	int head;
+	int tail;
+};
+
+/* Return count in buffer. */
+#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
+
+/* Return space available, 0..size-1. We always leave one free char
+   as a completely full buffer has head == tail, which is the same as
+   empty. */
+#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
+
+/* Return count up to the end of the buffer. Carefully avoid
+   accessing head and tail more than once, so they can change
+   underneath us without returning inconsistent results. */
+#define CIRC_CNT_TO_END(head,tail,size) \
+	({int end = (size) - (tail); \
+	  int n = ((head) + end) & ((size)-1); \
+	  n < end ? n : end;})
+
+/* Return space available up to the end of the buffer.
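+   For example, with size = 8, head = 6 and tail = 2, CIRC_SPACE() is 3
+   free slots, but only the two at indices 6 and 7 lie before the
+   wrap-around, so this macro evaluates to 2.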
*/ +#define CIRC_SPACE_TO_END(head,tail,size) \ + ({int end = (size) - 1 - (head); \ + int n = (end + (tail)) & ((size)-1); \ + n <= end ? n : end+1;}) + +#endif /* _LINUX_CIRC_BUF_H */ diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h new file mode 100644 index 0000000..5f5730c --- /dev/null +++ b/include/linux/cleancache.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLEANCACHE_H +#define _LINUX_CLEANCACHE_H + +#include +#include +#include + +#define CLEANCACHE_NO_POOL -1 +#define CLEANCACHE_NO_BACKEND -2 +#define CLEANCACHE_NO_BACKEND_SHARED -3 + +#define CLEANCACHE_KEY_MAX 6 + +/* + * cleancache requires every file with a page in cleancache to have a + * unique key unless/until the file is removed/truncated. For some + * filesystems, the inode number is unique, but for "modern" filesystems + * an exportable filehandle is required (see exportfs.h) + */ +struct cleancache_filekey { + union { + ino_t ino; + __u32 fh[CLEANCACHE_KEY_MAX]; + u32 key[CLEANCACHE_KEY_MAX]; + } u; +}; + +struct cleancache_ops { + int (*init_fs)(size_t); + int (*init_shared_fs)(uuid_t *uuid, size_t); + int (*get_page)(int, struct cleancache_filekey, + pgoff_t, struct page *); + void (*put_page)(int, struct cleancache_filekey, + pgoff_t, struct page *); + void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); + void (*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); +}; + +extern int cleancache_register_ops(const struct cleancache_ops *ops); +extern void __cleancache_init_fs(struct super_block *); +extern void __cleancache_init_shared_fs(struct super_block *); +extern int __cleancache_get_page(struct page *); +extern void __cleancache_put_page(struct page *); +extern void __cleancache_invalidate_page(struct address_space *, struct page *); +extern void __cleancache_invalidate_inode(struct address_space *); +extern void __cleancache_invalidate_fs(struct super_block *); + +#ifdef CONFIG_CLEANCACHE +#define cleancache_enabled (1) +static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) +{ + return mapping->host->i_sb->cleancache_poolid >= 0; +} +static inline bool cleancache_fs_enabled(struct page *page) +{ + return cleancache_fs_enabled_mapping(page->mapping); +} +#else +#define cleancache_enabled (0) +#define cleancache_fs_enabled(_page) (0) +#define cleancache_fs_enabled_mapping(_page) (0) +#endif + +/* + * The shim layer provided by these inline functions allows the compiler + * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE + * is disabled, to a single global variable check if CONFIG_CLEANCACHE + * is enabled but no cleancache "backend" has dynamically enabled it, + * and, for the most frequent cleancache ops, to a single global variable + * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled + * and a cleancache backend has dynamically enabled cleancache, but the + * filesystem referenced by that cleancache op has not enabled cleancache. + * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially + * no measurable performance impact. 
+ */ + +static inline void cleancache_init_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_init_fs(sb); +} + +static inline void cleancache_init_shared_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_init_shared_fs(sb); +} + +static inline int cleancache_get_page(struct page *page) +{ + if (cleancache_enabled && cleancache_fs_enabled(page)) + return __cleancache_get_page(page); + return -1; +} + +static inline void cleancache_put_page(struct page *page) +{ + if (cleancache_enabled && cleancache_fs_enabled(page)) + __cleancache_put_page(page); +} + +static inline void cleancache_invalidate_page(struct address_space *mapping, + struct page *page) +{ + /* careful... page->mapping is NULL sometimes when this is called */ + if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) + __cleancache_invalidate_page(mapping, page); +} + +static inline void cleancache_invalidate_inode(struct address_space *mapping) +{ + if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) + __cleancache_invalidate_inode(mapping); +} + +static inline void cleancache_invalidate_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_invalidate_fs(sb); +} + +#endif /* _LINUX_CLEANCACHE_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h new file mode 100644 index 0000000..2fdfe80 --- /dev/null +++ b/include/linux/clk-provider.h @@ -0,0 +1,1114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2010-2011 Jeremy Kerr + * Copyright (C) 2011-2012 Linaro Ltd + */ +#ifndef __LINUX_CLK_PROVIDER_H +#define __LINUX_CLK_PROVIDER_H + +#include +#include + +/* + * flags used across common struct clk. these flags should only affect the + * top-level framework. custom flags for dealing with hardware specifics + * belong in struct clk_foo + * + * Please update clk_flags[] in drivers/clk/clk.c when making changes here! + */ +#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ +#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ +#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ +#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ + /* unused */ + /* unused */ +#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ +#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ +#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ +#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ +#define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ +#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ +/* parents need enable during gate/ungate, set rate and re-parent */ +#define CLK_OPS_PARENT_ENABLE BIT(12) +/* duty cycle call may be forwarded to the parent clock */ +#define CLK_DUTY_CYCLE_PARENT BIT(13) + +struct clk; +struct clk_hw; +struct clk_core; +struct dentry; + +/** + * struct clk_rate_request - Structure encoding the clk constraints that + * a clock user might require. + * + * @rate: Requested clock rate. This field will be adjusted by + * clock drivers according to hardware capabilities. + * @min_rate: Minimum rate imposed by clk users. + * @max_rate: Maximum rate imposed by clk users. + * @best_parent_rate: The best parent rate a parent can provide to fulfill the + * requested constraints. + * @best_parent_hw: The most appropriate parent clock that fulfills the + * requested constraints. 
+ *
+ */
+struct clk_rate_request {
+	unsigned long rate;
+	unsigned long min_rate;
+	unsigned long max_rate;
+	unsigned long best_parent_rate;
+	struct clk_hw *best_parent_hw;
+};
+
+/**
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num:	Numerator of the duty cycle ratio
+ * @den:	Denominator of the duty cycle ratio
+ */
+struct clk_duty {
+	unsigned int num;
+	unsigned int den;
+};
+
+/**
+ * struct clk_ops - Callback operations for hardware clocks; these are to
+ * be provided by the clock implementation, and will be called by drivers
+ * through the clk_* api.
+ *
+ * @prepare:	Prepare the clock for enabling. This must not return until
+ *		the clock is fully prepared, and it's safe to call clk_enable.
+ *		This callback is intended to allow clock implementations to
+ *		do any initialisation that may sleep. Called with
+ *		prepare_lock held.
+ *
+ * @unprepare:	Release the clock from its prepared state. This will typically
+ *		undo any work done in the @prepare callback. Called with
+ *		prepare_lock held.
+ *
+ * @is_prepared: Queries the hardware to determine if the clock is prepared.
+ *		This function is allowed to sleep. Optional, if this op is not
+ *		set then the prepare count will be used.
+ *
+ * @unprepare_unused: Unprepare the clock atomically. Only called from
+ *		clk_disable_unused for prepare clocks with special needs.
+ *		Called with prepare mutex held. This function may sleep.
+ *
+ * @enable:	Enable the clock atomically. This must not return until the
+ *		clock is generating a valid clock signal, usable by consumer
+ *		devices. Called with enable_lock held. This function must not
+ *		sleep.
+ *
+ * @disable:	Disable the clock atomically. Called with enable_lock held.
+ *		This function must not sleep.
+ *
+ * @is_enabled:	Queries the hardware to determine if the clock is enabled.
+ *		This function must not sleep. Optional, if this op is not
+ *		set then the enable count will be used.
+ *
+ * @disable_unused: Disable the clock atomically. Only called from
+ *		clk_disable_unused for gate clocks with special needs.
+ *		Called with enable_lock held. This function must not
+ *		sleep.
+ *
+ * @save_context: Save the context of the clock in preparation for poweroff.
+ *
+ * @restore_context: Restore the context of the clock after a restoration
+ *		of power.
+ *
+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
+ *		parent rate is an input parameter. It is up to the caller to
+ *		ensure that the prepare_mutex is held across this call.
+ *		Returns the calculated rate. Optional, but recommended - if
+ *		this op is not set then clock rate will be initialized to 0.
+ *
+ * @round_rate:	Given a target rate as input, returns the closest rate actually
+ *		supported by the clock. The parent rate is an input/output
+ *		parameter.
+ *
+ * @determine_rate: Given a target rate as input, returns the closest rate
+ *		actually supported by the clock, and optionally the parent clock
+ *		that should be used to provide the clock rate.
+ *
+ * @set_parent:	Change the input source of this clock; for clocks with multiple
+ *		possible parents specify a new parent by passing in the index
+ *		as a u8 corresponding to the parent in either the .parent_names
+ *		or .parents arrays. This function in effect translates an
+ *		array index into the value programmed into the hardware.
+ *		Returns 0 on success, a negative error code otherwise.
+ *
+ * @get_parent:	Queries the hardware to determine the parent of a clock. The
+ *		return value is a u8 which specifies the index corresponding to
+ *		the parent clock. This index can be applied to either the
+ *		.parent_names or .parents arrays. In short, this function
+ *		translates the parent value read from hardware into an array
+ *		index. Currently only called when the clock is initialized by
+ *		__clk_init. This callback is mandatory for clocks with
+ *		multiple parents. It is optional (and unnecessary) for clocks
+ *		with 0 or 1 parents.
+ *
+ * @set_rate:	Change the rate of this clock. The requested rate is specified
+ *		by the second argument, which should typically be the return
+ *		of .round_rate call. The third argument gives the parent rate
+ *		which is likely helpful for most .set_rate implementation.
+ *		Returns 0 on success, a negative error code otherwise.
+ *
+ * @set_rate_and_parent: Change the rate and the parent of this clock. The
+ *		requested rate is specified by the second argument, which
+ *		should typically be the return of .round_rate call. The
+ *		third argument gives the parent rate which is likely helpful
+ *		for most .set_rate_and_parent implementation. The fourth
+ *		argument gives the parent index. This callback is optional (and
+ *		unnecessary) for clocks with 0 or 1 parents as well as
+ *		for clocks that can tolerate switching the rate and the parent
+ *		separately via calls to .set_parent and .set_rate.
+ *		Returns 0 on success, a negative error code otherwise.
+ *
+ * @recalc_accuracy: Recalculate the accuracy of this clock. The clock accuracy
+ *		is expressed in ppb (parts per billion). The parent accuracy is
+ *		an input parameter.
+ *		Returns the calculated accuracy. Optional - if this op is not
+ *		set then clock accuracy will be initialized to parent accuracy
+ *		or 0 (perfect clock) if clock has no parent.
+ *
+ * @get_phase:	Queries the hardware to get the current phase of a clock.
+ *		Returned values are 0-359 degrees on success, negative
+ *		error codes on failure.
+ *
+ * @set_phase:	Shift the phase of this clock signal in degrees specified
+ *		by the second argument. Valid values for degrees are
+ *		0-359. Return 0 on success, otherwise a negative error code.
+ *
+ * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
+ *		of a clock. The returned denominator cannot be 0 and must be
+ *		greater than or equal to the numerator.
+ *
+ * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
+ *		the numerator (2nd argument) and denominator (3rd argument).
+ *		Arguments must form a valid ratio (denominator > 0
+ *		and >= numerator). Return 0 on success, otherwise a negative
+ *		error code.
+ *
+ * @init:	Perform platform-specific initialization magic.
+ *		This is not used by any of the basic clock types.
+ *		Please consider other ways of solving initialization problems
+ *		before using this callback, as its use is discouraged.
+ *
+ * @debug_init:	Set up type-specific debugfs entries for this clock. This
+ *		is called once, after the debugfs directory entry for this
+ *		clock has been created. The dentry pointer representing that
+ *		directory is provided as an argument. Called with
+ *		prepare_lock held. Returns 0 on success, a negative error
+ *		code otherwise.
+ *
+ *
+ * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
+ * implementations to split any work between atomic (enable) and sleepable
+ * (prepare) contexts. If enabling a clock requires code that might sleep,
+ * this must be done in clk_prepare. Clock enable code that will never be
+ * called in a sleepable context may be implemented in clk_enable.
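+ *
+ * A consumer-side sketch of that split, using the clk.h consumer API:
+ *
+ *	clk_prepare(clk);	(may sleep)
+ *	clk_enable(clk);	(atomic)
+ *	...
+ *	clk_disable(clk);
+ *	clk_unprepare(clk);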
+ * + * Typically, drivers will call clk_prepare when a clock may be needed later + * (eg. when a device is opened), and clk_enable when the clock is actually + * required (eg. from an interrupt). Note that clk_prepare MUST have been + * called before clk_enable. + */ +struct clk_ops { + int (*prepare)(struct clk_hw *hw); + void (*unprepare)(struct clk_hw *hw); + int (*is_prepared)(struct clk_hw *hw); + void (*unprepare_unused)(struct clk_hw *hw); + int (*enable)(struct clk_hw *hw); + void (*disable)(struct clk_hw *hw); + int (*is_enabled)(struct clk_hw *hw); + void (*disable_unused)(struct clk_hw *hw); + int (*save_context)(struct clk_hw *hw); + void (*restore_context)(struct clk_hw *hw); + unsigned long (*recalc_rate)(struct clk_hw *hw, + unsigned long parent_rate); + long (*round_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate); + int (*determine_rate)(struct clk_hw *hw, + struct clk_rate_request *req); + int (*set_parent)(struct clk_hw *hw, u8 index); + u8 (*get_parent)(struct clk_hw *hw); + int (*set_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); + int (*set_rate_and_parent)(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate, u8 index); + unsigned long (*recalc_accuracy)(struct clk_hw *hw, + unsigned long parent_accuracy); + int (*get_phase)(struct clk_hw *hw); + int (*set_phase)(struct clk_hw *hw, int degrees); + int (*get_duty_cycle)(struct clk_hw *hw, + struct clk_duty *duty); + int (*set_duty_cycle)(struct clk_hw *hw, + struct clk_duty *duty); + void (*init)(struct clk_hw *hw); + void (*debug_init)(struct clk_hw *hw, struct dentry *dentry); +}; + +/** + * struct clk_parent_data - clk parent information + * @hw: parent clk_hw pointer (used for clk providers with internal clks) + * @fw_name: parent name local to provider registering clk + * @name: globally unique parent name (used as a fallback) + * @index: parent index local to provider registering clk (if @fw_name absent) + */ +struct clk_parent_data { + const struct clk_hw *hw; + const char *fw_name; + const char *name; + int index; +}; + +/** + * struct clk_init_data - holds init data that's common to all clocks and is + * shared between the clock provider and the common clock framework. + * + * @name: clock name + * @ops: operations this clock supports + * @parent_names: array of string names for all possible parents + * @parent_data: array of parent data for all possible parents (when some + * parents are external to the clk controller) + * @parent_hws: array of pointers to all possible parents (when all parents + * are internal to the clk controller) + * @num_parents: number of possible parents + * @flags: framework-level hints and quirks + */ +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + /* Only one of the following three should be assigned */ + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + unsigned long flags; +}; + +/** + * struct clk_hw - handle for traversing from a struct clk to its corresponding + * hardware-specific structure. 
struct clk_hw should be declared within struct
+ * clk_foo and then referenced by the struct clk instance that uses struct
+ * clk_foo's clk_ops
+ *
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
+ *
+ * @init: pointer to struct clk_init_data that contains the init data shared
+ * with the common clock framework. This pointer will be set to NULL once
+ * a clk_register() variant is called on this clk_hw pointer.
+ */
+struct clk_hw {
+	struct clk_core *core;
+	struct clk *clk;
+	const struct clk_init_data *init;
+};
+
+/*
+ * DOC: Basic clock implementations common to many platforms
+ *
+ * Each basic clock hardware type is comprised of a structure describing the
+ * clock hardware, implementations of the relevant callbacks in struct clk_ops,
+ * unique flags for that hardware type, a registration function and an
+ * alternative macro for static initialization
+ */
+
+/**
+ * struct clk_fixed_rate - fixed-rate clock
+ * @hw:		handle between common and hardware-specific interfaces
+ * @fixed_rate:	constant frequency of clock
+ */
+struct clk_fixed_rate {
+	struct clk_hw	hw;
+	unsigned long	fixed_rate;
+	unsigned long	fixed_accuracy;
+};
+
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
+extern const struct clk_ops clk_fixed_rate_ops;
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		unsigned long fixed_rate);
+struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		unsigned long fixed_rate);
+struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		unsigned long fixed_rate, unsigned long fixed_accuracy);
+void clk_unregister_fixed_rate(struct clk *clk);
+struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		unsigned long fixed_rate, unsigned long fixed_accuracy);
+void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
+
+void of_fixed_clk_setup(struct device_node *np);
+
+/**
+ * struct clk_gate - gating clock
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	register controlling gate
+ * @bit_idx:	single bit controlling gate
+ * @flags:	hardware-specific flags
+ * @lock:	register lock
+ *
+ * Clock which can gate its output. Implements .enable & .disable
+ *
+ * Flags:
+ * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
+ *	enable the clock. Setting this flag does the opposite: setting the bit
+ *	disables the clock and clearing it enables the clock.
+ * CLK_GATE_HIWORD_MASK - The gate settings are only in lower 16-bit
+ *	of this register, and mask of gate bits are in higher 16-bit of this
+ *	register. While setting the gate bits, higher 16-bit should also be
+ *	updated to indicate changing gate bits.
+ * CLK_GATE_BIG_ENDIAN - by default little endian register accesses are used for
+ *	the gate register. Setting this flag makes the register accesses big
+ *	endian.
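+ *
+ * A registration sketch (device, clock names, register offset and bit
+ * index below are made-up values):
+ *
+ *	hw = clk_hw_register_gate(dev, "periph_gate", "periph_pll", 0,
+ *				  base + 0x10, 5, 0, &my_lock);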
+ */
+struct clk_gate {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	u8		bit_idx;
+	u8		flags;
+	spinlock_t	*lock;
+};
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+#define CLK_GATE_SET_TO_DISABLE		BIT(0)
+#define CLK_GATE_HIWORD_MASK		BIT(1)
+#define CLK_GATE_BIG_ENDIAN		BIT(2)
+
+extern const struct clk_ops clk_gate_ops;
+struct clk *clk_register_gate(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 bit_idx,
+		u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 bit_idx,
+		u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
+void clk_hw_unregister_gate(struct clk_hw *hw);
+int clk_gate_is_enabled(struct clk_hw *hw);
+
+struct clk_div_table {
+	unsigned int	val;
+	unsigned int	div;
+};
+
+/**
+ * struct clk_divider - adjustable divider clock
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	register containing the divider
+ * @shift:	shift to the divider bit field
+ * @width:	width of the divider bit field
+ * @table:	array of value/divider pairs, last entry should have div = 0
+ * @lock:	register lock
+ *
+ * Clock with an adjustable divider affecting its output frequency. Implements
+ * .recalc_rate, .set_rate and .round_rate
+ *
+ * Flags:
+ * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+ *	register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
+ *	the raw value read from the register, with the value of zero considered
+ *	invalid, unless CLK_DIVIDER_ALLOW_ZERO is set.
+ * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from
+ *	the hardware register
+ * CLK_DIVIDER_ALLOW_ZERO - Allow zero divisors. For dividers which have
+ *	CLK_DIVIDER_ONE_BASED set, it is possible to end up with a zero divisor.
+ *	Some hardware implementations gracefully handle this case and allow a
+ *	zero divisor by not modifying their input clock
+ *	(divide by one / bypass).
+ * CLK_DIVIDER_HIWORD_MASK - The divider settings are only in lower 16-bit
+ *	of this register, and mask of divider bits are in higher 16-bit of this
+ *	register. While setting the divider bits, higher 16-bit should also be
+ *	updated to indicate changing divider bits.
+ * CLK_DIVIDER_ROUND_CLOSEST - Makes the best calculated divider be rounded
+ *	to the closest integer instead of rounding up.
+ * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
+ *	not be changed by the clock framework.
+ * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
+ *	except when the value read from the register is zero, the divisor is
+ *	2^width of the field.
+ * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used
+ *	for the divider register. Setting this flag makes the register accesses
+ *	big endian.
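+ *
+ * For example, with none of the flags above set, a raw field value of 3
+ * selects divide-by-4 (value read plus one), so a 48 MHz parent yields
+ * 12 MHz.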
+ */
+struct clk_divider {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	u8		shift;
+	u8		width;
+	u8		flags;
+	const struct clk_div_table	*table;
+	spinlock_t	*lock;
+};
+
+#define clk_div_mask(width)	((1 << (width)) - 1)
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#define CLK_DIVIDER_ONE_BASED		BIT(0)
+#define CLK_DIVIDER_POWER_OF_TWO	BIT(1)
+#define CLK_DIVIDER_ALLOW_ZERO		BIT(2)
+#define CLK_DIVIDER_HIWORD_MASK		BIT(3)
+#define CLK_DIVIDER_ROUND_CLOSEST	BIT(4)
+#define CLK_DIVIDER_READ_ONLY		BIT(5)
+#define CLK_DIVIDER_MAX_AT_ZERO		BIT(6)
+#define CLK_DIVIDER_BIG_ENDIAN		BIT(7)
+
+extern const struct clk_ops clk_divider_ops;
+extern const struct clk_ops clk_divider_ro_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+		unsigned int val, const struct clk_div_table *table,
+		unsigned long flags, unsigned long width);
+long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+			       unsigned long rate, unsigned long *prate,
+			       const struct clk_div_table *table,
+			       u8 width, unsigned long flags);
+long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+				  unsigned long rate, unsigned long *prate,
+				  const struct clk_div_table *table, u8 width,
+				  unsigned long flags, unsigned int val);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+		const struct clk_div_table *table, u8 width,
+		unsigned long flags);
+
+struct clk *clk_register_divider(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		u8 clk_divider_flags, spinlock_t *lock);
+struct clk *clk_register_divider_table(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		u8 clk_divider_flags, const struct clk_div_table *table,
+		spinlock_t *lock);
+struct clk_hw *clk_hw_register_divider_table(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		u8 clk_divider_flags, const struct clk_div_table *table,
+		spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
+void clk_hw_unregister_divider(struct clk_hw *hw);
+
+/**
+ * struct clk_mux - multiplexer clock
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	register controlling multiplexer
+ * @table:	array of register values corresponding to the parent index
+ * @shift:	shift to multiplexer bit field
+ * @mask:	mask of multiplexer bit field
+ * @flags:	hardware-specific flags
+ * @lock:	register lock
+ *
+ * Clock with multiple selectable parents. Implements .get_parent, .set_parent
+ * and .recalc_rate
+ *
+ * Flags:
+ * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
+ * CLK_MUX_HIWORD_MASK - The mux settings are only in lower 16-bit of this
+ *	register, and mask of mux bits are in higher 16-bit of this register.
+ *	While setting the mux bits, higher 16-bit should also be updated to
+ *	indicate changing mux bits.
+ * CLK_MUX_READ_ONLY - The mux registers can't be written, only read in the
+ *	.get_parent clk_op.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ *	frequency.
+
+/**
+ * struct clk_mux - multiplexer clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling multiplexer
+ * @table: array of register values corresponding to the parent index
+ * @shift: shift to multiplexer bit field
+ * @mask: mask of multiplexer bit field
+ * @flags: hardware-specific flags
+ * @lock: register lock
+ *
+ * Clock with multiple selectable parents. Implements .get_parent, .set_parent
+ * and .recalc_rate
+ *
+ * Flags:
+ * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
+ * CLK_MUX_HIWORD_MASK - The mux settings are only in the lower 16 bits of this
+ *   register, and the mask of the mux bits is in the higher 16 bits. While
+ *   setting the mux bits, the higher 16 bits should also be updated to
+ *   indicate the changing mux bits.
+ * CLK_MUX_READ_ONLY - The mux registers can't be written, only read in the
+ *   .get_parent clk_op.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ *   frequency.
+ * CLK_MUX_BIG_ENDIAN - By default little endian register accesses are used for
+ *   the mux register. Setting this flag makes the register accesses big
+ *   endian.
+ */
+struct clk_mux {
+	struct clk_hw hw;
+	void __iomem *reg;
+	u32 *table;
+	u32 mask;
+	u8 shift;
+	u8 flags;
+	spinlock_t *lock;
+};
+
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
+#define CLK_MUX_INDEX_ONE BIT(0)
+#define CLK_MUX_INDEX_BIT BIT(1)
+#define CLK_MUX_HIWORD_MASK BIT(2)
+#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST BIT(4)
+#define CLK_MUX_BIG_ENDIAN BIT(5)
+
+extern const struct clk_ops clk_mux_ops;
+extern const struct clk_ops clk_mux_ro_ops;
+
+struct clk *clk_register_mux(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents,
+		unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		u8 clk_mux_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents,
+		unsigned long flags,
+		void __iomem *reg, u8 shift, u8 width,
+		u8 clk_mux_flags, spinlock_t *lock);
+
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents,
+		unsigned long flags,
+		void __iomem *reg, u8 shift, u32 mask,
+		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents,
+		unsigned long flags,
+		void __iomem *reg, u8 shift, u32 mask,
+		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+
+int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+		unsigned int val);
+unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
+
+void clk_unregister_mux(struct clk *clk);
+void clk_hw_unregister_mux(struct clk_hw *hw);
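/*
 * [Editor's illustrative sketch -- not part of this patch. A 2-bit
 * parent-select field at bits [1:0]; the parent names are hypothetical.]
 */
static const char * const example_mux_parents[] = { "pll_a", "pll_b", "osc" };
static DEFINE_SPINLOCK(example_mux_lock);

static struct clk_hw *example_register_mux(struct device *dev,
					   void __iomem *base)
{
	return clk_hw_register_mux(dev, "example_mux", example_mux_parents,
				   ARRAY_SIZE(example_mux_parents), 0,
				   base + 0x8, 0, 2, 0, &example_mux_lock);
}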
+
+void of_fixed_factor_clk_setup(struct device_node *node);
+
+/**
+ * struct clk_fixed_factor - fixed multiplier and divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Clock with a fixed multiplier and divider. The output frequency is the
+ * parent clock rate divided by div and multiplied by mult.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ */
+
+struct clk_fixed_factor {
+	struct clk_hw hw;
+	unsigned int mult;
+	unsigned int div;
+};
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+extern const struct clk_ops clk_fixed_factor_ops;
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+		const char *parent_name, unsigned long flags,
+		unsigned int mult, unsigned int div);
+void clk_unregister_fixed_factor(struct clk *clk);
+struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		unsigned int mult, unsigned int div);
+void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
+
+/**
+ * struct clk_fractional_divider - adjustable fractional divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the divider
+ * @mshift: shift to the numerator bit field
+ * @mwidth: width of the numerator bit field
+ * @nshift: shift to the denominator bit field
+ * @nwidth: width of the denominator bit field
+ * @lock: register lock
+ *
+ * Clock with adjustable fractional divider affecting its output frequency.
+ *
+ * Flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ *   are the values read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ *   is set then the numerator and denominator are both the value read
+ *   plus one.
+ * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
+ *   used for the divider register. Setting this flag makes the register
+ *   accesses big endian.
+ */
+struct clk_fractional_divider {
+	struct clk_hw hw;
+	void __iomem *reg;
+	u8 mshift;
+	u8 mwidth;
+	u32 mmask;
+	u8 nshift;
+	u8 nwidth;
+	u32 nmask;
+	u8 flags;
+	void (*approximation)(struct clk_hw *hw,
+			unsigned long rate, unsigned long *parent_rate,
+			unsigned long *m, unsigned long *n);
+	spinlock_t *lock;
+};
+
+#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
+
+#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
+
+extern const struct clk_ops clk_fractional_divider_ops;
+struct clk *clk_register_fractional_divider(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
+		u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
+		const char *name, const char *parent_name, unsigned long flags,
+		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
+		u8 clk_divider_flags, spinlock_t *lock);
+void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
+
+/**
+ * struct clk_multiplier - adjustable multiplier clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the multiplier
+ * @shift: shift to the multiplier bit field
+ * @width: width of the multiplier bit field
+ * @lock: register lock
+ *
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+ * Flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ *   from the register, with 0 being a valid value effectively
+ *   zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+ *   set, then a null multiplier will be considered as a bypass,
+ *   leaving the parent rate unmodified.
+ * CLK_MULTIPLIER_ROUND_CLOSEST - Rounds the best calculated multiplier to
+ *   the closest integer instead of rounding down.
+ * CLK_MULTIPLIER_BIG_ENDIAN - By default little endian register accesses are
+ *   used for the multiplier register. Setting this flag makes the register
+ *   accesses big endian.
+ */
+struct clk_multiplier {
+	struct clk_hw hw;
+	void __iomem *reg;
+	u8 shift;
+	u8 width;
+	u8 flags;
+	spinlock_t *lock;
+};
+
+#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
+
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
+
+extern const struct clk_ops clk_multiplier_ops;
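/*
 * [Editor's illustrative sketch -- not part of this patch. A fixed
 * divide-by-two derived clock using the fixed-factor helpers above:
 * output = parent * mult / div with mult = 1, div = 2. Names are
 * hypothetical.]
 */
static struct clk_hw *example_register_half_rate(struct device *dev)
{
	return clk_hw_register_fixed_factor(dev, "example_half",
					    "example_parent", 0, 1, 2);
}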
+
+/**
+ * struct clk_composite - aggregate clock of mux, divider and gate clocks
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mux_hw: handle between composite and hardware-specific mux clock
+ * @rate_hw: handle between composite and hardware-specific rate clock
+ * @gate_hw: handle between composite and hardware-specific gate clock
+ * @mux_ops: clock ops for mux
+ * @rate_ops: clock ops for rate
+ * @gate_ops: clock ops for gate
+ */
+struct clk_composite {
+	struct clk_hw hw;
+	struct clk_ops ops;
+
+	struct clk_hw *mux_hw;
+	struct clk_hw *rate_hw;
+	struct clk_hw *gate_hw;
+
+	const struct clk_ops *mux_ops;
+	const struct clk_ops *rate_ops;
+	const struct clk_ops *gate_ops;
+};
+
+#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
+
+struct clk *clk_register_composite(struct device *dev, const char *name,
+		const char * const *parent_names, int num_parents,
+		struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+		struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+		struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+		unsigned long flags);
+void clk_unregister_composite(struct clk *clk);
+struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
+		const char * const *parent_names, int num_parents,
+		struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+		struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+		struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+		unsigned long flags);
+void clk_hw_unregister_composite(struct clk_hw *hw);
+
+/**
+ * struct clk_gpio - gpio gated clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @gpiod: gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock
+ * or switching between two parents by asserting or deasserting the gpio.
+ *
+ * Implements .enable, .disable and .is_enabled or
+ * .get_parent, .set_parent and .determine_rate depending on which clk_ops
+ * is used.
+ */ +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw) + +extern const struct clk_ops clk_gpio_gate_ops; +struct clk *clk_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, struct gpio_desc *gpiod, + unsigned long flags); +struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, struct gpio_desc *gpiod, + unsigned long flags); +void clk_hw_unregister_gpio_gate(struct clk_hw *hw); + +extern const struct clk_ops clk_gpio_mux_ops; +struct clk *clk_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod, + unsigned long flags); +struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod, + unsigned long flags); +void clk_hw_unregister_gpio_mux(struct clk_hw *hw); + +struct clk *clk_register(struct device *dev, struct clk_hw *hw); +struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw); + +int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw); +int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); +int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw); + +void clk_unregister(struct clk *clk); +void devm_clk_unregister(struct device *dev, struct clk *clk); + +void clk_hw_unregister(struct clk_hw *hw); +void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); + +/* helper functions */ +const char *__clk_get_name(const struct clk *clk); +const char *clk_hw_get_name(const struct clk_hw *hw); +#ifdef CONFIG_COMMON_CLK +struct clk_hw *__clk_get_hw(struct clk *clk); +#else +static inline struct clk_hw *__clk_get_hw(struct clk *clk) +{ + return (struct clk_hw *)clk; +} +#endif +unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); +struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); +struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, + unsigned int index); +int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); +unsigned int __clk_get_enable_count(struct clk *clk); +unsigned long clk_hw_get_rate(const struct clk_hw *hw); +unsigned long __clk_get_flags(struct clk *clk); +unsigned long clk_hw_get_flags(const struct clk_hw *hw); +#define clk_hw_can_set_rate_parent(hw) \ + (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) + +bool clk_hw_is_prepared(const struct clk_hw *hw); +bool clk_hw_rate_is_protected(const struct clk_hw *hw); +bool clk_hw_is_enabled(const struct clk_hw *hw); +bool __clk_is_enabled(struct clk *clk); +struct clk *__clk_lookup(const char *name); +int __clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req); +int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); +int __clk_mux_determine_rate_closest(struct clk_hw *hw, + struct clk_rate_request *req); +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags); +void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); +void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, + unsigned long max_rate); + +static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) +{ + dst->clk = src->clk; + dst->core = src->core; +} + +static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, + const struct clk_div_table 
*table, + u8 width, unsigned long flags) +{ + return divider_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, table, width, flags); +} + +static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, + const struct clk_div_table *table, + u8 width, unsigned long flags, + unsigned int val) +{ + return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, table, width, flags, + val); +} + +/* + * FIXME clock api without lock protection + */ +unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate); + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[]; +}; + +#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) + +/* + * Use this macro when you have a driver that requires two initialization + * routines, one at of_clk_init(), and one at platform device probe + */ +#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \ + static void __init name##_of_clk_init_driver(struct device_node *np) \ + { \ + of_node_clear_flag(np, OF_POPULATED); \ + fn(np); \ + } \ + OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) + +#define CLK_HW_INIT(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = (const char *[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = (const struct clk_hw*[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +/* + * This macro is intended for drivers to be able to share the otherwise + * individual struct clk_hw[] compound literals created by the compiler + * when using CLK_HW_INIT_HW. It does NOT support multiple parents. 
+ */ +#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = _parent, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = (const struct clk_parent_data[]) { \ + { .fw_name = _parent }, \ + }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = NULL, \ + .num_parents = 0, \ + .ops = _ops, \ + }) + +#define CLK_FIXED_FACTOR(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_HW(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +/* + * This macro allows the driver to reuse the _parent array for multiple + * fixed factor clk declarations. 
+ */ +#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_HWS(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_FW_NAME(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +#ifdef CONFIG_OF +int of_clk_add_provider(struct device_node *np, + struct clk *(*clk_src_get)(struct of_phandle_args *args, + void *data), + void *data); +int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); +int devm_of_clk_add_hw_provider(struct device *dev, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data); +void of_clk_del_provider(struct device_node *np); +void devm_of_clk_del_provider(struct device *dev); +struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, + void *data); +struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, + void *data); +struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); +struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec, + void *data); +int of_clk_parent_fill(struct device_node *np, const char **parents, + unsigned int size); +int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags); + +#else /* !CONFIG_OF */ + +static inline int of_clk_add_provider(struct device_node *np, + struct clk *(*clk_src_get)(struct of_phandle_args *args, + void *data), + void *data) +{ + return 0; +} +static inline int of_clk_add_hw_provider(struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} +static inline int devm_of_clk_add_hw_provider(struct device *dev, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, + void *data), + void *data) +{ + return 0; +} +static inline void of_clk_del_provider(struct device_node *np) {} +static inline void devm_of_clk_del_provider(struct device *dev) {} +static inline struct clk *of_clk_src_simple_get( + struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk_hw * +of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk *of_clk_src_onecell_get( + struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk_hw * +of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) +{ + return ERR_PTR(-ENOENT); +} +static inline int of_clk_parent_fill(struct device_node *np, + const char **parents, unsigned int size) +{ + return 0; +} +static inline int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags) +{ + return 0; +} +#endif /* CONFIG_OF */ + +void clk_gate_restore_context(struct clk_hw *hw); + +#endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h new file mode 100644 index 0000000..18b7b95 --- /dev/null +++ b/include/linux/clk.h @@ -0,0 +1,985 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/include/linux/clk.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. 
+ * Copyright (C) 2011-2012 Linaro Ltd
+ */
+#ifndef __LINUX_CLK_H
+#define __LINUX_CLK_H
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+struct device;
+struct clk;
+struct device_node;
+struct of_phandle_args;
+
+/**
+ * DOC: clk notifier callback types
+ *
+ * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
+ *   to indicate that the rate change will proceed. Drivers must
+ *   immediately terminate any operations that will be affected by the
+ *   rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
+ *   NOTIFY_STOP or NOTIFY_BAD.
+ *
+ * ABORT_RATE_CHANGE - called if the rate change failed for some reason
+ *   after PRE_RATE_CHANGE. In this case, all registered notifiers on
+ *   the clk will be called with ABORT_RATE_CHANGE. Callbacks must
+ *   always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ * POST_RATE_CHANGE - called after the clk rate change has successfully
+ *   completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ */
+#define PRE_RATE_CHANGE BIT(0)
+#define POST_RATE_CHANGE BIT(1)
+#define ABORT_RATE_CHANGE BIT(2)
+
+/**
+ * struct clk_notifier - associate a clk with a notifier
+ * @clk: struct clk * to associate the notifier with
+ * @notifier_head: a blocking_notifier_head for this clk
+ * @node: linked list pointers
+ *
+ * A list of struct clk_notifier is maintained by the notifier code.
+ * An entry is created whenever code registers the first notifier on a
+ * particular @clk. Future notifiers on that @clk are added to the
+ * @notifier_head.
+ */
+struct clk_notifier {
+	struct clk *clk;
+	struct srcu_notifier_head notifier_head;
+	struct list_head node;
+};
+
+/**
+ * struct clk_notifier_data - rate data to pass to the notifier callback
+ * @clk: struct clk * being changed
+ * @old_rate: previous rate of this clk
+ * @new_rate: new rate of this clk
+ *
+ * For a pre-notifier, old_rate is the clk's rate before this rate
+ * change, and new_rate is what the rate will be in the future. For a
+ * post-notifier, old_rate and new_rate are both set to the clk's
+ * current rate (this was done to optimize the implementation).
+ */
+struct clk_notifier_data {
+	struct clk *clk;
+	unsigned long old_rate;
+	unsigned long new_rate;
+};
+
+/**
+ * struct clk_bulk_data - Data used for bulk clk operations.
+ *
+ * @id: clock consumer ID
+ * @clk: struct clk * to store the associated clock
+ *
+ * The CLK APIs provide a series of clk_bulk_() API calls as
+ * a convenience to consumers which require multiple clks. This
+ * structure is used to manage data for these calls.
+ */
+struct clk_bulk_data {
+	const char *id;
+	struct clk *clk;
+};
+
+#ifdef CONFIG_COMMON_CLK
+
+/**
+ * clk_notifier_register: register a clock rate-change notifier callback
+ * @clk: clock whose rate we are interested in
+ * @nb: notifier block with callback function pointer
+ *
+ * ProTip: debugging across notifier chains can be frustrating. Make sure that
+ * your notifier callback function prints a nice big warning in case of
+ * failure.
+ */
+int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
+
+/**
+ * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * @clk: clock whose rate we are no longer interested in
+ * @nb: notifier block which will be unregistered
+ */
+int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
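/*
 * [Editor's illustrative sketch -- not part of this patch. A notifier
 * callback that vetoes rate changes above a hypothetical 100 MHz limit,
 * using the callback types documented above.]
 */
static int example_clk_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000UL)
		return NOTIFY_BAD;	/* abort the rate change */
	return NOTIFY_OK;
}

static struct notifier_block example_clk_nb = {
	.notifier_call = example_clk_notifier_cb,
};
/* Registered with: clk_notifier_register(clk, &example_clk_nb); */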
+
+/**
+ * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
+ *   for a clock source.
+ * @clk: clock source
+ *
+ * This gets the clock source accuracy expressed in ppb.
+ * A perfect clock returns 0.
+ */
+long clk_get_accuracy(struct clk *clk);
+
+/**
+ * clk_set_phase - adjust the phase shift of a clock signal
+ * @clk: clock signal source
+ * @degrees: number of degrees the signal is shifted
+ *
+ * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
+ * success, a negative errno otherwise.
+ */
+int clk_set_phase(struct clk *clk, int degrees);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * a negative errno.
+ */
+int clk_get_phase(struct clk *clk);
+
+/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
+ * success, a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns a negative errno.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if @p and @q
+ * share the same &struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q);
+
+#else
+
+static inline int clk_notifier_register(struct clk *clk,
+					struct notifier_block *nb)
+{
+	return -ENOTSUPP;
+}
+
+static inline int clk_notifier_unregister(struct clk *clk,
+					  struct notifier_block *nb)
+{
+	return -ENOTSUPP;
+}
+
+static inline long clk_get_accuracy(struct clk *clk)
+{
+	return -ENOTSUPP;
+}
+
+static inline long clk_set_phase(struct clk *clk, int phase)
+{
+	return -ENOTSUPP;
+}
+
+static inline long clk_get_phase(struct clk *clk)
+{
+	return -ENOTSUPP;
+}
+
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+				     unsigned int den)
+{
+	return -ENOTSUPP;
+}
+
+static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
+						     unsigned int scale)
+{
+	return 0;
+}
+
+static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+	return p == q;
+}
+
+#endif
+
+/**
+ * clk_prepare - prepare a clock source
+ * @clk: clock source
+ *
+ * This prepares the clock source for use.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+int clk_prepare(struct clk *clk);
+int __must_check clk_bulk_prepare(int num_clks,
+				  const struct clk_bulk_data *clks);
+#else
+static inline int clk_prepare(struct clk *clk)
+{
+	might_sleep();
+	return 0;
+}
+
+static inline int __must_check
+clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
+{
+	might_sleep();
+	return 0;
+}
+#endif
+
+/**
+ * clk_unprepare - undo preparation of a clock source
+ * @clk: clock source
+ *
+ * This undoes a previously prepared clock. The caller must balance
+ * the number of prepare and unprepare calls.
+ *
+ * Must not be called from within atomic context.
+ */ +#ifdef CONFIG_HAVE_CLK_PREPARE +void clk_unprepare(struct clk *clk); +void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); +#else +static inline void clk_unprepare(struct clk *clk) +{ + might_sleep(); +} +static inline void clk_bulk_unprepare(int num_clks, + const struct clk_bulk_data *clks) +{ + might_sleep(); +} +#endif + +#ifdef CONFIG_HAVE_CLK +/** + * clk_get - lookup and obtain a reference to a clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get should not be called from within interrupt context. + */ +struct clk *clk_get(struct device *dev, const char *id); + +/** + * clk_bulk_get - lookup and obtain a number of references to clock producer. + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * This helper function allows drivers to get several clk consumers in one + * operation. If any of the clk cannot be acquired then any clks + * that were obtained will be freed before returning to the caller. + * + * Returns 0 if all clocks specified in clk_bulk_data table are obtained + * successfully, or valid IS_ERR() condition containing errno. + * The implementation uses @dev and @clk_bulk_data.id to determine the + * clock consumer, and thereby the clock producer. + * The clock returned is stored in each @clk_bulk_data.clk field. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks); +/** + * clk_bulk_get_all - lookup and obtain all available references to clock + * producer. + * @dev: device for clock "consumer" + * @clks: pointer to the clk_bulk_data table of consumer + * + * This helper function allows drivers to get all clk consumers in one + * operation. If any of the clk cannot be acquired then any clks + * that were obtained will be freed before returning to the caller. + * + * Returns a positive value for the number of clocks obtained while the + * clock references are stored in the clk_bulk_data table in @clks field. + * Returns 0 if there're none and a negative value if something failed. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks); + +/** + * clk_bulk_get_optional - lookup and obtain a number of references to clock producer + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Behaves the same as clk_bulk_get() except where there is no clock producer. + * In this case, instead of returning -ENOENT, the function returns 0 and + * NULL for a clk for which a clock producer could not be determined. 
+ */ +int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks); +/** + * devm_clk_bulk_get - managed get multiple clk consumers + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Return 0 on success, an errno on failure. + * + * This helper function allows drivers to get several clk + * consumers in one operation with management, the clks will + * automatically be freed when the device is unbound. + */ +int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks); +/** + * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: pointer to the clk_bulk_data table of consumer + * + * Behaves the same as devm_clk_bulk_get() except where there is no clock + * producer. In this case, instead of returning -ENOENT, the function returns + * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. + * + * Returns 0 if all clocks specified in clk_bulk_data table are obtained + * successfully or for any clk there was no clk provider available, otherwise + * returns valid IS_ERR() condition containing errno. + * The implementation uses @dev and @clk_bulk_data.id to determine the + * clock consumer, and thereby the clock producer. + * The clock returned is stored in each @clk_bulk_data.clk field. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks); +/** + * devm_clk_bulk_get_all - managed get multiple clk consumers + * @dev: device for clock "consumer" + * @clks: pointer to the clk_bulk_data table of consumer + * + * Returns a positive value for the number of clocks obtained while the + * clock references are stored in the clk_bulk_data table in @clks field. + * Returns 0 if there're none and a negative value if something failed. + * + * This helper function allows drivers to get several clk + * consumers in one operation with management, the clks will + * automatically be freed when the device is unbound. + */ + +int __must_check devm_clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks); + +/** + * devm_clk_get - lookup and obtain a managed reference to a clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * Drivers must assume that the clock source is not enabled. + * + * devm_clk_get should not be called from within interrupt context. + * + * The clock will automatically be freed when the device is unbound + * from the bus. + */ +struct clk *devm_clk_get(struct device *dev, const char *id); + +/** + * devm_clk_get_optional - lookup and obtain a managed reference to an optional + * clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Behaves the same as devm_clk_get() except where there is no clock producer. + * In this case, instead of returning -ENOENT, the function returns NULL. 
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ *   clock producer from child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
+ *
+ * This function parses the clocks, and uses them to look up the
+ * struct clk from the registered list of clock providers by using
+ * @np and @con_id
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_get_clk_from_child(struct device *dev,
+		struct device_node *np, const char *con_id);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ *   producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same
+ * driver, the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ *   producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity it previously got
+ * from clk_rate_exclusive_get()
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
+/**
+ * clk_enable - inform the system when the clock source should be running.
+ * @clk: clock source
+ *
+ * If the clock can not be enabled/disabled, this should return success.
+ *
+ * May be called from atomic contexts.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_enable(struct clk *clk);
+
+/**
+ * clk_bulk_enable - inform the system when the set of clks should be running.
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * May be called from atomic contexts.
+ *
+ * Returns success (0) or negative errno.
+ */
+int __must_check clk_bulk_enable(int num_clks,
+		const struct clk_bulk_data *clks);
+
+/**
+ * clk_disable - inform the system when the clock source is no longer required.
+ * @clk: clock source
+ *
+ * Inform the system that a clock source is no longer required by
+ * a driver and may be shut down.
+ *
+ * May be called from atomic contexts.
+ *
+ * Implementation detail: if the clock source is shared between
+ * multiple drivers, clk_enable() calls must be balanced by the
+ * same number of clk_disable() calls for the clock source to be
+ * disabled.
+ */
+void clk_disable(struct clk *clk);
+
+/**
+ * clk_bulk_disable - inform the system when the set of clks is no
+ *   longer required.
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Inform the system that a set of clks is no longer required by
+ * a driver and may be shut down.
+ *
+ * May be called from atomic contexts.
+ *
+ * Implementation detail: if the set of clks is shared between
+ * multiple drivers, clk_bulk_enable() calls must be balanced by the
+ * same number of clk_bulk_disable() calls for the clock source to be
+ * disabled.
+ */
+void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);
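/*
 * [Editor's illustrative sketch -- not part of this patch. Acquires and
 * enables two hypothetical clocks, "bus" and "core", with the bulk API
 * above and the clk_bulk_prepare_enable() helper defined further down
 * in this header.]
 */
static struct clk_bulk_data example_clks[] = {
	{ .id = "bus" },
	{ .id = "core" },
};

static int example_enable_clocks(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;
	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
}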
+
+/**
+ * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
+ *   This is only valid once the clock source has been enabled.
+ * @clk: clock source
+ */
+unsigned long clk_get_rate(struct clk *clk);
+
+/**
+ * clk_put - "free" the clock source
+ * @clk: clock source
+ *
+ * Note: drivers must ensure that all clk_enable calls made on this
+ * clock source are balanced by clk_disable calls prior to calling
+ * this function.
+ *
+ * clk_put should not be called from within interrupt context.
+ */
+void clk_put(struct clk *clk);
+
+/**
+ * clk_bulk_put - "free" the clock source
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put should not be called from within interrupt context.
+ */
+void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
+
+/**
+ * clk_bulk_put_all - "free" all the clock sources
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put_all should not be called from within interrupt context.
+ */
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
+
+/**
+ * devm_clk_put - "free" a managed clock source
+ * @dev: device used to acquire the clock
+ * @clk: clock source acquired with devm_clk_get()
+ *
+ * Note: drivers must ensure that all clk_enable calls made on this
+ * clock source are balanced by clk_disable calls prior to calling
+ * this function.
+ *
+ * clk_put should not be called from within interrupt context.
+ */
+void devm_clk_put(struct device *dev, struct clk *clk);
+
+/*
+ * The remaining APIs are optional for machine class support.
+ */
+
+
+/**
+ * clk_round_rate - adjust a rate to the exact rate a clock can provide
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * This answers the question "if I were to pass @rate to clk_set_rate(),
+ * what clock rate would I end up with?" without changing the hardware
+ * in any way. In other words:
+ *
+ *   rate = clk_round_rate(clk, r);
+ *
+ * and:
+ *
+ *   clk_set_rate(clk, r);
+ *   rate = clk_get_rate(clk);
+ *
+ * are equivalent except the former does not modify the clock hardware
+ * in any way.
+ *
+ * Returns rounded clock rate in Hz, or negative errno.
+ */
+long clk_round_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_rate - set the clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
+ *   clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * This helper function allows drivers to atomically set the rate of a producer
+ * and claim exclusivity over the rate control of the producer.
+ *
+ * It is essentially a combination of clk_set_rate() and
+ * clk_rate_exclusive_get(). Caller must balance this call with a call to
+ * clk_rate_exclusive_put()
+ *
+ * Returns success (0) or negative errno.
+ */ +int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); + +/** + * clk_has_parent - check if a clock is a possible parent for another + * @clk: clock source + * @parent: parent clock source + * + * This function can be used in drivers that need to check that a clock can be + * the parent of another without actually changing the parent. + * + * Returns true if @parent is a possible parent for @clk, false otherwise. + */ +bool clk_has_parent(struct clk *clk, struct clk *parent); + +/** + * clk_set_rate_range - set a rate range for a clock source + * @clk: clock source + * @min: desired minimum clock rate in Hz, inclusive + * @max: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); + +/** + * clk_set_min_rate - set a minimum clock rate for a clock source + * @clk: clock source + * @rate: desired minimum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_min_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_max_rate - set a maximum clock rate for a clock source + * @clk: clock source + * @rate: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_max_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_parent - set the parent clock source for this clock + * @clk: clock source + * @parent: parent clock source + * + * Returns success (0) or negative errno. + */ +int clk_set_parent(struct clk *clk, struct clk *parent); + +/** + * clk_get_parent - get the parent clock source for this clock + * @clk: clock source + * + * Returns struct clk corresponding to parent clock source, or + * valid IS_ERR() condition containing errno. + */ +struct clk *clk_get_parent(struct clk *clk); + +/** + * clk_get_sys - get a clock based upon the device name + * @dev_id: device name + * @con_id: connection ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev_id and @con_id to determine the clock consumer, and + * thereby the clock producer. In contrast to clk_get() this function + * takes the device name instead of the device itself for identification. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get_sys should not be called from within interrupt context. + */ +struct clk *clk_get_sys(const char *dev_id, const char *con_id); + +/** + * clk_save_context - save clock context for poweroff + * + * Saves the context of the clock register for powerstates in which the + * contents of the registers will be lost. Occurs deep within the suspend + * code so locking is not necessary. + */ +int clk_save_context(void); + +/** + * clk_restore_context - restore clock context after poweroff + * + * This occurs with all clocks enabled. Occurs deep within the resume code + * so locking is not necessary. 
+ */ +void clk_restore_context(void); + +#else /* !CONFIG_HAVE_CLK */ + +static inline struct clk *clk_get(struct device *dev, const char *id) +{ + return NULL; +} + +static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return 0; +} + +static inline int __must_check clk_bulk_get_optional(struct device *dev, + int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + +static inline int __must_check clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks) +{ + return 0; +} + +static inline struct clk *devm_clk_get(struct device *dev, const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_optional(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return 0; +} + +static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, + int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + +static inline int __must_check devm_clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks) +{ + + return 0; +} + +static inline struct clk *devm_get_clk_from_child(struct device *dev, + struct device_node *np, const char *con_id) +{ + return NULL; +} + +static inline void clk_put(struct clk *clk) {} + +static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} + +static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} + +static inline void devm_clk_put(struct device *dev, struct clk *clk) {} + + +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + +static inline int clk_enable(struct clk *clk) +{ + return 0; +} + +static inline int __must_check clk_bulk_enable(int num_clks, + const struct clk_bulk_data *clks) +{ + return 0; +} + +static inline void clk_disable(struct clk *clk) {} + + +static inline void clk_bulk_disable(int num_clks, + const struct clk_bulk_data *clks) {} + +static inline unsigned long clk_get_rate(struct clk *clk) +{ + return 0; +} + +static inline int clk_set_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline bool clk_has_parent(struct clk *clk, struct clk *parent) +{ + return true; +} + +static inline int clk_set_rate_range(struct clk *clk, unsigned long min, + unsigned long max) +{ + return 0; +} + +static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} + +static inline struct clk *clk_get_parent(struct clk *clk) +{ + return NULL; +} + +static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) +{ + return NULL; +} + +static inline int clk_save_context(void) +{ + return 0; +} + +static inline void clk_restore_context(void) {} + +#endif + +/* clk_prepare_enable helps cases using clk_enable in non-atomic context. 
*/ +static inline int clk_prepare_enable(struct clk *clk) +{ + int ret; + + ret = clk_prepare(clk); + if (ret) + return ret; + ret = clk_enable(clk); + if (ret) + clk_unprepare(clk); + + return ret; +} + +/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */ +static inline void clk_disable_unprepare(struct clk *clk) +{ + clk_disable(clk); + clk_unprepare(clk); +} + +static inline int __must_check +clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) +{ + int ret; + + ret = clk_bulk_prepare(num_clks, clks); + if (ret) + return ret; + ret = clk_bulk_enable(num_clks, clks); + if (ret) + clk_bulk_unprepare(num_clks, clks); + + return ret; +} + +static inline void clk_bulk_disable_unprepare(int num_clks, + const struct clk_bulk_data *clks) +{ + clk_bulk_disable(num_clks, clks); + clk_bulk_unprepare(num_clks, clks); +} + +/** + * clk_get_optional - lookup and obtain a reference to an optional clock + * producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Behaves the same as clk_get() except where there is no clock producer. In + * this case, instead of returning -ENOENT, the function returns NULL. + */ +static inline struct clk *clk_get_optional(struct device *dev, const char *id) +{ + struct clk *clk = clk_get(dev, id); + + if (clk == ERR_PTR(-ENOENT)) + return NULL; + + return clk; +} + +#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) +struct clk *of_clk_get(struct device_node *np, int index); +struct clk *of_clk_get_by_name(struct device_node *np, const char *name); +struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); +#else +static inline struct clk *of_clk_get(struct device_node *np, int index) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk *of_clk_get_by_name(struct device_node *np, + const char *name) +{ + return ERR_PTR(-ENOENT); +} +static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) +{ + return ERR_PTR(-ENOENT); +} +#endif + +#endif diff --git a/include/linux/clk/analogbits-wrpll-cln28hpc.h b/include/linux/clk/analogbits-wrpll-cln28hpc.h new file mode 100644 index 0000000..0327909 --- /dev/null +++ b/include/linux/clk/analogbits-wrpll-cln28hpc.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018-2019 SiFive, Inc. 
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+#define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+
+#include <linux/types.h>
+
+/* DIVQ_VALUES: number of valid DIVQ values */
+#define DIVQ_VALUES 6
+
+/*
+ * Bit definitions for struct wrpll_cfg.flags
+ *
+ * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be
+ *   programmed to enter bypass
+ * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset
+ * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal
+ *   feedback mode
+ * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external
+ *   feedback mode (not yet supported by this driver)
+ */
+#define WRPLL_FLAGS_BYPASS_SHIFT 0
+#define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT)
+#define WRPLL_FLAGS_RESET_SHIFT 1
+#define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT)
+#define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2
+#define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT)
+#define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3
+#define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT)
+
+/**
+ * struct wrpll_cfg - WRPLL configuration values
+ * @divr: reference divider value (6 bits), as presented to the PLL signals
+ * @divf: feedback divider value (9 bits), as presented to the PLL signals
+ * @divq: output divider value (3 bits), as presented to the PLL signals
+ * @flags: PLL configuration flags. See above for more information
+ * @range: PLL loop filter range. See below for more information
+ * @output_rate_cache: cached output rates, swept across DIVQ
+ * @parent_rate: PLL refclk rate for which values are valid
+ * @max_r: maximum possible R divider value, given @parent_rate
+ * @init_r: initial R divider value to start the search from
+ *
+ * @divr, @divf, @divq, @range represent what the PLL expects to see
+ * on its input signals. Thus @divr and @divf are the actual divisors
+ * minus one. @divq is a power-of-two divider; for example, 1 =
+ * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value.
+ *
+ * When initially passing a struct wrpll_cfg record, the
+ * record should be zero-initialized with the exception of the @flags
+ * field. The only flag bits that need to be set are either
+ * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK.
+ */
+struct wrpll_cfg {
+	u8 divr;
+	u8 divq;
+	u8 range;
+	u8 flags;
+	u16 divf;
+/* private: */
+	u32 output_rate_cache[DIVQ_VALUES];
+	unsigned long parent_rate;
+	u8 max_r;
+	u8 init_r;
+};
+
+int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+		unsigned long parent_rate);
+
+unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c);
+
+unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c,
+		unsigned long parent_rate);
+
+#endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */
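/*
 * [Editor's illustrative sketch -- not part of this patch. Computes WRPLL
 * divider settings for a 1 GHz output from a 33 MHz reference, following
 * the zero-init-except-flags rule documented above. The rates are made up.]
 */
static int example_configure_wrpll(void)
{
	struct wrpll_cfg cfg = {
		.flags = WRPLL_FLAGS_INT_FEEDBACK_MASK,
	};
	int ret;

	ret = wrpll_configure_for_rate(&cfg, 1000000000U, 33000000UL);
	if (ret)
		return ret;

	/* cfg.divr, cfg.divf and cfg.divq are now ready to program, and
	 * wrpll_calc_max_lock_us(&cfg) bounds the expected lock time. */
	return 0;
}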
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
new file mode 100644
index 0000000..3904378
--- /dev/null
+++ b/include/linux/clk/at91_pmc.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/linux/clk/at91_pmc.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Power Management Controller (PMC) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ */
+
+#ifndef AT91_PMC_H
+#define AT91_PMC_H
+
+#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
+#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
+
+#define AT91_PMC_SCSR 0x08 /* System Clock Status Register */
+#define AT91_PMC_PCK (1 << 0) /* Processor Clock */
+#define AT91RM9200_PMC_UDP (1 << 1) /* USB Device Port Clock [AT91RM9200 only] */
+#define AT91RM9200_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend [AT91RM9200 only] */
+#define AT91RM9200_PMC_UHP (1 << 4) /* USB Host Port Clock [AT91RM9200 only] */
+#define AT91SAM926x_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91SAM926x only] */
+#define AT91SAM926x_PMC_UDP (1 << 7) /* USB Device Port Clock [AT91SAM926x only] */
+#define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2 (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3 (1 << 11) /* Programmable Clock 3 */
+#define AT91_PMC_PCK4 (1 << 12) /* Programmable Clock 4 [AT572D940HF only] */
+#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */
+#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */
+
+#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */
+#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */
+#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
+
+#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */
+#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */
+#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */
+#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */
+#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */
+
+#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
+#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
+#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
+#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */
+#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */
+#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+#define AT91_PMC_KEY_MASK (0xff << 16)
+#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */
+#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */
+#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */
+
+#define AT91_CKGR_MCFR 0x24 /* Main Clock Frequency Register */
+#define AT91_PMC_MAINF (0xffff << 0) /* Main Clock Frequency */
+#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
+
+#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
+#define AT91_PMC_DIV (0xff << 0) /* Divider */
+#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
+#define AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */
+#define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */
+#define AT91_PMC_MUL_GET(n) ((n) >> 16 & 0x7ff)
+#define AT91_PMC3_MUL (0x7f << 18) /* PLL Multiplier [SAMA5 only] */
+#define AT91_PMC3_MUL_GET(n) ((n) >> 18 & 0x7f)
+#define AT91_PMC_USBDIV (3 << 28) /* USB Divisor (PLLB only) */
+#define AT91_PMC_USBDIV_1 (0 << 28)
+#define AT91_PMC_USBDIV_2 (1 << 28)
+#define AT91_PMC_USBDIV_4 (2 << 28)
+#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */
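/*
 * [Editor's illustrative sketch -- not part of this patch. Decodes
 * AT91_CKGR_PLLAR using the masks above; per the AT91 PMC documentation,
 * the PLL output is MAINCK / DIV * (MUL + 1), and DIV = 0 disables the
 * PLL input.]
 */
static unsigned long example_plla_rate(u32 pllar, unsigned long mainck)
{
	unsigned long div = pllar & AT91_PMC_DIV;
	unsigned long mul = AT91_PMC_MUL_GET(pllar);

	if (!div)
		return 0;
	return mainck / div * (mul + 1);
}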
+
+#define AT91_PMC_CPU_CKR 0x28 /* CPU Clock Register */
+
+#define AT91_PMC_MCKR 0x30 /* Master Clock Register */
+#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */
+#define AT91_PMC_CSS_SLOW (0 << 0)
+#define AT91_PMC_CSS_MAIN (1 << 0)
+#define AT91_PMC_CSS_PLLA (2 << 0)
+#define AT91_PMC_CSS_PLLB (3 << 0)
+#define AT91_PMC_CSS_UPLL (3 << 0) /* [some SAM9 only] */
+#define PMC_PRES_OFFSET 2
+#define AT91_PMC_PRES (7 << PMC_PRES_OFFSET) /* Master Clock Prescaler */
+#define AT91_PMC_PRES_1 (0 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_2 (1 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_4 (2 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_8 (3 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_16 (4 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_32 (5 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_64 (6 << PMC_PRES_OFFSET)
+#define PMC_ALT_PRES_OFFSET 4
+#define AT91_PMC_ALT_PRES (7 << PMC_ALT_PRES_OFFSET) /* Master Clock Prescaler [alternate location] */
+#define AT91_PMC_ALT_PRES_1 (0 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_2 (1 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_4 (2 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_8 (3 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_16 (4 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_32 (5 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_64 (6 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_MDIV (3 << 8) /* Master Clock Division */
+#define AT91RM9200_PMC_MDIV_1 (0 << 8) /* [AT91RM9200 only] */
+#define AT91RM9200_PMC_MDIV_2 (1 << 8)
+#define AT91RM9200_PMC_MDIV_3 (2 << 8)
+#define AT91RM9200_PMC_MDIV_4 (3 << 8)
+#define AT91SAM9_PMC_MDIV_1 (0 << 8) /* [SAM9 only] */
+#define AT91SAM9_PMC_MDIV_2 (1 << 8)
+#define AT91SAM9_PMC_MDIV_4 (2 << 8)
+#define AT91SAM9_PMC_MDIV_6 (3 << 8) /* [some SAM9 only] */
+#define AT91SAM9_PMC_MDIV_3 (3 << 8) /* [some SAM9 only] */
+#define AT91_PMC_PDIV (1 << 12) /* Processor Clock Division [some SAM9 only] */
+#define AT91_PMC_PDIV_1 (0 << 12)
+#define AT91_PMC_PDIV_2 (1 << 12)
+#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */
+#define AT91_PMC_PLLADIV2_OFF (0 << 12)
+#define AT91_PMC_PLLADIV2_ON (1 << 12)
+#define AT91_PMC_H32MXDIV BIT(24)
+
+#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
+#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
+#define AT91_PMC_USBS_PLLA (0 << 0)
+#define AT91_PMC_USBS_UPLL (1 << 0)
+#define AT91_PMC_USBS_PLLB (1 << 0) /* [AT91SAMN12 only] */
+#define AT91_PMC_OHCIUSBDIV (0xF << 8) /* Divider for USB OHCI Clock */
+#define AT91_PMC_OHCIUSBDIV_1 (0x0 << 8)
+#define AT91_PMC_OHCIUSBDIV_2 (0x1 << 8)
+
+#define AT91_PMC_SMD 0x3c /* Soft Modem Clock Register [some SAM9 only] */
+#define AT91_PMC_SMDS (0x1 << 0) /* SMD input clock selection */
+#define AT91_PMC_SMD_DIV (0x1f << 8) /* SMD input clock divider */
+#define AT91_PMC_SMDDIV(n) (((n) << 8) & AT91_PMC_SMD_DIV)
+
+#define AT91_PMC_PCKR(n) (0x40 + ((n) * 4)) /* Programmable Clock 0-N Registers */
+#define AT91_PMC_ALT_PCKR_CSS (0x7 << 0) /* Programmable Clock Source Selection [alternate length] */
+#define AT91_PMC_CSS_MASTER (4 << 0) /* [some SAM9 only] */
+#define AT91_PMC_CSSMCK (0x1 << 8) /* CSS or Master Clock Selection */
+#define AT91_PMC_CSSMCK_CSS (0 << 8)
+#define AT91_PMC_CSSMCK_MCK (1 << 8)
+
+#define AT91_PMC_IER 0x60 /* Interrupt Enable Register */
+#define AT91_PMC_IDR 0x64 /* Interrupt Disable Register */
+#define AT91_PMC_SR 0x68 /* Status Register */
+#define AT91_PMC_MOSCS (1 << 0) /* MOSCS Flag */
+#define AT91_PMC_LOCKA (1 << 1) /* PLLA Lock */
+#define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */
+#define AT91_PMC_MCKRDY (1 << 3) /* Master Clock */
+#define AT91_PMC_LOCKU (1 << 6) /* UPLL Lock [some SAM9] */
+#define AT91_PMC_OSCSEL (1 << 7) /* Slow Oscillator Selection [some SAM9] */
+#define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */
+#define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */
+#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
+#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
+#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
+#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
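/*
 * [Editor's illustrative sketch -- not part of this patch. Extracts the
 * master clock prescaler from AT91_PMC_MCKR: the PRES field encodes a
 * power-of-two divider (0 -> /1 ... 6 -> /64), per the defines above.]
 */
static unsigned int example_mck_prescaler(u32 mckr)
{
	return 1U << ((mckr & AT91_PMC_PRES) >> PMC_PRES_OFFSET);
}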
AT91_PMC_OSCSEL (1 << 7) /* Slow Oscillator Selection [some SAM9] */
+#define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */
+#define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */
+#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
+#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
+#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
+#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
+
+#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
+#define AT91_PMC_FSTT(n) BIT(n)
+#define AT91_PMC_RTTAL BIT(16) /* RTT Alarm Enable */
+#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */
+#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */
+#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */
+#define AT91_PMC_LPM BIT(20) /* Low-power Mode */
+#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */
+#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */
+
+#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */
+
+#define AT91_PMC_FS_INPUT_MASK 0x7ff
+
+#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
+
+#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */
+#define AT91_PMC_WPEN (0x1 << 0) /* Write Protect Enable */
+#define AT91_PMC_WPKEY (0xffffff << 8) /* Write Protect Key */
+#define AT91_PMC_PROTKEY (0x504d43 << 8) /* Activation Code */
+
+#define AT91_PMC_WPSR 0xe8 /* Write Protect Status Register [some SAM9] */
+#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */
+#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */
+
+#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only] */
+#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Disable Register 1 */
+#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Status Register 1 */
+
+#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
+#define AT91_PMC_PCR_PID_MASK 0x3f
+#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
+#define AT91_PMC_PCR_GCKDIV_MASK GENMASK(27, 20)
+#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
+#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
+
+#define AT91_PMC_AUDIO_PLL0 0x14c
+#define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0)
+#define AT91_PMC_AUDIO_PLL_PADEN (1 << 1)
+#define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2)
+#define AT91_PMC_AUDIO_PLL_RESETN (1 << 3)
+#define AT91_PMC_AUDIO_PLL_ND_OFFSET 8
+#define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16
+#define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+
+#define AT91_PMC_AUDIO_PLL1 0x150
+#define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff
+#define AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24
+#define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define
AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26 +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) +#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) + +#endif diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h new file mode 100644 index 0000000..eae9652 --- /dev/null +++ b/include/linux/clk/clk-conf.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2014 Samsung Electronics Co., Ltd. + * Sylwester Nawrocki + */ + +#ifndef __CLK_CONF_H +#define __CLK_CONF_H + +#include + +struct device_node; + +#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) +int of_clk_set_defaults(struct device_node *node, bool clk_supplier); +#else +static inline int of_clk_set_defaults(struct device_node *node, + bool clk_supplier) +{ + return 0; +} +#endif + +#endif /* __CLK_CONF_H */ diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h new file mode 100644 index 0000000..8a7b5cd --- /dev/null +++ b/include/linux/clk/davinci.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Clock drivers for TI DaVinci PLL and PSC controllers + * + * Copyright (C) 2018 David Lechner + */ + +#ifndef __LINUX_CLK_DAVINCI_PLL_H___ +#define __LINUX_CLK_DAVINCI_PLL_H___ + +#include +#include + +/* function for registering clocks in early boot */ + +#ifdef CONFIG_ARCH_DAVINCI_DA830 +int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DA850 +int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM355 +int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm355_psc_init(struct device *dev, void __iomem *base); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM365 +int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm365_psc_init(struct device *dev, void __iomem *base); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM644x +int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm644x_psc_init(struct device *dev, void __iomem *base); +#endif +#ifdef CONFIG_ARCH_DAVINCI_DM646x +int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); +int dm646x_psc_init(struct device *dev, void __iomem *base); +#endif + +#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */ diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h new file mode 100644 index 0000000..4451304 --- /dev/null +++ b/include/linux/clk/mmp.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CLK_MMP_H +#define __CLK_MMP_H + +#include + +extern void pxa168_clk_init(phys_addr_t mpmu_phys, + phys_addr_t apmu_phys, + phys_addr_t apbc_phys); +extern void pxa910_clk_init(phys_addr_t mpmu_phys, + phys_addr_t apmu_phys, + phys_addr_t apbc_phys, + phys_addr_t apbcp_phys); +extern void mmp2_clk_init(phys_addr_t mpmu_phys, + phys_addr_t apmu_phys, + phys_addr_t apbc_phys); + +#endif diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h new file mode 100644 index 0000000..2674e60 --- /dev/null +++ b/include/linux/clk/mxs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Freescale Semiconductor, Inc. 
+ */ + +#ifndef __LINUX_CLK_MXS_H +#define __LINUX_CLK_MXS_H + +int mxs_saif_clkmux_select(unsigned int clkmux); + +#endif diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h new file mode 100644 index 0000000..0ebbe2f --- /dev/null +++ b/include/linux/clk/renesas.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright 2013 Ideas On Board SPRL + * Copyright 2013, 2014 Horms Solutions Ltd. + * + * Contact: Laurent Pinchart + * Contact: Simon Horman + */ + +#ifndef __LINUX_CLK_RENESAS_H_ +#define __LINUX_CLK_RENESAS_H_ + +#include + +struct device; +struct device_node; +struct generic_pm_domain; + +void cpg_mstp_add_clk_domain(struct device_node *np); +#ifdef CONFIG_CLK_RENESAS_CPG_MSTP +int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev); +#else +#define cpg_mstp_attach_dev NULL +#define cpg_mstp_detach_dev NULL +#endif + +#ifdef CONFIG_CLK_RENESAS_CPG_MSSR +int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev); +void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev); +#else +#define cpg_mssr_attach_dev NULL +#define cpg_mssr_detach_dev NULL +#endif +#endif diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h new file mode 100644 index 0000000..3cd14ac --- /dev/null +++ b/include/linux/clk/sunxi-ng.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved. + */ + +#ifndef _LINUX_CLK_SUNXI_NG_H_ +#define _LINUX_CLK_SUNXI_NG_H_ + +#include + +#ifdef CONFIG_SUNXI_CCU +int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode); +int sunxi_ccu_get_mmc_timing_mode(struct clk *clk); +#else +static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, + bool new_mode) +{ + return -ENOTSUPP; +} + +static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk) +{ + return -ENOTSUPP; +} +#endif + +#endif diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h new file mode 100644 index 0000000..b8aef62 --- /dev/null +++ b/include/linux/clk/tegra.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 
+ */
+
+#ifndef __LINUX_CLK_TEGRA_H_
+#define __LINUX_CLK_TEGRA_H_
+
+#include
+#include
+
+/*
+ * Tegra CPU clock and reset control ops
+ *
+ * wait_for_reset:
+ * keep waiting until the CPU is in reset state
+ * put_in_reset:
+ * put the CPU in reset state
+ * out_of_reset:
+ * release the CPU from reset state
+ * enable_clock:
+ * CPU clock un-gate
+ * disable_clock:
+ * CPU clock gate
+ * rail_off_ready:
+ * CPU is ready for rail off
+ * suspend:
+ * save the clock settings when the CPU goes into a low-power state
+ * resume:
+ * restore the clock settings when the CPU exits a low-power state
+ */
+struct tegra_cpu_car_ops {
+ void (*wait_for_reset)(u32 cpu);
+ void (*put_in_reset)(u32 cpu);
+ void (*out_of_reset)(u32 cpu);
+ void (*enable_clock)(u32 cpu);
+ void (*disable_clock)(u32 cpu);
+#ifdef CONFIG_PM_SLEEP
+ bool (*rail_off_ready)(void);
+ void (*suspend)(void);
+ void (*resume)(void);
+#endif
+};
+
+extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
+
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->wait_for_reset))
+ return;
+
+ tegra_cpu_car_ops->wait_for_reset(cpu);
+}
+
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->put_in_reset))
+ return;
+
+ tegra_cpu_car_ops->put_in_reset(cpu);
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->out_of_reset))
+ return;
+
+ tegra_cpu_car_ops->out_of_reset(cpu);
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->enable_clock))
+ return;
+
+ tegra_cpu_car_ops->enable_clock(cpu);
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->disable_clock))
+ return;
+
+ tegra_cpu_car_ops->disable_clock(cpu);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static inline bool tegra_cpu_rail_off_ready(void)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
+ return false;
+
+ return tegra_cpu_car_ops->rail_off_ready();
+}
+
+static inline void tegra_cpu_clock_suspend(void)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->suspend))
+ return;
+
+ tegra_cpu_car_ops->suspend();
+}
+
+static inline void tegra_cpu_clock_resume(void)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->resume))
+ return;
+
+ tegra_cpu_car_ops->resume();
+}
+#endif
+
+extern void tegra210_xusb_pll_hw_control_enable(void);
+extern void tegra210_xusb_pll_hw_sequence_start(void);
+extern void tegra210_sata_pll_hw_control_enable(void);
+extern void tegra210_sata_pll_hw_sequence_start(void);
+extern void tegra210_set_sata_pll_seq_sw(bool state);
+extern void tegra210_put_utmipll_in_iddq(void);
+extern void tegra210_put_utmipll_out_iddq(void);
+extern int tegra210_clk_handle_mbist_war(unsigned int id);
+
+#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
new file mode 100644
index 0000000..1e8ef96
--- /dev/null
+++ b/include/linux/clk/ti.h
@@ -0,0 +1,322 @@
+/*
+ * TI clock drivers support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_CLK_TI_H__
+#define __LINUX_CLK_TI_H__
+
+#include
+#include
+
+/**
+ * struct clk_omap_reg - OMAP register declaration
+ * @offset: offset from the master IP module base address
+ * @index: index of the master IP module
+ */
+struct clk_omap_reg {
+ void __iomem *ptr;
+ u16 offset;
+ u8 index;
+ u8 flags;
+};
+
+/**
+ * struct dpll_data - DPLL registers and integration data
+ * @mult_div1_reg: register containing the DPLL M and N bitfields
+ * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
+ * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
+ * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input
+ * @clk_ref: struct clk_hw pointer to the clock's reference clock input
+ * @control_reg: register containing the DPLL mode bitfield
+ * @enable_mask: mask of the DPLL mode bitfield in @control_reg
+ * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
+ * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_m4xen: cache of the last M4X result of
+ * omap4_dpll_regm4xen_round_rate()
+ * @last_rounded_lpmode: cache of the last lpmode result of
+ * omap4_dpll_lpmode_recalc()
+ * @max_multiplier: maximum valid non-bypass multiplier value (actual)
+ * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
+ * @min_divider: minimum valid non-bypass divider value (actual)
+ * @max_divider: maximum valid non-bypass divider value (actual)
+ * @max_rate: maximum clock rate for the DPLL
+ * @modes: possible values of @enable_mask
+ * @autoidle_reg: register containing the DPLL autoidle mode bitfield
+ * @idlest_reg: register containing the DPLL idle status bitfield
+ * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
+ * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
+ * @dcc_mask: mask of the DPLL DCC correction bitfield in @mult_div1_reg
+ * @dcc_rate: rate at or above which DCC (@dcc_mask) must be set
+ * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
+ * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
+ * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
+ * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
+ * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
+ * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @flags: DPLL type/features (see below)
+ *
+ * Possible values for @flags:
+ * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs)
+ *
+ * @freqsel_mask is only used on the OMAP34xx family and AM35xx.
+ *
+ * XXX Some DPLLs have multiple bypass inputs, so it's not technically
+ * correct to only have one @clk_bypass pointer.
+ *
+ * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m,
+ * @last_rounded_n) should be separated from the runtime-fixed fields
+ * and placed into a different structure, so that the runtime-fixed data
+ * can be placed into read-only space.
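+ *
+ * As an illustration only (not part of this header): code holding a
+ * struct dpll_data *dd and a registered struct ti_clk_ll_ops *ops (see
+ * ti_clk_setup_ll_ops() below) could extract the current M and N
+ * values with the masks above:
+ *
+ *	u32 v = ops->clk_readl(&dd->mult_div1_reg);
+ *	u16 m = (v & dd->mult_mask) >> __ffs(dd->mult_mask);
+ *	u8 n = (v & dd->div1_mask) >> __ffs(dd->div1_mask);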
+ */ +struct dpll_data { + struct clk_omap_reg mult_div1_reg; + u32 mult_mask; + u32 div1_mask; + struct clk_hw *clk_bypass; + struct clk_hw *clk_ref; + struct clk_omap_reg control_reg; + u32 enable_mask; + unsigned long last_rounded_rate; + u16 last_rounded_m; + u8 last_rounded_m4xen; + u8 last_rounded_lpmode; + u16 max_multiplier; + u8 last_rounded_n; + u8 min_divider; + u16 max_divider; + unsigned long max_rate; + u8 modes; + struct clk_omap_reg autoidle_reg; + struct clk_omap_reg idlest_reg; + u32 autoidle_mask; + u32 freqsel_mask; + u32 idlest_mask; + u32 dco_mask; + u32 sddiv_mask; + u32 dcc_mask; + unsigned long dcc_rate; + u32 lpmode_mask; + u32 m4xen_mask; + u8 auto_recal_bit; + u8 recal_en_bit; + u8 recal_st_bit; + u8 flags; +}; + +struct clk_hw_omap; + +/** + * struct clk_hw_omap_ops - OMAP clk ops + * @find_idlest: find idlest register information for a clock + * @find_companion: find companion clock register information for a clock, + * basically converts CM_ICLKEN* <-> CM_FCLKEN* + * @allow_idle: enables autoidle hardware functionality for a clock + * @deny_idle: prevent autoidle hardware functionality for a clock + */ +struct clk_hw_omap_ops { + void (*find_idlest)(struct clk_hw_omap *oclk, + struct clk_omap_reg *idlest_reg, + u8 *idlest_bit, u8 *idlest_val); + void (*find_companion)(struct clk_hw_omap *oclk, + struct clk_omap_reg *other_reg, + u8 *other_bit); + void (*allow_idle)(struct clk_hw_omap *oclk); + void (*deny_idle)(struct clk_hw_omap *oclk); +}; + +/** + * struct clk_hw_omap - OMAP struct clk + * @node: list_head connecting this clock into the full clock list + * @enable_reg: register to write to enable the clock (see @enable_bit) + * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg) + * @flags: see "struct clk.flags possibilities" above + * @clksel_reg: for clksel clks, register va containing src/divisor select + * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock + * @clkdm_name: clockdomain name that this clock is contained in + * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime + * @ops: clock ops for this clock + */ +struct clk_hw_omap { + struct clk_hw hw; + struct list_head node; + unsigned long fixed_rate; + u8 fixed_div; + struct clk_omap_reg enable_reg; + u8 enable_bit; + u8 flags; + struct clk_omap_reg clksel_reg; + struct dpll_data *dpll_data; + const char *clkdm_name; + struct clockdomain *clkdm; + const struct clk_hw_omap_ops *ops; + u32 context; + int autoidle_count; +}; + +/* + * struct clk_hw_omap.flags possibilities + * + * XXX document the rest of the clock flags here + * + * ENABLE_REG_32BIT: (OMAP1 only) clock control register must be accessed + * with 32bit ops, by default OMAP1 uses 16bit ops. + * CLOCK_IDLE_CONTROL: (OMAP1 only) clock has autoidle support. + * CLOCK_NO_IDLE_PARENT: (OMAP1 only) when clock is enabled, its parent + * clock is put to no-idle mode. + * ENABLE_ON_INIT: Clock is enabled on init. + * INVERT_ENABLE: By default, clock enable bit behavior is '1' enable, '0' + * disable. This inverts the behavior making '0' enable and '1' disable. + * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL + * bits share the same register. This flag allows the + * omap4_dpllmx*() code to determine which GATE_CTRL bit field + * should be used. This is a temporary solution - a better approach + * would be to associate clock type-specific data with the clock, + * similar to the struct dpll_data approach. 
+ */
+#define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */
+#define CLOCK_IDLE_CONTROL (1 << 1)
+#define CLOCK_NO_IDLE_PARENT (1 << 2)
+#define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */
+#define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */
+#define CLOCK_CLKOUTX2 (1 << 5)
+
+/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
+#define DPLL_LOW_POWER_STOP 0x1
+#define DPLL_LOW_POWER_BYPASS 0x5
+#define DPLL_LOCKED 0x7
+
+/* DPLL Type and DCO Selection Flags */
+#define DPLL_J_TYPE 0x1
+
+/* Static memmap indices */
+enum {
+ TI_CLKM_CM = 0,
+ TI_CLKM_CM2,
+ TI_CLKM_PRM,
+ TI_CLKM_SCRM,
+ TI_CLKM_CTRL,
+ TI_CLKM_CTRL_AUX,
+ TI_CLKM_PLLSS,
+ CLK_MAX_MEMMAPS
+};
+
+/**
+ * struct ti_clk_ll_ops - low-level ops for clocks
+ * @clk_readl: pointer to register read function
+ * @clk_writel: pointer to register write function
+ * @clk_rmw: pointer to register read-modify-write function
+ * @clkdm_clk_enable: pointer to clockdomain enable function
+ * @clkdm_clk_disable: pointer to clockdomain disable function
+ * @clkdm_lookup: pointer to clockdomain lookup function
+ * @cm_wait_module_ready: pointer to CM module wait ready function
+ * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
+ *
+ * Low-level ops are generally used by the basic clock types (clk-gate,
+ * clk-mux, clk-divider etc.) to provide support for various low-level
+ * hardware interfaces (direct MMIO, regmap etc.), and are initialized
+ * by board code. Low-level ops also contain some other platform specific
+ * operations not provided directly by clock drivers.
+ */
+struct ti_clk_ll_ops {
+ u32 (*clk_readl)(const struct clk_omap_reg *reg);
+ void (*clk_writel)(u32 val, const struct clk_omap_reg *reg);
+ void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg);
+ int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
+ int (*clkdm_clk_disable)(struct clockdomain *clkdm,
+ struct clk *clk);
+ struct clockdomain * (*clkdm_lookup)(const char *name);
+ int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
+ u8 idlest_shift);
+ int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg,
+ s16 *prcm_inst, u8 *idlest_reg_id);
+};
+
+#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+
+bool omap2_clk_is_hw_omap(struct clk_hw *hw);
+int omap2_clk_disable_autoidle_all(void);
+int omap2_clk_enable_autoidle_all(void);
+int omap2_clk_allow_idle(struct clk *clk);
+int omap2_clk_deny_idle(struct clk *clk);
+unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
+void omap2xxx_clkt_vps_init(void);
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
+
+void ti_dt_clk_init_retry_clks(void);
+void ti_dt_clockdomains_setup(void);
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
+
+struct regmap;
+
+int omap2_clk_provider_init(struct device_node *parent, int index,
+ struct regmap *syscon, void __iomem *mem);
+void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
+
+int omap3430_dt_clk_init(void);
+int omap3630_dt_clk_init(void);
+int am35xx_dt_clk_init(void);
+int dm814x_dt_clk_init(void);
+int dm816x_dt_clk_init(void);
+int omap4xxx_dt_clk_init(void);
+int omap5xxx_dt_clk_init(void);
+int dra7xx_dt_clk_init(void);
+int am33xx_dt_clk_init(void);
+int am43xx_dt_clk_init(void);
+int omap2420_dt_clk_init(void);
+int
omap2430_dt_clk_init(void); + +struct ti_clk_features { + u32 flags; + long fint_min; + long fint_max; + long fint_band1_max; + long fint_band2_min; + u8 dpll_bypass_vals; + u8 cm_idlest_val; +}; + +#define TI_CLK_DPLL_HAS_FREQSEL BIT(0) +#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1) +#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2) +#define TI_CLK_ERRATA_I810 BIT(3) +#define TI_CLK_CLKCTRL_COMPAT BIT(4) +#define TI_CLK_DEVICE_TYPE_GP BIT(5) + +void ti_clk_setup_features(struct ti_clk_features *features); +const struct ti_clk_features *ti_clk_get_features(void); +int omap3_noncore_dpll_save_context(struct clk_hw *hw); +void omap3_noncore_dpll_restore_context(struct clk_hw *hw); + +int omap3_core_dpll_save_context(struct clk_hw *hw); +void omap3_core_dpll_restore_context(struct clk_hw *hw); + +extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; + +#ifdef CONFIG_ATAGS +int omap3430_clk_legacy_init(void); +int omap3430es1_clk_legacy_init(void); +int omap36xx_clk_legacy_init(void); +int am35xx_clk_legacy_init(void); +#else +static inline int omap3430_clk_legacy_init(void) { return -ENXIO; } +static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; } +static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; } +static inline int am35xx_clk_legacy_init(void) { return -ENXIO; } +#endif + + +#endif diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h new file mode 100644 index 0000000..a198dd9 --- /dev/null +++ b/include/linux/clk/zynq.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2013 Xilinx Inc. + * Copyright (C) 2012 National Instruments + */ + +#ifndef __LINUX_CLK_ZYNQ_H_ +#define __LINUX_CLK_ZYNQ_H_ + +#include + +void zynq_clock_init(void); + +struct clk *clk_register_zynq_pll(const char *name, const char *parent, + void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index, + spinlock_t *lock); +#endif diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h new file mode 100644 index 0000000..fd06b27 --- /dev/null +++ b/include/linux/clkdev.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * include/linux/clkdev.h + * + * Copyright (C) 2008 Russell King. + * + * Helper for the clk API to assist looking up a struct clk. + */ +#ifndef __CLKDEV_H +#define __CLKDEV_H + +#include + +struct clk; +struct clk_hw; +struct device; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +#define CLKDEV_INIT(d, n, c) \ + { \ + .dev_id = d, \ + .con_id = n, \ + .clk = c, \ + } + +struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); + +void clkdev_add(struct clk_lookup *cl); +void clkdev_drop(struct clk_lookup *cl); + +struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) __printf(3, 4); +struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id, + const char *dev_fmt, ...) 
__printf(3, 4); + +void clkdev_add_table(struct clk_lookup *, size_t); +int clk_add_alias(const char *, const char *, const char *, struct device *); + +int clk_register_clkdev(struct clk *, const char *, const char *); +int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); + +int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw, + const char *con_id, const char *dev_id); +void devm_clk_release_clkdev(struct device *dev, const char *con_id, + const char *dev_id); +#endif diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h new file mode 100644 index 0000000..b5cebf7 --- /dev/null +++ b/include/linux/clock_cooling.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/include/linux/clock_cooling.h + * + * Copyright (C) 2014 Eduardo Valentin + * + * Copyright (C) 2013 Texas Instruments Inc. + * Contact: Eduardo Valentin + * + * Highly based on cpu_cooling.c. + * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) + * Copyright (C) 2012 Amit Daniel + */ + +#ifndef __CPU_COOLING_H__ +#define __CPU_COOLING_H__ + +#include +#include +#include + +#ifdef CONFIG_CLOCK_THERMAL +/** + * clock_cooling_register - function to create clock cooling device. + * @dev: struct device pointer to the device used as clock cooling device. + * @clock_name: string containing the clock used as cooling mechanism. + */ +struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name); + +/** + * clock_cooling_unregister - function to remove clock cooling device. + * @cdev: thermal cooling device pointer. + */ +void clock_cooling_unregister(struct thermal_cooling_device *cdev); + +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq); +#else /* !CONFIG_CLOCK_THERMAL */ +static inline struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name) +{ + return NULL; +} +static inline +void clock_cooling_unregister(struct thermal_cooling_device *cdev) +{ +} +static inline +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq) +{ + return THERMAL_CSTATE_INVALID; +} +#endif /* CONFIG_CLOCK_THERMAL */ + +#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h new file mode 100644 index 0000000..8ae9a95 --- /dev/null +++ b/include/linux/clockchips.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/include/linux/clockchips.h + * + * This file contains the structure definitions for clockchips. + * + * If you are not a clockchip, or the time of day code, you should + * not be including this file! + */ +#ifndef _LINUX_CLOCKCHIPS_H +#define _LINUX_CLOCKCHIPS_H + +#ifdef CONFIG_GENERIC_CLOCKEVENTS + +# include +# include +# include +# include + +struct clock_event_device; +struct module; + +/* + * Possible states of a clock event device. + * + * DETACHED: Device is not used by clockevents core. Initial state or can be + * reached from SHUTDOWN. + * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT. + * PERIODIC: Device is programmed to generate events periodically. Can be + * reached from DETACHED or SHUTDOWN. + * ONESHOT: Device is programmed to generate event only once. Can be reached + * from DETACHED or SHUTDOWN. + * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily + * stopped. 
+ */ +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED, + CLOCK_EVT_STATE_SHUTDOWN, + CLOCK_EVT_STATE_PERIODIC, + CLOCK_EVT_STATE_ONESHOT, + CLOCK_EVT_STATE_ONESHOT_STOPPED, +}; + +/* + * Clock event features + */ +# define CLOCK_EVT_FEAT_PERIODIC 0x000001 +# define CLOCK_EVT_FEAT_ONESHOT 0x000002 +# define CLOCK_EVT_FEAT_KTIME 0x000004 + +/* + * x86(64) specific (mis)features: + * + * - Clockevent source stops in C3 State and needs broadcast support. + * - Local APIC timer is used as a dummy device. + */ +# define CLOCK_EVT_FEAT_C3STOP 0x000008 +# define CLOCK_EVT_FEAT_DUMMY 0x000010 + +/* + * Core shall set the interrupt affinity dynamically in broadcast mode + */ +# define CLOCK_EVT_FEAT_DYNIRQ 0x000020 +# define CLOCK_EVT_FEAT_PERCPU 0x000040 + +/* + * Clockevent device is based on a hrtimer for broadcast + */ +# define CLOCK_EVT_FEAT_HRTIMER 0x000080 + +/** + * struct clock_event_device - clock event device descriptor + * @event_handler: Assigned by the framework to be called by the low + * level handler of the event source + * @set_next_event: set next event function using a clocksource delta + * @set_next_ktime: set next event function using a direct ktime value + * @next_event: local storage for the next event in oneshot mode + * @max_delta_ns: maximum delta value in ns + * @min_delta_ns: minimum delta value in ns + * @mult: nanosecond to cycles multiplier + * @shift: nanoseconds to cycles divisor (power of two) + * @state_use_accessors:current state of the device, assigned by the core code + * @features: features + * @retries: number of forced programming retries + * @set_state_periodic: switch state to periodic + * @set_state_oneshot: switch state to oneshot + * @set_state_oneshot_stopped: switch state to oneshot_stopped + * @set_state_shutdown: switch state to shutdown + * @tick_resume: resume clkevt device + * @broadcast: function to broadcast events + * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration + * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration + * @name: ptr to clock event name + * @rating: variable to rate clock event devices + * @irq: IRQ number (only for non CPU local devices) + * @bound_on: Bound on CPU + * @cpumask: cpumask to indicate for which CPUs this device works + * @list: list head for the management code + * @owner: module reference + */ +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(unsigned long evt, struct clock_event_device *); + int (*set_next_ktime)(ktime_t expires, struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + unsigned long retries; + + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + + void (*broadcast)(const struct cpumask *mask); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + unsigned long min_delta_ticks; + unsigned long max_delta_ticks; + + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; +} ____cacheline_aligned; + +/* Helpers to verify state of a clockevent device */ +static inline bool 
clockevent_state_detached(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED; +} + +static inline bool clockevent_state_shutdown(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN; +} + +static inline bool clockevent_state_periodic(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC; +} + +static inline bool clockevent_state_oneshot(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT; +} + +static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED; +} + +/* + * Calculate a multiplication factor for scaled math, which is used to convert + * nanoseconds based values to clock ticks: + * + * clock_ticks = (nanoseconds * factor) >> shift. + * + * div_sc is the rearranged equation to calculate a factor from a given clock + * ticks / nanoseconds ratio: + * + * factor = (clock_ticks << shift) / nanoseconds + */ +static inline unsigned long +div_sc(unsigned long ticks, unsigned long nsec, int shift) +{ + u64 tmp = ((u64)ticks) << shift; + + do_div(tmp, nsec); + + return (unsigned long) tmp; +} + +/* Clock event layer functions */ +extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); +extern void clockevents_register_device(struct clock_event_device *dev); +extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); + +extern void clockevents_config_and_register(struct clock_event_device *dev, + u32 freq, unsigned long min_delta, + unsigned long max_delta); + +extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); + +static inline void +clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec) +{ + return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec); +} + +extern void clockevents_suspend(void); +extern void clockevents_resume(void); + +# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +# ifdef CONFIG_ARCH_HAS_TICK_BROADCAST +extern void tick_broadcast(const struct cpumask *mask); +# else +# define tick_broadcast NULL +# endif +extern int tick_receive_broadcast(void); +# endif + +# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) +extern void tick_setup_hrtimer_broadcast(void); +extern int tick_check_broadcast_expired(void); +# else +static inline int tick_check_broadcast_expired(void) { return 0; } +static inline void tick_setup_hrtimer_broadcast(void) { } +# endif + +#else /* !CONFIG_GENERIC_CLOCKEVENTS: */ + +static inline void clockevents_suspend(void) { } +static inline void clockevents_resume(void) { } +static inline int tick_check_broadcast_expired(void) { return 0; } +static inline void tick_setup_hrtimer_broadcast(void) { } + +#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ + +#endif /* _LINUX_CLOCKCHIPS_H */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h new file mode 100644 index 0000000..89d35f5 --- /dev/null +++ b/include/linux/clocksource.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/include/linux/clocksource.h + * + * This file contains the structure definitions for clocksources. + * + * If you are not a clocksource, or timekeeping code, you should + * not be including this file! 
+ */
+#ifndef _LINUX_CLOCKSOURCE_H
+#define _LINUX_CLOCKSOURCE_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+struct clocksource;
+struct module;
+
+#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+#include
+#endif
+
+#ifdef CONFIG_MCST
+extern void override_clocksource(const char *buf, size_t count);
+#endif
+
+/**
+ * struct clocksource - hardware abstraction for a free running counter
+ * Provides mostly state-free accessors to the underlying hardware.
+ * This is the structure used for system time.
+ *
+ * @name: ptr to clocksource name
+ * @list: list head for registration
+ * @rating: rating value for selection (higher is better)
+ * To avoid rating inflation the following
+ * list should give you a guide as to how
+ * to assign your clocksource a rating
+ * 1-99: Unfit for real use
+ * Only available for bootup and testing purposes.
+ * 100-199: Base level usability.
+ * Functional for real use, but not desired.
+ * 200-299: Good.
+ * A correct and usable clocksource.
+ * 300-399: Desired.
+ * A reasonably fast and accurate clocksource.
+ * 400-499: Perfect
+ * The ideal clocksource. A must-use where
+ * available.
+ * @read: returns a cycle value, passes clocksource as argument
+ * @enable: optional function to enable the clocksource
+ * @disable: optional function to disable the clocksource
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
+ * @maxadj: maximum adjustment value to mult (~11%)
+ * @max_cycles: maximum safe cycle value which won't overflow on multiplication
+ * @flags: flags describing special properties
+ * @archdata: arch-specific data
+ * @suspend: suspend function for the clocksource, if necessary
+ * @resume: resume function for the clocksource, if necessary
+ * @mark_unstable: Optional function to inform the clocksource driver that
+ * the watchdog marked the clocksource unstable
+ * @owner: module reference, must be set by clocksource in modules
+ *
+ * Note: This struct is not used in the hot paths of the timekeeping code
+ * because the timekeeper caches the hot path fields in its own data
+ * structure, so no cache line alignment is required.
+ *
+ * The pointer to the clocksource itself is handed to the read
+ * callback. If you need extra information there you can wrap struct
+ * clocksource into your own struct. Depending on the amount of
+ * information you need you should consider cache-line aligning that
+ * structure.
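+ *
+ * A minimal sketch of such a wrapper (all driver names hypothetical,
+ * not defined in this header):
+ *
+ *	struct foo_clocksource {
+ *		struct clocksource cs;
+ *		void __iomem *base;
+ *	};
+ *
+ *	static u64 foo_read(struct clocksource *cs)
+ *	{
+ *		struct foo_clocksource *foo =
+ *			container_of(cs, struct foo_clocksource, cs);
+ *
+ *		return (u64)readl(foo->base) & cs->mask;
+ *	}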
+ */
+struct clocksource {
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
+#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+ struct arch_clocksource_data archdata;
+#endif
+ u64 max_cycles;
+ const char *name;
+ struct list_head list;
+ int rating;
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ unsigned long flags;
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
+ void (*mark_unstable)(struct clocksource *cs);
+ void (*tick_stable)(struct clocksource *cs);
+
+ /* private: */
+#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+ /* Watchdog related data, used by the framework */
+ struct list_head wd_list;
+ u64 cs_last;
+ u64 wd_last;
+#endif
+ struct module *owner;
+};
+
+/*
+ * Clock source flags bits:
+ */
+#define CLOCK_SOURCE_IS_CONTINUOUS 0x01
+#define CLOCK_SOURCE_MUST_VERIFY 0x02
+
+#define CLOCK_SOURCE_WATCHDOG 0x10
+#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
+#define CLOCK_SOURCE_UNSTABLE 0x40
+#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
+#define CLOCK_SOURCE_RESELECT 0x100
+
+/* simplify initialization of mask field */
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
+
+static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
+{
+ /* freq = cyc/from
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = from/freq * 2^shift
+ * mult = from * 2^shift / freq
+ * mult = (from << shift) / freq
+ */
+ u64 tmp = ((u64)from) << shift_constant;
+
+ tmp += freq/2; /* round for do_div */
+ do_div(tmp, freq);
+
+ return (u32)tmp;
+}
+
+/**
+ * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles: cycles
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ *
+ * Converts clocksource cycles to nanoseconds, using the given @mult and @shift.
+ * The code is optimized for performance and is not intended to work
+ * with absolute clocksource cycles (as those will easily overflow),
+ * but is only intended to be used with relative (delta) clocksource cycles.
+ *
+ * XXX - This could use __mult_div64() if available
+ */
+static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
+{
+ return ((u64) cycles * mult) >> shift;
+}
+
+
+extern int clocksource_unregister(struct clocksource*);
+extern void clocksource_touch_watchdog(void);
+extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_suspend(void);
+extern void clocksource_resume(void);
+extern struct clocksource * __init clocksource_default_clock(void);
+extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
+
+extern u64
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
+extern void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
+
+/*
+ * Don't call __clocksource_register_scale directly, use
+ * clocksource_register_hz/khz
+ */
+extern int
+__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
+extern void
+__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+/*
+ * Don't call this unless you are a default clocksource
+ * (AKA: jiffies) and absolutely have to.
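+ * Drivers that know their counter frequency should use the
+ * clocksource_register_hz()/clocksource_register_khz() helpers below
+ * instead, e.g. (frequency and names made up for illustration,
+ * continuing the hypothetical wrapper sketched above):
+ *
+ *	clocksource_register_hz(&foo.cs, 24000000);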
+ */ +static inline int __clocksource_register(struct clocksource *cs) +{ + return __clocksource_register_scale(cs, 1, 0); +} + +static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) +{ + return __clocksource_register_scale(cs, 1, hz); +} + +static inline int clocksource_register_khz(struct clocksource *cs, u32 khz) +{ + return __clocksource_register_scale(cs, 1000, khz); +} + +static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz) +{ + __clocksource_update_freq_scale(cs, 1, hz); +} + +static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz) +{ + __clocksource_update_freq_scale(cs, 1000, khz); +} + +#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT +extern void clocksource_arch_init(struct clocksource *cs); +#else +static inline void clocksource_arch_init(struct clocksource *cs) { } +#endif + +extern int timekeeping_notify(struct clocksource *clock); + +extern u64 clocksource_mmio_readl_up(struct clocksource *); +extern u64 clocksource_mmio_readl_down(struct clocksource *); +extern u64 clocksource_mmio_readw_up(struct clocksource *); +extern u64 clocksource_mmio_readw_down(struct clocksource *); + +extern int clocksource_mmio_init(void __iomem *, const char *, + unsigned long, int, unsigned, u64 (*)(struct clocksource *)); + +extern int clocksource_i8253_init(void); + +#define TIMER_OF_DECLARE(name, compat, fn) \ + OF_DECLARE_1_RET(timer, name, compat, fn) + +#ifdef CONFIG_TIMER_PROBE +extern void timer_probe(void); +#else +static inline void timer_probe(void) {} +#endif + +#define TIMER_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn) + +#endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h new file mode 100644 index 0000000..ea4958e --- /dev/null +++ b/include/linux/cm4000_cs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CM4000_H_ +#define _CM4000_H_ + +#include + + +#define DEVICE_NAME "cmm" +#define MODULE_NAME "cm4000_cs" + +#endif /* _CM4000_H_ */ diff --git a/include/linux/cma.h b/include/linux/cma.h new file mode 100644 index 0000000..190184b --- /dev/null +++ b/include/linux/cma.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CMA_H__ +#define __CMA_H__ + +#include +#include + +/* + * There is always at least global CMA area and a few optional + * areas configured in kernel .config. 
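+ * For example, a kernel built with CONFIG_CMA_AREAS=7 ends up with
+ * MAX_CMA_AREAS == 1 + 7 == 8 below.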
+ */ +#ifdef CONFIG_CMA_AREAS +#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS) + +#else +#define MAX_CMA_AREAS (0) + +#endif + +struct cma; + +extern unsigned long totalcma_pages; +extern phys_addr_t cma_get_base(const struct cma *cma); +extern unsigned long cma_get_size(const struct cma *cma); +extern const char *cma_get_name(const struct cma *cma); + +extern int __init cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma); +extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, + unsigned int order_per_bit, + const char *name, + struct cma **res_cma); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, + bool no_warn); +extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); + +extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); +#endif diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h new file mode 100644 index 0000000..68a5418 --- /dev/null +++ b/include/linux/cmdline-parser.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Parsing command line, get the partitions information. + * + * Written by Cai Zhiyong + * + */ +#ifndef CMDLINEPARSEH +#define CMDLINEPARSEH + +#include +#include +#include + +/* partition flags */ +#define PF_RDONLY 0x01 /* Device is read only */ +#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */ + +struct cmdline_subpart { + char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */ + sector_t from; + sector_t size; + int flags; + struct cmdline_subpart *next_subpart; +}; + +struct cmdline_parts { + char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */ + unsigned int nr_subparts; + struct cmdline_subpart *subpart; + struct cmdline_parts *next_parts; +}; + +void cmdline_parts_free(struct cmdline_parts **parts); + +int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline); + +struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, + const char *bdev); + +int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, + int slot, + int (*add_part)(int, struct cmdline_subpart *, void *), + void *param); + +#endif /* CMDLINEPARSEH */ diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h new file mode 100644 index 0000000..1d5b02a --- /dev/null +++ b/include/linux/cn_proc.h @@ -0,0 +1,58 @@ +/* + * cn_proc.h - process events connector + * + * Copyright (C) Matt Helsley, IBM Corp. 2005 + * Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin + * Copyright (C) 2005 Nguyen Anh Quynh + * Copyright (C) 2005 Guillaume Thouvenin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ */
+#ifndef CN_PROC_H
+#define CN_PROC_H
+
+#include
+
+#ifdef CONFIG_PROC_EVENTS
+void proc_fork_connector(struct task_struct *task);
+void proc_exec_connector(struct task_struct *task);
+void proc_id_connector(struct task_struct *task, int which_id);
+void proc_sid_connector(struct task_struct *task);
+void proc_ptrace_connector(struct task_struct *task, int which_id);
+void proc_comm_connector(struct task_struct *task);
+void proc_coredump_connector(struct task_struct *task);
+void proc_exit_connector(struct task_struct *task);
+#else
+static inline void proc_fork_connector(struct task_struct *task)
+{}
+
+static inline void proc_exec_connector(struct task_struct *task)
+{}
+
+static inline void proc_id_connector(struct task_struct *task,
+ int which_id)
+{}
+
+static inline void proc_sid_connector(struct task_struct *task)
+{}
+
+static inline void proc_comm_connector(struct task_struct *task)
+{}
+
+static inline void proc_ptrace_connector(struct task_struct *task,
+ int ptrace_id)
+{}
+
+static inline void proc_coredump_connector(struct task_struct *task)
+{}
+
+static inline void proc_exit_connector(struct task_struct *task)
+{}
+#endif /* CONFIG_PROC_EVENTS */
+#endif /* CN_PROC_H */
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
new file mode 100644
index 0000000..0644284
--- /dev/null
+++ b/include/linux/cnt32_to_63.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Extend a 32-bit counter to 63 bits
+ *
+ * Author: Nicolas Pitre
+ * Created: December 3, 2006
+ * Copyright: MontaVista Software, Inc.
+ */
+
+#ifndef __LINUX_CNT32_TO_63_H__
+#define __LINUX_CNT32_TO_63_H__
+
+#include
+#include
+#include
+
+/* this is used only to give gcc a clue about good code generation */
+union cnt32_to_63 {
+ struct {
+#if defined(__LITTLE_ENDIAN)
+ u32 lo, hi;
+#elif defined(__BIG_ENDIAN)
+ u32 hi, lo;
+#endif
+ };
+ u64 val;
+};
+
+
+/**
+ * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
+ * @cnt_lo: The low part of the counter
+ *
+ * Many hardware clock counters are only 32 bits wide and therefore have
+ * a relatively short period making wrap-arounds rather frequent. This
+ * is a problem when implementing sched_clock() for example, where a 64-bit
+ * non-wrapping monotonic value is expected to be returned.
+ *
+ * To overcome that limitation, let's extend a 32-bit counter to 63 bits
+ * in a completely lock-free fashion. Bits 0 to 31 of the clock are provided
+ * by the hardware while bits 32 to 62 are stored in memory. The top bit in
+ * memory is used to synchronize with the hardware clock half-period. When
+ * the top bit of both counters (hardware and in memory) differ then the
+ * memory is updated with a new value, incrementing it when the hardware
+ * counter wraps around.
+ *
+ * Because a word store in memory is atomic, the incremented value will
+ * always be in sync with the top bit indicating to any potential concurrent
+ * reader if the value in memory is up to date or not with regards to the
+ * needed increment. And any race in updating the value in memory is harmless
+ * as the same value would simply be stored more than once.
+ * + * The restrictions for the algorithm to work properly are: + * + * 1) this code must be called at least once per each half period of the + * 32-bit counter; + * + * 2) this code must not be preempted for a duration longer than the + * 32-bit counter half period minus the longest period between two + * calls to this code; + * + * Those requirements ensure proper update to the state bit in memory. + * This is usually not a problem in practice, but if it is then a kernel + * timer should be scheduled to manage for this code to be executed often + * enough. + * + * And finally: + * + * 3) the cnt_lo argument must be seen as a globally incrementing value, + * meaning that it should be a direct reference to the counter data which + * can be evaluated according to a specific ordering within the macro, + * and not the result of a previous evaluation stored in a variable. + * + * For example, this is wrong: + * + * u32 partial = get_hw_count(); + * u64 full = cnt32_to_63(partial); + * return full; + * + * This is fine: + * + * u64 full = cnt32_to_63(get_hw_count()); + * return full; + * + * Note that the top bit (bit 63) in the returned value should be considered + * as garbage. It is not cleared here because callers are likely to use a + * multiplier on the returned value which can get rid of the top bit + * implicitly by making the multiplier even, therefore saving on a runtime + * clear-bit instruction. Otherwise caller must remember to clear the top + * bit explicitly. + */ +#define cnt32_to_63(cnt_lo) \ +({ \ + static u32 __m_cnt_hi; \ + union cnt32_to_63 __x; \ + __x.hi = __m_cnt_hi; \ + smp_rmb(); \ + __x.lo = (cnt_lo); \ + if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ + __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ + __x.val; \ +}) + +#endif diff --git a/include/linux/coda.h b/include/linux/coda.h new file mode 100644 index 0000000..0ca0c83 --- /dev/null +++ b/include/linux/coda.h @@ -0,0 +1,64 @@ +/* + You may distribute this file under either of the two licenses that + follow at your discretion. +*/ + +/* BLURB lgpl + + Coda File System + Release 5 + + Copyright (c) 1987-1999 Carnegie Mellon University + Additional copyrights listed below + +This code is distributed "AS IS" without warranty of any kind under +the terms of the GNU Library General Public Licence Version 2, as +shown in the file LICENSE, or under the license shown below. The +technical and financial contributors to Coda are listed in the file +CREDITS. + + Additional copyrights +*/ + +/* + + Coda: an Experimental Distributed File System + Release 4.0 + + Copyright (c) 1987-1999 Carnegie Mellon University + All Rights Reserved + +Permission to use, copy, modify and distribute this software and its +documentation is hereby granted, provided that both the copyright +notice and this permission notice appear in all copies of the +software, derivative works or modified versions, and any portions +thereof, and that both notices appear in supporting documentation, and +that credit is given to Carnegie Mellon University in all documents +and publicity pertaining to direct or indirect use of this code or its +derivatives. + +CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, +SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS +FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON +DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER +RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF +ANY DERIVATIVE WORK. 
+
+Carnegie Mellon encourages users of this software to return any
+improvements or extensions that they make, and to grant Carnegie
+Mellon the rights to redistribute these changes without encumbrance.
+*/
+
+/*
+ *
+ * Based on cfs.h from Mach, but revamped for increased simplicity.
+ * Linux modifications by
+ * Peter Braam, Aug 1996
+ */
+#ifndef _CODA_HEADER_
+#define _CODA_HEADER_
+
+typedef unsigned long long u_quad_t;
+
+#include
+#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
new file mode 100644
index 0000000..4b898cd
--- /dev/null
+++ b/include/linux/compaction.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_COMPACTION_H
+#define _LINUX_COMPACTION_H
+
+/*
+ * Determines how hard direct compaction should try to succeed.
+ * Lower value means higher priority, analogous to reclaim priority.
+ */
+enum compact_priority {
+ COMPACT_PRIO_SYNC_FULL,
+ MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
+ COMPACT_PRIO_SYNC_LIGHT,
+ MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ COMPACT_PRIO_ASYNC,
+ INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
+};
+
+/* Return values for compact_zone() and try_to_compact_pages() */
+/* When adding new states, please adjust include/trace/events/compaction.h */
+enum compact_result {
+ /* For more detailed tracepoint output - internal to compaction */
+ COMPACT_NOT_SUITABLE_ZONE,
+ /*
+ * compaction didn't start as it was not possible or direct reclaim
+ * was more suitable
+ */
+ COMPACT_SKIPPED,
+ /* compaction didn't start as it was deferred due to past failures */
+ COMPACT_DEFERRED,
+
+ /* compaction not active last round */
+ COMPACT_INACTIVE = COMPACT_DEFERRED,
+
+ /* For more detailed tracepoint output - internal to compaction */
+ COMPACT_NO_SUITABLE_PAGE,
+ /* compaction should continue to another pageblock */
+ COMPACT_CONTINUE,
+
+ /*
+ * The full zone was scanned but compaction wasn't successful in
+ * compacting suitable pages.
+ */
+ COMPACT_COMPLETE,
+ /*
+ * direct compaction has scanned part of the zone but wasn't successful
+ * in compacting suitable pages.
+ */
+ COMPACT_PARTIAL_SKIPPED,
+
+ /* compaction terminated prematurely due to lock contention */
+ COMPACT_CONTENDED,
+
+ /*
+ * direct compaction terminated after concluding that the allocation
+ * should now succeed
+ */
+ COMPACT_SUCCESS,
+};
+
+struct alloc_context; /* in mm/internal.h */
+
+/*
+ * Number of free order-0 pages that should be available above given watermark
+ * to make sure compaction has reasonable chance of not running out of free
+ * pages that it needs to isolate as migration target during its work.
+ */
+static inline unsigned long compact_gap(unsigned int order)
+{
+ /*
+ * Although all the isolations for migration are temporary, compaction
+ * free scanner may have up to 1 << order pages on its list and then
+ * try to split an (order - 1) free page. At that point, a gap of
+ * 1 << order might not be enough, so it's safer to require twice that
+ * amount. Note that the number of pages on the list is also
+ * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
+ * that the migrate scanner can have isolated on migrate list, and free
+ * scanner is only invoked when the number of isolated free pages is
+ * lower than that. But it's not worth complicating the formula here
+ * as a bigger gap for higher orders than strictly necessary can also
+ * improve chances of compaction success.
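+ *
+ * As a worked example: for order = 9 (a 2MB THP with 4KB base pages),
+ * compact_gap() returns 2UL << 9 = 1024 order-0 pages, i.e. 4MB of
+ * free memory above the given watermark.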
+	 */
+	return 2UL << order;
+}
+
+#ifdef CONFIG_COMPACTION
+extern int sysctl_compact_memory;
+extern int sysctl_compaction_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_extfrag_threshold;
+extern int sysctl_compact_unevictable_allowed;
+
+extern int fragmentation_index(struct zone *zone, unsigned int order);
+extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
+		unsigned int order, unsigned int alloc_flags,
+		const struct alloc_context *ac, enum compact_priority prio,
+		struct page **page);
+extern void reset_isolation_suitable(pg_data_t *pgdat);
+extern enum compact_result compaction_suitable(struct zone *zone, int order,
+		unsigned int alloc_flags, int classzone_idx);
+
+extern void defer_compaction(struct zone *zone, int order);
+extern bool compaction_deferred(struct zone *zone, int order);
+extern void compaction_defer_reset(struct zone *zone, int order,
+				bool alloc_success);
+extern bool compaction_restarting(struct zone *zone, int order);
+
+/* Compaction has made some progress and retrying makes sense */
+static inline bool compaction_made_progress(enum compact_result result)
+{
+	/*
+	 * Even though this might sound confusing, this in fact tells us
+	 * that the compaction successfully isolated and migrated some
+	 * pageblocks.
+	 */
+	if (result == COMPACT_SUCCESS)
+		return true;
+
+	return false;
+}
+
+/* Compaction has failed and it doesn't make much sense to keep retrying. */
+static inline bool compaction_failed(enum compact_result result)
+{
+	/* All zones were scanned completely and still no result. */
+	if (result == COMPACT_COMPLETE)
+		return true;
+
+	return false;
+}
+
+/* Compaction needs reclaim to be performed first, so it can continue. */
+static inline bool compaction_needs_reclaim(enum compact_result result)
+{
+	/*
+	 * Compaction backed off due to watermark checks for order-0,
+	 * so the regular reclaim has to try harder and reclaim something.
+	 */
+	if (result == COMPACT_SKIPPED)
+		return true;
+
+	return false;
+}
+
+/*
+ * Compaction has backed off for some reason after doing some work or none
+ * at all. It might be throttling or lock contention. Retrying might still
+ * be worthwhile, but with a higher priority if allowed.
+ */
+static inline bool compaction_withdrawn(enum compact_result result)
+{
+	/*
+	 * If compaction is deferred for high-order allocations, it is
+	 * because sync compaction recently failed. If this is the case
+	 * and the caller requested a THP allocation, we do not want
+	 * to heavily disrupt the system, so we fail the allocation
+	 * instead of entering direct reclaim.
+	 */
+	if (result == COMPACT_DEFERRED)
+		return true;
+
+	/*
+	 * If compaction in async mode encounters contention or blocks a
+	 * higher priority task, we back off early rather than cause stalls.
+	 */
+	if (result == COMPACT_CONTENDED)
+		return true;
+
+	/*
+	 * Page scanners have met but we haven't scanned full zones, so
+	 * this is in fact a back off.
+	 */
+	if (result == COMPACT_PARTIAL_SKIPPED)
+		return true;
+
+	return false;
+}
+
+
+bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
+					int alloc_flags);
+
+extern int kcompactd_run(int nid);
+extern void kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+
+#else
+static inline void reset_isolation_suitable(pg_data_t *pgdat)
+{
+}
+
+static inline enum compact_result compaction_suitable(struct zone *zone, int order,
+					int alloc_flags, int classzone_idx)
+{
+	return COMPACT_SKIPPED;
+}
+
+static inline void defer_compaction(struct zone *zone, int order)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone, int order)
+{
+	return true;
+}
+
+static inline bool compaction_made_progress(enum compact_result result)
+{
+	return false;
+}
+
+static inline bool compaction_failed(enum compact_result result)
+{
+	return false;
+}
+
+static inline bool compaction_needs_reclaim(enum compact_result result)
+{
+	return false;
+}
+
+static inline bool compaction_withdrawn(enum compact_result result)
+{
+	return true;
+}
+
+static inline int kcompactd_run(int nid)
+{
+	return 0;
+}
+static inline void kcompactd_stop(int nid)
+{
+}
+
+static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+}
+
+#endif /* CONFIG_COMPACTION */
+
+struct node;
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int compaction_register_node(struct node *node);
+extern void compaction_unregister_node(struct node *node);
+
+#else
+
+static inline int compaction_register_node(struct node *node)
+{
+	return 0;
+}
+
+static inline void compaction_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* _LINUX_COMPACTION_H */
diff --git a/include/linux/compat.h b/include/linux/compat.h
new file mode 100644
index 0000000..c4c389c
--- /dev/null
+++ b/include/linux/compat.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_COMPAT_H
+#define _LINUX_COMPAT_H
+/*
+ * These are the type definitions for the architecture specific
+ * syscall compatibility layer.
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+
+#include <linux/stat.h>
+#include <linux/param.h>	/* for HZ */
+#include <linux/sem.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <linux/fs.h>
+#include <linux/aio_abi.h>	/* for aio_context_t */
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+
+#include <asm/compat.h>
+
+#ifdef CONFIG_COMPAT
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/*
+ * It may be useful for an architecture to override the definitions of the
+ * COMPAT_SYSCALL_DEFINE0() and COMPAT_SYSCALL_DEFINEx() macros, in particular
+ * to use a different calling convention for syscalls. To allow for that,
+ * the prototypes for the compat_sys_*() functions below will *not* be included
+ * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
+ */
+#include <asm/syscall_wrapper.h>
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
+#ifndef COMPAT_USE_64BIT_TIME
+#define COMPAT_USE_64BIT_TIME 0
+#endif
+
+#ifndef __SC_DELOUSE
+#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
+#endif
+
+#ifndef COMPAT_SYSCALL_DEFINE0
+#define COMPAT_SYSCALL_DEFINE0(name) \
+	asmlinkage long compat_sys_##name(void); \
+	ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \
+	asmlinkage long compat_sys_##name(void)
+#endif /* COMPAT_SYSCALL_DEFINE0 */
+
+#define COMPAT_SYSCALL_DEFINE1(name, ...) \
+	COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE2(name, ...) \
+	COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE3(name, ...)
\ + COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE4(name, ...) \ + COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE5(name, ...) \ + COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE6(name, ...) \ + COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) + +/* + * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which + * sign-extends 32-bit ints to longs whenever needed. The actual work is + * done within __do_compat_sys_*(). + */ +#ifndef COMPAT_SYSCALL_DEFINEx +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ + asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ + __attribute__((alias(__stringify(__se_compat_sys##name)))); \ + ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } \ + __diag_pop(); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) +#endif /* COMPAT_SYSCALL_DEFINEx */ + +#ifdef CONFIG_COMPAT + +#ifndef compat_user_stack_pointer +#define compat_user_stack_pointer() current_user_stack_pointer() +#endif +#ifndef compat_sigaltstack /* we'll need that for MIPS */ +typedef struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +} compat_stack_t; +#endif +#ifndef COMPAT_MINSIGSTKSZ +#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ +#endif + +#define compat_jiffies_to_clock_t(x) \ + (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) + +typedef __compat_uid32_t compat_uid_t; +typedef __compat_gid32_t compat_gid_t; + +struct compat_sel_arg_struct; +struct rusage; + +struct compat_itimerval { + struct old_timeval32 it_interval; + struct old_timeval32 it_value; +}; + +struct itimerval; +int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *); +int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *); + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + +#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) + +typedef struct { + compat_sigset_word sig[_COMPAT_NSIG_WORDS]; +} compat_sigset_t; + +int set_compat_user_sigmask(const compat_sigset_t __user *umask, + size_t sigsetsize); + +struct compat_sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; +#else + compat_uint_t sa_flags; + compat_uptr_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + compat_uptr_t sa_restorer; +#endif + compat_sigset_t sa_mask __packed; +}; + +typedef union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +} compat_sigval_t; + +typedef struct compat_siginfo { + int si_signo; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO + int si_errno; + int si_code; +#else + int si_code; + int si_errno; +#endif + + union { + int _pad[128/sizeof(int) - 3]; + + /* kill() */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + 
struct { + compat_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + compat_sigval_t _sigval; /* same as below */ + } _timer; + + /* POSIX.1b signals */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + compat_sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + +#ifdef CONFIG_X86_X32_ABI + /* SIGCHLD (x32 version) */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_s64 _utime; + compat_s64 _stime; + } _sigchld_x32; +#endif + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ + struct { + compat_uptr_t _addr; /* faulting insn/memory ref. */ +#ifdef __ARCH_SI_TRAPNO + int _trapno; /* TRAP # which caused the signal */ +#endif +#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ + sizeof(short) : __alignof__(compat_uptr_t)) + union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short int _addr_lsb; /* Valid LSB of the reported address. */ + /* used when si_code=SEGV_BNDERR */ + struct { + char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD]; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + struct { + char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; + u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + /* SIGPOLL */ + struct { + compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + struct { + compat_uptr_t _call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; + } _sifields; +} compat_siginfo_t; + +/* + * These functions operate on 32- or 64-bit specs depending on + * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. 
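+ *
+ * A typical (illustrative) use from a compat handler, where 'utp' is a
+ * hypothetical void __user pointer argument:
+ *
+ *	struct timespec ts;
+ *
+ *	if (compat_get_timespec(&ts, utp))
+ *		return -EFAULT;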
+ */ +extern int compat_get_timespec(struct timespec *, const void __user *); +extern int compat_put_timespec(const struct timespec *, void __user *); +extern int compat_get_timeval(struct timeval *, const void __user *); +extern int compat_put_timeval(const struct timeval *, void __user *); + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct compat_rusage { + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +extern int put_compat_rusage(const struct rusage *, + struct compat_rusage __user *); + +struct compat_siginfo; +struct __compat_aio_sigset; + +struct compat_dirent { + u32 d_ino; + compat_off_t d_off; + u16 d_reclen; + char d_name[256]; +}; + +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +#define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) + +typedef struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE]; + compat_int_t _tid; + + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +} compat_sigevent_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; /* Type of physical device or protocol */ + unsigned int size; /* Size of the data allocated by the caller */ + compat_uptr_t ifs_ifsu; /* union of pointers */ +}; + +struct compat_ifreq { + union { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[IFNAMSIZ]; /* Just fits the size */ + char ifru_newname[IFNAMSIZ]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_ifconf { + compat_int_t ifc_len; /* size of buffer */ + compat_caddr_t ifcbuf; +}; + +struct compat_robust_list { + compat_uptr_t next; +}; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +#ifdef CONFIG_COMPAT_OLD_SIGACTION +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; +#endif + +struct compat_keyctl_kdf_params { + compat_uptr_t hashname; + compat_uptr_t otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct compat_statfs; +struct compat_statfs64; +struct compat_old_linux_dirent; +struct compat_linux_dirent; +struct linux_dirent64; +struct compat_msghdr; +struct compat_mmsghdr; +struct compat_sysinfo; +struct compat_sysctl_args; +struct compat_kexec_segment; +struct compat_mq_attr; +struct compat_msgbuf; + +#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) + +#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + unsigned long bitmap_size); +int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); +int get_compat_sigevent(struct sigevent *event, + const struct compat_sigevent __user *u_event); + +static inline int old_timeval32_compare(struct old_timeval32 *lhs, + struct old_timeval32 *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_usec - rhs->tv_usec; +} + +static inline int old_timespec32_compare(struct old_timespec32 *lhs, + struct old_timespec32 *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); + +/* + * Defined inline such that size can be compile time constant, which avoids + * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct + */ +static inline int +put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, + unsigned int size) +{ + /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ +#ifdef __BIG_ENDIAN + compat_sigset_t v; + switch (_NSIG_WORDS) { + case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; + /* fall through */ + case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; + /* fall through */ + case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; + /* fall through */ + case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; + } + return copy_to_user(compat, &v, size) ? -EFAULT : 0; +#else + return copy_to_user(compat, set, size) ? 
-EFAULT : 0; +#endif +} + +extern int compat_ptrace_request(struct task_struct *child, + compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +struct epoll_event; /* fortunately, this one is fixed-layout */ + +extern ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, + unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer); + +extern void __user *compat_alloc_user_space(unsigned long len); + +int compat_restore_altstack(const compat_stack_t __user *uss); +int __compat_save_altstack(compat_stack_t __user *, unsigned long); +#define compat_save_altstack_ex(uss, sp) do { \ + compat_stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ +} while (0); + +/* + * These syscall function prototypes are kept in the same order as + * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls + * go below. + * + * Please note that these prototypes here are only provided for information + * purposes, for static analysis, and for linking from the syscall table. + * These functions should not be called elsewhere from kernel code. + * + * As the syscall calling convention may be different from the default + * for architectures overriding the syscall calling convention, do not + * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. + */ +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); +asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, + u32 __user *iocb); +asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct old_timespec32 __user *timeout, + const struct __compat_aio_sigset __user *usig); +asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct __kernel_timespec __user *timeout, + const struct __compat_aio_sigset __user *usig); + +/* fs/cookies.c */ +asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); + +/* fs/eventpoll.c */ +asmlinkage long compat_sys_epoll_pwait(int epfd, + struct epoll_event __user *events, + int maxevents, int timeout, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +/* fs/fcntl.c */ +asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); +asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); + +/* fs/ioctl.c */ +asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + compat_ulong_t arg); + +/* fs/namespace.c */ +asmlinkage long compat_sys_mount(const char __user *dev_name, + const char __user *dir_name, + const char __user *type, compat_ulong_t flags, + const void __user *data); + +/* fs/open.c */ +asmlinkage long compat_sys_statfs(const char __user *pathname, + struct compat_statfs __user *buf); +asmlinkage long compat_sys_statfs64(const char __user *pathname, + compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage 
long compat_sys_fstatfs(unsigned int fd, + struct compat_statfs __user *buf); +asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); +asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t); +/* No generic prototype for truncate64, ftruncate64, fallocate */ +asmlinkage long compat_sys_openat(int dfd, const char __user *filename, + int flags, umode_t mode); + +/* fs/readdir.c */ +asmlinkage long compat_sys_getdents(unsigned int fd, + struct compat_linux_dirent __user *dirent, + unsigned int count); + +/* fs/read_write.c */ +asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); +asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd, + const struct compat_iovec __user *vec, compat_ulong_t vlen); +/* No generic prototype for pread64 and pwrite64 */ +asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, + const struct compat_iovec __user *vec, + compat_ulong_t vlen, u32 pos_low, u32 pos_high); +#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 +asmlinkage long compat_sys_preadv64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif + +#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 +asmlinkage long compat_sys_pwritev64(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, loff_t pos); +#endif + +/* fs/sendfile.c */ +asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, + compat_off_t __user *offset, compat_size_t count); +asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, + compat_loff_t __user *offset, compat_size_t count); + +/* fs/select.c */ +asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, + compat_ulong_t __user *exp, + struct old_timespec32 __user *tsp, + void __user *sig); +asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, + compat_ulong_t __user *exp, + struct __kernel_timespec __user *tsp, + void __user *sig); +asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds, + unsigned int nfds, + struct old_timespec32 __user *tsp, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); +asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds, + unsigned int nfds, + struct __kernel_timespec __user *tsp, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +/* fs/signalfd.c */ +asmlinkage long compat_sys_signalfd4(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize, int flags); + +/* fs/splice.c */ +asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, + unsigned int nr_segs, unsigned int flags); + +/* fs/stat.c */ +asmlinkage long compat_sys_newfstatat(unsigned int dfd, + const char __user *filename, + struct compat_stat __user *statbuf, + int flag); +asmlinkage long compat_sys_newfstat(unsigned int fd, + struct compat_stat __user *statbuf); + +/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ + +/* kernel/exit.c */ +asmlinkage long compat_sys_waitid(int, compat_pid_t, + struct compat_siginfo __user *, int, + struct compat_rusage __user *); + + + 
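+/*
+ * Note: handlers behind these prototypes are normally introduced with the
+ * COMPAT_SYSCALL_DEFINEx() macros above rather than written by hand. A
+ * minimal illustrative definition, for a hypothetical syscall "example"
+ * backed by a hypothetical native helper do_example(), would be:
+ *
+ *	COMPAT_SYSCALL_DEFINE2(example, int, fd, compat_uptr_t, uptr)
+ *	{
+ *		return do_example(fd, compat_ptr(uptr));
+ *	}
+ */
+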
+/* kernel/futex.c */ +asmlinkage long +compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +asmlinkage long +compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + compat_size_t __user *len_ptr); + +/* kernel/itimer.c */ +asmlinkage long compat_sys_getitimer(int which, + struct compat_itimerval __user *it); +asmlinkage long compat_sys_setitimer(int which, + struct compat_itimerval __user *in, + struct compat_itimerval __user *out); + +/* kernel/kexec.c */ +asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, + compat_ulong_t nr_segments, + struct compat_kexec_segment __user *, + compat_ulong_t flags); + +/* kernel/posix-timers.c */ +asmlinkage long compat_sys_timer_create(clockid_t which_clock, + struct compat_sigevent __user *timer_event_spec, + timer_t __user *created_timer_id); + +/* kernel/ptrace.c */ +asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + compat_long_t addr, compat_long_t data); + +/* kernel/sched/core.c */ +asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); + +/* kernel/signal.c */ +asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, + compat_stack_t __user *uoss_ptr); +asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, + compat_size_t sigsetsize); +#ifndef CONFIG_ODD_RT_SIGACTION +asmlinkage long compat_sys_rt_sigaction(int, + const struct compat_sigaction __user *, + struct compat_sigaction __user *, + compat_size_t); +#endif +asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, + compat_sigset_t __user *oset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct old_timespec32 __user *uts, compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct __kernel_timespec __user *uts, compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); +/* No generic prototype for rt_sigreturn */ + +/* kernel/sys.c */ +asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); +asmlinkage long compat_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); + +/* kernel/time.c */ +asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, + struct timezone __user *tz); + +/* kernel/timer.c */ +asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); + +/* ipc/mqueue.c */ +asmlinkage long compat_sys_mq_open(const char __user *u_name, + int oflag, compat_mode_t mode, + struct compat_mq_attr __user *u_attr); +asmlinkage long compat_sys_mq_notify(mqd_t mqdes, + const struct compat_sigevent __user *u_notification); +asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, + const struct compat_mq_attr __user *u_mqstat, + 
struct compat_mq_attr __user *u_omqstat); + +/* ipc/msg.c */ +asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr); +asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); +asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, int msgflg); + +/* ipc/sem.c */ +asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); + +/* ipc/shm.c */ +asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); +asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); + +/* net/socket.c */ +asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, + unsigned flags, struct sockaddr __user *addr, + int __user *addrlen); +asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, + char __user *optval, unsigned int optlen); +asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, + unsigned flags); +asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, + unsigned int flags); + +/* mm/filemap.c: No generic prototype for readahead */ + +/* security/keys/keyctl.c */ +asmlinkage long compat_sys_keyctl(u32 option, + u32 arg2, u32 arg3, u32 arg4, u32 arg5); + +/* arch/example/kernel/sys_example.c */ +asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp); + +/* mm/fadvise.c: No generic prototype for fadvise64_64 */ + +/* mm/, CONFIG_MMU only */ +asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + compat_ulong_t mode, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, compat_ulong_t flags); +asmlinkage long compat_sys_get_mempolicy(int __user *policy, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, + compat_ulong_t addr, + compat_ulong_t flags); +asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode); +asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, + compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, + const compat_ulong_t __user *new_nodes); +asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, + __u32 __user *pages, + const int __user *nodes, + int __user *status, + int flags); + +asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, + compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); +asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct __kernel_timespec __user *timeout); +asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct old_timespec32 __user *timeout); +asmlinkage long compat_sys_wait4(compat_pid_t pid, + compat_uint_t __user *stat_addr, int options, + struct compat_rusage __user *ru); +asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, + int, const char __user *); +asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags); +asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, + const struct compat_iovec __user *lvec, + compat_ulong_t 
liovcnt, const struct compat_iovec __user *rvec,
+		compat_ulong_t riovcnt, compat_ulong_t flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+		const struct compat_iovec __user *lvec,
+		compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+		compat_ulong_t riovcnt, compat_ulong_t flags);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+		     const compat_uptr_t __user *argv,
+		     const compat_uptr_t __user *envp, int flags);
+asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
+		const struct compat_iovec __user *vec,
+		compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
+		const struct compat_iovec __user *vec,
+		compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_preadv64v2(unsigned long fd,
+		const struct compat_iovec __user *vec,
+		unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+		const struct compat_iovec __user *vec,
+		unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+
+/*
+ * Deprecated system calls which are still defined in
+ * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch
+ */
+
+/* __ARCH_WANT_SYSCALL_NO_AT */
+asmlinkage long compat_sys_open(const char __user *filename, int flags,
+				umode_t mode);
+
+/* __ARCH_WANT_SYSCALL_NO_FLAGS */
+asmlinkage long compat_sys_signalfd(int ufd,
+				    const compat_sigset_t __user *sigmask,
+				    compat_size_t sigsetsize);
+
+/* __ARCH_WANT_SYSCALL_OFF_T */
+asmlinkage long compat_sys_newstat(const char __user *filename,
+				   struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_newlstat(const char __user *filename,
+				    struct compat_stat __user *statbuf);
+
+/* __ARCH_WANT_SYSCALL_DEPRECATED */
+asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
+		compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+		struct old_timeval32 __user *tvp);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
+				unsigned flags);
+asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
+
+/* obsolete: fs/readdir.c */
+asmlinkage long compat_sys_old_readdir(unsigned int fd,
+				       struct compat_old_linux_dirent __user *,
+				       unsigned int count);
+
+/* obsolete: fs/select.c */
+asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
+
+/* obsolete: ipc */
+asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+
+/* obsolete: kernel/signal.c */
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
+				       compat_old_sigset_t __user *oset);
+#endif
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+asmlinkage long compat_sys_sigaction(int sig,
+				     const struct compat_old_sigaction __user *act,
+				     struct compat_old_sigaction __user *oact);
+#endif
+
+/* obsolete: net/socket.c */
+asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
+
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question.
For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
+/**
+ * ns_to_old_timeval32 - Compat version of ns_to_timeval
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the old_timeval32 representation of the nsec parameter.
+ */
+static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
+{
+	struct timeval tv;
+	struct old_timeval32 ctv;
+
+	tv = ns_to_timeval(nsec);
+	ctv.tv_sec = tv.tv_sec;
+	ctv.tv_usec = tv.tv_usec;
+
+	return ctv;
+}
+
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+		     struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+			  struct compat_statfs64 __user * buf);
+
+#else /* !CONFIG_COMPAT */
+
+#define is_compat_task() (0)
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
+static inline bool in_compat_syscall(void) { return false; }
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
new file mode 100644
index 0000000..333a669
--- /dev/null
+++ b/include/linux/compiler-clang.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/* Compiler specific definitions for Clang compiler */
+
+#define uninitialized_var(x) x = *(&(x))
+
+/* same as gcc, this was present in clang-2.6 so we can assume it works
+ * with any version that can compile the kernel
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+#define __SANITIZE_ADDRESS__
+#define __no_sanitize_address \
+		__attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __no_sanitize_address
+#endif
+
+/*
+ * Not all versions of clang implement the type-generic versions
+ * of the builtin overflow checkers. Fortunately, clang implements
+ * __has_builtin allowing us to avoid awkward version
+ * checks. Unfortunately, we don't know which version of gcc clang
+ * pretends to be, so the macro may or may not be defined.
+ */
+#if __has_builtin(__builtin_mul_overflow) && \
+    __has_builtin(__builtin_add_overflow) && \
+    __has_builtin(__builtin_sub_overflow)
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
+
+/* The following are for compatibility with GCC, from compiler-gcc.h,
+ * and may be redefined here because they should not be shared with other
+ * compilers, like ICC.
+ */
+#define barrier() __asm__ __volatile__("" : : : "memory")
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
new file mode 100644
index 0000000..77db11a
--- /dev/null
+++ b/include/linux/compiler-gcc.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif + +/* + * Common definitions for all gcc versions go here. + */ +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) + + + +#if GCC_VERSION < 40600 +# error Sorry, your compiler is too old - please upgrade it. +#endif + +/* Optimization barrier */ +/* The "volatile" is due to gcc bugs */ + +#define barrier() __asm__ __volatile__("": : :"memory") +/* + * This version is i.e. to prevent dead stores elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") + +/* + * This macro obfuscates arithmetic on a variable address so that gcc + * shouldn't recognize the original var, and make assumptions about it. + * + * This is needed because the C standard makes it undefined to do + * pointer arithmetic on "objects" outside their boundaries and the + * gcc optimizers assume this is the case. In particular they + * assume such arithmetic does not wrap. + * + * A miscompilation has been observed because of this on PPC. + * To work around it we hide the relationship of the pointer and the object + * using this macro. + * + * Versions of the ppc64 compiler before 4.1 had a bug where use of + * RELOC_HIDE could trash r30. The bug can be worked around by changing + * the inline assembly constraint from =g to =r, in this particular + * case either is valid. + */ +#define RELOC_HIDE(ptr, off) \ +({ \ + unsigned long __ptr; \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); \ +}) + +/* + * A trick to suppress uninitialized variable warning without generating any + * code + */ +#define uninitialized_var(x) x = x + +#ifdef CONFIG_RETPOLINE +#define __noretpoline __attribute__((__indirect_branch__("keep"))) +#endif + +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) + +#define __compiletime_warning(message) __attribute__((__warning__(message))) +#define __compiletime_error(message) __attribute__((__error__(message))) + +#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) +#define __latent_entropy __attribute__((latent_entropy)) +#endif + +#if defined __LCC__ /* MCST */ +#define unreachable() __builtin_unreachable() +#else +/* + * calling noreturn functions, __builtin_unreachable() and __builtin_trap() + * confuse the stack allocation in gcc, leading to overly large stack + * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 + * + * Adding an empty inline assembly before it works around the problem + */ +#define barrier_before_unreachable() asm volatile("") + +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. 
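+ *
+ * Illustrative use after a hypothetical asm tail-jump (the target label
+ * 'do_reset' is made up for the example):
+ *
+ *	asm volatile("jmp do_reset");
+ *	unreachable();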
+ */
+#define unreachable() \
+	do {					\
+		annotate_unreachable();		\
+		barrier_before_unreachable();	\
+		__builtin_unreachable();	\
+	} while (0)
+#endif /* __LCC__ */
+
+#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
+#define __randomize_layout __attribute__((randomize_layout))
+#define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+#define randomized_struct_fields_start	struct {
+#define randomized_struct_fields_end	} __randomize_layout;
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#if GCC_VERSION >= 40800
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
+#elif GCC_VERSION >= 50000
+#define KASAN_ABI_VERSION 4
+#elif GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
+
+#if __has_attribute(__no_sanitize_address__)
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#else
+#define __no_sanitize_address
+#endif
+
+#if GCC_VERSION >= 50100
+# if !defined(__LCC__)
+/* bug #114613 */
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+# endif
+#endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+	__diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore	ignored
+#define __diag_GCC_warn		warning
+#define __diag_GCC_error	error
+
+#define __diag_str1(s)		#s
+#define __diag_str(s)		__diag_str1(s)
+#define __diag(s)		_Pragma(__diag_str(GCC diagnostic s))
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s)		__diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
+
+#if defined(CONFIG_MCST) && defined(__LCC__)
+	/* lcc bug #121410 workaround */
+#define __no_fgcse
+#else
+#define __no_fgcse __attribute__((optimize("-fno-gcse")))
+#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
new file mode 100644
index 0000000..b17f3cd
--- /dev/null
+++ b/include/linux/compiler-intel.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#ifdef __ECC
+
+/* Compiler specific definitions for Intel ECC compiler */
+
+#include <asm/intrinsics.h>
+
+/* Intel ECC compiler doesn't support gcc specific asm stmts.
+ * It uses intrinsics to do the equivalent things.
+ */
+
+#define barrier() __memory_barrier()
+#define barrier_data(ptr) barrier()
+
+#define RELOC_HIDE(ptr, off)					\
+  ({ unsigned long __ptr;					\
+     __ptr = (unsigned long) (ptr);				\
+    (typeof(ptr)) (__ptr + (off)); })
+
+/* This should act as an optimization barrier on var.
+ * Given that this compiler does not have inline assembly, a compiler barrier
+ * is the best we can do.
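+ *
+ * Illustrative use, e.g. hiding the accumulator of a constant-time
+ * comparison from value tracking ('neq' is a hypothetical variable):
+ *
+ *	OPTIMIZER_HIDE_VAR(neq);
+ *	return neq != 0;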
+ */ +#define OPTIMIZER_HIDE_VAR(var) barrier() + +#endif + +/* icc has this, but it's called _bswap16 */ +#define __HAVE_BUILTIN_BSWAP16__ +#define __builtin_bswap16 _bswap16 diff --git a/include/linux/compiler.h b/include/linux/compiler.h new file mode 100644 index 0000000..d8f0050 --- /dev/null +++ b/include/linux/compiler.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_H +#define __LINUX_COMPILER_H + +#include + +#ifndef __ASSEMBLY__ + +#ifdef __KERNEL__ + +/* + * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code + * to disable branch tracing on a per file basis. + */ +#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); + +#define likely_notrace(x) __builtin_expect(!!(x), 1) +#define unlikely_notrace(x) __builtin_expect(!!(x), 0) + +#define __branch_check__(x, expect, is_constant) ({ \ + long ______r; \ + static struct ftrace_likely_data \ + __aligned(4) \ + __section(_ftrace_annotated_branch) \ + ______f = { \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ + }; \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ + ______r; \ + }) + +/* + * Using __builtin_constant_p(x) to ignore cases where the return + * value is always the same. This idea is taken from a similar patch + * written by Daniel Walker. + */ +# ifndef likely +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) +# endif +# ifndef unlikely +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) +# endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +/* + * "Define 'is'", Bill Clinton + * "Define 'if'", Steven Rostedt + */ +#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) ) + +#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond)) + +#define __trace_if_value(cond) ({ \ + static struct ftrace_branch_data \ + __aligned(4) \ + __section(_ftrace_branch) \ + __if_trace = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + (cond) ? 
\ + (__if_trace.miss_hit[1]++,1) : \ + (__if_trace.miss_hit[0]++,0); \ +}) + +#endif /* CONFIG_PROFILE_ALL_BRANCHES */ + +#else +#ifdef CONFIG_MCST +/* Wait for #113288 before dropping __builtin_expect_prob() */ +# ifdef __LCC__ +# if __LCC__ > 125 || __LCC__ == 125 && __LCC_MINOR__ >= 9 +# define likely(x) __builtin_expect_with_probability(!!(x), 1, 0.9999) +# define unlikely(x) __builtin_expect_with_probability(!!(x), 1, 0.0001) +# else +# define likely(x) __builtin_expect_prob(!!(x), 0.9999) +# define unlikely(x) __builtin_expect_prob(!!(x), 0.0001) +# endif +# elif __GNUC__ >= 9 +# define likely(x) __builtin_expect_with_probability(!!(x), 1, 0.9999) +# define unlikely(x) __builtin_expect_with_probability(!!(x), 1, 0.0001) +# else +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +# endif +#else +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#endif +#endif + +/* Optimization barrier */ +#ifndef barrier +# define barrier() __memory_barrier() +#endif + +#ifndef barrier_data +# define barrier_data(ptr) barrier() +#endif + +/* workaround for GCC PR82365 if needed */ +#ifndef barrier_before_unreachable +# define barrier_before_unreachable() do { } while (0) +#endif + +/* Unreachable code */ +#ifdef CONFIG_STACK_VALIDATION +/* + * These macros help objtool understand GCC code flow for unreachable code. + * The __COUNTER__ based labels are a hack to make each instance of the macros + * unique, to convince GCC not to merge duplicate inline asm statements. + */ +#define annotate_reachable() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#define annotate_unreachable() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#define ASM_UNREACHABLE \ + "999:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + +/* Annotate a C jump table to allow objtool to follow the code flow */ +#define __annotate_jump_table __section(.rodata..c_jump_table) + +#else +#define annotate_reachable() +#define annotate_unreachable() +#define __annotate_jump_table +#endif + +#ifndef ASM_UNREACHABLE +# define ASM_UNREACHABLE +#endif +#ifndef unreachable +# define unreachable() do { \ + annotate_unreachable(); \ + __builtin_unreachable(); \ +} while (0) +#endif + +/* + * KENTRY - kernel entry point + * This can be used to annotate symbols (functions or data) that are used + * without their linker symbol being referenced explicitly. For example, + * interrupt vector handlers, or functions in the kernel image that are found + * programatically. + * + * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those + * are handled in their own way (with KEEP() in linker scripts). + * + * KENTRY can be avoided if the symbols in question are marked as KEEP() in the + * linker script. For example an architecture could KEEP() its entire + * boot/exception vector code rather than annotate each function and data. 
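+ *
+ * Illustrative use for a handler referenced only from assembly
+ * (my_vector_entry is hypothetical):
+ *
+ *	void my_vector_entry(void);
+ *	KENTRY(my_vector_entry);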
+ */
+#ifndef KENTRY
+# define KENTRY(sym)						\
+	extern typeof(sym) sym;					\
+	static const unsigned long __kentry_##sym		\
+	__used							\
+	__section("___kentry" "+" #sym )			\
+	= (unsigned long)&sym;
+#endif
+
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off)					\
+  ({ unsigned long __ptr;					\
+     __ptr = (unsigned long) (ptr);				\
+    (typeof(ptr)) (__ptr + (off)); })
+#endif
+
+#ifndef OPTIMIZER_HIDE_VAR
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var)						\
+	__asm__ ("" : "=r" (var) : "0" (var))
+#endif
+
+/* Not-quite-unique ID. */
+#ifndef __UNIQUE_ID
+# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
+#endif
+
+#include <uapi/linux/types.h>
+
+#define __READ_ONCE_SIZE						\
+({									\
+	switch (size) {							\
+	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
+	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
+	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
+	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
+	default:							\
+		barrier();						\
+		__builtin_memcpy((void *)res, (const void *)p, size);	\
+		barrier();						\
+	}								\
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+static __no_kasan_or_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There are at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of the
+ * variable '__u' allocated on the stack.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
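+ *
+ * An illustrative case-(1) use, where 'ready' is a hypothetical field
+ * shared with an interrupt handler:
+ *
+ *	WRITE_ONCE(ctx->ready, 1);		(producer)
+ *
+ *	while (!READ_ONCE(ctx->ready))		(consumer)
+ *		cpu_relax();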
+ */ +#include +#include + +#ifdef __LCC__ /* bug # 84179 */ +# ifndef barrier_preemption +# define barrier_preemption() +# endif +#define __READ_ONCE(x, check) \ +({ \ + typeof(x) __dummy __attribute__((unused)); \ + union { typeof(__dummy) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ + /* Forbid lcc to move per-cpu global registers around */ \ + barrier_preemption(); \ + __u.__val; \ +}) +#else /* __LCC__ */ +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ + __u.__val; \ +}) +#endif /* __LCC__ */ +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. + */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) + +static __no_kasan_or_inline +unsigned long read_word_at_a_time(const void *addr) +{ + kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} + +#ifdef __LCC__ /* bug # 84179 */ +#define WRITE_ONCE(x, val) \ +({ \ + typeof(x) __dummy __attribute__((unused)); \ + union { typeof(__dummy) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(x)) (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#else /* __LCC__ */ +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(x)) (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#endif /* __LCC__ */ + +#endif /* __KERNEL__ */ + +/* + * Force the compiler to emit 'sym' as a symbol, so that we can reference + * it from inline assembler. Necessary in case 'sym' could be inlined + * otherwise, or eliminated entirely due to lack of references that are + * visible to the compiler. 
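+ *
+ * Illustrative use ('helper' is a hypothetical function only referenced
+ * from inline assembly elsewhere):
+ *
+ *	static void helper(void) { }
+ *	__ADDRESSABLE(helper);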
+ */ +#define __ADDRESSABLE(sym) \ + static void * __section(.discard.addressable) __used \ + __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; + +/** + * offset_to_ptr - convert a relative memory offset to an absolute pointer + * @off: the address of the 32-bit offset value + */ +static inline void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} + +#endif /* __ASSEMBLY__ */ + +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +#endif + +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (!(condition)) \ + prefix ## suffix(); \ + } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) + +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. + */ +#define prevent_tail_call_optimization() mb() + +#endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h new file mode 100644 index 0000000..cdf0165 --- /dev/null +++ b/include/linux/compiler_attributes.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_ATTRIBUTES_H +#define __LINUX_COMPILER_ATTRIBUTES_H + +/* + * The attributes in this file are unconditionally defined and they directly + * map to compiler attribute(s), unless one of the compilers does not support + * the attribute. In that case, __has_attribute is used to check for support + * and the reason is stated in its comment ("Optional: ..."). + * + * Any other "attributes" (i.e. those that depend on a configuration option, + * on a compiler, on an architecture, on plugins, on other attributes...) + * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h). + * The intention is to keep this file as simple as possible, as well as + * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks). + * + * This file is meant to be sorted (by actual attribute name, + * not by #define identifier). Use the __attribute__((__name__)) syntax + * (i.e. with underscores) to avoid future collisions with other macros. + * Provide links to the documentation of each supported compiler, if it exists. + */ + +/* + * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. 
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
new file mode 100644
index 0000000..cdf0165
--- /dev/null
+++ b/include/linux/compiler_attributes.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_ATTRIBUTES_H
+#define __LINUX_COMPILER_ATTRIBUTES_H
+
+/*
+ * The attributes in this file are unconditionally defined and they directly
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
+ *
+ * Any other "attributes" (i.e. those that depend on a configuration option,
+ * on a compiler, on an architecture, on plugins, on other attributes...)
+ * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
+ *
+ * This file is meant to be sorted (by actual attribute name,
+ * not by #define identifier). Use the __attribute__((__name__)) syntax
+ * (i.e. with underscores) to avoid future collisions with other macros.
+ * Provide links to the documentation of each supported compiler, if it exists.
+ */
+
+/*
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
+ *
+ * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
+ * depending on the compiler used to build it; however, these attributes have
+ * no semantic effects for sparse, so it does not matter. Also note that,
+ * in order to avoid sparse's warnings, even the unsupported ones must be
+ * defined to 0.
+ */
+#ifndef __has_attribute
+# define __has_attribute(x) __GCC4_has_attribute_##x
+# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___copy__ 0
+# define __GCC4_has_attribute___designated_init__ 0
+# define __GCC4_has_attribute___externally_visible__ 1
+# define __GCC4_has_attribute___noclone__ 1
+# define __GCC4_has_attribute___nonstring__ 0
+# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___fallthrough__ 0
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
+ */
+#define __alias(symbol) __attribute__((__alias__(#symbol)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
+ */
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __aligned_largest __attribute__((__aligned__))
+
+/*
+ * Note: users of __always_inline currently do not write "inline" themselves,
+ * which seems to be required by gcc to apply the attribute according
+ * to its docs (and also "warning: always_inline function might not be
+ * inlinable [-Wattributes]" is emitted).
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
+ * clang: mentioned
+ */
+#define __always_inline inline __attribute__((__always_inline__))
+
+/*
+ * The second argument is optional (default 0), so we use a variadic macro
+ * to make the shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ *
+ * Optional: only supported since gcc >= 4.9
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
+ */
+#if __has_attribute(__assume_aligned__)
+# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#else
+# define __assume_aligned(a, ...)
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ */
+#define __cold __attribute__((__cold__))
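As a hedged illustration of the attributes defined so far (struct stats, stats_hits() and report_failure() are made-up names, not kernel API):

struct stats {
	unsigned long hits;
	unsigned long misses;
} __aligned(64);	/* start each instance on a 64-byte boundary */

/* Expands to 'inline __attribute__((__always_inline__))', so the
 * helper is inlined even when optimizations are off. */
static __always_inline unsigned long stats_hits(const struct stats *s)
{
	return s->hits;
}

/* __cold moves this out of the hot path; branches leading to calls
 * of this function are treated as unlikely. */
static void __cold report_failure(int err)
{
	(void)err;	/* error handling elided in this sketch */
}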
+
+/*
+ * Note the long name.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+ */
+#define __attribute_const__ __attribute__((__const__))
+
+/*
+ * Optional: only supported since gcc >= 9
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
+ */
+#if __has_attribute(__copy__)
+# define __copy(symbol) __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
+/*
+ * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
+ * attribute warnings entirely and for good") for more information.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
+ */
+#define __deprecated
+
+/*
+ * Optional: only supported since gcc >= 5.1
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
+ */
+#if __has_attribute(__designated_init__)
+# define __designated_init __attribute__((__designated_init__))
+#else
+# define __designated_init
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
+ */
+#if __has_attribute(__externally_visible__)
+# define __visible __attribute__((__externally_visible__))
+#else
+# define __visible
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#format
+ */
+#define __printf(a, b) __attribute__((__format__(printf, a, b)))
+#define __scanf(a, b) __attribute__((__format__(scanf, a, b)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
+ */
+#define __gnu_inline __attribute__((__gnu_inline__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ */
+#define __malloc __attribute__((__malloc__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
+ */
+#define __mode(x) __attribute__((__mode__(x)))
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
+ */
+#if __has_attribute(__noclone__)
+# define __noclone __attribute__((__noclone__))
+#else
+# define __noclone
+#endif
+
+/*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ *   break;
+ *   fallthrough;
+ *   goto <label>;